Example #1
def debug_me():
    # Define network
    net = Net()
    print(net)

    data_transform = transforms.Compose(
        [Rescale(250), RandomCrop(224),
         Normalize(), ToTensor()])  # ToTensor assumed; the DataLoader below feeds the net tensor batches

    aww_dataset = FacialKeypointsDataset(
        csv_file='data/aww_frames_keypoints.csv',
        root_dir='data/aww/',
        transform=data_transform)

    sample = aww_dataset[0]
    print(sample['image'].shape, sample['keypoints'].shape)
    print(np.max(sample['keypoints']))

    aww_loader = DataLoader(aww_dataset,
                            batch_size=10,
                            shuffle=True,
                            num_workers=4)

    aww_images, aww_outputs, gt_pts = net_sample_output(net, aww_loader)

    visualize_output(aww_images, aww_outputs, gt_pts, 1)
Example #2
def transform_data(self, csv_file, root_dir):
    # create the transformed dataset
    transformed_dataset = FacialKeypointsDataset(
        csv_file=csv_file,
        root_dir=root_dir,
        transform=self.data_transform)
    return transformed_dataset
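
A hypothetical call site for the helper above (a sketch only; the owning class is not shown in this snippet, so DataPipeline and its data_transform attribute are assumed names):

pipeline = DataPipeline()  # hypothetical class that defines transform_data()
train_data = pipeline.transform_data('data/training_frames_keypoints.csv',
                                     'data/training/')
print('Number of training images: ', len(train_data))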
Example #3
def main():
    hyp_batch_size = 20
    net = Net2()
    model_dir = '../saved_models/'
    model_name = 'keypoints_model_2.pt'
    data_transform = transforms.Compose([
        Rescale(256),
        RandomCrop(224),
        Normalize(),
        ToTensor()
    ])

    # retrieve the saved model
    net_state_dict = torch.load(model_dir+model_name)
    net.load_state_dict(net_state_dict)
    
    # load the test data
    test_dataset = FacialKeypointsDataset(csv_file='../files/test_frames_keypoints.csv',
                                          root_dir='../files/test/',
                                          transform=data_transform)
    # load test data in batches
    batch_size = hyp_batch_size

    test_loader = DataLoader(test_dataset, 
                            batch_size=batch_size,
                            shuffle=True, 
                            num_workers=0)

    test_images, test_outputs, gt_pts = net_sample_output(test_loader, net)

    print(test_images.data.size())
    print(test_outputs.data.size())
    print(gt_pts.size())
    # Get the weights in the first conv layer, "conv1"
    # if necessary, change this to reflect the name of your first conv layer
    weights1 = net.conv1.weight.data

    w = weights1.numpy()

    filter_index = 0

    print(w[filter_index][0])
    print(w[filter_index][0].shape)

    # display the filter weights
    plt.imshow(w[filter_index][0], cmap='gray')
    #plt.show()
    ##TODO: load in and display any image from the transformed test dataset
    i = 1
    show_image(test_images, w, i)
Example #4
def initialize_test_loader(transform):
    test_dataset = FacialKeypointsDataset(
        csv_file='./data/test_frames_keypoints.csv',
        root_dir='./data/test/',
        transform=transform)
    print('Number of test images: ', len(test_dataset))

    assert test_dataset[0]['image'].size() == desired_image_shape, \
        "Wrong test image dimension"
    assert test_dataset[0]['keypoints'].size() == desired_keypoints_shape, \
        "Wrong test keypoints dimension"

    test_loader = DataLoader(test_dataset,
                             batch_size=evaluate_batch_size,
                             shuffle=True,
                             num_workers=0)

    return test_loader
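
A minimal sketch of calling the helper above; it assumes the module-level names desired_image_shape, desired_keypoints_shape, and evaluate_batch_size referenced inside it are already defined:

data_transform = transforms.Compose(
    [Rescale(250), RandomCrop(224), Normalize(), ToTensor()])
test_loader = initialize_test_loader(data_transform)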
Example #5
def __init__(self, root_dir=None):
    self.data_root_dir = root_dir or './data/'
    self.training_csv = os.path.join(self.data_root_dir,
                                     'training_frames_keypoints.csv')
    self.training_data_dir = os.path.join(self.data_root_dir, 'training/')
    self.test_csv = os.path.join(self.data_root_dir,
                                 'test_frames_keypoints.csv')
    self.test_data_dir = os.path.join(self.data_root_dir, 'test/')
    self.key_pts_frame = pd.read_csv(self.training_csv)
    self.face_dataset = FacialKeypointsDataset(
        csv_file=self.training_csv, root_dir=self.training_data_dir)
    self.face_dataset_len = len(self.face_dataset)
    # define the data transform
    # order matters! i.e. rescaling should come before a smaller crop
    self.data_transform = transforms.Compose(
        [Rescale(250),
         RandomCrop(224),
         Normalize(),
         ToTensor()])
    self.transformed_training_data = self.transform_data(
        self.training_csv, self.training_data_dir)
    self.transformed_test_data = self.transform_data(
        self.test_csv, self.test_data_dir)
Example #6
from models import Net

net = Net()
net.load_state_dict(
    torch.load(
        r'C:\Users\Semanti Basu\Documents\OneDrive_2020-02-19\3D Ceaser dataset\Image and point generation\Image and point generation\frontaltrainedmodel_10epoch.pth'
    ))

## prepare the net for testing (evaluation mode)
net.eval()
data_transform = transforms.Compose(
    [Rescale(225), RandomCrop(224),
     Normalize(), ToTensor()])
transformed_dataset = FacialKeypointsDataset(
    csv_file=
    r'C:\Users\Semanti Basu\Documents\OneDrive_2020-02-19\3D Ceaser dataset\Image and point generation\Image and point generation\frontalpoints.csv',
    root_dir=
    r'C:\Users\Semanti Basu\Documents\OneDrive_2020-02-19\3D Ceaser dataset\Image and point generation\Image and point generation\ceasar_mat',
    transform=data_transform)
# load training data in batches
batch_size = 10

train_loader = DataLoader(transformed_dataset,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=0)


def net_sample_output():
    # iterate through the test dataset
    for i, sample in enumerate(train_loader):
        # (loop body truncated in the original snippet; a typical version
        # would run net on sample['image'] and collect the predictions)
        pass
Example #7
########## load the csv
# order matters! i.e. rescaling should come before a smaller crop
data_transform = transforms.Compose([
    Rescale(224),
    # RandomCrop(222),
    Normalize(),
    ToTensor()
])
assert (data_transform is not None), 'Define a data_transform'
# create the transformed dataset
transformed_dataset = FacialKeypointsDataset(
    csv_file=('/home/jonathan/Project_Eye/data/try_harder_cleaned.csv'),
    root_dir='/home/jonathan/Project_Eye/data/training/',
    transform=data_transform)
print('Number of images: ', len(transformed_dataset))
# iterate through the transformed dataset and print some stats about the first few samples
for i in range(4):
    sample = transformed_dataset[i]
    print(i, sample['image'].size(), sample['keypoints'].size())

# ## Batching and loading data
#
# load training data in batches
batch_size = 1
train_loader = DataLoader(transformed_dataset,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=0)  # truncated in the original; trailing arguments assumed
Example #8
package_dir = os.path.dirname(os.path.abspath(__file__))

net = Net()
# net.load_state_dict(torch.load(package_dir + '/saved_models/model3.pt'))
print(net)


data_transform = transforms.Compose([Rescale(250),
                                     RandomCrop(224),
                                     Normalize(),
                                     ToTensor()])

##########LOAD TRAIN DATASET
transformed_dataset = FacialKeypointsDataset(csv_file=package_dir + '/data/training_frames_keypoints.csv',
                                             root_dir=package_dir + '/data/training/',
                                             transform=data_transform)

batch_size = 30
train_loader = DataLoader(transformed_dataset,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=4)

##########LOAD TEST DATASET
test_dataset = FacialKeypointsDataset(csv_file=package_dir + '/data/test_frames_keypoints.csv',
                                      root_dir=package_dir + '/data/test/',
                                      transform=data_transform)

test_loader = DataLoader(test_dataset,
                         batch_size=batch_size,
                         shuffle=True,
                         num_workers=0)  # truncated in the original; trailing arguments assumed

Example #9
## TODO: define the data_transform using transforms.Compose([all tx's, . , .])
# order matters! i.e. rescaling should come before a smaller crop
data_transform = transforms.Compose(
    [Rescale(227), RandomCrop(224),
     Normalize(), ToTensor()])
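# illustrative note, not from the original snippet: Rescale(227) makes the
# shorter image side 227 px, so a RandomCrop(224) window always fits;
# cropping before rescaling could fail on images smaller than 224 px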

# testing that you've defined a transform
assert (data_transform is not None), 'Define a data_transform'


# create the transformed dataset
transformed_dataset = FacialKeypointsDataset(
    csv_file=
    'D:/Computer_vision_udacity/P1_Facial_Keypoints-master/data/training_frames_keypoints.csv',
    root_dir=
    'D:/Computer_vision_udacity/P1_Facial_Keypoints-master/data/training/',
    transform=data_transform)

print('Number of images: ', len(transformed_dataset))

# iterate through the transformed dataset and print some stats about the first few samples
for i in range(4):
    sample = transformed_dataset[i]
    print(i, sample['image'].size(), sample['keypoints'].size(),
          sample['keypoints'])

# ## Batching and loading data
#
# Next, having defined the transformed dataset, we can use PyTorch's DataLoader class to load the training data in batches of any size and to shuffle the data for training the model. You can read more about the parameters of the DataLoader in [this documentation](http://pytorch.org/docs/master/data.html).
#
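
For instance, a minimal sketch (not part of the original snippet) that batches the transformed_dataset defined above and inspects the first batch:

train_loader = DataLoader(transformed_dataset,
                          batch_size=10,
                          shuffle=True,
                          num_workers=0)

for batch_i, batch in enumerate(train_loader):
    print(batch_i, batch['image'].size(), batch['keypoints'].size())
    break  # inspect just the first batch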
Example #10
File: train.py Project: mxs30443/ANN_FKD
from data_load import Rescale, RandomCrop, Normalize, ToTensor

## TODO: define the data_transform using transforms.Compose([all tx's, . , .])
# order matters! i.e. rescaling should come before a smaller crop

data_transform = transforms.Compose(
    [Rescale(250), RandomCrop(224),
     Normalize(), ToTensor()])

# testing that you've defined a transform
assert (data_transform is not None), 'Define a data_transform'

# create the transformed dataset
transformed_dataset = FacialKeypointsDataset(
    csv_file=
    "D:\\Users\\Tsvetan\\FootDataset\\person_keypoints_train2017_foot_v1\\NEWOUT.csv",
    root_dir=
    "D:\\Users\\Tsvetan\\FootDataset\\person_keypoints_train2017_foot_v1\\out\\",
    transform=data_transform)

print('Number of images: ', len(transformed_dataset))

# iterate through the transformed dataset and print some stats about the first few samples
for i in range(4):
    sample = transformed_dataset[i]
    print(i, sample['image'].size(), sample['keypoints'].size())

# load training data in batches
batch_size = 32

train_loader = DataLoader(transformed_dataset,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=0)  # truncated in the original; trailing arguments assumed
Example #11
def train(cfg):

    # make run_dir with date

    d = datetime.datetime.now()
    run_dir = pjoin(cfg.out_dir, 'exp_{:%Y-%m-%d_%H-%M}'.format(d))

    if not os.path.exists(run_dir):
        os.makedirs(run_dir)

    with open(pjoin(run_dir, 'cfg.yml'), 'w') as outfile:
        yaml.dump(cfg.__dict__, stream=outfile, default_flow_style=False)

    # generate tensorboard object
    writer = SummaryWriter(run_dir)

    net = UNet(cfg.out_channels,
               in_channels=cfg.in_channels,
               depth=cfg.depth,
               merge_mode=cfg.merge_mode)

    #criterion = nn.CrossEntropyLoss()
    criterion = nn.MSELoss()

    # the SGD optimizer below was immediately overwritten by Adam, so it is
    # kept commented out here
    #optimizer = optim.SGD(net.parameters(), lr=cfg.lr, momentum=cfg.momentum)
    optimizer = optim.Adam(params=net.parameters(), lr=0.0025, weight_decay=0.00001)
    #softm = nn.Softmax(dim=1)
    softm = nn.Sigmoid()

    # make augmenter
    transf = iaa.Sequential([
        iaa.SomeOf(3, [
            iaa.Affine(rotate=iap.Uniform(-cfg.aug_rotate, cfg.aug_rotate)),
            iaa.Affine(shear=iap.Uniform(-cfg.aug_shear, cfg.aug_shear)),
            iaa.Fliplr(1.),
            iaa.Flipud(1.),
            iaa.GaussianBlur(sigma=iap.Uniform(0.0, cfg.aug_gaussblur))
        ]),
        iaa.Resize(cfg.in_shape), rescale_augmenter
    ])

    base_loader = FacialKeypointsDataset(csv_file=cfg.csv_file,
                                         root_dir=cfg.root_dir,
                                         sig_kp=cfg.sig_kp,
                                         transform=transf)

    # build train, val and test sets randomly with given split ratios
    idxs = np.arange(len(base_loader))
    np.random.shuffle(idxs)
    train_idxs = idxs[:int(len(base_loader) * cfg.train_split)]
    others_idxs = idxs[int(len(base_loader) * cfg.train_split):]
    val_idxs = others_idxs[:int(others_idxs.size * cfg.val_split)]
    test_idxs = others_idxs[int(others_idxs.size * cfg.val_split):]

    train_loader = torch.utils.data.Subset(base_loader, train_idxs)
    val_loader = torch.utils.data.Subset(base_loader, val_idxs)
    test_loader = torch.utils.data.Subset(base_loader, test_idxs)

    loaders = {'train': DataLoader(train_loader,
                                   batch_size=cfg.batch_size,
                                   num_workers=cfg.n_workers),
               'val': DataLoader(val_loader,
                                 batch_size=cfg.batch_size,
                                 num_workers=cfg.n_workers),
               'test': DataLoader(test_loader,
                                  batch_size=cfg.batch_size,
                                  num_workers=cfg.n_workers)}
    # convert batch to device
    device = torch.device('cuda' if cfg.cuda else 'cpu')

    net.to(device)

    batch_to_device = lambda batch: {
        k: v.to(device) if (isinstance(v, torch.Tensor)) else v
        for k, v in batch.items()
    }

    best_loss = float('inf')

    for epoch in range(cfg.n_epochs):  # loop over the dataset multiple times

        for phase in loaders.keys():
            if phase == 'train':
                net.train()

            else:
                net.eval()  # Set model to evaluate mode

            running_loss = 0.0
            # train on batches of data, assumes you already have train_loader
            pbar = tqdm.tqdm(total=len(loaders[phase]))
            for i, data in enumerate(loaders[phase]):
                data = batch_to_device(data)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    #import pdb; pdb.set_trace()

                    out = softm(net(data['image'].float()))

                    #data_truth = softm(torch.squeeze(data['truth'])) ## here I am not sure, bc the entropy fct already has softmax
                    #data_truth = data_truth.long().reshape(4, 224*224)
                    #out = out.reshape(4, 1, 224*224)
                    # data_truth should have shape 4, 244,244
                    # input matrix is in the shape: (Minibatch, Classes, H, W)
                    # the target is in size (Minibatch, H, W)
                    loss = criterion(out, data['truth'].float())
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                running_loss += loss.cpu().detach().numpy()
                loss_ = running_loss / ((i + 1) * cfg.batch_size)
                pbar.set_description('[{}] loss: {:.4f}'.format(phase, loss_))
                pbar.update(1)

            pbar.close()
            writer.add_scalar('{}/loss'.format(phase),
                              loss_,
                              epoch)

        # make preview images ('phase' still holds the last loaders key,
        # 'test', after the phase loop above)
        if phase == 'test':
            data = next(iter(loaders[phase]))
            data = batch_to_device(data)
            pred_ = softm(net(data['image'].float())).cpu()

            im_ = data['image'].float().cpu().detach()
            im_ = [torch.cat(3*[im_[i, ...]]) for i in range(im_.shape[0])]
            truth_ = data['truth'].float().cpu()
            truth_ = [
                torch.cat([truth_[i, ...]])
                for i in range(truth_.shape[0])
            ]

            # normalize prediction maps in-place
            for b in range(pred_.shape[0]):
                for n in range(pred_.shape[1]):
                    pred_[b, n, ...] = (pred_[b, n, ...] - pred_[b, n, ...].min())
                    pred_[b, n, ...] = pred_[b, n, ...] / pred_[b, n, ...].max()

            # find max location on each channel of each batch element
            pos = []
            for b in range(pred_.shape[0]):
                pos.append([])
                for n in range(pred_.shape[1]):
                    idx_max = pred_[b, n, ...].argmax()
                    i, j = np.unravel_index(idx_max, pred_[b, n, ...].shape)
                    pos[-1].append((i, j))
                    # draw circle on image through numpy :(
                    # (in newer scikit-image, draw.circle was replaced by draw.disk)
                    rr, cc = draw.circle(i, j, 5, shape=im_[b][n, ...].shape)
                    im__ = np.rollaxis(im_[b].detach().numpy(), 0, 3)
                    im__[rr, cc, ...] = (1., 0., 0.)
                    im_[b] = torch.from_numpy(np.rollaxis(im__, -1, 0))

            pred_ = [torch.cat([pred_[i, ...]]) for i in range(pred_.shape[0])]
            #import pdb; pdb.set_trace()
            all_ = [
                tutls.make_grid([im_[i], truth_[i], pred_[i]],
                                nrow=len(pred_),
                                padding=10,
                                pad_value=1.)
                for i in range(len(truth_))
            ]
            all_ = torch.cat(all_, dim=1)
            writer.add_image('test/img', all_, epoch)

        # save checkpoint
        if phase == 'val':
            is_best = False
            if (loss_ < best_loss):
                is_best = True
                best_loss = loss_
            path = pjoin(run_dir, 'checkpoints')
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'model': net,
                    'best_loss': best_loss,
                    'optimizer': optimizer.state_dict()
                },
                is_best,
                path=path)
Example #12
def main():
    # Define network
    net = Net()
    print(net)

    data_transform = transforms.Compose(
        [Rescale(250), RandomCrop(224),
         Normalize(), ToTensor()])  # ToTensor assumed; the loaders below feed the net tensor batches

    train_dataset = FacialKeypointsDataset(
        csv_file='data/training_frames_keypoints.csv',
        root_dir='data/training/',
        transform=data_transform)

    # iterate through the transformed dataset and print some stats about the first few samples
    for i in range(4):
        sample = train_dataset[i]
        print(i, sample['image'].size(), sample['keypoints'].size())

    train_loader = DataLoader(train_dataset,
                              batch_size=10,
                              shuffle=True,
                              num_workers=4)

    test_dataset = FacialKeypointsDataset(
        csv_file='data/test_frames_keypoints.csv',
        root_dir='data/test/',
        transform=data_transform)

    test_loader = DataLoader(test_dataset,
                             batch_size=1,
                             shuffle=True,
                             num_workers=4)

    test_images, test_outputs, gt_pts = net_sample_output(net, test_loader)

    # print out the dimensions of the data to see if they make sense
    print(test_images.data.size())
    print(test_outputs.data.size())
    print(gt_pts.size())

    # call it
    visualize_output(test_images, test_outputs, gt_pts, 1)

    criterion = nn.MSELoss()
    optimizer = optim.Adam(net.parameters(),
                           lr=0.001,
                           betas=(0.9, 0.999),
                           eps=1e-8)

    n_epochs = 2

    train_net(net, criterion, optimizer, train_loader, n_epochs)

    # get a sample of test data again
    test_images, test_outputs, gt_pts = net_sample_output(net, test_loader)

    print(test_images.data.size())
    print(test_outputs.data.size())
    print(gt_pts.size())

    model_dir = 'saved_models/'
    model_name = 'keypoints_model_1.pt'

    # after training, save your model parameters in the dir 'saved_models'
    torch.save(net.state_dict(), model_dir + model_name)

    weights1 = net.conv1.weight.data

    w = weights1.numpy()

    filter_index = 0

    print(w[filter_index][0])
    print(w[filter_index][0].shape)

    # display the filter weights
    plt.imshow(w[filter_index][0], cmap='gray')
Example #13
import matplotlib.pyplot as plt
import numpy as np

device = torch.device('cuda')
net = Net()

data_transform = transforms.Compose(
    [Rescale(256),
     RandomCrop(224),
     Normalize(),
     ToTensor()]
)

train_dataset = FacialKeypointsDataset(
    "./data/training_frames_keypoints.csv",
    "./data/training",
    data_transform
)

test_dataset = FacialKeypointsDataset(
    "./data/test_frames_keypoints.csv",
    "./data/test",
    data_transform
)

batch_size = 256
train_loader = DataLoader(
    train_dataset,
    batch_size=batch_size,
    shuffle=True
)
Example #14
File: train.py Project: zhang-x-z/QiaoQian
import torch
import torch.optim as optim
import torch.nn as nn
from models import Net
from torch.utils.data import DataLoader
from torchvision import transforms
from data_load import FacialKeypointsDataset
from data_load import Rescale, RandomCrop, Normalize, ToTensor

# load the data
data_transform = transforms.Compose(
    [Rescale(250), RandomCrop(224),
     Normalize(), ToTensor()])
transformed_dataset = FacialKeypointsDataset(
    csv_file=
    'C:/Users/76785/Downloads/Facial-Key-Point-Detection-CNN-master/data/training_frames_keypoints.csv',
    root_dir=
    'C:/Users/76785/Downloads/Facial-Key-Point-Detection-CNN-master/data/training',
    transform=data_transform)
batch_size = 10
train_loader = DataLoader(transformed_dataset,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=0)
test_dataset = FacialKeypointsDataset(
    csv_file=
    'C:/Users/76785/Downloads/Facial-Key-Point-Detection-CNN-master/data/test_frames_keypoints.csv',
    root_dir=
    'C:/Users/76785/Downloads/Facial-Key-Point-Detection-CNN-master/data/test/',
    transform=data_transform)
test_loader = DataLoader(test_dataset,
                         batch_size=batch_size,
                         shuffle=True,
                         num_workers=0)  # truncated in the original; trailing arguments assumed
Example #15
def main():
    #------------------------------------------------------------------------------------------------------------------
    # Hyperparameters
    hyp_epochs = 5
    hyp_batch_size = 20
    #hyp_optim = "SGD"
    hyp_optim = "Adam"
    #hyp_net = Net1()
    hyp_net = Net2()

    print("Hyperparameters")
    print("--------------")
    print("Epochs = ", hyp_epochs)
    print("Batch Size = ", hyp_batch_size)
    print("Optimizer = ", hyp_optim)
    print("--------------")
    ## TODO: Define the Net in models.py

    net = hyp_net
    print(net)

    ## TODO: define the data_transform using transforms.Compose([all tx's, . , .])
    # order matters! i.e. rescaling should come before a smaller crop
    data_transform = transforms.Compose(
        [Rescale(256), RandomCrop(224),
         Normalize(), ToTensor()])
    # testing that you've defined a transform
    assert (data_transform is not None), 'Define a data_transform'

    # create the transformed dataset
    transformed_dataset = FacialKeypointsDataset(
        csv_file='../files/training_frames_keypoints.csv',
        root_dir='../files/training/',
        transform=data_transform)

    print('Number of images: ', len(transformed_dataset))

    # iterate through the transformed dataset and print some stats about the first few samples
    for i in range(4):
        sample = transformed_dataset[i]
        print(i, sample['image'].size(), sample['keypoints'].size())

    # load training data in batches
    batch_size = hyp_batch_size

    train_loader = DataLoader(transformed_dataset,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=0)

    # load in the test data, using the dataset class
    # AND apply the data_transform you defined above

    # create the test dataset
    test_dataset = FacialKeypointsDataset(
        csv_file='../files/test_frames_keypoints.csv',
        root_dir='../files/test/',
        transform=data_transform)
    # load test data in batches
    batch_size = hyp_batch_size

    test_loader = DataLoader(test_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=0)

    # test the model on a batch of test images
    # call the above function
    # returns: test images, test predicted keypoints, test ground truth keypoints
    test_images, test_outputs, gt_pts = net_sample_output(test_loader, net)

    # print out the dimensions of the data to see if they make sense
    print(test_images.data.size())
    print(test_outputs.data.size())
    print(gt_pts.size())
    # visualize the output
    # by default this shows a batch of 10 images
    # call it
    _visualise = False
    if _visualise:
        visualize_output(test_images, test_outputs, gt_pts)

    ## TODO: Define the loss and optimization
    import torch.optim as optim

    criterion = nn.MSELoss()

    hyp_optimizer = None
    if hyp_optim == "Adam":
        hyp_optimizer = optim.Adam(net.parameters(), lr=0.001)

    if hyp_optim == "SGD":
        hyp_optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

    optimizer = hyp_optimizer
    # train your network
    n_epochs = hyp_epochs  # start small, and increase when you've decided on your model structure and hyperparams

    # in the Udacity workspace this call was wrapped in a context manager to
    # keep the connection alive while training; it is not part of pytorch
    train_net(n_epochs, train_loader, net, criterion, optimizer)

    # get a sample of test data again
    test_images, test_outputs, gt_pts = net_sample_output(test_loader, net)

    print(test_images.data.size())
    print(test_outputs.data.size())
    print(gt_pts.size())

    ## TODO: change the name to something unique for each new model
    model_dir = '../saved_models/'
    model_name = 'keypoints_model_2.pt'

    # after training, save your model parameters in the dir 'saved_models'
    torch.save(net.state_dict(), model_dir + model_name)
    # --------------------------------------------------------------------
    # To run the following code after retrieving an existing model,
    # you can do so in the resume.py file
    # --------------------------------------------------------------------

    # Get the weights in the first conv layer, "conv1"
    # if necessary, change this to reflect the name of your first conv layer
    weights1 = net.conv1.weight.data

    w = weights1.numpy()

    filter_index = 0

    print(w[filter_index][0])
    print(w[filter_index][0].shape)

    # display the filter weights
    plt.imshow(w[filter_index][0], cmap='gray')

    ##TODO: load in and display any image from the transformed test dataset
    i = 1
    show_image(test_images, w, i)
Example #16
# testing that you've defined a transform
assert (train_transform is not None and test_transform is not None), 'Define a data_transform'

# create the transformed dataset

if args["dataset"] == "Kaggle":
    X, y = load_KagggleDataset(split=args['split'],train_30=args['train30'],train_8=args['train8'])
    X_test, y_test = X[:300], y[:300]
    X_train, y_train = X[300:], y[300:]
    transformed_dataset = KagggleDataset(X_train, y_train, train_transform)
    test_dataset = KagggleDataset(X_test, y_test, test_transform)
    sub, div = 48., 48.
else:
    transformed_dataset = FacialKeypointsDataset(csv_file='./data/training_frames_keypoints.csv',
                                                 root_dir='./data/training/',
                                                 transform=train_transform)
    test_dataset = FacialKeypointsDataset(csv_file='./data/test_frames_keypoints.csv',
                                          root_dir='./data/test/',
                                          transform=test_transform)
    sub, div = 100., 50.
print('Number of images: ', len(transformed_dataset))

# iterate through the transformed dataset and print some stats about the first few samples
for i in range(4):
    sample = transformed_dataset[i]
    print(i, sample['image'].shape, sample['keypoints'].size())
# load training data in batches
batch_size = args["batch_size"]
train_loader = DataLoader(transformed_dataset,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=0)  # truncated in the original; trailing arguments assumed
Example #17
def train(cfg):

    # make run_dir with date
    # import pdb; pdb.set_trace()  # leftover debug breakpoint, disabled
    d = datetime.datetime.now()
    run_dir = pjoin(cfg.out_dir, 'exp_{:%Y-%m-%d_%H-%M}'.format(d))

    if not os.path.exists(run_dir):
        os.makedirs(run_dir)

    with open(pjoin(run_dir, 'cfg.yml'), 'w') as outfile:
        yaml.dump(cfg.__dict__, stream=outfile, default_flow_style=False)

    # generate tensorboard object
    writer = SummaryWriter(run_dir)

    net = UNet(cfg.out_channels,
               in_channels=cfg.in_channels,
               depth=cfg.depth,
               merge_mode=cfg.merge_mode)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=cfg.lr, momentum=cfg.momentum)


    # make augmenter
    transf = iaa.Sequential([
        iaa.SomeOf(3, [
            iaa.Affine(rotate=iap.Uniform(-cfg.aug_rotate, cfg.aug_rotate)),
            iaa.Affine(shear=iap.Uniform(-cfg.aug_shear, cfg.aug_shear)),
            iaa.Fliplr(1.),
            iaa.Flipud(1.),
            iaa.GaussianBlur(sigma=iap.Uniform(0.0, cfg.aug_gaussblur))
        ]),
        iaa.Resize(cfg.in_shape), rescale_augmenter
    ])

    base_loader = FacialKeypointsDataset(csv_file=cfg.csv_file,
                                         root_dir=cfg.root_dir,
                                         sig_kp=cfg.sig_kp,
                                         transform=transf)

    # build train, val and test sets randomly with given split ratios
    idxs = np.arange(len(base_loader))
    np.random.shuffle(idxs)
    train_idxs = idxs[:int(len(base_loader) * cfg.train_split)]
    others_idxs = idxs[int(len(base_loader) * cfg.train_split):]
    val_idxs = others_idxs[:int(others_idxs.size * cfg.val_split)]
    test_idxs = others_idxs[int(others_idxs.size * cfg.val_split):]

    train_loader = torch.utils.data.Subset(base_loader, train_idxs)
    val_loader = torch.utils.data.Subset(base_loader, val_idxs)
    test_loader = torch.utils.data.Subset(base_loader, test_idxs)

    loaders = {'train': DataLoader(train_loader,
                                   batch_size=cfg.batch_size,
                                   num_workers=cfg.n_workers),
               'val': DataLoader(val_loader,
                                 batch_size=cfg.batch_size,
                                 num_workers=cfg.n_workers),
               'test': DataLoader(test_loader,
                                  batch_size=cfg.batch_size,
                                  num_workers=cfg.n_workers)}
    # convert batch to device
    device = torch.device('cuda' if cfg.cuda else 'cpu')

    net.to(device)

    batch_to_device = lambda batch: {
        k: v.to(device) if (isinstance(v, torch.Tensor)) else v
        for k, v in batch.items()
    }

    best_loss = float('inf')

    for epoch in range(cfg.n_epochs):  # loop over the dataset multiple times
        for phase in loaders.keys():
            if phase == 'train':
                net.train()
            else:
                net.eval()  # Set model to evaluate mode

            running_loss = 0.0
            # train on batches of data, assumes you already have train_loader
            pbar = tqdm.tqdm(total=len(loaders[phase]))
            for i, data in enumerate(loaders[phase]):
                data = batch_to_device(data)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    out = net(data['image'])
                    loss = criterion(out,
                                     data['truth'])

                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                running_loss += loss.cpu().detach().numpy()
                loss_ = running_loss / ((i + 1) * cfg.batch_size)
                pbar.set_description('[{}] loss: {:.4f}'.format(phase, loss_))
                pbar.update(1)

            pbar.close()
            writer.add_scalar('{}/loss'.format(phase),
                              loss_,
                              epoch)

        # make preview images ('phase' holds the last loaders key, 'test',
        # after the phase loop above)
        if phase == 'test':
            data = next(iter(loaders[phase]))
            data = batch_to_device(data)
            pred_ = net(data['image']).cpu()
            pred_ = [
                pred_[i, ...].repeat(3, 1, 1)
                for i in range(pred_.shape[0])
            ]
            im_ = data['image'].cpu()
            im_ = [im_[i, ...] for i in range(im_.shape[0])]
            truth_ = data['truth'].cpu()
            truth_ = [
                truth_[i, ...].repeat(3, 1, 1)
                for i in range(truth_.shape[0])
            ]
            all_ = [
                tutls.make_grid([im_[i], truth_[i], pred_[i]],
                                nrow=len(pred_),
                                padding=10,
                                pad_value=1.)
                for i in range(len(truth_))
            ]
            all_ = torch.cat(all_, dim=1)
            writer.add_image('test/img', all_, epoch)

        # save checkpoint
        if phase == 'val':
            is_best = False
            if (loss_ < best_loss):
                is_best = True
                best_loss = loss_
            path = pjoin(run_dir, 'checkpoints')
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'model': net,
                    'best_loss': best_loss,
                    'optimizer': optimizer.state_dict()
                },
                is_best,
                path=path)
Example #18
    torch.cuda.empty_cache()
    offline = True
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_epochs = 20

    print("AI Running on", device)

    net = FCN().apply(initialize_weights_advance_).to(device)
    print(net)

    data_transform = transforms.Compose(
        [Rescale(250), RandomCrop(224),
         Normalize(), ToTensor()])

    transformed_dataset = FacialKeypointsDataset(
        csv_file='data/training_frames_keypoints.csv',
        root_dir='data/training/',
        transform=data_transform)

    # load training data in batches
    batch_size = 128

    train_loader = DataLoader(transformed_dataset,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=4)

    criterion = nn.MSELoss()

    optimizer = optim.Adam(params=net.parameters(), lr=0.001)

    losses = train_net(n_epochs, net, train_loader, device, optimizer,
                       criterion)  # truncated in the original; the final argument is assumed to be the criterion
Example #19
data_transform = transforms.Compose([Rescale(250),
                                     RandomCrop(crop),
                                     Normalize(crop),
                                     ToTensor()])

# testing that you've defined a transform
assert(data_transform is not None), 'Define a data_transform'




# create the transformed dataset
transformed_dataset = FacialKeypointsDataset(csv_file='training_frames_keypoints.csv',
                                             root_dir='/home/tianbai/P1_Facial_Keypoints/data/training/',
                                             transform=data_transform)


print('Number of images: ', len(transformed_dataset))


for i in range(4):
    sample = transformed_dataset[i]
    print(i, sample['image'].size(), sample['keypoints'].size())
    print(sample['keypoints'][0:5])

