Example #1
import argparse
import torch
from torch.utils.data import Dataset, DataLoader
from model.AerialNet_two_stream import net
from data.pck_dataset import GoogleEarthPCK
from data.download import download_eval
from image.normalization import NormalizeImageDict
from util.torch_util import BatchTensorToVars, str_to_bool, print_info
from geotnf.point_tnf import *
from geotnf.transformation import GeometricTnf, SynthPairTnf_pck
import numpy as np
from tqdm import tqdm

if torch.cuda.is_available():
    torch.cuda.set_device(1)  # Use the second GPU when CUDA is available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if __name__ == '__main__':
    print_info('[Deep Aerial Matching] Evaluation Script', ['green', 'bold'])

    # Argument parser
    parser = argparse.ArgumentParser(description='Deep Aerial Registration PyTorch Implementation')
    # Paths
    parser.add_argument('--model-aff', type=str, default='trained_models/checkpoint_seresnext101.pth.tar', help='Trained affine model filename')
    parser.add_argument('--batch-size', type=int, default=16, help='Test batch size')
    parser.add_argument('--feature-extraction-cnn', type=str, default='se_resnext101', help='Feature extraction architecture')
    parser.add_argument('--image-path', type=str, default='datasets/GoogleEarth/GoogleEarth_pck', help='Path to PCK dataset')
    parser.add_argument('--dataset', type=str, default='GoogleEarth_pck', help='Select evaluation dataset')
    args = parser.parse_args()

    use_cuda = torch.cuda.is_available()

    # Create model
    print('Creating CNN model...')
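    # --- Hedged sketch (not part of the original snippet) -------------------
    # The listing is cut off right after the banner print. The lines below only
    # illustrate how such an evaluation setup typically continues; the exact
    # constructor arguments of `net` and `GoogleEarthPCK`, and the checkpoint
    # key name, are assumptions, not the repository's actual code.
    model = net(use_cuda=use_cuda,
                feature_extraction_cnn=args.feature_extraction_cnn)
    checkpoint = torch.load(args.model_aff, map_location=device)
    model.load_state_dict(checkpoint['state_dict'])  # key name is an assumption
    model.eval()

    dataset = GoogleEarthPCK(dataset_path=args.image_path,
                             transform=NormalizeImageDict(['source_image',
                                                           'target_image']))
    dataloader = DataLoader(dataset, batch_size=args.batch_size,
                            shuffle=False, num_workers=4)
    batch_to_var = BatchTensorToVars(use_cuda=use_cuda)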
Example #2
import argparse
import torch
from data.synth_dataset import SynthDataset
from geotnf.transformation import SynthPairTnf
from image.normalization import NormalizeImageDict
from util.train_test_fn import train, test
from util.torch_util import save_checkpoint, str_to_bool, print_info

import pickle
from functools import partial

# torch.cuda.set_device(1) # Using second GPU

# Patch pickle so that legacy Python 2-era pickle files can be loaded under
# Python 3 (byte strings are decoded as latin1 instead of raising an error).
pickle.load = partial(pickle.load, encoding="latin1")
pickle.Unpickler = partial(pickle.Unpickler, encoding="latin1")

if __name__ == '__main__':
    print_info('[Deep Aerial Matching] training script',['green','bold'])

    # Argument parsing
    parser = argparse.ArgumentParser(description='Deep Aerial Matching PyTorch Implementation')
    # Paths
    parser.add_argument('--training-dataset', type=str, default='GoogleEarth', help='dataset to use for training')
    parser.add_argument('--training-tnf-csv', type=str, default='', help='path to training transformation csv folder')
    parser.add_argument('--training-image-path', type=str, default='', help='path to folder containing training images')
    parser.add_argument('--trained-models-dir', type=str, default='trained_models', help='path to trained models folder')
    parser.add_argument('--trained-models-fn', type=str, default='checkpoint_adam', help='trained model filename')
    # Optimization parameters
    parser.add_argument('--lr', type=float, default=0.0004, help='learning rate')
    parser.add_argument('--momentum', type=float, default=0.9, help='momentum constant')
    parser.add_argument('--num-epochs', type=int, default=100, help='number of training epochs')
    parser.add_argument('--batch-size', type=int, default=12, help='training batch size')
    parser.add_argument('--weight-decay', type=float, default=0, help='weight decay constant')
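    # --- Hedged sketch (not part of the original snippet) -------------------
    # The listing stops inside the argument definitions. As an illustration
    # only, the optimization flags above map onto a standard PyTorch optimizer
    # roughly like this; `model` stands in for the network the full script
    # builds later. Note that Adam ignores the --momentum flag (the default
    # checkpoint name 'checkpoint_adam' suggests Adam is the optimizer used).
    args = parser.parse_args()
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)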
Example #3
import argparse
import torch
from torch.utils.data import DataLoader

from models.base_model import net as base_model
from models.improved_model import net as improved_model
from models.improved_model2 import net as improved_model2

from so_clevr_dataset import SortOfClevrDataset

from util.torch_util import BatchTensorToVars, str_to_bool, print_info
from tqdm import tqdm

if __name__ == '__main__':
    print_info('[Relational Reasoning Network] Evaluation Script',
               ['green', 'bold'])

    # Argument parser
    parser = argparse.ArgumentParser(
        description='Relational Reasoning Network PyTorch Implementation')

    parser.add_argument('--model-type',
                        type=str,
                        default='base',
                        choices=['base', 'improved', 'improved2'],
                        help='Model type: base, improved, improved2')
    parser.add_argument('--load-model',
                        type=str,
                        default='trained_models/base_model.pth.tar',
                        help='The trained model path')
    parser.add_argument('--batch-size',
Example #4
theta_aff_ensemble = theta2homogeneous(theta_aff_ensemble)
theta_aff_aff_ensemble = theta2homogeneous(theta_aff_aff_ensemble)

theta = torch.bmm(theta_aff_aff_ensemble, theta_aff_ensemble).view(-1,
                                                                   9)[:, :6]
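# Note: theta2homogeneous presumably lifts each 2x3 affine to a 3x3 matrix by
# appending a constant [0, 0, 1] row, so the batched matrix product above
# composes the two estimated affine transforms into one. Flattening to 9
# values and keeping the first 6 then drops that constant row again, leaving
# a single 2x3 affine per batch element.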

### Process result
warped_image_aff_aff = affTnf(Im2Tensor(source_image), theta.view(-1, 2, 3))
result_aff_aff_np = warped_image_aff_aff.squeeze(0).transpose(0, 1).transpose(
    1, 2).cpu().detach().numpy()
io.imsave('results/aff_aff.jpg', result_aff_aff_np)

print()
print_info(
    "# ====================================== #\n"
    "#            <Execution Time>            #\n"
    "#            - %.4s seconds -            #" % (time.time() - start_time) +
    "\n"
    "# ====================================== #", ['yellow', 'bold'])

# Create overlay
aff_overlay = cv2.addWeighted(src1=result_aff_np,
                              alpha=0.4,
                              src2=target_image,
                              beta=0.8,
                              gamma=0)
io.imsave('results/aff_overlay.jpg', np.clip(aff_overlay, -1, 1))

# Create checkboard
aff_checkboard = createCheckBoard(result_aff_np, target_image)
io.imsave('results/aff_checkboard.jpg', aff_checkboard)
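# --- Hedged sketch (not part of the original snippet) -----------------------
# createCheckBoard itself is not shown in this listing. The function below is
# a hypothetical stand-in, not the repository's implementation: it alternates
# square tiles taken from the two images so that registration errors become
# visible at the tile borders.
def checkerboard_composite(img_a, img_b, tiles=8):
    assert img_a.shape == img_b.shape
    h, w = img_a.shape[:2]
    ys = (np.arange(h) * tiles // h)[:, None]
    xs = (np.arange(w) * tiles // w)[None, :]
    mask = ((ys + xs) % 2).astype(bool)   # True on every other tile
    out = img_a.copy()
    out[mask] = img_b[mask]
    return out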
Example #5
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader

from models.base_model import net as base_model
from models.improved_model import net as improved_model
from models.improved_model2 import net as improved_model2

from so_clevr_dataset import SortOfClevrDataset

from util.train_test_fn import train, test
from util.torch_util import save_checkpoint, str_to_bool, print_info

if __name__ == '__main__':
    print_info('[Relational Reasoning Network] training script',
               ['green', 'bold'])

    # Argument parsing
    parser = argparse.ArgumentParser(
        description='Relational Reasoning Network PyTorch Implementation')
    # Paths
    parser.add_argument('--trained-models-dir',
                        type=str,
                        default='trained_models',
                        help='path to trained models folder')
    # Optimization parameters
    parser.add_argument('--lr', type=float, default=1e-4, help='learning rate')
    parser.add_argument('--num-epochs',
                        type=int,
                        default=100,
                        help='number of training epochs')
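    # --- Hedged sketch (not part of the original snippet) -------------------
    # The listing ends inside the argument definitions. The usual next steps
    # would look roughly like this; the no-argument model constructors and the
    # --model-type flag (present in the evaluation script above) are
    # assumptions about this repository, not code taken from it.
    args = parser.parse_args()

    model_factories = {'base': base_model,
                       'improved': improved_model,
                       'improved2': improved_model2}
    model = model_factories[getattr(args, 'model_type', 'base')]()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)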