Example #1
def __init__(
    self,
    items: Iterator,
    classes: Collection = None,
    label_delim: str = None,  # note: accepted but not used below
    **kwargs,
):
    super().__init__(items, classes=classes, **kwargs)
    self.loss_func = OnlineContrastiveLoss(1.0, HardNegativePairSelector())
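The loss itself is not shown in these snippets. A minimal sketch, assuming the common two-argument form (a margin plus a pair-mining strategy) used by most of the examples here; some projects use variants, such as a margin-only constructor (Example #9) or an extra neg_loss flag (Example #4):

import torch
import torch.nn as nn
import torch.nn.functional as F

class OnlineContrastiveLoss(nn.Module):
    # A sketch, not the exact implementation behind every example: the pair
    # selector is assumed to return two LongTensors of index pairs into the
    # embedding batch.
    def __init__(self, margin, pair_selector):
        super().__init__()
        self.margin = margin
        self.pair_selector = pair_selector

    def forward(self, embeddings, target):
        positive_pairs, negative_pairs = self.pair_selector.get_pairs(
            embeddings, target)
        # Pull positive pairs together (squared Euclidean distance) ...
        positive_loss = (embeddings[positive_pairs[:, 0]] -
                         embeddings[positive_pairs[:, 1]]).pow(2).sum(1)
        # ... and push negative pairs until they are at least `margin` apart.
        negative_loss = F.relu(
            self.margin - (embeddings[negative_pairs[:, 0]] -
                           embeddings[negative_pairs[:, 1]]).pow(2).sum(1).sqrt()
        ).pow(2)
        return torch.cat([positive_loss, negative_loss], dim=0).mean()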
Example #2
def siamese_embedding_learner(data,
                              pretrained_model_class,
                              emsize=128,
                              margin=1.0,
                              callback_fns=None):
    meta = cnn_config(pretrained_model_class)
    model = create_cnn_model(pretrained_model_class, emsize)
    learn = Learner(
        data,
        model,
        loss_func=OnlineContrastiveLoss(margin, HardNegativePairSelector()),
        callback_fns=callback_fns,
    )
    learn.split(meta["split"])
    apply_init(model[1], nn.init.kaiming_normal_)
    return learn
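A hypothetical call, assuming a fastai v1 ImageDataBunch named data and a torchvision-style backbone; neither appears in the snippet itself:

# Hypothetical usage (fastai v1 style); `data` and `models.resnet18`
# are assumptions, not part of the original example.
learn = siamese_embedding_learner(data, models.resnet18, emsize=128, margin=1.0)
learn.freeze()          # train only the freshly initialised embedding head
learn.fit_one_cycle(5)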
Example #3
online_train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_sampler=train_batch_sampler, **kwargs)
online_test_loader = torch.utils.data.DataLoader(
    test_dataset, batch_sampler=test_batch_sampler, **kwargs)

# Set up the network and training parameters
from networks import EmbeddingNet
from losses import OnlineContrastiveLoss
from utils import AllPositivePairSelector, HardNegativePairSelector  # Strategies for selecting pairs within a minibatch

margin = 1.
embedding_net = EmbeddingNet()
model = embedding_net
if cuda:
    model.cuda()
loss_fn = OnlineContrastiveLoss(margin, HardNegativePairSelector())
lr = 1e-3
optimizer = optim.Adam(model.parameters(), lr=lr)
scheduler = lr_scheduler.StepLR(optimizer, 8, gamma=0.1, last_epoch=-1)
n_epochs = 20
log_interval = 50

fit(online_train_loader, online_test_loader, model, loss_fn, optimizer,
    scheduler, n_epochs, cuda, log_interval)

train_embeddings_ocl, train_labels_ocl = extract_embeddings(
    train_loader, model)
plot_embeddings(train_embeddings_ocl, train_labels_ocl)
val_embeddings_ocl, val_labels_ocl = extract_embeddings(test_loader, model)
plot_embeddings(val_embeddings_ocl, val_labels_ocl)
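The batch samplers passed to the loaders above are not shown. Online pair mining only works when each minibatch contains several samples per class; a sketch of such a balanced sampler, with illustrative names, under that assumption:

import numpy as np
from torch.utils.data.sampler import BatchSampler

class BalancedBatchSampler(BatchSampler):
    # Yield batches holding n_samples examples from each of n_classes
    # classes, so the pair selector can find positives in every batch.
    # A sketch of the assumed sampler behaviour, not the original code.
    def __init__(self, labels, n_classes, n_samples):
        self.labels = np.asarray(labels)
        self.classes = np.unique(self.labels)
        self.n_classes = n_classes
        self.n_samples = n_samples
        self.n_batches = len(self.labels) // (n_classes * n_samples)

    def __iter__(self):
        for _ in range(self.n_batches):
            batch = []
            for c in np.random.choice(self.classes, self.n_classes,
                                      replace=False):
                idxs = np.where(self.labels == c)[0]
                batch.extend(np.random.choice(
                    idxs, self.n_samples,
                    replace=len(idxs) < self.n_samples))
            yield batch

    def __len__(self):
        return self.n_batches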
Example #4
def main():
    # Arguments
    parser = argparse.ArgumentParser(
        description='High Quality Monocular Depth Estimation via Transfer Learning')
    parser.add_argument('--epochs',
                        default=20,
                        type=int,
                        help='number of total epochs to run')
    parser.add_argument('--lr',
                        '--learning-rate',
                        default=0.0001,
                        type=float,
                        help='initial learning rate')
    parser.add_argument('--data-folder',
                        type=str,
                        help='location of dataset folder')
    parser.add_argument('--pretrained-weights',
                        type=str,
                        default=None,
                        help='location of pretrained weights')
    parser.add_argument('--label-file',
                        type=str,
                        help='name of label txt file')
    parser.add_argument('--bs', default=4, type=int, help='batch size')
    parser.add_argument('--num-instances',
                        default=None,
                        type=int,
                        help='number of instances of same image in the batch')
    parser.add_argument('--lamb',
                        default=0.,
                        type=float,
                        help='lambda: multiplier for the additional loss term')
    parser.add_argument('--evaluate',
                        type=str,
                        default=None,
                        help='path of model file for testing')
    args = parser.parse_args()
    print(args)
    # Create model
    model = PTResModel(pretrained_weights=args.pretrained_weights)
    evaluating = args.evaluate is not None
    if evaluating:
        print('Evaluating ', args.evaluate)
        checkpoint = torch.load(args.evaluate)
        model.load_state_dict(checkpoint)

    model = model.cuda()
    print('Model created.')

    # Training parameters
    optimizer = torch.optim.Adam(model.parameters(), args.lr)
    batch_size = args.bs

    # Load data

    data_test, nyu2_test = loadToMem(args.data_folder,
                                     txtfile='/nyu2_test_updated.csv')
    transformed_testing = depthDatasetMemory(
        data_test, nyu2_test, transform=getNoTransform(is_test=True))
    test_loader = DataLoader(transformed_testing, 1, shuffle=False)
    if evaluating:
        evaluate_model(model, test_loader, args)
        return

    data, nyu2_train = loadToMem(args.data_folder, txtfile=args.label_file)
    transformed_training = depthDatasetMemory(
        data, nyu2_train, transform=getDefaultTrainTransform())
    if args.num_instances:
        train_loader = DataLoader(transformed_training,
                                  batch_size,
                                  sampler=RandomIdentitySampler(
                                      transformed_training,
                                      num_instances=args.num_instances),
                                  num_workers=4,
                                  drop_last=True)
    else:
        train_loader = DataLoader(transformed_training,
                                  batch_size,
                                  shuffle=True,
                                  num_workers=4,
                                  drop_last=True)

    # Logging
    writer = SummaryWriter('logs', flush_secs=30)

    # Loss
    l1_criterion = nn.L1Loss()
    l1_criterion = l1_criterion.cuda()
    ps = AllPositivePairSelector(balance=False)
    criterion2 = OnlineContrastiveLoss(1., ps)
    criterion2 = criterion2.cuda()
    # Start training...
    for epoch in range(args.epochs):
        batch_time = AverageMeter()
        losses = AverageMeter()
        N = len(train_loader)

        # Switch to train mode
        model.train()
        transformed_training.reset_seeds()
        end = time.time()

        for i, sample_batched in enumerate(train_loader):
            optimizer.zero_grad()

            # Prepare sample and target (Variable wrappers are deprecated
            # no-ops in modern PyTorch, so plain tensors suffice)
            image = sample_batched['image'].cuda()
            depth = sample_batched['depth'].cuda(non_blocking=True)
            image_id = sample_batched['image_id'].cuda(non_blocking=True)
            # Normalize depth
            depth_n = DepthNorm(depth)

            # Predict
            output, feats = model(image)

            # Compute the loss
            l_depth = l1_criterion(output, depth_n)
            l_ssim = torch.clamp(
                (1 - ssim(output, depth_n, val_range=1000.0 / 10.0)) * 0.5, 0,
                1)

            loss = (1.0 * l_ssim) + (0.1 * l_depth)

            if args.lamb > 0:
                l_mse = criterion2(feats, image_id, neg_loss=False)
                loss += args.lamb * l_mse

            # Update step
            losses.update(loss.data.item(), image.size(0))
            loss.backward()
            optimizer.step()

            # Measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            eta = str(datetime.timedelta(seconds=int(batch_time.val *
                                                     (N - i))))

            # Log progress
            niter = epoch * N + i
            if i % 5 == 0:
                # Print to console
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.sum:.3f})\t'
                      'ETA {eta}\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})'.format(
                          epoch,
                          i,
                          N,
                          batch_time=batch_time,
                          loss=losses,
                          eta=eta))

                # Log to tensorboard
                writer.add_scalar('Train/Loss', losses.val, niter)

            if i % 300 == 0:
                LogProgress(model, writer, test_loader, niter)

        # Record epoch's intermediate results
        LogProgress(model, writer, test_loader, niter)
        writer.add_scalar('Train/Loss.avg', losses.avg, epoch)
        torch.save(model.state_dict(), "models_" + str(epoch) + '.pth.tar')
        evaluate_model(model, test_loader, args)
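RandomIdentitySampler is also external to this example. A sketch of the usual re-ID style sampler, which emits num_instances indices per identity so the pair selector always sees positives; the record layout (identity label at position 1) is an assumption about the dataset:

import random
from collections import defaultdict
from torch.utils.data.sampler import Sampler

class RandomIdentitySampler(Sampler):
    # A sketch: data_source is assumed to be a sequence of records with the
    # identity label at position 1, as in common re-ID datasets.
    def __init__(self, data_source, num_instances=2):
        self.num_instances = num_instances
        self.index_dic = defaultdict(list)
        for index, record in enumerate(data_source):
            self.index_dic[record[1]].append(index)
        self.pids = list(self.index_dic.keys())

    def __iter__(self):
        indices = []
        for pid in random.sample(self.pids, len(self.pids)):
            idxs = self.index_dic[pid]
            if len(idxs) >= self.num_instances:
                indices.extend(random.sample(idxs, self.num_instances))
            else:
                indices.extend(random.choices(idxs, k=self.num_instances))
        return iter(indices)

    def __len__(self):
        return len(self.pids) * self.num_instances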
Example #5
    net = ResNet18()
    net = net.to(device)
    if device == 'cuda':
        net = torch.nn.DataParallel(net)
        cudnn.benchmark = True

    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
        if os.path.isfile('./checkpoint/contrastive_ckpt.t7'):
            checkpoint = torch.load('./checkpoint/contrastive_ckpt.t7')
            best_acc = checkpoint['acc']
        else:
            checkpoint = torch.load('./checkpoint/classification_ckpt.t7')
            best_acc = 0
        net.load_state_dict(checkpoint['net'])
        
        start_epoch = checkpoint['epoch']
        current_best_epoch = start_epoch
        print('the current best acc is %.3f on epoch %d' % (best_acc, start_epoch))

    criterion = OnlineContrastiveLoss(margin=1, pair_selector=AllPositivePairSelector())
    optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)

    for epoch in range(start_epoch, start_epoch+args.epochs):
        train(epoch)
        test(epoch)
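Examples #5 and #7 differ only in the pair-mining strategy. A sketch of the exhaustive selector used here, assuming the interface get_pairs(embeddings, labels) -> (positive_pairs, negative_pairs) of index pairs; balance=True subsamples the negatives down to the number of positives:

from itertools import combinations

import torch

class AllPositivePairSelector:
    # A sketch of the assumed selector, not the original implementation.
    def __init__(self, balance=True):
        self.balance = balance

    def get_pairs(self, embeddings, labels):
        labels = labels.cpu()
        pairs = torch.LongTensor(list(combinations(range(len(labels)), 2)))
        same = labels[pairs[:, 0]] == labels[pairs[:, 1]]
        positive_pairs, negative_pairs = pairs[same], pairs[~same]
        if self.balance:
            keep = torch.randperm(len(negative_pairs))[:len(positive_pairs)]
            negative_pairs = negative_pairs[keep]
        return positive_pairs, negative_pairs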


Example #6
def main(args):
  # best accuracy seen so far
  best_acc1 = 0.0
  # tensorboard writer
  writer = SummaryWriter(args.experiment + "/logs")

  if args.gpu >= 0:
    print("Use GPU: {}".format(args.gpu))
  else:
    print('Using CPU for computing!')

  fixed_random_seed = 2019
  torch.manual_seed(fixed_random_seed)
  np.random.seed(fixed_random_seed)
  random.seed(fixed_random_seed)


  # set up transforms for data augmentation
  mn = [float(x) for x in args.mean] if args.mean else [0.485, 0.456, 0.406]
  st = [float(x) for x in args.std] if args.std else [0.229, 0.224, 0.225]

  normalize = transforms.Normalize(mean=mn, std=st)
  train_transforms = get_train_transforms(normalize)
  val_transforms = get_val_transforms(normalize)
  if args.train_denoiser:
    train_transforms = get_denoiser_train_transforms(normalize)
    val_transforms = get_denoiser_val_transforms(normalize)
  elif args.cub_training:
    networks.CLASSES = 200
    train_transforms = get_cub_train_transforms(normalize)
    val_transforms = get_cub_val_transforms(normalize)
  if args.spad_training:
    networks.CLASSES = 122
    train_transforms = get_spad_train_transforms(normalize)
    val_transforms = get_spad_val_transforms(normalize)
  elif args.cars_training:
    networks.CLASSES = 196
    train_transforms = get_cub_train_transforms(normalize)
    val_transforms = get_cub_val_transforms(normalize)
  elif args.imagenet_training:
    networks.CLASSES = 1000
    train_transforms = get_imagenet_train_transforms(normalize)
    val_transforms = get_imagenet_val_transforms(normalize)
  if not args.evaluate:
    print("Training time data augmentations:")
    print(train_transforms)


  model_clean = None
  model_teacher = None
  if args.use_resnet18:
    model = torchvision.models.resnet18(pretrained=False)
    model.fc = nn.Linear(512, networks.CLASSES)
    if args.use_resnet18 != "random":
        model.load_state_dict(torch.load(args.use_resnet18)['state_dict'])
  elif args.use_resnet34:
    model = torchvision.models.resnet34(pretrained=False)
    model.fc = nn.Linear(512, networks.CLASSES)
  elif args.use_resnet50:
    model = torchvision.models.resnet50(pretrained=False)
    model.fc = nn.Linear(2048, networks.CLASSES)
  elif args.use_inception_v3:
    model = torchvision.models.inception_v3(pretrained=False, aux_logits=False)
    model.fc = nn.Linear(2048, networks.CLASSES)
  elif args.use_photon_net:
    model = networks.ResNetContrast(BasicBlock, [2, 2, 2, 2], networks.CLASSES)
    if args.use_photon_net != "random":
        model.load_state_dict(torch.load(args.use_photon_net)['state_dict'])
#  elif args.use_contrastive_allfeats:
#    model = networks.ResNetContrast2(BasicBlock, [2, 2, 2, 2], networks.CLASSES)
#    if(args.use_contrastive_allfeats!="random"):
#        model.load_state_dict(torch.load(args.use_contrastive_allfeats)['state_dict'])
  elif args.train_denoiser:
    model = UNet(3,3)
  elif args.use_dirty_pixel:
    model = torchvision.models.resnet18(pretrained=False)
    model.fc = nn.Linear(512, networks.CLASSES)
    model_clean = UNet(3,3)
    if not args.evaluate:
        model_clean.load_state_dict(torch.load(args.use_dirty_pixel)['state_dict'])
        model_clean = model_clean.cuda(args.gpu)
  elif args.use_student_teacher:
    model = networks.ResNetPerceptual(BasicBlock, [2, 2, 2, 2], networks.CLASSES)
    model_teacher = networks.ResNetPerceptual(BasicBlock, [2, 2, 2, 2], networks.CLASSES, teacher_model=True)
    model_teacher.load_state_dict(torch.load(args.use_student_teacher)['state_dict'])
    model_teacher = model_teacher.cuda(args.gpu)
    model_teacher.eval()
    for param in model_teacher.parameters():
      param.requires_grad = False
  else:
    print("select correct model")
    exit(0)

  criterion1 = nn.CrossEntropyLoss()
  if args.use_student_teacher or args.train_denoiser or args.use_dirty_pixel:
    criterion2 = nn.MSELoss()
  else:
    ps = AllPositivePairSelector(balance=False)
    criterion2 = OnlineContrastiveLoss(1., ps)
  # put everthing to gpu
  if args.gpu >= 0:
    model = model.cuda(args.gpu)
    criterion1 = criterion1.cuda(args.gpu)
    #criterion3 = criterion3.cuda(args.gpu)
    criterion2 = criterion2.cuda(args.gpu)
  criterion = [criterion1, criterion2]
  #criterion = [criterion1]
  # setup the optimizer
  opt_params = model.parameters()
  if args.use_dirty_pixel:
    opt_params = list(model.parameters()) + list(model_clean.parameters())
  optimizer = torch.optim.SGD(opt_params, args.lr,
                momentum=args.momentum,
                weight_decay=args.weight_decay)

  # resume from a checkpoint?
  if args.resume:
    if os.path.isfile(args.resume):
      print("=> loading checkpoint '{}'".format(args.resume))
      if args.gpu >= 0:
        checkpoint = torch.load(args.resume)
      else:
        checkpoint = torch.load(args.resume, map_location=lambda storage, loc: storage)
      #best_acc1 = checkpoint['best_acc1']


      #new_state_dict = OrderedDict()
      #model_dict = model.state_dict()
      #for k, v in checkpoint['state_dict'].items():
      #  name = k[7:] # remove `module.`
      #  if(name.startswith('fc')):
      #      continue
      #  new_state_dict[name] = v
      #model_dict.update(new_state_dict)
      #model.load_state_dict(model_dict)
      model.load_state_dict(checkpoint['state_dict'])
      if args.gpu < 0:
        model = model.cpu()
      else:
        model = model.cuda(args.gpu)
      if args.use_dirty_pixel:
        model_clean.load_state_dict(checkpoint['model_clean_state_dict'])
        model_clean = model_clean.cuda(args.gpu)
#      # only load the optimizer if necessary
#      if (not args.evaluate):
#        args.start_epoch = checkpoint['epoch']
#        optimizer.load_state_dict(checkpoint['optimizer'])
      print("=> loaded checkpoint '{}' (epoch {}, acc1 {})"
          .format(args.resume, checkpoint['epoch'], best_acc1))
    else:
      print("=> no checkpoint found at '{}'".format(args.resume))


  # setup dataset and dataloader
  val_dataset = IMMetricLoader(args.data_folder,
                  split='val', transforms=val_transforms, image_id=False,
                  pil_loader=args.pil_loader, clean_image=args.train_denoiser,
                  label_file=args.val_label_file)

  print('Validation Set Size: ', len(val_dataset))

  val_batch_size = 1 if args.train_denoiser else 50
  val_loader = torch.utils.data.DataLoader(
    val_dataset, batch_size=val_batch_size, shuffle=False,
    num_workers=args.workers, pin_memory=True, sampler=None, drop_last=False)
  val_dataset.reset_seed()
  # evaluation
  if args.evaluate:
    print("Testing the model ...")
    cudnn.deterministic = True
    validate_model(val_loader, model, -1, args, writer, model_clean)
    return
  load_clean_image = args.use_student_teacher or args.train_denoiser or args.use_dirty_pixel
  train_dataset = IMMetricLoader(args.data_folder,
                  split='train', transforms=train_transforms,
                  label_file=args.label_file, pil_loader=args.pil_loader,
                  clean_image=load_clean_image)
  print('Training Set Size: ', len(train_dataset))
  if args.num_instances:
    train_loader = torch.utils.data.DataLoader(
      train_dataset, batch_size=args.batch_size,
      num_workers=args.workers, pin_memory=True,
      sampler=RandomIdentitySampler(train_dataset,
                                    num_instances=args.num_instances),
      drop_last=True)
  else:
    train_loader = torch.utils.data.DataLoader(
      train_dataset, batch_size=args.batch_size, shuffle=True,
      num_workers=args.workers, pin_memory=True, sampler=None, drop_last=True)

  # enable cudnn benchmark
  cudnn.enabled = True
  cudnn.benchmark = True


  if args.train_denoiser:
    print("Training denoiser ...")
    for epoch in range(args.start_epoch, args.epochs):
      train_dataset.reset_seed()
      train_denoiser(train_loader, val_loader, model, criterion, optimizer, epoch, args, writer)
    return

  model.eval()
  top1 = AverageMeter()
  top5 = AverageMeter()
  val_acc1 = validate_model(val_loader, model, 0, args, writer, model_clean)
  writer.add_scalars('data/top1_accuracy',
     {"train" : top1.avg}, 0)
  writer.add_scalars('data/top5_accuracy',
     {"train" : top5.avg}, 0)
  model.train()

  # warmup the training
  if (args.start_epoch == 0) and (args.warmup_epochs > 0):
    print("Warmup the training ...")
    for epoch in range(0, args.warmup_epochs):
      acc1 = train_model(train_loader, val_loader, model, criterion, optimizer, epoch, "warmup", best_acc1, args, writer, model_clean, model_teacher)

  # start the training
  print("Training the model ...")
  for epoch in range(args.start_epoch, args.epochs):
    train_dataset.reset_seed()
    # train for one epoch
    acc1 = train_model(train_loader, val_loader, model, criterion, optimizer, epoch, "train", best_acc1, args, writer, model_clean, model_teacher)


    # save checkpoint
    best_acc1 = max(acc1, best_acc1)
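Both this example and Example #4 rely on AverageMeter for bookkeeping. The standard ImageNet-example helper matches how .val, .avg, .sum and .update(value, n) are used above:

class AverageMeter:
    # Track the latest value and a running average; a standard helper,
    # assumed to match the one imported by these examples.
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0
        self.avg = 0.0
        self.sum = 0.0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count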
Example #7
        cudnn.benchmark = True

    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isdir(
            'checkpoint'), 'Error: no checkpoint directory found!'
        if os.path.isfile('./checkpoint/contrastive_hard_negative_ckpt.t7'):
            checkpoint = torch.load(
                './checkpoint/contrastive_hard_negative_ckpt.t7')
            best_acc = checkpoint['acc']
        else:
            checkpoint = torch.load('./checkpoint/classification_ckpt.t7')
            best_acc = 0
        net.load_state_dict(checkpoint['net'])
        start_epoch = checkpoint['epoch']
        current_best_epoch = start_epoch
        print('the current best acc is %.3f on epoch %d' %
              (best_acc, start_epoch))

    criterion = OnlineContrastiveLoss(margin=1,
                                      pair_selector=HardNegativePairSelector())
    optimizer = optim.SGD(net.parameters(),
                          lr=args.lr,
                          momentum=0.9,
                          weight_decay=5e-4)

    for epoch in range(start_epoch, start_epoch + args.epochs):
        train(epoch)
        test(epoch)
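The hard-negative variant keeps every positive pair but only the closest (hardest) negatives, as many as there are positives. A sketch under the same get_pairs interface assumed above:

from itertools import combinations

import torch

class HardNegativePairSelector:
    # A sketch: all positive pairs, plus the k nearest negative pairs,
    # where k equals the number of positive pairs.
    def get_pairs(self, embeddings, labels):
        distances = torch.cdist(embeddings, embeddings)
        labels = labels.cpu()
        pairs = torch.LongTensor(list(combinations(range(len(labels)), 2)))
        same = labels[pairs[:, 0]] == labels[pairs[:, 1]]
        positive_pairs, negative_pairs = pairs[same], pairs[~same]
        negative_distances = distances[negative_pairs[:, 0],
                                       negative_pairs[:, 1]]
        k = min(len(positive_pairs), len(negative_pairs))
        _, hardest = torch.topk(negative_distances, k, largest=False)
        return positive_pairs, negative_pairs[hardest.cpu()]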
Example #8
# Dataloaders
kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
online_train_loader = torch.utils.data.DataLoader(train_set, batch_sampler=train_batch_sampler, **kwargs)
online_test_loader = torch.utils.data.DataLoader(query_set, batch_sampler=test_batch_sampler, **kwargs)
print('dataloaders built')


# Build model and load checkpoint
# model = build_retriever(cfg.model)
model = Vgg16L2(num_dim=128)
model.cuda()
print('model built')

# Set up the network and training parameters
from losses import OnlineContrastiveLoss
from utils import AllPositivePairSelector, HardNegativePairSelector

margin = 1.
loss_fn = OnlineContrastiveLoss(margin, AllPositivePairSelector())
lr = 1e-4
optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=1e-4)
scheduler = lr_scheduler.StepLR(optimizer, 8, gamma=0.1, last_epoch=-1)
n_epochs = 100
log_interval = 50

print('start training')
fit(online_train_loader, online_test_loader, model, loss_fn, optimizer, scheduler, n_epochs, cuda, log_interval, model_name='siamese_app_8_12')
print('done training')

torch.save(model.state_dict(), './checkpoint/siamese_app_100_epochs_8_12.pth')
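Reloading the saved weights is symmetric. A minimal sketch; Vgg16L2 must be constructed with the same num_dim used for training:

# Reload the trained embedder for inference (a sketch).
model = Vgg16L2(num_dim=128)
model.load_state_dict(
    torch.load('./checkpoint/siamese_app_100_epochs_8_12.pth'))
model.cuda().eval()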
Example #9
def main():
    torch.manual_seed(1)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    print(args)
    # GPU / CPU
    device = torch.device('cuda')

    print("Initializing dataset")
    dataset = data_manager.init_dataset('../imdb/dataset_GEI', 'id_list.csv',
                                        args.cooperative)

    transform = transforms.Compose([
        transforms.RandomAffine(degrees=0, translate=(0.05, 0.02)),
        transforms.ToTensor()
    ])
    transform_test = transforms.Compose([transforms.ToTensor()])
    # trainLoader
    trainLoader = DataLoader(ImageDataset(dataset.train,
                                          sample='random',
                                          transform=transform),
                             sampler=RandomIdentitySampler(dataset.train,
                                                           num_instances=2),
                             batch_size=args.train_batch,
                             num_workers=args.workers)

    # test/val queryLoader
    # test/val galleryLoader
    test_probeLoader = DataLoader(ImageDataset(dataset.test_probe,
                                               sample='dense',
                                               transform=transform_test),
                                  shuffle=False,
                                  batch_size=args.test_batch,
                                  drop_last=False)

    test_galleryLoader = DataLoader(ImageDataset(dataset.test_gallery,
                                                 sample='dense',
                                                 transform=transform_test),
                                    shuffle=False,
                                    batch_size=args.test_batch,
                                    drop_last=False)
    model = models.model.ICDNet_group_mask_mask_early_8().to(device=device)
    #model = models.model.ICDNet_mask()
    #model= nn.DataParallel(model).cuda()
    #model = models.model.icdnet().to(device=device)
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    criterion_cont = OnlineContrastiveLoss(margin=3)
    #criterion_trip = OnlineTripletLoss(3)
    criterion_trip = TripletLoss(3)
    criterion_sim = OnlineSimLoss()
    criterion_l2 = nn.MSELoss()
    criterion_label = nn.CrossEntropyLoss()

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 betas=(0.5, 0.999))
    #scheduler = lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.1)
    scheduler = lr_scheduler.MultiStepLR(optimizer, [140],
                                         gamma=0.1,
                                         last_epoch=-1)

    #checkpoint = torch.load('./save_group_mask_early8_ones2_0002_sa3_500l2_01label_resbottle_shift002_all190_coo0/ep87.pth.tar')
    #model.load_state_dict(checkpoint['state_dict'])
    start_time = time.time()
    best_rank1 = -np.inf
    #args.max_epoch = 1
    cont_iter = 1
    for epoch in range(args.start_epoch, args.max_epoch):
        print("==> {}/{}".format(epoch + 1, args.max_epoch))
        cont_iter = train(epoch, model, criterion_cont, criterion_trip,
                          criterion_sim, criterion_l2, criterion_label,
                          optimizer, scheduler, trainLoader, device, cont_iter)
        if cont_iter > 250000:
            break
        print("=============> Test")
        test_f.write("iter" + str(cont_iter) + '\n')
        rank1, correct_rate = test(model, test_probeLoader,
                                   test_galleryLoader, device)
        writer.add_scalar("Test/rank1", rank1, epoch)
        writer.add_scalar("Test/correct", correct_rate, epoch)
        is_best = rank1 > best_rank1
        if is_best:
            best_rank1 = rank1
            state_dict = model.state_dict()
            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'epoch': epoch,
                    'optimizer': optimizer.state_dict(),
                }, is_best,
                osp.join(args.save_dir, 'ep' + str(epoch + 1) + '.pth.tar'))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))