Example #1
def init_optimizer_and_model():
    global checkpoint
    if checkpoint is None:
        model = SSD300(n_classes=n_classes)
        # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
        biases = list()
        not_biases = list()
        for param_name, param in model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)
        optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * lr}, {'params': not_biases}],
                                    lr=lr, momentum=momentum, weight_decay=weight_decay)

    else:
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        epochs_since_improvement = checkpoint['epochs_since_improvement']
        best_loss = checkpoint['best_loss']
        print('\nLoaded checkpoint from epoch %d. Best loss so far is %.3f.\n' % (start_epoch, best_loss))
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']
    return optimizer, model
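A minimal usage sketch, assuming the module-level globals that init_optimizer_and_model() relies on (the names and values below are illustrative, not taken from this script):

import torch

# Hypothetical globals read by init_optimizer_and_model()
checkpoint = None      # or a path to a saved .pth.tar checkpoint
n_classes = 21         # 20 VOC classes plus background (an assumption)
lr = 1e-3
momentum = 0.9
weight_decay = 5e-4

optimizer, model = init_optimizer_and_model()
# The first parameter group (the biases) uses twice the base learning rate:
# optimizer.param_groups[0]['lr'] == 2 * lr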
Example #2
    def load(self):
        #self.dataset = load_dataset_from_pascal_voc_jar(self.jar_path, "TRAIN")
        self.dataset = load_dataset_from_icdar_jar(self.jar_path, "TRAIN")
        if not self.augment:
            self.dataset.turn_off_augment()

        self.loader = self.__loader_from_dataset(self.dataset, self.batch_size,
                                                 self.workers)
        if self.chekpoint_tar_path:
            self.start_epoch, self.model, self.optimizer = self.__load_checkpoint(
                self.chekpoint_tar_path)
            print('\nLoaded checkpoint from epoch %d.\n' % self.start_epoch)
        else:
            self.model = SSD300(n_classes=self.n_classes)
            for param_name, param in self.model.named_parameters():
                if param.requires_grad:
                    if param_name.endswith('.bias'):
                        self.biases.append(param)
                    else:
                        self.not_biases.append(param)
            self.optimizer = torch.optim.SGD(params=[{
                'params': self.biases,
                'lr': 2 * self.lr
            }, {
                'params': self.not_biases
            }],
                                             lr=self.lr,
                                             momentum=self.momentum,
                                             weight_decay=self.weight_decay)
        self.model = self.model.to(device)
        self.criterion = MultiBoxLoss(
            priors_cxcy=self.model.priors_cxcy).to(device)
Example #3
def evaluate():
    checkpoint_path = os.path.join(args.model_root, args.model_name)
    checkpoint = torch.load(checkpoint_path, map_location=device)
    model = SSD300(n_classes=len(label_map), device=device).to(device)
    model.load_state_dict(checkpoint['model'])

    transform = Transform(size=(300, 300), train=False)
    test_dataset = VOCDataset(root=args.data_root,
                              image_set=args.image_set,
                              transform=transform,
                              keep_difficult=True)
    test_loader = DataLoader(dataset=test_dataset,
                             collate_fn=collate_fn,
                             batch_size=args.batch_size,
                             num_workers=args.num_workers,
                             shuffle=False,
                             pin_memory=True)

    detected_bboxes = []
    detected_labels = []
    detected_scores = []
    true_bboxes = []
    true_labels = []
    true_difficulties = []

    model.eval()
    with torch.no_grad():
        bar = tqdm(test_loader, desc='Evaluate the model')
        for i, (images, bboxes, labels, difficulties) in enumerate(bar):
            images = images.to(device)
            bboxes = [b.to(device) for b in bboxes]
            labels = [l.to(device) for l in labels]
            difficulties = [d.to(device) for d in difficulties]

            predicted_bboxes, predicted_scores = model(images)
            _bboxes, _labels, _scores = model.detect_objects(predicted_bboxes,
                                                             predicted_scores,
                                                             min_score=0.01,
                                                             max_overlap=0.45,
                                                             top_k=200)

            detected_bboxes += _bboxes
            detected_labels += _labels
            detected_scores += _scores
            true_bboxes += bboxes
            true_labels += labels
            true_difficulties += difficulties

        all_ap, mean_ap = calculate_mAP(detected_bboxes,
                                        detected_labels,
                                        detected_scores,
                                        true_bboxes,
                                        true_labels,
                                        true_difficulties,
                                        device=device)

    pretty_printer = PrettyPrinter()
    pretty_printer.pprint(all_ap)
    print('Mean Average Precision (mAP): %.4f' % mean_ap)
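The DataLoader above relies on a custom collate_fn because each image has a different number of boxes; a minimal sketch of such a function (an assumption, the project's own collate_fn may pack the batch differently):

import torch

def collate_fn(batch):
    # Each dataset item is assumed to be (image, bboxes, labels, difficulties)
    images, bboxes, labels, difficulties = [], [], [], []
    for image, b, l, d in batch:
        images.append(image)
        bboxes.append(b)
        labels.append(l)
        difficulties.append(d)
    # Images share a fixed 300x300 size, so they can be stacked into one tensor;
    # the per-image targets stay as lists of variable-length tensors
    return torch.stack(images, dim=0), bboxes, labels, difficulties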
Example #4
def main_train():
    cudnn.benchmark = True

    if TrainParams.checkpoint is None:
        start_epoch = 0
        model = SSD300(n_classes=TrainParams.n_classes)
        biases = list()
        not_biases = list()
        for param_name, param in model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)
        optimizer = torch.optim.SGD(params=[{
            'params': biases,
            'lr': 2 * TrainParams.lr
        }, {
            'params': not_biases
        }],
                                    lr=TrainParams.lr,
                                    momentum=TrainParams.momentum,
                                    weight_decay=TrainParams.weight_decay)
    else:
        checkpoint = torch.load(TrainParams.checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

    # Move to default device
    model = model.to(TrainParams.device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(
        TrainParams.device)

    train_dataset = BCCDDataset(TrainParams.data_folder,
                                split='train',
                                keep_difficult=TrainParams.keep_difficult)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=TrainParams.batch_size,
        shuffle=True,
        collate_fn=train_dataset.collate_fn,
        num_workers=TrainParams.workers,
        pin_memory=True)

    epochs = start_epoch + 1

    for epoch in range(start_epoch, epochs):
        train(train_loader=train_loader,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              epoch=epoch,
              device=TrainParams.device)

        save_checkpoint(epoch, model, optimizer)
Example #5
def main():
    global epochs_since_improvement, start_epoch, label_map, best_loss, epoch, checkpoint

    if checkpoint is None:
        model = SSD300(n_classes=n_classes)

        biases = list()
        not_biases = list()

        for param_name, param in model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)
        
        optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * lr}, {'params': not_biases}], lr=lr, momentum=momentum, weight_decay=weight_decay)
    
    else:
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        epochs_since_improvement = checkpoint['epochs_since_improvement']
        best_loss = checkpoint['best_loss']
        print('\nLoaded checkpoint from epoch %d. Best loss so far is %.3f.\n' % (start_epoch, best_loss))
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

    model = model.to(device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)

    train_dataset = PascalVocDataset(data_folder, split='train', keep_difficult=keep_difficult)
    val_dataset = PascalVocDataset(data_folder, split='test', keep_difficult=keep_difficult)

    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, collate_fn=train_dataset.collate_fn, num_workers=workers, pin_memory=True)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=True, collate_fn=val_dataset.collate_fn, num_workers=workers, pin_memory=True)

    # epochs
    for epoch in range(start_epoch, epochs):

        # One epoch training
        train(train_loader=train_loader, model=model, criterion=criterion, optimizer=optimizer, epoch=epoch)

        # One epoch validation
        val_loss = validate(val_loader=val_loader, model=model, criterion=criterion)

        is_best = val_loss < best_loss
        best_loss = min(val_loss, best_loss)

        if not is_best:
            epochs_since_improvement += 1
            print("\nEpochs since last improvement: %d\n" % (epochs_since_improvement,))
        else:
            epochs_since_improvement = 0

        save_checkpoint(epoch, epochs_since_improvement, model, optimizer, val_loss, best_loss, is_best)
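save_checkpoint above is assumed to serialize exactly the fields that the loading branch reads back ('epoch', 'epochs_since_improvement', 'best_loss', 'model', 'optimizer'); a minimal sketch, with the filename being an assumption:

def save_checkpoint(epoch, epochs_since_improvement, model, optimizer, val_loss, best_loss, is_best):
    state = {'epoch': epoch,
             'epochs_since_improvement': epochs_since_improvement,
             'loss': val_loss,
             'best_loss': best_loss,
             'model': model,
             'optimizer': optimizer}
    filename = 'checkpoint_ssd300.pth.tar'  # assumed filename
    torch.save(state, filename)
    if is_best:
        # keep a separate copy of the best checkpoint so far
        torch.save(state, 'BEST_' + filename)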
Example #6
def main(config):
    """
    Training
    """
    global label_map, log

    out_dir = os.path.join('./models', config.model_name)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    log.open(os.path.join(out_dir, config.model_name + '.txt'), mode='a')
    log.write('\tout_dir = %s\n' % out_dir)
    log.write('\n')

    # Initialize model or load checkpoint
    if config.checkpoint is None:
        start_epoch = 0
        model = SSD300(n_classes=n_classes)
        # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
        biases = list()
        not_biases = list()
        for param_name, param in model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)

        optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * config.lr}, {'params': not_biases}],
            lr=config.lr, momentum=config.momentum, weight_decay=config.weight_decay)

    else:
        checkpoint = torch.load(config.checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']
        log.write('\nLoaded checkpoint from epoch %d.\n' % start_epoch)

    model = model.to(device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)

    train_dataset = PascalVOCDataset(data_dir=config.data_folder, split='train', keep_difficult=config.keep_difficult)

    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True,
        collate_fn=train_dataset.collate_fn, num_workers=config.num_workers, pin_memory=True)

    for epoch in range(start_epoch, config.epochs):
        if epoch in config.decay_lr_at:
            adjust_learning_rate(optimizer, config.decay_lr_to)

        train(train_loader=train_loader, model=model, criterion=criterion, optimizer=optimizer, epoch=epoch)

        # Save checkpoint
        save_checkpoints(os.path.join(out_dir, 'checkpoint', 'checkpoint_epoch_{0}.pth.tar'.format(epoch+1)))
Example #7
def main():
    """
    Training and validation.
    """
    global epochs_since_improvement, start_epoch, label_map, best_loss, epoch
    # Initialize model 
    model = SSD300(n_classes=n_classes)
    # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
    biases = list()
    not_biases = list()
    for param_name, param in model.named_parameters():
        if param.requires_grad:
            if param_name.endswith('.bias'):
                biases.append(param)
            else:
                not_biases.append(param)
    optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * lr}, {'params': not_biases}],
                                lr=lr, momentum=momentum, weight_decay=weight_decay)
Example #8
def create_model(num_classes=21, device=torch.device('cpu')):
    backbone = Backbone(pretrain_path='./pretrain/resnet50.pth')
    model = SSD300(backbone=backbone, num_classes=num_classes)

    pre_ssd_path = './pretrain/nvidia_ssdpyt_fp32.pt'
    pre_model_dict = torch.load(pre_ssd_path, map_location=device)
    pre_weights_dict = pre_model_dict['model']

    # only use the pre_trained bounding boxes regression weights
    del_conf_loc_dict = {}
    for k, v in pre_weights_dict.items():
        split_key = k.split('.')
        if 'conf' in split_key:
            continue
        del_conf_loc_dict.update({k: v})

    missing_keys, unexpected_keys = model.load_state_dict(del_conf_loc_dict,
                                                          strict=False)
    # if len(missing_keys) != 0 or len(unexpected_keys) != 0:
    #     print('missing_keys: ', missing_keys)
    #     print('unexpected_keys: ', unexpected_keys)
    return model
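A short usage sketch for the helper above (the device selection is illustrative):

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = create_model(num_classes=21, device=device).to(device)
model.eval()  # the class-confidence ('conf') heads keep their random initialization, as in the helper above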
Example #9
def main():
    wandb.init()

    # Config is a variable that holds and saves hyperparameters and inputs
    #wandb.watch(model)

    torch.manual_seed(30)
    """
    Training.
    """
    global start_epoch, label_map, epoch, checkpoint, decay_lr_at
    #print(device)
    # Initialize model or load checkpoint
    # if checkpoint is None:
    start_epoch = 79
    model = SSD300(n_classes=n_classes)

    #checkpoint = torch.load(checkpoint)
    # if checkpoint is None:
    #     start_epoch = 0
    #     model = SSD300(n_classes=n_classes)
    #     # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
    #     biases: List[Any] = list()
    #     not_biases = list()
    #     for param_name, param in model.named_parameters():
    #         if param.requires_grad:
    #             if param_name.endswith('.bias'):
    #                 biases.append(param)
    #             else:
    #                 not_biases.append(param)
    #     optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * lr}, {'params': not_biases}],
    #                                 lr=lr, momentum=momentum, weight_decay=weight_decay)
    #
    # else:
    #     checkpoint = torch.load(checkpoint)
    #     start_epoch = checkpoint['epoch'] + 1
    #     print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
    #     model = checkpoint['model']
    #     optimizer = checkpoint['optimizer']

    # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
    biases: List[Any] = list()
    not_biases = list()
    for param_name, param in model.named_parameters():
        if param.requires_grad:
            if param_name.endswith('.bias'):
                biases.append(param)
            else:
                not_biases.append(param)
    optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * lr}, {'params': not_biases}],
                                lr=lr, momentum=momentum, weight_decay=weight_decay)
    scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0.0003,
                                                  max_lr=0.0008, step_size_up=26, step_size_down=26)
    # print(model)
    # else:
    #     checkpoint = torch.load(checkpoint)
    #     start_epoch = checkpoint['epoch'] + 1
    #     print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
    #     model = checkpoint['model']
    #     optimizer = checkpoint['optimizer']

    # Move to default device
    model = model.to(device)
    checkpoint = torch.load('modelfi.pt')
    model.load_state_dict(checkpoint)
    # checkpoint = torch.load('model_best.pth.tar')
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)
    wandb.watch(model, log="all")
    # Custom dataloaders
    train_dataset = PascalVOCDataset(data_folder,
                                     split='train',
                                     keep_difficult=keep_difficult)
   # print(train_dataset)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                                               collate_fn=train_dataset.collate_fn, num_workers=workers,
                                               pin_memory=True)  # note that we're passing the collate function here


    test_dataset = PascalVOCDataset(data_folder,split='test', keep_difficult=keep_difficult)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=True,
                                              collate_fn=test_dataset.collate_fn, num_workers=workers, pin_memory=True)
    #print(next(iter(test_loader)))
#    print(train_loader)
#    a=next(iter(train_loader))
#    print(a)
    # Calculate total number of epochs to train and the epochs to decay learning rate at (i.e. convert iterations to epochs)
    # To convert iterations to epochs, divide iterations by the number of iterations per epoch
    # The paper trains for 120,000 iterations with a batch size of 32, decays after 80,000 and 100,000 iterations
   # epochs = iterations // (len(train_dataset) // 8)
    #print(epochs)
    #decay_lr_at = [it // (len(train_dataset) // 32) for it in decay_lr_at]

    epochs = 100
    # Epochs
    for epoch in range(start_epoch, epochs):
        #
        # # Decay learning rate at particular epochs
        # if epoch in decay_lr_at:
        #     adjust_learning_rate(optimizer, decay_lr_to)

        # One epoch's training
        train(train_loader=train_loader,
              model=model,
              scheduler=scheduler,
              criterion=criterion,
              optimizer=optimizer,
              epoch=epoch)
        test(test_loader=test_loader,
              model=model,
              criterion=criterion,
              #optimizer=optimizer,
              epoch=epoch)
Example #10
'''Convert pretrained VGG model to SSD.

VGG model download from PyTorch model zoo: https://download.pytorch.org/models/vgg16-397923af.pth
'''
import torch

from model import SSD300

vgg = torch.load("/home/pzl/pytorch-hed/model/vgg16.pth")

ssd = SSD300()
layer_indices = [0, 2, 5, 7, 10, 12, 14, 17, 19, 21]

for layer_idx in layer_indices:
    ssd.base[layer_idx].weight.data = vgg['features.%d.weight' % layer_idx]
    ssd.base[layer_idx].bias.data = vgg['features.%d.bias' % layer_idx]

# [24,26,28]
ssd.conv5_1.weight.data = vgg['features.24.weight']
ssd.conv5_1.bias.data = vgg['features.24.bias']
ssd.conv5_2.weight.data = vgg['features.26.weight']
ssd.conv5_2.bias.data = vgg['features.26.bias']
ssd.conv5_3.weight.data = vgg['features.28.weight']
ssd.conv5_3.bias.data = vgg['features.28.bias']

torch.save(ssd.state_dict(), 'pretained/ssd.pth')
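A quick round-trip check (a sketch) that the converted weights load back into a freshly constructed SSD300:

# reload the converted state dict; the path matches the torch.save() call above
check = SSD300()
check.load_state_dict(torch.load('pretained/ssd.pth'))
print('Converted VGG weights load cleanly into SSD300')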
Example #11
def main():
    """
    Training.
    """
    global start_epoch, label_map, epoch, checkpoint, decay_lr_at

    # Initialize model or load checkpoint
    if checkpoint is None:
        start_epoch = 0
        model = SSD300(n_classes=n_classes)
        # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
        biases = list()
        not_biases = list()
        for param_name, param in model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)
        optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * lr}, {'params': not_biases}],
                                    lr=lr, momentum=momentum, weight_decay=weight_decay)

    else:
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

    # Move to default device
    model = model.to(device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)

    # Custom dataloaders
    train_dataset = PascalVOCDataset(data_folder,
                                     split='train',
                                     keep_difficult=keep_difficult)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                                               collate_fn=train_dataset.collate_fn, num_workers=workers,
                                               pin_memory=True)  # note that we're passing the collate function here

    # Calculate total number of epochs to train and the epochs to decay learning rate at (i.e. convert iterations to epochs)
    # To convert iterations to epochs, divide iterations by the number of iterations per epoch
    # The paper trains for 120,000 iterations with a batch size of 32, decays after 80,000 and 100,000 iterations
    epochs = iterations // (len(train_dataset) // 32)
    decay_lr_at = [it // (len(train_dataset) // 32) for it in decay_lr_at]
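    # Worked example (an assumption, using the 16,551 images of the VOC 2007+2012
    # trainval split from the original tutorial): 16551 // 32 = 517 iterations per epoch,
    # so epochs = 120000 // 517 = 232 and decay_lr_at becomes
    # [80000 // 517, 100000 // 517] = [154, 193].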

    # Epochs
    for epoch in range(start_epoch, epochs):

        # Decay learning rate at particular epochs
        if epoch in decay_lr_at:
            adjust_learning_rate(optimizer, decay_lr_to)

        # One epoch's training
        train(train_loader=train_loader,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              epoch=epoch)

        # Save checkpoint
        save_checkpoint(epoch, model, optimizer)
Example #12
def main():
    """
    Training.
    """
    global start_epoch, label_map, epoch, checkpoint, decay_lr_at

    # Initialize model or load checkpoint
    if checkpoint is None:
        print("checkpoint none")
        start_epoch = 0
        model = SSD300(n_classes=n_classes)

        # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
        biases = list()
        not_biases = list()
        for param_name, param in model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)

        # different optimizer
        # optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * lr}, {'params': not_biases}],
        #                             lr=lr, momentum=momentum, weight_decay=weight_decay)
        optimizer = torch.optim.SGD(params=[{
            'params': biases,
            'lr': lr
        }, {
            'params': not_biases
        }],
                                    lr=lr,
                                    momentum=momentum,
                                    weight_decay=weight_decay)

        #optimizer = torch.optim.SGD(params=[{'params':model.parameters(), 'lr': 2 * lr}, {'params': model.parameters}],  lr=lr, momentum=momentum, weight_decay=weight_decay)

    else:
        print("checkpoint load")
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

    # Move to default device
    model = model.to(device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)

    # Custom dataloaders
    train_dataset = PascalVOCDataset(data_folder,
                                     split='train',
                                     keep_difficult=keep_difficult)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=train_dataset.collate_fn,
        num_workers=workers,
        pin_memory=True)  # note that we're passing the collate function here

    # Calculate total number of epochs to train and the epochs to decay learning rate at (i.e. convert iterations to epochs)
    # To convert iterations to epochs, divide iterations by the number of iterations per epoch
    # now it is MobileNetV3; the VGG paper trains for 120,000 iterations with a batch size of 32 and decays after 80,000 and 100,000 iterations
    epochs = 600
    # decay_lr_at =[154, 193]
    # print("decay_lr_at:",decay_lr_at)
    print("epochs:", epochs)

    for param_group in optimizer.param_groups:
        optimizer.param_groups[1]['lr'] = lr
    print("learning rate.  The new LR is %f\n" %
          (optimizer.param_groups[1]['lr'], ))
    # Epochs: I tried several different learning rate schedulers here
    # (a few scheduler variants you could try)
    #scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,T_max = (epochs // 7) + 1)
    scheduler = ReduceLROnPlateau(optimizer,
                                  mode="min",
                                  factor=0.1,
                                  patience=15,
                                  verbose=True,
                                  threshold=0.00001,
                                  threshold_mode='rel',
                                  cooldown=0,
                                  min_lr=0,
                                  eps=1e-08)

    for epoch in range(start_epoch, epochs):

        # Decay learning rate at particular epochs
        # if epoch in decay_lr_at:
        #     adjust_learning_rate_epoch(optimizer,epoch)

        # One epoch's training
        train_loss = train(train_loader=train_loader,
                           model=model,
                           criterion=criterion,
                           optimizer=optimizer,
                           epoch=epoch)
        print("epoch loss:", train_loss)
        scheduler.step(train_loss)

        # Save checkpoint
        save_checkpoint(epoch, model, optimizer)
Example #13
def main():
    """
    Training and validation.
    """
    # Training settings
    parser = argparse.ArgumentParser(description='SSD VOC AL')
    parser.add_argument('--trial_number', type=int, default=1, metavar='N',
                        help='trial number for given acquisition function (default: 1)')
    parser.add_argument('--acquisition_function', type=str, default='RANDOM', metavar='N',
                        help='type of acquisition. Options are: RANDOM, MARGIN_SAMPLING')
    parser.add_argument('--lr', type=float, default=1e-3, metavar='N',
                        help='learning rate (default: 1e-3)')
    parser.add_argument('--save_dir', type=str, default='./results/', metavar='N',
                        help='directory to save to (default: ./results/)')
    parser.add_argument('--reset_weight', type=bool, default=False, metavar='N',
                        help='reset network weights (default: False)')
    args = parser.parse_args()

    print("Training with the following acquisition function: ", args.acquisition_function)
    print("Training for trial #: ", args.trial_number)

    global epochs_since_improvement, start_epoch, label_map, epoch, checkpoint

    # Initialize model or load checkpoint
    if checkpoint is None:
        model = SSD300(n_classes=n_classes)     
        # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
        biases = list()
        not_biases = list()
        for param_name, param in model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)
        optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * args.lr}, {'params': not_biases}],
                                    lr=args.lr, momentum=momentum, weight_decay=weight_decay)

    else:
        checkpoint = torch.load(checkpoint)
        print("Loading checkpoint model.")
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

        # use lr
        for param_group in optimizer.param_groups:
            param_group['lr'] = args.lr



    model = model.to(device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)

    # Read data files, partition train/pool and test set
    with open(os.path.join(data_folder, 'TRAIN_images.json'), 'r') as j:
        train_images = json.load(j)
    with open(os.path.join(data_folder, 'TRAIN_objects.json'), 'r') as j:
        train_objects = json.load(j)
    all_train_indices = np.arange(len(train_images))

    
    train_indices = all_train_indices[:init_train_size]
    pool_indices = all_train_indices[init_train_size:5011] #ONLY VOC 2007, remove 5011 to use all data
    
    with open(os.path.join(data_folder, 'TEST_images.json'), 'r') as j:
        test_images = json.load(j)
    with open(os.path.join(data_folder, 'TEST_objects.json'), 'r') as j:
        test_objects = json.load(j)
    all_test_indices = np.arange(len(test_images))
    test_indices = all_test_indices

    # Custom dataloaders
    train_dataset = TrainDataset(train_images, train_objects, train_indices,
                                    keep_difficult=keep_difficult)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                                               collate_fn=train_dataset.collate_fn, num_workers=workers,
                                               pin_memory=True)  # note that we're passing the collate function here
    
    pool_dataset = PoolDataset(train_images, train_objects, pool_indices,
                                    pool_subset, num_of_queries,
                                    keep_difficult=keep_difficult)

    val_dataset = TestDataset(test_images, test_objects, test_indices,
                                    keep_difficult=keep_difficult)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=True,
                                                collate_fn=val_dataset.collate_fn, num_workers=workers,
                                                pin_memory=True)

    training_data = list()
    training_data.append(train_indices)
    mAP_list = list()
    APs_list = list()

    for i in range(acquisition_iterations):
        #grab new data from pool 
        img_indices = list()
        if (args.acquisition_function == 'MARGIN_SAMPLING'):
            print("Doing one iteration of margin sampling")
            img_indices = marginSampleAcquisition(pool_dataset, model)
        elif (args.acquisition_function == 'RANDOM'):
            print("Doing one iteration of random sampling")
            img_indices = randomSampleAcquisition(pool_dataset)
        elif (args.acquisition_function == 'ENTROPY' or args.acquisition_function == 'BALD' or 
            args.acquisition_function == 'VAR_RATIO' or args.acquisition_function == 'MEAN_STD' or 
            args.acquisition_function == 'MEAN_STD_WITH_BBOX'):
            print("Doing one iteration of ", args.acquisition_function)
            img_indices = dropoutAcquisition(pool_dataset, model, args.acquisition_function, dropout_iterations=10)
        elif (args.acquisition_function == 'LOCALIZATION_STABILITY'):
            img_indices = localizationAwareAcquisiton(pool_dataset, model)
        elif (args.acquisition_function == 'QBC'):
            img_indices = queryByCommittee(pool_dataset, model)
        else:
            print("UNKNOWN ACQUISITION FUNCTION")
            exit()

       
        # reset weights before training
        if args.reset_weight:
            # checkpoint = torch.load(checkpoint)
            print("Loading checkpoint model.")
            model = checkpoint['model']
            optimizer = checkpoint['optimizer']
            # use lr
            for param_group in optimizer.param_groups:
                param_group['lr'] = args.lr

        pool_dataset.removeFromPool(img_indices)
        train_dataset.addFromPool(img_indices)
        training_data.append(img_indices)

        train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                                            collate_fn=train_dataset.collate_fn, num_workers=workers,
                                            pin_memory=True)  
                                            # note that we're passing the collate function here



        # We can train on new data for certain number of iterations rather than epochs
        # XXXX iterations
        # epochs = int(80000 / len(train_dataset.indices))
        # print("training_data len: ", len(train_dataset.indices))
        # decay_epoch = int(60000 / len(train_dataset.indices))

        for epoch in range(epochs):
            print('Epochs: ', epoch, ' / ', epochs)
            print('Learning rate is: ', optimizer.param_groups[1]['lr'])

            # if epoch == decay_epoch:
            #     adjust_learning_rate(optimizer, 0.1) # decay by factor of 0.1

            # One epoch's training
            train(train_loader=train_loader,
                model=model,
                criterion=criterion,
                optimizer=optimizer,
                epoch=epoch)

        # check performance
        APs, mAP = evaluate(val_loader, model)
        mAP_list.append(mAP)
        APs_list.append(APs)

        if save_al_checkpoints:
            save_checkpoint_active_learning(args.acquisition_function, i,  model, optimizer, mAP, training_data)

    # store accuracy
    print('Storing Accuracy Values over experiments')
    save_str = args.save_dir + args.acquisition_function + '_' + str(args.trial_number) + '_test_acc.npy'
    mAP_list = np.array(mAP_list)
    np.savez(save_str, mAP_list=mAP_list, training_data=training_data, APs_list=APs_list)
Example #14
            # make images mini batch
            if is_training:
                img = load_image('voc2007/' + train_keys[idx])
                actual_data.append(data[train_keys[idx]])
            else:
                img = load_image('voc2007/' + test_keys[idx])
                actual_data.append(data[test_keys[idx]])

            img = img.reshape((300, 300, 3))
            mini_batch.append(img)

        return mini_batch, actual_data

    # tensorflow session
    with tf.Session() as sess:
        ssd = SSD300(sess)
        sess.run(tf.global_variables_initializer())

        # parameter saver
        saver = tf.train.Saver()

        print('\nSTART LEARNING')
        print('==================== ' + str(datetime.datetime.now()) +
              ' ====================')

        for ep in range(EPOCH):
            BATCH_LOSSES = []
            for ba in range(BATCH):
                minibatch, actual_data = next_batch(is_training=True)
                _, _, batch_loc, batch_conf, batch_loss = ssd.eval(
                    minibatch, actual_data, True)
Example #15
def main():
    """
    Training.
    """
    global start_epoch, label_map, epoch, checkpoint, decay_lr_at

    # Initialize model or load checkpoint
    if checkpoint is None:
        start_epoch = 0
        model = SSD300(n_classes=n_classes)
        # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
        biases = list()
        not_biases = list()
        for param_name, param in model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)
        optimizer = torch.optim.SGD(params=[{
            'params': biases,
            'lr': 2 * lr
        }, {
            'params': not_biases
        }],
                                    lr=lr,
                                    momentum=momentum,
                                    weight_decay=weight_decay)

    else:
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

    # Move to default device
    model = model.to(device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)

    #import active_vision_dataset_processing.data_loading
    import transforms, active_vision_dataset

    #Include all instances
    pick_trans = transforms.PickInstances(range(34))

    TRAIN_PATH = "./google_drive/MyDrive/ColabNotebooks/Project/trainDataset"

    train_dataset = active_vision_dataset.AVD(
        root=TRAIN_PATH,
        train=True,
        target_transform=pick_trans,
        scene_list=[
            'Home_001_1', 'Home_002_1', 'Home_003_1', 'Home_004_1',
            'Home_005_1', 'Home_006_1', 'Home_007_1', 'Home_008_1',
            'Home_014_1', 'Home_011_1', 'Home_010_1', 'Office_001_1'
        ],
        fraction_of_no_box=-1)

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=active_vision_dataset.collate)
    """
    #I TRY TO USE THE DEFAULT DATASET LOADER::::::::::::::

    # Custom dataloaders
    train_dataset = PascalVOCDataset(data_folder,
                                     split='train',
                                     keep_difficult=keep_difficult)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                                               collate_fn=train_dataset.collate_fn, num_workers=workers,
                                               pin_memory=True)  # note that we're passing the collate function here
   """

    # Calculate total number of epochs to train and the epochs to decay learning rate at (i.e. convert iterations to epochs)
    # To convert iterations to epochs, divide iterations by the number of iterations per epoch
    # The paper trains for 120,000 iterations with a batch size of 32, decays after 80,000 and 100,000 iterations
    epochs = iterations // (len(train_dataset) // 32)
    decay_lr_at = [it // (len(train_dataset) // 32) for it in decay_lr_at]

    # Epochs
    for epoch in range(start_epoch, epochs):

        # Decay learning rate at particular epochs
        if epoch in decay_lr_at:
            adjust_learning_rate(optimizer, decay_lr_to)

        # One epoch's training
        train(train_loader=train_loader,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              epoch=epoch)

        # Save checkpoint
        save_checkpoint(epoch, model, optimizer)
Example #16
                      font=font)

        del draw

    model.train()
    return annotated_image


if __name__ == '__main__':
    parser = make_parser()
    args = parser.parse_args()

    num_classes = 2

    # text and another
    model = SSD300(num_classes)
    epoch = 14
    save_path = os.path.join(args.model, f'epoch_{epoch}.pt')
    obj = torch.load(save_path)
    # model = obj['model']
    model.load_state_dict(obj['model'])
    model.to(device)

    transform = MyTransform()
    root_folder = args.root

    if not os.path.exists(args.save):
        os.makedirs(args.save)

    #
    folder_list = os.listdir(root_folder)
Example #17
def main():
    print('Number of classes: ', n_classes)
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint", type=str, default=None)
    parser.add_argument("--batch_size", type=int, default=8)
    parser.add_argument("--size", type=int, default=300)
    parser.add_argument("--grad_clip", type=int, default=None)
    parser.add_argument("--data_folder", type=str, default='./')
    parser.add_argument("--save_cp", type=str, default='checkpoint300')
    opt = parser.parse_args()

    assert opt.size in [300, 512]

    # Initialize model or load checkpoint
    if opt.checkpoint is None:
        start_epoch = 0
        if opt.size == 300:
            model = SSD300(n_classes=n_classes)
        else:
            model = SSD512(n_classes=n_classes)
        biases = list()
        not_biases = list()
        for param_name, param in model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)
        optimizer = torch.optim.SGD(params=[{
            'params': biases,
            'lr': 2 * lr
        }, {
            'params': not_biases
        }],
                                    lr=lr,
                                    momentum=momentum,
                                    weight_decay=weight_decay)
        # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo

    else:
        checkpoint = torch.load(opt.checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

    # Move to default device
    model = model.to(device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)

    # train_dataset = PascalVOCDataset(opt.data_folder, dim=(opt.size, opt.size))
    train_dataset = COCODataset(opt.data_folder, dim=(opt.size, opt.size))
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=opt.batch_size,
        shuffle=True,
        collate_fn=train_dataset.collate_fn,
        num_workers=workers,
        pin_memory=True)

    # Calculate total number of epochs to train and decay learning rate at (i.e. convert iterations to epochs)
    # To convert iterations to epochs, divide iterations by the number of iterations per epoch
    # The paper trains for 120,000 iterations with a batch size of 32, decays after 80,000 and 100,000 iterations
    epochs = iterations // (len(train_dataset) // 32)
    decay_lr_at = [80000, 100000]
    decay_lr_at = [it // (len(train_dataset) // 32) for it in decay_lr_at]

    # Epochs
    for epoch in range(start_epoch, epochs):

        # Decay learning rate at particular epochs
        if epoch in decay_lr_at:
            adjust_learning_rate(optimizer, decay_lr_to)

        # One epoch's training
        train(train_loader=train_loader,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              epoch=epoch,
              grad_clip=opt.grad_clip)

        # Save checkpoint
        save_checkpoint(epoch, model, optimizer, opt.save_cp)
Example #18
def main():

    # Custom dataloaders
    train_dataset = PascalVOCDataset(data_folder,
                                     split='train',
                                     keep_difficult=keep_difficult)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=train_dataset.collate_fn,
        num_workers=workers,
        pin_memory=True)  # note that we're passing the collate function here

    test_dataset = PascalVOCDataset(data_folder,
                                    split='test',
                                    keep_difficult=keep_difficult)
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=test_dataset.collate_fn,
        num_workers=workers,
        pin_memory=True)
    """
    Training.
    """
    global start_epoch, label_map, epoch, checkpoint

    # Initialize model or load checkpoint
    if checkpoint is None:
        start_epoch = 0
        model = SSD300(n_classes=n_classes)
        # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
        lr = 0.01
        optimizer = torch.optim.SGD([
            {
                'params': model.base.parameters(),
                'lr': lr / 100
            },
            {
                'params': model.aux_convs.parameters(),
                'lr': lr / 10
            },
        ],
                                    lr=0.01,
                                    momentum=0.8)
        scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer,
                                                      base_lr=[0.0001, 0.001],
                                                      max_lr=[0.001, 0.005],
                                                      step_size_up=31,
                                                      step_size_down=31)
        #print(model)
    else:
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']
        scheduler = torch.optim.lr_scheduler.CyclicLR(
            optimizer,
            base_lr=[0.000001, 0.00001, 0.00001],
            max_lr=[0.000005, 0.00009, 0.00005],
            step_size_up=31,
            step_size_down=31)

    # Move to default device
    model = model.to(device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)

    print(model)
    #print(next(iter(test_loader)))
    #    print(train_loader)
    #    a=next(iter(train_loader))
    #    print(a)
    # Calculate total number of epochs to train and the epochs to decay learning rate at (i.e. convert iterations to epochs)
    # To convert iterations to epochs, divide iterations by the number of iterations per epoch
    # The paper trains for 120,000 iterations with a batch size of 32, decays after 80,000 and 100,000 iterations
    #epochs = iterations // (len(train_dataset) // 8)
    #print(epochs)
    #decay_lr_at = [it // (len(train_dataset) // 32) for it in decay_lr_at]
    epochs = 125
    # Epochs
    #scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer,div_factor=100.0, final_div_factor=100000.0, max_lr=0.001, total_steps=66)
    for epoch in range(start_epoch, epochs):

        # One epoch's training
        train(train_loader=train_loader,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              scheduler=scheduler,
              epoch=epoch)
        #print(scheduler.get_lr())

        test(
            test_loader=test_loader,
            model=model,
            criterion=criterion,
            #optimizer=optimizer,
            epoch=epoch)

        # Save checkpoint
        save_checkpoint(epoch, model, optimizer)
Example #19
def main():
    wandb.init(project="re")

    use_cuda = not config.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 4, 'pin_memory': True} if use_cuda else {}

    torch.manual_seed(config.seed)  # pytorch random seed
    # numpy.random.seed(config.seed) # numpy random seed
    torch.backends.cudnn.deterministic = True

    # Config is a variable that holds and saves hyperparameters and inputs
    #wandb.watch(model)

    #torch.manual_seed(args.seed)
    """
    Training.
    """
    global start_epoch, label_map, epoch, checkpoint, decay_lr_at
    #print(device)
    # Initialize model or load checkpoint
    if checkpoint is None:
        start_epoch = 0
        model = SSD300(n_classes=n_classes)
        # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
        biases: List[Any] = list()
        not_biases = list()
        for param_name, param in model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=config.lr,
                                    momentum=config.momentum)

    else:
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

    # Move to default device
    model = model.to(device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)
    #wandb.watch(model, log="all")
    wandb.watch(model, log="all")
    # Custom dataloaders
    train_dataset = PascalVOCDataset(data_folder,
                                     split='train',
                                     keep_difficult=keep_difficult)
    # print(train_dataset)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.batch_size,
        shuffle=True,
        collate_fn=train_dataset.collate_fn,
        **kwargs)  # note that we're passing the collate function here

    test_dataset = PascalVOCDataset(data_folder,
                                    split='test',
                                    keep_difficult=keep_difficult)
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=config.batch_size,
        shuffle=True,
        collate_fn=test_dataset.collate_fn,
        **kwargs)
    #print(next(iter(test_loader)))
    #    print(train_loader)
    #    a=next(iter(train_loader))
    #    print(a)
    # Calculate total number of epochs to train and the epochs to decay learning rate at (i.e. convert iterations to epochs)
    # To convert iterations to epochs, divide iterations by the number of iterations per epoch
    # The paper trains for 120,000 iterations with a batch size of 32, decays after 80,000 and 100,000 iterations
    # epochs = iterations // (len(train_dataset) // 8)
    #print(epochs)
    #decay_lr_at = [it // (len(train_dataset) // 32) for it in decay_lr_at]
    config.epochs = 50
    #epochs = 10
    # Epochs
    for epoch in range(start_epoch, config.epochs + 1):

        # # Decay learning rate at particular epochs
        # if epoch in decay_lr_at:
        #     adjust_learning_rate(optimizer, decay_lr_to)

        # One epoch's training
        train(config,
              train_loader=train_loader,
              device=device,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              epoch=epoch)
        test(
            config,
            test_loader=test_loader,
            device=device,
            model=model,
            criterion=criterion,
            #optimizer=optimizer,
            epoch=epoch)

        # Save checkpoint
        save_checkpoint(epoch, model, optimizer)
Example #20
from PIL import ImageDraw, ImageFont
import numpy as np
import pickle
from PIL import Image
import cv2
import time
from model import SSD300
from utils import *
import os
import jittor as jt
jt.flags.use_cuda = 1

# Load model checkpoint
experiment_id = "pretrain_model"  # set your experiment id
model_path = os.path.join('tensorboard', experiment_id, 'model_best.pkl')
params = pickle.load(open(model_path, "rb"))
model = SSD300(21)
model.load_parameters(params)
print(f'[*] Load model {model_path} success')


# Transforms
def transform(image,
              size=300,
              mean=[0.485, 0.456, 0.406],
              std=[0.229, 0.224, 0.225]):
    image = cv2.resize(image, (300, 300))
    image /= 255.
    image = (image - mean) / std
    return image.transpose(2, 0, 1)
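
A usage sketch for the transform above ('test.jpg' is a placeholder path); cv2 loads images as uint8 BGR, so they are cast to float32 before the in-place division, and a BGR-to-RGB conversion may also be needed depending on how the model was trained:

image = cv2.imread('test.jpg').astype(np.float32)  # HWC float32
x = transform(image)                               # (3, 300, 300) numpy array
x = jt.array(x[np.newaxis])                        # add a batch dimension for the model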

Example #21
import torch
from torchvision import transforms
from utils import *
from PIL import Image, ImageDraw, ImageFont
from datasets import PascalVOCDataset
from collections import Counter
import threading
n_classes = len(label_map)
from model import SSD300, MultiBoxLoss
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = SSD300(n_classes=n_classes)
# Load model checkpoint
model = model.to(device)
checkpoint = torch.load('model.pt')
model.load_state_dict(checkpoint)
model.eval()

# Transforms
resize = transforms.Resize((300, 300))
to_tensor = transforms.ToTensor()
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])


def detect(original_image, min_score, max_overlap, top_k, suppress=None):
    """
    Detect objects in an image with a trained SSD300, and visualize the results.

    :param original_image: image, a PIL Image
    :param min_score: minimum threshold for a detected box to be considered a match for a certain class
    :param max_overlap: maximum overlap two boxes can have so that the one with the lower score is not suppressed via Non-Maximum Suppression (NMS)
    :param top_k: if there are a lot of resulting detection across all classes, keep only the top 'k'
Example #22
def main():
    # Prepare train dataset and dataloader
    train_ds = PascalVOCDataset('./data', 'TRAIN', keep_difficult=keep_difficult)
    train_loader = torch.utils.data.DataLoader(train_ds,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               collate_fn=train_ds.collate_fn,  # note that we're passing the collate function here
                                               num_workers=num_workers,
                                               pin_memory=True)
    n_classes = len(train_ds.label_map())
    start_epoch = 0

    # Initialize model
    model = SSD300(n_classes=n_classes)

    # Load checkpoint if existed
    checkpoint = None
    if checkpoint_path is not None and os.path.exists(checkpoint_path):
        checkpoint = torch.load(checkpoint_path)
        start_epoch = checkpoint['epoch'] + 1
        print('Load checkpoint from epoch %d.\n' % checkpoint['epoch'])

    if checkpoint is not None:
        model.load_state_dict(checkpoint['model_state_dict'])

    model.to(device)
    model.train()

    # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
    biases = list()
    not_biases = list()
    for param_name, param in model.named_parameters():
        if param.requires_grad:
            if param_name.endswith('.bias'):
                biases.append(param)
            else:
                not_biases.append(param)
    optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * lr}, {'params': not_biases}],
                                lr=lr, momentum=momentum, weight_decay=weight_decay)

    if checkpoint is not None:
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])

    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)

    # Calculate total number of epochs to train and the epochs to decay learning rate at (i.e. convert iterations to epochs)
    epochs = iterations // (len(train_ds) // batch_size)
    decay_lr_at_epochs = [it // (len(train_ds) // batch_size) for it in decay_lr_at]

    # Epochs
    for epoch in range(start_epoch, epochs):

        # Decay learning rate at particular epochs
        if epoch in decay_lr_at_epochs:
            utils.adjust_learning_rate(optimizer, decay_lr_to)

        # One epoch's training
        train(train_loader, model, criterion, optimizer, epoch)

        # Save checkpoint
        utils.save_checkpoint(checkpoint_path, model, optimizer, epoch)
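utils.save_checkpoint is assumed to write the keys that the loading code above expects ('epoch', 'model_state_dict', 'optimizer_state_dict'); a minimal sketch:

import torch

def save_checkpoint(checkpoint_path, model, optimizer, epoch):
    # a sketch; the project's utils.save_checkpoint may store additional fields
    torch.save({'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict()},
               checkpoint_path)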
Example #23
    IoU = interArea / (boxAArea + boxBArea - interArea)

    # print(IoU,'\n',in_class,'\n',gt_class,'\n',in_coor,'\n',gt_coor)
    # class_acc = 1 if gt_class.item() == in_class.item() else 0
    class_acc = 1 if (gt_class.item()
                      == in_class.item()) and IoU > theta else 0
    # print(class_acc,mean_IoU)
    return class_acc, IoU
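
# The fragment above begins mid-function: interArea, boxAArea and boxBArea are assumed
# to have been computed earlier. A minimal sketch of that computation for boxes in
# [xmin, ymin, xmax, ymax] format (the format is an assumption):
def box_areas_and_intersection(boxA, boxB):
    # intersection rectangle
    xA, yA = max(boxA[0], boxB[0]), max(boxA[1], boxB[1])
    xB, yB = min(boxA[2], boxB[2]), min(boxA[3], boxB[3])
    interArea = max(0, xB - xA) * max(0, yB - yA)
    # individual box areas
    boxAArea = (boxA[2] - boxA[0]) * (boxA[3] - boxA[1])
    boxBArea = (boxB[2] - boxB[0]) * (boxB[3] - boxB[1])
    return interArea, boxAArea, boxBArea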


device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
batch_size = 1
data_encoder = DataEncoder()

print('Loading model..')
net = SSD300()
net.load_state_dict(torch.load('./checkpoint/ssd300_ckpt.pth')['net'])
net.to(device)
net.eval()

print('Preparing dataset..')
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
])
testset = ListDataset(root='/home/pzl/Data/tiny_vid',
                      list_file="/home/pzl/Data/tiny_vid/test_images.txt",
                      train=False,
                      transform=transform)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=batch_size,
Example #24
def main():
    """
    Training.
    """
    global start_epoch, label_map, epoch, checkpoint, decay_lr_at

    # initialize model or load checkpoint
    if checkpoint is None:
        start_epoch = 0
        model = SSD300(n_classes=n_classes)
        # initialize the optimizer, with twice the default learning rate...
        # ...for biases, as in the original Caffe repo
        biases = list()
        not_biases = list()
        for param_name, param in model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)
        optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * lr}, {'params': not_biases}],
                                    lr=lr, momentum=momentum, weight_decay=weight_decay)

    else:
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

    # move to default device
    model = model.to(device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)

    # custom dataloaders
    train_dataset = PascalVOCDataset(data_folder,
                                     split='train')
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                                               collate_fn=train_dataset.collate_fn, num_workers=workers,
                                               pin_memory=True)  # using `collate_fn()` here

    # calculate total number of epochs to train and the epochs to decay...
    # ...learning rate at (i.e. convert iterations to epochs)
    # to convert iterations to epochs,...
    # ...divide iterations by the number of iterations per epoch
    epochs = iterations // (len(train_dataset) // batch_size)
    decay_lr_at = [it // (len(train_dataset) // batch_size) for it in decay_lr_at]
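    # e.g. with the paper's setup (illustrative numbers: ~16,551 VOC07+12 trainval images,
    # batch_size 32, decay_lr_at = [80000, 100000]):
    #   epochs      = 120000 // (16551 // 32) = 232
    #   decay_lr_at = [80000 // 517, 100000 // 517] = [154, 193]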
    print(f"Training for {iterations} iterations...")
    print(f"Training for {epochs} epochs...")
    print(f"Batch size is {batch_size}")
    print(f"Logging every {print_freq} batches...")

    # logging into train.txt
    with open(file='../logs/train_logs.txt', mode='a+') as f:
        f.writelines(f"Training for {iterations} iterations...\n")
        f.writelines(f"Training for {epochs} epochs...\n")
        f.writelines(f"Batch size is {batch_size}\n")
        f.writelines(f"Logging every {print_freq} batches...\n")

    # epochs
    for epoch in range(start_epoch, epochs):

        # decay learning rate at particular epochs
        if epoch in decay_lr_at:
            adjust_learning_rate(optimizer, decay_lr_to)

            # logging into train.txt
            with open(file='../logs/train_logs.txt', mode='a+') as f:
                f.writelines(f"DECAYING learning rate.\n The new LR is {(optimizer.param_groups[1]['lr'],)}\n")

        # one epoch's training
        train(train_loader=train_loader,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              epoch=epoch)

        # save checkpoint after each epoch
        save_checkpoint(epoch, model, optimizer)
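Example #24 calls save_checkpoint(epoch, model, optimizer) without defining it; a minimal sketch, assuming it simply pickles the whole model and optimizer (which is why the resume branches above read checkpoint['model'] directly):

def save_checkpoint(epoch, model, optimizer):
    # Persist everything needed to resume training from the next epoch
    state = {'epoch': epoch, 'model': model, 'optimizer': optimizer}
    torch.save(state, 'checkpoint_ssd300.pth.tar')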
Example #25
0
def main():
    """
    Training and validation.
    """
    global epochs_since_improvement, start_epoch, label_map, best_loss, epoch, checkpoint

    # Initialize model or load checkpoint
    if checkpoint is None:
        model = SSD300(n_classes=n_classes)
        # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
        biases = list()
        not_biases = list()
        for param_name, param in model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)
        optimizer = torch.optim.RMSprop(
            params=[{
                'params': biases,
                'lr': 2 * lr
            }, {
                'params': not_biases
            }],
            lr=lr,
            momentum=momentum,
            weight_decay=weight_decay
        )  # RMSprop optimizer, following the SphereNet paper

    else:
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        epochs_since_improvement = checkpoint['epochs_since_improvement']
        best_loss = checkpoint['best_loss']
        print(
            '\nLoaded checkpoint from epoch %d. Best loss so far is %.3f.\n' %
            (start_epoch, best_loss))
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

    # Move to default device
    model = model.to(device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)

    # Custom dataloaders
    train_dataset = PascalVOCDataset(data_folder,
                                     split='train',
                                     keep_difficult=keep_difficult)
    val_dataset = PascalVOCDataset(data_folder,
                                   split='test',
                                   keep_difficult=keep_difficult)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=train_dataset.collate_fn,
        num_workers=workers,
        pin_memory=True)  # note that we're passing the collate function here
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=batch_size,
                                             shuffle=True,
                                             collate_fn=val_dataset.collate_fn,
                                             num_workers=workers,
                                             pin_memory=True)
    # Epochs
    for epoch in range(start_epoch, epochs):
        # Paper describes decaying the learning rate at the 80000th, 100000th, 120000th 'iteration', i.e. model update or batch
        # The paper uses a batch size of 32, which means there were about 517 iterations in an epoch
        # Therefore, to find the epochs to decay at, you could do,
        # if epoch in {80000 // 517, 100000 // 517, 120000 // 517}:
        #     adjust_learning_rate(optimizer, 0.1)

        # In practice, I just decayed the learning rate when loss stopped improving for long periods,
        # and I would resume from the last best checkpoint with the new learning rate,
        # since there's no point in resuming at the most recent and significantly worse checkpoint.
        # So, when you're ready to decay the learning rate, just set checkpoint = 'BEST_checkpoint_ssd300.pth.tar' above
        # and have adjust_learning_rate(optimizer, 0.1) BEFORE this 'for' loop

        # One epoch's training
        train(train_loader=train_loader,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              epoch=epoch)

        # One epoch's validation
        val_loss = validate(val_loader=val_loader,
                            model=model,
                            criterion=criterion)

        # Did validation loss improve?
        is_best = val_loss < best_loss
        best_loss = min(val_loss, best_loss)

        if not is_best:
            epochs_since_improvement += 1
            print("\nEpochs since last improvement: %d\n" %
                  (epochs_since_improvement, ))

        else:
            epochs_since_improvement = 0

        # Save checkpoint
        save_checkpoint(epoch, epochs_since_improvement, model, optimizer,
                        val_loss, best_loss, is_best)
Example #26
0
def main():
    """
    Training.
    """
    global start_epoch, label_map, epoch, checkpoint_path, decay_lr_at, log_file

    # Initialize model or load checkpoint
    now = datetime.datetime.now().strftime('%m-%d_%H-%M-%S')
    if not os.path.exists(checkpoint_path):
        # create log file
        log_file = f'../logs/{now}.txt'
        start_epoch = 0
        model = SSD300(n_classes=n_classes)
        # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
        biases = list()
        not_biases = list()
        for param_name, param in model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)
        optimizer = torch.optim.SGD(params=[{
            'params': biases,
            'lr': 2 * lr
        }, {
            'params': not_biases
        }],
                                    lr=lr,
                                    momentum=momentum,
                                    weight_decay=weight_decay)

    else:
        checkpoint = torch.load(checkpoint_path)
        start_epoch = checkpoint['epoch'] + 1
        log_file = checkpoint['log_file']
        if not os.path.exists(log_file):
            log_file = f'../logs/{now}.txt'
        log_and_print(log_file,
                      '\nLoaded checkpoint from epoch %d.\n' % start_epoch)
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

    # Move to default device
    model = model.to(device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)

    # Custom dataloaders
    train_dataset = BDD100KDataset(IMAGE_DIR,
                                   JSON_DIR,
                                   split='train',
                                   valid_proportion=valid_proportion)
    train_loader, val_loader = get_train_valid_dataloaders(
        batch_size,
        valid_proportion=valid_proportion,
        image_dir=IMAGE_DIR,
        json_dir=JSON_DIR,
        num_workers=4,
        model='ssd')

    # Calculate total number of epochs to train and the epochs to decay learning rate at (i.e. convert iterations to epochs)
    # To convert iterations to epochs, divide iterations by the number of iterations per epoch
    # The paper trains for 120,000 iterations with a batch size of 32, decays after 80,000 and 100,000 iterations
    epochs = iterations // (len(train_dataset) // 32)
    decay_lr_at = [it // (len(train_dataset) // 32) for it in decay_lr_at]

    # Epochs
    for epoch in range(start_epoch, epochs + 10):

        # Decay learning rate at particular epochs
        if epoch in decay_lr_at:
            adjust_learning_rate(optimizer, decay_lr_to)

        # One epoch's training
        train(train_loader=train_loader,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              epoch=epoch)

        evaluate(val_loader, model)

        # Save checkpoint
        save_checkpoint(epoch, model, optimizer, log_file)
Example #27
0
def train():
    set_seed(seed=10)
    os.makedirs(args.save_root, exist_ok=True)

    # create model, optimizer and criterion
    model = SSD300(n_classes=len(label_map), device=device)
    biases = []
    not_biases = []
    for name, param in model.named_parameters():
        if param.requires_grad:
            if name.endswith('.bias'):
                biases.append(param)
            else:
                not_biases.append(param)
    model = model.to(device)
    optimizer = torch.optim.SGD(params=[{
        'params': biases,
        'lr': 2 * args.lr
    }, {
        'params': not_biases
    }],
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    if args.resume is None:
        start_epoch = 0
    else:
        checkpoint = torch.load(args.resume, map_location=device)
        start_epoch = checkpoint['epoch'] + 1
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
    print(f'Training will start at epoch {start_epoch}.')

    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy,
                             device=device,
                             alpha=args.alpha)
    criterion = criterion.to(device)
    '''
    scheduler = StepLR(optimizer=optimizer,
                       step_size=20,
                       gamma=0.5,
                       last_epoch=start_epoch - 1,
                       verbose=True)
    '''

    # load data
    transform = Transform(size=(300, 300), train=True)
    train_dataset = VOCDataset(root=args.data_root,
                               image_set=args.image_set,
                               transform=transform,
                               keep_difficult=True)
    train_loader = DataLoader(dataset=train_dataset,
                              collate_fn=collate_fn,
                              batch_size=args.batch_size,
                              num_workers=args.num_workers,
                              shuffle=True,
                              pin_memory=True)

    losses = AverageMeter()
    for epoch in range(start_epoch, args.num_epochs):
        # decay learning rate at particular epochs
        if epoch in [120, 140, 160]:
            adjust_learning_rate(optimizer, 0.1)

        # train model
        model.train()
        losses.reset()
        bar = tqdm(train_loader, desc='Train the model')
        for i, (images, bboxes, labels, _) in enumerate(bar):
            images = images.to(device)
            bboxes = [b.to(device) for b in bboxes]
            labels = [l.to(device) for l in labels]

            predicted_bboxes, predicted_scores = model(
                images)  # (N, 8732, 4), (N, 8732, num_classes)
            loss = criterion(predicted_bboxes, predicted_scores, bboxes,
                             labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            losses.update(loss.item(), images.size(0))

            if i % args.print_freq == args.print_freq - 1:
                bar.write(f'Average Loss: {losses.avg:.4f}')

        bar.write(f'Epoch: [{epoch + 1}|{args.num_epochs}] '
                  f'Average Loss: {losses.avg:.4f}')
        # adjust learning rate
        # scheduler.step()

        # save model
        state_dict = {
            'epoch': epoch,
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict()
        }
        save_path = os.path.join(args.save_root, 'ssd300.pth')
        torch.save(state_dict, save_path)

        if epoch % args.save_freq == args.save_freq - 1:
            shutil.copyfile(
                save_path,
                os.path.join(args.save_root, f'ssd300_epochs_{epoch + 1}.pth'))
Example #28
0
def main():
    """
    Training and validation.
    """
    global epochs_since_improvement, start_epoch, label_map, best_loss, epoch, checkpoint

    # Initialize model or load checkpoint
    if checkpoint is None:
        model = SSD300(n_classes=n_classes)
        # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
        biases = list()
        not_biases = list()
        for param_name, param in model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)
        # optimizer = torch.optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.99))
        optimizer = torch.optim.SGD(params=[{
            'params': biases,
            'lr': 2 * lr
        }, {
            'params': not_biases
        }],
                                    lr=lr,
                                    momentum=momentum,
                                    weight_decay=weight_decay)

    else:
        # Resume path (mirrors the other examples in this listing); without it, `model`
        # and `optimizer` would be undefined whenever `checkpoint` is not None
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        epochs_since_improvement = checkpoint['epochs_since_improvement']
        best_loss = checkpoint['best_loss']
        print('\nLoaded checkpoint from epoch %d. Best loss so far is %.3f.\n' %
              (start_epoch, best_loss))
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

    # Move to default device
    model = model.to(device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)

    # Custom dataloaders
    train_dataset = ICDARDataset(data_folder, split='train')
    val_dataset = ICDARDataset(data_folder, split='test')
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=train_dataset.collate_fn,
        num_workers=workers,
        pin_memory=True)  # note that we're passing the collate function here
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=batch_size,
                                             shuffle=True,
                                             collate_fn=val_dataset.collate_fn,
                                             num_workers=workers,
                                             pin_memory=True)

    # Epochs
    for epoch in range(start_epoch, epochs):

        # One epoch's training
        train_loss = train(train_loader=train_loader,
                           model=model,
                           criterion=criterion,
                           optimizer=optimizer,
                           epoch=epoch)

        # One epoch's validation
        val_loss = validate(val_loader=val_loader,
                            model=model,
                            criterion=criterion)

        # Did validation loss improve?
        is_best = val_loss < best_loss
        best_loss = min(val_loss, best_loss)

        if not is_best:
            epochs_since_improvement += 1
            print("\nEpochs since last improvement: %d\n" %
                  (epochs_since_improvement, ))

        else:
            epochs_since_improvement = 0

        # Save checkpoint
        save_checkpoint(epoch, epochs_since_improvement, model, optimizer,
                        val_loss, best_loss, is_best)

        with open('log.txt', 'a+') as f:
            f.write('epoch:' + str(epoch) + '  train loss:' + str(train_loss) +
                    '  val loss:' + str(val_loss) + '\n')
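Example #28's validate() is likewise assumed rather than shown; a minimal sketch, assuming the loader yields (images, boxes, labels, ...) as in the other examples and that the validation loss is just the MultiBoxLoss averaged over the split:

def validate(val_loader, model, criterion):
    # Average the detection loss over the validation set without tracking gradients
    model.eval()
    total_loss, n_batches = 0.0, 0
    with torch.no_grad():
        for images, boxes, labels, *_ in val_loader:
            images = images.to(device)
            boxes = [b.to(device) for b in boxes]
            labels = [l.to(device) for l in labels]
            predicted_locs, predicted_scores = model(images)
            total_loss += criterion(predicted_locs, predicted_scores, boxes, labels).item()
            n_batches += 1
    model.train()
    return total_loss / max(n_batches, 1)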
Example #29
0
def main():

    global start_epoch, label_map, epoch, checkpoint, decay_lr_at

    # Initialize model or load checkpoint
    if checkpoint is None:
        start_epoch = 0
        model = SSD300(n_classes=n_classes)
        biases = list()
        not_biases = list()
        for param_name, param in model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)
        optimizer = torch.optim.SGD(params=[{
            'params': biases,
            'lr': 2 * lr
        }, {
            'params': not_biases
        }],
                                    lr=lr,
                                    momentum=momentum,
                                    weight_decay=weight_decay)

    else:
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

    model = model.to(device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)

    # Data loaders
    train_dataset = PascalVOCDataset(data_folder,
                                     split='train',
                                     keep_difficult=keep_difficult)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=train_dataset.collate_fn,
        num_workers=workers,
        pin_memory=True)

    # Calculate the number of epochs
    epochs = iterations // (len(train_dataset) // batch_size)
    decay_lr_at = [
        it // (len(train_dataset) // batch_size) for it in decay_lr_at
    ]

    # Epochs
    for epoch in range(start_epoch, epochs):

        # Decay learning rate at particular epochs
        if epoch in decay_lr_at:
            adjust_learning_rate(optimizer, decay_lr_to)

        # One epoch's training
        train(train_loader=train_loader,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              epoch=epoch)

        # Save checkpoint
        save_checkpoint(epoch, model, optimizer)
Example #30
0
    # Parameters
    keep_difficult = True  # difficult ground truth objects must always be considered in mAP calculation, because these objects DO exist!
    workers = 4
    device = torch.device(
        f"cuda:{args.gpu}" if torch.cuda.is_available() else "cpu")

    # Load model checkpoint that is to be evaluated
    if args.checkpoint == 'pretrained_ssd300.pth.tar':
        checkpoint = torch.load(args.checkpoint)
        model = checkpoint['model']
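        # The pretrained checkpoint pickles the whole model; Conv2d layers saved with an
        # older PyTorch may lack the padding_mode attribute, so patch it in before inference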
        for m in model.modules():
            if 'Conv' in str(type(m)):
                setattr(m, 'padding_mode', 'zeros')
    else:
        checkpoint = torch.load(args.checkpoint)
        model = SSD300(n_classes=len(label_map))
        model.load_state_dict(checkpoint['model'])

    model = model.to(device)
    # Switch to eval mode
    model.eval()

    # Load test data
    test_dataset = PascalVOCDataset(args.data_folder,
                                    split='test',
                                    keep_difficult=keep_difficult)
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        collate_fn=test_dataset.collate_fn,