Example #1
def main(config):
    """
    Training
    """
    global label_map, log

    out_dir = os.path.join('./models', config.model_name)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    log.open(os.path.join(out_dir, config.model_name + '.txt'), mode='a')
    log.write('\tout_dir = %s\n' % out_dir)
    log.write('\n')

    # Initialize model or load checkpoint
    if config.checkpoint is None:
        start_epoch = 0
        model = SSD300(n_classes=n_classes)
        # Initialize the optimizer, with twice the default learning rate for biases
        biases = list()
        not_biases = list()
        for param_name, param in model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)

        optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * config.lr}, {'params': not_biases}],
            lr=config.lr, momentum=config.momentum, weight_decay=config.weight_decay)

    else:
        checkpoint = torch.load(config.checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']
        log.write('\nLoaded checkpoint from epoch %d.\n' % start_epoch)

    model = model.to(device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)

    train_dataset = PascalVOCDataset(data_dir=config.data_folder, split='train', keep_difficult=config.keep_difficult)

    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True,
        collate_fn=train_dataset.collate_fn, num_workers=config.num_workers, pin_memory=True)

    for epoch in range(start_epoch, config.epochs):
        if epoch in config.decay_lr_at:
            adjust_learning_rate(optimizer, config.decay_lr_to)

        train(train_loader=train_loader, model=model, criterion=criterion, optimizer=optimizer, epoch=epoch)

        # Save checkpoint
        save_checkpoints(os.path.join(out_dir, 'checkpoint', 'checkpoint_epoch_{0}.pth.tar'.format(epoch+1)))
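Example #1 calls adjust_learning_rate(optimizer, decay_lr_to) but never defines it. A minimal sketch, assuming the helper simply scales every parameter group's learning rate by the given factor (as in the common SSD300 training scripts):

def adjust_learning_rate(optimizer, scale):
    """Scale the learning rate of every parameter group by the given factor."""
    for param_group in optimizer.param_groups:
        param_group['lr'] = param_group['lr'] * scale
    print("DECAYING learning rate; scaled by a factor of %f." % scale)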
Example #2
def load(checkpoint):
    # Load model checkpoint that is to be evaluated
    checkpoint = torch.load(checkpoint)
    model = checkpoint['model']
    model = model.to(device)

    # Switch to eval mode
    model.eval()

    # Load test data
    test_dataset = PascalVOCDataset(data_folder,
                                    split='test',
                                    keep_difficult=keep_difficult)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
                                              collate_fn=test_dataset.collate_fn, num_workers=workers, pin_memory=True)
    return test_loader, model
def main():
    """
    Training.
    """
    # Initialize model and optimizer
    model = tiny_detector(n_classes=n_classes)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy)
    optimizer = torch.optim.SGD(params=model.parameters(),
                                lr=lr,
                                momentum=momentum,
                                weight_decay=weight_decay)

    # Move to default device
    model = model.to(device)
    criterion = criterion.to(device)

    # Custom dataloaders
    train_dataset = PascalVOCDataset(data_folder,
                                     split='train',
                                     keep_difficult=keep_difficult)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=train_dataset.collate_fn,
        num_workers=workers,
        pin_memory=True)  # note that we're passing the collate function here

    # Epochs
    for epoch in range(total_epochs):
        # Decay learning rate at particular epochs
        if epoch in decay_lr_at:
            adjust_learning_rate(optimizer, decay_lr_to)

        # One epoch's training
        train(train_loader=train_loader,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              epoch=epoch)

        # Save checkpoint
        save_checkpoint(epoch, model, optimizer)
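Example #2 calls save_checkpoint(epoch, model, optimizer) without showing it. A minimal sketch, assuming it pickles the whole model and optimizer under the 'epoch' / 'model' / 'optimizer' keys that the loading code in these examples reads back; the filename is a placeholder:

import torch

def save_checkpoint(epoch, model, optimizer):
    """Save a training checkpoint with the keys the loading code above expects."""
    state = {'epoch': epoch, 'model': model, 'optimizer': optimizer}
    torch.save(state, 'checkpoint_ssd300.pth.tar')  # placeholder filename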
Example #4
def main():
    wandb.init()

    # Config is a variable that holds and saves hyperparameters and inputs
    #wandb.watch(model)

    torch.manual_seed(30)
    """
    Training.
    """
    global start_epoch, label_map, epoch, checkpoint, decay_lr_at
    # Initialize model or load checkpoint
    start_epoch = 79
    model = SSD300(n_classes=n_classes)


    # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
    biases: List[Any] = list()
    not_biases = list()
    for param_name, param in model.named_parameters():
        if param.requires_grad:
            if param_name.endswith('.bias'):
                biases.append(param)
            else:
                not_biases.append(param)
    optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * lr}, {'params': not_biases}],
                                lr=lr, momentum=momentum, weight_decay=weight_decay)
    scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0.0003,
                                                  max_lr=0.0008, step_size_up=26, step_size_down=26)

    # Move to default device
    model = model.to(device)
    checkpoint = torch.load('modelfi.pt')
    model.load_state_dict(checkpoint)
    # checkpoint = torch.load('model_best.pth.tar')
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)
    wandb.watch(model, log="all")
    # Custom dataloaders
    train_dataset = PascalVOCDataset(data_folder,
                                     split='train',
                                     keep_difficult=keep_difficult)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                                               collate_fn=train_dataset.collate_fn, num_workers=workers,
                                               pin_memory=True)  # note that we're passing the collate function here


    test_dataset = PascalVOCDataset(data_folder,split='test', keep_difficult=keep_difficult)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=True,
                                              collate_fn=test_dataset.collate_fn, num_workers=workers, pin_memory=True)
    # Calculate total number of epochs to train and the epochs to decay learning rate at (i.e. convert iterations to epochs)
    # To convert iterations to epochs, divide iterations by the number of iterations per epoch
    # The paper trains for 120,000 iterations with a batch size of 32, decays after 80,000 and 100,000 iterations
   # epochs = iterations // (len(train_dataset) // 8)
    #print(epochs)
    #decay_lr_at = [it // (len(train_dataset) // 32) for it in decay_lr_at]

    epochs = 100
    # Epochs
    for epoch in range(start_epoch, epochs):
        #
        # # Decay learning rate at particular epochs
        # if epoch in decay_lr_at:
        #     adjust_learning_rate(optimizer, decay_lr_to)

        # One epoch's training
        train(train_loader=train_loader,
              model=model,
              scheduler=scheduler,
              criterion=criterion,
              optimizer=optimizer,
              epoch=epoch)
        test(test_loader=test_loader,
              model=model,
              criterion=criterion,
              #optimizer=optimizer,
              epoch=epoch)
Example #5
def main():
    # Prepare train dataset and dataloader
    train_ds = PascalVOCDataset('./data', 'TRAIN', keep_difficult=keep_difficult)
    train_loader = torch.utils.data.DataLoader(train_ds,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               collate_fn=train_ds.collate_fn,  # note that we're passing the collate function here
                                               num_workers=num_workers,
                                               pin_memory=True)
    n_classes = len(train_ds.label_map())
    start_epoch = 0

    # Initialize model
    model = SSD300(n_classes=n_classes)

    # Load checkpoint if existed
    checkpoint = None
    if checkpoint_path is not None and os.path.exists(checkpoint_path):
        checkpoint = torch.load(checkpoint_path)
        start_epoch = checkpoint['epoch'] + 1
        print('Load checkpoint from epoch %d.\n' % checkpoint['epoch'])

    if checkpoint is not None:
        model.load_state_dict(checkpoint['model_state_dict'])

    model.to(device)
    model.train()

    # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
    biases = list()
    not_biases = list()
    for param_name, param in model.named_parameters():
        if param.requires_grad:
            if param_name.endswith('.bias'):
                biases.append(param)
            else:
                not_biases.append(param)
    optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * lr}, {'params': not_biases}],
                                lr=lr, momentum=momentum, weight_decay=weight_decay)

    if checkpoint is not None:
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])

    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)

    # Calculate total number of epochs to train and the epochs to decay learning rate at (i.e. convert iterations to epochs)
    epochs = iterations // (len(train_ds) // batch_size)
    decay_lr_at_epochs = [it // (len(train_ds) // batch_size) for it in decay_lr_at]

    # Epochs
    for epoch in range(start_epoch, epochs):

        # Decay learning rate at particular epochs
        if epoch in decay_lr_at_epochs:
            utils.adjust_learning_rate(optimizer, decay_lr_to)

        # One epoch's training
        train(train_loader, model, criterion, optimizer, epoch)

        # Save checkpoint
        utils.save_checkpoint(checkpoint_path, model, optimizer, epoch)
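Example #5 restores 'model_state_dict' and 'optimizer_state_dict' from the checkpoint, so its utils.save_checkpoint presumably writes those keys. A minimal sketch under that assumption:

import torch

def save_checkpoint(checkpoint_path, model, optimizer, epoch):
    """Save only the state dicts, matching the keys read back in main() above."""
    torch.save({'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict()},
               checkpoint_path)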
from model import SSD300
from datasets import PascalVOCDataset
import utils

from tqdm import tqdm

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Parameters
keep_difficult = True  # difficult ground truth objects must always be considered in mAP calculation, because these objects DO exist!
batch_size = 64
workers = 4
checkpoint_path = 'ssd300.pt'

# Load test data
test_dataset = PascalVOCDataset('./data', split='TEST', keep_difficult=keep_difficult)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
                                          collate_fn=test_dataset.collate_fn, num_workers=workers, pin_memory=True)
n_classes = len(test_dataset.label_map())

# Load model checkpoint that is to be evaluated
checkpoint = torch.load(checkpoint_path)
model = SSD300(n_classes)
model.load_state_dict(checkpoint['model_state_dict'])
model = model.to(device)

# Switch to eval mode
model.eval()


def evaluate(test_loader, model):
Example #7
                                    momentum=momentum,
                                    weight_decay=weight_decay)
        if args.checkpoint:
            checkpoint = torch.load(args.checkpoint)
            start_epoch = checkpoint['epoch'] + 1
            print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
            model.load_state_dict(checkpoint['model'])
            optimizer.load_state_dict(checkpoint['optimizer'])

    # Move to default device
    model = model.to(device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)

    # Custom dataloaders
    train_dataset = PascalVOCDataset(args.data_folder,
                                     split='train',
                                     keep_difficult=keep_difficult)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        collate_fn=train_dataset.collate_fn,
        num_workers=workers,
        pin_memory=True)  # note that we're passing the collate function here

    # Calculate total number of epochs to train and the epochs to decay learning rate at (i.e. convert iterations to epochs)
    # To convert iterations to epochs, divide iterations by the number of iterations per epoch
    # The paper trains for 120,000 iterations with a batch size of 32, decays after 80,000 and 100,000 iterations
    epochs = iterations // (len(train_dataset) // 32)

    # Epochs
Example #8
def main():
    """
    Training.
    """
    global start_epoch, label_map, epoch, checkpoint, decay_lr_at

    # Initialize model or load checkpoint
    if checkpoint is None:
        start_epoch = 0
        model = SSD300(n_classes=n_classes)
        # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
        biases = list()
        not_biases = list()
        for param_name, param in model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)
        optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * lr}, {'params': not_biases}],
                                    lr=lr, momentum=momentum, weight_decay=weight_decay)

    else:
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

    # Move to default device
    model = model.to(device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)

    # Custom dataloaders
    train_dataset = PascalVOCDataset(data_folder,
                                     split='train',
                                     keep_difficult=keep_difficult)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                                               collate_fn=train_dataset.collate_fn, num_workers=workers,
                                               pin_memory=True)  # note that we're passing the collate function here

    # Calculate total number of epochs to train and the epochs to decay learning rate at (i.e. convert iterations to epochs)
    # To convert iterations to epochs, divide iterations by the number of iterations per epoch
    # The paper trains for 120,000 iterations with a batch size of 32, decays after 80,000 and 100,000 iterations
    epochs = iterations // (len(train_dataset) // 32)
    decay_lr_at = [it // (len(train_dataset) // 32) for it in decay_lr_at]
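    # For example: VOC07+12 trainval has ~16,551 images, so at batch size 32 there are
    # ~517 iterations per epoch; 120,000 iterations is then ~232 epochs, and the decay
    # points at 80,000 / 100,000 iterations land around epochs 154 and 193.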

    # Epochs
    for epoch in range(start_epoch, epochs):

        # Decay learning rate at particular epochs
        if epoch in decay_lr_at:
            adjust_learning_rate(optimizer, decay_lr_to)

        # One epoch's training
        train(train_loader=train_loader,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              epoch=epoch)

        # Save checkpoint
        save_checkpoint(epoch, model, optimizer)
Example #9
    print('\nMean Average Precision (mAP): %.3f' % mAP)
    return mAP


if __name__ == '__main__':
    data_folder = './data'
    batch_size = 64
    workers = 4
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    checkpoint = './combine_checkpoint_ssd352.pth.tar'

    # Load model checkpoint that is to be evaluated
    checkpoint = torch.load(checkpoint)
    model = checkpoint['model']
    model = model.to(device)

    # Switch to eval mode
    model.eval()

    # Load test data
    test_dataset = PascalVOCDataset(data_folder, split='test')
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=batch_size,
        shuffle=False,
        collate_fn=test_dataset.collate_fn,
        num_workers=workers,
        pin_memory=True)
    evaluate(test_loader, model)
def main():
    """
    Training.
    """
    global start_epoch, label_map, epoch, checkpoint, decay_lr_at, best_mAP

    # Initialize model or load checkpoint
    if checkpoint is None:
        start_epoch = 0
        model = SSD300(n_classes=n_classes)
        # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
        biases = list()
        not_biases = list()
        for param_name, param in model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)
        optimizer = torch.optim.SGD(params=[{
            'params': biases,
            'lr': 2 * lr
        }, {
            'params': not_biases
        }],
                                    lr=lr,
                                    momentum=momentum,
                                    weight_decay=weight_decay)

    else:
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

    # Move to default device
    model = model.to(device)
    criterion = MultiBoxLoss(model.priors_cxcy).to(device)

    # Custom dataloaders
    train_dataset = PascalVOCDataset(data_folder,
                                     split='train',
                                     keep_difficult=keep_difficult)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=internal_batchsize,
        shuffle=True,
        collate_fn=train_dataset.collate_fn,
        num_workers=workers,
        pin_memory=True)  # note that we're passing the collate function here

    # Load test data
    test_dataset = PascalVOCDataset(data_folder,
                                    split='test',
                                    keep_difficult=keep_difficult)
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=internal_batchsize,
        shuffle=False,
        collate_fn=test_dataset.collate_fn,
        num_workers=workers,
        pin_memory=True)

    # Calculate total number of epochs to train and the epochs to decay learning rate at (i.e. convert iterations to epochs)
    # To convert iterations to epochs, divide iterations by the number of iterations per epoch
    # The paper trains for 120,000 iterations with a batch size of 32, decays after 80,000 and 100,000 iterations

    epochs = iterations // (len(train_dataset) // internal_batchsize)
    # print('Length of dataset:', len(train_dataset), epochs)
    decay_lr_at = [
        it // (len(train_dataset) // internal_batchsize) for it in decay_lr_at
    ]
    print('total train epochs: ', epochs, ' training starts ......')

    # Epochs
    best_mAP = -1.
    # criterion.increase_threshold()
    # print('current threshold: ', criterion.threshold)
    for epoch in range(start_epoch, epochs):

        # Decay learning rate at particular epochs
        if epoch in decay_lr_at:
            adjust_learning_rate(optimizer, decay_lr_to)

        # _, current_mAP = evaluate(test_loader, model)

        train(train_loader=train_loader,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              epoch=epoch)

        # Save checkpoint
        if (epoch >= 30 and epoch % 30 == 0) or epoch == 5:
            _, current_mAP = evaluate(test_loader, model)
            if current_mAP > best_mAP:
                save_checkpoint(
                    epoch,
                    model,
                    optimizer,
                    name='checkpoints/my_checkpoint_topo_anchor_b32.pth.tar')
                best_mAP = current_mAP
                # criterion.increase_threshold(0.05)
        # elif epoch == 50:
        #     save_checkpoint(epoch, model, optimizer, name='checkpoints/my_checkpoint_rep300_b32.pth.tar')

    _, current_mAP = evaluate(test_loader, model)
    if current_mAP > best_mAP:
        save_checkpoint(
            epoch,
            model,
            optimizer,
            name='checkpoints/my_checkpoint_topo_anchor_b32.pth.tar')
        best_mAP = current_mAP
Example #11
def main():
    """
    Training.
    """
    global start_epoch, label_map, epoch, checkpoint, decay_lr_at

    # initialize model or load checkpoint
    if checkpoint is None:
        start_epoch = 0
        model = SSD300(n_classes=n_classes)
        # initialize the optimizer, with twice the default learning rate...
        # ...for biases, as in the original Caffe repo
        biases = list()
        not_biases = list()
        for param_name, param in model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)
        optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * lr}, {'params': not_biases}],
                                    lr=lr, momentum=momentum, weight_decay=weight_decay)

    else:
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

    # move to default device
    model = model.to(device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)

    # custom dataloaders
    train_dataset = PascalVOCDataset(data_folder,
                                     split='train')
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                                               collate_fn=train_dataset.collate_fn, num_workers=workers,
                                               pin_memory=True)  # using `collate_fn()` here

    # calculate total number of epochs to train and the epochs to decay...
    # ...learning rate at (i.e. convert iterations to epochs)
    # to convert iterations to epochs,...
    # ...divide iterations by the number of iterations per epoch
    epochs = iterations // (len(train_dataset) // batch_size)
    decay_lr_at = [it // (len(train_dataset) // batch_size) for it in decay_lr_at]
    print(f"Training for {iterations} iterations...")
    print(f"Training for {epochs} epochs...")
    print(f"Batch size is {batch_size}")
    print(f"Logging every {print_freq} batches...")

    # logging into train.txt
    with open(file='../logs/train_logs.txt', mode='a+') as f:
        f.write(f"Training for {iterations} iterations...\n")
        f.write(f"Training for {epochs} epochs...\n")
        f.write(f"Batch size is {batch_size}\n")
        f.write(f"Logging every {print_freq} batches...\n")

    # epochs
    for epoch in range(start_epoch, epochs):

        # decay learning rate at particular epochs
        if epoch in decay_lr_at:
            adjust_learning_rate(optimizer, decay_lr_to)

            # logging into train.txt
            with open(file='../logs/train_logs.txt', mode='a+') as f:
                f.write(f"DECAYING learning rate.\n The new LR is {optimizer.param_groups[1]['lr']}\n")

        # one epoch's training
        train(train_loader=train_loader,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              epoch=epoch)

        # save checkpoint after each epoch
        save_checkpoint(epoch, model, optimizer)
Example #12
def evaluate(checkpoint, run_colab, batch_size, set, subset):
    """
    Evaluate.

    :param test_loader: DataLoader for test data
    :param model: model
    """

    data_folder = create_data_lists(run_colab)
    test_dataset = PascalVOCDataset(data_folder,
                                    split=set,
                                    keep_difficult=keep_difficult)
    if subset > 0:
        test_dataset.images = test_dataset.images[:subset]
        test_dataset.objects = test_dataset.objects[:subset]
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=batch_size,
        shuffle=False,
        collate_fn=test_dataset.collate_fn,
        num_workers=workers,
        pin_memory=True)
    # Load model checkpoint that is to be evaluated
    checkpoint = torch.load(checkpoint, map_location=device)
    model = checkpoint['model']
    print(f"Number of epoch trained: {checkpoint['epoch']}")
    model = model.to(device)

    # Switch to eval mode
    model.eval()

    # Lists to store detected and true boxes, labels, scores
    det_boxes = list()
    det_labels = list()
    det_scores = list()
    true_boxes = list()
    true_labels = list()
    true_difficulties = list()  # it is necessary to know which objects are 'difficult', see 'calculate_mAP' in utils.py

    with torch.no_grad():
        # Batches
        for i, (images, boxes, labels,
                difficulties) in enumerate(tqdm(test_loader,
                                                desc='Evaluating')):
            images = images.to(device)  # (N, 3, 300, 300)

            # Forward prop.
            predicted_locs, predicted_scores = model(images)

            # Detect objects in SSD output
            det_boxes_batch, det_labels_batch, det_scores_batch = model.detect_objects(
                predicted_locs,
                predicted_scores,
                min_score=0.01,
                max_overlap=0.45,
                top_k=200)
            # Evaluation MUST be at min_score=0.01, max_overlap=0.45, top_k=200 for fair comparison with the paper's results and other repos

            # Store this batch's results for mAP calculation
            boxes = [b.cpu() for b in boxes]
            labels = [l.cpu() for l in labels]
            difficulties = [d.cpu() for d in difficulties]

            det_boxes.extend([box.cpu() for box in det_boxes_batch])
            det_labels.extend([label.cpu() for label in det_labels_batch])
            det_scores.extend([score.cpu() for score in det_scores_batch])
            true_boxes.extend(boxes)
            true_labels.extend(labels)
            true_difficulties.extend(difficulties)

        # Calculate mAP
        APs, mAP = calculate_mAP(det_boxes, det_labels, det_scores, true_boxes,
                                 true_labels, true_difficulties)

    # Print AP for each class
    pp.pprint(APs)

    print('\nMean Average Precision (mAP): %.3f' % mAP)
    model.train()
Example #13
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
checkpoint = './checkpoint_ssd300.pth.tar'

# Load model checkpoint that is to be evaluated
checkpoint = torch.load(checkpoint)
model = checkpoint['model']
model = model.to(device)

# Switch to eval mode
model.eval()

# Load test data
create_data_lists('/content/data', '/content/data')

test_dataset = PascalVOCDataset('/content/data',
                                split='test',
                                keep_difficult=keep_difficult)
test_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False,
                                          collate_fn=test_dataset.collate_fn,
                                          num_workers=workers,
                                          pin_memory=True)


def evaluate(test_loader, model):
    """
    Evaluate.

    :param test_loader: DataLoader for test data
    :param model: model
    """


def main(batch_size, continue_training, exp_name, learning_rate, num_epochs, print_freq, run_colab):
    # Data
    data_folder = create_data_lists(run_colab)
    train_dataset = PascalVOCDataset(data_folder,
                                     split='test',
                                     keep_difficult=keep_difficult)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                                               collate_fn=train_dataset.collate_fn, num_workers=workers,
                                               pin_memory=True)  # note that we're passing the collate function here

    # Networks
    checkpoint = torch.load(exp_name / "checkpoint_ssd300.pth.tar", map_location=device)
    print(f"Number of training epochs for detection network: {checkpoint['epoch']}")
    detection_network = checkpoint['model']

    if continue_training:
        adversarial_checkpoint = torch.load(exp_name / checkpoint, map_location=device)
        discriminator = adversarial_checkpoint['adversarial_model']
        image_encoder = adversarial_checkpoint['image_encoder']  # assumed key: the encoder must be restored too, otherwise it is undefined on this path
        optimizer = adversarial_checkpoint['optimizer']
        start_epoch = adversarial_checkpoint['epoch']
        print(f"Continue training of adversarial network from epoch {start_epoch}")
    else:
        start_epoch = 0
        image_encoder = VGGBase()
        discriminator = Discriminator(num_classes)
        optimizer = torch.optim.Adam(list(discriminator.parameters()) + list(image_encoder.parameters()),
                                     lr=learning_rate,
                                     weight_decay=1e-5)
    discriminator, image_encoder = discriminator.to(device), image_encoder.to(device)
    loss_function = GANLoss('vanilla').to(device)
    losses = AverageMeter()  # loss


    for epoch in range(start_epoch, num_epochs):
        for j, (images, boxes, labels, _) in enumerate(train_loader):
            images = images.to(device)
            _, image_embedding = image_encoder(images)
            random_box_indices = [np.random.randint(len(box)) for box in boxes]
            random_boxes = torch.stack([box[random_box_indices[i]] for i, box in enumerate(boxes)]).to(device)
            random_labels = torch.stack([one_hot_embedding(label[random_box_indices[i]], num_classes) for i, label in enumerate(labels)]).to(device)
            pred_real = discriminator(random_boxes, random_labels, image_embedding)
            loss_real = loss_function(pred_real, 1)

            with torch.no_grad():
                predicted_locs, predicted_scores = detection_network.forward(images)
                pred_boxes, pred_labels, _ = detection_network.detect_objects(predicted_locs,
                                                                                                       predicted_scores,
                                                                                                       min_score=0.2,
                                                                                                       max_overlap=0.45,
                                                                                                       top_k=200)
            random_box_indices = [np.random.randint(len(box)) for box in pred_boxes]
            random_fake_boxes = torch.stack([box[random_box_indices[i]] for i, box in enumerate(pred_boxes)]).to(device)
            random_fake_labels = torch.stack([one_hot_embedding(label[random_box_indices[i]], num_classes) for i, label in enumerate(pred_labels)]).to(device)
            pred_fake = discriminator(random_fake_boxes, random_fake_labels, image_embedding)
            loss_fake = loss_function(pred_fake, 0)

            total_loss = loss_fake + loss_real
            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()

            losses.update(total_loss.item(), images.size(0))
            if j % print_freq == 0:
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch, j, len(train_loader), loss=losses))
        save_adversarial_checkpoint(epoch, discriminator, image_encoder, optimizer, exp_name)
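The GAN-style training loop above uses AverageMeter and one_hot_embedding without defining them. Minimal sketches, assuming AverageMeter is the usual running-average helper (it must expose .val and .avg, which the print statement reads) and one_hot_embedding turns an integer class label into a one-hot vector:

import torch

class AverageMeter(object):
    """Tracks the latest value, sum, count, and running average of a metric."""
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

def one_hot_embedding(label, num_classes):
    """Convert an integer class label (scalar tensor) into a one-hot float vector."""
    return torch.eye(num_classes)[label.long()]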
def evaluate(args):
    """
    Evaluation script for ExDark data. Refer to the argparse arguments at the end of the script.
    """

    # Load model checkpoint that is to be evaluated

    checkpoint = torch.load(args.checkpoint, map_location=device)
    model = checkpoint['model']
    model = model.to(device)

    # Load test data

    test_dataset = PascalVOCDataset(args.data_folder,
                                    split='test',
                                    keep_difficult=keep_difficult)


    with open(args.exdark_metadata, "r") as fp:
        image_metadata = fp.read().splitlines()


    lighting_states = ['Low', 'Ambient', 'Object', 'Single', 'Weak', 'Strong', 'Screen', 'Window', 'Shadow', 'Twilight']
    lighting_indices = [[] for i in range(10)]
    indoor_outdoor = ['Indoor', 'Outdoor']
    location_indices = [[] for i in range(2)]

    for dataset_idx, _image in enumerate(test_dataset.images):
        for _metadata in image_metadata:
            metadata = _metadata.split(" ")
            if metadata[0].split(".")[0] == _image.split("/")[-1].split(".")[0]:
                lighting_indices[int(metadata[2]) - 1].append(dataset_idx)


    AP_dict = {}

    for state_idx, sets in enumerate(lighting_indices):


        print("\n \n Lighting State: ", lighting_states[state_idx])
        subset = torch.utils.data.Subset(test_dataset, sets)

        test_loader = torch.utils.data.DataLoader(subset, batch_size=batch_size, shuffle=False,
                                                collate_fn=test_dataset.collate_fn, num_workers=workers, pin_memory=True)



        # Make sure it's in eval mode
        model.eval()

        # Lists to store detected and true boxes, labels, scores
        det_boxes = list()
        det_labels = list()
        det_scores = list()
        true_boxes = list()
        true_labels = list()
        true_difficulties = list()  # it is necessary to know which objects are 'difficult', see 'calculate_mAP' in utils.py

        with torch.no_grad():
            # Batches
            for i, (images, boxes, labels, difficulties) in enumerate(test_loader):

                images = images.to(device)  # (N, 3, 300, 300)

                # Forward prop.
                predicted_locs, predicted_scores = model(images)

                # Detect objects in SSD output
                det_boxes_batch, det_labels_batch, det_scores_batch = model.detect_objects(predicted_locs, predicted_scores,
                                                                                            min_score=args.min_score, max_overlap=0.45,
                                                                                            top_k=200)
                # Evaluation MUST be at min_score=0.01, max_overlap=0.45, top_k=200 for fair comparison with the paper's results and other repos

                # Store this batch's results for mAP calculation
                boxes = [b.to(device) for b in boxes]
                labels = [l.to(device) for l in labels]
                difficulties = [d.to(device) for d in difficulties]

                det_boxes.extend(det_boxes_batch)
                det_labels.extend(det_labels_batch)
                det_scores.extend(det_scores_batch)
                true_boxes.extend(boxes)
                true_labels.extend(labels)
                true_difficulties.extend(difficulties)
            
            
            # Calculate aP for exdark and add to dictionary with lighting condition.
            APs = calculate_mAP_exdark(det_boxes, det_labels, det_scores, true_boxes, true_labels, true_difficulties)
            APs["lighting"] = lighting_states[state_idx]
        
        # Print AP for each class
        pp.pprint(APs)

        if AP_dict:
            for key in AP_dict:
                AP_dict[key].append(APs[key])
        else:
            AP_dict = {}
            for key in APs:
                AP_dict[key] = [APs[key]]

    df = pd.DataFrame.from_dict(AP_dict)
    df.to_csv("{}_{}_results.csv".format(args.data_folder.split("/")[-1], args.min_score))
Example #16
def main(args):
    # Model parameters
    # Not too many here since the SSD300 has a very specific structure
    with open(args.config_file_path, "r") as fp:
        config = json.load(fp)

    n_classes = len(label_map)  # number of different types of objects
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    #Mobilenetv2
    #normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
    #                                 std=[0.229, 0.224, 0.225])

    # Learning parameters
    checkpoint = None  # path to model checkpoint, None if none
    batch_size = config['batch_size']  # batch size
    start_epoch = 0  # start at this epoch
    epochs = config[
        'n_epochs']  # number of epochs to run without early-stopping
    epochs_since_improvement = 0  # number of epochs since there was an improvement in the validation metric
    best_loss = 100.  # assume a high loss at first
    workers = 2  # number of workers for loading data in the DataLoader
    lr = config['lr']  # learning rate
    momentum = 0.9  # momentum
    weight_decay = config['weight_decay']  # weight decay
    grad_clip = None  # clamp gradients to [-grad_clip, grad_clip] if set; None disables clipping
    backbone_network = config['backbone_network']

    model = SSD(num_classes=n_classes, backbone_network=backbone_network)
    # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
    biases = list()
    not_biases = list()
    param_names_biases = list()
    param_names_not_biases = list()
    for param_name, param in model.named_parameters():
        if param.requires_grad:
            if param_name.endswith('.bias'):
                biases.append(param)
                param_names_biases.append(param_name)
            else:
                not_biases.append(param)
                param_names_not_biases.append(param_name)
    optimizer = torch.optim.SGD(params=[{
        'params': biases,
        'lr': 2 * lr
    }, {
        'params': not_biases
    }],
                                lr=lr,
                                momentum=momentum,
                                weight_decay=weight_decay)

    model = model.to(device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors).to(device)

    #voc07_path = 'VOCdevkit/VOC2007'
    voc07_path = config['voc07_path']

    #voc12_path = 'VOCdevkit/VOC2012'
    # voc12_path = config['voc12_path']
    #from utils import create_data_lists

    create_data_lists(voc07_path, output_folder=config['data_folder'])

    #data_folder = 'VOC/VOCdevkit/'
    data_folder = config['data_folder']
    train_dataset = PascalVOCDataset(data_folder,
                                     split='train',
                                     keep_difficult=keep_difficult)
    val_dataset = PascalVOCDataset(data_folder,
                                   split='test',
                                   keep_difficult=keep_difficult)

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=train_dataset.collate_fn,
        num_workers=workers,
        pin_memory=True)  # note that we're passing the collate function here
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=batch_size,
                                             shuffle=True,
                                             collate_fn=val_dataset.collate_fn,
                                             num_workers=workers,
                                             pin_memory=True)

    print(start_epoch)
    for epoch in range(start_epoch, epochs):
        # Paper describes decaying the learning rate at the 80000th, 100000th, 120000th 'iteration', i.e. model update or batch
        # The paper uses a batch size of 32, which means there were about 517 iterations in an epoch
        # Therefore, to find the epochs to decay at, you could do,
        # if epoch in {80000 // 517, 100000 // 517, 120000 // 517}:
        #     adjust_learning_rate(optimizer, 0.1)

        # In practice, I just decayed the learning rate when loss stopped improving for long periods,
        # and I would resume from the last best checkpoint with the new learning rate,
        # since there's no point in resuming at the most recent and significantly worse checkpoint.
        # So, when you're ready to decay the learning rate, just set checkpoint = 'BEST_checkpoint_ssd300.pth.tar' above
        # and have adjust_learning_rate(optimizer, 0.1) BEFORE this 'for' loop

        # One epoch's training
        train(train_loader=train_loader,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              epoch=epoch,
              grad_clip=grad_clip)

        # One epoch's validation
        val_loss = validate(val_loader=val_loader,
                            model=model,
                            criterion=criterion)

        # Did validation loss improve?
        is_best = val_loss < best_loss
        best_loss = min(val_loss, best_loss)

        if not is_best:
            epochs_since_improvement += 1
            print("\nEpochs since last improvement: %d\n" %
                  (epochs_since_improvement, ))

        else:
            epochs_since_improvement = 0

        # Save checkpoint
        save_checkpoint(epoch, epochs_since_improvement, model, optimizer,
                        val_loss, best_loss, is_best)
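Example #16 passes grad_clip into train() but the clipping itself is not shown. A minimal sketch of the usual helper, assuming it is called right after loss.backward() whenever grad_clip is not None:

def clip_gradient(optimizer, grad_clip):
    """Clamp all gradients to [-grad_clip, grad_clip] to avoid exploding gradients."""
    for group in optimizer.param_groups:
        for param in group['params']:
            if param.grad is not None:
                param.grad.data.clamp_(-grad_clip, grad_clip)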
Example #17
def main():
    """
    Training and validation.
    """
    global epochs_since_improvement, start_epoch, label_map, best_loss, epoch, checkpoint

    # Initialize model or load checkpoint
    if checkpoint is None:
        model = SSD300(n_classes=n_classes)
        # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
        biases = list()
        not_biases = list()
        for param_name, param in model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)
        optimizer = torch.optim.RMSprop(
            params=[{
                'params': biases,
                'lr': 2 * lr
            }, {
                'params': not_biases
            }],
            lr=lr,
            momentum=momentum,
            weight_decay=weight_decay
        )  # rmsprop optimizer used according to spherenet paper

    else:
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        epochs_since_improvement = checkpoint['epochs_since_improvement']
        best_loss = checkpoint['best_loss']
        print(
            '\nLoaded checkpoint from epoch %d. Best loss so far is %.3f.\n' %
            (start_epoch, best_loss))
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

    # Move to default device
    model = model.to(device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)

    # Custom dataloaders
    train_dataset = PascalVOCDataset(data_folder,
                                     split='train',
                                     keep_difficult=keep_difficult)
    val_dataset = PascalVOCDataset(data_folder,
                                   split='test',
                                   keep_difficult=keep_difficult)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=train_dataset.collate_fn,
        num_workers=workers,
        pin_memory=True)  # note that we're passing the collate function here
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=batch_size,
                                             shuffle=True,
                                             collate_fn=val_dataset.collate_fn,
                                             num_workers=workers,
                                             pin_memory=True)
    # Epochs
    for epoch in range(start_epoch, epochs):
        # Paper describes decaying the learning rate at the 80000th, 100000th, 120000th 'iteration', i.e. model update or batch
        # The paper uses a batch size of 32, which means there were about 517 iterations in an epoch
        # Therefore, to find the epochs to decay at, you could do,
        # if epoch in {80000 // 517, 100000 // 517, 120000 // 517}:
        #     adjust_learning_rate(optimizer, 0.1)

        # In practice, I just decayed the learning rate when loss stopped improving for long periods,
        # and I would resume from the last best checkpoint with the new learning rate,
        # since there's no point in resuming at the most recent and significantly worse checkpoint.
        # So, when you're ready to decay the learning rate, just set checkpoint = 'BEST_checkpoint_ssd300.pth.tar' above
        # and have adjust_learning_rate(optimizer, 0.1) BEFORE this 'for' loop

        # One epoch's training
        train(train_loader=train_loader,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              epoch=epoch)

        # One epoch's validation
        val_loss = validate(val_loader=val_loader,
                            model=model,
                            criterion=criterion)

        # Did validation loss improve?
        is_best = val_loss < best_loss
        best_loss = min(val_loss, best_loss)

        if not is_best:
            epochs_since_improvement += 1
            print("\nEpochs since last improvement: %d\n" %
                  (epochs_since_improvement, ))

        else:
            epochs_since_improvement = 0

        # Save checkpoint
        save_checkpoint(epoch, epochs_since_improvement, model, optimizer,
                        val_loss, best_loss, is_best)
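Examples #16, #17, and #20 rely on a validate() function that is not included. A minimal sketch, assuming the criterion is called as criterion(predicted_locs, predicted_scores, boxes, labels) and returns a scalar loss, and that device is the global used throughout these scripts:

import torch

def validate(val_loader, model, criterion):
    """One epoch of validation; returns the average MultiBox loss over the split."""
    model.eval()
    total_loss, n_images = 0.0, 0
    with torch.no_grad():
        for images, boxes, labels, _ in val_loader:
            images = images.to(device)
            boxes = [b.to(device) for b in boxes]
            labels = [l.to(device) for l in labels]
            predicted_locs, predicted_scores = model(images)
            loss = criterion(predicted_locs, predicted_scores, boxes, labels)
            total_loss += loss.item() * images.size(0)
            n_images += images.size(0)
    model.train()
    return total_loss / n_images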
Example #18
keep_difficult = True  # whether to keep ground-truth objects that are hard to detect
n_classes = len(label_map)

# Learning parameters
batch_size = 8   # batch size
iterations = 120000  # total number of iterations to train for
decay_lr_at = [80000, 100000]  # multiply the learning rate by 0.1 at these iterations
start_epoch = 0  # starting epoch
print_freq = 50  # during training, print status every print_freq iterations
lr = 1e-3  # learning rate
momentum = 0.9  # SGD momentum
weight_decay = 5e-4  # SGD weight decay
grad_clip = None  # clamp gradients to [-grad_clip, grad_clip]; None disables clipping
best_mAP = 0.  # best mAP seen so far
train_loader = PascalVOCDataset(data_folder,
                                    split='train',
                                    keep_difficult=keep_difficult, batch_size=batch_size, shuffle=True, data_argu=False)
length = len(train_loader) // batch_size  # number of iterations per epoch
epochs = iterations // (len(train_loader) // 32)  # the paper runs 120,000 iterations at batch size 32; convert that to epochs
decay_lr_at = [it // (len(train_loader) // 32) for it in decay_lr_at]  # and the epochs at which to decay the lr
val_loader = PascalVOCDataset(data_folder,
                                split='test',   
                                keep_difficult=keep_difficult, batch_size=batch_size, shuffle=False)
                                            
model = SSD300(n_classes=n_classes)

biases = list()
not_biases = list()
for param_name, param in model.named_parameters():
    if param.requires_grad:
        if param_name.endswith('.bias'):
Example #19
data_folder = 'dataset/'
keep_difficult = True
batch_size = 47
pp = PrettyPrinter()

# Define model & Load parameters
experiment_id = "pretrain_model"
model_path = os.path.join('tensorboard', experiment_id, 'model_best.pkl')
params = pickle.load(open(model_path, "rb"))
model = SSD300(21)
model.load_parameters(params)
print(f'[*] Load model {model_path} success')

# Load test data
test_loader = PascalVOCDataset(data_folder,
                              split='test',   
                              keep_difficult=keep_difficult, batch_size=batch_size, shuffle=False)
length = len(test_loader) // batch_size

def evaluate(test_loader, model):
    """ Evaluate.
    Args:
        test_loader: DataLoader for test data
        model: model
    """
    model.eval()

    det_boxes = list()
    det_labels = list()
    det_scores = list()
    true_boxes = list()
Example #20
def main():

    global epochs_since_improvement, start_epoch, label_map, best_loss, epoch, checkpoint

    # Initialize model or load checkpoint
    if checkpoint is None:
        model = SSD300(n_classes=n_classes)
        # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
        biases = list()
        not_biases = list()
        for param_name, param in model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)
        optimizer = torch.optim.SGD(params=[{
            'params': biases,
            'lr': 2 * lr
        }, {
            'params': not_biases
        }],
                                    lr=lr,
                                    momentum=momentum,
                                    weight_decay=weight_decay)

    else:
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        epochs_since_improvement = checkpoint['epochs_since_improvement']
        best_loss = checkpoint['best_loss']
        print(
            '\nLoaded checkpoint from epoch %d. Best loss so far is %.3f.\n' %
            (start_epoch, best_loss))
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

    # Move to default device
    model = model.to(device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)

    # Custom dataloaders
    train_dataset = PascalVOCDataset(data_folder,
                                     split='train',
                                     keep_difficult=keep_difficult)
    val_dataset = PascalVOCDataset(data_folder,
                                   split='test',
                                   keep_difficult=keep_difficult)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=train_dataset.collate_fn,
        num_workers=workers,
        pin_memory=True)  # note that we're passing the collate function here
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=batch_size,
                                             shuffle=True,
                                             collate_fn=val_dataset.collate_fn,
                                             num_workers=workers,
                                             pin_memory=True)
    # Epochs
    for epoch in range(start_epoch, epochs):

        train(train_loader=train_loader,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              epoch=epoch)

        # One epoch's validation
        val_loss = validate(val_loader=val_loader,
                            model=model,
                            criterion=criterion)

        # Did validation loss improve?
        is_best = val_loss < best_loss
        best_loss = min(val_loss, best_loss)

        if not is_best:
            epochs_since_improvement += 1
            print("\nEpochs since last improvement: %d\n" %
                  (epochs_since_improvement, ))

        else:
            epochs_since_improvement = 0

        # Save checkpoint
        save_checkpoint(epoch, epochs_since_improvement, model, optimizer,
                        val_loss, best_loss, is_best)
Example #21
def main():
    wandb.init(project="re")

    use_cuda = not config.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 4, 'pin_memory': True} if use_cuda else {}

    torch.manual_seed(config.seed)  # pytorch random seed
    # numpy.random.seed(config.seed) # numpy random seed
    torch.backends.cudnn.deterministic = True

    # Config is a variable that holds and saves hyperparameters and inputs
    #wandb.watch(model)

    #torch.manual_seed(args.seed)
    """
    Training.
    """
    global start_epoch, label_map, epoch, checkpoint, decay_lr_at
    #print(device)
    # Initialize model or load checkpoint
    if checkpoint is None:
        start_epoch = 0
        model = SSD300(n_classes=n_classes)
        # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
        biases: List[Any] = list()
        not_biases = list()
        for param_name, param in model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=config.lr,
                                    momentum=config.momentum)

    else:
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

    # Move to default device
    model = model.to(device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)
    wandb.watch(model, log="all")
    # Custom dataloaders
    train_dataset = PascalVOCDataset(data_folder,
                                     split='train',
                                     keep_difficult=keep_difficult)
    # print(train_dataset)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.batch_size,
        shuffle=True,
        collate_fn=train_dataset.collate_fn,
        **kwargs)  # note that we're passing the collate function here

    test_dataset = PascalVOCDataset(data_folder,
                                    split='test',
                                    keep_difficult=keep_difficult)
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=config.batch_size,
        shuffle=True,
        collate_fn=test_dataset.collate_fn,
        **kwargs)
    # Calculate total number of epochs to train and the epochs to decay learning rate at (i.e. convert iterations to epochs)
    # To convert iterations to epochs, divide iterations by the number of iterations per epoch
    # The paper trains for 120,000 iterations with a batch size of 32, decays after 80,000 and 100,000 iterations
    # epochs = iterations // (len(train_dataset) // 8)
    #print(epochs)
    #decay_lr_at = [it // (len(train_dataset) // 32) for it in decay_lr_at]
    config.epochs = 50
    #epochs = 10
    # Epochs
    for epoch in range(start_epoch, config.epochs + 1):

        # # Decay learning rate at particular epochs
        # if epoch in decay_lr_at:
        #     adjust_learning_rate(optimizer, decay_lr_to)

        # One epoch's training
        train(config,
              train_loader=train_loader,
              device=device,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              epoch=epoch)
        test(config,
             test_loader=test_loader,
             device=device,
             model=model,
             criterion=criterion,
             # optimizer=optimizer,
             epoch=epoch)

        # Save checkpoint
        save_checkpoint(epoch, model, optimizer)
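save_checkpoint is called in the training examples but not defined in these snippets. A minimal sketch, assuming the same checkpoint layout the scripts load back ('epoch', 'model', 'optimizer') and a hypothetical output filename:

import torch

def save_checkpoint(epoch, model, optimizer):
    """Save a training checkpoint in the format the scripts above reload."""
    state = {'epoch': epoch, 'model': model, 'optimizer': optimizer}
    torch.save(state, 'checkpoint_ssd300.pth.tar')  # hypothetical path; point it at your out_dir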
Example #22
0
def main():
    """
    Training.
    """
    global start_epoch, label_map, epoch, checkpoint, decay_lr_at

    # Initialize model or load checkpoint
    if checkpoint is None:
        print("checkpoint none")
        start_epoch = 0
        model = SSD300(n_classes=n_classes)

        # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
        biases = list()
        not_biases = list()
        for param_name, param in model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)

        # different optimizer: here biases use the base LR rather than 2 * lr
        # optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * lr}, {'params': not_biases}],
        #                             lr=lr, momentum=momentum, weight_decay=weight_decay)
        optimizer = torch.optim.SGD(params=[{
            'params': biases,
            'lr': lr
        }, {
            'params': not_biases
        }],
                                    lr=lr,
                                    momentum=momentum,
                                    weight_decay=weight_decay)

        #optimizer = torch.optim.SGD(params=[{'params':model.parameters(), 'lr': 2 * lr}, {'params': model.parameters}],  lr=lr, momentum=momentum, weight_decay=weight_decay)

    else:
        print("checkpoint load")
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

    # Move to default device
    model = model.to(device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)

    # Custom dataloaders
    train_dataset = PascalVOCDataset(data_folder,
                                     split='train',
                                     keep_difficult=keep_difficult)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=train_dataset.collate_fn,
        num_workers=workers,
        pin_memory=True)  # note that we're passing the collate function here

    # Calculate total number of epochs to train and the epochs to decay learning rate at (i.e. convert iterations to epochs)
    # To convert iterations to epochs, divide iterations by the number of iterations per epoch
    # The backbone is now MobileNetV3; the original VGG-based paper trains for 120,000 iterations with a batch size of 32 and decays after 80,000 and 100,000 iterations
    epochs = 600
    # decay_lr_at =[154, 193]
    # print("decay_lr_at:",decay_lr_at)
    print("epochs:", epochs)

    # Reset every param group's learning rate to the configured base lr
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    print("Reset learning rate. The new LR is %f\n" %
          (optimizer.param_groups[1]['lr'], ))
    # Epochs: I tried several different learning rate schedulers
    # (a few alternatives you could try, e.g. the CosineAnnealingLR below)
    #scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,T_max = (epochs // 7) + 1)
    scheduler = ReduceLROnPlateau(optimizer,
                                  mode="min",
                                  factor=0.1,
                                  patience=15,
                                  verbose=True,
                                  threshold=0.00001,
                                  threshold_mode='rel',
                                  cooldown=0,
                                  min_lr=0,
                                  eps=1e-08)

    for epoch in range(start_epoch, epochs):

        # Decay learning rate at particular epochs
        # if epoch in decay_lr_at:
        #     adjust_learning_rate_epoch(optimizer,epoch)

        # One epoch's training
        train_loss = train(train_loader=train_loader,
                           model=model,
                           criterion=criterion,
                           optimizer=optimizer,
                           epoch=epoch)  # train() is assumed to return the mean loss for the epoch
        print("epoch loss:", train_loss)
        scheduler.step(train_loss)

        # Save checkpoint
        save_checkpoint(epoch, model, optimizer)
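The ReduceLROnPlateau scheduler above steps on the epoch's training loss, so train() is assumed to return a scalar mean loss. A minimal sketch of such a loop under that assumption (device, the (images, boxes, labels, difficulties) batches and the MultiBoxLoss call follow the examples above; the rest is hypothetical):

def train(train_loader, model, criterion, optimizer, epoch):
    """One epoch's training; returns the mean MultiBox loss over the epoch."""
    model.train()
    total_loss, n_batches = 0.0, 0
    for images, boxes, labels, _ in train_loader:
        images = images.to(device)
        boxes = [b.to(device) for b in boxes]
        labels = [l.to(device) for l in labels]

        predicted_locs, predicted_scores = model(images)  # forward pass
        loss = criterion(predicted_locs, predicted_scores, boxes, labels)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_loss += loss.item()
        n_batches += 1
    return total_loss / max(n_batches, 1)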
Example #23
0
def main():
    """
    Training.
    """

    # Custom dataloaders
    train_dataset = PascalVOCDataset(data_folder,
                                     split='train',
                                     keep_difficult=keep_difficult)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=train_dataset.collate_fn,
        num_workers=workers,
        pin_memory=True)  # note that we're passing the collate function here

    test_dataset = PascalVOCDataset(data_folder,
                                    split='test',
                                    keep_difficult=keep_difficult)
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=test_dataset.collate_fn,
        num_workers=workers,
        pin_memory=True)
    """
    Training.
    """
    global start_epoch, label_map, epoch, checkpoint

    # Initialize model or load checkpoint
    if checkpoint is None:
        start_epoch = 0
        model = SSD300(n_classes=n_classes)
        # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
        lr = 0.01
        optimizer = torch.optim.SGD([
            {
                'params': model.base.parameters(),
                'lr': lr / 100
            },
            {
                'params': model.aux_convs.parameters(),
                'lr': lr / 10
            },
        ],
                                    lr=0.01,
                                    momentum=0.8)
        scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer,
                                                      base_lr=[0.0001, 0.001],
                                                      max_lr=[0.001, 0.005],
                                                      step_size_up=31,
                                                      step_size_down=31)
        #print(model)
    else:
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']
        scheduler = torch.optim.lr_scheduler.CyclicLR(
            optimizer,
            base_lr=[0.000001, 0.00001, 0.00001],
            max_lr=[0.000005, 0.00009, 0.00005],
            step_size_up=31,
            step_size_down=31)

    # Move to default device
    model = model.to(device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)

    print(model)
    #print(next(iter(test_loader)))
    #    print(train_loader)
    #    a=next(iter(train_loader))
    #    print(a)
    # Calculate total number of epochs to train and the epochs to decay learning rate at (i.e. convert iterations to epochs)
    # To convert iterations to epochs, divide iterations by the number of iterations per epoch
    # The paper trains for 120,000 iterations with a batch size of 32, decays after 80,000 and 100,000 iterations
    #epochs = iterations // (len(train_dataset) // 8)
    #print(epochs)
    #decay_lr_at = [it // (len(train_dataset) // 32) for it in decay_lr_at]
    epochs = 125
    # Epochs
    #scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer,div_factor=100.0, final_div_factor=100000.0, max_lr=0.001, total_steps=66)
    for epoch in range(start_epoch, epochs):

        # One epoch's training
        train(train_loader=train_loader,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              scheduler=scheduler,
              epoch=epoch)
        #print(scheduler.get_lr())

        test(
            test_loader=test_loader,
            model=model,
            criterion=criterion,
            #optimizer=optimizer,
            epoch=epoch)

        # Save checkpoint
        save_checkpoint(epoch, model, optimizer)
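In this example base_lr and max_lr are lists with one entry per optimizer param group (model.base and model.aux_convs), so each group cycles within its own range, and CyclicLR is meant to be stepped once per batch inside train(). A quick sketch to verify the per-group behaviour (assumes optimizer and scheduler are built as above):

# Sketch: watch each param group's learning rate evolve as the scheduler steps
for step in range(5):
    optimizer.step()
    scheduler.step()  # CyclicLR advances the learning rate every iteration
    print(step, [round(group['lr'], 6) for group in optimizer.param_groups])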
Example #24
0
batch_size = 64
workers = 4
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
checkpoint = './data/checkpoint_ssd300.pth.tar'

# Load model checkpoint that is to be evaluated
checkpoint = torch.load(checkpoint)
model = checkpoint['model']
model = model.to(device)

# Switch to eval mode
model.eval()

# Load test data
test_dataset = PascalVOCDataset(data_folder,
                                split='test',
                                keep_difficult=keep_difficult)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
                                          collate_fn=test_dataset.collate_fn, num_workers=workers, pin_memory=True)



def evaluate(test_loader, model):
    """
    Evaluate.

    :param test_loader: DataLoader for test data
    :param model: model
    """

    # Make sure it's in eval mode
    model.eval()
Example #25
0
def evaluate(data_folder, model_checkpoint):
    """
    Evaluate.

    :param test_loader: DataLoader for test data
    :param model: model
    """

    if not torch.cuda.is_available():
        checkpoint = torch.load(model_checkpoint,
                                map_location=torch.device('cpu'))
    else:
        checkpoint = torch.load(model_checkpoint)
    model = checkpoint['model']
    model = model.to(device)

    # Make sure it's in eval mode
    model.eval()

    # Load test data
    test_dataset = PascalVOCDataset(data_folder,
                                    split='test',
                                    keep_difficult=keep_difficult)
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=batch_size,
        shuffle=False,
        collate_fn=test_dataset.collate_fn,
        num_workers=workers,
        pin_memory=True)

    # Lists to store detected and true boxes, labels, scores
    det_boxes = list()
    det_labels = list()
    det_scores = list()
    true_boxes = list()
    true_labels = list()
    true_difficulties = list()  # it is necessary to know which objects are 'difficult', see 'calculate_mAP' in utils.py

    with torch.no_grad():
        # Batches
        for i, (images, boxes, labels,
                difficulties) in enumerate(tqdm(test_loader,
                                                desc='Evaluating')):
            images = images.to(device)  # (N, 3, 300, 300)

            # Forward prop.
            predicted_locs, predicted_scores = model(images)

            # Detect objects in SSD output
            det_boxes_batch, det_labels_batch, det_scores_batch = model.detect_objects(
                predicted_locs,
                predicted_scores,
                min_score=0.01,
                max_overlap=0.45,
                top_k=200)
            # Evaluation MUST be at min_score=0.01, max_overlap=0.45, top_k=200 for fair comparison with the paper's results and other repos

            # Store this batch's results for mAP calculation
            boxes = [b.to(device) for b in boxes]
            labels = [l.to(device) for l in labels]
            difficulties = [d.to(device) for d in difficulties]

            det_boxes.extend(det_boxes_batch)
            det_labels.extend(det_labels_batch)
            det_scores.extend(det_scores_batch)
            true_boxes.extend(boxes)
            true_labels.extend(labels)
            true_difficulties.extend(difficulties)

        # Calculate mAP
        APs, mAP = calculate_mAP(det_boxes, det_labels, det_scores, true_boxes,
                                 true_labels, true_difficulties)

    # Print AP for each class
    pp.pprint(APs)

    print('\nMean Average Precision (mAP): %.3f' % mAP)
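A usage example for this evaluate() (paths are illustrative; the checkpoint name matches the one used in Example #24):

if __name__ == '__main__':
    evaluate('./data', './data/checkpoint_ssd300.pth.tar')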
Example #26
0
def main():

    global start_epoch, label_map, epoch, checkpoint, decay_lr_at

    # Initialize model or load checkpoint
    if checkpoint is None:
        start_epoch = 0
        model = SSD300(n_classes=n_classes)
        biases = list()
        not_biases = list()
        for param_name, param in model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)
        optimizer = torch.optim.SGD(params=[{
            'params': biases,
            'lr': 2 * lr
        }, {
            'params': not_biases
        }],
                                    lr=lr,
                                    momentum=momentum,
                                    weight_decay=weight_decay)

    else:
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

    model = model.to(device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)

    # Data loaders
    train_dataset = PascalVOCDataset(data_folder,
                                     split='train',
                                     keep_difficult=keep_difficult)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=train_dataset.collate_fn,
        num_workers=workers,
        pin_memory=True)

    # Compute the total number of epochs
    epochs = iterations // (len(train_dataset) // batch_size)
    decay_lr_at = [
        it // (len(train_dataset) // batch_size) for it in decay_lr_at
    ]

    # Epochs
    for epoch in range(start_epoch, epochs):

        # Decay learning rate at particular epochs
        if epoch in decay_lr_at:
            adjust_learning_rate(optimizer, decay_lr_to)

        # One epoch's training
        train(train_loader=train_loader,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              epoch=epoch)

        # Save checkpoint
        save_checkpoint(epoch, model, optimizer)
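adjust_learning_rate is called above (and commented out in earlier examples) but never shown. A minimal sketch, assuming decay_lr_to is the factor (e.g. 0.1) by which every param group's learning rate is scaled:

def adjust_learning_rate(optimizer, scale):
    """Scale the learning rate of every param group by the given factor."""
    for param_group in optimizer.param_groups:
        param_group['lr'] = param_group['lr'] * scale
    print("DECAYING learning rate. The new LR is %f\n" % (optimizer.param_groups[1]['lr'],))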
Example #27
0
def main():
    """
    Training.
    """
    global start_epoch, label_map, epoch, checkpoint, decay_lr_at, rev_label_map

    # Initialize model or load checkpoint
    if checkpoint is None:
        start_epoch = 0
        model = SSD300(n_classes=n_classes)
        # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
        biases = list()
        not_biases = list()
        for param_name, param in model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)
        optimizer = torch.optim.SGD(params=[{
            'params': biases,
            'lr': 2 * lr
        }, {
            'params': not_biases
        }],
                                    lr=lr,
                                    momentum=momentum,
                                    weight_decay=weight_decay)

    else:
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

    # Move to default device
    model = model.to(device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)

    # Load test data
    #create_data_lists('/content/data', '/content/data')

    train_dataset = PascalVOCDataset('/content/data/images',
                                     '/content/data/output.json',
                                     split='train',
                                     keep_difficult=keep_difficult)
    test_dataset = PascalVOCDataset('/content/data/images',
                                    '/content/data/output.json',
                                    split='test',
                                    keep_difficult=keep_difficult)

    # half = int(len(train_dataset)/2)
    # # split the dataset in train and test set
    # torch.manual_seed(1)
    # indices = torch.randperm(len(train_dataset)).tolist()

    # #print(indices)
    # train_dataset = torch.utils.data.Subset(train_dataset, indices[:-half])
    # test_dataset = torch.utils.data.Subset(test_dataset, indices[-half:])

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=train_dataset.collate_fn,
        num_workers=workers,
        pin_memory=True)  # note that we're passing the collate function here

    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=batch_size,
        shuffle=False,
        collate_fn=test_dataset.collate_fn,
        num_workers=workers,
        pin_memory=True)

    # Custom train dataloaders

    #create_data_lists('/content/data', '/content/data')

    # train_dataset = PascalVOCDataset('/content/data',
    #                                  split='train',
    #                                  keep_difficult=keep_difficult)
    #print(train_dataset[0])

    # Calculate total number of epochs to train and the epochs to decay learning rate at (i.e. convert iterations to epochs)
    # To convert iterations to epochs, divide iterations by the number of iterations per epoch
    # The paper trains for 120,000 iterations with a batch size of 32, decays after 80,000 and 100,000 iterations
    #epochs = iterations // (len(train_dataset) // 2)

    epochs = 1
    decay_lr_at = [it // (len(train_dataset) // 2) for it in decay_lr_at]

    prev_mAP = 0.0
    # Epochs
    for epoch in range(start_epoch, epochs):

        # Decay learning rate at particular epochs
        if epoch in decay_lr_at:
            adjust_learning_rate(optimizer, decay_lr_to)

        # One epoch's training
        train(train_loader=train_loader,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              epoch=epoch,
              device=device)

        _, mAP = evaluate(test_loader, model, device)

        # Save checkpoint
        save_checkpoint(epoch, model, optimizer)

        # Save the best checkpoint so far, i.e. the one with the highest mean average precision
        if prev_mAP < mAP:
            prev_mAP = mAP
            save_best_checkpoint(epoch, model, optimizer)
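save_best_checkpoint is not defined in the snippet; a minimal sketch, assuming it mirrors save_checkpoint but writes the best-so-far model (highest validation mAP) to a separate, hypothetical filename:

def save_best_checkpoint(epoch, model, optimizer):
    """Save the best checkpoint so far (highest mAP) to its own file."""
    state = {'epoch': epoch, 'model': model, 'optimizer': optimizer}
    torch.save(state, 'BEST_checkpoint_ssd300.pth.tar')  # hypothetical filename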