Code Example #1
def dataset_loading(args, config):

    train_transform = TrainAugmentation(config.image_size, config.image_mean,
                                        config.image_std)
    target_transform = MatchPrior(config.priors, config.center_variance,
                                  config.size_variance, 0.5)
    test_transform = TestTransform(config.image_size, config.image_mean,
                                   config.image_std)

    #dataset = VIRAT_Dataset(args["Datasets"]["virat_seq"]["train_image_path"],
    #                        args["Datasets"]["virat_seq"]["train_anno_path"],
    #                        transform=train_transform, target_transform=target_transform, downpurning_ratio = 0.2) #0.2
    dataset = VIRAT_table_comm(
        args["Datasets"]["virat_seq"]["train_image_path"],
        args["Datasets"]["virat_seq"]["train_anno_path"],
        transform=train_transform,
        target_transform=target_transform,
        downpurning_ratio=0.2)
    # dataset = VIRAT_Dataset(args["Datasets"]["virat_seq"]["train_image_path"],
    #                         args["Datasets"]["virat_seq"]["train_anno_path"],
    #                         transform=train_transform, downpurning_ratio = 0.05) #0.2

    label_file = ""
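    # With label_file empty, the existence check below never passes, so
    # store_labels() is effectively skipped in this example.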
    if os.path.exists(label_file):
        store_labels(label_file, dataset.class_names)
    logging.info(dataset)
    num_classes = len(dataset.class_names)

    train_dataset = dataset

    train_loader = DataLoader(train_dataset,
                              args["flow_control"]["batch_size"],
                              num_workers=args["flow_control"]["num_workers"],
                              shuffle=True)

    val_dataset = VIRAT_Dataset(
        args["Datasets"]["virat_seq"]["train_image_path"],
        args["Datasets"]["virat_seq"]["train_anno_path"],
        transform=train_transform,
        target_transform=target_transform,
        downpurning_ratio=0.2 * 3. / 9.)
    val_loader = DataLoader(val_dataset,
                            args["flow_control"]["batch_size"],
                            num_workers=args["flow_control"]["num_workers"],
                            shuffle=False)
    logging.info("Build network.")
    return train_loader, val_loader, num_classes
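
For reference, here is a minimal sketch of the inputs this dataset_loading variant expects, inferred from the lookups in the code above. The paths and batch settings are placeholder values, and config is stubbed with a SimpleNamespace instead of the project's real SSD config module:

# Minimal sketch of the expected inputs (all values are illustrative placeholders).
from types import SimpleNamespace

args = {
    "flow_control": {"batch_size": 32, "num_workers": 4},
    "Datasets": {
        "virat_seq": {
            "train_image_path": "/data/virat/images",      # placeholder path
            "train_anno_path": "/data/virat/annotations",  # placeholder path
        }
    },
}

# Stub standing in for the real config; the attribute names match the ones
# dataset_loading reads above.
config = SimpleNamespace(image_size=300,
                         image_mean=(127, 127, 127),
                         image_std=128.0,
                         priors=None,          # the real config supplies prior boxes
                         center_variance=0.1,
                         size_variance=0.2)

# train_loader, val_loader, num_classes = dataset_loading(args, config)
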
Code Example #2
File: train_ssd.py  Project: alfarih31/grit-fr-ml
    create_net = create_fpnnet_ssd
    config = fpnnet_ssd_config

    train_transform = TrainAugmentation(config.image_size, config.image_mean, config.image_std)
    target_transform = MatchPrior(config.priors, config.center_variance,
                                  config.size_variance, 0.5)

    test_transform = TestTransform(config.image_size, config.image_mean, config.image_std)

    logging.info("Prepare training datasets.")
    datasets = []
    for dataset_path in args.datasets:
        dataset = _VOCDataset(dataset_path, transform=train_transform,
                              target_transform=target_transform)
        label_file = os.path.join(args.checkpoint_folder, "voc-model-labels.txt")
        store_labels(label_file, dataset.class_names)
        num_classes = len(dataset.class_names)
        datasets.append(dataset)
    logging.info(f"Stored labels into file {label_file}.")
    train_dataset = ConcatDataset(datasets)
    logging.info("Train dataset size: {}".format(len(train_dataset)))
    train_loader = DataLoader(train_dataset, args.batch_size,
                              num_workers=args.num_workers,
                              shuffle=True)
    logging.info("Prepare Validation datasets.")

    val_dataset = _VOCDataset(args.validation_dataset, transform=test_transform,
                              target_transform=target_transform, is_test=True)
    logging.info(val_dataset)
    logging.info("validation dataset size: {}".format(len(val_dataset)))
Code Example #3
def dataset_loading(args, config):

    train_transform = TrainAugmentation(config.image_size, config.image_mean,
                                        config.image_std)
    target_transform = MatchPrior(config.priors, config.center_variance,
                                  config.size_variance, 0.5)
    test_transform = TestTransform(config.image_size, config.image_mean,
                                   config.image_std)

    logging.info("Prepare training datasets.")
    dataset_name = args['flow_control']['dataset_type']
    if dataset_name == 'voc':
        dataset = VOCDataset("",
                             transform=train_transform,
                             target_transform=target_transform)
        label_txt_name = "voc-model-labels.txt"
    elif dataset_name == 'open_images':
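        # NOTE: dataset_path is never defined in this function, and
        # args.balance_data assumes attribute access on what is otherwise a
        # dict, so this branch would fail if it were actually selected.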
        dataset = OpenImagesDataset(dataset_path,
                                    transform=train_transform,
                                    target_transform=target_transform,
                                    dataset_type="train",
                                    balance_data=args.balance_data)
        label_txt_name = "open-images-model-labels.txt"
    elif dataset_name == 'coco':
        dataset = CocoDetection(args["Datasets"]["coco"]["train_image_path"],
                                args["Datasets"]["coco"]["train_anno_path"],
                                transform=train_transform,
                                target_transform=target_transform)
        label_txt_name = "open-images-model-labels.txt"
    elif dataset_name == 'ecp':
        #dataset = EuroCity_Dataset( args["Datasets"]["ecp"]["train_image_path"],
        #                            args["Datasets"]["ecp"]["train_anno_path"],
        #                            transform=train_transform, target_transform=target_transform)
        dataset = ECP_table_comm(args["Datasets"]["ecp"]["train_image_path"],
                                 args["Datasets"]["ecp"]["train_anno_path"],
                                 transform=train_transform,
                                 target_transform=target_transform)
        dataset.Active_mode()
        if len(dataset) == 0:
            raise ValueError("No ECP training files were found.")

        label_txt_name = "open-images-model-labels.txt"
    elif dataset_name == "ecp-random":
        dataset = ECP_subsample_dataset(
            args["Datasets"]["ecp"]["train_image_path"],
            args["Datasets"]["ecp"]["train_anno_path"],
            transform=train_transform,
            target_transform=target_transform,
            _sampling_mode="random",
            ratio=args['flow_control']['dataset_ratio'])
        label_txt_name = "open-images-model-labels.txt"
    elif dataset_name == "ecp-centroid":
        dataset = ECP_subsample_dataset(
            args["Datasets"]["ecp"]["train_image_path"],
            args["Datasets"]["ecp"]["train_anno_path"],
            transform=train_transform,
            target_transform=target_transform,
            _sampling_mode="centroid",
            ratio=args['flow_control']['dataset_ratio'])
        label_txt_name = "open-images-model-labels.txt"
    elif dataset_name in ["virat", "VIRAT"]:
        # dataset = VIRAT_Loader(args["Datasets"]["virat"]["train_image_path"],
        #                        args["Datasets"]["virat"]["train_anno_path"],
        #                        transform=train_transform, target_transform=target_transform)
        dataset = VIRAT_Dataset(
            args["Datasets"]["virat_seq"]["train_image_path"],
            args["Datasets"]["virat_seq"]["train_anno_path"],
            transform=train_transform,
            target_transform=target_transform,
            downpurning_ratio=0.2)  #0.2
        # dataset = VIRAT_table_comm(args["Datasets"]["virat_seq"]["train_image_path"],
        #                        args["Datasets"]["virat_seq"]["train_anno_path"],
        #                        transform=train_transform, target_transform=target_transform)

        label_txt_name = "virat_labels.txt"
    else:
        raise ValueError(
            "Dataset type {} is not supported.".format(dataset_name))

    label_file = os.path.join(args["flow_control"]["checkpoint_folder"],
                              label_txt_name)
    if os.path.exists(label_file):
        store_labels(label_file, dataset.class_names)
    logging.info(dataset)
    num_classes = len(dataset.class_names)

    train_dataset = dataset
    logging.info("Stored labels into file {}.".format(label_file))
    logging.info("Train dataset size: {}".format(len(train_dataset)))
    logging.debug("================= train_loader ===================")
    logging.debug("DataLoader batchsize : ",
                  args["flow_control"]["batch_size"])
    # if dataset_name == "virat":
    #     indicies = np.arange(args["flow_control"]["batch_size"])
    #     train_loader = DataLoader(train_dataset, args["flow_control"]["batch_size"],
    #                           num_workers=args["flow_control"]["num_workers"],
    #                           shuffle=False, sampler=SubsetRandomSampler(indicies))
    # else:
    train_loader = DataLoader(train_dataset,
                              args["flow_control"]["batch_size"],
                              num_workers=args["flow_control"]["num_workers"],
                              shuffle=True)
    logging.info("Prepare Validation datasets.")
    if dataset_name == "voc":
        raise NotImplementedError("Doesn't modify")
        val_dataset = VOCDataset("",
                                 transform=test_transform,
                                 target_transform=target_transform,
                                 is_test=True)
    elif dataset_name == 'open_images':
        val_dataset = OpenImagesDataset(dataset_path,
                                        transform=test_transform,
                                        target_transform=target_transform,
                                        dataset_type="test")
        logging.info(val_dataset)
    elif dataset_name == "coco":
        val_dataset = CocoDetection(args["Datasets"]["coco"]["val_image_path"],
                                    args["Datasets"]["coco"]["val_anno_path"],
                                    transform=test_transform,
                                    target_transform=target_transform)
        logging.info(val_dataset)
    elif dataset_name in ["ecp", "ecp-random", "ecp-centroid"]:
        val_dataset = EuroCity_Dataset(
            args["Datasets"]["ecp"]["val_image_path"],
            args["Datasets"]["ecp"]["val_anno_path"],
            transform=test_transform,
            target_transform=target_transform)
    # elif dataset_name = "ecp-random":
    #     val_dataset = ECP_subsample_dataset( args["Datasets"]["ecp"]["val_image_path"],
    #                                          args["Datasets"]["ecp"]["val_anno_path"],
    #                                          transform=test_transform, target_transform=target_transform, _sampling_mode = "random", ratio = 0.1)
    # elif dataset_name = "ecp-centroid":
    #     val_dataset = ECP_subsample_dataset( args["Datasets"]["ecp"]["val_image_path"],
    #                                          args["Datasets"]["ecp"]["val_anno_path"],
    #                                          transform=test_transform, target_transform=target_transform, _sampling_mode = "centroid", ratio = 0.1)

    elif dataset_name in ["virat", "VIRAT"]:
        # val_dataset = VIRAT_Loader(args["Datasets"]["virat"]["test_image_path"],
        #                            args["Datasets"]["virat"]["test_anno_path"],
        #                            transform=train_transform, target_transform=target_transform)
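        # NOTE: validation here reuses the training image/annotation paths and
        # train_transform, just with a smaller downpurning_ratio.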
        val_dataset = VIRAT_Dataset(
            args["Datasets"]["virat_seq"]["train_image_path"],
            args["Datasets"]["virat_seq"]["train_anno_path"],
            transform=train_transform,
            target_transform=target_transform,
            downpurning_ratio=0.2 * 3. / 9.)

    logging.info("validation dataset size: {}".format(len(val_dataset)))
    # if dataset_name == "virat":
    #     indicies = np.arange(args["flow_control"]["batch_size"])
    #     val_loader = DataLoader(train_dataset, args["flow_control"]["batch_size"],
    #                           num_workers=args["flow_control"]["num_workers"],
    #                           shuffle=False, sampler=SubsetRandomSampler(indicies))
    # else:
    #     val_loader = DataLoader(val_dataset, args["flow_control"]["batch_size"],
    #                             num_workers=args["flow_control"]["num_workers"],
    #                             shuffle=False)
    val_loader = DataLoader(val_dataset,
                            args["flow_control"]["batch_size"],
                            num_workers=args["flow_control"]["num_workers"],
                            shuffle=False)
    logging.info("Build network.")
    return train_loader, val_loader, num_classes
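
The long if/elif chain above pairs each dataset_type with a constructor call and a label-file name. One way to tighten it, sketched below with hypothetical factory helpers rather than the real dataset classes, is a lookup table keyed by dataset name:

# Sketch of a table-driven replacement for the dispatch above. make_virat and
# make_ecp are hypothetical factories; real ones would wrap the dataset
# constructors used in the example.
def make_virat(args, transform, target_transform):
    ...

def make_ecp(args, transform, target_transform):
    ...

DATASET_TABLE = {
    "virat": (make_virat, "virat_labels.txt"),
    "ecp":   (make_ecp,   "open-images-model-labels.txt"),
}

def build_dataset(dataset_name, args, transform, target_transform):
    try:
        factory, label_txt_name = DATASET_TABLE[dataset_name]
    except KeyError:
        raise ValueError("Dataset type {} is not supported.".format(dataset_name))
    return factory(args, transform, target_transform), label_txt_name
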
Code Example #4
File: train_ssd.py  Project: sarzhann/object_tracker
        config = mobilenetv1_ssd_config
    else:
        logging.fatal("The net type is wrong.")
        parser.print_help(sys.stderr)
        sys.exit(1)
    train_transform = TrainAugmentation(config.image_size, config.image_mean, config.image_std)
    target_transform = MatchPrior(config.priors, config.center_variance,
                                  config.size_variance, 0.5)

    test_transform = TestTransform(config.image_size, config.image_mean, config.image_std)

    logging.info("Prepare training dataset.")
    train_dataset = VOCDataset(args.dataset, transform=train_transform,
                               target_transform=target_transform)
    label_file = os.path.join(args.checkpoint_folder, "labels.txt")
    store_labels(label_file, train_dataset.class_names)
    num_classes = len(train_dataset.class_names)
    logging.info(f"Stored labels into file {label_file}.")
    logging.info("Train dataset size: {}".format(len(train_dataset)))
    train_loader = DataLoader(train_dataset, args.batch_size,
                              num_workers=args.num_workers,
                              shuffle=True)

    logging.info("Prepare Validation datasets.")
    val_dataset = VOCDataset(args.validation_dataset, transform=test_transform,
                             target_transform=target_transform, is_test=True)
    logging.info(val_dataset)
    logging.info("validation dataset size: {}".format(len(val_dataset)))

    val_loader = DataLoader(val_dataset, args.batch_size,
                            num_workers=args.num_workers,
                            shuffle=False)
Code Example #5
def main(args):
    DEVICE = torch.device(
        "cuda:0" if torch.cuda.is_available() and args.use_cuda else "cpu")
    #DEVICE = torch.device("cpu")
    if args.use_cuda and torch.cuda.is_available():
        torch.backends.cudnn.benchmark = True
        logging.info("Use Cuda.")

    timer = Timer()

    logging.info(args)
    if args.net == 'vgg16-ssd':
        create_net = create_vgg_ssd
        config = vgg_ssd_config
    elif args.net == 'mb1-ssd':
        create_net = create_mobilenetv1_ssd
        config = mobilenetv1_ssd_config
    elif args.net == 'mb1-ssd-lite':
        create_net = create_mobilenetv1_ssd_lite
        config = mobilenetv1_ssd_config
    elif args.net == 'sq-ssd-lite':
        create_net = create_squeezenet_ssd_lite
        config = squeezenet_ssd_config
    elif args.net == 'mb2-ssd-lite':
        create_net = lambda num: create_mobilenetv2_ssd_lite(
            num, width_mult=args.mb2_width_mult)
        config = mobilenetv1_ssd_config
    else:
        logging.fatal("The net type is wrong.")
        parser.print_help(sys.stderr)
        sys.exit(1)
    train_transform = TrainAugmentation(config.image_size, config.image_mean,
                                        config.image_std)
    target_transform = MatchPrior(config.priors, config.center_variance,
                                  config.size_variance, 0.5)

    test_transform = TestTransform(config.image_size, config.image_mean,
                                   config.image_std)

    logging.info("Prepare training datasets.")
    datasets = []
    for dataset_path in args.datasets:
        if args.dataset_type == 'voc':
            dataset = VOCDataset(dataset_path,
                                 transform=train_transform,
                                 target_transform=target_transform)
            label_file = os.path.join(args.checkpoint_folder,
                                      "voc-model-labels.txt")
            store_labels(label_file, dataset.class_names)
            num_classes = len(dataset.class_names)
        elif args.dataset_type == 'open_images':
            dataset = OpenImagesDataset(dataset_path,
                                        transform=train_transform,
                                        target_transform=target_transform,
                                        dataset_type="train",
                                        balance_data=args.balance_data)
            label_file = os.path.join(args.checkpoint_folder,
                                      "open-images-model-labels.txt")
            store_labels(label_file, dataset.class_names)
            logging.info(dataset)
            num_classes = len(dataset.class_names)
        elif args.dataset_type == 'coco':
            # root, annFile, transform=None, target_transform=None, transforms=None)
            #  dataset_type="train", balance_data=args.balance_data)
            dataset = CocoDetection(
                "/home/wenyen4desh/datasets/coco/train2017",
                "/home/wenyen4desh/datasets/coco/annotations/instances_train2017.json",
                transform=train_transform,
                target_transform=target_transform)

            label_file = os.path.join(args.checkpoint_folder,
                                      "open-images-model-labels.txt")
            store_labels(label_file, dataset.class_names)
            logging.info(dataset)
            num_classes = len(dataset.class_names)
            # raise ValueError("Dataset type {} yet implement.".format(args.dataset_type))
        else:
            raise ValueError("Dataset type {} is not supported.".format(
                args.dataset_type))
        datasets.append(dataset)
    logging.info("Stored labels into file {}.".format(label_file))
    train_dataset = ConcatDataset(datasets)
    logging.info("Train dataset size: {}".format(len(train_dataset)))
    train_loader = DataLoader(train_dataset,
                              args.batch_size,
                              num_workers=args.num_workers,
                              shuffle=True)
    logging.info("Prepare Validation datasets.")
    if args.dataset_type == "voc":
        val_dataset = VOCDataset(args.validation_dataset,
                                 transform=test_transform,
                                 target_transform=target_transform,
                                 is_test=True)
    elif args.dataset_type == 'open_images':
        val_dataset = OpenImagesDataset(dataset_path,
                                        transform=test_transform,
                                        target_transform=target_transform,
                                        dataset_type="test")
        logging.info(val_dataset)
    elif args.dataset_type == "coco":
        val_dataset = CocoDetection(
            "/home/wenyen4desh/datasets/coco/val2017",
            "/home/wenyen4desh/datasets/coco/annotations/instances_val2017.json",
            transform=test_transform,
            target_transform=target_transform)
        logging.info(val_dataset)
    logging.info("validation dataset size: {}".format(len(val_dataset)))

    val_loader = DataLoader(val_dataset,
                            args.batch_size,
                            num_workers=args.num_workers,
                            shuffle=False)
    logging.info("Build network.")
    net = create_net(num_classes)
    min_loss = -10000.0
    last_epoch = -1

    base_net_lr = args.base_net_lr if args.base_net_lr is not None else args.lr
    extra_layers_lr = args.extra_layers_lr if args.extra_layers_lr is not None else args.lr
    if args.freeze_base_net:
        logging.info("Freeze base net.")
        freeze_net_layers(net.base_net)
        params = itertools.chain(net.source_layer_add_ons.parameters(),
                                 net.extras.parameters(),
                                 net.regression_headers.parameters(),
                                 net.classification_headers.parameters())
        params = [{
            'params':
            itertools.chain(net.source_layer_add_ons.parameters(),
                            net.extras.parameters()),
            'lr':
            extra_layers_lr
        }, {
            'params':
            itertools.chain(net.regression_headers.parameters(),
                            net.classification_headers.parameters())
        }]
    elif args.freeze_net:
        freeze_net_layers(net.base_net)
        freeze_net_layers(net.source_layer_add_ons)
        freeze_net_layers(net.extras)
        params = itertools.chain(net.regression_headers.parameters(),
                                 net.classification_headers.parameters())
        logging.info("Freeze all the layers except prediction heads.")
    else:
        params = [{
            'params': net.base_net.parameters(),
            'lr': base_net_lr
        }, {
            'params':
            itertools.chain(net.source_layer_add_ons.parameters(),
                            net.extras.parameters()),
            'lr':
            extra_layers_lr
        }, {
            'params':
            itertools.chain(net.regression_headers.parameters(),
                            net.classification_headers.parameters())
        }]

    timer.start("Load Model")
    if args.resume:
        logging.info("Resume from the model {}".format(args.resume))
        net.load(args.resume)
    elif args.base_net:
        logging.info("Init from base net {}".format(args.base_net))
        net.init_from_base_net(args.base_net)
    elif args.pretrained_ssd:
        logging.info("Init from pretrained ssd {}".format(args.pretrained_ssd))
        net.init_from_pretrained_ssd(args.pretrained_ssd)
    logging.info('Took {:.2f} seconds to load the model.'.format(
        timer.end("Load Model")))

    net.to(DEVICE)

    criterion = MultiboxLoss(config.priors,
                             iou_threshold=0.5,
                             neg_pos_ratio=3,
                             center_variance=0.1,
                             size_variance=0.2,
                             device=DEVICE)
    optimizer = torch.optim.SGD(params,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    logging.info("Learning rate: {}, Base net learning rate: {}, ".format(
        args.lr, base_net_lr) +
                 "Extra Layers learning rate: {}.".format(extra_layers_lr))

    if args.scheduler == 'multi-step':
        logging.info("Uses MultiStepLR scheduler.")
        milestones = [int(v.strip()) for v in args.milestones.split(",")]
        scheduler = MultiStepLR(optimizer,
                                milestones=milestones,
                                gamma=0.1,
                                last_epoch=last_epoch)
    elif args.scheduler == 'cosine':
        logging.info("Uses CosineAnnealingLR scheduler.")
        scheduler = CosineAnnealingLR(optimizer,
                                      args.t_max,
                                      last_epoch=last_epoch)
    else:
        logging.fatal("Unsupported Scheduler: {}.".format(args.scheduler))
        parser.print_help(sys.stderr)
        sys.exit(1)

    logging.info("Start training from epoch {}.".format(last_epoch + 1))
    for epoch in range(last_epoch + 1, args.num_epochs):
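        # NOTE: stepping the scheduler before train() follows the older PyTorch
        # convention; recent releases expect scheduler.step() after the
        # optimizer steps and emit a warning for this ordering.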
        scheduler.step()
        train(train_loader,
              net,
              criterion,
              optimizer,
              device=DEVICE,
              debug_steps=args.debug_steps,
              epoch=epoch)

        if epoch % args.validation_epochs == 0 or epoch == args.num_epochs - 1:
            val_loss, val_regression_loss, val_classification_loss = test(
                val_loader, net, criterion, DEVICE)
            logging.info("Epoch: {}, ".format(epoch) +
                         "Validation Loss: {:.4f}, ".format(val_loss) +
                         "Validation Regression Loss {:.4f}, ".format(
                             val_regression_loss) +
                         "Validation Classification Loss: {:.4f}".format(
                             val_classification_loss))
            model_path = os.path.join(
                args.checkpoint_folder,
                "{}-Epoch-{}-Loss-{}.pth".format(args.net, epoch, val_loss))
            net.save(model_path)
            logging.info("Saved model {}".format(model_path))
Code Example #6
def dataset_loading(args, config):

    train_transform = TrainAugmentation(config.image_size, config.image_mean,
                                        config.image_std)
    target_transform = MatchPrior(config.priors, config.center_variance,
                                  config.size_variance, 0.5)
    test_transform = TestTransform(config.image_size, config.image_mean,
                                   config.image_std)
    test_normal_transform = TestTransform(config.image_size, 0, 1)
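    # With mean=0 and std=1 the normalization becomes a no-op, so this
    # transform only resizes and keeps raw pixel values.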

    logging.info("Prepare training datasets.")
    dataset_name = args['flow_control']['dataset_type']
    if dataset_name == 'voc':
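        # NOTE: dataset_path is never defined in this function, so this branch
        # and the 'open_images' branch below would raise a NameError; the
        # attribute-style lookups (args.balance_data, args.validation_dataset)
        # also clash with the dict-style args used elsewhere.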
        dataset = VOCDataset(dataset_path,
                             transform=train_transform,
                             target_transform=target_transform)
        label_txt_name = "voc-model-labels.txt"
    elif dataset_name == 'open_images':
        dataset = OpenImagesDataset(dataset_path,
                                    transform=train_transform,
                                    target_transform=target_transform,
                                    dataset_type="train",
                                    balance_data=args.balance_data)
        label_txt_name = "open-images-model-labels.txt"
    elif dataset_name == 'coco':
        dataset = CocoDetection(args["Datasets"]["coco"]["train_image_path"],
                                args["Datasets"]["coco"]["train_anno_path"],
                                transform=test_normal_transform,
                                target_transform=target_transform)
        #  target_transform=target_transform)
        #  transform=train_transform, target_transform=target_transform)
        label_txt_name = "open-images-model-labels.txt"
    elif dataset_name == "ecp":
        dataset = EuroCity_Dataset(args["Datasets"]["ecp"]["train_image_path"],
                                   args["Datasets"]["ecp"]["train_anno_path"],
                                   transform=test_normal_transform,
                                   target_transform=target_transform)
        label_txt_name = "open-images-model-labels.txt"
    elif dataset_name == "ecp-random":
        dataset = ECP_subsample_dataset(
            args["Datasets"]["ecp"]["train_image_path"],
            args["Datasets"]["ecp"]["train_anno_path"],
            transform=test_normal_transform,
            target_transform=target_transform,
            _sampling_mode="random",
            ratio=0.1)
        label_txt_name = "open-images-model-labels.txt"
    elif dataset_name == "ecp-centroid":
        dataset = ECP_subsample_dataset(
            args["Datasets"]["ecp"]["train_image_path"],
            args["Datasets"]["ecp"]["train_anno_path"],
            transform=test_normal_transform,
            target_transform=target_transform,
            _sampling_mode="centroid",
            ratio=0.1)
        label_txt_name = "open-images-model-labels.txt"
    else:
        raise ValueError(
            "Dataset type {} is not supported.".format(dataset_name))

    label_file = os.path.join(args["flow_control"]["checkpoint_folder"],
                              label_txt_name)
    if os.path.exists(label_file):
        store_labels(label_file, dataset.class_names)
    logging.info(dataset)
    num_classes = len(dataset.class_names)

    train_dataset = dataset
    logging.info("Stored labels into file {}.".format(label_file))
    logging.info("Train dataset size: {}".format(len(train_dataset)))
    train_loader = DataLoader(train_dataset,
                              args["flow_control"]["batch_size"],
                              num_workers=args["flow_control"]["num_workers"],
                              shuffle=True)
    logging.info("Prepare Validation datasets.")
    if dataset_name == "voc":
        val_dataset = VOCDataset(args.validation_dataset,
                                 transform=test_transform,
                                 target_transform=target_transform,
                                 is_test=True)
    elif dataset_name == 'open_images':
        val_dataset = OpenImagesDataset(dataset_path,
                                        transform=test_transform,
                                        target_transform=target_transform,
                                        dataset_type="test")
        logging.info(val_dataset)
    elif dataset_name == "coco":
        val_dataset = CocoDetection(args["Datasets"]["coco"]["val_image_path"],
                                    args["Datasets"]["coco"]["val_anno_path"],
                                    transform=test_transform,
                                    target_transform=target_transform)
        logging.info(val_dataset)
    elif dataset_name == "ecp":
        val_dataset = EuroCity_Dataset(
            args["Datasets"]["ecp"]["val_image_path"],
            args["Datasets"]["ecp"]["val_anno_path"],
            transform=test_transform,
            target_transform=target_transform)
        logging.info(val_dataset)
    elif dataset_name == "ecp-random":
        val_dataset = ECP_subsample_dataset(
            args["Datasets"]["ecp"]["val_image_path"],
            args["Datasets"]["ecp"]["val_anno_path"],
            transform=test_transform,
            target_transform=target_transform,
            _sampling_mode="random",
            ratio=0.1)
    elif dataset_name == "ecp-centroid":
        val_dataset = ECP_subsample_dataset(
            args["Datasets"]["ecp"]["val_image_path"],
            args["Datasets"]["ecp"]["val_anno_path"],
            transform=test_transform,
            target_transform=target_transform,
            _sampling_mode="centroid",
            ratio=0.1)

    logging.info("validation dataset size: {}".format(len(val_dataset)))

    val_loader = DataLoader(val_dataset,
                            args["flow_control"]["batch_size"],
                            num_workers=args["flow_control"]["num_workers"],
                            shuffle=False)
    logging.info("Build network.")
    return train_loader, val_loader, num_classes
Code Example #7
def train_network(dataset_path, model_path, net_type):
    args.datasets = dataset_path
    args.validation_dataset = dataset_path
    args.checkpoint_folder = model_path
    args.log_dir = os.path.join(args.checkpoint_folder, 'log')
    args.net = net_type

    timer = Timer()

    logging.info(args)
    if args.net == 'slim':
        create_net = create_mb_tiny_fd
        config = fd_config
    elif args.net == 'RFB':
        create_net = create_Mb_Tiny_RFB_fd
        config = fd_config
    else:
        logging.fatal("The net type is wrong.")
        parser.print_help(sys.stderr)
        sys.exit(1)

    train_transform = TrainAugmentation(config.image_size, config.image_mean,
                                        config.image_std)
    target_transform = MatchPrior(config.priors, config.center_variance,
                                  config.size_variance, args.overlap_threshold)

    test_transform = TestTransform(config.image_size, config.image_mean_test,
                                   config.image_std)

    if not os.path.exists(args.checkpoint_folder):
        os.makedirs(args.checkpoint_folder)
    logging.info("Prepare training datasets.")
    datasets = []

    # voc datasets
    dataset = VOCDataset(dataset_path,
                         transform=train_transform,
                         target_transform=target_transform)
    label_file = os.path.join(args.checkpoint_folder, "voc-model-labels.txt")
    store_labels(label_file, dataset.class_names)
    num_classes = len(dataset.class_names)
    print('num_classes: ', num_classes)

    logging.info(f"Stored labels into file {label_file}.")
    # train_dataset = ConcatDataset(datasets)
    train_dataset = dataset
    logging.info("Train dataset size: {}".format(len(train_dataset)))
    train_loader = DataLoader(train_dataset,
                              args.batch_size,
                              num_workers=args.num_workers,
                              shuffle=True)
    logging.info("Prepare Validation datasets.")
    val_dataset = VOCDataset(args.validation_dataset,
                             transform=test_transform,
                             target_transform=target_transform,
                             is_test=True)

    logging.info("validation dataset size: {}".format(len(val_dataset)))

    val_loader = DataLoader(val_dataset,
                            args.batch_size,
                            num_workers=args.num_workers,
                            shuffle=False)
    logging.info("Build network.")
    net = create_net(num_classes)

    timer.start("Load Model")
    if args.resume:
        logging.info(f"Resume from the model {args.resume}")
        net.load(args.resume)
    logging.info(
        f'Took {timer.end("Load Model"):.2f} seconds to load the model.')

    # add multigpu_train
    if torch.cuda.device_count() >= 1:
        cuda_index_list = [int(v.strip()) for v in args.cuda_index.split(",")]
        net = nn.DataParallel(net, device_ids=cuda_index_list)
        logging.info("use gpu :{}".format(cuda_index_list))

    min_loss = -10000.0
    last_epoch = -1

    base_net_lr = args.base_net_lr if args.base_net_lr is not None else args.lr
    extra_layers_lr = args.extra_layers_lr if args.extra_layers_lr is not None else args.lr
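    # The groups below access net.module, so this assumes the DataParallel
    # wrap above took place (i.e. at least one CUDA device was found).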
    params = [{
        'params': net.module.base_net.parameters(),
        'lr': base_net_lr
    }, {
        'params':
        itertools.chain(net.module.source_layer_add_ons.parameters(),
                        net.module.extras.parameters()),
        'lr':
        extra_layers_lr
    }, {
        'params':
        itertools.chain(net.module.regression_headers.parameters(),
                        net.module.classification_headers.parameters())
    }]

    net.to(DEVICE)
    criterion = MultiboxLoss(config.priors,
                             iou_threshold=args.iou_threshold,
                             neg_pos_ratio=5,
                             center_variance=0.1,
                             size_variance=0.2,
                             device=DEVICE,
                             num_classes=num_classes,
                             loss_type=args.loss_type)
    if args.optimizer_type == "SGD":
        optimizer = torch.optim.SGD(params,
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    elif args.optimizer_type == "Adam":
        optimizer = torch.optim.Adam(params, lr=args.lr)
        logging.info("use Adam optimizer")
    else:
        logging.fatal(f"Unsupported optimizer: {args.scheduler}.")
        parser.print_help(sys.stderr)
        sys.exit(1)
    logging.info(
        f"Learning rate: {args.lr}, Base net learning rate: {base_net_lr}, " +
        f"Extra Layers learning rate: {extra_layers_lr}.")
    if args.optimizer_type != "Adam":
        if args.scheduler == 'multi-step':
            logging.info("Uses MultiStepLR scheduler.")
            milestones = [int(v.strip()) for v in args.milestones.split(",")]
            scheduler = MultiStepLR(optimizer,
                                    milestones=milestones,
                                    gamma=0.1,
                                    last_epoch=last_epoch)
        elif args.scheduler == 'poly':
            logging.info("Uses PolyLR scheduler.")
        else:
            logging.fatal(f"Unsupported Scheduler: {args.scheduler}.")
            parser.print_help(sys.stderr)
            sys.exit(1)

    logging.info(f"Start training from epoch {last_epoch + 1}.")
    for epoch in range(last_epoch + 1, args.num_epochs):
        if args.optimizer_type != "Adam":
            if args.scheduler != "poly":
                if epoch != 0:
                    scheduler.step()
        train(train_loader,
              net,
              criterion,
              optimizer,
              device=DEVICE,
              debug_steps=args.debug_steps,
              epoch=epoch)
        if args.scheduler == "poly":
            adjust_learning_rate(optimizer, epoch)
        logging.info("epoch: {} lr rate :{}".format(
            epoch, optimizer.param_groups[0]['lr']))

        if epoch % args.validation_epochs == 0 or epoch == args.num_epochs - 1:
            logging.info("validation epoch: {} lr rate :{}".format(
                epoch, optimizer.param_groups[0]['lr']))
            val_loss, val_regression_loss, val_classification_loss = test(
                val_loader, net, criterion, DEVICE)
            logging.info(
                f"Epoch: {epoch}, " + f"Validation Loss: {val_loss:.4f}, " +
                f"Validation Regression Loss {val_regression_loss:.4f}, " +
                f"Validation Classification Loss: {val_classification_loss:.4f}"
            )
            model_path = os.path.join(
                args.checkpoint_folder,
                f"{args.net}-Epoch-{epoch}-Loss-{val_loss:.4f}.pth")
            net.module.save(model_path)
            logging.info(f"Saved model {model_path}")