Example 1
0
        # No valid arguments: print usage to stderr and abort.
        parser.print_help(sys.stderr)
        sys.exit(1)
    # Build augmentation/transform pipelines from the SSD config
    # (image size and per-channel normalization stats).
    train_transform = TrainAugmentation(config.image_size, config.image_mean, config.image_std)
    # MatchPrior assigns ground-truth boxes to prior (anchor) boxes; the
    # trailing 0.5 is presumably the IoU matching threshold — confirm
    # against MatchPrior's signature.
    target_transform = MatchPrior(config.priors, config.center_variance,
                                  config.size_variance, 0.5)

    test_transform = TestTransform(config.image_size, config.image_mean, config.image_std)

    logging.info("Prepare training datasets.")
    # One dataset object per path in args.datasets; all concatenated below.
    datasets = []
    for dataset_path in args.datasets:
        if args.dataset_type == 'voc':
            dataset = VOCDataset(dataset_path, transform=train_transform,
                                 target_transform=target_transform)
            # Persist the class-name list next to the checkpoints so that
            # inference can map class indices back to labels.
            label_file = os.path.join(args.checkpoint_folder, "voc-model-labels.txt")
            store_labels(label_file, dataset.class_names)
            num_classes = len(dataset.class_names)
        elif args.dataset_type == 'open_images':
            dataset = OpenImagesDataset(dataset_path,
                 transform=train_transform, target_transform=target_transform,
                 dataset_type="train", balance_data=args.balance_data)
            label_file = os.path.join(args.checkpoint_folder, "open-images-model-labels.txt")
            store_labels(label_file, dataset.class_names)
            logging.info(dataset)
            num_classes = len(dataset.class_names)

        else:
            # NOTE(review): "tpye" is a typo for "type" in this user-facing
            # message; left byte-identical in this comment-only pass.
            raise ValueError(f"Dataset tpye {args.dataset_type} is not supported.")
        datasets.append(dataset)
    # NOTE: label_file and num_classes keep the values from the LAST
    # dataset iterated; assumes all paths share one dataset_type.
    logging.info(f"Stored labels into file {label_file}.")
    train_dataset = ConcatDataset(datasets)
Example 2
0
if __name__ == '__main__':
	timer = Timer()

	logging.info(args)
	# Static config module supplying prior boxes, image size and
	# normalization stats for the MobileNetV1-SSD model.
	config = mobilenetv1_ssd_config	# config module for priors etc.
	train_transform = TrainAugmentation(config.image_size, config.image_mean, config.image_std)
	# MatchPrior assigns ground-truth boxes to prior (anchor) boxes; the
	# trailing 0.5 is presumably the IoU matching threshold — confirm
	# against MatchPrior's signature.
	target_transform = MatchPrior(config.priors, config.center_variance,
								  config.size_variance, 0.5)

	test_transform = TestTransform(config.image_size, config.image_mean, config.image_std)

	logging.info("Prepare training datasets.")
	train_dataset = VIDDataset(args.datasets, transform=train_transform,
								 target_transform=target_transform)
	# Persist the class-name list so inference can map class indices
	# back to labels; written under a hard-coded "models/" directory.
	label_file = os.path.join("models/", "vid-model-labels.txt")
	store_labels(label_file, train_dataset._classes_names)
	num_classes = len(train_dataset._classes_names)
	logging.info(f"Stored labels into file {label_file}.")
	logging.info("Train dataset size: {}".format(len(train_dataset)))
	# Training loader: shuffled; batch size / workers come from the CLI.
	train_loader = DataLoader(train_dataset, args.batch_size,
							  num_workers=args.num_workers,
							  shuffle=True)
	logging.info("Prepare Validation datasets.")
	# Same dataset root, selected as the validation split via is_val=True,
	# and using the (non-augmenting) test transform.
	val_dataset = VIDDataset(args.datasets, transform=test_transform,
								 target_transform=target_transform, is_val=True)
	logging.info(val_dataset)
	logging.info("validation dataset size: {}".format(len(val_dataset)))

	# Validation loader: order preserved (shuffle=False).
	val_loader = DataLoader(val_dataset, args.batch_size,
							num_workers=args.num_workers,
							shuffle=False)
        mode='max',
        prefix=''
    )

    # NOTE(review): these keyword names (max_nb_epochs, train_percent_check,
    # show_progress_bar, default_save_path) match a pre-1.0
    # pytorch-lightning Trainer API — confirm the installed version.
    # Resume is only requested when a non-empty checkpoint path was given.
    trainer = Trainer(max_nb_epochs=args.num_epochs,
                      gpus=[args.gpu_id],
                      train_percent_check=1.0,
                      check_val_every_n_epoch=args.validation_epochs,
                      val_percent_check=1.0,
                      show_progress_bar=True,
                      resume_from_checkpoint=args.resume if args.resume != '' else None,
                      default_save_path=saving_directory,
                      checkpoint_callback=checkpoint_callback)

    # Store labels
    # Persist the class-name list next to the checkpoints so inference
    # can map class indices back to labels.
    label_file = os.path.join(args.checkpoint_folder, "vid-model-labels.txt")
    store_labels(label_file, model.val_dataset._classes_names)
    logging.info(f"Stored labels into file {label_file}.")

    logging.info("Train dataset size: {}".format(len(model.train_dataset)))
    logging.info("validation dataset size: {}".format(len(model.val_dataset)))

    # view tensorflow logs
    logging.info(f'View tensorboard logs by running\ntensorboard --logdir {os.getcwd()}')
    logging.info('and going to http://localhost:6006 on your browser')

    # train
    # Blocking call: runs the full training/validation loop.
    trainer.fit(model)