# Script fragment: argument setup for training linear SVMs on VOC2007
# classification, using frozen features from a pretrained VirTex model.
# NOTE(review): this span was whitespace-mangled (newlines stripped); tokens
# are preserved verbatim and only formatting/comments are restored.

import numpy as np
from sklearn.svm import LinearSVC
from sklearn.metrics import average_precision_score
from sklearn.model_selection import cross_val_score
import torch
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

from virtex.config import Config
from virtex.factories import PretrainingModelFactory, DownstreamDatasetFactory
from virtex.models.downstream import FeatureExtractor
from virtex.utils.checkpointing import CheckpointManager
from virtex.utils.common import common_parser, common_setup

# CLI: shared base parser from virtex, extended with downstream-config args.
parser = common_parser(
    description="Train SVMs for VOC2007 classification on a pretrained model.")
group = parser.add_argument_group("Downstream config arguments.")
group.add_argument("--down-config", metavar="FILE", help="Path to a downstream config file.")
group.add_argument(
    "--down-config-override", nargs="*", default=[],
    help="A list of key-value pairs to modify downstream config params.",
)
# fmt: off
# NOTE(review): the return value of this add_argument_group call is discarded,
# so the following --layer argument is registered on the earlier
# "Downstream config arguments." group — confirm this is intended.
parser.add_argument_group("Checkpointing")
# NOTE(review): statement truncated at the visible end of this fragment; the
# call continues in unseen source.
group.add_argument("--layer", choices=["layer1", "layer2", "layer3", "layer4", "avgpool"],
# Script fragment: argument setup for fine-tuning Detectron2 object detectors
# from a pretrained VirTex visual backbone.
# NOTE(review): this span was whitespace-mangled (newlines stripped); tokens
# are preserved verbatim and only formatting/comments are restored.

from detectron2.evaluation import (
    LVISEvaluator, PascalVOCDetectionEvaluator, COCOEvaluator,
)
from detectron2.modeling.roi_heads import ROI_HEADS_REGISTRY, Res5ROIHeads

from virtex.config import Config
from virtex.factories import PretrainingModelFactory
from virtex.utils.checkpointing import CheckpointManager
from virtex.utils.common import common_parser
import virtex.utils.distributed as dist

# fmt: off
parser = common_parser(
    description="Train object detectors from pretrained visual backbone."
)
parser.add_argument(
    "--d2-config", required=True,
    help="Path to a detectron2 config for downstream task finetuning."
)
parser.add_argument(
    "--d2-config-override", nargs="*", default=[],
    help="""Key-value pairs from Detectron2 config to override from file. Some keys will be ignored because they are set from other args: [DATALOADER.NUM_WORKERS, SOLVER.EVAL_PERIOD, SOLVER.CHECKPOINT_PERIOD, TEST.EVAL_PERIOD, OUTPUT_DIR]""",
)
# NOTE(review): the group object returned here is discarded; subsequent
# arguments are added directly on `parser` — confirm intended.
parser.add_argument_group("Checkpointing and Logging")
# NOTE(review): statement truncated at the visible end of this fragment; the
# call continues in unseen source.
parser.add_argument(
# fmt: off from virtex.config import Config from virtex.factories import ( PretrainingDatasetFactory, PretrainingModelFactory, OptimizerFactory, LRSchedulerFactory, ) from virtex.utils.checkpointing import CheckpointManager from virtex.utils.common import common_parser, common_setup, cycle import virtex.utils.distributed as dist from virtex.utils.timer import Timer from virtex.data.transforms import IMAGENET_COLOR_MEAN, IMAGENET_COLOR_STD from virtex.utils.metrics import compute_scts_reward, CiderEvaluator parser = common_parser( description="Train a VirTex model (CNN + Transformer) on COCO Captions.") group = parser.add_argument_group("Checkpointing and Logging") group.add_argument( "--start-checkpoint", required=True, ) group.add_argument( "--resume-from", default=None, help="Path to a checkpoint to resume training from (if provided).") group.add_argument( "--checkpoint-every", type=int, default=2000, help="Serialize model to a checkpoint after every these many iterations.", )
# Script fragment: argument setup for downstream image classification with a
# linear model on frozen features, or end-to-end fine-tuning.
# NOTE(review): this span was whitespace-mangled (newlines stripped); tokens
# are preserved verbatim and only formatting/comments are restored.

from virtex.config import Config
from virtex.factories import (
    DownstreamDatasetFactory, PretrainingModelFactory, OptimizerFactory,
    LRSchedulerFactory,
)
from virtex.utils.checkpointing import CheckpointManager
from virtex.utils.common import common_parser, common_setup, cycle
import virtex.utils.distributed as dist
from virtex.utils.metrics import TopkAccuracy
from virtex.utils.timer import Timer

# fmt: off
parser = common_parser(
    description="""Do image classification with linear models and frozen feature extractor, or fine-tune the feature extractor end-to-end.""")
group = parser.add_argument_group("Downstream config arguments.")
group.add_argument("--down-config", metavar="FILE", help="Path to a downstream config file.")
group.add_argument(
    "--down-config-override", nargs="*", default=[],
    help="A list of key-value pairs to modify downstream config params.",
)
# NOTE(review): the group object returned here is discarded; the next argument
# is added directly on `parser` — confirm intended.
parser.add_argument_group("Checkpointing and Logging")
# NOTE(review): statement truncated at the visible end of this fragment; the
# call continues in unseen source.
parser.add_argument("--weight-init", choices=["random", "imagenet", "torchvision", "virtex"],
# Script fragment: argument setup for running image-captioning inference with
# a pretrained model and/or evaluating it on COCO Captions val2017.
# NOTE(review): this span was whitespace-mangled (newlines stripped); tokens
# are preserved verbatim and only formatting/comments are restored.

from typing import Any, Dict, List

from loguru import logger
import torch
from torch.utils.data import DataLoader

from virtex.config import Config
from virtex.data import ImageDirectoryDataset
from virtex.factories import TokenizerFactory, PretrainingModelFactory
from virtex.utils.checkpointing import CheckpointManager
from virtex.utils.common import common_parser, common_setup
from virtex.utils.metrics import CocoCaptionsEvaluator

# fmt: off
parser = common_parser(
    description="""Run image captioning inference on a pretrained model, and/or evaluate pretrained model on COCO Captions val2017 split.""")
parser.add_argument(
    "--data-root", default=None,
    help="""Path to a directory containing image files to generate captions for. Default: COCO val2017 image directory as expected relative to project root."""
)
parser.add_argument(
    "--checkpoint-path", required=True,
    help="Path to load checkpoint and run captioning evaluation.")
parser.add_argument("--output", default=None, help="Path to save predictions as a JSON file.")
# NOTE(review): statement truncated at the visible end of this fragment; the
# call continues in unseen source.
parser.add_argument(
# Script fragment: argument setup + start of main() for evaluating a
# pretrained model on captioning metrics.
# NOTE(review): this span was whitespace-mangled (newlines stripped); tokens
# are preserved verbatim and only formatting/comments are restored.

import os
from typing import Any, Dict, List

from loguru import logger
import torch
from torch.utils.data import DataLoader

# fmt: off
from virtex.config import Config
from virtex.data import CocoCaptionsEvalDataset
from virtex.factories import TokenizerFactory, PretrainingModelFactory
from virtex.utils.checkpointing import CheckpointManager
from virtex.utils.common import common_parser, common_setup
from virtex.utils.metrics import CocoCaptionsEvaluator

parser = common_parser(
    description="Evaluate a pre-trained model based on captioning metrics.")
parser.add_argument(
    "--checkpoint-path", required=True,
    help="Path to load checkpoint and run captioning evaluation.")
# fmt: on


# NOTE(review): `argparse` is used in the annotation below but no
# `import argparse` is visible in this fragment — confirm it exists elsewhere
# in the file.
# NOTE(review): function body continues past the visible end of this fragment;
# only the device-selection prologue is shown here.
def main(_A: argparse.Namespace):
    """Entry point: select compute device from parsed CLI args (visible part)."""
    if _A.num_gpus_per_machine == 0:
        # Set device as CPU if num_gpus_per_machine = 0.
        device = torch.device("cpu")
    else:
        # Get the current device (this will be zero here by default).
        device = torch.cuda.current_device()