Example #1
def main(args):
    # load config
    if args.variant == 'vmds':
        variant = multidsprites_videos_variant
    elif args.variant == 'spmot':
        variant = mot_sprite_variant
    elif args.variant == 'vor':
        variant = object_room_variant
    else:
        raise ValueError('Unknown variant: {}'.format(args.variant))

    # load data
    print('Loading dataset: {}'.format(args.variant))
    train_loader, val_loader = build_dataloader(
        batch_size=variant['training_args']['batch_size'],
        num_workers=variant['num_workers'],
        n_steps=variant['n_steps'],
        dataset_class=args.variant,
        path=variant['path'],
        T=variant['schedule_args']['T'])
    os.makedirs(variant['ckpt_dir'], exist_ok=True)
    logger.set_snapshot_dir(variant['ckpt_dir'])
    # build model
    if torch.cuda.is_available():
        set_gpu_mode(True)
    model = op3_model.create_model_v2(variant['op3_args'],
                                      variant['op3_args']['det_repsize'],
                                      variant['op3_args']['sto_repsize'],
                                      action_dim=0)
    if variant['dataparallel']:
        model = torch.nn.DataParallel(model)
    scheduler = TrainingScheduler(**variant["schedule_args"],
                                  max_T=variant['n_steps'])
    trainer = OP3Trainer(train_loader, val_loader, model, scheduler,
                         **variant["training_args"])

    print('Start training.')
    for epoch in range(variant['num_epochs']):
        should_save_imgs = (epoch % variant['save_period'] == 0)
        train_stats = trainer.train_epoch(epoch)
        test_stats = trainer.test_epoch(epoch,
                                        train=False,
                                        batches=1,
                                        save_reconstruction=should_save_imgs)
        trainer.test_epoch(epoch,
                           train=True,
                           batches=1,
                           save_reconstruction=should_save_imgs)
        for k, v in {**train_stats, **test_stats}.items():
            logger.record_tabular(k, v)
        logger.dump_tabular()
        trainer.save_model()
    print('Done.')
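main() only reads a handful of keys from whichever variant dict gets selected. For orientation, a minimal illustrative config with that shape is sketched below; every value is a placeholder, not one shipped with the project.

# Illustrative variant dict; all values below are placeholders.
example_variant = dict(
    path='/path/to/dataset.h5',            # dataset location
    num_workers=4,
    n_steps=10,                            # frames per video sample
    num_epochs=100,
    save_period=5,                         # epochs between saved reconstructions
    ckpt_dir='checkpoints/vmds',
    dataparallel=False,
    training_args=dict(batch_size=16),
    schedule_args=dict(T=5),               # plus whatever TrainingScheduler expects
    op3_args=dict(det_repsize=64, sto_repsize=64, K=8),
)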
Example #2
def main(args):
    # load config
    if args.variant == 'vmds':
        variant = multidsprites_videos_variant
    elif args.variant == 'spmot':
        variant = mot_sprite_variant
    elif args.variant == 'vor':
        variant = object_room_variant
    else:
        raise ValueError('Unknown variant: {}'.format(args.variant))

    # load data
    print('Loading dataset: {}'.format(args.variant))
    test_loader = build_testloader(
        batch_size=variant['training_args']['batch_size'],
        num_workers=variant['num_workers'],
        n_steps=variant['n_steps'],
        dataset_class=args.variant,
        path=variant['path'])

    # build model
    if torch.cuda.is_available():
        ptu.set_gpu_mode(True)
    model = op3_model.create_model_v2(variant['op3_args'],
                                      variant['op3_args']['det_repsize'],
                                      variant['op3_args']['sto_repsize'],
                                      action_dim=0)
    # load ckpt
    state_dict = torch.load(args.ckpt_file,
                            map_location=lambda storage, loc: storage)
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k
        if k.startswith('module.'):
            name = k[7:]  # strip the 'module.' prefix added by DataParallel
        new_state_dict[name] = v
    model.load_state_dict(new_state_dict)
    model.eval_mode = True
    if variant['dataparallel']:
        model = torch.nn.DataParallel(model)
    model.to(ptu.device)
    variant['schedule_args']['T'] = variant['n_steps']
    scheduler = TrainingScheduler(**variant["schedule_args"],
                                  max_T=variant['n_steps'])

    generate_annotation_file(model,
                             scheduler,
                             test_loader,
                             n_steps=variant['n_steps'],
                             n_slots=variant['op3_args']['K'],
                             path=args.out_path,
                             device=ptu.device)
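This evaluation entry point expects `args` to carry `variant`, `ckpt_file`, and `out_path`. A hypothetical argparse wrapper (argument names taken from the attributes used above, defaults assumed) could look like this:

if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(
        description='Run a trained OP3 model and dump annotations.')
    parser.add_argument('--variant', type=str, default='vmds',
                        choices=['vmds', 'spmot', 'vor'])
    parser.add_argument('--ckpt_file', type=str, required=True,
                        help='checkpoint produced by the training script')
    parser.add_argument('--out_path', type=str, required=True,
                        help='where to write the generated annotation file')
    main(parser.parse_args())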
Example #3
import os
import h5py

from detectron2.utils.logger import setup_logger
setup_logger()

# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.modeling import build_model

import pytorch_util as ptu

use_gpu, gpu_id = True, 0
ptu.set_gpu_mode(use_gpu, gpu_id)

os.environ['gpu_id'] = str(gpu_id)

max_num = 21808
dataset_loc = 'bdd_train.hdf5'
hfile = h5py.File(dataset_loc, 'r')

cfg = get_cfg()
# add project-specific config (e.g., TensorMask) here if you're not running a model in detectron2's core library
cfg.merge_from_file(
    model_zoo.get_config_file(
        "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # set threshold for this model
# Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well
# Checkpoint assumed to match the config file loaded above.
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
    "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
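With the config assembled, a typical next step (not shown in the snippet) is to wrap it in a DefaultPredictor and run it frame by frame; the HDF5 dataset key 'features' below is a placeholder, since the layout of bdd_train.hdf5 is not shown.

import numpy as np

predictor = DefaultPredictor(cfg)

# 'features' is a placeholder dataset key; adapt it to the actual HDF5 layout.
frame = np.asarray(hfile['features'][0])   # expected as an H x W x 3 BGR uint8 image
outputs = predictor(frame)                 # standard detectron2 inference call
instances = outputs['instances'].to('cpu')
print(instances.pred_classes, instances.scores)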
Example #4
import os, json, cv2, random
from collections import defaultdict

# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
import pickle
from projection import *

import pytorch_util as ptu

ptu.set_gpu_mode(True)
ptu.set_device(0)

# set up the Detectron2 segmentation model (Panoptic FPN config)
cfg = get_cfg()
cfg.merge_from_file(
    model_zoo.get_config_file(
        "COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml"))

custom = True  # load a custom fine-tuned checkpoint instead of the model-zoo weights
if custom:
    with open('../../../temp/checkpoint_800.pkl', 'rb') as f:
        cfg.MODEL.WEIGHTS = pickle.load(f)
else:
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
        "COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml")
Example #5
def run_experiment_here(
        experiment_function,
        variant=None,
        exp_id=0,
        seed=None,
        use_gpu=True,
        # Logger params:
        exp_prefix="default",
        snapshot_mode='last',
        snapshot_gap=1,
        git_infos=None,
        script_name=None,
        base_log_dir=None,
        force_randomize_seed=False,
        log_dir=None,
        **setup_logger_kwargs):
    """
    Run an experiment locally without any serialization.

    :param experiment_function: Function. `variant` will be passed in as its
    only argument.
    :param exp_prefix: Experiment prefix for the save file.
    :param variant: Dictionary passed in to `experiment_function`.
    :param exp_id: Experiment ID. Should be unique across all
    experiments. Note that one experiment may correspond to multiple seeds.
    :param seed: Seed used for this experiment.
    :param use_gpu: Run with GPU. Defaults to True.
    :param script_name: Name of the running script.
    :param log_dir: If set, use this as the log directory. Otherwise,
    the directory is auto-generated from exp_prefix.
    :return:
    """
    if variant is None:
        variant = {}
    variant['exp_id'] = str(exp_id)

    if force_randomize_seed or seed is None:
        seed = random.randint(0, 100000)
        variant['seed'] = str(seed)
    reset_execution_environment()

    actual_log_dir = setup_logger(exp_prefix=exp_prefix,
                                  variant=variant,
                                  exp_id=exp_id,
                                  seed=seed,
                                  snapshot_mode=snapshot_mode,
                                  snapshot_gap=snapshot_gap,
                                  base_log_dir=base_log_dir,
                                  log_dir=log_dir,
                                  git_infos=git_infos,
                                  script_name=script_name,
                                  **setup_logger_kwargs)

    set_seed(seed)
    set_gpu_mode(use_gpu)

    run_experiment_here_kwargs = dict(variant=variant,
                                      exp_id=exp_id,
                                      seed=seed,
                                      use_gpu=use_gpu,
                                      exp_prefix=exp_prefix,
                                      snapshot_mode=snapshot_mode,
                                      snapshot_gap=snapshot_gap,
                                      git_infos=git_infos,
                                      script_name=script_name,
                                      base_log_dir=base_log_dir,
                                      **setup_logger_kwargs)
    save_experiment_data(
        dict(run_experiment_here_kwargs=run_experiment_here_kwargs),
        actual_log_dir)
    return experiment_function(variant)
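A minimal usage sketch; the experiment function and the variant contents are placeholders.

def my_experiment(variant):
    # Placeholder experiment body; `variant` arrives with the caller's keys
    # plus the 'exp_id' (and possibly 'seed') entries injected above.
    print('running with', variant)


run_experiment_here(
    my_experiment,
    variant=dict(lr=1e-3),   # illustrative hyperparameter
    exp_prefix='demo',
    seed=0,
    use_gpu=False,
)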