import os
import sys

from comet_ml import Experiment, OfflineExperiment

sys.path.insert(0, os.path.abspath('../machine-tasks'))
from tasks import get_task
from loss import L1Loss

comet_args = {
    'project_name': 'attentive-guidance',
    'workspace': 'andresespinosapc',
}
if os.environ.get('COMET_DISABLE'):
    comet_args['disabled'] = True
    comet_args['api_key'] = ''
if os.environ.get('COMET_OFFLINE'):
    comet_args['api_key'] = ''
    comet_args['offline_directory'] = 'comet_offline'
    experiment = OfflineExperiment(**comet_args)
else:
    experiment = Experiment(**comet_args)


def log_comet_parameters(opt):
    opt_dict = vars(opt)
    for key in opt_dict.keys():
        experiment.log_parameter(key, opt_dict[key])
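# Note: comet's `log_parameters` accepts a whole dict (it is used that way
# elsewhere in these examples, e.g. `experiment.log_parameters(vars(args))`),
# so the loop above could be collapsed into
# `experiment.log_parameters(vars(opt))`.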


TASK_DEFAULT_PARAMS = {
    'task_defaults': {
        'batch_size': 128,
        'k': 3,
        'max_len': 60,
        # ... (remaining defaults truncated in the source)
    },
}
Example #2
def run_experiment_iter(i, experiment, train_iter, nExp, agent_list, env,
                        video, user_seed, experiment_name, log_params, debug,
                        project_name, sps, sps_es, **kwargs):
    """
    Function used to parallelize the run_experiment calculations.

    Parameters
    ----------
    i : int
        Index of the agent being trained.

    Raises
    ------
    NotImplementedError
        Raised when Comet is used, to signal where user intervention is
        required (namely, setting the api_key and the workspace).

    Returns
    -------
    rewards : array
        An array with the cumulative rewards, where each column corresponds to
        an agent (random seed), and each row to a training iteration.
    arms : array
        An array with the number of agent arms, where each column corresponds
        to an agent (random seed), and each row to a training iteration.
    agent : Agent
        The trained agent.

    """
    if debug:
        start = time.time()
        print("Experiment {0} out of {1}...".format(i + 1, nExp))
    if not user_seed:
        seed = int.from_bytes(os.urandom(4), 'big')
    else:
        seed = user_seed

    if experiment_name:
        raise NotImplementedError(
            "Before using Comet, you need to come here and set your API key")
        experiment = Experiment(api_key=None,
                                project_name=project_name,
                                workspace=None,
                                display_summary=False,
                                offline_directory="offline")
        experiment.add_tag(experiment_name)
        experiment.set_name("{0}_{1}".format(experiment_name, i))
        # Sometimes adding the tag fails
        log_params["experiment_tag"] = experiment_name
        experiment.log_parameters(log_params)

    agent = agent_list[i]
    if sps_es:  # This one overrides sps
        rewards, arms, agent = run_sps_es_experiment(agent,
                                                     env,
                                                     train_iter,
                                                     seed=seed,
                                                     video=video,
                                                     experiment=experiment,
                                                     **kwargs)
    elif sps:
        rewards, arms, agent = run_sps_experiment(agent,
                                                  env,
                                                  train_iter,
                                                  seed=seed,
                                                  video=video,
                                                  experiment=experiment,
                                                  **kwargs)
    else:
        rewards, arms, agent = run_aql_experiment(agent,
                                                  env,
                                                  train_iter,
                                                  seed=seed,
                                                  video=video,
                                                  experiment=experiment,
                                                  **kwargs)
    agent_list[i] = agent

    if experiment:
        experiment.end()

    if debug:
        end = time.time()
        elapsed = end - start
        units = "secs"
        if elapsed > 3600:
            elapsed /= 3600
            units = "hours"
        elif elapsed > 60:
            elapsed /= 60
            units = "mins"
        print("Time elapsed: {0:.02f} {1}".format(elapsed, units))

    return rewards, arms, agent
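# A minimal sketch (an assumption, not from the source) of how
# run_experiment_iter might be fanned out over agents with multiprocessing;
# `agent_list`, `env`, and the keyword values here are hypothetical
# placeholders:
#
#     from functools import partial
#     from multiprocessing import Pool
#
#     worker = partial(run_experiment_iter, experiment=None, train_iter=1000,
#                      nExp=len(agent_list), agent_list=agent_list, env=env,
#                      video=False, user_seed=None, experiment_name=None,
#                      log_params={}, debug=False, project_name='bandits',
#                      sps=False, sps_es=False)
#     with Pool() as pool:
#         results = pool.map(worker, range(len(agent_list)))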
Example #3
def BSN_Train_PEM(opt):
    model = PEM(opt)
    model = torch.nn.DataParallel(model).cuda()
    optimizer = optim.Adam(model.parameters(),
                           lr=opt["pem_training_lr"],
                           weight_decay=opt["pem_weight_decay"])

    print('Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    def collate_fn(batch):
        # Each dataset item is a (features, iou) pair of tensors; concatenate
        # along dim 0 to form the batch.
        batch_data = torch.cat([x[0] for x in batch])
        batch_iou = torch.cat([x[1] for x in batch])
        return batch_data, batch_iou

    train_dataset = ProposalDataSet(opt, subset="train")
    train_sampler = ProposalSampler(train_dataset.proposals,
                                    train_dataset.indices,
                                    max_zero_weight=opt['pem_max_zero_weight'])

    global_step = 0
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=model.module.batch_size,
        shuffle=False,
        sampler=train_sampler,
        num_workers=opt['data_workers'],
        pin_memory=True,
        drop_last=False,
        collate_fn=collate_fn if not opt['pem_do_index'] else None)

    subset = "validation" if opt['dataset'] == 'activitynet' else "test"
    test_loader = torch.utils.data.DataLoader(
        ProposalDataSet(opt, subset=subset),
        batch_size=model.module.batch_size,
        shuffle=True,
        num_workers=opt['data_workers'],
        pin_memory=True,
        drop_last=False,
        collate_fn=collate_fn if not opt['pem_do_index'] else None)

    milestones = [int(k) for k in opt['pem_lr_milestones'].split(',')]  # e.g. '10,20' -> [10, 20]
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=milestones, gamma=opt['pem_step_gamma'])

    if opt['log_to_comet']:
        comet_exp = CometExperiment(api_key="hIXq6lDzWzz24zgKv7RYz6blo",
                                    project_name="bsnpem",
                                    workspace="cinjon",
                                    auto_metric_logging=True,
                                    auto_output_logging=None,
                                    auto_param_logging=False)
    elif opt['local_comet_dir']:
        comet_exp = OfflineExperiment(api_key="hIXq6lDzWzz24zgKv7RYz6blo",
                                      project_name="bsnpem",
                                      workspace="cinjon",
                                      auto_metric_logging=True,
                                      auto_output_logging=None,
                                      auto_param_logging=False,
                                      offline_directory=opt['local_comet_dir'])
    else:
        comet_exp = None

    if comet_exp:
        comet_exp.log_parameters(opt)
        comet_exp.set_name(opt['name'])

    # Evaluate once before training; epoch=-1 and global_step=-1 mark this
    # run as the pre-training baseline.
    test_PEM(test_loader, model, -1, -1, comet_exp, opt)
    for epoch in range(opt["pem_epoch"]):
        global_step = train_PEM(train_loader, model, optimizer, epoch,
                                global_step, comet_exp, opt)
        test_PEM(test_loader, model, epoch, global_step, comet_exp, opt)
        scheduler.step()
Example #4
from a2c_ppo_acktr.model import Policy
from a2c_ppo_acktr.storage import RolloutStorage
from a2c_ppo_acktr.utils import get_vec_normalize, update_linear_schedule
from a2c_ppo_acktr.visualize import visdom_plot


args = get_args()

assert args.algo in ['a2c', 'ppo', 'acktr']
if args.recurrent_policy:
    assert args.algo in ['a2c', 'ppo'], \
        'Recurrent policy is not implemented for ACKTR'

if args.comet == "offline":
    experiment = OfflineExperiment(project_name="recurrent-value", workspace="nishanthvanand",
    disabled=args.disable_log, offline_directory="../comet_offline",
    parse_args=False)
elif args.comet == "online":
    experiment = Experiment(api_key="tSACzCGFcetSBTapGBKETFARf",
                        project_name="recurrent-value", workspace="nishanthvanand",
                        disabled=args.disable_log,
                        parse_args=False)
else:
    raise ValueError("args.comet must be either 'offline' or 'online'")

experiment.log_parameters(vars(args))

num_updates = int(args.num_env_steps) // args.num_steps // args.num_processes
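# e.g. num_env_steps=10e6 with num_steps=128 and num_processes=16 gives
# 10_000_000 // 128 // 16 = 4882 updates.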

torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
Example #5
    # (This snippet begins mid-function; the opening of the call closed by
    # the parenthesis below is truncated in the source.)
    )

    checkpoint_callback = skopt.callbacks.CheckpointSaver(
        f'D:\\FINKI\\8_dps\\Project\\MODELS\\skopt_checkpoints\\{EXPERIMENT_ID}.pkl'
    )
    hyperparameters_optimizer.fit(X_train,
                                  y_train,
                                  callback=[checkpoint_callback])
    skopt.dump(hyperparameters_optimizer, f'saved_models\\{EXPERIMENT_ID}.pkl')

    y_pred = hyperparameters_optimizer.best_estimator_.predict(X_test)

    for i in range(len(hyperparameters_optimizer.cv_results_['params'])):
        exp = OfflineExperiment(
            api_key='A8Lg71j9LtIrsv0deBA0DVGcR',
            project_name=ALGORITHM,
            workspace="8_dps",
            auto_output_logging='native',
            offline_directory=
            f'D:\\FINKI\\8_dps\\Project\\MODELS\\comet_ml_offline_experiments\\{EXPERIMENT_ID}'
        )
        exp.set_name(f'{EXPERIMENT_ID}_{i + 1}')
        exp.add_tags([
            DS,
            SEGMENTS_LENGTH,
        ])
        for k, v in hyperparameters_optimizer.cv_results_.items():
            if k == "params":
                exp.log_parameters(dict(v[i]))
            else:
                exp.log_metric(k, v[i])
        exp.end()
Example #6
def BSN_Train_TEM(opt):
    global_step = 0
    epoch = 0
    if opt['do_representation']:
        model = TEM(opt)
        optimizer = optim.Adam(model.parameters(),
                               lr=opt["tem_training_lr"],
                               weight_decay=opt["tem_weight_decay"])
        global_step, epoch = _maybe_load_checkpoint(
            model, optimizer, global_step, epoch,
            os.path.join(opt["checkpoint_path"], opt['name']))
        if opt['representation_checkpoint']:
            # print(model.representation_model.backbone.inception_5b_3x3.weight[0][0])
            if opt['do_random_model']:
                print('DOING RANDOM MODEL!!!')
            else:
                print('DOING PRETRAINED MODEL!!!')
                partial_load(opt['representation_checkpoint'], model)
            # print(model.representation_model.backbone.inception_5b_3x3.weight[0][0])
        if not opt['no_freeze']:
            for param in model.representation_model.parameters():
                param.requires_grad = False
        print(len(list(model.representation_model.parameters())))
    else:
        model = TEM(opt)
        optimizer = optim.Adam(model.parameters(),
                               lr=opt["tem_training_lr"],
                               weight_decay=opt["tem_weight_decay"])
        global_step, epoch = _maybe_load_checkpoint(
            model, optimizer, global_step, epoch,
            os.path.join(opt["checkpoint_path"], opt['name']))

    model = torch.nn.DataParallel(model).cuda()
    # summary(model, (2, 3, 224, 224))

    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    if opt['dataset'] == 'gymnastics':
        # default image_dir is '/checkpoint/cinjon/spaceofmotion/sep052019/rawframes.426x240.12'
        img_loading_func = get_img_loader(opt)
        train_data_set = GymnasticsImages(opt,
                                          subset='Train',
                                          img_loading_func=img_loading_func,
                                          image_dir=opt['gym_image_dir'],
                                          video_info_path=os.path.join(
                                              opt['video_info'],
                                              'Train_Annotation.csv'))
        train_sampler = GymnasticsSampler(train_data_set, opt['sampler_mode'])
        test_data_set = GymnasticsImages(opt,
                                         subset="Val",
                                         img_loading_func=img_loading_func,
                                         image_dir=opt['gym_image_dir'],
                                         video_info_path=os.path.join(
                                             opt['video_info'],
                                             'Val_Annotation.csv'))
    elif opt['dataset'] == 'gymnasticsfeatures':
        # feature_dirs should roughly look like:
        # /checkpoint/cinjon/spaceofmotion/sep052019/tsn.1024.426x240.12.no-oversample/csv/rgb,/checkpoint/cinjon/spaceofmotion/sep052019/tsn.1024.426x240.12.no-oversample/csv/flow
        feature_dirs = opt['feature_dirs'].split(',')
        train_data_set = GymnasticsFeatures(opt,
                                            subset='Train',
                                            feature_dirs=feature_dirs,
                                            video_info_path=os.path.join(
                                                opt['video_info'],
                                                'Train_Annotation.csv'))
        test_data_set = GymnasticsFeatures(opt,
                                           subset='Val',
                                           feature_dirs=feature_dirs,
                                           video_info_path=os.path.join(
                                               opt['video_info'],
                                               'Val_Annotation.csv'))
        train_sampler = None
    elif opt['dataset'] == 'thumosfeatures':
        feature_dirs = opt['feature_dirs'].split(',')
        train_data_set = ThumosFeatures(opt,
                                        subset='Val',
                                        feature_dirs=feature_dirs)
        test_data_set = ThumosFeatures(opt,
                                       subset="Test",
                                       feature_dirs=feature_dirs)
        train_sampler = None
    elif opt['dataset'] == 'thumosimages':
        img_loading_func = get_img_loader(opt)
        train_data_set = ThumosImages(
            opt,
            subset='Val',
            img_loading_func=img_loading_func,
            image_dir=
            '/checkpoint/cinjon/thumos/rawframes.TH14_validation_tal.30',
            video_info_path=os.path.join(opt['video_info'],
                                         'Val_Annotation.csv'))
        test_data_set = ThumosImages(
            opt,
            subset='Test',
            img_loading_func=img_loading_func,
            image_dir='/checkpoint/cinjon/thumos/rawframes.TH14_test_tal.30',
            video_info_path=os.path.join(opt['video_info'],
                                         'Test_Annotation.csv'))
        train_sampler = None
    elif opt['dataset'] == 'activitynet':
        train_sampler = None
        representation_module = opt['representation_module']
        train_transforms = get_video_transforms(representation_module,
                                                opt['do_augment'])
        test_transforms = get_video_transforms(representation_module, False)
        train_data_set = VideoDataset(opt,
                                      train_transforms,
                                      subset='train',
                                      fraction=0.3)
        # We use val because we don't have annotations for test.
        test_data_set = VideoDataset(opt,
                                     test_transforms,
                                     subset='val',
                                     fraction=0.3)

    print('train_loader / val_loader sizes: ', len(train_data_set),
          len(test_data_set))
    train_loader = torch.utils.data.DataLoader(
        train_data_set,
        batch_size=model.module.batch_size,
        shuffle=False if train_sampler else True,
        sampler=train_sampler,
        num_workers=opt['data_workers'],
        pin_memory=True,
        drop_last=False)

    test_loader = torch.utils.data.DataLoader(
        test_data_set,
        batch_size=model.module.batch_size,
        shuffle=False,
        num_workers=opt['data_workers'],
        pin_memory=True,
        drop_last=False)
    # test_loader = None

    milestones = [int(k) for k in opt['tem_lr_milestones'].split(',')]
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=milestones, gamma=opt['tem_step_gamma'])

    if opt['log_to_comet']:
        comet_exp = CometExperiment(api_key="hIXq6lDzWzz24zgKv7RYz6blo",
                                    project_name="bsn",
                                    workspace="cinjon",
                                    auto_metric_logging=True,
                                    auto_output_logging=None,
                                    auto_param_logging=False)
    elif opt['local_comet_dir']:
        comet_exp = OfflineExperiment(api_key="hIXq6lDzWzz24zgKv7RYz6blo",
                                      project_name="bsn",
                                      workspace="cinjon",
                                      auto_metric_logging=True,
                                      auto_output_logging=None,
                                      auto_param_logging=False,
                                      offline_directory=opt['local_comet_dir'])
    else:
        comet_exp = None

    if comet_exp:
        comet_exp.log_parameters(opt)
        comet_exp.set_name(opt['name'])

    # test_TEM(test_loader, model, optimizer, 0, 0, comet_exp, opt)
    for epoch in range(epoch + 1, opt["tem_epoch"] + 1):
        global_step = train_TEM(train_loader, model, optimizer, epoch,
                                global_step, comet_exp, opt)
        test_TEM(test_loader, model, optimizer, epoch, global_step, comet_exp,
                 opt)
        if opt['dataset'] == 'activitynet':
            test_loader.dataset._subset_dataset(.3)
            train_loader.dataset._subset_dataset(.3)
        scheduler.step()
Example #7
    def _create_experiment(self, parameters, pid, trial, count,
                           experiment_kwargs):
        # type: (Any, Any, Any, Any, Dict[str, Any]) -> BaseExperiment
        """
        Instantiates an Experiment, OfflineExperiment, or
        callable.
        """
        from comet_ml import Experiment, OfflineExperiment

        LOGGER.debug("Creating a %r with %r parameters", self.experiment_class,
                     parameters)

        if not experiment_kwargs:
            # Fallback on deprecated experiment kwargs given at Optimizer creation
            experiment_kwargs = self.experiment_kwargs

        # Inject the API Key if not set
        if "api_key" not in experiment_kwargs and self.api_key is not None:
            experiment_kwargs["api_key"] = self.api_key

        if self.experiment_class == "Experiment":
            exp = Experiment(**experiment_kwargs)  # type: BaseExperiment
        elif self.experiment_class == "OfflineExperiment":
            exp = OfflineExperiment(**experiment_kwargs)  # type: BaseExperiment
        elif callable(self.experiment_class):
            exp = self.experiment_class(**experiment_kwargs)
        else:
            raise OptimizerException("Invalid experiment_class: %s" %
                                     self.experiment_class)

        exp._set_optimizer(self, pid, trial, count)

        if self.predictor_spec:
            LOGGER.debug("Setting predictor in experiment: %s",
                         self.predictor_spec)

            if isinstance(exp, OfflineExperiment):
                mode = "local"
            else:
                # This should be at least global as it is set in Optimizer._fill_defaults_predictor
                mode = self.predictor_spec["mode"]

            # Old versions of the Python SDK used to store predictor_spec with
            # an empty optimizer_id, which would lead to "TypeError: type
            # object got multiple values for keyword argument 'optimizer_id'".
            # Also pop mode, as it is passed explicitly.
            predictor_spec = self.predictor_spec.copy()
            predictor_spec.pop("optimizer_id", None)
            predictor_spec.pop("mode", None)

            try:
                exp.set_predictor(
                    Predictor(exp,
                              optimizer_id=self.id,
                              mode=mode,
                              **predictor_spec))
            except Exception:
                LOGGER.warning(
                    "Failure to create Predictor, experiment will run without Predictor",
                    exc_info=True,
                )

        exp.log_parameters(parameters)
        # Log optimizer static information:
        exp.log_other("optimizer_id", self.id)
        exp.log_other("optimizer_pid", pid)
        exp.log_other("optimizer_trial", trial)
        return exp
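    # For context, a hedged sketch of the public comet_ml Optimizer loop that
    # ultimately drives _create_experiment (names follow the documented API;
    # the config dict here is a hypothetical example):
    #
    #     from comet_ml import Optimizer
    #     opt = Optimizer({"algorithm": "bayes",
    #                      "parameters": {"lr": {"type": "float",
    #                                            "min": 1e-5, "max": 1e-1}},
    #                      "spec": {"metric": "loss", "maxCombo": 20}})
    #     for experiment in opt.get_experiments(project_name="my-project"):
    #         lr = experiment.get_parameter("lr")
    #         experiment.log_metric("loss", train(lr))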
Example #8
class CometLogger(LightningLoggerBase):
    r"""
    Log using `comet.ml <https://www.comet.ml>`_.
    """
    def __init__(self,
                 api_key: Optional[str] = None,
                 save_dir: Optional[str] = None,
                 workspace: Optional[str] = None,
                 project_name: Optional[str] = None,
                 rest_api_key: Optional[str] = None,
                 experiment_name: Optional[str] = None,
                 experiment_key: Optional[str] = None,
                 **kwargs):
        r"""

        Requires either an API Key (online mode) or a local directory path (offline mode)

        .. code-block:: python

            # ONLINE MODE
            from pytorch_lightning.loggers import CometLogger
            # arguments made to CometLogger are passed on to the comet_ml.Experiment class
            comet_logger = CometLogger(
                api_key=os.environ["COMET_API_KEY"],
                workspace=os.environ["COMET_WORKSPACE"], # Optional
                project_name="default_project", # Optional
                rest_api_key=os.environ["COMET_REST_API_KEY"], # Optional
                experiment_name="default" # Optional
            )
            trainer = Trainer(logger=comet_logger)

        .. code-block:: python

            # OFFLINE MODE
            from pytorch_lightning.loggers import CometLogger
            # arguments made to CometLogger are passed on to the comet_ml.Experiment class
            comet_logger = CometLogger(
                save_dir=".",
                workspace=os.environ["COMET_WORKSPACE"], # Optional
                project_name="default_project", # Optional
                rest_api_key=os.environ["COMET_REST_API_KEY"], # Optional
                experiment_name="default" # Optional
            )
            trainer = Trainer(logger=comet_logger)

        Args:
            api_key (str): Required in online mode. API key, found on Comet.ml
            save_dir (str): Required in offline mode. The path for the directory to save local comet logs
            workspace (str): Optional. Name of workspace for this user
            project_name (str): Optional. Send your experiment to a specific project.
                Otherwise it will be sent to Uncategorized Experiments.
                If the project name does not already exist, Comet.ml will create a new project.
            rest_api_key (str): Optional. Rest API key found in Comet.ml settings.
                This is used to determine version number
            experiment_name (str): Optional. String representing the name for this particular experiment on Comet.ml.
            experiment_key (str): Optional. If set, restores from existing experiment.
        """
        super().__init__()
        self._experiment = None

        # Determine online or offline mode based on which arguments were passed to CometLogger
        if api_key is not None:
            self.mode = "online"
            self.api_key = api_key
        elif save_dir is not None:
            self.mode = "offline"
            self.save_dir = save_dir
        else:
            # If neither api_key nor save_dir are passed as arguments, raise an exception
            raise MisconfigurationException(
                "CometLogger requires either api_key or save_dir during initialization."
            )

        log.info(f"CometLogger will be initialized in {self.mode} mode")

        self.workspace = workspace
        self.project_name = project_name
        self.experiment_key = experiment_key
        self._kwargs = kwargs

        if rest_api_key is not None:
            # Comet.ml rest API, used to determine version number
            self.rest_api_key = rest_api_key
            self.comet_api = API(self.rest_api_key)
        else:
            self.rest_api_key = None
            self.comet_api = None

        if experiment_name:
            try:
                self.name = experiment_name
            except TypeError:
                log.exception(
                    "Failed to set experiment name for comet.ml logger")

    @property
    def experiment(self) -> CometBaseExperiment:
        r"""

        Actual comet object. To use comet features do the following.

        Example::

            self.logger.experiment.some_comet_function()

        """
        if self._experiment is not None:
            return self._experiment

        if self.mode == "online":
            if self.experiment_key is None:
                self._experiment = CometExperiment(
                    api_key=self.api_key,
                    workspace=self.workspace,
                    project_name=self.project_name,
                    **self._kwargs)
                self.experiment_key = self._experiment.get_key()
            else:
                self._experiment = CometExistingExperiment(
                    api_key=self.api_key,
                    workspace=self.workspace,
                    project_name=self.project_name,
                    previous_experiment=self.experiment_key,
                    **self._kwargs)
        else:
            self._experiment = CometOfflineExperiment(
                offline_directory=self.save_dir,
                workspace=self.workspace,
                project_name=self.project_name,
                **self._kwargs)

        return self._experiment

    @rank_zero_only
    def log_hyperparams(self, params: Union[Dict[str, Any],
                                            Namespace]) -> None:
        params = self._convert_params(params)
        params = self._flatten_dict(params)
        self.experiment.log_parameters(params)

    @rank_zero_only
    def log_metrics(self,
                    metrics: Dict[str, Union[torch.Tensor, float]],
                    step: Optional[int] = None) -> None:
        # Comet.ml expects metrics to be a dictionary of detached tensors on CPU
        for key, val in metrics.items():
            if is_tensor(val):
                metrics[key] = val.cpu().detach()

        self.experiment.log_metrics(metrics, step=step)

    def reset_experiment(self):
        self._experiment = None

    @rank_zero_only
    def finalize(self, status: str) -> None:
        r"""
        When calling self.experiment.end(), that experiment won't log any more data to Comet. That's why, if you need
        to log any more data you need to create an ExistingCometExperiment. For example, to log data when testing your
        model after training, because when training is finalized CometLogger.finalize is called.

        This happens automatically in the CometLogger.experiment property, when self._experiment is set to None
        i.e. self.reset_experiment().
        """
        self.experiment.end()
        self.reset_experiment()
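    # Usage note: after `finalize` ends and resets the run (e.g. at the end
    # of `Trainer.fit`), the next access to `self.experiment` in online mode
    # re-creates it as a CometExistingExperiment, because `experiment_key`
    # was recorded when the experiment was first created (see the property
    # above).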

    @property
    def name(self) -> str:
        return self.experiment.project_name

    @name.setter
    def name(self, value: str) -> None:
        self.experiment.set_name(value)

    @property
    def version(self) -> str:
        return self.experiment.id
Example #9
def _get_comet_experiment():
    experiment = OfflineExperiment(project_name='general',
                                   workspace='benjaminbenoit',
                                   offline_directory="../transformer_net_comet_experiences")
    experiment.set_name("TransformerNet")
    return experiment
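# Note: experiments written offline like this are archived under
# `offline_directory` and must be uploaded to Comet afterwards; Example #10
# below mentions `comet-upload file.zip` for this.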
Example #10
def get_params():
    parser = argparse.ArgumentParser(description='Perm')
    # Hparams
    padd = parser.add_argument
    padd('--batch-size',
         type=int,
         default=64,
         metavar='N',
         help='input batch size for training (default: 64)')
    padd('--latent_dim',
         type=int,
         default=20,
         metavar='N',
         help='Latent dim for VAE')
    padd('--lr',
         type=float,
         default=0.01,
         metavar='LR',
         help='learning rate (default: 0.01)')
    padd('--momentum',
         type=float,
         default=0.5,
         metavar='M',
         help='SGD momentum (default: 0.5)')
    padd('--latent_size',
         type=int,
         default=50,
         metavar='N',
         help='Size of latent distribution (default: 50)')
    padd('--estimator',
         default='reinforce',
         const='reinforce',
         nargs='?',
         choices=['reinforce', 'lax'],
         help='Grad estimator for noise (default: %(default)s)')
    padd('--reward',
         default='soft',
         const='soft',
         nargs='?',
         choices=['soft', 'hard'],
         help='Reward for grad estimator (default: %(default)s)')

    # Training
    padd('--epochs',
         type=int,
         default=10,
         metavar='N',
         help='number of epochs to train (default: 10)')
    padd('--PGD_steps',
         type=int,
         default=40,
         metavar='N',
         help='max gradient steps (default: 40)')
    padd('--max_iter',
         type=int,
         default=20,
         metavar='N',
         help='max gradient steps (default: 20)')
    padd('--max_batches',
         type=int,
         default=None,
         metavar='N',
         help=
         'max number of batches per epoch, used for debugging (default: None)')
    padd('--epsilon',
         type=float,
         default=0.5,
         metavar='M',
         help='Epsilon for Delta (default: 0.5)')
    padd('--LAMBDA',
         type=float,
         default=100,
         metavar='M',
         help='Lambda for L2 lagrange penalty (default: 100)')
    padd('--nn_temp',
         type=float,
         default=1.0,
         metavar='M',
         help='Starting diff. nearest neighbour temp (default: 1.0)')
    padd('--temp_decay_rate',
         type=float,
         default=0.9,
         metavar='M',
         help='Nearest neighbour temp decay rate (default: 0.9)')
    padd('--temp_decay_schedule',
         type=float,
         default=100,
         metavar='M',
         help='How many batches before decay (default: 100)')
    padd('--bb_steps',
         type=int,
         default=2000,
         metavar='N',
         help='Max black box steps per sample (default: 2000)')
    padd('--attack_epochs',
         type=int,
         default=10,
         metavar='N',
         help='Max number of epochs to train G')
    padd('--seed',
         type=int,
         default=1,
         metavar='S',
         help='random seed (default: 1)')
    # NOTE: argparse maps '-' to '_', so this shares dest 'batch_size' with
    # '--batch-size' above; beware the conflicting defaults (64 vs 256).
    padd('--batch_size', type=int, default=256, metavar='S', help='Batch size')
    padd('--embedding_dim', type=int, default=300, help='embedding_dim')
    padd('--embedding_type',
         type=str,
         default="non-static",
         help='embedding_type')
    padd('--test_batch_size',
         type=int,
         default=128,
         metavar='N',
         help='Test Batch size. 256 requires 12GB GPU memory')
    padd('--test',
         default=False,
         action='store_true',
         help='just test model and print accuracy')
    padd('--deterministic_G',
         default=False,
         action='store_true',
         help='Auto-encoder, no VAE')
    padd('--resample_test',
         default=False,
         action='store_true',
         help='Load model and test resampling capability')
    padd('--resample_iterations',
         type=int,
         default=100,
         metavar='N',
         help='How many times to resample (default: 100)')
    padd('--clip_grad',
         default=True,
         action='store_true',
         help='Clip grad norm')
    padd('--train_vae', default=False, action='store_true', help='Train VAE')
    padd('--train_ae', default=False, action='store_true', help='Train AE')
    padd('--use_flow',
         default=False,
         action='store_true',
         help='Add A NF to Generator')
    padd('--carlini_loss',
         default=False,
         action='store_true',
         help='Use CW loss function')
    padd('--vanilla_G',
         default=False,
         action='store_true',
         help='Vanilla G White Box')
    padd('--prepared_data',
         default='dataloader/prepared_data.pickle',
         help='Test on a single data')

    # Imported Model Params
    padd('--emsize', type=int, default=300, help='size of word embeddings')
    padd('--nhidden',
         type=int,
         default=300,
         help='number of hidden units per layer in LSTM')
    padd('--nlayers', type=int, default=2, help='number of layers')
    padd('--noise_radius',
         type=float,
         default=0.2,
         help='stdev of noise for autoencoder (regularizer)')
    padd('--noise_anneal',
         type=float,
         default=0.995,
         help='anneal noise_radius exponentially by this every 100 iterations')
    padd('--hidden_init',
         action='store_true',
         help="initialize decoder hidden state with encoder's")
    padd('--arch_i',
         type=str,
         default='300-300',
         help='inverter architecture (MLP)')
    padd('--arch_g',
         type=str,
         default='300-300',
         help='generator architecture (MLP)')
    padd('--arch_d',
         type=str,
         default='300-300',
         help='critic/discriminator architecture (MLP)')
    padd('--arch_conv_filters',
         type=str,
         default='500-700-1000',
         help='encoder filter sizes for different convolutional layers')
    padd('--arch_conv_strides',
         type=str,
         default='1-2-2',
         help='encoder strides for different convolutional layers')
    padd('--arch_conv_windows',
         type=str,
         default='3-3-3',
         help='encoder window sizes for different convolutional layers')
    padd('--z_size',
         type=int,
         default=100,
         help='dimension of random noise z to feed into generator')
    padd('--temp',
         type=float,
         default=1,
         help='softmax temperature (lower --> more discrete)')
    padd('--enc_grad_norm',
         type=bool,
         default=True,
         help='norm code gradient from critic->encoder')
    padd('--train_emb', type=bool, default=True, help='Train Glove Embeddings')
    padd('--gan_toenc',
         type=float,
         default=-0.01,
         help='weight factor passing gradient from gan to encoder')
    padd('--dropout',
         type=float,
         default=0.0,
         help='dropout applied to layers (0 = no dropout)')
    padd('--useJS',
         type=bool,
         default=True,
         help='use Jensen-Shannon distance')
    padd('--perturb_z',
         type=bool,
         default=True,
         help='perturb noise space z instead of hidden c')
    padd('--max_seq_len', type=int, default=200, help='max_seq_len')
    padd('--gamma', type=float, default=0.95, help='Discount Factor')
    padd('--model',
         type=str,
         default="lstm_arch",
         help='classification model name')
    padd('--distance_func',
         type=str,
         default="cosine",
         help='NN distance function')
    padd('--hidden_dim', type=int, default=128, help='hidden_dim')
    padd('--burn_in', type=int, default=500, help='Train VAE burnin')
    padd('--beta', type=float, default=0., help='Entropy reg')
    padd('--embedding_training',
         type=bool,
         default=False,
         help='embedding_training')
    padd('--seqgan_reward',
         action='store_true',
         default=False,
         help='use seq gan reward')
    padd('--train_classifier',
         action='store_true',
         default=False,
         help='Train Classifier from scratch')
    padd('--diff_nn',
         action='store_true',
         default=False,
         help='Backprop through Nearest Neighbors')
    # Bells
    padd('--no-cuda',
         action='store_true',
         default=False,
         help='disables CUDA training')
    padd('--data_parallel',
         action='store_true',
         default=False,
         help="Use multiple GPUs")
    padd('--save_adv_samples',
         action='store_true',
         default=False,
         help='Write adversarial samples to disk')
    padd('--nearest_neigh_all',
         action='store_true',
         default=False,
         help='Evaluate near. neig. for whole evaluation set')
    padd("--comet",
         action="store_true",
         default=False,
         help='Use comet for logging')
    padd(
        "--offline_comet",
        action="store_true",
        default=False,
        help=
        'Use comet offline. To upload, after training run: comet-upload file.zip'
    )
    padd("--comet_username",
         type=str,
         default="joeybose",
         help='Username for comet logging')
    padd("--comet_apikey", type=str,\
            default="Ht9lkWvTm58fRo9ccgpabq5zV",help='Api for comet logging')
    padd('--debug', default=False, action='store_true', help='Debug')
    padd('--debug_neighbour',
         default=False,
         action='store_true',
         help='Debug nearest neighbour training')
    padd('--load_model',
         default=False,
         action='store_true',
         help='Whether to load a checkpointed model')
    padd('--save_model',
         default=False,
         action='store_true',
         help='Whether to checkpoint the model')
    padd('--model_path', type=str, default="saved_models/lstm_torchtext2.pt",\
                        help='where to save/load target model')
    padd('--adv_model_path', type=str, default="saved_models/adv_model.pt",\
                        help='where to save/load adversarial')
    padd('--no_load_embedding',
         action='store_false',
         default=True,
         help='load Glove embeddings')
    padd('--namestr', type=str, default='BMD Text', \
            help='additional info in output filename to describe experiments')
    padd('--dataset', type=str, default="imdb", help='dataset')
    padd('--clip', type=float, default=1, help='gradient clipping, max norm')
    padd('--use_glove', type=str, default="true", help='whether to use GloVe embeddings ("true"/"false")')
    args = parser.parse_args()
    args.classes = 2
    args.sample_file = "temp/adv_samples.txt"
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    # Check if settings file
    if os.path.isfile("settings.json"):
        with open('settings.json') as f:
            data = json.load(f)
        args.comet_apikey = data["apikey"]
        args.comet_username = data["username"]

    # Prep file to save adversarial samples
    if args.save_adv_samples:
        now = datetime.datetime.now()
        if os.path.exists(args.sample_file):
            os.remove(args.sample_file)
        with open(args.sample_file, 'w') as f:
            f.write("Adversarial samples starting:\n{}\n".format(now))

    # Comet logging
    args.device = torch.device("cuda" if use_cuda else "cpu")
    if args.comet and not args.offline_comet:
        experiment = Experiment(api_key=args.comet_apikey,
                                project_name="black-magic-design",
                                workspace=args.comet_username)
    elif args.offline_comet:
        offline_path = "temp/offline_comet"
        if not os.path.exists(offline_path):
            os.makedirs(offline_path)
        from comet_ml import OfflineExperiment
        experiment = OfflineExperiment(project_name="black-magic-design",
                                       workspace=args.comet_username,
                                       offline_directory=offline_path)

    # To upload offline comet, run: comet-upload file.zip
    if args.comet or args.offline_comet:
        experiment.set_name(args.namestr)

        def log_text(self, msg):
            # Change line breaks for html breaks
            msg = msg.replace('\n', '<br>')
            self.log_html("<p>{}</p>".format(msg))

        experiment.log_text = MethodType(log_text, experiment)
        args.experiment = experiment

    return args
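# Hypothetical usage of the parser above:
#     args = get_params()
#     if args.comet or args.offline_comet:
#         args.experiment.log_text('adversarial run started')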
Example #11
    hidden_size = configuration['hidden_size']
    n_layers = configuration['n_layers']
    flattened = False
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Set all seeds for full reproducibility
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

    # Set up Comet experiment tracking. Replace this with appropriate Comet
    # workspaces. NOTE (assumption): the first positional argument below looks
    # like an API key, but it appears to land in OfflineExperiment's
    # project_name slot; offline experiments do not need an API key, so this
    # is likely a leftover from an online Experiment call.
    experiment = OfflineExperiment("z15Um8oxWZwiXQXZxZKGh48cl",
                                   workspace='swechhachoudhary',
                                   offline_directory="../swechhas_experiments")

    experiment.set_name(
        name=args.config +
        "_dim={}_split={}".format(latent_dim, train_unlabeled_split))
    experiment.log_parameters(configuration)

    if encoding_model == 'pca':
        encoding_model = PCAEncoder(seed)
        flattened = True
    elif encoding_model == 'vae':
        encoding_model = VAE(latent_dim=latent_dim).to(device)
        flattened = True
    elif encoding_model == "ae":
        encoding_model = AE(latent_dim=latent_dim).to(device)
Example #12
    if not args.debug:
        # No-op ipdb breakpoints when not debugging.
        ipdb.set_trace = lambda: None

    # Comet logging
    args.device = torch.device("cuda" if use_cuda else "cpu")
    if args.comet and not args.offline_comet:
        experiment = Experiment(api_key=args.comet_apikey,
                                project_name="black-magic-design",
                                workspace=args.comet_username)
    elif args.offline_comet:
        offline_path = "temp/offline_comet"
        if not os.path.exists(offline_path):
            os.makedirs(offline_path)
        from comet_ml import OfflineExperiment
        experiment = OfflineExperiment(project_name="black-magic-design",
                                       workspace=args.comet_username,
                                       offline_directory=offline_path)

    # To upload offline comet, run: comet-upload file.zip
    if args.comet or args.offline_comet:
        experiment.set_name(args.namestr)

        def log_text(self, msg):
            # Change line breaks for html breaks
            msg = msg.replace('\n', '<br>')
            self.log_html("<p>{}</p>".format(msg))

        experiment.log_text = MethodType(log_text, experiment)
        args.experiment = experiment

    main(args)
Example #13
class CometLogger(LightningLoggerBase):
    r"""
    Log using `Comet.ml <https://www.comet.ml>`_. Install it with pip:

    .. code-block:: bash

        pip install comet-ml

    Comet requires either an API Key (online mode) or a local directory path (offline mode).

    **ONLINE MODE**

    Example:
        >>> import os
        >>> from pytorch_lightning import Trainer
        >>> from pytorch_lightning.loggers import CometLogger
        >>> # arguments made to CometLogger are passed on to the comet_ml.Experiment class
        >>> comet_logger = CometLogger(
        ...     api_key=os.environ.get('COMET_API_KEY'),
        ...     workspace=os.environ.get('COMET_WORKSPACE'),  # Optional
        ...     save_dir='.',  # Optional
        ...     project_name='default_project',  # Optional
        ...     rest_api_key=os.environ.get('COMET_REST_API_KEY'),  # Optional
        ...     experiment_name='default'  # Optional
        ... )
        >>> trainer = Trainer(logger=comet_logger)

    **OFFLINE MODE**

    Example:
        >>> from pytorch_lightning.loggers import CometLogger
        >>> # arguments made to CometLogger are passed on to the comet_ml.Experiment class
        >>> comet_logger = CometLogger(
        ...     save_dir='.',
        ...     workspace=os.environ.get('COMET_WORKSPACE'),  # Optional
        ...     project_name='default_project',  # Optional
        ...     rest_api_key=os.environ.get('COMET_REST_API_KEY'),  # Optional
        ...     experiment_name='default'  # Optional
        ... )
        >>> trainer = Trainer(logger=comet_logger)

    Args:
        api_key: Required in online mode. API key, found on Comet.ml. If not given, this
            will be loaded from the environment variable COMET_API_KEY or ~/.comet.config
            if either exists.
        save_dir: Required in offline mode. The path for the directory to save local
            comet logs. If given, this also sets the directory for saving checkpoints.
        workspace: Optional. Name of workspace for this user
        project_name: Optional. Send your experiment to a specific project.
            Otherwise will be sent to Uncategorized Experiments.
            If the project name does not already exist, Comet.ml will create a new project.
        rest_api_key: Optional. Rest API key found in Comet.ml settings.
            This is used to determine version number
        experiment_name: Optional. String representing the name for this particular experiment on Comet.ml.
        experiment_key: Optional. If set, restores from existing experiment.
        offline: If api_key and save_dir are both given, this determines whether
            the experiment will be in online or offline mode. This is useful if you use
            save_dir to control the checkpoints directory and have a ~/.comet.config
            file but still want to run offline experiments.
    """
    def __init__(self,
                 api_key: Optional[str] = None,
                 save_dir: Optional[str] = None,
                 workspace: Optional[str] = None,
                 project_name: Optional[str] = None,
                 rest_api_key: Optional[str] = None,
                 experiment_name: Optional[str] = None,
                 experiment_key: Optional[str] = None,
                 offline: bool = False,
                 **kwargs):

        if not _COMET_AVAILABLE:
            raise ImportError(
                'You want to use `comet_ml` logger which is not installed yet,'
                ' install it with `pip install comet-ml`.')
        super().__init__()
        self._experiment = None

        # Determine online or offline mode based on which arguments were passed to CometLogger
        api_key = api_key or get_api_key(None, get_config())

        if api_key is not None and save_dir is not None:
            self.mode = "offline" if offline else "online"
            self.api_key = api_key
            self._save_dir = save_dir
        elif api_key is not None:
            self.mode = "online"
            self.api_key = api_key
        elif save_dir is not None:
            self.mode = "offline"
            self._save_dir = save_dir
        else:
            # If neither api_key nor save_dir are passed as arguments, raise an exception
            raise MisconfigurationException(
                "CometLogger requires either api_key or save_dir during initialization."
            )

        log.info(f"CometLogger will be initialized in {self.mode} mode")

        self.workspace = workspace
        self.project_name = project_name
        self.experiment_key = experiment_key
        self._kwargs = kwargs

        if rest_api_key is not None:
            # Comet.ml rest API, used to determine version number
            self.rest_api_key = rest_api_key
            self.comet_api = API(self.rest_api_key)
        else:
            self.rest_api_key = None
            self.comet_api = None

        if experiment_name:
            self.experiment.set_name(experiment_name)

    @property
    @rank_zero_experiment
    def experiment(self) -> CometBaseExperiment:
        r"""
        Actual Comet object. To use Comet features in your
        :class:`~pytorch_lightning.core.lightning.LightningModule` do the following.

        Example::

            self.logger.experiment.some_comet_function()

        """
        if self._experiment is not None:
            return self._experiment

        if self.mode == "online":
            if self.experiment_key is None:
                self._experiment = CometExperiment(
                    api_key=self.api_key,
                    workspace=self.workspace,
                    project_name=self.project_name,
                    **self._kwargs)
                self.experiment_key = self._experiment.get_key()
            else:
                self._experiment = CometExistingExperiment(
                    api_key=self.api_key,
                    workspace=self.workspace,
                    project_name=self.project_name,
                    previous_experiment=self.experiment_key,
                    **self._kwargs)
        else:
            self._experiment = CometOfflineExperiment(
                offline_directory=self.save_dir,
                workspace=self.workspace,
                project_name=self.project_name,
                **self._kwargs)

        return self._experiment

    @rank_zero_only
    def log_hyperparams(self, params: Union[Dict[str, Any],
                                            Namespace]) -> None:
        params = self._convert_params(params)
        params = self._flatten_dict(params)
        self.experiment.log_parameters(params)

    @rank_zero_only
    def log_metrics(self,
                    metrics: Dict[str, Union[torch.Tensor, float]],
                    step: Optional[int] = None) -> None:
        assert rank_zero_only.rank == 0, 'experiment tried to log from global_rank != 0'

        # Comet.ml expects metrics to be a dictionary of detached tensors on CPU
        for key, val in metrics.items():
            if is_tensor(val):
                metrics[key] = val.cpu().detach()

        self.experiment.log_metrics(metrics, step=step)

    def reset_experiment(self):
        self._experiment = None

    @rank_zero_only
    def finalize(self, status: str) -> None:
        r"""
        When calling ``self.experiment.end()``, that experiment won't log any more data to Comet.
        That's why, if you need to log any more data, you need to create an ExistingCometExperiment.
        For example, to log data when testing your model after training, because when training is
        finalized :meth:`CometLogger.finalize` is called.

        This happens automatically in the :meth:`~CometLogger.experiment` property, when
        ``self._experiment`` is set to ``None``, i.e. ``self.reset_experiment()``.
        """
        self.experiment.end()
        self.reset_experiment()

    @property
    def save_dir(self) -> Optional[str]:
        return self._save_dir

    @property
    def name(self) -> str:
        return str(self.experiment.project_name)

    @property
    def version(self) -> str:
        return self.experiment.id

    def __getstate__(self):
        # Drop the live experiment handle so the logger stays picklable; it
        # is lazily re-created via the `experiment` property after unpickling.
        state = self.__dict__.copy()
        state["_experiment"] = None
        return state
Example #14
class CometLogger(Logger):
    def __init__(
        self,
        batch_size: int,
        snapshot_dir: Optional[str] = None,
        snapshot_mode: str = "last",
        snapshot_gap: int = 1,
        exp_set: Optional[str] = None,
        use_print_exp: bool = False,
        saved_exp: Optional[str] = None,
        **kwargs,
    ):
        """
        :param kwargs: passed to comet's Experiment at init.
        """
        if use_print_exp:
            self.experiment = PrintExperiment()
        else:
            from comet_ml import Experiment, ExistingExperiment, OfflineExperiment

            if saved_exp:
                self.experiment = ExistingExperiment(
                    previous_experiment=saved_exp, **kwargs
                )
            else:
                try:
                    self.experiment = Experiment(**kwargs)
                except ValueError:  # no API key
                    log_dir = Path.home() / "logs"
                    log_dir.mkdir(exist_ok=True)
                    self.experiment = OfflineExperiment(offline_directory=str(log_dir))

        self.experiment.log_parameter("complete", False)
        if exp_set:
            self.experiment.log_parameter("exp_set", exp_set)
        if snapshot_dir:
            snapshot_dir = Path(snapshot_dir) / self.experiment.get_key()
        # log_traj_window (int): How many trajectories to hold in deque for computing performance statistics.
        self.log_traj_window = 100
        self._cum_metrics = {
            "n_unsafe_actions": 0,
            "constraint_used": 0,
            "cum_completed_trajs": 0,
            "logging_time": 0,
        }
        self._new_completed_trajs = 0
        self._last_step = 0
        self._start_time = self._last_time = time()
        self._last_snapshot_upload = 0
        self._snapshot_upload_time = 30 * 60  # upload at most every 30 minutes

        super().__init__(batch_size, snapshot_dir, snapshot_mode, snapshot_gap)

    def log_fast(
        self,
        step: int,
        traj_infos: Sequence[Dict[str, float]],
        opt_info: Optional[Tuple[Sequence[float], ...]] = None,
        test: bool = False,
    ) -> None:
        if not traj_infos:
            return
        start = time()

        self._new_completed_trajs += len(traj_infos)
        self._cum_metrics["cum_completed_trajs"] += len(traj_infos)
        # TODO: do we need to support sum(t[k]) if key in k?
        # without that, this doesn't include anything from extra eval samplers
        for key in self._cum_metrics:
            if key == "cum_completed_trajs":
                continue
            self._cum_metrics[key] += sum(t.get(key, 0) for t in traj_infos)
        self._cum_metrics["logging_time"] += time() - start

    def log(
        self,
        step: int,
        traj_infos: Sequence[Dict[str, float]],
        opt_info: Optional[Tuple[Sequence[float], ...]] = None,
        test: bool = False,
    ):
        self.log_fast(step, traj_infos, opt_info, test)
        start = time()
        with (self.experiment.test() if test else nullcontext()):
            step *= self.batch_size
            if opt_info is not None:
                # grad norm is left on the GPU for some reason
                # https://github.com/astooke/rlpyt/issues/163
                self.experiment.log_metrics(
                    {
                        k: np.mean(v)
                        for k, v in opt_info._asdict().items()
                        if k != "gradNorm"
                    },
                    step=step,
                )

            if traj_infos:
                agg_vals = {}
                for key in traj_infos[0].keys():
                    if key in self._cum_metrics:
                        continue
                    agg_vals[key] = sum(t[key] for t in traj_infos) / len(traj_infos)
                self.experiment.log_metrics(agg_vals, step=step)

            if not test:
                now = time()
                self.experiment.log_metrics(
                    {
                        "new_completed_trajs": self._new_completed_trajs,
                        "steps_per_second": (step - self._last_step)
                        / (now - self._last_time),
                    },
                    step=step,
                )
                self._last_time = now
                self._last_step = step
                self._new_completed_trajs = 0

        self.experiment.log_metrics(self._cum_metrics, step=step)
        self._cum_metrics["logging_time"] += time() - start

    def log_metric(self, name, val):
        self.experiment.log_metric(name, val)

    def log_parameters(self, parameters):
        self.experiment.log_parameters(parameters)

    def log_config(self, config):
        self.experiment.log_parameter("config", json.dumps(convert_dict(config)))

    def upload_snapshot(self):
        if self.snapshot_dir:
            self.experiment.log_asset(self._previous_snapshot_fname)

    def save_itr_params(
        self, step: int, params: Dict[str, Any], metric: Optional[float] = None
    ) -> None:
        super().save_itr_params(step, params, metric)
        now = time()
        if now - self._last_snapshot_upload > self._snapshot_upload_time:
            self._last_snapshot_upload = now
            self.upload_snapshot()

    def shutdown(self, error: bool = False) -> None:
        if not error:
            self.upload_snapshot()
            self.experiment.log_parameter("complete", True)
        self.experiment.end()
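
A hedged usage sketch for the logger above (the class name CometLogger and the
constructor arguments are inferred from the super().__init__ call and are not
shown in the snippet; the trajectory data is a stand-in):

# Hypothetical driver loop; class name and constructor signature are assumptions.
logger = CometLogger(batch_size=64, snapshot_dir='snapshots',
                     snapshot_mode='last', snapshot_gap=1)
logger.log_parameters({'lr': 3e-4, 'algo': 'ppo'})
for itr in range(1, 1001):
    traj_infos = [{'Return': 1.0, 'n_unsafe_actions': 0}]  # stand-in data
    if itr % 100 == 0:
        logger.log(itr, traj_infos)       # full upload: means, rates, cum metrics
    else:
        logger.log_fast(itr, traj_infos)  # cheap path: cumulative counters only
logger.shutdown()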
Example #15
def main(args):
    print('Pretrain? ', not args.not_pretrain)
    print(args.model)
    start_time = time.time()

    if args.local_comet_dir:
        comet_exp = OfflineExperiment(api_key="hIXq6lDzWzz24zgKv7RYz6blo",
                                      project_name="selfcifar",
                                      workspace="cinjon",
                                      auto_metric_logging=True,
                                      auto_output_logging=None,
                                      auto_param_logging=False,
                                      offline_directory=args.local_comet_dir)
    else:
        comet_exp = CometExperiment(api_key="hIXq6lDzWzz24zgKv7RYz6blo",
                                    project_name="selfcifar",
                                    workspace="cinjon",
                                    auto_metric_logging=True,
                                    auto_output_logging=None,
                                    auto_param_logging=False)
    comet_exp.log_parameters(vars(args))
    comet_exp.set_name(args.name)

    # Build model
    # path = "/misc/kcgscratch1/ChoGroup/resnick/spaceofmotion/zeping/bsn"
    linear_cls = NonLinearModel if args.do_nonlinear else LinearModel

    if args.model == "amdim":
        hparams = load_hparams_from_tags_csv(
            '/checkpoint/cinjon/amdim/meta_tags.csv')
        # hparams = load_hparams_from_tags_csv(os.path.join(path, "meta_tags.csv"))
        model = AMDIMModel(hparams)
        if not args.not_pretrain:
            # _path = os.path.join(path, "_ckpt_epoch_434.ckpt")
            _path = '/checkpoint/cinjon/amdim/_ckpt_epoch_434.ckpt'
            model.load_state_dict(torch.load(_path)["state_dict"])
        else:
            print("AMDIM not loading checkpoint")  # Debug
        linear_model = linear_cls(AMDIM_OUTPUT_DIM, args.num_classes)
    elif args.model == "ccc":
        model = CCCModel(None)
        if not args.not_pretrain:
            # _path = os.path.join(path, "TimeCycleCkpt14.pth")
            _path = '/checkpoint/cinjon/spaceofmotion/bsn/TimeCycleCkpt14.pth'
            checkpoint = torch.load(_path)
            base_dict = {
                '.'.join(k.split('.')[1:]): v
                for k, v in list(checkpoint['state_dict'].items())
            }
            model.load_state_dict(base_dict)
        else:
            print("CCC not loading checkpoint")  # Debug
        linear_model = linear_cls(CCC_OUTPUT_DIM,
                                  args.num_classes)
    elif args.model == "corrflow":
        model = CORRFLOWModel(None)
        if not args.not_pretrain:
            _path = '/checkpoint/cinjon/spaceofmotion/supercons/corrflow.kineticsmodel.pth'
            # _path = os.path.join(path, "corrflow.kineticsmodel.pth")
            checkpoint = torch.load(_path)
            base_dict = {
                '.'.join(k.split('.')[1:]): v
                for k, v in list(checkpoint['state_dict'].items())
            }
            model.load_state_dict(base_dict)
        else:
            print("CorrFlow not loading checkpoing")  # Debug
        linear_model = linear_cls(CORRFLOW_OUTPUT_DIM, args.num_classes)
    elif args.model == "resnet":
        if not args.not_pretrain:
            resnet = torchvision.models.resnet50(pretrained=True)
        else:
            resnet = torchvision.models.resnet50(pretrained=False)
            print("ResNet not loading checkpoint")  # Debug
        modules = list(resnet.children())[:-1]
        model = nn.Sequential(*modules)
        linear_model = linear_cls(RESNET_OUTPUT_DIM, args.num_classes)
    else:
        raise Exception("model type has to be amdim, ccc, corrflow or resnet")

    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model).to(device)
        linear_model = nn.DataParallel(linear_model).to(device)
    else:
        model = model.to(device)
        linear_model = linear_model.to(device)
    # model = model.to(device)
    # linear_model = linear_model.to(device)

    # Freeze model
    for p in model.parameters():
        p.requires_grad = False
    model.eval()
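    # Note: freezing parameters stops gradient updates, while model.eval() also
    # switches BatchNorm to running statistics and disables Dropout, so the
    # frozen encoder yields deterministic features for the linear probe.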

    if args.optimizer == "Adam":
        optimizer = optim.Adam(linear_model.parameters(),
                               lr=args.lr,
                               weight_decay=args.weight_decay)
        print("Optimizer: Adam with weight decay: {}".format(
            args.weight_decay))
    elif args.optimizer == "SGD":
        optimizer = optim.SGD(linear_model.parameters(),
                              lr=args.lr,
                              momentum=args.momentum,
                              weight_decay=args.weight_decay)
        print("Optimizer: SGD with weight decay: {} momentum: {}".format(
            args.weight_decay, args.momentum))
    else:
        raise Exception("optimizer should be Adam or SGD")
    optimizer.zero_grad()

    # Set up log dir
    now = datetime.datetime.now()
    log_dir = '/checkpoint/cinjon/spaceofmotion/bsn/cifar-%d-weights/%s/%s' % (
        args.num_classes, args.model, args.name)
    # log_dir = "{}{:%Y%m%dT%H%M}".format(args.model, now)
    # log_dir = os.path.join("weights", log_dir)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    print("Saving to {}".format(log_dir))

    batch_size = args.batch_size * torch.cuda.device_count()
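    # nn.DataParallel splits each batch across GPUs, so scaling by device_count
    # keeps args.batch_size samples per GPU. Caveat: on a CPU-only machine
    # torch.cuda.device_count() is 0 and this batch size becomes 0.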
    # CIFAR-10
    if args.num_classes == 10:
        data_path = ("/private/home/cinjon/cifar-data/cifar-10-batches-py")
        _train_dataset = CIFAR_dataset(glob(os.path.join(data_path, "data*")),
                                       args.num_classes, args.model, True)
        # _train_acc_dataset = CIFAR_dataset(
        #     glob(os.path.join(data_path, "data*")),
        #     args.num_classes,
        #     args.model,
        #     False)
        train_dataloader = data.DataLoader(_train_dataset,
                                           shuffle=True,
                                           batch_size=batch_size,
                                           num_workers=args.num_workers)
        # train_split = int(len(_train_dataset) * 0.8)
        # train_dev_split = int(len(_train_dataset) - train_split)
        # train_dataset, train_dev_dataset = data.random_split(
        #     _train_dataset, [train_split, train_dev_split])
        # train_acc_dataloader = data.DataLoader(
        #     train_dataset, shuffle=False, batch_size=batch_size, num_workers=args.num_workers)
        # train_dev_acc_dataloader = data.DataLoader(
        #     train_dev_dataset, shuffle=False, batch_size=batch_size, num_workers=args.num_workers)
        # train_dataset = data.Subset(_train_dataset, list(range(train_split)))
        # train_dataloader = data.DataLoader(
        #     train_dataset, shuffle=True, batch_size=batch_size, num_workers=args.num_workers)
        # train_acc_dataset = data.Subset(
        #     _train_acc_dataset, list(range(train_split)))
        # train_acc_dataloader = data.DataLoader(
        #     train_acc_dataset, shuffle=False, batch_size=batch_size, num_workers=args.num_workers)
        # train_dev_acc_dataset = data.Subset(
        #     _train_acc_dataset, list(range(train_split, len(_train_acc_dataset))))
        # train_dev_acc_dataloader = data.DataLoader(
        #     train_dev_acc_dataset, shuffle=False, batch_size=batch_size, num_workers=args.num_workers)

        _val_dataset = CIFAR_dataset([os.path.join(data_path, "test_batch")],
                                     args.num_classes, args.model, False)
        val_dataloader = data.DataLoader(_val_dataset,
                                         shuffle=False,
                                         batch_size=batch_size,
                                         num_workers=args.num_workers)
        # val_split = int(len(_val_dataset) * 0.8)
        # val_dev_split = int(len(_val_dataset) - val_split)
        # val_dataset, val_dev_dataset = data.random_split(
        #     _val_dataset, [val_split, val_dev_split])
        # val_dataloader = data.DataLoader(
        #     val_dataset, shuffle=False, batch_size=batch_size, num_workers=args.num_workers)
        # val_dev_dataloader = data.DataLoader(
        #     val_dev_dataset, shuffle=False, batch_size=batch_size, num_workers=args.num_workers)
    # CIFAR-100
    elif args.num_classes == 100:
        data_path = ("/private/home/cinjon/cifar-data/cifar-100-python")
        _train_dataset = CIFAR_dataset([os.path.join(data_path, "train")],
                                       args.num_classes, args.model, True)
        train_dataloader = data.DataLoader(_train_dataset,
                                           shuffle=True,
                                           batch_size=batch_size)
        _val_dataset = CIFAR_dataset([os.path.join(data_path, "test")],
                                     args.num_classes, args.model, False)
        val_dataloader = data.DataLoader(_val_dataset,
                                         shuffle=False,
                                         batch_size=batch_size)
    else:
        raise Exception("num_classes should be 10 or 100")

    best_acc = 0.0
    best_epoch = 0

    # Training
    for epoch in range(1, args.epochs + 1):
        current_lr = max(
            3e-4, args.lr * math.pow(0.5, math.floor(epoch / args.lr_interval)))
        linear_model.train()
        if args.optimizer == "Adam":
            optimizer = optim.Adam(linear_model.parameters(),
                                   lr=current_lr,
                                   weight_decay=args.weight_decay)
        elif args.optimizer == "SGD":
            optimizer = optim.SGD(
                linear_model.parameters(),
                lr=current_lr,
                momentum=args.momentum,
                weight_decay=args.weight_decay,
            )
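        # The optimizer is rebuilt every epoch to apply a step decay:
        # current_lr = max(3e-4, args.lr * 0.5 ** floor(epoch / args.lr_interval)).
        # A (hypothetical) alternative is to keep one optimizer and use
        # torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_interval,
        # gamma=0.5); rebuilding also resets Adam's moment estimates each epoch.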

        ####################################################
        # Train
        t = time.time()
        train_acc = 0
        train_loss_sum = 0.0
        for iter, input in enumerate(train_dataloader):
            # Exit ~5 minutes before the job's wall-clock limit (args.time is in hours).
            if (time.time() - start_time > args.time * 3600 - 300
                    and comet_exp is not None):
                comet_exp.end()
                sys.exit(-1)

            imgs = input[0].to(device)
            if args.model != "resnet":
                imgs = imgs.unsqueeze(1)
            lbls = input[1].flatten().to(device)

            # output = model(imgs)
            # output = linear_model(output)
            output = linear_model(model(imgs))
            loss = F.cross_entropy(output, lbls)
            train_loss_sum += float(loss.data)
            train_acc += int(sum(torch.argmax(output, dim=1) == lbls))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # log_text = "train epoch {}/{}\titer {}/{} loss:{} {:.3f}s/iter"
            if iter % 1500 == 0:
                log_text = "train epoch {}/{}\titer {}/{} loss:{}"
                print(log_text.format(epoch, args.epochs, iter + 1,
                                      len(train_dataloader), loss.data,
                                      time.time() - t),
                      flush=False)
                t = time.time()

        train_acc /= len(_train_dataset)
        train_loss_sum /= len(train_dataloader)
        with comet_exp.train():
            comet_exp.log_metrics({
                'acc': train_acc,
                'loss': train_loss_sum
            },
                                  step=(epoch + 1) * len(train_dataloader),
                                  epoch=epoch + 1)
        print("train acc epoch {}/{} loss:{} train_acc:{}".format(
            epoch, args.epochs, train_loss_sum, train_acc),
              flush=True)

        #######################################################################
        # Train acc
        # linear_model.eval()
        # train_acc = 0
        # train_loss_sum = 0.0
        # for iter, input in enumerate(train_acc_dataloader):
        #     imgs = input[0].to(device)
        #     if args.model != "resnet":
        #         imgs = imgs.unsqueeze(1)
        #     lbls = input[1].flatten().to(device)
        #
        #     # output = model(imgs)
        #     # output = linear_model(output)
        #     output = linear_model(model(imgs))
        #     loss = F.cross_entropy(output, lbls)
        #     train_loss_sum += float(loss.data)
        #     train_acc += int(sum(torch.argmax(output, dim=1) == lbls))
        #
        #     print("train acc epoch {}/{}\titer {}/{} loss:{} {:.3f}s/iter".format(
        #         epoch,
        #         args.epochs,
        #         iter+1,
        #         len(train_acc_dataloader),
        #         loss.data,
        #         time.time() - t),
        #         flush=True)
        #     t = time.time()
        #
        #
        # train_acc /= len(train_acc_dataset)
        # train_loss_sum /= len(train_acc_dataloader)
        # print("train acc epoch {}/{} loss:{} train_acc:{}".format(
        #     epoch, args.epochs, train_loss_sum, train_acc), flush=True)

        #######################################################################
        # Train dev acc
        # # linear_model.eval()
        # train_dev_acc = 0
        # train_dev_loss_sum = 0.0
        # for iter, input in enumerate(train_dev_acc_dataloader):
        #     imgs = input[0].to(device)
        #     if args.model != "resnet":
        #         imgs = imgs.unsqueeze(1)
        #     lbls = input[1].flatten().to(device)
        #
        #     output = model(imgs)
        #     output = linear_model(output)
        #     # output = linear_model(model(imgs))
        #     loss = F.cross_entropy(output, lbls)
        #     train_dev_loss_sum += float(loss.data)
        #     train_dev_acc += int(sum(torch.argmax(output, dim=1) == lbls))
        #
        #     print("train dev acc epoch {}/{}\titer {}/{} loss:{} {:.3f}s/iter".format(
        #         epoch,
        #         args.epochs,
        #         iter+1,
        #         len(train_dev_acc_dataloader),
        #         loss.data,
        #         time.time() - t),
        #         flush=True)
        #     t = time.time()
        #
        # train_dev_acc /= len(train_dev_acc_dataset)
        # train_dev_loss_sum /= len(train_dev_acc_dataloader)
        # print("train dev epoch {}/{} loss:{} train_dev_acc:{}".format(
        #     epoch, args.epochs, train_dev_loss_sum, train_dev_acc), flush=True)

        #######################################################################
        # Val dev
        # # linear_model.eval()
        # val_dev_acc = 0
        # val_dev_loss_sum = 0.0
        # for iter, input in enumerate(val_dev_dataloader):
        #     imgs = input[0].to(device)
        #     if args.model != "resnet":
        #         imgs = imgs.unsqueeze(1)
        #     lbls = input[1].flatten().to(device)
        #
        #     output = model(imgs)
        #     output = linear_model(output)
        #     loss = F.cross_entropy(output, lbls)
        #     val_dev_loss_sum += float(loss.data)
        #     val_dev_acc += int(sum(torch.argmax(output, dim=1) == lbls))
        #
        #     print("val dev epoch {}/{} iter {}/{} loss:{} {:.3f}s/iter".format(
        #         epoch,
        #         args.epochs,
        #         iter+1,
        #         len(val_dev_dataloader),
        #         loss.data,
        #         time.time() - t),
        #         flush=True)
        #     t = time.time()
        #
        # val_dev_acc /= len(val_dev_dataset)
        # val_dev_loss_sum /= len(val_dev_dataloader)
        # print("val dev epoch {}/{} loss:{} val_dev_acc:{}".format(
        #     epoch, args.epochs, val_dev_loss_sum, val_dev_acc), flush=True)

        #######################################################################
        # Val
        linear_model.eval()
        val_acc = 0
        val_loss_sum = 0.0
        for iter, input in enumerate(val_dataloader):
            # Same early exit as in training, ~5 minutes before the time limit.
            if (time.time() - start_time > args.time * 3600 - 300
                    and comet_exp is not None):
                comet_exp.end()
                sys.exit(-1)

            imgs = input[0].to(device)
            if args.model != "resnet":
                imgs = imgs.unsqueeze(1)
            lbls = input[1].flatten().to(device)

            output = model(imgs)
            output = linear_model(output)
            loss = F.cross_entropy(output, lbls)
            val_loss_sum += float(loss.data)
            val_acc += int(sum(torch.argmax(output, dim=1) == lbls))

            # log_text = "val epoch {}/{} iter {}/{} loss:{} {:.3f}s/iter"
            if iter % 1500 == 0:
                log_text = "val epoch {}/{} iter {}/{} loss:{}"
                print(log_text.format(epoch, args.epochs, iter + 1,
                                      len(val_dataloader), loss.data,
                                      time.time() - t),
                      flush=False)
                t = time.time()

        val_acc /= len(_val_dataset)
        val_loss_sum /= len(val_dataloader)
        print("val epoch {}/{} loss:{} val_acc:{}".format(
            epoch, args.epochs, val_loss_sum, val_acc))
        with comet_exp.test():
            comet_exp.log_metrics({
                'acc': val_acc,
                'loss': val_loss_sum
            },
                                  step=(epoch + 1) * len(train_dataloader),
                                  epoch=epoch + 1)

        if val_acc > best_acc:
            best_acc = val_acc
            best_epoch = epoch
            linear_save_path = os.path.join(log_dir,
                                            "{}.linear.pth".format(epoch))
            model_save_path = os.path.join(log_dir,
                                           "{}.model.pth".format(epoch))
            torch.save(linear_model.state_dict(), linear_save_path)
            torch.save(model.state_dict(), model_save_path)

        # Check bias and variance
        print(
            "Epoch {} lr {} total: train_loss:{} train_acc:{} val_loss:{} val_acc:{}"
            .format(epoch, current_lr, train_loss_sum, train_acc, val_loss_sum,
                    val_acc),
            flush=True)
        # print("Epoch {} lr {} total: train_acc:{} train_dev_acc:{} val_dev_acc:{} val_acc:{}".format(
        #     epoch, current_lr, train_acc, train_dev_acc, val_dev_acc, val_acc), flush=True)

    print("The best epoch: {} acc: {}".format(best_epoch, best_acc))
Example #16

if __name__ == '__main__':
    SEED = 1234

    random.seed(SEED)
    torch.manual_seed(SEED)
    torch.cuda.manual_seed(SEED)
    torch.backends.cudnn.deterministic = True

    config = get_config(sys.argv[1])
    # experiment = Experiment("wXwnV8LZOtVfxqnRxr65Lv7C2")
    comet_dir_path = os.path.join(config["result_directory"], config["model"])
    makedirs(comet_dir_path)
    experiment = OfflineExperiment(
        project_name="DeepGenomics",
        offline_directory=comet_dir_path)
    experiment.log_parameters(config)
    if torch.cuda.is_available():
        # CUDA_VISIBLE_DEVICES re-indexes the visible GPUs from 0, so the
        # exposed device is always addressed as cuda:0 here, regardless of
        # the original index named in the environment variable.
        device = torch.device('cuda:0')
    else:
        device = torch.device('cpu')
    print(device)
    number_of_examples = len(get_filenames(os.path.join(config["data"], "x")))
    list_ids = [str(i) for i in range(number_of_examples)]
    random.shuffle(list_ids)
    t_ind, v_ind = round(number_of_examples * 0.7), round(number_of_examples * 0.9)
    train_indices, validation_indices, test_indices = list_ids[:t_ind], list_ids[t_ind:v_ind], list_ids[v_ind:]
    
    params = {'batch_size': config["training"]["batch_size"],
Example #17
hyper_params = {
    "sequence_length": 28,
    "input_size": 28,
    "hidden_size": 128,
    "num_layers": 2,
    "num_classes": 10,
    "batch_size": 100,
    "num_epochs": 3,
    "learning_rate": 0.01
}

optimizer = Optimizer("pA3Hqc1pEswNvXOPtSoRobt7C")

experiment = OfflineExperiment(project_name="horoma",
                               offline_directory="./experiments",
                               disabled=False)
experiment.log_parameters(hyper_params)

# MNIST Dataset
train_dataset = dsets.MNIST(root='./data/',
                            train=True,
                            transform=transforms.ToTensor(),
                            download=True)

test_dataset = dsets.MNIST(root='./data/',
                           train=False,
                           transform=transforms.ToTensor())

# Data Loader (Input Pipeline)
train_loader = torch.utils.data.DataLoader(
Example #18
class CometLogger(LightningLoggerBase):
    r"""
    Log using `Comet.ml <https://www.comet.ml>`_.

    Install it with pip:

    .. code-block:: bash

        pip install comet-ml

    Comet requires either an API Key (online mode) or a local directory path (offline mode).

    **ONLINE MODE**

    .. code-block:: python

        import os
        from pytorch_lightning import Trainer
        from pytorch_lightning.loggers import CometLogger
        # arguments made to CometLogger are passed on to the comet_ml.Experiment class
        comet_logger = CometLogger(
            api_key=os.environ.get('COMET_API_KEY'),
            workspace=os.environ.get('COMET_WORKSPACE'),  # Optional
            save_dir='.',  # Optional
            project_name='default_project',  # Optional
            rest_api_key=os.environ.get('COMET_REST_API_KEY'),  # Optional
            experiment_name='default'  # Optional
        )
        trainer = Trainer(logger=comet_logger)

    **OFFLINE MODE**

    .. code-block:: python

        from pytorch_lightning.loggers import CometLogger
        # arguments made to CometLogger are passed on to the comet_ml.Experiment class
        comet_logger = CometLogger(
            save_dir='.',
            workspace=os.environ.get('COMET_WORKSPACE'),  # Optional
            project_name='default_project',  # Optional
            rest_api_key=os.environ.get('COMET_REST_API_KEY'),  # Optional
            experiment_name='default'  # Optional
        )
        trainer = Trainer(logger=comet_logger)

    Args:
        api_key: Required in online mode. API key, found on Comet.ml. If not given, this
            will be loaded from the environment variable COMET_API_KEY or ~/.comet.config
            if either exists.
        save_dir: Required in offline mode. The path for the directory to save local
            comet logs. If given, this also sets the directory for saving checkpoints.
        project_name: Optional. Send your experiment to a specific project.
            Otherwise will be sent to Uncategorized Experiments.
            If the project name does not already exist, Comet.ml will create a new project.
        rest_api_key: Optional. Rest API key found in Comet.ml settings.
            This is used to determine the version number.
        experiment_name: Optional. String representing the name for this particular experiment on Comet.ml.
        experiment_key: Optional. If set, restores from existing experiment.
        offline: If api_key and save_dir are both given, this determines whether
            the experiment will be in online or offline mode. This is useful if you use
            save_dir to control the checkpoints directory and have a ~/.comet.config
            file but still want to run offline experiments.
        prefix: A string to put at the beginning of metric keys.
        \**kwargs: Additional arguments like `workspace`, `log_code`, etc. used by
            :class:`CometExperiment` can be passed as keyword arguments in this logger.
    """

    LOGGER_JOIN_CHAR = '-'

    def __init__(self,
                 api_key: Optional[str] = None,
                 save_dir: Optional[str] = None,
                 project_name: Optional[str] = None,
                 rest_api_key: Optional[str] = None,
                 experiment_name: Optional[str] = None,
                 experiment_key: Optional[str] = None,
                 offline: bool = False,
                 prefix: str = '',
                 **kwargs):
        if comet_ml is None:
            raise ImportError(
                "You want to use `comet_ml` logger which is not installed yet,"
                " install it with `pip install comet-ml`.")
        super().__init__()
        self._experiment = None

        # Determine online or offline mode based on which arguments were passed to CometLogger
        api_key = api_key or comet_ml.config.get_api_key(
            None, comet_ml.config.get_config())

        if api_key is not None and save_dir is not None:
            self.mode = "offline" if offline else "online"
            self.api_key = api_key
            self._save_dir = save_dir
        elif api_key is not None:
            self.mode = "online"
            self.api_key = api_key
            self._save_dir = None
        elif save_dir is not None:
            self.mode = "offline"
            self._save_dir = save_dir
        else:
            # If neither api_key nor save_dir are passed as arguments, raise an exception
            raise MisconfigurationException(
                "CometLogger requires either api_key or save_dir during initialization."
            )

        log.info(f"CometLogger will be initialized in {self.mode} mode")

        self._project_name = project_name
        self._experiment_key = experiment_key
        self._experiment_name = experiment_name
        self._prefix = prefix
        self._kwargs = kwargs
        self._future_experiment_key = None

        if rest_api_key is not None:
            # Comet.ml rest API, used to determine version number
            self.rest_api_key = rest_api_key
            self.comet_api = API(self.rest_api_key)
        else:
            self.rest_api_key = None
            self.comet_api = None


    @property
    @rank_zero_experiment
    def experiment(self):
        r"""
        Actual Comet object. To use Comet features in your
        :class:`~pytorch_lightning.core.lightning.LightningModule` do the following.

        Example::

            self.logger.experiment.some_comet_function()

        """
        if self._experiment is not None:
            return self._experiment

        if self._future_experiment_key is not None:
            os.environ["COMET_EXPERIMENT_KEY"] = self._future_experiment_key

        try:
            if self.mode == "online":
                if self._experiment_key is None:
                    self._experiment = CometExperiment(
                        api_key=self.api_key,
                        project_name=self._project_name,
                        **self._kwargs,
                    )
                    self._experiment_key = self._experiment.get_key()
                else:
                    self._experiment = CometExistingExperiment(
                        api_key=self.api_key,
                        project_name=self._project_name,
                        previous_experiment=self._experiment_key,
                        **self._kwargs,
                    )
            else:
                self._experiment = CometOfflineExperiment(
                    offline_directory=self.save_dir,
                    project_name=self._project_name,
                    **self._kwargs,
                )
        finally:
            if self._future_experiment_key is not None:
                os.environ.pop("COMET_EXPERIMENT_KEY")
                self._future_experiment_key = None

        if self._experiment_name:
            self._experiment.set_name(self._experiment_name)

        return self._experiment

    @rank_zero_only
    def log_hyperparams(self, params: Union[Dict[str, Any],
                                            Namespace]) -> None:
        params = self._convert_params(params)
        params = self._flatten_dict(params)
        self.experiment.log_parameters(params)

    @rank_zero_only
    def log_metrics(self,
                    metrics: Dict[str, Union[torch.Tensor, float]],
                    step: Optional[int] = None) -> None:
        assert rank_zero_only.rank == 0, "experiment tried to log from global_rank != 0"
        # Comet.ml expects metrics to be a dictionary of detached tensors on CPU
        for key, val in metrics.items():
            if is_tensor(val):
                metrics[key] = val.cpu().detach()

        metrics_without_epoch = metrics.copy()
        epoch = metrics_without_epoch.pop('epoch', None)
        metrics_without_epoch = self._add_prefix(metrics_without_epoch)
        self.experiment.log_metrics(metrics_without_epoch,
                                    step=step,
                                    epoch=epoch)

    def reset_experiment(self):
        self._experiment = None

    @rank_zero_only
    def finalize(self, status: str) -> None:
        r"""
        When calling ``self.experiment.end()``, that experiment won't log any more data to Comet.
        That's why, if you need to log any more data, you need to create an ExistingCometExperiment.
        For example, to log data when testing your model after training, because when training is
        finalized :meth:`CometLogger.finalize` is called.

        This happens automatically in the :meth:`~CometLogger.experiment` property, when
        ``self._experiment`` is set to ``None``, i.e. ``self.reset_experiment()``.
        """
        self.experiment.end()
        self.reset_experiment()

    @property
    def save_dir(self) -> Optional[str]:
        return self._save_dir

    @property
    def name(self) -> str:
        # Don't create an experiment if we don't have one
        if self._experiment is not None and self._experiment.project_name is not None:
            return self._experiment.project_name

        if self._project_name is not None:
            return self._project_name

        return "comet-default"

    @property
    def version(self) -> str:
        # Don't create an experiment if we don't have one
        if self._experiment is not None:
            return self._experiment.id

        if self._experiment_key is not None:
            return self._experiment_key

        if "COMET_EXPERIMENT_KEY" in os.environ:
            return os.environ["COMET_EXPERIMENT_KEY"]

        if self._future_experiment_key is not None:
            return self._future_experiment_key

        # Pre-generate an experiment key
        self._future_experiment_key = comet_ml.generate_guid()

        return self._future_experiment_key

    def __getstate__(self):
        state = self.__dict__.copy()

        # Save the experiment id in case an experiment object already exists,
        # this way we could create an ExistingExperiment pointing to the same
        # experiment
        state["_experiment_key"] = (
            self._experiment.id if self._experiment is not None else None
        )

        # Remove the experiment object as it contains hard to pickle objects
        # (like network connections), the experiment object will be recreated if
        # needed later
        state["_experiment"] = None
        return state
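
A small sketch of the pickling contract implied by __getstate__ (assuming
comet_ml and pytorch_lightning are installed; offline mode avoids needing an
API key):

import pickle

# The live experiment object is dropped on pickling, its key survives, and
# the `experiment` property recreates it lazily on the next access.
logger = CometLogger(save_dir='.')   # offline mode
_ = logger.experiment                # force creation of the experiment
restored = pickle.loads(pickle.dumps(logger))
assert restored._experiment is None  # recreated on next .experiment access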
Example #19

    encode = configuration['encode']
    cluster = configuration['cluster']
    latent_dim = configuration['latent_dim']
    flattened = False  # Default
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Set all seeds for full reproducibility
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

    # Set up Comet Experiment tracking
    experiment = OfflineExperiment(project_name='general',
                                   workspace='timothynest',  # Replace this with appropriate comet workspace
                                   offline_directory="experiments")
    experiment.set_name(
        name=args.config + "_dim={}_overlapped={}".format(latent_dim, train_split))
    experiment.log_parameters(configuration)

    # Initialize necessary objects
    if clustering_model == 'kmeans':
        clustering_model = KMeansClustering(n_clusters, seed)
    elif clustering_model == 'gmm':
        clustering_model = GMMClustering(n_clusters, seed)
    elif clustering_model == 'svm':
        clustering_model = SVMClustering(seed)
    else:
        print('No clustering model specified. Using Kmeans.')
        clustering_model = KMeansClustering(n_clusters, seed)
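
As a design note, the chain above could be table-driven; a sketch assuming the
same clustering classes from this snippet:

# Hypothetical table-driven equivalent of the if/elif chain above; unknown
# names fall back to KMeans, mirroring the original default branch.
factories = {
    'kmeans': lambda: KMeansClustering(n_clusters, seed),
    'gmm': lambda: GMMClustering(n_clusters, seed),
    'svm': lambda: SVMClustering(seed),
}
if clustering_model not in factories:
    print('No clustering model specified. Using Kmeans.')
clustering_model = factories.get(clustering_model, factories['kmeans'])()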