Example #1
def test_comet_pickle(tmpdir, monkeypatch):
    """Verify that pickling trainer with comet logger works."""

    # prevent comet logger from trying to print at exit, since
    # pytest's stdout/stderr redirection breaks it
    import atexit
    monkeypatch.setattr(atexit, "register", lambda _: None)

    tutils.reset_seed()

    try:
        from pytorch_lightning.logging import CometLogger
    except ModuleNotFoundError:
        return

    # hparams = tutils.get_hparams()
    # model = LightningTestModel(hparams)

    comet_dir = os.path.join(tmpdir, "cometruns")

    # We test CometLogger in offline mode with local saves
    logger = CometLogger(
        save_dir=comet_dir,
        project_name="general",
        workspace="dummy-test",
    )

    trainer_options = dict(default_save_path=tmpdir,
                           max_epochs=1,
                           logger=logger)

    trainer = Trainer(**trainer_options)
    pkl_bytes = pickle.dumps(trainer)
    trainer2 = pickle.loads(pkl_bytes)
    trainer2.logger.log_metrics({"acc": 1.0})
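A minimal standalone sketch of the same pickle round-trip, distilled from the test above (assumptions: comet_ml is installed, /tmp/cometruns is writable, and the import follows newer Lightning releases, which moved CometLogger from pytorch_lightning.logging to pytorch_lightning.loggers):

import pickle

from pytorch_lightning.loggers import CometLogger

# With a save_dir and no api_key, CometLogger runs in offline mode and
# writes experiment archives locally instead of contacting comet.ml.
logger = CometLogger(save_dir="/tmp/cometruns", project_name="general")

# Distributed backends pickle the Trainer (and with it the logger) to
# spawn worker processes, so the logger must survive a round-trip.
restored = pickle.loads(pickle.dumps(logger))
restored.log_metrics({"acc": 1.0})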
Example #2
def test_comet_logger(tmpdir):
    """Verify that basic functionality of Comet.ml logger works."""
    tutils.reset_seed()

    try:
        from pytorch_lightning.logging import CometLogger
    except ModuleNotFoundError:
        return

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    comet_dir = os.path.join(tmpdir, "cometruns")

    # We test CometLogger in offline mode with local saves
    logger = CometLogger(
        save_dir=comet_dir,
        project_name="general",
        workspace="dummy-test",
    )

    trainer_options = dict(max_num_epochs=1,
                           train_percent_check=0.01,
                           logger=logger)

    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    print('result finished')
    assert result == 1, "Training failed"
Example #3
def main(cfg: DictConfig):
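    # NOTE: train_length, val_length and test_length are used below but
    # are defined outside this snippet in the original module.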
    for i in range(100):
        seed = i + 1000
        seed_everything(seed)
        sequence_length = cfg.params[0].sequence_length
        hidden_layer = cfg.params[1].hidden_layer
        hidden_feature = cfg.params[2].hidden_feature
        activation = cfg.params[3].activation

        KOSPI200Dataset.setup(train_length=train_length,
                              val_length=val_length,
                              test_length=test_length,
                              sequence_length=sequence_length
                              )  # 615 trading days before 2020.07.03 is 2018.01.02

        comet_logger = CometLogger(
            api_key="SWhvV0XPkHV8tPdU8Nv67EXxU",
            workspace="dldjwls3",  # Optional
            project_name=f"stock-candidate-{train_length}days",
            experiment_name=f'gcn_{sequence_length}_{activation}_{hidden_layer}_{hidden_feature}'
        )

        model = Baseline(seed=seed,
                         sequence_length=sequence_length,
                         num_feature=5,
                         activation=activation,
                         hidden_layer=hidden_layer,
                         hidden_feature=hidden_feature)
        trainer = Trainer(max_epochs=120, gpus=-1, logger=comet_logger)
        trainer.fit(model)
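The cfg.params[i].field lookups above imply a list-style Hydra config. A hypothetical entry point and matching YAML, with names and values that are illustrative rather than taken from the original project:

import hydra
from omegaconf import DictConfig

# conf/config.yaml (hypothetical) would contain a list under params:
#   params:
#     - sequence_length: 9
#     - hidden_layer: 4
#     - hidden_feature: 1
#     - activation: sigmoid
@hydra.main(config_path="conf", config_name="config")
def main(cfg: DictConfig) -> None:
    print(cfg.params[0].sequence_length)  # -> 9

if __name__ == "__main__":
    main()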
Example #4
def test_comet_pickle(tmpdir):
    """Verify that pickling trainer with comet logger works."""
    tutils.reset_seed()

    try:
        from pytorch_lightning.logging import CometLogger
    except ModuleNotFoundError:
        return

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    comet_dir = os.path.join(tmpdir, "cometruns")

    # We test CometLogger in offline mode with local saves
    logger = CometLogger(
        save_dir=comet_dir,
        project_name="general",
        workspace="dummy-test",
    )

    trainer_options = dict(max_num_epochs=1, logger=logger)

    trainer = Trainer(**trainer_options)
    pkl_bytes = pickle.dumps(trainer)
    trainer2 = pickle.loads(pkl_bytes)
    trainer2.logger.log_metrics({"acc": 1.0})
Example #5
def main(cfg: DictConfig):
    seed = 1
    train_length = 500
    val_length = 30
    test_length = 585

    sequence_length = 9
    hidden_layer = 4
    hidden_feature = 1
    activation = 'sigmoid'

    seed_everything(seed)
    KOSPI200Dataset.setup(train_length=train_length,
                          val_length=val_length,
                          test_length=test_length,
                          sequence_length=sequence_length
                          )  # 615 trading days before 2020.07.03 is 2018.01.02

    comet_logger = CometLogger(
        api_key="SWhvV0XPkHV8tPdU8Nv67EXxU",
        workspace="dldjwls3",  # Optional
        project_name="stock-test",
        experiment_name='test')

    model = Baseline(seed=seed,
                     sequence_length=sequence_length,
                     num_feature=5,
                     activation=activation,
                     hidden_layer=hidden_layer,
                     hidden_feature=hidden_feature)
    trainer = Trainer(max_epochs=120, gpus=-1, logger=comet_logger)
    trainer.fit(model)
Example #6
def main(hparams):
    # init module
    raw_data = read_smiles_multiclass('%s/raw.csv' % hparams.data)
    n_classes = len(raw_data[0][1])
    model = PropPredictor(hparams, n_classes=n_classes)
    model.version = hparams.dataset
    
    load_shortest_paths(hparams)
    #model.half()
    
    comet_logger = CometLogger(
        api_key=os.environ["COMET_KEY"],
        experiment_name=f'{hparams.dataset}-{str(time.time())}',
        log_graph=True,
        project_name="tcvaemolgen",
        workspace=os.environ["COMET_WKSP"]
    )

    # most basic trainer, uses good defaults
    trainer = Trainer(
        check_val_every_n_epoch=1,
        default_save_path=f'data/05_model_outputs/{hparams.dataset}',
        distributed_backend=hparams.distributed_backend,
        max_nb_epochs=hparams.max_nb_epochs,
        early_stop_callback=None,
        gpus=hparams.gpus,
        gradient_clip_val=10,
        nb_gpu_nodes=hparams.nodes,
        logger=comet_logger,
        log_save_interval=100,
        row_log_interval=10,    
        show_progress_bar=True,
        track_grad_norm=2
    )
    for round_idx in range(hparams.n_rounds):
        model.split_idx = round_idx
        log.info(f'Split {round_idx}')
        trainer.fit(model)
    
    trainer.test()
    return
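This example pulls its Comet credentials from the environment (COMET_KEY, COMET_WKSP). A small sketch for failing fast with a readable error when they are unset, instead of hitting a bare KeyError inside the CometLogger call; the helper name is mine, not from the original code:

import os

def comet_credentials():
    # Return (api_key, workspace), with a clear error if either is unset.
    try:
        return os.environ["COMET_KEY"], os.environ["COMET_WKSP"]
    except KeyError as missing:
        raise RuntimeError(
            f"Set the {missing} environment variable before training."
        ) from None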
Example #7
def main(cfg: DictConfig):
    train_length = 500
    val_length = 30
    test_length = 585

    opt = Optimizer(
        config,
        api_key='SWhvV0XPkHV8tPdU8Nv67EXxU',
        project_name=f'stock-gcn-experiment-sequences-{train_length}days')
    for experiment in opt.get_experiments():
        seed = 1
        seed_everything(seed)

        hidden_layer = experiment.get_parameter('hidden_layer')
        hidden_feature = experiment.get_parameter('hidden_feature')
        activation = experiment.get_parameter('activation')
        sequence_length = experiment.get_parameter('sequence_length')

        KOSPI200Dataset.setup(train_length=train_length,
                              val_length=val_length,
                              test_length=test_length,
                              sequence_length=sequence_length
                              )  # 615 trading days before 2020.07.03 is 2018.01.02

        comet_logger = CometLogger(
            api_key="SWhvV0XPkHV8tPdU8Nv67EXxU",
            workspace="dldjwls3",  # Optional
            project_name=f'stock-gcn-experiment-sequences-{train_length}days',
            experiment_name=f'gcn_{sequence_length}_{activation}_{hidden_layer}_{hidden_feature}',
            experiment_key=experiment.get_key())

        model = Baseline(seed=seed,
                         sequence_length=sequence_length,
                         num_feature=5,
                         hidden_layer=hidden_layer,
                         hidden_feature=hidden_feature,
                         activation=activation)
        trainer = Trainer(max_epochs=120, gpus=-1, logger=comet_logger)
        trainer.fit(model)
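The config handed to Optimizer above is not defined in this snippet. Comet's Optimizer takes a dict with algorithm, parameters, and spec keys; a plausible shape, with illustrative bounds chosen to match the parameters the loop reads back (not the original project's values):

from comet_ml import Optimizer

config = {
    "algorithm": "bayes",
    "parameters": {
        "sequence_length": {"type": "integer", "min": 5, "max": 20},
        "hidden_layer": {"type": "integer", "min": 1, "max": 8},
        "hidden_feature": {"type": "integer", "min": 1, "max": 16},
        "activation": {"type": "categorical",
                       "values": ["sigmoid", "relu", "tanh"]},
    },
    "spec": {"metric": "val_loss", "objective": "minimize"},
}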
Example #8
def test_comet_logger(tmpdir, monkeypatch):
    """Verify that basic functionality of Comet.ml logger works."""

    # prevent comet logger from trying to print at exit, since
    # pytest's stdout/stderr redirection breaks it
    import atexit
    monkeypatch.setattr(atexit, "register", lambda _: None)

    tutils.reset_seed()

    try:
        from pytorch_lightning.logging import CometLogger
    except ModuleNotFoundError:
        return

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    comet_dir = os.path.join(tmpdir, "cometruns")

    # We test CometLogger in offline mode with local saves
    logger = CometLogger(
        save_dir=comet_dir,
        project_name="general",
        workspace="dummy-test",
    )

    trainer_options = dict(default_save_path=tmpdir,
                           max_epochs=1,
                           train_percent_check=0.01,
                           logger=logger)

    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    print('result finished')
    assert result == 1, "Training failed"
Example #9
def main(cfg: DictConfig):
    for i in range(20):
        train_length = 300
        val_length = 30
        test_length = 585 - 30 * i

        seed = 1017
        seed_everything(seed)
        sequence_length = 10
        hidden_layer = 5
        hidden_feature = 12
        activation = 'sigmoid'

        KOSPI200Dataset.setup(train_length=train_length,
                              val_length=val_length,
                              test_length=test_length,
                              sequence_length=sequence_length
                              )  # 615 trading days before 2020.07.03 is 2018.01.02

        comet_logger = CometLogger(
            api_key="SWhvV0XPkHV8tPdU8Nv67EXxU",
            workspace="dldjwls3",  # Optional
            project_name=f"stock-rolling-simulation-{train_length}days",
            experiment_name=f'{seed}seed_gcn_{sequence_length}_{activation}_{hidden_layer}_{hidden_feature}'
        )

        model = Baseline(seed=seed,
                         sequence_length=sequence_length,
                         num_feature=5,
                         activation=activation,
                         hidden_layer=hidden_layer,
                         hidden_feature=hidden_feature)
        trainer = Trainer(max_epochs=120, gpus=-1, logger=comet_logger)
        trainer.fit(model)
        trainer.test(model)
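The loop above rolls the evaluation window backwards in time: the 300-day training and 30-day validation windows stay fixed while test_length shrinks by 30 trading days per iteration. A quick check of that arithmetic:

# test_length per iteration: 585, 555, 525, ..., 15 at i == 19
print([585 - 30 * i for i in range(20)])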
Example #10
def main(hparams):
    generator = Generator(hparams)
    discriminator = Discriminator(hparams)

    # scorer = models.mobilenet_v2(pretrained=True)
    model = GAN(hparams, generator, discriminator)

    experiment_name = hparams.loss_strategy + "+" + hparams.architecture
    if hparams.gradient_penalty_strategy != "none":
        experiment_name += "+" + hparams.gradient_penalty_strategy
    if hparams.multi_scale_gradient:
        experiment_name += "+msg"
    if hparams.instance_noise:
        experiment_name += "+in"
    if hparams.spectral_normalization:
        experiment_name += "+sn"
    if hparams.equalized_learning_rate:
        experiment_name += "+eqlr"

    experiment_name += " (" + hparams.dataset + ")"

    if hparams.logger == "none":
        logger = False
    elif hparams.logger == "comet.ml":
        logger = CometLogger(
            api_key=os.environ["COMET_KEY"],
            workspace=os.environ["COMET_WORKSPACE"],  # Optional
            project_name="gan-research-project",  # Optional
            rest_api_key=os.environ["COMET_REST_KEY"],  # Optional
            experiment_name=experiment_name)
    elif hparams.logger == "wandb":
        logger = WandbLogger(project="gan-research-project",
                             name=experiment_name)

        logger.watch(model)
    elif hparams.logger == "tensorboard":
        logger = TensorBoardLogger(save_dir=os.getcwd() + "/lightning_logs")
    else:
        raise ValueError("Must specify a logger")

    if hparams.save_checkpoints:
        checkpoint_callback = ModelCheckpoint(
            filepath=os.getcwd() + "/checkpoints/{epoch}-" +
            hparams.loss_strategy + "+" + hparams.gradient_penalty_strategy +
            "+" + hparams.dataset +
            "-{discriminator_loss:.5f}-{ic_score_mean:.5f}",
            monitor="discriminator_loss",
            mode="max",
            save_top_k=1,
            period=1)
    else:
        checkpoint_callback = False

    trainer = Trainer(min_epochs=hparams.min_epochs,
                      max_epochs=hparams.max_epochs,
                      gpus=hparams.gpus,
                      nb_gpu_nodes=hparams.nodes,
                      accumulate_grad_batches=hparams.accumulate_grad_batches,
                      progress_bar_refresh_rate=20,
                      early_stop_callback=False,
                      checkpoint_callback=checkpoint_callback,
                      logger=logger,
                      fast_dev_run=False,
                      num_sanity_val_steps=0,
                      distributed_backend="dp",
                      weights_summary=None)

    trainer.fit(model)
Example #11
    exp_name = (args.experiment_name + '_' + args.model + '_' +
                config['dataloaders']['tokenizer_type'].get() +
                '_epochs_' + str(config['trainer_params']['max_epochs'].get()))

    print("starting " + exp_name + " experiment")

    # setup logger
    api_key = args.api_key
    if api_key == '':
        api_key = os.environ['API_KEY']
    logger = CometLogger(api_key=api_key,
                         workspace="c00k1ez",
                         project_name="low-resource-lm-research",
                         experiment_name=exp_name)

    # get all config data to send in to comet.ml
    config_data = {}
    cfg_raw = config.get()
    for key in cfg_raw.keys():
        config_data.update(dict(cfg_raw[key]))
    logger.experiment.log_parameters(config_data)

    model_name = args.model + '_' + config['dataloaders'][
        'tokenizer_type'].get()

    # setup my custom checkpoint callback
    checkpoint_callback = CustomModelCheckpoint(
        model_name=model_name,
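The loop earlier in this example flattens one level of the nested config into a single dict before sending it to Comet. The same idea as a reusable helper, assuming (as the loop does) that the config is exactly two levels deep:

def flatten_config(cfg_raw):
    # Merge every section's keys into one flat dict for log_parameters.
    flat = {}
    for section in cfg_raw.values():
        flat.update(dict(section))
    return flat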
Example #12
summary(model.generator.to("cuda"), (1, 128, 128, 128))

checkpoints_dir = '{}/checkpoints'.format(primary_directory)
checkpoint_callback = ModelCheckpoint(
    filepath=checkpoints_dir,
    verbose=True,
    save_last=False,
    save_top_k=0,
    monitor='dose_score',
    mode='min'
)
lr_logger = LearningRateLogger()

comet_logger = CometLogger(
    api_key="eyAsnp1KA7fXLxFMkEWKjhygS",
    project_name="kbp-challenge",
    workspace="mahruskazi"
)

checkpoint = None
if opt.load_epoch != -1:
    checkpoint = '{}/epoch={}.ckpt'.format(checkpoints_dir, opt.load_epoch)

trainer = pl.Trainer(logger=comet_logger,
                     resume_from_checkpoint=checkpoint,
                     gpus=1,
                     checkpoint_callback=checkpoint_callback,
                     callbacks=[lr_logger],
                     max_epochs=opt.epoch_count,
                     check_val_every_n_epoch=1,
                     num_sanity_val_steps=10,