Example #1
def main():
    conf = Configs()
    experiment.create(name='probabilities_fixed_cards')
    experiment.calculate_configs(conf, {}, ['run'])
    experiment.add_pytorch_models(dict(model=conf.model))
    experiment.start()
    conf.run()
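Note: every example on this page assumes a project-specific `Configs` class built on labml's configurations API. A minimal sketch of what such a class might look like, using labml's `BaseConfigs` (the fields and `run` body are illustrative assumptions, not taken from the projects above):

from labml.configs import BaseConfigs

class Configs(BaseConfigs):
    # Hypothetical fields; real projects add a model, optimizer, data loaders, etc.
    epochs: int = 10
    batch_size: int = 32

    def run(self):
        # Placeholder for the project's training loop
        pass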
Example #2
def load_experiment(run_uuid: str, checkpoint: Optional[int] = None):
    """
    Load a saved experiment from [train model](train_model.html).
    """

    # Create configurations object
    conf = Configs()
    # Load custom configurations used in the experiment
    conf_dict = experiment.load_configs(run_uuid)
    # We need to get inputs to the feed forward layer, $f(c_i)$
    conf_dict['is_save_ff_input'] = True

    # This experiment is just an evaluation; i.e. nothing is tracked or saved
    experiment.evaluate()
    # Initialize configurations
    experiment.configs(conf, conf_dict, 'run')
    # Set models for saving/loading
    experiment.add_pytorch_models(get_modules(conf))
    # Specify the experiment to load from
    experiment.load(run_uuid, checkpoint)

    # Start the experiment; this is when it actually loads models
    experiment.start()

    return conf
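A hedged usage sketch for `load_experiment` (the UUID is a placeholder, and `conf.model` assumes the loaded Configs exposes the model):

# Load the latest checkpoint of a previous run and switch the model to inference
conf = load_experiment('RUN_UUID', checkpoint=None)
conf.model.eval()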
Example #3
def get_predictor():
    conf = Configs()
    experiment.evaluate()

    # This downloads a pretrained model checkpoint and some cached files
    # as `saved_checkpoint.tar.gz` and extracts the archive.
    #
    # If you have a locally trained model, load it directly instead:
    # run_uuid = 'RUN_UUID'
    # and use the latest checkpoint:
    # checkpoint = None
    run_uuid, checkpoint = experiment.load_bundle(
        lab.get_path() / 'saved_checkpoint.tar.gz',
        url=
        'https://github.com/lab-ml/python_autocomplete/releases/download/0.0.4/transformer_checkpoint.tar.gz'
    )

    conf_dict = experiment.load_configs(run_uuid)
    experiment.configs(conf, conf_dict)
    experiment.add_pytorch_models(get_modules(conf))
    experiment.load(run_uuid, checkpoint)

    experiment.start()
    conf.model.eval()
    return Predictor(conf.model, cache('stoi', lambda: conf.text.stoi),
                     cache('itos', lambda: conf.text.itos))
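Here `get_modules`, `Predictor`, and `cache` come from the surrounding project; `cache` presumably memoizes the vocabulary mappings (`stoi`/`itos`) so they are computed once and reloaded on later calls.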
Example #4
def load_experiment() -> Configs:
    conf = Configs()
    experiment.evaluate()

    # This downloads a pretrained model checkpoint and some cached files
    # as `saved_checkpoint.tar.gz` and extracts the archive.
    #
    # If you have a locally trained model, load it directly instead:
    # run_uuid = 'RUN_UUID'
    # and use the latest checkpoint:
    # checkpoint = None

    # run_uuid = 'a6cff3706ec411ebadd9bf753b33bae6'  # bpe
    # checkpoint = None
    run_uuid, checkpoint = experiment.load_bundle(
        lab.get_path() / 'saved_checkpoint.tar.gz',
        url=
        'https://github.com/lab-ml/python_autocomplete/releases/download/0.0.5/bundle.tar.gz'
    )

    conf_dict = experiment.load_configs(run_uuid)
    conf_dict['text.is_load_data'] = False
    experiment.configs(conf, conf_dict)
    experiment.add_pytorch_models(get_modules(conf))
    experiment.load(run_uuid, checkpoint)

    experiment.start()

    return conf
Example #5
def main():
    conf = Configs()
    experiment.create(name='who_won')
    experiment.calculate_configs(conf, {}, ['run'])
    experiment.add_pytorch_models(dict(model=conf.model))
    experiment.start()
    conf.run()
Example #6
def main():
    conf = Configs()
    experiment.create(name='cifar_10', writers={'sqlite'})
    conf.optimizer = 'adam_optimizer'
    experiment.calculate_configs(conf, {}, ['set_seed', 'run'])
    experiment.add_pytorch_models(dict(model=conf.model))
    experiment.start()
    conf.run()
Example #7
def main():
    conf = Configs()
    experiment.create(name='mnist_latest')
    conf.optimizer = 'adam_optimizer'
    experiment.calculate_configs(conf, {}, ['set_seed', 'run'])
    experiment.add_pytorch_models(dict(model=conf.model))
    experiment.start()
    conf.run()
Example #8
def search(conf: Configs):
    tracker.set_global_step(0)

    experiment.create(name='mnist_hyperparam_tuning')
    experiment.calculate_configs(conf, {}, ['set_seed', 'run'])
    experiment.add_pytorch_models(dict(model=conf.model))
    experiment.start()

    conf.run()
    tracker.reset()
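Because `search` resets the tracker when it finishes, it can be called repeatedly from a driver loop. A hedged sketch of such a grid search (the `learning_rate` field is an assumption about this project's Configs):

# One experiment per hyperparameter value
for lr in [1e-2, 1e-3, 1e-4]:
    conf = Configs()
    conf.learning_rate = lr  # hypothetical config field
    search(conf)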
Example #9
def main():
    conf = Configs()
    experiment.create(name='sklearn', writers={'sqlite'})
    experiment.calculate_configs(conf)

    experiment.add_sklearn_models(dict(model=conf.model))
    experiment.start()
    conf.run()

    experiment.save_checkpoint()
Example #10
def main():
    conf = Configs()
    experiment.create(name='mnist_latest')
    experiment.configs(conf, {
        'device.cuda_device': 0,
        'optimizer.optimizer': 'Adam'
    }, ['seed', 'run'])
    experiment.add_pytorch_models(dict(model=conf.model))
    experiment.start()
    conf.run()
Example #11
def main():
    conf = Configs()
    experiment.create(name='mnist_configs', writers={'sqlite'})
    conf.optimizer = 'sgd_optimizer'
    experiment.calculate_configs(conf,
                                 {},
                                 ['set_seed', 'loop'])
    experiment.add_pytorch_models(dict(model=conf.model))
    experiment.start()
    conf.loop()
Example #12
def main():
    conf = Configs()
    experiment.create(name='configs')
    experiment.calculate_configs(conf, {'optimizer': 'sgd_optimizer'},
                                 ['set_seed', 'run'])
    experiment.start()
    conf.run()

    # save the model
    experiment.save_checkpoint()
Example #13
def setup_and_add():
    for t in range(10):
        tracker.set_scalar(f"loss1.{t}", is_print=t == 0)

    experiment.start()

    for i in monit.loop(1000):
        for t in range(10):
            tracker.add({f'loss1.{t}': i})
            tracker.save()
Example #14
def add_save():
    arr = torch.zeros((1000, 1000))
    experiment.start()

    for i in monit.loop(N):  # `N` (total iterations) is assumed to be defined elsewhere
        for t in range(10):
            arr += 1
        for t in range(10):
            if i == 0:
                tracker.set_scalar(f"loss1.{t}", is_print=t == 0)
        for t in range(10):
            tracker.add({f'loss1.{t}': i})
            tracker.save()
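Examples 13 and 14 differ only in when indicators are declared: up front, before `experiment.start()`, versus lazily on the first loop iteration. A minimal sketch of the same tracking pattern with a single scalar (the experiment name and values are illustrative):

from labml import experiment, monit, tracker

experiment.create(name='tracker_demo')
tracker.set_scalar('loss', is_print=True)  # declare the indicator before starting
experiment.start()
for step in monit.loop(100):
    tracker.add({'loss': 1.0 / (step + 1)})
    tracker.save()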
Example #15
def main():
    conf = Configs()
    experiment.create(name="source_code_eval", comment='lstm model')

    # Replace this with your training experiment UUID
    run_uuid = '6f10a292e77211ea89d69979079dc3d6'
    conf_dict = experiment.load_configs(run_uuid)
    experiment.configs(conf, conf_dict, 'run')
    experiment.add_pytorch_models(get_modules(conf))
    experiment.load(run_uuid)

    experiment.start()
    evaluator = Evaluator(conf.model, conf.text, conf.text.valid, False)
    evaluator.eval()
Example #16
def main():
    conf = Configs()

    experiment.create(name='Battleship_DQN')
    experiment.calculate_configs(conf,
                                 {},
                                 ['set_seed', 'policy', 'target', 'run'])
    experiment.add_pytorch_models(dict(model=conf.policy))
    experiment.start()

    conf.run()

    if conf.is_save_models:
        experiment.save_checkpoint()
Example #17
def get_predictor():
    conf = Configs()
    experiment.evaluate()

    # Replace this with your training experiment UUID
    run_uuid = '39b03a1e454011ebbaff2b26e3148b3d'

    conf_dict = experiment.load_configs(run_uuid)
    experiment.configs(conf, conf_dict)
    experiment.add_pytorch_models(get_modules(conf))
    experiment.load(run_uuid)

    experiment.start()
    conf.model.eval()
    return Predictor(conf.model, cache('stoi', lambda: conf.text.stoi),
                     cache('itos', lambda: conf.text.itos))
Example #18
def main():
    # Create experiment
    experiment.create(name="rotary_shakespeare",
                      comment="rotary value",
                      writers={'screen', 'labml'})
    # Create configs
    conf = Configs()
    # Override configurations
    experiment.configs(
        conf,
        {
            # No fixed positional embeddings
            'transformer.src_embed': 'no_pos',
            'transformer.tgt_embed': 'no_pos',

            # Encoder with RoPE
            'transformer.encoder_attn': 'rotary_value',
            # 'transformer.encoder_attn': 'rotary',

            # Use the rotary PE transformer model
            'model': 'rotary_pe_transformer',

            # Use character level tokenizer
            'tokenizer': 'character',
            # Prompt separator is blank
            'prompt_separator': '',
            # Starting prompt for sampling
            'prompt': 'It is ',
            # Use Tiny Shakespeare dataset
            'text': 'tiny_shakespeare',

            # Use a context size of $512$
            'seq_len': 512,
            # Train for $24$ epochs
            'epochs': 24,
            # Batch size of $16$
            'batch_size': 16,
            # Switch between training and validation $4$ times
            # per epoch
            'inner_iterations': 4,

            # Model size
            'd_model': 128,
            'transformer.ffn.d_ff': 512,
            'transformer.n_heads': 4,
            'transformer.dropout': 0.0,

            # Use the Adam optimizer
            'optimizer.optimizer': 'Adam',
            'optimizer.learning_rate': 2.5e-4,
            'dataloader_shuffle_with_replacement': True
        })

    # Set models for saving and loading
    experiment.add_pytorch_models({'model': conf.model})

    # Start the experiment
    with experiment.start():
        # Run training
        conf.run()
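Note: swapping `'rotary_value'` for the commented-out `'rotary'` selects attention where rotary position embeddings are applied only to the queries and keys, not to the values as well.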
Example #19
def main():
    experiment.create()
    conf = Configs()
    conf.learning_rate = 1e-4
    conf.epochs = 500
    conf.conv_sizes = [(128, 2), (256, 4)]
    # conf.conv_sizes = [(128, 1), (256, 2)]
    conf.activation = 'relu'
    conf.dropout = 0.1
    conf.train_batch_size = 32
    experiment.calculate_configs(conf, {}, ['run'])

    experiment.start()
    with tracker.namespace('valid'):
        conf.valid_dataset.save_artifacts()
    conf.run()
Example #20
def main():
    # Create experiment
    experiment.create(name="feedback_transformer")
    # Create configs
    conf = Configs()
    # Load configurations
    experiment.configs(
        conf,
        # A dictionary of configurations to override
        {
            'tokenizer': 'character',
            'text': 'tiny_shakespeare',
            'optimizer.learning_rate': 1.0,
            'optimizer.optimizer': 'Noam',
            'prompt': 'It is',
            'prompt_separator': '',

            # Use `feedback_transformer` for original feedback transformer
            'model': 'feedback_transformer_kv',
            'train_loader': 'shuffled_train_loader',
            'valid_loader': 'shuffled_valid_loader',
            'seq_len': 128,
            'epochs': 128,
            'batch_size': 64,
            'inner_iterations': 25
        })

    # Set models for saving and loading
    experiment.add_pytorch_models(get_modules(conf))

    # Start the experiment
    with experiment.start():
        # Run the training loop
        conf.run()
Example #21
def main(local_rank,
         rank,
         world_size,
         uuid,
         init_method: str = 'tcp://localhost:23456'):
    with monit.section('Distributed'):
        torch.distributed.init_process_group(
            "gloo",
            timeout=datetime.timedelta(seconds=30),
            init_method=init_method,
            rank=rank,
            world_size=world_size)
    conf = Configs()
    experiment.create(uuid=uuid, name="source_code_ddp", comment='lstm model')
    experiment.distributed(local_rank, world_size)
    experiment.configs(
        conf, {
            'model': 'transformer_model',
            'n_layers': 6,
            'batch_size': 12,
            'epochs': 32,
            'optimizer.optimizer': 'Noam',
            'optimizer.learning_rate': 1.0,
            'device.cuda_device': local_rank,
            'seq_len': 512,
            'train_loader': 'shuffled_train_loader',
            'valid_loader': 'shuffled_valid_loader'
        })
    experiment.add_pytorch_models(model=conf.ddp_model)
    with experiment.start():
        conf.run()
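This `main` expects to be launched once per process. A hedged single-node launcher sketch using `torch.multiprocessing` (on one node the global rank equals the local rank; the run UUID is generated with the standard library):

import uuid
import torch.multiprocessing as mp

def _worker(local_rank, world_size, run_uuid):
    # Single node: global rank == local rank
    main(local_rank, local_rank, world_size, run_uuid)

if __name__ == '__main__':
    world_size = 2
    run_uuid = uuid.uuid4().hex
    mp.spawn(_worker, args=(world_size, run_uuid), nprocs=world_size)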
Example #22
def main():
    """
    ### Run the experiment
    """
    # Create experiment
    experiment.create(name="transformer_xl", comment='')
    # Create configs
    conf = Configs()
    # Load configurations
    experiment.configs(
        conf,
        # A dictionary of configurations to override
        {
            'tokenizer': 'character',
            'text': 'tiny_shakespeare',
            'optimizer.learning_rate': 1.,
            'optimizer.optimizer': 'Noam',
            'prompt': 'I ',
            'prompt_separator': '',
            'train_loader': 'sequential_train_loader',
            'valid_loader': 'sequential_valid_loader',
            'seq_len': 2,
            'mem_len': 32,
            'epochs': 128,
            'batch_size': 32,
            'inner_iterations': 25,
        })

    # Set models for saving and loading
    experiment.add_pytorch_models({'model': conf.model})

    # Start the experiment
    with experiment.start():
        # `TrainValidConfigs.run`
        conf.run()
Example #23
def main():
    # Create experiment
    experiment.create(name="hyper_lstm", comment='')
    # Create configs
    conf = Configs()
    # Load configurations
    experiment.configs(conf,
                       # A dictionary of configurations to override
                       {'tokenizer': 'character',
                        'text': 'tiny_shakespeare',
                        'optimizer.learning_rate': 2.5e-4,
                        'optimizer.optimizer': 'Adam',
                        'prompt': 'It is',
                        'prompt_separator': '',

                        'rnn_model': 'hyper_lstm',

                        'train_loader': 'shuffled_train_loader',
                        'valid_loader': 'shuffled_valid_loader',

                        'seq_len': 512,
                        'epochs': 128,
                        'batch_size': 2,
                        'inner_iterations': 25})

    # Set models for saving and loading
    experiment.add_pytorch_models(get_modules(conf))

    # Start the experiment
    with experiment.start():
        # `TrainValidConfigs.run`
        conf.run()
Example #24
def main():
    # Create experiment
    experiment.create(name='diffuse', writers={'screen', 'comet'})

    # Create configurations
    configs = Configs()

    # Set configurations. You can override the defaults by passing the values in the dictionary.
    experiment.configs(
        configs,
        {
            'dataset': 'CelebA',  # 'MNIST'
            'image_channels': 3,  # 1,
            'epochs': 100,  # 5,
        })

    # Initialize
    configs.init()

    # Set models for saving and loading
    experiment.add_pytorch_models({'eps_model': configs.eps_model})

    # Start and run the training loop
    with experiment.start():
        configs.run()
Example #25
def main():
    """
    ### Train StyleGAN2
    """

    # Create an experiment
    experiment.create(name='stylegan2')
    # Create configurations object
    configs = Configs()

    # Set configurations and override some
    experiment.configs(configs, {
        'device.cuda_device': 0,
        'image_size': 64,
        'log_generated_interval': 200
    })

    # Initialize
    configs.init()
    # Set models for saving and loading
    experiment.add_pytorch_models(mapping_network=configs.mapping_network,
                                  generator=configs.generator,
                                  discriminator=configs.discriminator)

    # Start the experiment
    with experiment.start():
        # Run the training loop
        configs.train()
Example #26
def main():
    conf = Configs()
    experiment.create(name='mnist_gan', comment='test')
    experiment.configs(conf,
                       {'label_smoothing': 0.01})
    with experiment.start():
        conf.run()
Example #27
def main():
    conf = Configs()
    # Assign one of transformer_model, transformer_xl_model, lstm_model, or rhn_model
    experiment.create(name="source_code", comment='bpe')
    experiment.configs(
        conf,
        {
            # 'model': 'transformer_model',
            'model': 'transformer_xl_model',
            'n_layers': 6,
            'epochs': 32,
            'optimizer.optimizer': 'AdamW',
            'optimizer.learning_rate': 1.25e-4,
            'device.cuda_device': 0,
            'is_token_by_token': True,
            'state_updater': 'transformer_memory',
            'mem_len': 256,
            'text.is_shuffle': False,
            'text.tokenizer': 'bpe',
            'text.batch_size': 12,
            'text.seq_len': 256,
            #
            # 'inner_iterations': 10,
            # 'text.truncate_data': 100_000,
        })
    experiment.add_pytorch_models(model=conf.model)
    with experiment.start():
        conf.run()
Example #28
def main(local_rank,
         rank,
         world_size,
         uuid,
         init_method: str = 'tcp://localhost:23456'):
    with monit.section('Distributed'):
        torch.distributed.init_process_group(
            "gloo",
            timeout=datetime.timedelta(seconds=30),
            init_method=init_method,
            rank=rank,
            world_size=world_size)
    conf = Configs()
    experiment.create(uuid=uuid, name='mnist ddp')
    experiment.distributed(local_rank, world_size)
    experiment.configs(
        conf, {
            'optimizer.optimizer': 'Adam',
            'optimizer.learning_rate': 1e-4,
            'model': 'ddp_model',
            'device.cuda_device': local_rank
        })
    conf.set_seed.set()
    experiment.add_pytorch_models(dict(model=conf.model))
    with experiment.start():
        conf.run()
Example #29
def main():
    # Create experiment
    experiment.create(name='ViT', comment='cifar10')
    # Create configurations
    conf = Configs()
    # Load configurations
    experiment.configs(
        conf,
        {
            # Optimizer
            'optimizer.optimizer': 'Adam',
            'optimizer.learning_rate': 2.5e-4,

            # Transformer embedding size
            'transformer.d_model': 512,

            # Training epochs and batch size
            'epochs': 1000,
            'train_batch_size': 64,

            # Augment CIFAR 10 images for training
            'train_dataset': 'cifar10_train_augmented',
            # Do not augment CIFAR 10 images for validation
            'valid_dataset': 'cifar10_valid_no_augment',
        })
    # Set model for saving/loading
    experiment.add_pytorch_models({'model': conf.model})
    # Start the experiment and run the training loop
    with experiment.start():
        conf.run()
Example #30
File: __init__.py  Project: wx-b/nn
def main(run_uuid: str, checkpoint: int):
    """
    Train a small model with distillation
    """
    # Load saved model
    large_model = get_saved_model(run_uuid, checkpoint)
    # Create experiment
    experiment.create(name='distillation', comment='cifar10')
    # Create configurations
    conf = Configs()
    # Set the loaded large model
    conf.large = large_model
    # Load configurations
    experiment.configs(
        conf, {
            'optimizer.optimizer': 'Adam',
            'optimizer.learning_rate': 2.5e-4,
            'model': '_small_student_model',
        })
    # Set model for saving/loading
    experiment.add_pytorch_models({'model': conf.model})
    # Start experiment from scratch
    experiment.load(None, None)
    # Start the experiment and run the training loop
    with experiment.start():
        conf.run()
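A hedged invocation sketch (both arguments are placeholders for an actual training run of the large model):

# Distill from a previously trained large model checkpoint
main(run_uuid='RUN_UUID', checkpoint=123)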