Code example #1
def test_train(
    validate,
    clip_grad_norm_,
    process_batch,
    DataLoader,
    VoiceDataset,
    Adam,
    Tacotron2Loss,
    Tacotron2,
    get_available_memory,
    is_available,
    calc_avgmax_attention,
):
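    # The arguments above are test doubles (e.g. pytest-mock fixtures) standing in
    # for the real training dependencies that get patched during this test.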
    metadata_path = os.path.join("test_samples", "dataset", "metadata.csv")
    dataset_directory = os.path.join("test_samples", "dataset", "wavs")
    output_directory = "checkpoint"
    train_size = 0.67

    train(
        metadata_path,
        dataset_directory,
        output_directory,
        epochs=1,
        batch_size=1,
        early_stopping=False,
        multi_gpu=False,
        train_size=train_size,
    )

    # Check checkpoint
    checkpoint_path = os.path.join(output_directory, "checkpoint_2")
    assert os.path.isfile(checkpoint_path)

    shutil.rmtree(output_directory)
Code example #2
def train_classifier(config: Config):
    config_json = config.toDictionary()
    print('train_classifier')
    print(config_json)
    from training.train import train
    from torch.utils.data.dataloader import DataLoader
    from data.loader_segmentation import Segmentation

    model = get_model(config.classifier_name)
    
    wandb.init(entity='kobus_wits', project='wass_classifier', name=config.sweep_id + '_c_' + config.classifier_name, config=config_json)
    wandb.watch(model)

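    # Train the classifier; validation_mod=10 presumably triggers a validation pass
    # every 10th cycle.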
    train(
        model=model,
        dataloaders = {
            'train': DataLoader(
                Segmentation(
                    config.classifier_dataset_root,
                    source='train',
                    augmentation='train',
                    image_size=config.classifier_image_size
                ),
                batch_size=config.classifier_batch_size_train,
                shuffle=True,
                pin_memory=True,
                num_workers=4,
                prefetch_factor=4
            ),
        },
        epochs=config.classifier_epochs,
        validation_mod=10
    )

    wandb.finish()
Code example #3
    def test_checkpointing(self):
        callback_step_count = 0

        def callback(output_location, step, model, optimizer, logger):
            nonlocal callback_step_count
            callback_step_count += 1

        # Train to epoch 1, iteration 1.
        train.train(self.hparams.training_hparams,
                    self.model,
                    self.train_loader,
                    self.root,
                    callbacks=self.callbacks,
                    end_step=Step.from_epoch(1, 1, len(self.train_loader)))

        # Add a step-counting callback.
        self.callbacks.append(callback)

        # Train to epoch 1, iteration 1 again. Checkpointing should ensure we
        # only train for one step.
        train.train(self.hparams.training_hparams,
                    self.model,
                    self.train_loader,
                    self.root,
                    callbacks=self.callbacks,
                    end_step=Step.from_epoch(1, 1, len(self.train_loader)))

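        # Exactly two callback invocations confirm that training resumed from the
        # checkpoint instead of replaying the earlier steps.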
        self.assertEqual(callback_step_count, 2)
Code example #4
File: test_train.py Project: sbam13/open_lth
    def test_nonrepeatable_data_order_without_seed(self):
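        # Without a data_order_seed, every run should draw a different data order.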
        del self.hparams.training_hparams.data_order_seed

        init = {
            k: v.clone().detach()
            for k, v in self.model.state_dict().items()
        }

        # Train the model once and get the state.
        train.train(self.hparams.training_hparams,
                    self.model,
                    self.train_loader,
                    self.root,
                    callbacks=[self.callback],
                    start_step=Step.from_epoch(1, 0, len(self.train_loader)),
                    end_step=Step.from_epoch(1, 1, len(self.train_loader)))
        state1 = TestTrain.get_state(self.model)

        # Train the model again and get the state.
        self.model.load_state_dict(init)
        train.train(self.hparams.training_hparams,
                    self.model,
                    self.train_loader,
                    self.root,
                    callbacks=[self.callback],
                    start_step=Step.from_epoch(1, 0, len(self.train_loader)),
                    end_step=Step.from_epoch(1, 1, len(self.train_loader)))
        state2 = TestTrain.get_state(self.model)

        # Ensure that the model states are NOT the same.
        for k in state1:
            self.assertFalse(np.array_equal(state1[k], state2[k]))
Code example #5
File: test_train.py Project: sbam13/open_lth
    def test_different_data_order_on_different_epochs(self):
        del self.hparams.training_hparams.gamma
        del self.hparams.training_hparams.milestone_steps
        del self.hparams.training_hparams.warmup_steps
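        # With the LR schedule stripped out, the only difference between the two
        # runs below is the starting epoch, and hence the data order.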

        init = {
            k: v.clone().detach()
            for k, v in self.model.state_dict().items()
        }

        # Train the model once and get the state.
        train.train(self.hparams.training_hparams,
                    self.model,
                    self.train_loader,
                    self.root,
                    callbacks=[self.callback],
                    start_step=Step.from_epoch(1, 0, len(self.train_loader)),
                    end_step=Step.from_epoch(1, 1, len(self.train_loader)))
        state1 = TestTrain.get_state(self.model)

        # Train the model again and get the state.
        self.model.load_state_dict(init)
        train.train(self.hparams.training_hparams,
                    self.model,
                    self.train_loader,
                    self.root,
                    callbacks=[self.callback],
                    start_step=Step.from_epoch(2, 0, len(self.train_loader)),
                    end_step=Step.from_epoch(2, 1, len(self.train_loader)))
        state2 = TestTrain.get_state(self.model)

        # Ensure that the model states are NOT the same.
        for k in state1:
            self.assertFalse(np.array_equal(state1[k], state2[k]))
Code example #6
    def test_first_step(self):
        init_state = TestStandardCallbacks.get_state(self.model)

        train.train(self.hparams.training_hparams,
                    self.model,
                    self.train_loader,
                    self.root,
                    callbacks=self.callbacks,
                    end_step=Step.from_epoch(0, 1, len(self.train_loader)))

        # Check that the initial state has been saved.
        model_state_loc = paths.model(self.root,
                                      Step.zero(len(self.train_loader)))
        self.assertTrue(os.path.exists(model_state_loc))

        # Check that the model state at init reflects the saved state.
        self.model.load_state_dict(torch.load(model_state_loc))
        saved_state = TestStandardCallbacks.get_state(self.model)
        self.assertStateEqual(init_state, saved_state)

        # Check that the checkpoint file exists.
        self.assertTrue(os.path.exists(paths.checkpoint(self.root)))

        # Check that the logger file doesn't exist.
        self.assertFalse(os.path.exists(paths.logger(self.root)))
Code example #7
    def test_last_step(self):
        train.train(self.hparams.training_hparams,
                    self.model,
                    self.train_loader,
                    self.root,
                    callbacks=self.callbacks,
                    start_step=Step.from_epoch(2, 11, len(self.train_loader)),
                    end_step=Step.from_epoch(3, 0, len(self.train_loader)))

        end_state = TestStandardCallbacks.get_state(self.model)

        # Check that final state has been saved.
        end_loc = paths.model(self.root,
                              Step.from_epoch(3, 0, len(self.train_loader)))
        self.assertTrue(os.path.exists(end_loc))

        # Check that the final state that is saved matches the final state of the network.
        self.model.load_state_dict(torch.load(end_loc))
        saved_state = TestStandardCallbacks.get_state(self.model)
        self.assertStateEqual(end_state, saved_state)

        # Check that the logger has the right number of entries.
        self.assertTrue(os.path.exists(paths.logger(self.root)))
        logger = MetricLogger.create_from_file(self.root)
        self.assertEqual(len(logger.get_data('train_loss')), 1)
        self.assertEqual(len(logger.get_data('test_loss')), 1)
        self.assertEqual(len(logger.get_data('train_accuracy')), 1)
        self.assertEqual(len(logger.get_data('test_accuracy')), 1)

        # Check that the checkpoint file exists.
        self.assertTrue(os.path.exists(paths.checkpoint(self.root)))
Code example #8
File: model.py Project: nervosum/proof-of-concepts
def load_most_recent_model(
    model_dir: str,
) -> Tuple[Pipeline, Dict[str, str], Dict[str, str]]:
    """Load the most recent model from file. If none exists yet, train one.

    Args:
        model_dir (str): Directory where models are stored.

    Returns:
        Tuple[Pipeline, Dict[str, str], Dict[str, str]]: The trained model,
        its input schema, and its metadata.
    """

    try:
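        # Pick the latest version; assumes version directory names sort
        # lexicographically in chronological order.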
        version = max(os.listdir(model_dir))
        logger.info("Load model")
        model_dir = os.path.join(model_dir, version)

        return (
            joblib.load(os.path.join(model_dir, "model.joblib")),
            load_json(os.path.join(model_dir, "schema.json")),
            load_json(os.path.join(model_dir, "metadata.json")),
        )

    except Exception as e:
        logger.error(e)
        logger.info("No model found, let's train one!")
        train(model_dir=model_dir)
        return load_most_recent_model(model_dir)
Code example #9
def start():
    import numpy as np
    import cv2, wandb
    from artifacts.artifact_manager import artifact_manager
    from torch.utils.data.dataloader import DataLoader
    from models.wass import WASS
    from models.unet import UNet
    from models.fiiyc import FIIYC
    from models.vgg16 import Vgg16GAP
    from training.train import train
    from data.loader_segmentation import VOCSegmentation

    artifact_manager.setArtifactContainer('CAM')
    model = Vgg16GAP()
    # wandb.init(entity='kobus_wits', project='wass_adv', name='WASS_vgg16_cam')
    # train(
    #     model=model,
    #     dataloaders = {
    #         'train': DataLoader(VOCSegmentation('train', dataset='voc'), batch_size=32, shuffle=True, num_workers=8),
    #         'val': DataLoader(VOCSegmentation('val', dataset='voc'), batch_size=32, shuffle=False, num_workers=8)
    #     },
    #     epochs=10,
    #     validation_mod=15000
    # )
    # wandb.finish()

    # artifact_manager.setArtifactContainer('WASS_unet')
    # model = UNet()
    # wandb.init(entity='kobus_wits', project='wass_adv', name='WASS_unet')
    # train(
    #     model=model,
    #     dataloaders = {
    #         'train': DataLoader(VOCSegmentation('train', dataset='voco'), batch_size=16, shuffle=True, num_workers=8),
    #         'val': DataLoader(VOCSegmentation('val', dataset='voco'), batch_size=16, shuffle=False, num_workers=8)
    #     },
    #     epochs=1,
    #     validation_mod=15000
    # )
    # wandb.finish()

    artifact_manager.setArtifactContainer('WASS_base')
    model = WASS()
    wandb.init(entity='kobus_wits', project='wass_adv', name='WASS_base')
    train(model=model,
          dataloaders={
              'train':
              DataLoader(VOCSegmentation('train', dataset='voco'),
                         batch_size=16,
                         shuffle=True,
                         num_workers=8),
              'val':
              DataLoader(VOCSegmentation('val', dataset='voco'),
                         batch_size=16,
                         shuffle=False,
                         num_workers=8)
          },
          epochs=1000,
          validation_mod=15000)
    wandb.finish()
Code example #10
File: test_train.py Project: sbam13/open_lth
    def test_train_in_full(self):
        train.train(self.hparams.training_hparams,
                    self.model,
                    self.train_loader,
                    self.root,
                    callbacks=[self.callback])

        self.assertEqual(self.step_counter, 37)
        self.assertEqual(self.ep, 3)
        self.assertEqual(self.it, 0)
        self.assertEqual(self.lr, 0.01)
Code example #11
File: test_train.py Project: sbam13/open_lth
    def test_train_in_full_later_start(self):
        train.train(self.hparams.training_hparams,
                    self.model,
                    self.train_loader,
                    self.root,
                    callbacks=[self.callback],
                    start_step=Step.from_epoch(1, 5, len(self.train_loader)))

        self.assertEqual(self.step_counter, 20)
        self.assertEqual(self.ep, 3)
        self.assertEqual(self.it, 0)
        self.assertEqual(self.lr, 0.01)
Code example #12
File: test_train.py Project: sbam13/open_lth
    def test_train_more_than_two_epochs(self):
        train.train(self.hparams.training_hparams,
                    self.model,
                    self.train_loader,
                    self.root,
                    callbacks=[self.callback],
                    end_step=Step.from_epoch(2, 1, len(self.train_loader)))

        self.assertEqual(self.step_counter, 26)
        self.assertEqual(self.ep, 2)
        self.assertEqual(self.it, 1)
        self.assertEqual(self.lr, 0.01)
Code example #13
def start_training(cuda, epochs, general_seed, tensorflow_seed, batch_size,
                   buffer_size, learning_rate):
    # Disable GPU support if no GPUs are supposed to be used
    if not cuda:
        tf.config.set_visible_devices([], 'GPU')

    with mlflow.start_run():
        # Enable logging of all parameters, metrics and models to MLflow and TensorBoard
        mlflow.tensorflow.autolog()

        # Fix all random seeds and TensorFlow-specific reproducibility settings
        set_general_random_seeds(general_seed)
        set_tensorflow_random_seeds(tensorflow_seed)

        # Use MirroredStrategy for multi-GPU support
        strategy = tf.distribute.MirroredStrategy()
        click.echo(
            click.style(f'Number of devices: {strategy.num_replicas_in_sync}',
                        fg='blue'))

        # Fetch and prepare dataset
        train_dataset, eval_dataset = load_train_test_data(
            strategy, batch_size, buffer_size, tensorflow_seed)

        with strategy.scope():
            # Define and compile the model
            model = create_model(input_shape=(28, 28, 1))
            model.compile(
                loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
                metrics=['accuracy'])

            # Train the model, then evaluate it
            runtime = time.time()
            train(model, epochs, train_dataset)
            eval_loss, eval_acc = test(model, eval_dataset)
            click.echo(f'Test loss: {eval_loss}, Test Accuracy: {eval_acc}')

            device = 'GPU' if cuda else 'CPU'
            click.echo(
                click.style(
                    f'{device} Run Time: {str(time.time() - runtime)} seconds',
                    fg='green'))

            # Log hardware and software
            log_sys_intel_conda_env()

            click.echo(
                click.style(
                    f'\nLaunch TensorBoard with:\ntensorboard --logdir={os.path.join(mlflow.get_artifact_uri(), "tensorboard_logs", "train")}',
                    fg='blue'))
Code example #14
def start_training(cuda, epochs, general_seed, pytorch_seed, log_interval,
                   training_batch_size, test_batch_size, learning_rate):
    # Configure GPU usage
    use_cuda = cuda == 'True' and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    if use_cuda and torch.cuda.device_count() > 0:
        click.echo(click.style(f'Using {torch.cuda.device_count()} GPUs!', fg='blue'))

    # Set all random seeds and possibly turn off GPU non-determinism
    set_general_random_seeds(general_seed)
    set_pytorch_random_seeds(pytorch_seed, use_cuda=use_cuda)

    # Load training and testing data
    train_loader, test_loader = load_train_test_data(training_batch_size, test_batch_size)

    # Define the model and optimizer
    if torch.cuda.device_count() > 1:
        model = create_parallel_model()
    else:
        model = create_model()
    model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    with mlflow.start_run():
        # Create a SummaryWriter to write TensorBoard events locally
        events_output_dir = tempfile.mkdtemp()
        writer = SummaryWriter(events_output_dir)
        click.echo(click.style(f'Writing TensorBoard events locally to {events_output_dir}\n', fg='blue'))

        # Start training
        runtime = time.time()
        for epoch in range(1, epochs + 1):
            train(use_cuda, model, epoch, optimizer, log_interval, train_loader, writer)
            test(use_cuda, model, epoch, test_loader, writer)
        device = 'GPU' if use_cuda else 'CPU'
        click.echo(click.style(f'{device} Run Time: {str(time.time() - runtime)} seconds', fg='green'))

        # Close the writer so the model can be logged
        writer.close()

        # Log the model to mlflow
        click.echo(click.style('Logging model to mlflow...', fg='blue'))
        mlflow.pytorch.log_model(model, 'models')

        # Log hardware and software
        log_sys_intel_conda_env()

        # Upload the TensorBoard event logs as a run artifact
        click.echo(click.style('Uploading TensorBoard events as a run artifact...', fg='blue'))
        mlflow.log_artifacts(events_output_dir, artifact_path='events')
        click.echo(click.style(f'\nLaunch TensorBoard with:\ntensorboard --logdir={os.path.join(mlflow.get_artifact_uri(), "events")}', fg='blue'))
Code example #15
File: test_train.py Project: sbam13/open_lth
    def test_train_two_epoch_late_start(self):
        train.train(self.hparams.training_hparams,
                    self.model,
                    self.train_loader,
                    self.root,
                    callbacks=[self.callback],
                    start_step=Step.from_epoch(0, 5, len(self.train_loader)),
                    end_step=Step.from_epoch(2, 5, len(self.train_loader)))

        self.assertEqual(self.step_counter, 25)
        self.assertEqual(self.ep, 2)
        self.assertEqual(self.it, 5)
        self.assertEqual(self.lr, 0.01)
Code example #16
File: test_train.py Project: sbam13/open_lth
    def test_train_one_epoch(self):
        train.train(self.hparams.training_hparams,
                    self.model,
                    self.train_loader,
                    self.root,
                    callbacks=[self.callback],
                    end_step=Step.from_epoch(1, 0, len(self.train_loader)))

        self.assertEqual(self.step_counter,
                         13)  # Same as len(self.train_loader) + 1
        self.assertEqual(self.ep, 1)
        self.assertEqual(self.it, 0)
        self.assertEqual(self.lr, 0.1)
Code example #17
File: test_train.py Project: sbam13/open_lth
    def test_train_zero_steps(self):
        before = TestTrain.get_state(self.model)

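        # Training to an end step of zero iterations should be a no-op on the weights.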
        train.train(self.hparams.training_hparams,
                    self.model,
                    self.train_loader,
                    self.root,
                    callbacks=[self.callback],
                    end_step=Step.from_iteration(0, len(self.train_loader)))

        after = TestTrain.get_state(self.model)
        for k in before:
            self.assertTrue(np.array_equal(before[k], after[k]))
        self.assertEqual(self.step_counter, 0)
        self.assertEqual(self.ep, 0)
        self.assertEqual(self.it, 0)
Code example #18
File: test_train.py Project: sbam13/open_lth
    def test_train_one_step_late_start(self):
        before = TestTrain.get_state(self.model)

        train.train(self.hparams.training_hparams,
                    self.model,
                    self.train_loader,
                    self.root,
                    callbacks=[self.callback],
                    start_step=Step.from_epoch(0, 5, len(self.train_loader)),
                    end_step=Step.from_epoch(0, 6, len(self.train_loader)))

        after = TestTrain.get_state(self.model)
        for k in before:
            self.assertFalse(np.array_equal(before[k], after[k]))
        self.assertEqual(self.step_counter, 2)
        self.assertEqual(self.ep, 0)
        self.assertEqual(self.it, 6)
        self.assertEqual(self.lr, 0.06)
Code example #19
File: test_train.py Project: sbam13/open_lth
    def test_train_two_steps(self):
        before = TestTrain.get_state(self.model)

        train.train(self.hparams.training_hparams,
                    self.model,
                    self.train_loader,
                    self.root,
                    callbacks=[self.callback],
                    end_step=Step.from_iteration(2, len(self.train_loader)))

        after = TestTrain.get_state(self.model)
        for k in before:
            with self.subTest(k=k):
                self.assertFalse(np.array_equal(before[k], after[k]), k)

        self.assertEqual(self.step_counter, 3)
        self.assertEqual(self.ep, 0)
        self.assertEqual(self.it, 2)
        self.assertEqual(self.lr, 0.02)
Code example #20
    def test_end_to_end(self):
        init_loc = paths.model(self.root, Step.zero(len(self.train_loader)))
        end_loc = paths.model(self.root,
                              Step.from_epoch(3, 0, len(self.train_loader)))

        init_state = TestStandardCallbacks.get_state(self.model)

        train.train(self.hparams.training_hparams,
                    self.model,
                    self.train_loader,
                    self.root,
                    callbacks=self.callbacks,
                    start_step=Step.from_epoch(0, 0, len(self.train_loader)),
                    end_step=Step.from_epoch(3, 0, len(self.train_loader)))

        end_state = TestStandardCallbacks.get_state(self.model)

        # Check that final state has been saved.
        self.assertTrue(os.path.exists(init_loc))
        self.assertTrue(os.path.exists(end_loc))

        # Check that the checkpoint file still exists.
        self.assertTrue(os.path.exists(paths.checkpoint(self.root)))

        # Check that the initial and final states match those that were saved.
        self.model.load_state_dict(torch.load(init_loc))
        saved_state = TestStandardCallbacks.get_state(self.model)
        self.assertStateEqual(init_state, saved_state)

        self.model.load_state_dict(torch.load(end_loc))
        saved_state = TestStandardCallbacks.get_state(self.model)
        self.assertStateEqual(end_state, saved_state)

        # Check that the logger has the right number of entries.
        self.assertTrue(os.path.exists(paths.logger(self.root)))
        logger = MetricLogger.create_from_file(self.root)
        self.assertEqual(len(logger.get_data('train_loss')), 4)
        self.assertEqual(len(logger.get_data('test_loss')), 4)
        self.assertEqual(len(logger.get_data('train_accuracy')), 4)
        self.assertEqual(len(logger.get_data('test_accuracy')), 4)
Code example #21
def train_affinitynet(config: Config):
    config_json = config.toDictionary()
    print('train_affinitynet')
    print(config_json)
    from training.train import train
    from torch.utils.data.dataloader import DataLoader
    from data.loader_segmentation import Segmentation
    from artifacts.artifact_manager import artifact_manager

    model = get_model(config.affinity_net_name)
    
    wandb.init(entity='kobus_wits', project='wass_affinity', name=config.sweep_id + '_a_' + config.affinity_net_name, config=config_json)
    wandb.watch(model)

    train(
        model=model,
        dataloaders = {
            'train': DataLoader(
                Segmentation(
                    config.classifier_dataset_root,
                    source='train',
                    augmentation='train',
                    image_size=config.affinity_net_image_size,
                    requested_labels=['affinity'],
                    affinity_root=artifact_manager.getDir()
                ),
                batch_size=config.affinity_net_batch_size,
                shuffle=False,
                pin_memory=False,
                num_workers=4,
                prefetch_factor=4
            ),
        },
        epochs=config.affinity_net_epochs,
        validation_mod=10
    )

    wandb.finish()
Code example #22
def neural():

    datasets = [
        # {
        #     'name': 'netflix',
        #     'nTimes': 10,
        #     'rate': 0.0001
        # },
        {
            'name': 'SPECT',
            'nTimes': 1000,
            'rate': 0.001
        }
    ]

    mh = 13  # number of neurons in the hidden layer

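    # Train a network on each dataset, then evaluate the learned weights (thetas).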
    for ds in datasets:
        thetas = train(ds['name'], ds['nTimes'], ds['rate'], mh)
        test(ds['name'], thetas)
Code example #23
def main():

    env = UnityEnvironment(
        file_name="/home/faten/projects/deep-reinforcement-learning/p1_navigation/Banana_Linux/Banana.x86_64"
    )

    brain_name = env.brain_names[0]
    brain = env.brains[brain_name]

    action_size = brain.vector_action_space_size
    env_info = env.reset(train_mode=True)[brain_name]
    state = env_info.vector_observations[0]
    state_size = len(state)

    agent = DQNAgent(state_size, action_size, seed=0)

    scores = train(env, agent)

    # plot the scores
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plt.plot(np.arange(len(scores)), scores)
    plt.ylabel('Score')
    plt.xlabel('Episode #')
    plt.show()

    agent.qnetwork_local.load_state_dict(torch.load('checkpoint.pth'))

    # Watch the trained agent for a few episodes, stepping the Unity environment
    # through its brain-based API (UnityEnvironment has no gym-style step/render).
    for i in range(3):
        env_info = env.reset(train_mode=False)[brain_name]
        state = env_info.vector_observations[0]
        for j in range(200):
            action = agent.act(state)
            env_info = env.step(action)[brain_name]
            state = env_info.vector_observations[0]
            if env_info.local_done[0]:
                break

    env.close()
Code example #24
File: main.py Project: SergejVolkov/CIFAR_classifier
import torch
import models.model_conv_best as model
import log_utils.log_tensorboard as log
# import training.scheduler as scheduler
from training.train import train
import training.dataset as ds
from training.validation import test

if __name__ == "__main__":
    log.init("lel_kek")

    # Try to use GPU
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)

    # Create an instance of the model
    net = model.Net()
    net.to(device)
    # PATH = 'model_instances/cifar_net_79%_best.pth'
    # net.load_state_dict(torch.load(PATH))

    train(net, epoch_count=20, start_epoch=10, use_scheduler=False)
    # test(net)

    # Save our beautiful model for future generations
    # PATH = 'model_instances/cifar_net_tmp.pth'
    # torch.save(net.state_dict(), PATH)
Code example #25
File: boost_clean.py Project: mohamedyd/CPClean
def train_classifiers(X_train_list, y_train, model):
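    # Train one classifier per candidate training set and collect them all.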
    C_list = []
    for X_train in X_train_list:
        C = train(X_train, y_train, model)
        C_list.append(C)
    return C_list
Code example #26
    checkpointing = True
    freq_checkpointing = 10
    reload = False
    model_full_name = "test_dbc4"
    model_id_pref = ""
    add_start_char = 1
    add_end_char = 1
    dict_path = "./dictionaries"

    model_dir = os.path.join(CHECKPOINT_DIR, model_full_name+"-folder")

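    # When reloading, reuse the dictionaries saved alongside the checkpointed model.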
    if reload:
        dict_path = os.path.join(model_dir, "dictionaries")
        train(test_path, test_path, n_epochs=n_epochs, normalization=normalization, batch_size=batch_size,
              dict_path=dict_path, model_dir=model_dir, add_start_char=add_start_char, add_end_char=add_end_char,
              freq_checkpointing=freq_checkpointing, reload=reload, model_specific_dictionary=True, debug=False,
              model_full_name=model_full_name,
              )
    else:
        train(dev_path, test_path, n_epochs=n_epochs, normalization=normalization,
              batch_size=batch_size,
              model_specific_dictionary=True,
              dict_path=None, model_dir=None, add_start_char=add_start_char,
              add_end_char=add_end_char, use_gpu=None,
              label_train=REPO_DATASET[test_path], label_dev=REPO_DATASET[test_path],
              freq_checkpointing=freq_checkpointing, reload=reload, model_id_pref="test",  # alternatively "compare_normalization_all"
              hidden_size_encoder=250, output_dim=300, char_embedding_dim=300, debug=False,
              hidden_size_sent_encoder=250,
              hidden_size_decoder=300, print_raw=False, checkpointing=True
              )
# TODO: add DEV_5 to decode_sequence: 3d shapes in the decoded sequence also
Code example #27
                         activation=act,
                         mask_value=MASK_VALUE,
                         regularization=None)

    # '{layer_pre}x{n_nodes}*{act}->[{memory}]{n_lstm}->{layer_pos}{n_nodes}*{act}'
    log_dir = 'out/{}-{}-{}-{}{}-{}-{}-{}'.format(
        n_dense_pre, int(n_node * 10), act, memory_unit, n_memory, n_dense_pos,
        int(n_stations / n_dense_pre), act)

    train(
        radius=radius,
        batch_size=batch_size,
        log_dir=log_dir,
        t_train_h=t_train_h,
        t_pred_d=t_pred_d,
        t_pred_resolution_h=t_pred_resolution_h,
        model_name=model,
        filenames_train=filenames_train,
        filenames_valid=filenames_valid,
        features_train=features_train,
        features_predict=features_predict,
    )
"""
Experiment II: Activation Function
"""
n_dense_pos = 3
n_node = 1.0
acts = 'relu', 'leaky_relu', 'tanh'
n_memory = 2
n_dense_pre = 3
memory_unit = 'lstm'
Code example #28
        train_tasks.append(train_task)
    override_cfg = ["DATASETS.TRAIN", tuple(train_tasks)]

    # validation tasks
    if args.val_job_id is not None:
        valid_tasks = []
        for val_job_id in args.val_job_id:
            data, task_id = get_data_set(val_job_id)
            coco_json = f"datasets/valid_cvat_{task_id}.coco.json"
            with open(coco_json, "w") as f:
                json.dump(data, f)
            valid_task = f"cvat/valid_{task_id}"
            register_coco_instances(valid_task, {}, coco_json, args.cvat_base)
            valid_tasks.append(valid_task)
        override_cfg.extend(["DATASETS.TEST", tuple(valid_tasks)])

    if args.output is not None:
        override_cfg.extend(["OUTPUT_DIR", args.output])
        override_cfg.extend(
            ["MODEL.WEIGHTS", f"{args.output}/model_final.pth"])

    if args.epoch is not None:
        override_cfg.extend(["SOLVER.MAX_ITER", args.epoch])
    train(
        config_file=args.config,
        override_cfg=override_cfg,
        resume=args.resume,
        restart=args.restart,
        force_test=args.force_test,
    )
Code example #29
use_gpu = False
batch_size = 2
n_epochs = 1
dropout_sent_encoder = 0
dropout_word_encoder = 0
dropout_word_decoder = 0
model_id_pref = "time"
hidden_size_encoder = 250
output_dim = 100
char_embedding_dim = 51
hidden_size_sent_encoder = 125
hidden_size_decoder = 300
n_layers_word_encoder = 1
dir_sent_src = 1
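# Train the normalization model end to end; timing=True presumably enables the
# timing diagnostics suggested by the "time" model_id_pref.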
model_full_name = train(train_path, test_path, n_epochs=n_epochs, normalization=True,
                        batch_size=batch_size, model_specific_dictionary=True,
                        dict_path=None, model_dir=None, add_start_char=1,
                        add_end_char=1, use_gpu=use_gpu, verbose=1,
                        word_recurrent_cell_decoder="LSTM", word_recurrent_cell_encoder="LSTM",
                        clipping=0.5, char_src_attention=True, unrolling_word=True,
                        shared_context="all",
                        label_train=REPO_DATASET[train_path], label_dev=REPO_DATASET[test_path],
                        freq_checkpointing=10, reload=False, model_id_pref=model_id_pref,
                        hidden_size_encoder=hidden_size_encoder, output_dim=output_dim,
                        char_embedding_dim=char_embedding_dim,
                        hidden_size_sent_encoder=hidden_size_sent_encoder, hidden_size_decoder=hidden_size_decoder,
                        n_layers_word_encoder=n_layers_word_encoder,
                        print_raw=False, debug=False, timing=True,
                        dir_sent_encoder=dir_sent_src,
                        checkpointing=True)
Code example #30
#from src.evaluation.test import test
from training.train import train

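# Train with the "batch16" configuration; the returned session name can be fed
# to test(), as in the commented-out calls below.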
session_name = train("batch16")
# test(session_name=session_name, is_visualize=False)
# test(session_name='test', is_visualize=True)