Code example #1
    def __init__(self, vocab, fix_len, **kwargs):
        super(AE, self).__init__(**kwargs)
        self.voc_dim = vocab.vectors.size(0)
        self.fix_len = fix_len
        self.ignore_index = vocab.stoi['<pad>']
        # self.metrics and self.reduce are presumably set in the base-class
        # __init__ (cf. code example #5 below).
        if self.metrics is not None:
            for m in self.metrics:
                m.ignore_index = self.ignore_index
                m.reduce = self.reduce
        self.latent_dim = kwargs.get('latent_dim')
        self.encoder = create_instance('encoder', kwargs, vocab, fix_len,
                                       self.latent_dim)
        self.decoder = create_instance('decoder', kwargs, vocab, fix_len,
                                       self.latent_dim)
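
Every snippet in this collection goes through create_instance from tyche.utils.helper, whose implementation is not shown here. The sketch below is only a guess inferred from the call sites: a config block with 'module', 'name' and 'args' keys, with positional arguments forwarded to the constructor. The real helper evidently does more, e.g. it can return a list of instances when the config names several classes (see the metrics handling in code example #5), so treat this as an illustration, not the actual tyche code.

# Hypothetical reconstruction of create_instance, inferred from the call
# sites in these examples -- NOT the actual tyche.utils.helper code.
import importlib


def create_instance(key, params, *args):
    # Look up the config block describing the object to build.
    block = params[key]
    # Resolve the class by module path and class name.
    module = importlib.import_module(block['module'])
    cls = getattr(module, block['name'])
    # Forward positional args; config 'args' become keyword arguments.
    return cls(*args, **block.get('args', {}))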
Code example #2
def train_params(params, resume, debug=False):
    if debug:
        torch.manual_seed(params["seed"])
    logger.info("Name of the Experiment: " + params['name'])
    print("Name of the Experiment: " + params['name'])
    device = get_device(params)

    # Data loader
    data_loader = create_instance('data_loader', params, device)
    # Model
    model = create_instance('model', params, data_loader.vocab, data_loader.fix_len)
    # Optimizers
    optimizers = dict()
    if isinstance(model, (WAE, DiscreteWAE, SupervisedWAE, SemiSupervisedWAE)):
        model_params = itertools.chain(model.encoder.parameters(),
                                       model.decoder.parameters())
        optimizer = create_instance('optimizer', params, model_params)
        critic_optimizer = create_instance('critic_optimizer', params, model.wasserstein_distance.parameters())
        optimizers['loss_optimizer'] = optimizer
        optimizers['critic_optimizer'] = critic_optimizer

        if isinstance(model, SemiSupervisedWAE):
            cat_critic_optimizer = create_instance('cat_critic_optimizer', params, model.cat_wasserstein_distance.parameters())
            optimizers['cat_critic_optimizer'] = cat_critic_optimizer
            classification_optimizer = create_instance('class_optimizer', params, model.encoder.parameters())
            optimizers['class_optimizer'] = classification_optimizer

    else:
        optimizer = create_instance('optimizer', params, model.parameters())
        optimizers['loss_optimizer'] = optimizer
    # Trainer
    trainer = create_instance('trainer', params, model, optimizers, resume, params, data_loader)
    best_model = trainer.train()
    with open(os.path.join(params['trainer']['logging']['logging_dir'], 'best_models.txt'), 'a+') as f:
        f.write(str(best_model) + "\n")
    # Run the trainer's destructor explicitly to release its resources.
    trainer.__del__()
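
The shape of the params dictionary is never shown in these snippets, but the key accesses above and below pin part of it down. The following is a hypothetical minimal layout assembled only from keys the examples actually read (name, seed, gpus, data_loader, model, optimizer, trainer); all module and class names are placeholders, and real experiment configs are presumably YAML files with more entries.

# Hypothetical minimal params layout; module/class names are placeholders.
params = {
    'name': 'my-experiment',
    'seed': 0,
    'gpus': ['0'],
    'data_loader': {'module': 'my_pkg.loaders', 'name': 'MyDataLoader',
                    'args': {'batch_size': 32}},
    'model': {'module': 'my_pkg.models', 'name': 'AE',
              'args': {'latent_dim': 64,
                       'encoder': {},    # encoder config block
                       'decoder': {}}},  # decoder config block
    'optimizer': {'module': 'torch.optim', 'name': 'Adam',
                  'args': {'lr': 1e-3}},
    'trainer': {'module': 'my_pkg.trainers', 'name': 'TrainingVQ',
                'save_dir': 'results/my-experiment',
                'logging': {'logging_dir': 'results/logs',
                            'tensorboard_dir': 'results/tensorboard'},
                'args': {'beta_scheduler': {}}},  # scheduler config block
}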
Code example #3
    def __init__(self,
                 model,
                 loss,
                 metrics,
                 optimizer,
                 resume,
                 params,
                 data_loader,
                 train_logger=None,
                 **kwargs):

        super(TrainingVQ,
              self).__init__(model, loss, metrics, optimizer, resume, params,
                             train_logger, data_loader)

        self.train_vocab = data_loader.vocab

        # Initialise the encoder embeddings with the pretrained vectors.
        emb_matrix = self.train_vocab.vectors.to(self.device)
        self.model.encoder.embedding.weight.data.copy_(emb_matrix)
        pad_idx = self.train_vocab.stoi['<pad>']
        self.loss.ignore_index = pad_idx
        for m in self.metrics:
            m.ignore_index = pad_idx

        self.loss.b_scheduler = create_instance('beta_scheduler',
                                                params['trainer']['args'])
Code example #4
    def __init__(self, input_size, **kwargs):
        super(RNN, self).__init__()
        # Inject runtime-only settings into the cell config before
        # instantiating the recurrent cell.
        kwargs['cell_type']['args']['batch_first'] = True
        kwargs['cell_type']['args']['input_size'] = input_size

        self.rnn = create_instance('cell_type', kwargs)
        self.hidden_state = None
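
A hypothetical instantiation of this wrapper follows. The cell_type block mirrors exactly what the constructor above mutates; torch.nn.GRU and the sizes are placeholders, not values taken from the real configs.

# Hypothetical usage; GRU and the sizes stand in for whatever the
# real configs name.
rnn = RNN(input_size=300,
          cell_type={'module': 'torch.nn', 'name': 'GRU',
                     'args': {'hidden_size': 256, 'num_layers': 1}})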
Code example #5
    def __init__(self, **kwargs):
        super().__init__()
        self.reduce = kwargs.get('reduce')
        if 'metrics' in kwargs:
            metrics = create_instance('metrics', kwargs)
            # create_instance returns a single object or a list, depending
            # on the config; normalise to a list either way.
            if not isinstance(metrics, list):
                metrics = [metrics]
            self.metrics = metrics
        else:
            self.metrics = None
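
This appears to be the base class the models above rely on: it is where self.metrics and self.reduce in code examples #1 and #7 come from. A hypothetical metrics block passed through kwargs might look like the following; the module and class names are placeholders.

# Hypothetical kwargs carrying a metrics block; names are placeholders.
kwargs = {
    'reduce': True,
    'metrics': {'module': 'my_pkg.metrics', 'name': 'Perplexity',
                'args': {}},
}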
Code example #6
def load_model(args):
    try:
        state = torch.load(args.model, map_location='cuda:0')
        params = state["params"]
        # Pin evaluation to GPU 0 and run with batch size 1.
        params['gpus'][0] = '0'
        params['data_loader']['args']['batch_size'] = 1
        dtype_ = params.get("dtype", "float32")
        dtype_ = getattr(torch, dtype_)
        torch.set_default_dtype(dtype_)
        logger.info("Name of the Experiment: " + params['name'])
        device = get_device(params)
        data_loader = create_instance('data_loader', params, device, dtype_)
        model = create_instance('model', params, data_loader)
        model.load_state_dict(state["model_state"])
        model.to(device)
        model.eval()
        return model, data_loader

    except Exception as e:
        logger.error(e)
        # Re-raise so callers don't silently unpack a None return value.
        raise
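
load_model only needs an object with a .model attribute holding a checkpoint path, so a minimal hypothetical driver looks like this; the path is a placeholder.

# Hypothetical driver for load_model; the checkpoint path is a placeholder.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--model', default='results/best_model.pth')
args = parser.parse_args()

model, data_loader = load_model(args)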
Code example #7
    def __init__(self, vocab, fix_len, latent_dim=0, **kwargs):
        super(BasicLM, self).__init__()
        self.voc_dim = vocab.vectors.size(0)
        self.fix_len = fix_len
        self.ignore_index = vocab.stoi['<pad>']
        if self.metrics is not None:
            for m in self.metrics:
                m.ignore_index = self.ignore_index
                m.reduce = self.reduce

        self.decoder = create_instance('decoder', kwargs, vocab, fix_len,
                                       latent_dim)
Code example #8
    def __init__(self,
                 model,
                 loss,
                 metric,
                 optimizer,
                 resume,
                 params,
                 data_loader,
                 train_logger=None,
                 **kwargs):
        super(TrainingInteractingPointProcess,
              self).__init__(model, loss, metric, optimizer, resume, params,
                             train_logger, data_loader)

        self.bptt_size = data_loader.bptt
        self.loss.b_scheduler = create_instance('beta_scheduler',
                                                params['trainer']['args'])
Code example #9
def train_params(params, resume, debug=False, no_dirs=True):
    """initialise model trainer with saved model and parameters"""

    if debug:
        torch.manual_seed(params["seed"])

    print("Name of the Experiment: " + params['name'])
    device = get_device(params)

    # Data loader
    data_loader = create_instance('data_loader', params, device)

    # Map a renamed decoder class in old configs to its new name.
    if params["model"]["args"]["decoder"]["name"] == "LanguageModelCNN":
        params["model"]["args"]["decoder"]["name"] = "LMDecoderCNN"

    # Model
    model = create_instance('model', params, data_loader.vocab,
                            data_loader.fix_len)
    # Optimizers
    optimizers = dict()
    if isinstance(model, (WAE, DiscreteWAE)):
        model_params = itertools.chain(model.encoder.parameters(),
                                       model.decoder.parameters())
        optimizer = create_instance('optimizer', params, model_params)
        critic_optimizer = create_instance(
            'critic_optimizer', params,
            model.wasserstein_distance.parameters())
        optimizers['loss_optimizer'] = optimizer
        optimizers['critic_optimizer'] = critic_optimizer
    else:
        optimizer = create_instance('optimizer', params, model.parameters())
        optimizers['loss_optimizer'] = optimizer

    # Redirect the save and logging directories from "results" to "garbage".
    if no_dirs:
        params['trainer']['save_dir'] = params['trainer']['save_dir'].replace(
            "results", "garbage")
        params['trainer']['logging']['tensorboard_dir'] = params['trainer'][
            'logging']['tensorboard_dir'].replace("results", "garbage")
        params['trainer']['logging']['logging_dir'] = params['trainer'][
            'logging']['logging_dir'].replace("results", "garbage")

    # Trainer
    trainer = create_instance('trainer', params, model, optimizers, resume,
                              params, data_loader)

    return trainer
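
Unlike the variants in code examples #2 and #10, this one returns the trainer without running it, so the caller drives the loop. A hypothetical usage, with a placeholder config path and load_params from tyche.utils.helper (imported in code example #11):

# Hypothetical usage; the YAML path is a placeholder.
params = load_params('experiments/ae.yaml')
trainer = train_params(params, resume=False, no_dirs=True)
best_model = trainer.train()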
Code example #10
def train_params(params, resume, debug=False):

    if debug:
        torch.manual_seed(params["seed"])
    dtype_ = params.get("dtype", "float32")
    dtype_ = getattr(torch, dtype_)
    torch.set_default_dtype(dtype_)
    logger.info("Name of the Experiment: " + params['name'])
    device = get_device(params)
    data_loader = create_instance('data_loader', params, device, dtype_)
    model = create_instance('model', params, data_loader)
    # Optimizers
    optimizers = dict()
    if params['model']['name'] in ['TextTPP', 'TextARPP']:
        optimizer = create_instance('optimizer', params,
                                    model.tpp_model.parameters())

        # Optimise the attention layer jointly with the language model
        # when the model uses attention.
        lm_parm = model.language_model.parameters()
        if model.attention:
            lm_parm = itertools.chain(
                model.language_model.parameters(),
                model.attention_layer.parameters())

        language_optimizer = create_instance('language_optimizer', params,
                                             lm_parm)
        optimizers['loss_optimizer'] = optimizer
        optimizers['language_optimizer'] = language_optimizer
    else:
        optimizer = create_instance('optimizer', params, model.parameters())
        optimizers['loss_optimizer'] = optimizer

    trainer = create_instance('trainer', params, model, optimizers, resume,
                              params, data_loader)
    best_model = trainer.train()
    with open(
            os.path.join(params['trainer']['logging']['logging_dir'],
                         'best_models.txt'), 'a+') as f:
        f.write(str(best_model) + "\n")
Code example #11
import torch
from tyche.utils.helper import create_instance
from tyche.utils.helper import load_params
from deep_graphs.models import GCNModelVAE
from deep_graphs.data.loaders import GAEDataLoader

data_dir = "/home/an/Desktop/Projects/DeepPointProcesses/data/deep_graphs/cora/"

epochs = 1
dropout = 0.2

data_dir = "/home/an/Desktop/Projects/DeepPointProcesses/deep_point_processes/experiments/graphs/gae.yaml"
full_params = load_params(data_dir)
model_params = full_params.get('model')
encoder_params = model_params.get('args').get('encoder')
decoder_params = model_params.get('args').get('decoder')
data_loader_params = full_params.get('data_loader')
device = torch.device("cpu")

if __name__ == "__main__":
    data_loader = GAEDataLoader(device, **data_loader_params.get("args"))
    model = GCNModelVAE(data_loader, **model_params.get("args"))

    optimizers = dict()
    optimizer = create_instance('optimizer', full_params, model.parameters())
    optimizers['loss_optimizer'] = optimizer

    model.train_step(data_loader, optimizers, 0., None)
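
Note that, unlike the other drivers, this script builds GAEDataLoader and GCNModelVAE directly from the parsed YAML blocks and only goes through create_instance for the optimizer, which makes a single train_step easy to run interactively. The trailing 0. and None arguments to train_step are left as-is, since their meaning is not visible from this snippet.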
Code example #12
def train_params(params):

    print("Name of the Experiment: " + params['name'])
    device = get_device(params)

    # Data loader
    data_loader = create_instance('data_loader', params, device)

    # Model
    #model = create_instance('network', params, data_loader.vocab, data_loader.fix_len, 0)
    #model.to(device)

    model2 = DPCNN(data_loader.vocab, device)
    model2.to(device)

    critic_optimizer = create_instance('critic_optimizer', params,
                                       model2.parameters())

    # Optimizers
    optimizers = dict()
    optimizers['critic_optimizer'] = critic_optimizer
    epochs = 20
    for epoch in range(epochs):
        epoch_loss = 0
        epoch_accuracy = 0
        for batch in data_loader.train:
            x = batch.text
            y = batch.label
            optimizers['critic_optimizer'].zero_grad()
            logits = model2(x)
            class_loss = F.cross_entropy(logits, y)
            class_loss.backward()
            optimizers['critic_optimizer'].step()
            # Accumulate a Python float rather than a graph-carrying tensor.
            epoch_loss += class_loss.item()

            prediction = logits.argmax(dim=1)
            accuracy = (prediction == y).float().mean().item()
            epoch_accuracy += accuracy

        print("Training:")
        print("epoch_loss {}".format(epoch_loss))
        print("epoch_accuracy {}".format(epoch_accuracy /
                                         len(data_loader.train)))
        with torch.no_grad():
            epoch_loss = 0
            epoch_accuracy = 0
            for batch in data_loader.validate:
                x = batch.text
                y = batch.label
                logits = model2(x)
                class_loss = F.cross_entropy(logits, y)
                epoch_loss += class_loss.item()

                prediction = logits.argmax(dim=1)
                accuracy = (prediction == y).float().mean().item()
                epoch_accuracy += accuracy

        print("Validation:")
        print("epoch_loss {}".format(epoch_loss))
        print("epoch_accuracy {}".format(epoch_accuracy /
                                         len(data_loader.validate)))