Example #1
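Constructor of an ADARNN PyTorch model wrapper: it stores the hyperparameters, selects a CUDA or CPU device, seeds NumPy and PyTorch when a seed is given, builds the underlying AdaRNN network, and creates an Adam or SGD optimizer.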
    def __init__(self,
                 d_feat=6,
                 hidden_size=64,
                 num_layers=2,
                 dropout=0.0,
                 n_epochs=200,
                 pre_epoch=40,
                 dw=0.5,
                 loss_type="cosine",
                 len_seq=60,
                 len_win=0,
                 lr=0.001,
                 metric="mse",
                 batch_size=2000,
                 early_stop=20,
                 loss="mse",
                 optimizer="adam",
                 n_splits=2,
                 GPU=0,
                 seed=None,
                 **kwargs):
        # Set logger.
        self.logger = get_module_logger("ADARNN")
        self.logger.info("ADARNN pytorch version...")
        os.environ["CUDA_VISIBLE_DEVICES"] = str(GPU)

        # set hyper-parameters.
        self.d_feat = d_feat
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.dropout = dropout
        self.n_epochs = n_epochs
        self.pre_epoch = pre_epoch
        self.dw = dw
        self.loss_type = loss_type
        self.len_seq = len_seq
        self.len_win = len_win
        self.lr = lr
        self.metric = metric
        self.batch_size = batch_size
        self.early_stop = early_stop
        self.optimizer = optimizer.lower()
        self.loss = loss
        self.n_splits = n_splits
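        # Fall back to CPU when CUDA is unavailable or a negative GPU index is given.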
        self.device = torch.device(
            "cuda:%d" % GPU if torch.cuda.is_available() and GPU >= 0 else "cpu"
        )
        self.seed = seed

        self.logger.info("ADARNN parameters setting:"
                         "\nd_feat : {}"
                         "\nhidden_size : {}"
                         "\nnum_layers : {}"
                         "\ndropout : {}"
                         "\nn_epochs : {}"
                         "\nlr : {}"
                         "\nmetric : {}"
                         "\nbatch_size : {}"
                         "\nearly_stop : {}"
                         "\noptimizer : {}"
                         "\nloss_type : {}"
                         "\nvisible_GPU : {}"
                         "\nuse_GPU : {}"
                         "\nseed : {}".format(
                             d_feat,
                             hidden_size,
                             num_layers,
                             dropout,
                             n_epochs,
                             lr,
                             metric,
                             batch_size,
                             early_stop,
                             optimizer.lower(),
                             loss,
                             GPU,
                             self.use_gpu,
                             seed,
                         ))

        if self.seed is not None:
            np.random.seed(self.seed)
            torch.manual_seed(self.seed)

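        # One hidden width per stacked recurrent layer.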
        n_hiddens = [hidden_size for _ in range(num_layers)]
        self.model = AdaRNN(
            use_bottleneck=False,
            bottleneck_width=64,
            n_input=d_feat,
            n_hiddens=n_hiddens,
            n_output=1,
            dropout=dropout,
            model_type="AdaRNN",
            len_seq=len_seq,
            trans_loss=loss_type,
        )
        self.logger.info("model:\n{:}".format(self.model))
        self.logger.info("model size: {:.4f} MB".format(
            count_parameters(self.model)))

        if optimizer.lower() == "adam":
            self.train_optimizer = optim.Adam(self.model.parameters(),
                                              lr=self.lr)
        elif optimizer.lower() == "gd":
            self.train_optimizer = optim.SGD(self.model.parameters(),
                                             lr=self.lr)
        else:
            raise NotImplementedError(
                "optimizer {} is not supported!".format(optimizer))

        self.fitted = False
        self.model.to(self.device)
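
A minimal usage sketch. It assumes the snippet above is the __init__ of Qlib's ADARNN wrapper (qlib.contrib.model.pytorch_adarnn.ADARNN); the import path and the fit/predict interface are assumptions based on that library, not shown in the snippet itself.

# Hypothetical usage, assuming Qlib's ADARNN model wrapper.
from qlib.contrib.model.pytorch_adarnn import ADARNN

model = ADARNN(
    d_feat=6,
    hidden_size=64,
    num_layers=2,
    n_epochs=200,
    lr=0.001,
    GPU=-1,   # a negative index (or no CUDA) selects the CPU device
    seed=42,  # seeds numpy and torch for reproducibility
)
# model.fit(dataset) and model.predict(dataset) follow Qlib's Model interface.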
Example #2
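Constructor of an ADD PyTorch model wrapper: the same pattern as above, extended with decoder dropout and the gamma/gamma_clip/mu hyperparameters, and with the model moved onto the selected device at the end.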
    def __init__(
        self,
        d_feat=6,
        hidden_size=64,
        num_layers=2,
        dropout=0.0,
        dec_dropout=0.0,
        n_epochs=200,
        lr=0.001,
        metric="mse",
        batch_size=5000,
        early_stop=20,
        base_model="GRU",
        model_path=None,
        optimizer="adam",
        gamma=0.1,
        gamma_clip=0.4,
        mu=0.05,
        GPU=0,
        seed=None,
        **kwargs
    ):
        # Set logger.
        self.logger = get_module_logger("ADD")
        self.logger.info("ADD pytorch version...")

        # set hyper-parameters.
        self.d_feat = d_feat
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.dropout = dropout
        self.dec_dropout = dec_dropout
        self.n_epochs = n_epochs
        self.lr = lr
        self.metric = metric
        self.batch_size = batch_size
        self.early_stop = early_stop
        self.optimizer = optimizer.lower()
        self.base_model = base_model
        self.model_path = model_path
        self.device = torch.device("cuda:%d" % GPU if torch.cuda.is_available() and GPU >= 0 else "cpu")
        self.seed = seed

        self.gamma = gamma
        self.gamma_clip = gamma_clip
        self.mu = mu

        self.logger.info(
            "ADD parameters setting:"
            "\nd_feat : {}"
            "\nhidden_size : {}"
            "\nnum_layers : {}"
            "\ndropout : {}"
            "\ndec_dropout : {}"
            "\nn_epochs : {}"
            "\nlr : {}"
            "\nmetric : {}"
            "\nbatch_size : {}"
            "\nearly_stop : {}"
            "\noptimizer : {}"
            "\nbase_model : {}"
            "\nmodel_path : {}"
            "\ngamma : {}"
            "\ngamma_clip : {}"
            "\nmu : {}"
            "\ndevice : {}"
            "\nuse_GPU : {}"
            "\nseed : {}".format(
                d_feat,
                hidden_size,
                num_layers,
                dropout,
                dec_dropout,
                n_epochs,
                lr,
                metric,
                batch_size,
                early_stop,
                optimizer.lower(),
                base_model,
                model_path,
                gamma,
                gamma_clip,
                mu,
                self.device,
                self.use_gpu,
                seed,
            )
        )

        if self.seed is not None:
            np.random.seed(self.seed)
            torch.manual_seed(self.seed)

        self.ADD_model = ADDModel(
            d_feat=self.d_feat,
            hidden_size=self.hidden_size,
            num_layers=self.num_layers,
            dropout=self.dropout,
            dec_dropout=self.dec_dropout,
            base_model=self.base_model,
            gamma=self.gamma,
            gamma_clip=self.gamma_clip,
        )
        self.logger.info("model:\n{:}".format(self.ADD_model))
        self.logger.info("model size: {:.4f} MB".format(count_parameters(self.ADD_model)))

        if optimizer.lower() == "adam":
            self.train_optimizer = optim.Adam(self.ADD_model.parameters(), lr=self.lr)
        elif optimizer.lower() == "gd":
            self.train_optimizer = optim.SGD(self.ADD_model.parameters(), lr=self.lr)
        else:
            raise NotImplementedError("optimizer {} is not supported!".format(optimizer))

        self.fitted = False
        self.ADD_model.to(self.device)
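
A minimal usage sketch for this variant, under the same assumption that it is Qlib's ADD wrapper (qlib.contrib.model.pytorch_add.ADD); the import path is an assumption.

# Hypothetical usage, assuming Qlib's ADD model wrapper.
from qlib.contrib.model.pytorch_add import ADD

model = ADD(
    d_feat=6,
    base_model="GRU",  # recurrent encoder backbone
    gamma=0.1,
    gamma_clip=0.4,
    mu=0.05,
    GPU=-1,            # negative GPU index falls back to CPU
    seed=42,
)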