Example #1
    def __init__(self,
                 input_shape,
                 rnn_type='LSTM',
                 rnn_layer=2,
                 hid_size=64,
                 dropout_3=0,
                 optim_method='Adam',
                 optim_args_3={'lr': 1e-3},
                 loss_fn='mse',
                 eval_metric='corr',
                 min_ratio_teacher=0.3):

        super().__init__()

        # Architecture
        self.hid_size = hid_size
        self.input_shape = input_shape
        self.input_size = input_shape[0]
        self.input_day = input_shape[2]
        self.dropout = dropout_3
        self.rnn_layer = rnn_layer
        self.rnn_type = rnn_type

        self._build_model()
        # Optimization
        self.optimizer = getattr(optim, optim_method)(self.parameters(),
                                                      **optim_args_3)
        self.loss_fn = get_loss_fn(loss_fn)
        self.metric_fn = get_metric_fn(eval_metric)

        if torch.cuda.is_available():
            self.cuda()
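
For context, here is a minimal sketch of what the `_build_model` call above might set up from these attributes. It assumes the class subclasses `torch.nn.Module`, that `import torch.nn as nn` is in scope at module level, that `rnn_type` names a recurrent class in `torch.nn` ('LSTM', 'GRU' or 'RNN'), and that a single linear output head is used; the head and its output size are assumptions, not taken from the source:

    def _build_model(self):
        # Resolve 'LSTM' / 'GRU' / 'RNN' to the matching torch.nn class,
        # mirroring how the optimizer is resolved via getattr(optim, ...).
        rnn_cls = getattr(nn, self.rnn_type)
        self.rnn = rnn_cls(input_size=self.input_size,
                           hidden_size=self.hid_size,
                           num_layers=self.rnn_layer,
                           batch_first=True,
                           dropout=self.dropout)
        # Map the final hidden state to one prediction per sample (assumed head).
        self.fc_out = nn.Linear(self.hid_size, 1)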
Example #2
    def __init__(self,
                 input_shape,
                 rnn_type='LSTM',
                 rnn_layer=2,
                 hid_size=64,
                 mix_dropout=0,
                 optim_method='Adam',
                 mix_optim_args={'lr': 1e-3},
                 min_loss_fn='mse_v2',
                 eval_metric='corr',
                 out_chnls=[6, 6, 6],
                 kernel=[4, 4],
                 stride=[4, 4],
                 min_ratio_teacher=0.3):

        super().__init__()

        # Architecture
        self.hid_size = hid_size
        self.input_size = input_shape[0]
        self.input_length = input_shape[1]
        self.input_day = input_shape[2]
        self.dropout = mix_dropout
        self.rnn_layer = rnn_layer
        self.rnn_type = rnn_type
        self.out_chnls = out_chnls
        self.kernel = kernel
        self.stride = stride
        self.min_ratio_teacher = min_ratio_teacher

        self._build_model()
        # Optimization
        self.optimizer = getattr(optim, optim_method)(self.parameters(),
                                                      **mix_optim_args)
        self.loss_fn = get_loss_fn(min_loss_fn)
        self.metric_fn = get_metric_fn(eval_metric)

        if torch.cuda.is_available():
            self.cuda()
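
Both examples delegate loss and metric lookup to `get_loss_fn` and `get_metric_fn`, whose definitions are not shown. The sketch below illustrates what such helpers could look like, assuming only the 'mse' and 'corr' names used above ('mse_v2' from Example #2 is omitted, since its definition isn't given); it is not the original implementation:

import torch

def get_loss_fn(name):
    # Map a string name to a callable loss; only 'mse' is assumed here.
    if name == 'mse':
        return lambda pred, label: torch.mean((pred - label) ** 2)
    raise NotImplementedError(f'unknown loss: {name}')

def get_metric_fn(name):
    # Map a string name to an evaluation metric; 'corr' is taken to be
    # the Pearson correlation between predictions and labels.
    if name == 'corr':
        def corr(pred, label):
            p = pred - pred.mean()
            q = label - label.mean()
            return (p * q).sum() / (p.norm() * q.norm() + 1e-12)
        return corr
    raise NotImplementedError(f'unknown metric: {name}')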