Code example #1
import torch.nn as nn

# AutoEncoder is assumed to be defined elsewhere in the same project.


class AutoEncoderRNNRegr(nn.Module):

    def __init__(self, **kwargs):
        super(AutoEncoderRNNRegr, self).__init__()

        # define parameters
        self.input_size = kwargs['in_size']
        self.rnn_input_size = kwargs['rnn_in_size']
        self.rnn_hidden_size = kwargs['rnn_h_size']
        self.reg_hidden_sizes = kwargs['reg_h_sizes']
        self.output_size = kwargs['out_size']
        self.num_layers = kwargs.get('num_layers', 1)
        self.p_dropout = kwargs.get('p_dropout', 0.0)

        # auto_encoder layer
        self.ae = AutoEncoder(**kwargs)
        if kwargs.get('ae_pretrain_weight'):
            self.ae.load_state_dict(kwargs['ae_pretrain_weight'])

        # freeze the pretrained auto-encoder unless it is marked trainable
        trainable = bool(kwargs.get('if_trainable', False))
        for p in self.ae.parameters():
            p.requires_grad = trainable

        # rnn layer
        self.rnn = nn.LSTM(input_size=self.rnn_input_size,
                           hidden_size=self.rnn_hidden_size,
                           num_layers=self.num_layers,
                           batch_first=True)

        # regression layer
        self.reg = nn.ModuleList()  # reg_hidden_sizes[0] is expected to equal rnn_hidden_size
        for k in range(len(self.reg_hidden_sizes) - 1):
            self.reg.append(nn.Linear(self.reg_hidden_sizes[k], self.reg_hidden_sizes[k + 1]))

        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=self.p_dropout)

        # output layer
        self.out = nn.Linear(in_features=self.reg_hidden_sizes[-1], out_features=self.output_size)

    def forward(self, x):

        n_samples, seq_len, _ = x.shape
        en_x = x.view(n_samples * seq_len, -1)
        en_x, _ = self.ae(en_x)
        en_x = en_x.view(n_samples, seq_len, -1)  # (batch_size, seq_len, h_size)

        y, _ = self.rnn(en_x)
        y_t = y[:, -1, :]  # output of the last time step

        for layer in self.reg:
            y_t = self.relu(layer(y_t))
            y_t = self.dropout(y_t)

        return self.out(y_t)
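
This example (and example #2 below) unpacks two values from `self.ae(...)`, so it assumes an AutoEncoder whose forward pass returns both the encoding and the reconstruction. The project's own class is not shown here; the following is only a minimal sketch of that assumed interface, with made-up layer sizes (the constructor arguments differ between the projects, only the two-value return convention matters):

import torch
import torch.nn as nn


class AutoEncoder(nn.Module):
    """Sketch of the assumed interface: forward(x) returns (encoded, decoded)."""

    def __init__(self, in_dim, en_h_dims, de_h_dims, **kwargs):
        super().__init__()

        def mlp(dims):
            layers = []
            for d_in, d_out in zip(dims[:-1], dims[1:]):
                layers += [nn.Linear(d_in, d_out), nn.ReLU()]
            return nn.Sequential(*layers[:-1])  # drop the trailing ReLU

        self.encoder = mlp([in_dim] + list(en_h_dims))
        self.decoder = mlp([en_h_dims[-1]] + list(de_h_dims))

    def forward(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return encoded, decoded


# quick smoke test with hypothetical sizes
ae = AutoEncoder(in_dim=16, en_h_dims=[8, 4], de_h_dims=[8, 16])
enc, dec = ae(torch.randn(10, 16))
print(enc.shape, dec.shape)  # torch.Size([10, 4]) torch.Size([10, 16])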
Code example #2
import torch
import torch.nn as nn

# MaskNet, AutoEncoder, ConvLSTM and FC are assumed to be defined
# elsewhere in the same project.


class DeepAP(nn.Module):
    def __init__(self, in_dim, ae_en_h_dims, ae_de_h_dims, conv_lstm_in_size,
                 conv_lstm_in_dim, conv_lstm_h_dim, conv_lstm_kernel_sizes,
                 conv_lstm_n_layers, fc_in_dim, fc_h_dims, fc_out_dim,
                 **kwargs):

        super(DeepAP, self).__init__()

        self.device = kwargs.get('device', 'cpu')

        ################
        # masked layer #
        ################

        mask = [list(range(in_dim)), list(range(in_dim))]  # row/column indices (here: the diagonal)
        self.mask_layer = MaskNet(in_dim, in_dim, mask, device=self.device)
        self.mask_thre = kwargs.get('mask_thre', 0.0001)

        ######################
        # auto_encoder layer #
        ######################

        self.ae = AutoEncoder(in_dim=in_dim,
                              en_h_dims=ae_en_h_dims,
                              de_h_dims=ae_de_h_dims)

        if kwargs.get('ae_pretrain_weight') is not None:
            self.ae.load_state_dict(kwargs['ae_pretrain_weight'])
        else:
            raise ValueError('AutoEncoder not pretrained.')

        # freeze the pretrained auto-encoder unless it is marked trainable
        trainable = bool(kwargs.get('if_trainable', False))
        for p in self.ae.parameters():
            p.requires_grad = trainable

        ####################
        # conv_lstm layers #
        ####################

        self.conv_lstm_list = nn.ModuleList()
        for i in conv_lstm_kernel_sizes:
            i_kernel_size = (i, i)
            conv_lstm = ConvLSTM(
                in_size=conv_lstm_in_size,
                in_dim=conv_lstm_in_dim,
                h_dim=conv_lstm_h_dim,
                kernel_size=i_kernel_size,
                num_layers=conv_lstm_n_layers,
                batch_first=kwargs.get('conv_lstm_batch_first', True),
                bias=kwargs.get('conv_lstm_bias', True),
                only_last_state=kwargs.get('only_last_state', True),
                device=self.device)
            self.conv_lstm_list.append(conv_lstm)

        #########################
        # fully-connected layer #
        #########################

        self.fc = FC(
            in_dim=fc_in_dim,  # assert in_size == n_conv_lstm * conv_lstm_h_dim
            h_dims=fc_h_dims,
            out_dim=fc_out_dim,
            p_dropout=kwargs.get('fc_p_dropout', 0.1))

    def forward(self, input_data):  # input_data: (b, t, c, h, w)

        x = input_data.permute(0, 1, 3, 4, 2)  # => (b, t, h, w, c)

        ################
        # masked layer #
        ################

        masked_x = self.mask_layer(x)

        # zero out any input feature whose diagonal mask weight is within mask_thre of zero
        for p in self.mask_layer.parameters():
            if p.dim() != 2:  # skip 1-D parameters such as a bias
                continue
            for i in range(p.size(1)):
                if -self.mask_thre <= p[i, i] <= self.mask_thre:
                    masked_x[..., i] = 0.0

        ######################
        # auto-encoder layer #
        ######################

        en_x, de_x = self.ae(masked_x)
        en_x = en_x.permute(0, 1, 4, 2, 3)  # => (b, t, c, h, w)

        ####################
        # conv_lstm layers #
        ####################

        conv_lstm_out_list = []
        for conv_lstm in self.conv_lstm_list:
            conv_lstm_last_hidden, conv_lstm_last_state = conv_lstm(en_x)
            _, cell_last_state = conv_lstm_last_state
            conv_lstm_out_list.append(cell_last_state)

        conv_lstm_out = torch.cat(conv_lstm_out_list, dim=1)

        #########################
        # fully-connected layer #
        #########################

        fc_out = conv_lstm_out.permute(0, 2, 3, 1)  # => (b, h, w, c)
        fc_out = self.fc(fc_out)
        fc_out = fc_out.permute(0, 3, 1, 2)  # => (b, c, h, w)

        return fc_out, masked_x, de_x
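
The loop over `self.conv_lstm_list` assumes that each ConvLSTM, with `only_last_state=True`, returns its last hidden output together with an `(h, c)` state pair; the final cell states from all kernel sizes are then concatenated along the channel dimension before the fully-connected head. A quick shape walk-through with made-up sizes (plain tensors stand in for the ConvLSTM outputs):

import torch

b, h, w = 2, 8, 8            # batch and spatial size (hypothetical)
conv_lstm_h_dim = 32         # hidden channels per ConvLSTM (hypothetical)
kernel_sizes = [3, 5, 7]     # one ConvLSTM per kernel size

# stand-ins for cell_last_state from each ConvLSTM: (b, c, h, w)
cell_states = [torch.randn(b, conv_lstm_h_dim, h, w) for _ in kernel_sizes]

out = torch.cat(cell_states, dim=1)   # (b, 3 * 32, h, w)
fc_in = out.permute(0, 2, 3, 1)       # (b, h, w, 96): matches fc_in_dim = n_conv_lstm * conv_lstm_h_dim
print(out.shape, fc_in.shape)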
Code example #3
File: train.py  Project: yogurtss/AutoEncoder
from math import floor

import numpy as np
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# cfg, cut_data, CreditDataset, AutoEncoder, valiadate, save_model and
# result_dir are assumed to come from the rest of this script/project.


def main():
    args = cfg()
    writer = SummaryWriter(result_dir)
    args.load = False

    x_train, x_test = cut_data(args.data_dir)
    train_dataset = CreditDataset(x_train)
    val_dataset = CreditDataset(x_test)

    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False)

    model = AutoEncoder(28)
    print(model)

    optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999))
    # cycle the learning rate between args.lr and args.max_lr; step_size_up is half an epoch of batches
    cy_len = floor(len(train_dataset) / args.batch_size // 2)
    clr = lr_scheduler.CyclicLR(optimizer, args.lr, args.max_lr, cy_len, cycle_momentum=False)

    criterion = nn.MSELoss()
    state = {"step": 0,
             "worse_epochs": 0,
             "epochs": 0,
             "best_loss": np.Inf}

    while state["worse_epochs"] < args.hold_step:
        print("Training one epoch from iteration " + str(state["step"]))
        model.train()
        for i, x in enumerate(train_loader):
            cur_lr = optimizer.state_dict()['param_groups'][0]['lr']
            writer.add_scalar("learning_rate", cur_lr, state['step'])
            optimizer.zero_grad()
            outputs = model(x)
            loss = criterion(outputs, x)
            loss.backward()
            writer.add_scalar("training_loss", loss, state['step'])
            optimizer.step()
            clr.step()
            state['step'] += 1

            if i % 50 == 0:
                print(
                    "{:4d}/{:4d} --- Loss: {:.6f}  with learnig rate {:.6f}".format(
                        i, len(train_dataset) // args.batch_size, loss, cur_lr))

        val_loss = valiadate(model, criterion, val_loader)
        # val_loss /= len(val_dataset)//args.batch_size
        print("Validation loss: " + str(val_loss))
        writer.add_scalar("val_loss", val_loss, state['step'])

        # EARLY STOPPING CHECK
        checkpoint_path = args.model_path + str(state['step']) + '.pth'
        print("Saving model...")
        if val_loss >= state["best_loss"]:
            state["worse_epochs"] += 1
        else:
            print("MODEL IMPROVED ON VALIDATION SET!")
            state["worse_epochs"] = 0
            state["best_loss"] = val_loss
            state["best_checkpoint"] = checkpoint_path
            best_checkpoint_path = args.model_path + 'best.pth'
            save_model(model, optimizer, state, best_checkpoint_path)
        print(state)
        state["epochs"] += 1
        if state["epochs"] % 5 == 0:
            save_model(model, optimizer, state, checkpoint_path)
    last_model = args.model_path + 'last_model.pth'
    save_model(model, optimizer, state, last_model)
    print("Training finished")
    writer.close()
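
The training loop above calls `valiadate` and `save_model`, which are defined elsewhere in the project and not shown here. The following are only minimal sketches consistent with how they are called; the behaviour is assumed, not taken from the original repository:

import torch


def valiadate(model, criterion, val_loader):
    """Sketch: mean reconstruction loss over the validation loader (assumed behaviour)."""
    model.eval()
    total, n_batches = 0.0, 0
    with torch.no_grad():
        for x in val_loader:
            outputs = model(x)
            total += criterion(outputs, x).item()
            n_batches += 1
    model.train()
    return total / max(n_batches, 1)


def save_model(model, optimizer, state, path):
    """Sketch: bundle model weights, optimizer state and the training state dict."""
    torch.save({'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'state': state}, path)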