Example #1
class AutoEncoderRNNRegr(nn.Module):

    def __init__(self, **kwargs):
        super(AutoEncoderRNNRegr, self).__init__()

        # define parameters
        self.input_size = kwargs['in_size']
        self.rnn_input_size = kwargs['rnn_in_size']
        self.rnn_hidden_size = kwargs['rnn_h_size']
        self.reg_hidden_sizes = kwargs['reg_h_sizes']
        self.output_size = kwargs['out_size']
        self.num_layers = kwargs.get('num_layers', 1)
        self.p_dropout = kwargs.get('p_dropout', 0.0)

        # auto_encoder layer
        self.ae = AutoEncoder(**kwargs)
        if kwargs.get('ae_pretrain_weight'):
            self.ae.load_state_dict(kwargs['ae_pretrain_weight'])

        if kwargs.get('if_trainable'):
            for p in self.ae.parameters():
                p.requires_grad = True
        else:
            # freeze the pretrained auto-encoder parameters
            for p in self.ae.parameters():
                p.requires_grad = False

        # rnn layer
        self.rnn = nn.LSTM(input_size=self.rnn_input_size,
                           hidden_size=self.rnn_hidden_size,
                           num_layers=self.num_layers,
                           batch_first=True)

        # regression layer
        self.reg = nn.ModuleList()
        for k in range(len(self.reg_hidden_sizes) - 1):
            self.reg.append(nn.Linear(self.reg_hidden_sizes[k], self.reg_hidden_sizes[k + 1]))

        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=self.p_dropout)

        # output layer
        self.out = nn.Linear(in_features=self.reg_hidden_sizes[-1], out_features=self.output_size)

    def forward(self, x):

        n_samples, seq_len, _ = x.shape
        en_x = x.view(n_samples * seq_len, -1)
        en_x, _ = self.ae(en_x)
        en_x = en_x.view(n_samples, seq_len, -1)  # (batch_size, seq_len, h_size)

        y, _ = self.rnn(en_x)
        y_t = y[:, -1, :]

        for layer in self.reg:
            y_t = self.relu(layer(y_t))
            y_t = self.dropout(y_t)

        return self.out(y_t)
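
A minimal usage sketch for the class above, assuming torch and the project's AutoEncoder are already imported. The sizes are placeholders; reg_h_sizes[0] must equal rnn_h_size, and the encoder is assumed to map in_size features down to rnn_in_size features.

# Hypothetical sizes for illustration only.
model = AutoEncoderRNNRegr(in_size=32,
                           rnn_in_size=16,
                           rnn_h_size=64,
                           reg_h_sizes=[64, 32],
                           out_size=1,
                           num_layers=1,
                           p_dropout=0.1)

x = torch.randn(8, 24, 32)   # (batch_size, seq_len, in_size)
y_hat = model(x)             # (batch_size, out_size)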
Example #2
    def __init__(self, **kwargs):
        super(AutoEncoderRNNRegr, self).__init__()

        # define parameters
        self.input_size = kwargs['in_size']
        self.rnn_input_size = kwargs['rnn_in_size']
        self.rnn_hidden_size = kwargs['rnn_h_size']
        self.reg_hidden_sizes = kwargs['reg_h_sizes']
        self.output_size = kwargs['out_size']
        self.num_layers = kwargs.get('num_layers', 1)
        self.p_dropout = kwargs.get('p_dropout', 0.0)

        # auto_encoder layer
        self.ae = AutoEncoder(**kwargs)
        if kwargs.get('ae_pretrain_weight'):
            self.ae.load_state_dict(kwargs['ae_pretrain_weight'])

        if kwargs.get('if_trainable'):
            for p in self.ae.parameters():
                p.requires_grad = True
        else:
            # freeze the pretrained auto-encoder parameters
            for p in self.ae.parameters():
                p.requires_grad = False

        # rnn layer
        self.rnn = nn.LSTM(input_size=self.rnn_input_size,
                           hidden_size=self.rnn_hidden_size,
                           num_layers=self.num_layers,
                           batch_first=True)

        # regression layer
        self.reg = nn.ModuleList()
        for k in range(len(self.reg_hidden_sizes) - 1):
            self.reg.append(nn.Linear(self.reg_hidden_sizes[k], self.reg_hidden_sizes[k + 1]))

        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=self.p_dropout)

        # output layer
        self.out = nn.Linear(in_features=self.reg_hidden_sizes[-1], out_features=self.output_size)
Example #3
def main(args, **kwargs):
    """ load data object """
    tar_date = args.dates[-1]
    data_file = os.path.join(
        data_dir, '{}_{}m_{}_{}.npz'.format(args.area, args.resolution,
                                            args.year, tar_date))
    data = np.load(data_file)
    label_mat = data['label_mat']
    mapping_mat = data['mapping_mat']
    static_mat = data['static_mat']
    dynamic_features, static_features = list(data['dynamic_features']), list(
        data['static_features'])

    dynamic_mat = []
    for date in args.dates:
        data_file = os.path.join(
            data_dir, '{}_{}m_{}_{}.npz'.format(args.area, args.resolution,
                                                args.year, date))
        data = np.load(data_file)
        dynamic_mat.append(data['dynamic_mat'])

    dynamic_mat = np.concatenate(dynamic_mat)
    data_obj = DataObj(label_mat, dynamic_mat, static_mat, None, None, None,
                       dynamic_features, static_features, mapping_mat)
    """ normalize data """
    data_obj.dynamic_x = normalize_mat(data_obj.dynamic_mat,
                                       if_retain_last_dim=False)
    data_obj.static_x = normalize_mat(data_obj.static_mat,
                                      if_retain_last_dim=False)
    """ define AutoEncoder model """
    # in_dim should be the total number of input features (dynamic + static)
    ae = AutoEncoder(in_dim=len(data_obj.dynamic_feature_names) +
                     len(data_obj.static_feature_names),
                     en_h_dims=args.en_h_dims,
                     de_h_dims=args.de_h_dims)

    ae = ae.to(kwargs['device'])

    train(ae, data_obj, args, **kwargs)
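
For reference, a hedged sketch of the argument object this main() expects, inferred only from the attributes it reads; every value below is a placeholder.

from types import SimpleNamespace

args = SimpleNamespace(area='la',          # placeholder values, attribute names from the snippet
                       resolution=500,
                       year=2018,
                       dates=['0101', '0102', '0103'],
                       en_h_dims=[64, 32, 16],
                       de_h_dims=[16, 32, 64])

main(args, device='cpu')   # 'device' is read from **kwargs inside main()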
Example #4
    def __init__(self, args):
        self.initial_lr = args.learning_rate
        self.lr = args.learning_rate
        self.test_only = args.test_only
        self.dump_statistics = args.dump_statistics
        self.modelName = args.model
        self.experiment = args.experiment
        self.log_path = args.log_path
        self.save_path = args.save_path

        if not os.path.isdir(self.log_path):
            os.makedirs(self.log_path)

        self.logger = Logger(
            '%s/%s_%s.csv' % (self.log_path, self.modelName, args.experiment),
            'epoch, time, learning_rate, tr_loss, tr_acc, val_loss, val_acc')
        self.progress_bar = ProgressBar()
        self.chrono = Chrono()

        self.trainset, self.testset, self.trainloader, self.testloader = dataloader()

        print('==> Building model..')
        self.ae = AutoEncoder()
        self.model = getattr(models, self.modelName)()

        if self.modelName == 'bit':
            self.model.load_from(
                numpy.load('./state_dicts/%s.npz' % self.modelName))

        if torch.cuda.is_available():
            self.ae = torch.nn.DataParallel(self.ae)
            self.model = torch.nn.DataParallel(self.model)
            torch.backends.cudnn.benchmark = True

        self.optimizer = torch.optim.SGD(self.model.parameters(),
                                         lr=self.lr,
                                         momentum=0.9)

        self.load_ae()
        if args.resume or self.test_only or self.dump_statistics:
            self.load()

        self.criterion = torch.nn.CrossEntropyLoss()
        self.criterion = get_torch_vars(self.criterion, False)

        self.ae = get_torch_vars(self.ae, False)
        self.model = get_torch_vars(self.model, False)
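
A hedged driver sketch, inferred only from the attributes the constructor reads; the class name is not visible in this snippet, so 'Trainer' below is hypothetical, and the argparse defaults are placeholders.

import argparse

parser = argparse.ArgumentParser()
# Only the attribute names are taken from the snippet above; defaults are placeholders.
parser.add_argument('--learning_rate', type=float, default=0.1)
parser.add_argument('--test_only', action='store_true')
parser.add_argument('--dump_statistics', action='store_true')
parser.add_argument('--resume', action='store_true')
parser.add_argument('--model', default='resnet18')
parser.add_argument('--experiment', default='baseline')
parser.add_argument('--log_path', default='./logs')
parser.add_argument('--save_path', default='./checkpoints')
args = parser.parse_args()

trainer = Trainer(args)   # 'Trainer' stands in for the class this __init__ belongs to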
Example #5
class DeepAP(nn.Module):
    def __init__(self, in_dim, ae_en_h_dims, ae_de_h_dims, conv_lstm_in_size,
                 conv_lstm_in_dim, conv_lstm_h_dim, conv_lstm_kernel_sizes,
                 conv_lstm_n_layers, fc_in_dim, fc_h_dims, fc_out_dim,
                 **kwargs):

        super(DeepAP, self).__init__()

        self.device = kwargs.get('device', 'cpu')

        ################
        # masked layer #
        ################

        mask = [[i for i in range(in_dim)], [i for i in range(in_dim)]]
        self.mask_layer = MaskNet(in_dim, in_dim, mask, device=self.device)
        self.mask_thre = kwargs.get('mask_thre', 0.0001)

        ######################
        # auto_encoder layer #
        ######################

        self.ae = AutoEncoder(in_dim=in_dim,
                              en_h_dims=ae_en_h_dims,
                              de_h_dims=ae_de_h_dims)

        if kwargs.get('ae_pretrain_weight') is not None:
            self.ae.load_state_dict(kwargs['ae_pretrain_weight'])
        else:
            raise ValueError('AutoEncoder not pretrained.')

        if kwargs.get('if_trainable'):
            for p in self.ae.parameters():
                p.requires_grad = True
        else:
            # freeze the pretrained auto-encoder parameters
            for p in self.ae.parameters():
                p.requires_grad = False

        ####################
        # conv_lstm layers #
        ####################

        self.conv_lstm_list = nn.ModuleList()
        for i in conv_lstm_kernel_sizes:
            i_kernel_size = (i, i)
            conv_lstm = ConvLSTM(
                in_size=conv_lstm_in_size,
                in_dim=conv_lstm_in_dim,
                h_dim=conv_lstm_h_dim,
                kernel_size=i_kernel_size,
                num_layers=conv_lstm_n_layers,
                batch_first=kwargs.get('conv_lstm_batch_first', True),
                bias=kwargs.get('conv_lstm_bias', True),
                only_last_state=kwargs.get('only_last_state', True),
                device=self.device)
            self.conv_lstm_list.append(conv_lstm)

        #########################
        # fully-connected layer #
        #########################

        self.fc = FC(
            in_dim=fc_in_dim,  # assert in_size == n_conv_lstm * conv_lstm_h_dim
            h_dims=fc_h_dims,
            out_dim=fc_out_dim,
            p_dropout=kwargs.get('fc_p_dropout', 0.1))

    def forward(self, input_data):  # input_data: (b, t, c, h, w)

        x = input_data.permute(0, 1, 3, 4, 2)  # => (b, t, h, w, c)

        ################
        # masked layer #
        ################

        masked_x = self.mask_layer(x)

        # zero out any feature channel whose diagonal mask weight lies within
        # [-mask_thre, mask_thre], i.e. features the mask deems uninformative
        for p in self.mask_layer.parameters():
            for i in range(p.size(1)):
                if -self.mask_thre <= p[i, i] <= self.mask_thre:
                    masked_x[..., i] = 0.0

        ######################
        # auto-encoder layer #
        ######################

        en_x, de_x = self.ae(masked_x)
        en_x = en_x.permute(0, 1, 4, 2, 3)  # => (b, t, c, h, w)

        ####################
        # conv_lstm layers #
        ####################

        conv_lstm_out_list = []
        for conv_lstm in self.conv_lstm_list:
            conv_lstm_last_hidden, conv_lstm_last_state = conv_lstm(en_x)
            _, cell_last_state = conv_lstm_last_state
            conv_lstm_out_list.append(cell_last_state)

        conv_lstm_out = torch.cat(conv_lstm_out_list, dim=1)

        #########################
        # fully-connected layer #
        #########################

        fc_out = conv_lstm_out.permute(0, 2, 3, 1)  # => (b, h, w, c)
        fc_out = self.fc(fc_out)
        fc_out = fc_out.permute(0, 3, 1, 2)  # => (b, c, h, w)

        return fc_out, masked_x, de_x
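
A minimal instantiation sketch for DeepAP, assuming torch is imported; MaskNet, AutoEncoder, ConvLSTM and FC are project modules not shown here, and all sizes and the checkpoint path are placeholders.

# The constructor requires a pretrained AutoEncoder state_dict, otherwise it raises
# ValueError('AutoEncoder not pretrained.'); the path below is hypothetical.
ae_weights = torch.load('ae_pretrained.pth', map_location='cpu')

model = DeepAP(in_dim=16,
               ae_en_h_dims=[16, 8],
               ae_de_h_dims=[8, 16],
               conv_lstm_in_size=(32, 32),   # (height, width)
               conv_lstm_in_dim=8,           # assumed to match the encoder output dim
               conv_lstm_h_dim=16,
               conv_lstm_kernel_sizes=[3, 5],
               conv_lstm_n_layers=1,
               fc_in_dim=32,                 # n_conv_lstm * conv_lstm_h_dim = 2 * 16
               fc_h_dims=[32, 16],
               fc_out_dim=1,
               ae_pretrain_weight=ae_weights,
               if_trainable=False,
               device='cpu')

x = torch.randn(4, 6, 16, 32, 32)            # (batch, time, channels, height, width)
fc_out, masked_x, de_x = model(x)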
Example #6
    def __init__(self, in_dim, ae_en_h_dims, ae_de_h_dims, conv_lstm_in_size,
                 conv_lstm_in_dim, conv_lstm_h_dim, conv_lstm_kernel_sizes,
                 conv_lstm_n_layers, fc_in_dim, fc_h_dims, fc_out_dim,
                 **kwargs):

        super(DeepAP, self).__init__()

        self.device = kwargs.get('device', 'cpu')

        ################
        # masked layer #
        ################

        mask = [[i for i in range(in_dim)], [i for i in range(in_dim)]]
        self.mask_layer = MaskNet(in_dim, in_dim, mask, device=self.device)
        self.mask_thre = kwargs.get('mask_thre', 0.0001)

        ######################
        # auto_encoder layer #
        ######################

        self.ae = AutoEncoder(in_dim=in_dim,
                              en_h_dims=ae_en_h_dims,
                              de_h_dims=ae_de_h_dims)

        if kwargs.get('ae_pretrain_weight') is not None:
            self.ae.load_state_dict(kwargs['ae_pretrain_weight'])
        else:
            raise ValueError('AutoEncoder not pretrained.')

        if kwargs.get('if_trainable'):
            for p in self.ae.parameters():
                p.requires_grad = True
        else:
            # freeze the pretrained auto-encoder parameters
            for p in self.ae.parameters():
                p.requires_grad = False

        ####################
        # conv_lstm layers #
        ####################

        self.conv_lstm_list = nn.ModuleList()
        for i in conv_lstm_kernel_sizes:
            i_kernel_size = (i, i)
            conv_lstm = ConvLSTM(
                in_size=conv_lstm_in_size,
                in_dim=conv_lstm_in_dim,
                h_dim=conv_lstm_h_dim,
                kernel_size=i_kernel_size,
                num_layers=conv_lstm_n_layers,
                batch_first=kwargs.get('conv_lstm_batch_first', True),
                bias=kwargs.get('conv_lstm_bias', True),
                only_last_state=kwargs.get('only_last_state', True),
                device=self.device)
            self.conv_lstm_list.append(conv_lstm)

        #########################
        # fully-connected layer #
        #########################

        self.fc = FC(
            in_dim=fc_in_dim,  # assert in_size == n_conv_lstm * conv_lstm_h_dim
            h_dims=fc_h_dims,
            out_dim=fc_out_dim,
            p_dropout=kwargs.get('fc_p_dropout', 0.1))
Example #7

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Train Autoencoder")
    parser.add_argument("--valid",
                        action="store_true",
                        default=False,
                        help="Perform validation only.")
    args = parser.parse_args()

    chrono = Chrono()
    progress_bar = ProgressBar()

    _, _, trainloader, testloader = dataloader()

    autoencoder = get_torch_vars(AutoEncoder(), False)

    if torch.cuda.is_available():
        autoencoder = torch.nn.DataParallel(autoencoder)
        torch.backends.cudnn.benchmark = True

    if args.valid:
        valid()
        exit(0)

    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(autoencoder.parameters())

    train()

    print('Finished Training')
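
train() and valid() are defined elsewhere in that script. A minimal sketch of what the reconstruction loop could look like, reusing the criterion, optimizer, trainloader and get_torch_vars set up above; the epoch count and the assumption that the model returns raw logits are mine.

def train(num_epochs=20):
    autoencoder.train()
    for epoch in range(num_epochs):
        running_loss = 0.0
        for inputs, _ in trainloader:             # labels are ignored for reconstruction
            inputs = get_torch_vars(inputs, False)
            optimizer.zero_grad()
            decoded = autoencoder(inputs)         # assumed to return un-activated logits
            loss = criterion(decoded, inputs)     # BCEWithLogitsLoss against the input
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        print('epoch %d, loss %.4f' % (epoch, running_loss / len(trainloader)))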
Example #8
        loss = criterion(predict, x)
        mse[i] = loss.item()
    thres = np.percentile(mse, 95)
    print('The threshold MSE for anomaly events is {}'.format(thres))

    correct_num = 0
    total_num = x_anomaly.shape[0]

    # ano_precit = np.zeros(x_anomaly.shape, dtype=np.float32)

    for i in range(total_num):
        x_ano = torch.tensor(x_anomaly[i]).unsqueeze(0)
        predict = model(x_ano)
        loss = criterion(predict, x_ano).item()
        if loss > thres:
            correct_num += 1

    print('Fraction of anomalies above the threshold: {:.4f}'.format(correct_num / total_num))


if __name__ == '__main__':
    model_path = './result/2020-06-26-16/model_best.pth'
    state_dict = torch.load(model_path, map_location='cpu')
    state_pa = state_dict['model_state_dict']
    model = AutoEncoder(28)
    model.load_state_dict(state_pa)

    file_path = './data/creditcard.csv'

    validate(model, file_path)
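
As a usage note, the per-sample loop inside validate() could be batched once the threshold is known; a hedged sketch assuming x_anomaly is a float32 NumPy array, thres comes from the percentile step above, and the model accepts a 2-D batch.

with torch.no_grad():
    x_ano = torch.tensor(x_anomaly)                        # (n_anomalies, n_features)
    predict = model(x_ano)
    per_sample_mse = ((predict - x_ano) ** 2).mean(dim=1)  # reconstruction error per row
    detection_rate = (per_sample_mse > thres).float().mean().item()
print('Detection rate: {:.4f}'.format(detection_rate))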
Example #9
def main():
    args = cfg()
    writer = SummaryWriter(result_dir)
    args.load = False

    x_train, x_test = cut_data(args.data_dir)
    train_dataset = CreditDataset(x_train)
    val_dataset = CreditDataset(x_test)

    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False)

    model = AutoEncoder(28)
    print(model)

    optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999))
    cy_len = floor(len(train_dataset) / args.batch_size // 2)
    clr = lr_scheduler.CyclicLR(optimizer, args.lr, args.max_lr, cy_len, cycle_momentum=False)

    criterion = nn.MSELoss()
    state = {"step": 0,
             "worse_epochs": 0,
             "epochs": 0,
             "best_loss": np.Inf}

    while state["worse_epochs"] < args.hold_step:
        print("Training one epoch from iteration " + str(state["step"]))
        model.train()
        for i, x in enumerate(train_loader):
            cur_lr = optimizer.state_dict()['param_groups'][0]['lr']
            writer.add_scalar("learning_rate", cur_lr, state['step'])
            optimizer.zero_grad()
            outputs = model(x)
            loss = criterion(outputs, x)
            loss.backward()
            writer.add_scalar("training_loss", loss, state['step'])
            optimizer.step()
            clr.step()
            # clr.step()
            state['step'] += 1

            if i % 50 == 0:
                print(
                    "{:4d}/{:4d} --- Loss: {:.6f}  with learning rate {:.6f}".format(
                        i, len(train_dataset) // args.batch_size, loss.item(), cur_lr))

        val_loss = valiadate(model, criterion, val_loader)
        # val_loss /= len(val_dataset)//args.batch_size
        print("Validation loss: " + str(val_loss))
        writer.add_scalar("val_loss", val_loss, state['step'])

        # EARLY STOPPING CHECK
        checkpoint_path = args.model_path + str(state['step']) + '.pth'
        print("Saving model...")
        if val_loss >= state["best_loss"]:
            state["worse_epochs"] += 1
        else:
            print("MODEL IMPROVED ON VALIDATION SET!")
            state["worse_epochs"] = 0
            state["best_loss"] = val_loss
            state["best_checkpoint"] = checkpoint_path
            best_checkpoint_path = args.model_path + 'best.pth'
            save_model(model, optimizer, state, best_checkpoint_path)
        print(state)
        state["epochs"] += 1
        if state["epochs"] % 5 == 0:
            save_model(model, optimizer, state, checkpoint_path)
    last_model = args.model_path + 'last_model.pth'
    save_model(model, optimizer, state, last_model)
    print("Training finished")
    writer.close()
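
valiadate() (spelled as at the call site above) is not shown; a minimal sketch consistent with how it is used, i.e. valiadate(model, criterion, val_loader) returning a scalar validation loss.

def valiadate(model, criterion, val_loader):
    # Average reconstruction loss over the validation set; assumes batches are plain tensors.
    model.eval()
    total_loss = 0.0
    with torch.no_grad():
        for x in val_loader:
            outputs = model(x)
            total_loss += criterion(outputs, x).item()
    return total_loss / max(len(val_loader), 1)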