Example #1
    def __init__(self,
                 input_dim,
                 side_feat_dim,
                 gcn_hidden_dim,
                 side_hidden_dim,
                 encode_hidden_dim,
                 num_support=5,
                 num_classes=5,
                 num_basis=3):
        super(GraphMatrixCompletion, self).__init__()
        # Graph-convolutional encoder; num_support support matrices, one per rating level
        self.encoder = StackGCNEncoder(input_dim, gcn_hidden_dim, num_support,
                                       DROPOUT_RATIO)
        # Dense layer for the side (node-feature) input; no dropout on raw features
        self.dense1 = FullyConnected(side_feat_dim,
                                     side_hidden_dim,
                                     dropout=0.,
                                     use_bias=True)
        # Fuses GCN embeddings with side-feature embeddings; identity activation
        self.dense2 = FullyConnected(gcn_hidden_dim + side_hidden_dim,
                                     encode_hidden_dim,
                                     dropout=DROPOUT_RATIO,
                                     activation=lambda x: x)
        # Decoder scores each of the num_classes rating levels using num_basis weight matrices
        self.decoder = Decoder(encode_hidden_dim,
                               num_basis,
                               num_classes,
                               dropout=DROPOUT_RATIO,
                               activation=lambda x: x)
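A minimal instantiation sketch; every dimension below is an illustrative placeholder, not a value taken from the original source:

# Hypothetical usage -- all sizes here are made-up placeholders
DROPOUT_RATIO = 0.5
model = GraphMatrixCompletion(input_dim=2625,
                              side_feat_dim=41,
                              gcn_hidden_dim=500,
                              side_hidden_dim=10,
                              encode_hidden_dim=75)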
Example #2
import os

import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm

# Encoder, Decoder, the custom DataLoader and the checkpoints directory
# are defined elsewhere in the project.


def train():
    # config
    epochs = 20
    batchsize = 256

    # model
    encoder = Encoder().cuda()
    decoder = Decoder().cuda()
    trainloader = DataLoader('region', batchsize)

    params = list(encoder.parameters()) + list(decoder.parameters())
    # optimizer
    optimizer = optim.SGD(params, lr=0.01, momentum=0.9, weight_decay=1e-4)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1)
    # loss function
    loss_func = nn.MSELoss().cuda()
    best_loss = float('inf')

    for epoch in range(epochs):
        epoch_loss = []
        pbar = tqdm(trainloader)
        for image, coord in pbar:
            image = image.cuda()
            output = decoder(encoder(image))
            # compute loss
            loss = loss_func(output, image)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            epoch_loss.append(loss.item())
            fmt = 'Epoch[{:2d}]-Loss:{:.3f}'.format(epoch + 1, loss.item())
            pbar.set_description(fmt)

        avg_loss = sum(epoch_loss) / len(epoch_loss)
        # learning-rate scheduler update (once per epoch)
        scheduler.step()
        if avg_loss < best_loss:
            best_loss = avg_loss
            torch.save(
                encoder.state_dict(),
                os.path.join(
                    checkpoints,
                    'Encoder-epoch-%d-loss:%.3f.pth' % (epoch + 1, avg_loss)))
            torch.save(
                decoder.state_dict(),
                os.path.join(
                    checkpoints,
                    'Decoder-epoch-%d-loss:%.3f.pth' % (epoch + 1, avg_loss)))
    print('Train Finished!!!')
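The loop above assumes project-local Encoder and Decoder modules and a checkpoints path. A purely illustrative stand-in that would make it runnable (not the original architecture):

# Hypothetical stand-ins -- the real modules live elsewhere in the project
checkpoints = './checkpoints'

class Encoder(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(nn.Conv2d(3, 16, 3, stride=2, padding=1), nn.ReLU(),
                                 nn.Conv2d(16, 32, 3, stride=2, padding=1), nn.ReLU())

    def forward(self, x):
        return self.net(x)

class Decoder(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(nn.ConvTranspose2d(32, 16, 4, stride=2, padding=1), nn.ReLU(),
                                 nn.ConvTranspose2d(16, 3, 4, stride=2, padding=1), nn.Sigmoid())

    def forward(self, x):
        return self.net(x)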
Example #3
    def _build_model(self):
        """Build the auto-encoder model and return it.
        """
        # Token embedding applied to encoder/decoder inputs
        embed = EmbeddingLayer(self.args.d_model,
                               self.args.vocab_size,
                               dropout=self.args.dropout)
        # N stacked layers, head_num attention heads, d_ff feed-forward width
        encoder = Encoder(self.args.d_model,
                          self.args.N,
                          self.args.head_num,
                          self.args.d_ff,
                          self.args.hidden_size,
                          dropout=self.args.dropout)
        decoder = Decoder(self.args.d_model,
                          self.args.N,
                          self.args.head_num,
                          self.args.d_ff,
                          self.args.hidden_size,
                          dropout=self.args.dropout)
        # Projects d_model features back to vocabulary logits
        linear_softmax = LinearSoftmax(self.args.d_model, self.args.vocab_size)
        model = EncoderDecoder(embed, encoder, decoder, linear_softmax)

        return model
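The builder reads every hyperparameter off self.args; a hypothetical namespace that satisfies it (all values below are placeholders):

from types import SimpleNamespace

# Hypothetical hyperparameters -- every value is a made-up placeholder
args = SimpleNamespace(d_model=512, vocab_size=32000, N=6, head_num=8,
                       d_ff=2048, hidden_size=512, dropout=0.1)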
Example #4
import json
import os.path as osp

import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm

# Encoder, Decoder and output_path are assumed to be defined elsewhere.


def train(train_data, test_data, load_pretrained=False, pretrained_path=None):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    train_data = torch.Tensor(train_data).to(device)
    _test_data = torch.Tensor(test_data).to(device)

    num_data, input_dimension = train_data.shape  # input dimension = dictionary size (~700)
    hidden_dimensions = [1024, 728]
    code_dimension = 512

    total_steps = 500
    learning_rate = 1e-4
    iter_per_batch = 10
    batch_size = num_data // iter_per_batch

    config_dict = {
        'input_dimension': input_dimension,
        'hidden_dimensions': hidden_dimensions,
        'code_dimension': code_dimension
    }
    config_file_path = osp.join(output_path, 'config.json')
    with open(config_file_path, 'w') as f:
        json.dump(config_dict, f, indent=4)

    encoder = Encoder(input_dimension, hidden_dimensions, code_dimension)
    encoder.to(device)
    decoder = Decoder(input_dimension, hidden_dimensions, code_dimension)
    decoder.to(device)
    if load_pretrained:
        if pretrained_path is None:
            raise ValueError("Pretrained path must be provided!")
        encoder_file_path, decoder_file_path = pretrained_path
        encoder.load_state_dict(torch.load(encoder_file_path))
        decoder.load_state_dict(torch.load(decoder_file_path))

    loss_func = nn.MSELoss()
    optimizer = torch.optim.Adam(list(encoder.parameters()) + list(decoder.parameters()), lr=learning_rate)
    best_loss = float("Inf")

    for i in tqdm(range(total_steps)):
        # Get next batch (cycles through the data in iter_per_batch chunks)
        batch_index = i % iter_per_batch * batch_size
        X = train_data[batch_index : min(num_data, batch_index + batch_size)]
        out = encoder(X)
        out = decoder(out)
        loss = loss_func(out, X)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Evaluate autoencoder performance on test data
        with torch.no_grad():
            out = decoder(encoder(_test_data)).cpu().numpy()
        test_data_loss = np.mean(np.sqrt(np.sum((out - test_data) ** 2, axis=1)))
        print("-- Train loss: {}".format(loss.item()))
        print("-- Test loss: {}".format(test_data_loss))
        
        if test_data_loss < best_loss:
            best_loss = test_data_loss
            encoder_file_path = osp.join(output_path, 'encoder.pt')
            decoder_file_path = osp.join(output_path, 'decoder.pt')
            torch.save(encoder.state_dict(), encoder_file_path)
            torch.save(decoder.state_dict(), decoder_file_path)
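A hedged invocation sketch; the shapes, path, and random data below are placeholders, not values from the original project:

# Hypothetical usage -- output_path is assumed to exist before train() runs
output_path = './runs/autoencoder'
train_matrix = np.random.rand(2000, 700).astype(np.float32)
test_matrix = np.random.rand(200, 700).astype(np.float32)
train(train_matrix, test_matrix)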
Example #5
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# FirmaData_onesubject, Encoder, Decoder, Seq2Seq and the hyperparameters
# (data_folder_dir, subject ids, b_size, window_size, latent_dim, embed_dim,
#  num_layer, learning_rate, Max_epoch) are defined elsewhere in the project.

cuda = torch.device('cuda:0')

dataset_train = FirmaData_onesubject(data_folder_dir, subject_id_train, 0.5, 0.2, 0.3, window_size, subset='train')
dataset_test_normal = FirmaData_onesubject(data_folder_dir, subject_id_train, 0.5, 0.2, 0.3, window_size, subset='test')
dataset_test_anomaly = FirmaData_onesubject(data_folder_dir, subject_id_anomaly, 0.7, 0.0, 0.3, window_size, subset='train')

loader_train = DataLoader(dataset_train, batch_size=b_size, shuffle=True)
loader_test_normal = DataLoader(dataset_test_normal, batch_size=b_size, shuffle=True)
loader_test_anomaly = DataLoader(dataset_test_anomaly, batch_size=b_size, shuffle=True)

# Each sample is [window_size, data_dim]; data_dim is 564 in our case
# (the first timestamp dimension is removed).
input_size = dataset_train[0].shape[1]

writer = SummaryWriter()
model_encoder = Encoder(input_size, latent_dim, embed_dim, num_layer)
model_decoder = Decoder(input_size, latent_dim, input_size, embed_dim, num_layer)
model = Seq2Seq(model_encoder, model_decoder, cuda).to(cuda)
# loss_function = nn.MSELoss(reduction='none')
# loss_function = nn.L1Loss(reduction='none')
loss_function = nn.CrossEntropyLoss(weight=torch.tensor([1., 4.]).cuda())
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
global_step = 0


# writer.add_graph(model, loader_train)
for epoch in range(Max_epoch):
    for step, seq_data in enumerate(loader_train):
        # seq_data.shape: [32, 10, 564], i.e. [batch, seq_len, data_dim]
        global_step += 1
        seq_data = seq_data.cuda()
        seq_pred = model(seq_data.float())
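        # NOTE: the original snippet is truncated here. The lines below are a
        # hedged guess at the continuation: CrossEntropyLoss with a 2-element
        # weight implies per-step 2-class logits, and the target construction
        # is hypothetical, not the original code.
        target = (seq_data[..., 0] > 0).long()  # placeholder binary labels
        loss = loss_function(seq_pred.permute(0, 2, 1), target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        writer.add_scalar('train/loss', loss.item(), global_step)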