Example #1
import torch
import torch.nn as nn

# torch_device() and _loss_DeepAnT() are helpers assumed to be defined elsewhere
# in the same module (they resolve the model's device and compute the DeepAnT loss).
def train(model: nn.Module,
          train_loader: torch.utils.data.DataLoader,
          optimizer: torch.optim.Optimizer, epoch: int):
    """Train the model for one epoch; return the normalized epoch loss,
    the per-batch loss history, and fractional epoch positions for plotting."""
    train_loss = 0.0
    train_loss_list = []
    batch_list = []
    num_data = 0
    device = torch_device(model)
    model.train()

    for X, target in train_loader:
        batch_size = X.size(0)
        num_data += batch_size
        X, target = X.to(device), target.to(device)
        output = model(X)
        loss = _loss_DeepAnT(output, target)
        train_loss += loss.item()
        train_loss_list.append(loss.item())
        # fractional epoch position of this batch, used as the x-axis of loss curves
        batch_list.append(epoch - 1 + (num_data / len(train_loader.sampler)))
        # backpropagation and weight update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # accumulated loss normalized by the number of processed samples
    avg_train_loss = train_loss / num_data

    return avg_train_loss, train_loss_list, batch_list
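
The per-batch history returned by train() can be plotted directly. A minimal sketch, assuming matplotlib is available and that model, train_loader, optimizer and epoch already exist (they are not defined in the example itself):

import matplotlib.pyplot as plt

avg_loss, train_loss_list, batch_list = train(model, train_loader, optimizer, epoch)

# batch_list holds fractional epoch positions, train_loss_list the matching batch losses
plt.plot(batch_list, train_loss_list)
plt.xlabel("epoch")
plt.ylabel("training loss")
plt.show()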
Example #2
import numpy as np
import torch
import torch.nn as nn

# torch_device() and _anomaly_score() are helpers assumed to be defined elsewhere
# in the same module.
def get_score(model: nn.Module,
              data_loader: torch.utils.data.DataLoader):
    """Return a flat array with one anomaly score per sample in data_loader."""
    score = np.array([])
    device = torch_device(model)
    model.eval()
    with torch.no_grad():  # inference only, no gradients needed
        for X, target in data_loader:
            X, target = X.to(device), target.to(device)
            output = model(X)
            s = _anomaly_score(output, target).cpu().detach().numpy()
            score = np.concatenate((score, s))

    return score
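
get_score() yields one anomaly score per sample. A minimal sketch of turning the scores into binary anomaly flags; the mean + 3·std threshold is an illustrative assumption, not part of the source, and model and test_loader are assumed to exist:

import numpy as np

score = get_score(model, test_loader)           # hypothetical test loader
threshold = score.mean() + 3 * score.std()      # illustrative 3-sigma rule, not from the source
anomalies = score > threshold                   # boolean mask, True where a sample looks anomalous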
Example #3
import numpy as np
import torch
import torch.nn as nn

# torch_device(), _anomaly_score() and _loss_DeepAnT() are helpers assumed to be
# defined elsewhere in the same module.
def valid(model: nn.Module,
          valid_loader: torch.utils.data.DataLoader):
    """Evaluate on valid_loader; return the normalized validation loss and anomaly scores."""
    valid_loss = 0.0
    num_data = 0
    score = np.array([])
    device = torch_device(model)
    model.eval()  # disable dropout / batch-norm updates during validation

    with torch.no_grad():
        for X, target in valid_loader:
            batch_size = X.size(0)
            num_data += batch_size
            X, target = X.to(device), target.to(device)
            output = model(X)
            s = _anomaly_score(output, target).cpu().detach().numpy()
            score = np.concatenate((score, s))
            loss = _loss_DeepAnT(output, target)
            valid_loss += loss.item()

    # accumulated loss normalized by the number of validation samples
    avg_valid_loss = valid_loss / num_data

    return avg_valid_loss, score
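
A minimal sketch of how train(), valid() and get_score() could be wired together per epoch; the Adam optimizer, learning rate, epoch count, data loaders and checkpoint path are assumptions for illustration, not taken from the source:

import torch

optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)   # assumed optimizer / learning rate
best_valid_loss = float("inf")

for epoch in range(1, n_epochs + 1):                         # n_epochs assumed to be defined
    avg_train_loss, _, _ = train(model, train_loader, optimizer, epoch)
    avg_valid_loss, _ = valid(model, valid_loader)
    if avg_valid_loss < best_valid_loss:                     # keep the best model seen so far
        best_valid_loss = avg_valid_loss
        torch.save(model.state_dict(), "best_model.pt")      # hypothetical checkpoint path

test_scores = get_score(model, test_loader)                  # hypothetical test loader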