# Example #1 — evaluate a saved checkpoint
def main():
    """Evaluate a saved DKT checkpoint on the ASSIST test split and print AUC.

    Expects ``--epoch`` / ``-e`` on the command line, naming which
    ``epoch-{N}.pth`` checkpoint to restore.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--epoch", "-e", type=int)
    args = parser.parse_args()

    # Restore the weights saved after the requested training epoch and
    # switch to inference mode (disables dropout / batch-norm updates).
    model = DKT(cfg)
    model.load_state_dict(torch.load("epoch-{}.pth".format(args.epoch)))
    model.eval()

    test_loader = DataLoader(
        dataset=ASSIST(cfg, train=False),
        batch_size=cfg.solver.batch_size,
        num_workers=cfg.solver.num_workers,
        shuffle=False,
        collate_fn=my_collate_fn,
    )

    answer_chunks = []
    result_chunks = []
    for packed_xs, packed_skills, packed_answers in tqdm(test_loader):
        # The model consumes padded tensors, so unpack each batch first.
        padded_xs, lengths = rnn.pad_packed_sequence(packed_xs)
        padded_skills, _ = rnn.pad_packed_sequence(packed_skills)
        padded_answers, _ = rnn.pad_packed_sequence(packed_answers)

        with torch.no_grad():
            preds = model(padded_xs, padded_skills, padded_answers)

        # Re-pack the predictions so they align element-wise with the
        # flattened (padding-free) answers/skills, then select the predicted
        # probability for each timestep's skill id.
        flat_answers = packed_answers.data
        flat_skills = packed_skills.data
        flat_preds = rnn.pack_padded_sequence(preds, lengths).data
        flat_preds = flat_preds.gather(-1, flat_skills[:, None]).squeeze(-1)

        answer_chunks.append(flat_answers)
        result_chunks.append(flat_preds)

    answers = torch.cat(answer_chunks).detach().cpu().numpy()
    results = torch.cat(result_chunks).detach().cpu().numpy()
    score = roc_auc_score(answers, results)
    print("AUC: {:.3f}".format(score))
# Example #2 — train with per-epoch evaluation and checkpointing
def main():
    """Train DKT on ASSIST, reporting test AUC and saving a checkpoint
    after every epoch.
    """
    model = DKT(cfg)
    optimizer = optim.SGD(
        model.parameters(),
        lr=cfg.solver.lr,
        momentum=cfg.solver.momentum,
        weight_decay=cfg.solver.weight_decay,
    )
    # Shrink the learning rate by a factor of 1.5 every 20 epochs.
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=1 / 1.5)

    train_loader = DataLoader(
        dataset=ASSIST(cfg, train=True),
        batch_size=cfg.solver.batch_size,
        num_workers=cfg.solver.num_workers,
        shuffle=True,
        drop_last=False,
        collate_fn=my_collate_fn,
    )
    test_loader = DataLoader(
        dataset=ASSIST(cfg, train=False),
        batch_size=cfg.solver.batch_size,
        num_workers=cfg.solver.num_workers,
        shuffle=False,
        collate_fn=my_collate_fn,
    )

    for epoch in range(cfg.solver.epochs):
        # ---- training pass ----
        model.train()
        pbar = tqdm(train_loader)
        for packed_xs, packed_skills, packed_answers in pbar:
            # Pad with -1 so the model can distinguish padding from real
            # timesteps (presumably it masks -1 internally — defined elsewhere).
            padded_xs, _ = rnn.pad_packed_sequence(packed_xs, padding_value=-1)
            padded_skills, _ = rnn.pad_packed_sequence(packed_skills,
                                                       padding_value=-1)
            padded_answers, _ = rnn.pad_packed_sequence(packed_answers,
                                                        padding_value=-1)

            loss = model(padded_xs, padded_skills, padded_answers)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            pbar.set_description("Epoch: {}, Loss: {:.5f}".format(epoch, float(loss)))

        scheduler.step()

        # ---- evaluation pass ----
        model.eval()
        answer_chunks = []
        result_chunks = []
        for packed_xs, packed_skills, packed_answers in tqdm(test_loader):
            padded_xs, lengths = rnn.pad_packed_sequence(packed_xs)
            padded_skills, _ = rnn.pad_packed_sequence(packed_skills)
            padded_answers, _ = rnn.pad_packed_sequence(packed_answers)

            with torch.no_grad():
                preds = model(padded_xs, padded_skills, padded_answers)

            # Re-pack predictions to align with the padding-free answers,
            # then take the probability predicted for each step's skill id.
            flat_answers = packed_answers.data
            flat_skills = packed_skills.data
            flat_preds = rnn.pack_padded_sequence(preds, lengths).data
            flat_preds = flat_preds.gather(-1, flat_skills[:, None]).squeeze(-1)

            answer_chunks.append(flat_answers)
            result_chunks.append(flat_preds)

        answers = torch.cat(answer_chunks).detach().cpu().numpy()
        results = torch.cat(result_chunks).detach().cpu().numpy()
        score = roc_auc_score(answers, results)
        print("AUC: {:.3f}".format(score))

        torch.save(model.state_dict(), "epoch-{}.pth".format(epoch))
# Example #3 — training with GPU support and an auxiliary loss
def main():
    """Train DKT on ASSIST with optional GPU support and an auxiliary loss.

    Command line: ``--gpu`` / ``-g`` selects a CUDA device index; the default
    of -1 runs on CPU. Evaluates test AUC every epoch and saves a checkpoint
    every ``cfg.save_interval`` epochs.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--gpu", "-g", type=int, default=-1)
    args = parser.parse_args()

    device = torch.device("cpu" if args.gpu ==
                          -1 else "cuda:{}".format(args.gpu))
    print(device)

    model = DKT(cfg)
    model.to(device)

    optimizer = optim.SGD(
        model.parameters(),
        lr=cfg.solver.lr,
        momentum=cfg.solver.momentum,
        weight_decay=cfg.solver.weight_decay,
    )
    # Shrink the learning rate by a factor of 1.5 every 20 epochs.
    scheduler = optim.lr_scheduler.StepLR(
        optimizer,
        step_size=20,
        gamma=1 / 1.5,
    )
    train_set = ASSIST(cfg, train=True)
    train_loader = DataLoader(
        dataset=train_set,
        batch_size=cfg.solver.batch_size,
        num_workers=cfg.solver.num_workers,
        shuffle=True,
        drop_last=False,
        collate_fn=my_collate_fn,
    )
    test_set = ASSIST(cfg, train=False)
    test_loader = DataLoader(
        dataset=test_set,
        batch_size=cfg.solver.batch_size,
        num_workers=cfg.solver.num_workers,
        shuffle=False,
        collate_fn=my_collate_fn,
    )

    for epoch in range(cfg.solver.epochs):
        pbar = tqdm(train_loader)
        model.train()
        for packed_xs, packed_skills, packed_answers in pbar:
            # .to(device) is NOT in-place — the result must be reassigned.
            packed_xs = packed_xs.to(device)
            packed_skills = packed_skills.to(device)
            packed_answers = packed_answers.to(device)

            # Pad with -1 so the model can distinguish padding from real
            # timesteps (presumably masked inside the model — defined elsewhere).
            xs, lengths = rnn.pad_packed_sequence(packed_xs, padding_value=-1)
            skills, _ = rnn.pad_packed_sequence(packed_skills,
                                                padding_value=-1)
            answers, _ = rnn.pad_packed_sequence(packed_answers,
                                                 padding_value=-1)

            # In train mode this model variant returns (main loss, aux loss).
            loss, aux_loss = model(xs, skills, answers)

            pbar.set_description(
                "Epoch: {}, Loss: {:.5f}, AuxLoss: {:.5f}".format(
                    epoch + 1, float(loss), float(aux_loss)))

            optimizer.zero_grad()
            loss = loss + aux_loss
            loss.backward()
            optimizer.step()

        scheduler.step()

        model.eval()
        all_answers = []
        all_results = []
        for packed_xs, packed_skills, packed_answers in tqdm(test_loader):
            # BUG FIX: the originals called packed_xs.to(device) without
            # reassigning; .to() returns a new object, so eval batches stayed
            # on CPU and GPU runs failed with a device mismatch.
            packed_xs = packed_xs.to(device)
            packed_skills = packed_skills.to(device)
            packed_answers = packed_answers.to(device)

            xs, lengths = rnn.pad_packed_sequence(packed_xs)
            skills, _ = rnn.pad_packed_sequence(packed_skills)
            answers, _ = rnn.pad_packed_sequence(packed_answers)

            with torch.no_grad():
                results = model(xs, skills, answers)

            # Re-pack predictions to align element-wise with the
            # padding-free answers before scoring.
            answers = packed_answers.data
            results = rnn.pack_padded_sequence(results, lengths).data

            all_answers.append(answers)
            all_results.append(results)

        answers = torch.cat(all_answers).detach().cpu().numpy()
        results = torch.cat(all_results).detach().cpu().numpy()
        score = roc_auc_score(answers, results)
        print("AUC: {:.3f}".format(score))

        if (epoch + 1) % cfg.save_interval == 0:
            torch.save(model.state_dict(), "epoch-{}.pth".format(epoch + 1))