Code Example #1
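A quick smoke test: it builds a CNN_3D model on CPU, pushes a random batch of question ids through it, then loads a test CSV with KTData and DataLoader and inspects the shapes produced by the collate function.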
def main():
    input_dim = 220
    embed_dim = 200
    hidden_dim = 225
    output_dim = 110
    batch_size = 4
    device = torch.device('cpu')

    test_input = torch.randint(1, 110, (4, 200))  # random (batch_size, seq_len) batch of question ids

    model = CNN_3D(input_dim, embed_dim, hidden_dim, 1, output_dim, batch_size,
                   device)  # the literal 1 is num_layers
    out = model(test_input)

    train_path = "D:\\workspaces\\CKT\\dataset\\assist2009_updated\\sayhi_test.csv"
    train_dataset = KTData(train_path, opt='None')
    train_loader = DataLoader(train_dataset,
                              batch_size=4,
                              shuffle=False,
                              collate_fn=myutils.collate_fn)
    batch_len, batch_seq, batch_label = next(iter(train_loader))

    print(batch_len, batch_seq.shape, batch_label.shape)

    # skip the first k_frames steps, then flatten the remaining question ids
    # (column 0) and correctness labels (column 1)
    k_frames = 8
    next_question_number = batch_label[:, k_frames:,
                                       0].contiguous().view(-1).long()
    next_question_label = batch_label[:, k_frames:, 1].contiguous().view(-1)

    print(next_question_number.shape)
    print(next_question_label.shape)
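The collate function used above lives in the project's myutils module and is not shown on this page. As a rough sketch only, a collate_fn with the same return signature (sequence lengths, padded interaction sequences, padded [question, correctness] labels) could look like the following; the padding scheme and all names here are assumptions, not the CAKT implementation.

import torch
from torch.nn.utils.rnn import pad_sequence

def collate_fn_sketch(batch):
    # batch: list of (seq, label) pairs with per-student lengths, where seq is
    # a 1-D LongTensor of interaction ids and label is a (seq_len, 2) tensor
    # of [question_id, correct] rows
    batch = sorted(batch, key=lambda item: len(item[0]), reverse=True)
    lengths = torch.tensor([len(seq) for seq, _ in batch])
    seqs = pad_sequence([seq for seq, _ in batch], batch_first=True)        # (B, max_len)
    labels = pad_sequence([label for _, label in batch], batch_first=True)  # (B, max_len, 2)
    return lengths, seqs, labels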
Code Example #2
File: CAKT_experiment.py (Project: Badstu/CAKT)
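Main training entry point: it builds train/valid/test loaders, instantiates the requested CAKT variant, optionally resumes from a checkpoint, and runs the per-epoch train/valid/test loop while logging loss and AUC to the console, the Visualizer, and result files.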
def CAKT_main(**kwargs):
    opt = Config()

    for k_, v_ in kwargs.items():
        setattr(opt, k_, v_)

    if opt.vis:
        vis = Visualizer(opt.env)
    else:
        vis = None

    # init_loss_file(opt)
    if opt.data_source == "statics" or opt.data_source == "assist2017":
        opt.fold_dataset = True
    train_path, valid_path, test_path = init_file_path(opt)
    print("data_source:{} fold_dataset:{}".format(opt.data_source,
                                                  opt.fold_dataset))

    # random_state = random.randint(1, 50)
    # print("random_state:", random_state)
    train_dataset = KTData(train_path,
                           fold_dataset=opt.fold_dataset,
                           q_numbers=opt.output_dim,
                           opt='None')
    valid_dataset = KTData(valid_path,
                           fold_dataset=opt.fold_dataset,
                           q_numbers=opt.output_dim,
                           opt='None')
    test_dataset = KTData(test_path,
                          fold_dataset=opt.fold_dataset,
                          q_numbers=opt.output_dim,
                          opt='None')

    # print(train_path, valid_path, test_path)
    print(len(train_dataset), len(valid_dataset), len(test_dataset))

    train_loader = DataLoader(train_dataset,
                              batch_size=opt.batch_size,
                              shuffle=True,
                              num_workers=opt.num_workers,
                              drop_last=True,
                              collate_fn=myutils.collate_fn)
    valid_loader = DataLoader(valid_dataset,
                              batch_size=opt.batch_size,
                              shuffle=True,
                              num_workers=opt.num_workers,
                              drop_last=True,
                              collate_fn=myutils.collate_fn)
    test_loader = DataLoader(test_dataset,
                             batch_size=opt.batch_size,
                             shuffle=True,
                             num_workers=opt.num_workers,
                             drop_last=True,
                             collate_fn=myutils.collate_fn)
    print("model name is {}, next is inital model".format(opt.model_name))
    if opt.model_name == "CAKT_dev":
        model = CAKT_dev(opt.k_frames, opt.knowledge_length,
                         opt.concept_length, opt.knowledge_emb_size,
                         opt.interaction_emb_size, opt.lstm_hidden_dim,
                         opt.lstm_num_layers, opt.batch_size, opt.device)
    elif opt.model_name == "CAKT":
        model = CAKT(opt.k_frames, opt.input_dim, opt.H, opt.embed_dim,
                     opt.hidden_dim, opt.num_layers, opt.output_dim,
                     opt.batch_size, opt.device)
    elif opt.model_name == "CAKT_CI":
        model = CAKT_CI(opt.k_frames, opt.input_dim, opt.H, opt.embed_dim,
                        opt.hidden_dim, opt.num_layers, opt.output_dim,
                        opt.batch_size, opt.device)
    elif opt.model_name == "CAKT_ablation":
        print("initial abaltion model: ", opt.ablation)
        model = CAKT_ablation(opt.k_frames, opt.input_dim, opt.H,
                              opt.embed_dim, opt.hidden_dim, opt.num_layers,
                              opt.output_dim, opt.batch_size, opt.device,
                              opt.ablation)

    lr = opt.lr
    last_epoch = -1

    optimizer = torch.optim.Adam(params=model.parameters(),
                                 lr=lr,
                                 weight_decay=opt.weight_decay,
                                 betas=(0.9, 0.99))
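    # resume model, optimizer state, epoch and lr from a saved checkpoint if model_path is set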
    if opt.model_path:
        map_location = lambda storage, loc: storage
        checkpoint = torch.load(opt.model_path, map_location=map_location)
        model.load_state_dict(checkpoint["model"])
        last_epoch = checkpoint["epoch"]
        lr = checkpoint["lr"]
        optimizer.load_state_dict(checkpoint["optimizer"])

    # move the model to the target device, wrapping it in DataParallel
    # when 2 or 4 GPUs are available
    if torch.cuda.device_count() == 1:
        model = model.to(opt.device)
    elif torch.cuda.device_count() == 2:
        model = nn.DataParallel(model, device_ids=[0, 1])
        model = model.to(opt.device)
    elif torch.cuda.device_count() == 4:
        model = nn.DataParallel(model, device_ids=[0, 1, 2, 3])
        model = model.to(opt.device)

    loss_result = {}
    auc_result = {}
    best_test_auc = 0
    # START TRAIN
    for epoch in range(opt.max_epoch):
        torch.cuda.empty_cache()
        if epoch < last_epoch:
            continue

        if opt.model_name in ("CAKT", "CAKT_CI", "CAKT_ablation"):
            train_loss_meter, train_auc_meter, train_all_auc, train_loss_list = train_cakt(
                opt, vis, model, train_loader, epoch, lr, optimizer)
            val_loss_meter, val_auc_meter, val_all_auc, val_loss_list = valid_cakt(
                opt, vis, model, valid_loader, epoch)
            test_loss_meter, test_auc_meter, test_all_auc, test_loss_list = test_cakt(
                opt, vis, model, test_loader, epoch)

        loss_result["train_loss"] = train_loss_meter.value()[0]
        # auc_resilt["train_auc"] = train_auc_meter.value()[0]
        auc_resilt["train_auc"] = train_all_auc
        loss_result["val_loss"] = val_loss_meter.value()[0]
        # auc_resilt["val_auc"] = val_auc_meter.value()[0]
        auc_resilt["val_auc"] = val_all_auc
        loss_result["test_loss"] = test_loss_meter.value()[0]
        # auc_resilt["test_auc"] = test_auc_meter.value()[0]
        auc_resilt["test_auc"] = test_all_auc

        for k, v in loss_result.items():
            print("epoch:{epoch}, {k}:{v:.5f}".format(epoch=epoch, k=k, v=v))
            if opt.vis:
                vis.line(X=np.array([epoch]),
                         Y=np.array([v]),
                         win="loss",
                         opts=dict(title="loss", showlegend=True),
                         name=k,
                         update='append')
        for k, v in auc_result.items():
            print("epoch:{epoch}, {k}:{v:.5f}".format(epoch=epoch, k=k, v=v))
            if opt.vis:
                vis.line(X=np.array([epoch]),
                         Y=np.array([v]),
                         win="auc",
                         opts=dict(title="auc", showlegend=True),
                         name=k,
                         update='append')

        # best_test_auc = max(best_test_auc, test_auc_meter.value()[0], val_auc_meter.value()[0])
        best_test_auc = max(best_test_auc, test_all_auc, val_all_auc)
        print("best_test_auc is: ", best_test_auc)

        # TODO: write this epoch's losses to file
        myutils.save_loss_file(opt, epoch, train_loss_list, val_loss_list,
                               test_loss_list)

        # TODO: write this epoch's AUC to file
        myutils.save_auc_file(opt, epoch, train_all_auc, val_all_auc,
                              test_all_auc)

        # TODO: save model + optimizer state every save_every epochs
        # if epoch % opt.save_every == 0:
        if best_test_auc == test_all_auc or best_test_auc == val_all_auc:
            myutils.save_model_weight(opt, model, optimizer, epoch, lr)

        # TODO: apply lr decay
        lr = myutils.adjust_lr(opt, optimizer, epoch,
                               train_loss_meter.value()[0])

    # TODO: save the final model weights when training ends
    myutils.save_model_weight(opt, model, optimizer, epoch, lr, is_final=True)

    return best_test_auc
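myutils.save_model_weight is project code and is not shown on this page. A minimal sketch consistent with how the checkpoint is read back above (keys "model", "optimizer", "epoch", "lr") might look like this; opt.checkpoint_dir and the file naming are assumptions.

import os
import torch

def save_model_weight_sketch(opt, model, optimizer, epoch, lr,
                             is_final=False, is_CV=False):
    # store exactly the keys the resume code above expects
    checkpoint = {
        "model": model.state_dict(),
        "optimizer": optimizer.state_dict(),
        "epoch": epoch,
        "lr": lr,
    }
    tag = "final" if is_final else "epoch{}".format(epoch)
    if is_CV:
        tag = "cv_" + tag
    path = os.path.join(opt.checkpoint_dir, "{}_{}.pth".format(opt.model_name, tag))
    torch.save(checkpoint, path)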
Code Example #3
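Runs a single ablation setting: it trains CNN_ablation on one train/test split, tracks the best test AUC, and applies learning-rate decay each epoch.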
def run_one_setting(**kwargs):
    opt = Config()

    for k_, v_ in kwargs.items():
        setattr(opt, k_, v_)

    print(opt.__dict__)

    if opt.vis:
        vis = Visualizer(opt.env)
    else:
        vis = None

    init_loss_file(opt)

    if opt.data_source == "statics":
        opt.fold_dataset = True
    train_path, valid_path, test_path = init_file_path(opt)
    print(opt.fold_dataset)
    print(opt.ablation)
    train_dataset = KTData(train_path,
                           fold_dataset=opt.fold_dataset,
                           q_numbers=opt.output_dim,
                           opt='None')
    test_dataset = KTData(test_path,
                          fold_dataset=opt.fold_dataset,
                          q_numbers=opt.output_dim,
                          opt='None')

    print(len(train_dataset), len(test_dataset))

    train_loader = DataLoader(train_dataset,
                              batch_size=opt.batch_size,
                              shuffle=True,
                              num_workers=opt.num_workers,
                              drop_last=True,
                              collate_fn=myutils.collate_fn)
    test_loader = DataLoader(test_dataset,
                             batch_size=opt.batch_size,
                             shuffle=True,
                             num_workers=opt.num_workers,
                             drop_last=True,
                             collate_fn=myutils.collate_fn)

    if opt.model_name == "CNN_ablation":
        model = CNN_ablation(opt.k_frames, opt.input_dim, opt.embed_dim,
                             opt.hidden_dim, opt.num_layers, opt.output_dim,
                             opt.batch_size, opt.device, opt.ablation)

    lr = opt.lr
    optimizer = torch.optim.Adam(params=model.parameters(),
                                 lr=lr,
                                 weight_decay=opt.weight_decay,
                                 betas=(0.9, 0.99))

    model = model.to(opt.device)

    best_test_auc = 0
    # START TRAIN
    for epoch in range(opt.max_epoch):
        torch.cuda.empty_cache()
        train_loss_meter, train_auc_meter, train_loss_list = train.train(
            opt, vis, model, train_loader, epoch, lr, optimizer)
        torch.cuda.empty_cache()
        test_loss_meter, test_auc_meter, test_loss_list = test.test(
            opt, vis, model, test_loader, epoch)

        print("epoch{}, {k}:{v:.5f}".format(epoch,
                                            k="train_auc",
                                            v=train_auc_meter.value()[0]))
        print("epoch{}, {k}:{v:.5f}".format(epoch,
                                            k="test_auc",
                                            v=test_auc_meter.value()[0]))

        best_test_auc = max(best_test_auc, test_auc_meter.value()[0])
        print("best_test_auc is: ", best_test_auc)

        # TODO: apply lr decay
        lr = myutils.adjust_lr(opt, optimizer, epoch,
                               train_loss_meter.value()[0])

    return best_test_auc
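myutils.adjust_lr is likewise project code; elsewhere on this page it is called both with and without a training-loss argument, and its return value becomes the new learning rate. A hedged sketch with that signature is below; the step-decay rule and the opt.decay_every / opt.lr_decay fields are assumptions, not the CAKT implementation.

def adjust_lr_sketch(opt, optimizer, epoch, train_loss=None):
    # simple step decay; train_loss is accepted but ignored in this sketch
    lr = optimizer.param_groups[0]["lr"]
    if epoch > 0 and epoch % opt.decay_every == 0:
        lr = lr * opt.lr_decay
        for param_group in optimizer.param_groups:
            param_group["lr"] = lr
    return lr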
Code Example #4
File: EKT_experiment.py (Project: Badstu/CAKT)
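EKT baseline entry point: it uses the same data pipeline as above but builds an EKTM_dev model; the separate test pass is commented out, so the validation metrics are reused as the test metrics.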
def main(**kwargs):
    opt = Config()

    for k_, v_ in kwargs.items():
        setattr(opt, k_, v_)

    if opt.vis:
        vis = Visualizer(opt.env)
    else:
        vis = None

    init_loss_file(opt)
    if opt.data_source == "statics":
        opt.fold_dataset = True
    train_path, valid_path, test_path = init_file_path(opt)
    # print(opt.fold_dataset)

    # random_state = random.randint(1, 50)
    # print("random_state:", random_state)
    train_dataset = KTData(train_path,
                           fold_dataset=opt.fold_dataset,
                           q_numbers=opt.output_dim,
                           opt='None')
    valid_dataset = KTData(valid_path,
                           fold_dataset=opt.fold_dataset,
                           q_numbers=opt.output_dim,
                           opt='None')
    test_dataset = KTData(test_path,
                          fold_dataset=opt.fold_dataset,
                          q_numbers=opt.output_dim,
                          opt='None')

    print(len(train_dataset), len(valid_dataset), len(test_dataset))

    train_loader = DataLoader(train_dataset,
                              batch_size=opt.batch_size,
                              shuffle=True,
                              num_workers=opt.num_workers,
                              drop_last=True,
                              collate_fn=myutils.collate_fn)
    valid_loader = DataLoader(valid_dataset,
                              batch_size=opt.batch_size,
                              shuffle=True,
                              num_workers=opt.num_workers,
                              drop_last=True,
                              collate_fn=myutils.collate_fn)
    test_loader = DataLoader(test_dataset,
                             batch_size=opt.batch_size,
                             shuffle=True,
                             num_workers=opt.num_workers,
                             drop_last=True,
                             collate_fn=myutils.collate_fn)

    if opt.model_name == "EKT":
        model = EKTM_dev(knowledge_length=110,
                         knowledge_emb_size=100,
                         seq_hidden_size=100,
                         is_text=opt.is_text,
                         text_emb_size=100)

    # 100-d placeholder text embedding (not used further in this snippet)
    random_text_token = torch.randn((100, ))

    lr = opt.lr
    last_epoch = -1

    optimizer = torch.optim.Adam(params=model.parameters(),
                                 lr=lr,
                                 weight_decay=opt.weight_decay,
                                 betas=(0.9, 0.99))
    if opt.model_path:
        map_location = lambda storage, loc: storage
        checkpoint = torch.load(opt.model_path, map_location=map_location)
        model.load_state_dict(checkpoint["model"])
        last_epoch = checkpoint["epoch"]
        lr = checkpoint["lr"]
        optimizer.load_state_dict(checkpoint["optimizer"])

    model = model.to(opt.device)

    loss_result = {}
    auc_result = {}
    best_test_auc = 0
    # START TRAIN
    for epoch in range(opt.max_epoch):
        if epoch < last_epoch:
            continue
        if opt.model_name == "EKT":
            train_loss_meter, train_auc_meter, train_loss_list = run_ekt.train_ekt(
                opt, vis, model, train_loader, epoch, lr, optimizer)
            val_loss_meter, val_auc_meter, val_loss_list = run_ekt.valid_ekt(
                opt, vis, model, valid_loader, epoch)
            test_loss_meter, test_auc_meter, test_loss_list = val_loss_meter, val_auc_meter, val_loss_list
            # test_loss_meter, test_auc_meter, test_loss_list = run_ekt.test_ekt(opt, vis, model, test_loader, epoch)

        loss_result["train_loss"] = train_loss_meter.value()[0]
        auc_resilt["train_auc"] = train_auc_meter.value()[0]
        loss_result["val_loss"] = val_loss_meter.value()[0]
        auc_resilt["val_auc"] = val_auc_meter.value()[0]
        loss_result["test_loss"] = test_loss_meter.value()[0]
        auc_resilt["test_auc"] = test_auc_meter.value()[0]

        for k, v in loss_result.items():
            print("epoch:{epoch}, {k}:{v:.5f}".format(epoch=epoch, k=k, v=v))
            if opt.vis:
                vis.line(X=np.array([epoch]),
                         Y=np.array([v]),
                         win="loss",
                         opts=dict(title="loss", showlegend=True),
                         name=k,
                         update='append')
        for k, v in auc_result.items():
            print("epoch:{epoch}, {k}:{v:.5f}".format(epoch=epoch, k=k, v=v))
            if opt.vis:
                vis.line(X=np.array([epoch]),
                         Y=np.array([v]),
                         win="auc",
                         opts=dict(title="auc", showlegend=True),
                         name=k,
                         update='append')

        best_test_auc = max(best_test_auc,
                            test_auc_meter.value()[0],
                            val_auc_meter.value()[0])
        print("best_test_auc is: ", best_test_auc)

        # TODO: write this epoch's losses to file
        myutils.save_loss_file(opt, epoch, train_loss_list, val_loss_list,
                               test_loss_list)

        # TODO: save model + optimizer state every save_every epochs
        if epoch % opt.save_every == 0:
            myutils.save_model_weight(opt, model, optimizer, epoch, lr)

        # TODO: apply lr decay
        lr = myutils.adjust_lr(opt, optimizer, epoch,
                               train_loss_meter.value()[0])

    # TODO: save the final model weights when training ends
    myutils.save_model_weight(opt, model, optimizer, epoch, lr, is_final=True)
Code Example #5
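Cross-validation helper: it trains one of CNN, CNN_3D, or RNN_DKT on a train/valid split and returns the per-epoch loss and AUC lists.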
def run_train_valid(opt, vis):
    print(opt.__dict__)
    train_path, valid_path, test_path = init_file_path(opt)

    train_dataset = KTData(train_path, opt='None')
    valid_dataset = KTData(valid_path, opt='None')

    print(train_path, valid_path)
    print(len(train_dataset), len(valid_dataset))

    train_loader = DataLoader(train_dataset,
                              batch_size=opt.batch_size,
                              shuffle=True,
                              num_workers=opt.num_workers,
                              drop_last=True,
                              collate_fn=myutils.collate_fn)
    valid_loader = DataLoader(valid_dataset,
                              batch_size=opt.batch_size,
                              shuffle=True,
                              num_workers=opt.num_workers,
                              drop_last=True,
                              collate_fn=myutils.collate_fn)

    if opt.model_name == "CNN":
        model = CNN(opt.input_dim, opt.embed_dim, opt.hidden_dim,
                    opt.num_layers, opt.output_dim, opt.batch_size, opt.device)
    elif opt.model_name == "CNN_3D":
        model = CNN_3D(opt.input_dim, opt.embed_dim, opt.hidden_dim,
                       opt.num_layers, opt.output_dim, opt.batch_size,
                       opt.device)
    else:
        model = RNN_DKT(opt.input_dim, opt.embed_dim, opt.hidden_dim,
                        opt.num_layers, opt.output_dim, opt.batch_size,
                        opt.device)

    lr = opt.lr
    last_epoch = -1
    previous_loss = 1e10

    optimizer = torch.optim.Adam(params=model.parameters(),
                                 lr=lr,
                                 weight_decay=opt.weight_decay,
                                 betas=(0.9, 0.99))
    if opt.model_path:
        map_location = lambda storage, loc: storage
        checkpoint = torch.load(opt.model_path, map_location=map_location)
        model.load_state_dict(checkpoint["model"])
        last_epoch = checkpoint["epoch"]
        lr = checkpoint["lr"]
        optimizer.load_state_dict(checkpoint["optimizer"])

    model = model.to(opt.device)

    train_loss_list = []
    train_auc_list = []
    valid_loss_list = []
    valid_auc_list = []
    # START TRAIN
    for epoch in range(opt.max_epoch):
        if epoch < last_epoch:
            continue

        train_loss_meter, train_auc_meter, _ = train.train_3d(
            opt, vis, model, train_loader, epoch, lr, optimizer)
        val_loss_meter, val_auc_meter, _ = train.valid_3d(
            opt, vis, model, valid_loader, epoch)

        print("epoch: {}, train_auc: {}, val_auc: {}".format(
            epoch,
            train_auc_meter.value()[0],
            val_auc_meter.value()[0]))

        train_loss_list.append(train_loss_meter.value()[0])
        train_auc_list.append(train_auc_meter.value()[0])

        valid_loss_list.append(val_loss_meter.value()[0])
        valid_auc_list.append(val_auc_meter.value()[0])

        # TODO: save model + optimizer state every save_every epochs
        if epoch % opt.save_every == 0:
            myutils.save_model_weight(opt,
                                      model,
                                      optimizer,
                                      epoch,
                                      lr,
                                      is_CV=True)

        # TODO: apply lr decay
        lr = myutils.adjust_lr(opt, optimizer, epoch)

    # TODO: save the final model weights when training ends
    myutils.save_model_weight(opt,
                              model,
                              optimizer,
                              epoch,
                              lr,
                              is_final=True,
                              is_CV=True)

    return train_loss_list, train_auc_list, valid_loss_list, valid_auc_list