Example #1
def submit():
    load_model_path = "checkpoints/LSTM_0615_22:42:44.pth"
    model = Sequence(14, 128, 1).cuda()
    model.load(load_model_path)

    user_list = []
    probability = []
    submit_data = AppData("data/submit_data_20d.json", iflabel=False)
    submit_dataloader = DataLoader(submit_data, 512, shuffle=False)
    for ii, (input, property, user_id) in tqdm(enumerate(submit_dataloader)):
        # volatile=True is the pre-0.4 way to disable autograd at inference time
        val_input = Variable(input, volatile=True).cuda()
        val_input2 = Variable(property).cuda()
        score = model(val_input, val_input2).cpu()
        probability.extend(
            t.nn.functional.softmax(score, dim=1)[:, 1].data.tolist())
        user_list.extend(user_id.tolist())

    index = np.argmax(probability)
    print(user_list[index], probability[index])
    index2 = np.argmin(probability)
    print(user_list[index2], probability[index2])
    # sort (probability, user_id) pairs by score, highest first
    # ("ranked" avoids shadowing the built-in all())
    ranked = sorted(zip(probability, user_list), key=lambda x: x[0], reverse=True)
    with open('submission/submission_06_15_22_45_21785.txt', 'w') as f:
        for prob, uid in ranked[:21785]:
            f.write(str(uid) + '\n')
    print(ranked[21785], ranked[23800])
    print(len(ranked))
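Note: this loop uses the pre-0.4 Variable/volatile API. A minimal sketch of the same scoring loop on PyTorch >= 0.4, where volatile=True is replaced by the torch.no_grad() context (the function name and structure here are illustrative, not from the original project):

import torch as t
from tqdm import tqdm

def score_submission(model, dataloader):
    # Tensors are fed to the model directly (Variable is merged into Tensor)
    # and autograd is disabled with no_grad() instead of volatile=True.
    model.eval()
    user_list, probability = [], []
    with t.no_grad():
        for input, property, user_id in tqdm(dataloader):
            score = model(input.cuda(), property.cuda()).cpu()
            # softmax over the class dimension, keep the positive-class column
            probability.extend(
                t.nn.functional.softmax(score, dim=1)[:, 1].tolist())
            user_list.extend(user_id.tolist())
    return user_list, probability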
Example #2
def test_offline():
    test_data = AppData("data/train_20d_3p.json", iflabel=True)
    test_dataloader = DataLoader(test_data, 128, shuffle=False, num_workers=2)

    load_model_path = "checkpoints/LSTM_0616_08:30:43.pth"
    model = Sequence(14, 128, 1).cuda()
    model.load(load_model_path)

    test_cm, test_f1 = val(model, test_dataloader)

    print(test_f1)
    print(test_cm)
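Example #2 relies on a val(model, dataloader) helper that is not among these examples. A hypothetical reconstruction, assuming a two-class model output and torchnet's ConfusionMeter (rows are ground truth, columns are predictions); every name below is a guess at the original, not its actual code:

import torch as t
from torchnet import meter

def val(model, dataloader):
    model.eval()
    confusion_matrix = meter.ConfusionMeter(2)
    with t.no_grad():  # modern stand-in for Variable(..., volatile=True)
        for data, property, label in dataloader:
            score = model(data.cuda(), property.cuda())
            confusion_matrix.add(score.cpu(), label.view(-1))
    model.train()
    cm = confusion_matrix.value()
    precision = cm[0][0] / cm[:, 0].sum()  # TP / predicted positive
    recall = cm[0][0] / cm[0].sum()        # TP / actual positive
    f1 = 2 * precision * recall / (precision + recall)
    return confusion_matrix, f1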
Example #3
def submit():
    model_time = "0628_10:39:34"
    load_model_path = "checkpoints/06281033/LSTM_%s.pth" % model_time
    model = Sequence(31, 128, 1).cuda()
    model.load(load_model_path)

    user_list = []
    probability = []
    submit_data = AppData("../kesci/data/data_v3_23d/submit2.json",
                          iflabel=False)
    submit_dataloader = DataLoader(submit_data, 512, shuffle=False)
    for ii, (input, property, user_id) in tqdm(enumerate(submit_dataloader)):
        val_input = Variable(input, volatile=True).cuda()
        val_input2 = Variable(property).cuda()
        score = model(val_input, val_input2).cpu()
        probability.extend(F.sigmoid(score)[:, 0].data.tolist())
        user_list.extend(user_id.tolist())

    index = np.argmax(probability)
    print(user_list[index], probability[index])
    index2 = np.argmin(probability)
    print(user_list[index2], probability[index2])
    # sort (probability, user_id) pairs by score, highest first
    ranked = sorted(zip(probability, user_list), key=lambda x: x[0], reverse=True)
    getnum = 24000
    with open(
            '../kesci/submission/submission_%s_%d.txt' % (model_time, getnum),
            'w') as f:
        num = 0
        for prob, uid in ranked[:getnum]:
            # if prob > 0.5:
            f.write(str(uid) + '\n')
            num += 1
    print(num)

    with open('../kesci/submission/score_%s.txt' % model_time, 'w') as f:
        for prob, uid in ranked:
            f.write("%s\t%s\n" % (uid, prob))
Example #4
def train():
    vis = Visualizer("Kesci" + time.strftime('%m%d%H%M'))
    train_data = AppData("../kesci/data/data_v3_23d/train_ab.json",
                         iflabel=True)
    val_data = AppData("../kesci/data/data_v3_23d/val_ab.json", iflabel=True)
    train_dataloader = DataLoader(train_data, 256, shuffle=True, num_workers=4)
    val_dataloader = DataLoader(val_data, 512, shuffle=False, num_workers=2)
    test_data = AppData("../kesci/data/data_v3_23d/test_ab.json", iflabel=True)
    test_dataloader = DataLoader(test_data, 512, shuffle=False, num_workers=2)

    criterion = t.nn.BCEWithLogitsLoss().cuda()
    learning_rate = 0.002
    weight_decay = 0.0003
    model = DoubleSequence(31, 128, 1).cuda()
    optimizer = t.optim.Adam(model.parameters(),
                             lr=learning_rate,
                             weight_decay=weight_decay)

    loss_meter = meter.AverageValueMeter()
    confusion_matrix = meter.ConfusionMeter(2)
    previous_loss = 1e100

    for epoch in range(400):
        loss_meter.reset()
        confusion_matrix.reset()

        for ii, (data, property, target) in tqdm(enumerate(train_dataloader)):
            input = Variable(data).cuda()
            input2 = Variable(property).cuda()
            target = Variable(target).cuda()
            output = model(input, input2)

            optimizer.zero_grad()
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            loss_meter.add(loss.data[0])  # pre-0.4 equivalent of loss.item()

            if ii % 100 == 99:
                vis.plot('loss', loss_meter.value()[0])

        if epoch % 3 == 2:
            train_cm, train_f1 = val(model, train_dataloader)
            vis.plot('train_f1', train_f1)
        val_cm, val_f1 = val(model, val_dataloader)

        vis.plot_many({'val_f1': val_f1, 'learning_rate': learning_rate})
        if loss_meter.value()[0] > previous_loss:
            learning_rate = learning_rate * 0.9
            # Second way to lower the learning rate: optimizer state such as
            # momentum is not discarded
            for param_group in optimizer.param_groups:
                param_group['lr'] = learning_rate

        previous_loss = loss_meter.value()[0]

        if epoch % 3 == 2:
            model.save()
            test_cm, test_f1 = val(model, test_dataloader)
            vis.plot('test_f1', test_f1)
            vis.log(
                "train: {train_f1:%}, {train_pre:%}, {train_rec:%} | "
                "val: {val_f1:%}, {val_pre:%}, {val_rec:%} | "
                "test: {test_f1:%}, {test_pre:%}, {test_rec:%} | "
                "{train_true_num:%}, {val_true_num:%}, {test_true_num:%}".format(
                    train_f1=train_f1,
                    val_f1=val_f1,
                    test_f1=test_f1,
                    train_true_num=train_cm.value()[:, 0].sum() / len(train_data),
                    val_true_num=val_cm.value()[:, 0].sum() / len(val_data),
                    test_true_num=test_cm.value()[:, 0].sum() / len(test_data),
                    # ConfusionMeter rows are ground truth and columns are
                    # predictions, so precision divides by the column sum and
                    # recall by the row sum (matching Example #5)
                    train_pre=train_cm.value()[0][0] / train_cm.value()[:, 0].sum(),
                    train_rec=train_cm.value()[0][0] / train_cm.value()[0].sum(),
                    val_pre=val_cm.value()[0][0] / val_cm.value()[:, 0].sum(),
                    val_rec=val_cm.value()[0][0] / val_cm.value()[0].sum(),
                    test_pre=test_cm.value()[0][0] / test_cm.value()[:, 0].sum(),
                    test_rec=test_cm.value()[0][0] / test_cm.value()[0].sum()))
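The manual decay above rescales lr in place so Adam's momentum buffers survive. torch.optim.lr_scheduler.ReduceLROnPlateau packages the same idea; a sketch with a stand-in model (note it compares against the best loss so far rather than only the previous epoch, so it is close to, but not identical to, the manual rule):

import torch as t
import torch.nn as nn

model = nn.Linear(31, 1).cuda()  # stand-in model for illustration
optimizer = t.optim.Adam(model.parameters(), lr=0.002, weight_decay=0.0003)
# factor=0.9 mirrors the 0.9 multiplier above; patience=0 decays as soon as
# the monitored loss stops improving; optimizer state is left untouched.
scheduler = t.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                   factor=0.9, patience=0)

for epoch in range(400):
    epoch_loss = 1.0 / (epoch + 1)  # placeholder for loss_meter.value()[0]
    scheduler.step(epoch_loss)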
Example #5
def train():
    vis = Visualizer("Kesci")
    train_data = AppData("data/data_16d_target/train.json", iflabel=True)
    val_data = AppData("data/data_16d_target/val.json", iflabel=True)
    train_dataloader = DataLoader(train_data, 32, shuffle=True, num_workers=4)
    val_dataloader = DataLoader(val_data, 256, shuffle=False, num_workers=2)
    test_data = AppData("data/data_16d_target/test.json", iflabel=True)
    test_dataloader = DataLoader(test_data, 256, shuffle=False, num_workers=2)

    criterion = t.nn.CrossEntropyLoss().cuda()
    learning_rate = 0.003
    weight_decay = 0.0002
    model = Sequence(15, 128, 1).cuda()
    optimizer = t.optim.Adam(model.parameters(),
                             lr=learning_rate,
                             weight_decay=weight_decay)

    loss_meter = meter.AverageValueMeter()
    confusion_matrix = meter.ConfusionMeter(2)
    previous_loss = 1e100

    for epoch in range(500):
        loss_meter.reset()
        confusion_matrix.reset()

        for ii, (data, property, label) in tqdm(enumerate(train_dataloader)):
            input = Variable(data).cuda()
            input2 = Variable(property).cuda()
            target = Variable(label).cuda().view(-1)
            output = model(input, input2)

            optimizer.zero_grad()
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()

            loss_meter.add(loss.data[0])

            confusion_matrix.add(output.data, target.data)

            if ii % 100 == 99:
                vis.plot('loss', loss_meter.value()[0])

        if epoch % 3 == 2:
            train_cm, train_f1 = val(model, train_dataloader)
            vis.plot('train_f1', train_f1)
        val_cm, val_f1 = val(model, val_dataloader)

        vis.plot_many({'val_f1': val_f1, 'learning_rate': learning_rate})

        # vis.log("epoch:{epoch},lr:{lr},loss:{loss},train_cm:{train_cm},val_cm:{val_cm}".format(
        #     epoch=epoch, loss=loss_meter.value()[0], val_cm=str(val_cm.value()),
        #     train_cm=str(confusion_matrix.value()), lr=learning_rate))

        if loss_meter.value()[0] > previous_loss:
            learning_rate = learning_rate * 0.95
            # Second way to lower the learning rate: optimizer state such as
            # momentum is not discarded
            for param_group in optimizer.param_groups:
                param_group['lr'] = learning_rate

        previous_loss = loss_meter.value()[0]

        if epoch % 10 == 9:
            model.save()
            test_cm, test_f1 = val(model, test_dataloader)
            vis.plot('test_f1', test_f1)
            vis.log(
                "model:{model} | {train_f1}, {train_pre}, {train_rec} | {val_f1}, {val_pre}, {val_rec} | {test_f1}, {test_pre}, {test_rec}"
                .format(train_f1=train_f1,
                        val_f1=val_f1,
                        test_f1=test_f1,
                        model=time.strftime('%m%d %H:%M:%S'),
                        train_pre=str(train_cm.value()[0][0] /
                                      train_cm.value()[:, 0].sum()),
                        train_rec=str(train_cm.value()[0][0] /
                                      train_cm.value()[0].sum()),
                        val_pre=str(val_cm.value()[0][0] /
                                    val_cm.value()[:, 0].sum()),
                        val_rec=str(val_cm.value()[0][0] /
                                    val_cm.value()[0].sum()),
                        test_pre=str(test_cm.value()[0][0] /
                                     test_cm.value()[:, 0].sum()),
                        test_rec=str(test_cm.value()[0][0] /
                                     test_cm.value()[0].sum())))
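Examples #4 and #5 compute precision and recall inline from the confusion matrix. Factoring that arithmetic into a helper makes the torchnet convention explicit (rows are ground truth, columns are predictions); the function name is illustrative:

import numpy as np

def prf_from_cm(cm_value, positive=0):
    # cm_value: 2x2 array from ConfusionMeter.value()
    cm = np.asarray(cm_value, dtype=float)
    tp = cm[positive, positive]
    precision = tp / cm[:, positive].sum()  # TP / predicted positive
    recall = tp / cm[positive].sum()        # TP / actual positive
    f1 = 2 * precision * recall / (precision + recall)
    return precision, recall, f1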