Example #1
import numpy as np
import torch

import tumorDataset  # project-local dataset module (import path assumed)
from utils import make_variable  # project-local helper (import path assumed)


def test_dataset(model, dataroot, outputfile, dataTransform=None, aug=True):
    # Evaluate `model` on the test split and write predictions to CSV.
    # (dataTransform is accepted but unused in this excerpt.)
    test_datasets = tumorDataset.TumorDtaset(dataroot,
                                             "./test.csv",
                                             aug=aug,
                                             mix=0)
    predicts = {}
    test_loader = torch.utils.data.DataLoader(test_datasets,
                                              batch_size=8,
                                              num_workers=8,
                                              drop_last=False,
                                              shuffle=True)
    # With augmentation enabled, run five passes over the test set and
    # average the accumulated predictions (test-time augmentation).
    repeat_time = 5 if aug else 1
    for times in range(repeat_time):
        for i_batch, (imgs, labels, name) in enumerate(test_loader):
            if i_batch % 100 == 0:
                print("Epoch:[ " + str(times) + " ] Batch: [ " + str(i_batch) +
                      "/" + str(len(test_loader)) + " ]")
            # make_variable wraps the batch for inference; volatile=True is
            # the pre-0.4 PyTorch way of disabling autograd (newer code
            # would use torch.no_grad()).
            images = make_variable(imgs, volatile=True)
            predict, _ = model(images)
            # Take the first prediction head and accumulate per-sample
            # scores across the repeated (augmented) passes.
            predict = predict[0].cpu().data.numpy()
            for i in range(predict.shape[0]):
                if name[i] not in predicts:
                    predicts[name[i]] = 0.0
                predicts[name[i]] += predict[i]
    # Write rounded (binary) predictions, then the raw averaged scores to a
    # second file with an "s" suffix.
    with open(outputfile, 'w') as f:
        f.write("id,ret\r\n")
        for k, v in predicts.items():
            f.write("%s,%d\r\n" % (k, np.round(v / float(repeat_time))))
    with open(outputfile + "s", 'w') as f:
        f.write("id,ret\r\n")
        for k, v in predicts.items():
            f.write("%s,%f\r\n" % (k, v / float(repeat_time)))
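
A minimal sketch of how test_dataset might be driven (the model constructor is borrowed from Example #2; the checkpoint name and paths are assumptions, not part of the excerpt):

# Hypothetical driver; the checkpoint path and data root are placeholders.
model = resnet18_3d(n_classes=2, in_channels=1).cuda()
model.load_state_dict(torch.load("./checkpoint.pth"))
model.eval()
test_dataset(model, "../data/test_img", "./output.csv", aug=True)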
Example #2
def crossEntropyLoss(y, label):
    # Binary cross-entropy with optional label smoothing; smooth_para = 0
    # disables the smoothing term, and the 1e-4 inside each log guards
    # against log(0).
    smooth_para = 0.00
    lossT = -1 * label * ((1 - smooth_para) * torch.log(y + 1e-4) +
                          smooth_para * torch.log(1 - y + 1e-4))  #*frate
    lossF = -1 * (1 - label) * ((1 - smooth_para) * torch.log(1 - y + 1e-4) +
                                smooth_para * torch.log(y + 1e-4))  #*trate
    batch_size = y.shape[0]
    loss = torch.sum(lossT + lossF) / batch_size
    return loss
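
def _check_against_builtin_bce():
    # Sanity-check sketch (not part of the original file): with
    # smooth_para = 0, crossEntropyLoss reduces to plain binary
    # cross-entropy summed over elements and divided by batch size, so it
    # should match PyTorch's built-in up to the 1e-4 epsilon in the logs.
    import torch.nn.functional as F
    y = torch.tensor([[0.9], [0.2], [0.7]])      # hypothetical predictions
    label = torch.tensor([[1.0], [0.0], [1.0]])  # hypothetical targets
    manual = crossEntropyLoss(y, label)
    builtin = F.binary_cross_entropy(y, label, reduction='sum') / y.shape[0]
    print(manual.item(), builtin.item())  # expected to agree to ~1e-3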


if __name__ == "__main__":
    # model = TumorClassifier(reRange=(0.3, 0.7)).cuda()
    model = resnet18_3d(n_classes=2, in_channels=1).cuda()
    train_dataset = tumorDataset.TumorDtaset(
        "/home/afan/tumor/data/train_refine_v5", "train.csv", aug=False, mix=1)

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=32,
                                               num_workers=28,
                                               drop_last=False,
                                               shuffle=True)

    frate, trate = train_dataset.distribution
    print("data distribution frate %f trate %f" % (frate, trate))
    optimizer = torch.optim.Adam([{"params": model.parameters()}], lr=1e-5)
    nepoach = 30
    losss = []
    print("datasetSize:", len(train_dataset))

    for i_epoach in range(nepoach):
Example #3
    # Class-frequency-weighted binary cross-entropy: the positive term is
    # scaled by frate, the negative term by trate (a sketch of the idea
    # follows this function).
    lossT = -1 * label * torch.log(y + 1e-4) * frate
    lossF = -1 * (1 - label) * torch.log(1 - y + 1e-4) * trate
    batch_size = y.shape[0]
    loss = torch.sum(lossT + lossF)

    loss = loss / batch_size

    return loss
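
def _rebalance_sketch():
    # Sketch (not part of the original file) of the frate/trate weighting
    # above. Assuming train_dataset.distribution yields (negative fraction,
    # positive fraction), scaling the positive term by frate and the
    # negative term by trate gives the rarer class the larger weight,
    # counteracting class imbalance.
    labels = torch.tensor([0., 0., 0., 1.])  # hypothetical imbalanced batch
    trate = labels.mean().item()             # positive fraction: 0.25
    frate = 1.0 - trate                      # negative fraction: 0.75
    y = torch.tensor([0.4, 0.3, 0.2, 0.6])   # hypothetical predictions
    lossT = -1 * labels * torch.log(y + 1e-4) * frate
    lossF = -1 * (1 - labels) * torch.log(1 - y + 1e-4) * trate
    return torch.sum(lossT + lossF) / y.shape[0]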


if __name__ == "__main__":

    model = TumorClassifier().cuda()

    train_dataset = tumorDataset.TumorDtaset("../data/train_img_refine",
                                             "train.csv",
                                             aug=True)
    # d = train_dataset[2][0]
    # for i in range(40):
    #     y = d[i]
    #     y[y<0]=0
    #     y[y>1]=1
    #     cv2.imwrite("./output/%2d.png"%(i),(y*255).astype(np.uint8))
    #
    # quit()

    #train_dataset.csvData = train_dataset.csvData[:100]
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=8,
                                               num_workers=8,
                                               drop_last=False,
Example #4
    # Plain binary cross-entropy; the frate/trate class weights from
    # Example #3 are commented out here.
    lossT = -1 * label * torch.log(y + 1e-4)  #*frate
    lossF = -1 * (1 - label) * torch.log(1 - y + 1e-4)  #*trate
    batch_size = y.shape[0]
    loss = torch.sum(lossT + lossF)

    loss = loss / batch_size

    return loss


if __name__ == "__main__":

    model = TumorClassifier().cuda().train()

    train_dataset = tumorDataset.TumorDtaset("../data/train_seg",
                                             "train.csv",
                                             aug=True)
    # d = train_dataset[2][0]
    # for i in range(40):
    #     y = d[i]
    #     y[y<0]=0
    #     y[y>1]=1
    #     cv2.imwrite("./output/%2d.png"%(i),(y*255).astype(np.uint8))
    #
    # quit()

    #train_dataset.csvData = train_dataset.csvData[:100]
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=8,
                                               num_workers=8,
                                               drop_last=False,
Example #5
    #         print(w.shape)
    #         w = w[0]
    #         t = []
    #         for j in range(40):
    #             temp = w[j].copy()
    #             temp -= np.min(w[i])
    #             temp /= temp.max()
    #             t.append(temp)
    #             cv2.imwrite("./temp/%d.png"%(j),(temp*255).astype(np.uint8))
    #         cv2.imwrite("./temp/a.png",np.average(t,axis=0)*255)
    #
    # quit()


    train_dataset = tumorDataset.TumorDtaset("../data/test_img",
                                             None,
                                             aug=False,
                                             simiRoot="../data/test_img",
                                             similabel="./outputr3.csvs",
                                             simithres=0.1)

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=8,
                                               num_workers=28,
                                               drop_last=False,
                                               shuffle=True)
    frate, trate = train_dataset.distribution
    print("data distribution frate %f trate %f" % (frate, trate))
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 10, 0.3)
    nepoach = 50
    losss = []
    print("datasetSize:", len(train_dataset))
    # benchmark.eval(model, "../data/train_img", "val.csv", dataTransform=utils.toCubeData)
    for i_epoach in range(nepoach):
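        # The original loop body is truncated in this listing; below is a
        # hypothetical reconstruction consistent with the setup above (the
        # loss function and model output format are assumptions).
        epoch_loss = 0.0
        for i_batch, (imgs, labels, name) in enumerate(train_loader):
            y, _ = model(imgs.cuda())
            loss = crossEntropyLoss(y, labels.cuda().float())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
        scheduler.step()  # StepLR: decay the lr by 0.3 every 10 epochs
        losss.append(epoch_loss / len(train_loader))
        print("epoch %d avg loss %f" % (i_epoach, losss[-1]))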