Code example #1
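    # Training loop excerpt: each batch yields (img, label, name); the model returns
    # several outputs, one cross-entropy loss is computed per output, the losses are
    # summed for backprop, and a checkpoint is saved and evaluated after every epoch.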
    print("datasetSize:", len(train_dataset))

    for i_epoach in range(nepoach):
        for i_batch, (img, label, name) in enumerate(train_loader):
            img = img.cuda()
            img = img.type(torch.cuda.FloatTensor)
            label = label.cuda()
            output = model(img)

            loss = [crossEntropyLoss(x, label) for x in output]
            nploss = [x.cpu().data.numpy() for x in loss]

            losss.append(nploss)
            if len(losss) == 10:
                print(i_epoach, i_batch, np.average(losss))
                losss.clear()

            loss_total = None
            for i, l in enumerate(loss):
                if i == 0:
                    loss_total = l
                else:
                    loss_total = loss_total + l
            loss_total.backward()
            optimizer.step()
            optimizer.zero_grad()
        pth = './model_resnet/net_%d.pth' % (i_epoach)
        print('save to', pth)
        torch.save(model.state_dict(), pth)
        benchmark.eval(model, "/home/afan/tumor/data/train_refine_v5",
                       "val.csv")
Code example #2
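    # Setup excerpt: DataLoader, SGD + StepLR scheduler, and an initial benchmark.eval
    # before training; the feature extractor is unfrozen at epoch 5, and the per-head
    # cross-entropy here also takes the class rates (frate, trate).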
    #train_dataset.csvData = train_dataset.csvData[:100]
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=8,
                                               num_workers=8,
                                               drop_last=False,
                                               shuffle=True)
    frate, trate = train_dataset.distribution
    print("data distribution frate %f trate %f" % (frate, trate))
    optimizer = torch.optim.SGD(model.parameters(), lr=3e-4, momentum=0.9)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 10, 0.3)
    nepoach = 30
    losss = []
    print("datasetSize:", len(train_dataset))
    benchmark.eval(model,
                   "../data/train_img_refine",
                   "val.csv",
                   dataTransform=utils.getTemplateData)
    for i_epoach in range(nepoach):
        if i_epoach == 5:
            for p in model.featureExtractor.parameters():
                p.requires_grad = True
        for i_batch, (img, label) in enumerate(train_loader):
            img = utils.getTemplateData(img)
            img = img.cuda()
            label = label.cuda()
            output, _ = model(img)

            loss = [crossEntropyLoss(x, label, frate, trate) for x in output]
            nploss = [x.cpu().data.numpy() for x in loss]

            losss.append(nploss)
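The crossEntropyLoss(x, label, frate, trate) helper itself is not part of these excerpts. A minimal sketch of a class-rate-weighted cross entropy, as a hypothetical stand-in rather than the project's actual implementation, could look like this:

    import torch
    import torch.nn.functional as F

    def weighted_cross_entropy(logits, labels, frate, trate):
        # Hypothetical helper: weight each class inversely to its observed rate so the
        # rarer class contributes more to the loss (assumes a two-class problem).
        w = torch.tensor([1.0 / frate, 1.0 / trate],
                         dtype=torch.float32, device=logits.device)
        return F.cross_entropy(logits, labels, weight=w)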
Code example #3
File: model_topn.py  Project: FairyPig/cancerDiagnose
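    # Same setup as above for the top-n model; the loop computes per-head cross-entropy
    # losses and logs their running average every 10 batches.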
    # quit()

    #train_dataset.csvData = train_dataset.csvData[:100]
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=8,
                                               num_workers=8,
                                               drop_last=False,
                                               shuffle=True)
    frate, trate = train_dataset.distribution
    print("data distribution frate %f trate %f" % (frate, trate))
    optimizer = torch.optim.SGD(model.parameters(), lr=3e-4, momentum=0.9)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 10, 0.3)
    nepoach = 30
    losss = []
    print("datasetSize:", len(train_dataset))
    benchmark.eval(model, "../data/train_seg", "val.csv")
    for i_epoach in range(nepoach):
        for i_batch, (img, label) in enumerate(train_loader):
            #continue
            img = img.cuda()
            label = label.cuda()
            output, _ = model(img)

            loss = [crossEntropyLoss(x, label) for x in output]
            nploss = [x.cpu().data.numpy() for x in loss]

            losss.append(nploss)
            if len(losss) == 10:
                print(i_epoach, i_batch, np.average(losss))
                losss.clear()
            # if i_epoach == 0 and i_batch < 100:
Code example #4
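    # Variant with a lower learning rate (3e-5); the model also returns feature maps
    # (outmap), and the commented-out block would dump them as images for inspection.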
    # quit()

    #train_dataset.csvData = train_dataset.csvData[:100]
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=8,
                                               num_workers=10,
                                               drop_last=False,
                                               shuffle=True)
    frate, trate = train_dataset.distribution
    print("data distribution frate %f trate %f" % (frate, trate))
    optimizer = torch.optim.SGD(model.parameters(), lr=3e-5, momentum=0.9)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 10, 0.3)
    nepoach = 30
    losss = []
    print("datasetSize:", len(train_dataset))
    benchmark.eval(model, "../data/train_img_refine", "val.csv")
    for i_epoach in range(nepoach):
        for i_batch, (img, label) in enumerate(train_loader):
            img = img.cuda()
            label = label.cuda()
            output, outmap = model(img)

            loss = [crossEntropyLoss(x, label) for x in output]
            nploss = [x.cpu().data.numpy() for x in loss]
            #outmap = [x.cpu().data.numpy() for x in outmap]
            # for i in range(len(outmap[0])):
            #     m = [cv2.resize(outmap[j][i][0],(224,224)) for j in range(len(outmap))]
            #     cv2.imwrite("./output/%5d.jpg"%(random.randrange(0,1000)),(np.concatenate(m,axis=1)*255).astype(np.uint8))
            losss.append(nploss)
            if len(losss) == 10:
                print(i_epoach, i_batch, np.average(losss))
Code example #5
File: model_3d.py  Project: FairyPig/cancerDiagnose
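    # Inner-loop excerpt for the 3D model: summed per-head losses, optimizer step,
    # per-epoch checkpointing, evaluation on the segmentation data (toCubeData
    # transform), and a scheduler step at the end of every epoch.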
            img = img.cuda()
            label = label.cuda()
            output, _ = model(img)

            loss = [crossEntropyLoss(x, label) for x in output]
            nploss = [x.cpu().data.numpy() for x in loss]

            losss.append(nploss)
            if len(losss) == 10:
                print(i_epoach, i_batch, np.average(losss))
                losss.clear()
            # if i_epoach == 0 and i_batch < 100:
            #     loss = loss*0.1
            loss_total = None
            for i, l in enumerate(loss):
                if i == 0:
                    loss_total = l
                else:
                    loss_total = loss_total + l
            loss_total.backward()
            optimizer.step()
            optimizer.zero_grad()
        pth = './model/net_%d.pth' % (i_epoach)
        print('save to', pth)
        torch.save(model.state_dict(), pth)
        benchmark.eval(model,
                       "../data/train_seg",
                       "val.csv",
                       dataTransform=utils.toCubeData)
        scheduler.step()
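After training, a saved checkpoint can be reloaded for stand-alone evaluation. A minimal sketch, assuming the model object is built with the same architecture and the path follows the naming used above:

    # Restore the last checkpoint written by the loop above and switch to eval mode.
    state = torch.load('./model/net_29.pth', map_location='cuda')
    model.load_state_dict(state)
    model.eval()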