Example #1
def test_casenet(model, testset):
    # (header and loader reconstructed from the parallel test_casenet in
    # Example #5; batch_size and num_workers are assumed values, only
    # pin_memory=False survives from the truncated original)
    data_loader = DataLoader(testset,
                             batch_size=1,
                             shuffle=False,
                             num_workers=32,
                             pin_memory=False)
    #model = model.cuda()
    model.eval()
    predlist = []

    #     weight = torch.from_numpy(np.ones_like(y).float().cuda()
    for i, (x, coord, subj_name) in enumerate(data_loader):
        print(subj_name[0])
        coord = Variable(coord).cuda()
        x = Variable(x).cuda()
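        # forward pass: per-nodule predictions, the case-level cancer
        # probability, and an intermediate feature tensor (saved to disk below)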
        nodulePred, casePred, out = model(x, coord)
        predlist.append(casePred.data.cpu().numpy())
        print(out.data.cpu().numpy().shape, out[0].data.cpu().numpy().shape)
        fname = config_submit['feat_save_root'] + '/' + subj_name[0] + '.npy'
        if os.path.exists(fname):
            print(fname, 'already exists')  # the file is still overwritten below
        np.save(fname, out.data.cpu().numpy())

        #print([i,data_loader.dataset.split[i,1],casePred.data.cpu().numpy()])
    predlist = np.concatenate(predlist)
    return predlist


config2['bboxpath'] = bbox_result_path
config2['datadir'] = prep_result_path

dataset = DataBowl3Classifier(testsplit, config2, phase='test')
nodulePred = test_casenet(casenet, dataset).T
# df = pandas.DataFrame({'id':testsplit, 'cancer':predlist})
# df.to_csv(filename,index=False)
Example #2
# (condition completed from the identical block in Example #3)
if args.epochs is None:
    end_epoch = args.lr_stage2[-1]
else:
    end_epoch = args.epochs

case_net = case_net.cuda()
loss = loss.cuda()
case_net = DataParallel(case_net)

save_dir = os.path.join('./', save_dir)
print(save_dir)
print(args.save_freq)
print(testsplit)
# The folder names containing the 3D images
trainsplit = testsplit
valsplit = valsplit
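# ('valsplit = valsplit' above is a no-op kept from the original script);
# rebuild the test split from the preprocessed test directory, one folder per patient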
testsplit = os.listdir(config_submit['testpath'])

dataset = DataBowl3Classifier(trainsplit, config2, phase='train')
train_loader_case = DataLoader(dataset,
                               batch_size=args.batch_size2,
                               shuffle=True,
                               num_workers=args.workers,
                               pin_memory=True)

dataset = DataBowl3Classifier(valsplit, config2, phase='val')
val_loader_case = DataLoader(dataset,
                             batch_size=max([args.batch_size2, 1]),
                             shuffle=False,
                             num_workers=args.workers,
                             pin_memory=True)

dataset = DataBowl3Classifier(trainsplit, config2, phase='val')
all_loader_case = DataLoader(dataset,
                             batch_size=max([args.batch_size2, 1]),
                             shuffle=False,
                             num_workers=args.workers,
                             pin_memory=True)  # (completed from the identical loader in Example #3)
Example #3
File: main.py Project: Minerva-J/DSB2017
def main():
    global args
    args = parser.parse_args()

    torch.manual_seed(0)

    ##################################

    nodmodel = import_module(args.model1)
    config1, nod_net, loss, get_pbb = nodmodel.get_model()  #net_detector_3
    args.lr_stage = config1['lr_stage']  #np.array([50,100,140,160])
    args.lr_preset = config1['lr']  #[0.01,0.001,0.0001,0.00001]

    save_dir = args.save_dir

    ##################################

    casemodel = import_module(args.model2)  #net_classifier_3

    config2 = casemodel.config
    args.lr_stage2 = config2['lr_stage']  #np.array([50,100,140,160])
    args.lr_preset2 = config2['lr']  #[0.01,0.001,0.0001,0.00001]
    topk = config2['topk']  #5
    case_net = casemodel.CaseNet(topk=topk, nodulenet=nod_net)

    args.miss_ratio = config2['miss_ratio']  #1
    args.miss_thresh = config2['miss_thresh']  #0.03
    if args.debug:
        args.save_dir = 'debug'

    ###################################

    ################################
    start_epoch = args.start_epoch  #30
    if args.resume:
        print('resume', args.resume)
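        # restore the epoch counter, save_dir, and model weights from the checkpoint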
        checkpoint = torch.load(args.resume)
        if start_epoch == 0:
            start_epoch = checkpoint['epoch'] + 1
        if not save_dir:
            save_dir = checkpoint['save_dir']
        else:
            save_dir = os.path.join('results', save_dir)
        case_net.load_state_dict(checkpoint['state_dict'])
    else:
        if start_epoch == 0:
            start_epoch = 1
        if not save_dir:
            exp_id = time.strftime('%Y%m%d-%H%M%S', time.localtime())
            save_dir = os.path.join('results', args.model1 + '-' + exp_id)
        else:
            save_dir = os.path.join('results', save_dir)
    if args.epochs is None:
        end_epoch = args.lr_stage2[-1]  #160
    else:
        end_epoch = args.epochs
    ################################
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    logfile = os.path.join(save_dir, 'log')
    if args.test1 != 1 and args.test2 != 1:
        sys.stdout = Logger(logfile)
        pyfiles = [f for f in os.listdir('./') if f.endswith('.py')]
        for f in pyfiles:
            shutil.copy(f, os.path.join(save_dir, f))
    ################################
    torch.cuda.set_device(0)
    #nod_net = nod_net.cuda()
    case_net = case_net.cuda()
    loss = loss.cuda()
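    # let cuDNN auto-tune convolution algorithms for the fixed input sizes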
    cudnn.benchmark = True
    if not args.debug:
        case_net = DataParallel(case_net)
        nod_net = DataParallel(nod_net)
    ################################

    if args.test1 == 1:
        testsplit = np.load('full.npy')
        dataset = DataBowl3Classifier(testsplit, config2, phase='test')
        predlist = test_casenet(case_net, dataset).T
        anstable = np.concatenate([[testsplit], predlist], 0).T
        df = pandas.DataFrame(anstable)
        df.columns = ['id', 'cancer']  # a list, not a set: a set has no guaranteed order
        df.to_csv('allstage1.csv', index=False)
        return

    if args.test2 == 1:

        testsplit = np.load('test.npy')
        dataset = DataBowl3Classifier(testsplit, config2, phase='test')
        predlist = test_casenet(case_net, dataset).T
        anstable = np.concatenate([[testsplit], predlist], 0).T
        df = pandas.DataFrame(anstable)
        df.columns = ['id', 'cancer']
        df.to_csv('quick', index=False)
        return
    if args.test3 == 1:
        testsplit3 = np.load('stage2.npy')
        dataset = DataBowl3Classifier(testsplit3, config2, phase='test')
        predlist = test_casenet(case_net, dataset).T
        anstable = np.concatenate([[testsplit3], predlist], 0).T
        df = pandas.DataFrame(anstable)
        df.columns = ['id', 'cancer']
        df.to_csv('stage2_ans.csv', index=False)
        return
    print('save_dir', save_dir)
    print('save_freq', args.save_freq)
    print('----------Loading Data')
    trainsplit = np.load('kaggleluna_full.npy')
    valsplit = np.load('valsplit.npy')
    testsplit = np.load('test.npy')
    ##############################################
    dataset = DataBowl3Detector(trainsplit, config1, phase='train')
    train_loader_nod = DataLoader(dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=args.workers,
                                  pin_memory=True)  #kaggleluna_full

    dataset = DataBowl3Detector(valsplit, config1, phase='val')
    val_loader_nod = DataLoader(dataset,
                                batch_size=args.batch_size,
                                shuffle=False,
                                num_workers=args.workers,
                                pin_memory=True)  #valsplit
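    # fetch one batch up front to sanity-check the detector loader's shapes
    # (sizes are noted in the comments below)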
    iter1, iter2, iter3 = next(iter(train_loader_nod))
    # print('len(train_loader_nod)',len(train_loader_nod))#1881
    # print('len(val_loader_nod)',len(val_loader_nod))#216
    # print("iter1: ", iter1.size())#([1, 1, 128, 128, 128])
    # print("iter2: ", iter2.size())#([1, 32, 32, 32, 3, 5])
    # print("iter3: ", iter3.size())#([1, 3, 32, 32, 32])
    optimizer = torch.optim.SGD(nod_net.parameters(),
                                args.lr,
                                momentum=0.9,
                                weight_decay=args.weight_decay)
    #########################################
    trainsplit = np.load('full.npy')
    dataset = DataBowl3Classifier(trainsplit, config2, phase='train')
    train_loader_case = DataLoader(dataset,
                                   batch_size=args.batch_size2,
                                   shuffle=True,
                                   num_workers=args.workers,
                                   pin_memory=True)  #full

    dataset = DataBowl3Classifier(valsplit, config2, phase='val')
    val_loader_case = DataLoader(dataset,
                                 batch_size=max([args.batch_size2, 1]),
                                 shuffle=False,
                                 num_workers=args.workers,
                                 pin_memory=True)  #valsplit

    dataset = DataBowl3Classifier(trainsplit, config2, phase='val')
    all_loader_case = DataLoader(dataset,
                                 batch_size=max([args.batch_size2, 1]),
                                 shuffle=False,
                                 num_workers=args.workers,
                                 pin_memory=True)  #full
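    # fetch one batch to sanity-check the classifier loader's shapes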
    iter1, iter2, iter3, iter4 = next(iter(train_loader_case))
    # print('len(train_loader_case)',len(train_loader_case))#1595
    # print('len(val_loader_case)',len(val_loader_case))#350
    # print("iter1: ", iter1.size())#([1, 5, 1, 96, 96, 96])
    # print("iter2: ", iter2.size())#([1, 5, 3, 24, 24, 24])
    # print("iter3: ", iter3.size())#([1, 5])isnodlist
    # print("iter4: ", iter4.size())#([1, 1, 1])
    optimizer2 = torch.optim.SGD(case_net.parameters(),
                                 args.lr,
                                 momentum=0.9,
                                 weight_decay=args.weight_decay)
    ###############################################
    for epoch in range(start_epoch, end_epoch + 1):
        if epoch == start_epoch:  #30
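            # warm-up pass: with lr=0 and debug=True, the case net runs one
            # epoch without changing any weights (effectively a dry run)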
            print('Epoch-train_casenet', epoch)
            lr = args.lr  #1e-2
            debug = args.debug
            args.lr = 0.0
            args.debug = True
            train_casenet(epoch, case_net, train_loader_case, optimizer2, args)
            args.lr = lr
            args.debug = debug
            # print(stop)
        if epoch < args.lr_stage[-1]:  #[50,100,140,160]160
            print('Epoch-train_nodulenet', epoch)
            train_nodulenet(train_loader_nod, nod_net, loss, epoch, optimizer,
                            args)
            validate_nodulenet(val_loader_nod, nod_net, loss)
        if epoch > config2['startepoch']:  #20
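            # past startepoch, also train the case net and validate it on both
            # the val split and the full training split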
            print('Epoch-train_casenet', epoch)
            train_casenet(epoch, case_net, train_loader_case, optimizer2, args)
            val_casenet(epoch, case_net, val_loader_case, args)
            val_casenet(epoch, case_net, all_loader_case, args)

        if epoch % args.save_freq == 0:
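            # copy weights to CPU so the checkpoint can be loaded without a GPU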
            state_dict = case_net.module.state_dict()
            for key in state_dict.keys():
                state_dict[key] = state_dict[key].cpu()

            # (save call completed from the identical block in Example #4)
            torch.save({
                'epoch': epoch,
                'save_dir': save_dir,
                'state_dict': state_dict,
                'args': args},
                os.path.join(save_dir, '%03d.ckpt' % epoch))
Example #4
def main():
    global args
    args = parser.parse_args()

    torch.manual_seed(0)

    ##################################

    nodmodel = import_module(args.model1)
    config1, nod_net, loss, get_pbb = nodmodel.get_model()
    args.lr_stage = config1['lr_stage']
    args.lr_preset = config1['lr']

    save_dir = args.save_dir

    ##################################

    casemodel = import_module(args.model2)

    config2 = casemodel.config
    args.lr_stage2 = config2['lr_stage']
    args.lr_preset2 = config2['lr']
    topk = config2['topk']
    case_net = casemodel.CaseNet(topk=topk, nodulenet=nod_net)

    args.miss_ratio = config2['miss_ratio']
    args.miss_thresh = config2['miss_thresh']
    if args.debug:
        args.save_dir = 'debug'

    ###################################

    ################################
    start_epoch = args.start_epoch
    if args.resume:
        checkpoint = torch.load(args.resume)
        if start_epoch == 0:
            start_epoch = checkpoint['epoch'] + 1
        if not save_dir:
            save_dir = checkpoint['save_dir']
        else:
            save_dir = os.path.join('results', save_dir)
        case_net.load_state_dict(checkpoint['state_dict'])
    else:
        if start_epoch == 0:
            start_epoch = 1
        if not save_dir:
            exp_id = time.strftime('%Y%m%d-%H%M%S', time.localtime())
            save_dir = os.path.join('results', args.model1 + '-' + exp_id)
        else:
            save_dir = os.path.join('results', save_dir)
    if args.epochs is None:
        end_epoch = args.lr_stage2[-1]
    else:
        end_epoch = args.epochs
    ################################
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    logfile = os.path.join(save_dir, 'log')
    if args.test1 != 1 and args.test2 != 1:
        sys.stdout = Logger(logfile)
        pyfiles = [f for f in os.listdir('./') if f.endswith('.py')]
        for f in pyfiles:
            shutil.copy(f, os.path.join(save_dir, f))
    ################################
    torch.cuda.set_device(0)
    #nod_net = nod_net.cuda()
    case_net = case_net.cuda()
    loss = loss.cuda()
    cudnn.benchmark = True
    if not args.debug:
        case_net = DataParallel(case_net)
        nod_net = DataParallel(nod_net)
    ################################


    if args.test1 == 1:
        testsplit = np.load('full.npy')
        dataset = DataBowl3Classifier(testsplit, config2, phase='test')
        predlist = test_casenet(case_net, dataset).T
        anstable = np.concatenate([[testsplit], predlist], 0).T
        df = pandas.DataFrame(anstable)
        df.columns = ['id', 'cancer']  # a list, not a set: keeps the column order deterministic
        df.to_csv('allstage1.csv', index=False)
        return

    if args.test2 == 1:
        testsplit = np.load('test.npy')
        dataset = DataBowl3Classifier(testsplit, config2, phase='test')
        predlist = test_casenet(case_net, dataset).T
        anstable = np.concatenate([[testsplit], predlist], 0).T
        df = pandas.DataFrame(anstable)
        df.columns = ['id', 'cancer']
        df.to_csv('quick', index=False)
        return
    if args.test3 == 1:
        testsplit3 = np.load('stage2.npy')
        dataset = DataBowl3Classifier(testsplit3, config2, phase='test')
        predlist = test_casenet(case_net, dataset).T
        anstable = np.concatenate([[testsplit3], predlist], 0).T
        df = pandas.DataFrame(anstable)
        df.columns = ['id', 'cancer']
        df.to_csv('stage2_ans.csv', index=False)
        return
    print("save_dir", save_dir)
    print("save_freq", args.save_freq)
    # trainsplit = np.load('kaggleluna_full.npy')
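    # derive patient IDs from the preprocessed filenames (e.g. '<id>_clean.npy'),
    # de-duplicating while preserving first-seen order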
    train_list = [f.split('_')[0] for f in os.listdir(config1['datadir'])]
    trainsplit = sorted(set(train_list), key=train_list.index)
    # valsplit = np.load('valsplit.npy')
    # testsplit = np.load('test.npy')

    dataset = DataBowl3Detector(trainsplit, config1, phase='train')
    train_loader_nod = DataLoader(dataset, batch_size=args.batch_size,
                                  shuffle=True, num_workers=args.workers,
                                  pin_memory=True)

    # dataset = DataBowl3Detector(valsplit,config1,phase = 'val')
    # val_loader_nod = DataLoader(dataset,batch_size = args.batch_size,
    #     shuffle = False,num_workers = args.workers,pin_memory=True)

    optimizer = torch.optim.SGD(nod_net.parameters(), args.lr,
                                momentum=0.9, weight_decay=args.weight_decay)
    
    # trainsplit = np.load('full.npy')
    dataset = DataBowl3Classifier(trainsplit, config2, phase='train')
    train_loader_case = DataLoader(dataset, batch_size=args.batch_size2,
                                   shuffle=True, num_workers=args.workers,
                                   pin_memory=True)
    
    # dataset = DataBowl3Classifier(valsplit,config2,phase = 'val')
    # val_loader_case = DataLoader(dataset,batch_size = max([args.batch_size2,1]),
    #     shuffle = False,num_workers = args.workers,pin_memory=True)

    # dataset = DataBowl3Classifier(trainsplit,config2,phase = 'val')
    # all_loader_case = DataLoader(dataset,batch_size = max([args.batch_size2,1]),
    #     shuffle = False,num_workers = args.workers,pin_memory=True)

    optimizer2 = torch.optim.SGD(case_net.parameters(), args.lr,
                                 momentum=0.9, weight_decay=args.weight_decay)
    
    for epoch in range(start_epoch, end_epoch + 1):
        if epoch == start_epoch:
            lr = args.lr
            debug = args.debug
            args.lr = 0.0
            args.debug = True
            train_casenet(epoch, case_net, train_loader_case, optimizer2, args)
            args.lr = lr
            args.debug = debug
        if epoch < args.lr_stage[-1]:
            train_nodulenet(train_loader_nod, nod_net, loss, epoch, optimizer, args)
            # validate_nodulenet(val_loader_nod, nod_net, loss)
        if epoch > config2['startepoch']:
            train_casenet(epoch, case_net, train_loader_case, optimizer2, args)
            # val_casenet(epoch, case_net, val_loader_case, args)
            # val_casenet(epoch, case_net, all_loader_case, args)

        if epoch % args.save_freq == 0: 
            state_dict = case_net.module.state_dict()
            for key in state_dict.keys():
                state_dict[key] = state_dict[key].cpu()

            torch.save({
                'epoch': epoch,
                'save_dir': save_dir,
                'state_dict': state_dict,
                'args': args},
                os.path.join(save_dir, '%03d.ckpt' % epoch))
Example #5
File: main.py Project: rahit/DSB2017
def test_casenet(model, testset):
    # (function header reconstructed; the snippet is truncated at the top)
    data_loader = DataLoader(testset,
                             batch_size=1,
                             shuffle=False,
                             num_workers=32,
                             pin_memory=True)

    # model = model.cuda()
    model.eval()
    predlist = []

    # weight = torch.from_numpy(np.ones_like(y).float().cuda()
    for i, (x, coord) in enumerate(data_loader):
        coord = Variable(coord).cuda()
        x = Variable(x).cuda()
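        # forward pass; only the case-level prediction is collected here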
        nodulePred, casePred, _ = model(x, coord)
        predlist.append(casePred.data.cpu().numpy())
        # print(
        #     [i, data_loader.dataset.split[i, 1], casePred.data.cpu().numpy()])

    return np.concatenate(predlist)


casemodel_config['bboxpath'] = bbox_result_path
casemodel_config['datadir'] = prep_result_path

dataset = DataBowl3Classifier(dirlist, casemodel_config, phase='test')
predlist = test_casenet(casenet, dataset).T
anstable = np.concatenate([[dirlist], predlist], 0).T
df = pandas.DataFrame(anstable)
df.columns = ['id', 'cancer']  # a list, not a set: keeps the column order deterministic
df.to_csv(filename, index=False)