Example #1
    split_comber = SplitComb(config1['sidelen'],
                             config1['max_stride'],
                             config1['stride'],
                             config1['margin'],
                             pad_value=config1['pad_value'])

    dataset = DataBowl3Detector(testsplit,
                                config1,
                                phase='test',
                                split_comber=split_comber)
    test_loader = DataLoader(dataset,
                             batch_size=1,
                             shuffle=False,
                             num_workers=32,
                             pin_memory=False,
                             collate_fn=collate)

    test_detect(test_loader,
                nod_net,
                get_pbb,
                bbox_result_path,
                config1,
                n_gpu=config_submit['n_gpu'])

    dataset = DataBowl3Detector(valsplit,
                                config1,
                                phase='test',
                                split_comber=split_comber)
    test_loader = DataLoader(dataset,
                             batch_size=1,
                             shuffle=False,
                             num_workers=32,
                             pin_memory=False,
                             collate_fn=collate)

    test_detect(test_loader,
                nod_net,
                get_pbb,
                bbox_result_path,
                config1,
                n_gpu=config_submit['n_gpu'])
Example #2
config1, nod_net, loss, get_pbb = nodmodel.get_model()
checkpoint = torch.load('./2_nodule_detection/detector.ckpt')
nod_net.load_state_dict(checkpoint['state_dict'])

# No-op; for GPU inference, replace with nod_net = nod_net.cuda() followed by
# nod_net = DataParallel(nod_net), as done for the classifier in Example #4.
nod_net = nod_net

bbox_result_path = args.bbox_root
if not os.path.exists(bbox_result_path):
    os.mkdir(bbox_result_path)

split_comber = SplitComb(config1['sidelen'],
                         config1['max_stride'],
                         config1['stride'],
                         config1['margin'],
                         pad_value=config1['pad_value'])

dataset = DataBowl3Detector(config['testsplit'],
                            config1,
                            phase='test',
                            split_comber=split_comber)
test_loader = DataLoader(dataset,
                         batch_size=1,
                         shuffle=False,
                         num_workers=1,
                         pin_memory=False,
                         collate_fn=collate)

test_detect(test_loader, nod_net, get_pbb, bbox_result_path, config1)
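Each of these loaders passes collate_fn=collate, but none of the examples show where collate comes from. A minimal sketch of a compatible collate function is given below; treating every test-phase item as a list of SplitComb patches (and therefore flattening rather than stacking) is an assumption about this pipeline, not code taken from it.

import collections.abc

import numpy as np
import torch


def collate(batch):
    """Hypothetical collate for test-phase batches where each dataset item is
    already a list of patches produced by SplitComb, so elements are kept as
    lists instead of being stacked into one tensor."""
    if torch.is_tensor(batch[0]):
        # Keep tensors separate, just add a leading batch dimension of 1.
        return [b.unsqueeze(0) for b in batch]
    elif isinstance(batch[0], np.ndarray):
        return batch
    elif isinstance(batch[0], int):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], collections.abc.Iterable):
        # Transpose [(img, coord, nzhw), ...] into ([imgs], [coords], [nzhws]).
        transposed = zip(*batch)
        return [collate(samples) for samples in transposed]
    raise TypeError('unsupported batch element type: %r' % type(batch[0]))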
Example #3
import os
import sys

# PYTHONPATH set here only affects child processes; sys.path.append makes the
# Caffe bindings and project code importable in the current interpreter too.
os.environ['PYTHONPATH'] = '%s:%s' % ('/home/caffe/python', '/workspace/pai')
sys.path.append('/home/caffe/python')
sys.path.append('/workspace/pai')
import caffe

from data import DataBowl3Detector
from test_detect import test_detect
from split_combine import SplitComb
from test_config import test_config as config

process = 'test'
if config['detector']:
    net = caffe.Net(config['test_prototxt'], config['caffe_model'], caffe.TEST)
    split_comber = SplitComb(config)
    dataset = DataBowl3Detector(config,
                                process=process,
                                split_comber=split_comber)
    test_detect(dataset, net, config=config, process=process)
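For reference, here is a hedged sketch of pushing a single preprocessed patch through the Caffe detector directly, outside of test_detect. The input blob name 'data' and the patch shape are assumptions, not values read from test_config.

import numpy as np


def run_patch(net, patch):
    """Feed one float32 patch (e.g. shape (1, 1, D, H, W)) through the
    detector. Assumes the network's input blob is named 'data'."""
    net.blobs['data'].reshape(*patch.shape)   # resize the input blob to match
    net.blobs['data'].data[...] = patch       # copy the patch into the blob
    out = net.forward()                       # run the forward pass
    return {name: blob.copy() for name, blob in out.items()}

# Hypothetical usage with an all-zero 208-voxel cube and the net built above:
# patch = np.zeros((1, 1, 208, 208, 208), dtype=np.float32)
# outputs = run_patch(net, patch)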
Example #4
bbox_result_path = './bbox_result'
if not os.path.exists(bbox_result_path):
    os.mkdir(bbox_result_path)
#testsplit = [f.split('_clean')[0] for f in os.listdir(prep_result_path) if '_clean' in f]

if not skip_detect:
    margin = 32
    sidelen = 144
    config1['datadir'] = prep_result_path
    split_comber = SplitComb(sidelen, config1['max_stride'], config1['stride'],
                             margin, pad_value=config1['pad_value'])

    dataset = DataBowl3Detector(testsplit, config1, phase='test',
                                split_comber=split_comber)
    test_loader = DataLoader(dataset, batch_size=1, shuffle=False,
                             num_workers=32, pin_memory=False,
                             collate_fn=collate)

    test_detect(test_loader, nod_net, get_pbb, bbox_result_path, config1,
                n_gpu=config_submit['n_gpu'])

casemodel = import_module(config_submit['classifier_model'].split('.py')[0])
casenet = casemodel.CaseNet(topk=5)
config2 = casemodel.config
checkpoint = torch.load(config_submit['classifier_param'])
casenet.load_state_dict(checkpoint['state_dict'])

torch.cuda.set_device(0)
casenet = casenet.cuda()
cudnn.benchmark = True
casenet = DataParallel(casenet)
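The snippet stops right after wrapping casenet in DataParallel. Below is a minimal, hedged sketch of the inference step that would typically follow; the loader construction and the exact inputs CaseNet expects are assumptions about this pipeline, not its actual code.

import torch


def predict_cases(casenet, case_loader):
    """Hypothetical inference loop: run the DataParallel-wrapped classifier
    over a DataLoader and collect the raw per-batch outputs."""
    casenet.eval()
    outputs = []
    with torch.no_grad():
        for batch in case_loader:
            # Move every tensor in the batch to the GPU, pass anything else through.
            inputs = [x.cuda(non_blocking=True) if torch.is_tensor(x) else x
                      for x in batch]
            outputs.append(casenet(*inputs))
    return outputs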