Example #1
def main():
    global args
    args = parser.parse_args()
    test_datadir = config_test['test_preprocess_result_path']
    nodmodel = import_module(config_test['detector_model'])  # dynamically load the net_detector module, which defines the config, the detection network and its forward pass
    config, nod_net, loss, get_pbb = nodmodel.get_model()  # build the model
    checkpoint = torch.load(args.resume)  # load the whole checkpoint
    nod_net.load_state_dict(checkpoint['state_dict'])  # restore only the model parameters

    nod_net = nod_net.cuda()  # move the model to the GPU
    cudnn.benchmark = True  # let cudnn pick the fastest convolution algorithms
    nod_net = DataParallel(nod_net)  # split each minibatch across the available GPUs and gather the outputs back

    test_result_dir = config_test['test_result_path']  # directory for the bounding-box results
    if not os.path.exists(test_result_dir):
        os.mkdir(test_result_dir)

    margin = 32
    sidelen = 144  # the full CT volume is split into cubes of this side length
    n_gpu = args.n_test

    # config['datadir'] = prep_result_path  # input path: the folder holding the preprocessed volumes
    # split the 3D CT volume first; without splitting, the GPU runs out of memory
    # constructor only, no computation happens yet
    split_comber = SplitComb(sidelen, config['max_stride'], config['stride'], margin, pad_value=config['pad_value'])
    # constructor only, no computation happens yet
    dataset = DataBowl3Detector(test_datadir, config, phase='test', split_comber=split_comber)
    # constructor only, no computation happens yet
    test_loader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=32, pin_memory=False, collate_fn=collate)
    # run the actual computation
    test_detect(test_loader, nod_net, get_pbb, test_result_dir, config, n_gpu=n_gpu)
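
Example #1 relies on a module-level parser and config_test defined elsewhere in the project. A minimal sketch of what they might look like, with hypothetical defaults and paths, so the snippet above can run:

# Minimal sketch of the module-level objects Example #1 assumes.
# Argument names and config keys mirror how they are used above;
# the default values and paths are hypothetical placeholders.
import argparse

parser = argparse.ArgumentParser(description='nodule detector test')
parser.add_argument('--resume', type=str, default='detector.ckpt',
                    help='path to the checkpoint to load')
parser.add_argument('--n_test', type=int, default=1,
                    help='number of GPUs used during testing')

config_test = {
    'test_preprocess_result_path': './preprocess/',  # preprocessed CT volumes
    'detector_model': 'net_detector',                # module exposing get_model()
    'test_result_path': './bbox_result/',            # where bounding-box results are written
}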
Example #2
    def init_net(self):
        torch.manual_seed(0)
        torch.cuda.set_device(0)

        #model = import_module(self.model)
        detect_config, detect_net, _, get_pbb = detect_model.get_model()
        attribute_config, attribute_net, __ = attribute_model.get_model()

        detect_checkpoint = torch.load(self.detect_resume)
        detect_net.load_state_dict(detect_checkpoint['state_dict'])

        attribute_checkpoint = torch.load(self.attribute_resume)
        attribute_net.load_state_dict(attribute_checkpoint['state_dict'])

        n_gpu = setgpu(self.gpu)

        detect_net = detect_net.cuda()
        attribute_net = attribute_net.cuda()
        #loss = loss.cuda()
        cudnn.benchmark = True
        detect_net = DataParallel(detect_net)
        attribute_net = DataParallel(attribute_net)

        margin = 32
        sidelen = 144
        split_comber = SplitComb(sidelen, detect_config['max_stride'],
                                 detect_config['stride'], margin,
                                 detect_config['pad_value'])

        print("init_net complete")
        return detect_net, attribute_net, split_comber, get_pbb
Example #3
    def __init__(self, args):
        self.args = args
        # self.method = args.method
        self.config, self.net, self.loss, self.get_pbb = get_model()
        checkpoint = torch.load(args.resume_checkpoint)
        self.net.load_state_dict(checkpoint["state_dict"])
        self.split_comber = SplitComb(
            self.args.sidelen,
            config["max_stride"],
            config["stride"],
            self.args.margin,
            config["pad_value"],
        )
        if args.gpu is not None:
            self.device = torch.device("cuda")
            self.net = self.net.to(self.device)
            cudnn.benchmark = True
        pass
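
The constructor assumes an args namespace carrying resume_checkpoint, sidelen, margin and gpu. A hypothetical way to build such an object (field names follow the usage above; defaults are placeholders, not taken from the original project):

# Hypothetical construction of the args object the constructor reads.
import argparse

parser = argparse.ArgumentParser(description='detector arguments')
parser.add_argument('--resume_checkpoint', type=str, default='detector.ckpt')
parser.add_argument('--sidelen', type=int, default=144)
parser.add_argument('--margin', type=int, default=32)
parser.add_argument('--gpu', type=str, default='0')
args = parser.parse_args([])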
Example #4
File: test_se.py Project: kingaza/DeepSEED
def main():
    global args
    args = parser.parse_args()
    
    torch.manual_seed(0)
    torch.cuda.set_device(0)

    model = import_module(args.model)
    config, net, loss, get_pbb = model.get_model()
    save_dir = args.save_dir
    load_dir = LOAD_DIR
    
    if args.resume:
        print("=> loading checkpoint '{}'".format(args.resume))
        checkpoint = torch.load(load_dir + 'detector_' + args.resume)
        net.load_state_dict(checkpoint['state_dict'])
    
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    
    net = net.cuda()
    loss = loss.cuda()
    cudnn.benchmark = True
    net = DataParallel(net)
    datadir = config_testing['preprocess_result_path']
    
    print("start test")
    margin = 32
    sidelen = 144

    split_comber = SplitComb(sidelen,config['max_stride'],config['stride'],margin,config['pad_value'])
    dataset = LungNodule3Ddetector(
        datadir,
        'LIDC_test.npy',
        config,
        phase='test',
        split_comber=split_comber)
    test_loader = DataLoader(
        dataset,
        batch_size = 1,
        shuffle = False,
        num_workers = args.workers,
        collate_fn = collate,
        pin_memory=False)
    
    test(test_loader, net, get_pbb, save_dir,config)
Example #5
def main():
    global args
    args = parser.parse_args()

    torch.manual_seed(0)
    torch.cuda.set_device(0)

    print("import module ")
    model = import_module(args.model)
    print("get module")
    config, net, loss, get_pbb = model.get_model()
    start_epoch = args.start_epoch
    save_dir = args.save_dir
    if args.resume:
        checkpoint = torch.load(args.resume)
        if start_epoch == 0:
            start_epoch = checkpoint['epoch'] + 1
        if not save_dir:
            save_dir = checkpoint['save_dir']
        else:
            save_dir = os.path.join('results', save_dir)
        net.load_state_dict(checkpoint['state_dict'])
        print("save dir ", save_dir)
    else:
        if start_epoch == 0:
            start_epoch = 1
        if not save_dir:
            exp_id = time.strftime('%Y%m%d-%H%M%S', time.localtime())
            save_dir = os.path.join('results', args.model + '-' + exp_id)
        else:
            save_dir = os.path.join('results', save_dir)

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    logfile = os.path.join(save_dir, 'log')
    if args.test != 1:
        sys.stdout = Logger(logfile)
        pyfiles = [f for f in os.listdir('./') if f.endswith('.py')]
        for f in pyfiles:
            shutil.copy(f, os.path.join(save_dir, f))
    print("num of gpu", args.gpu)
    n_gpu = setgpu(args.gpu)
    args.n_gpu = n_gpu
    print("get net")
    net = net.cuda()
    print("get loss")
    loss = loss.cuda()
    cudnn.benchmark = True
    print("data parallel")
    net = DataParallel(net)
    datadir = config_training['preprocess_result_path']

    if args.test == 1:
        print("testing")
        margin = 32
        sidelen = 112

        split_comber = SplitComb(sidelen, config['max_stride'],
                                 config['stride'], margin, config['pad_value'])

        print("load data")
        dataset = data.DataBowl3Detector(datadir,
                                         'full.npy',
                                         config,
                                         phase='test',
                                         split_comber=split_comber)
        test_loader = DataLoader(dataset,
                                 batch_size=1,
                                 shuffle=False,
                                 num_workers=args.workers,
                                 collate_fn=data.collate,
                                 pin_memory=False)

        test(test_loader, net, get_pbb, save_dir, config)
        return

    #net = DataParallel(net)
    dataset = data.DataBowl3Detector(datadir,
                                     'kaggleluna_full.npy',
                                     config,
                                     phase='train')
    train_loader = DataLoader(dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.workers,
                              pin_memory=True)

    dataset = data.DataBowl3Detector(datadir,
                                     'valsplit.npy',
                                     config,
                                     phase='val')
    val_loader = DataLoader(dataset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=args.workers,
                            pin_memory=True)

    optimizer = torch.optim.SGD(net.parameters(),
                                args.lr,
                                momentum=0.9,
                                weight_decay=args.weight_decay)

    def get_lr(epoch):
        if epoch <= args.epochs * 0.5:
            lr = args.lr
        elif epoch <= args.epochs * 0.8:
            lr = 0.1 * args.lr
        else:
            lr = 0.01 * args.lr
        return lr

    for epoch in range(start_epoch, args.epochs + 1):
        train(train_loader, net, loss, epoch, optimizer, get_lr,
              args.save_freq, save_dir)
        validate(val_loader, net, loss)
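
get_lr implements a three-step decay: the full learning rate for the first half of training, then 10x smaller, then 100x smaller. Assuming train() applies it once per epoch (the project's actual train() may differ), a minimal sketch of that application looks like:

# Minimal sketch of applying a step-decay schedule such as get_lr above.
def apply_lr(optimizer, epoch, get_lr):
    lr = get_lr(epoch)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr  # set the epoch's learning rate on every group
    return lr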
Example #6
def main():
    global args
    args = parser.parse_args()

    torch.manual_seed(0)
    torch.cuda.set_device(0)

    model = import_module(args.model)
    config, net, loss, get_pbb = model.get_model()
    start_epoch = args.start_epoch
    save_dir = args.save_dir

    if args.resume:
        checkpoint = torch.load(args.resume)
        if start_epoch == 0:
            start_epoch = checkpoint['epoch'] + 1
        if not save_dir:
            save_dir = checkpoint['save_dir']
        else:
            save_dir = os.path.join('results', save_dir)
        net.load_state_dict(checkpoint['state_dict'])
    else:
        if start_epoch == 0:
            start_epoch = 1
        if not save_dir:
            exp_id = time.strftime('%Y%m%d-%H%M%S', time.localtime())
            save_dir = os.path.join('results', args.model + '-' + exp_id)
        else:
            save_dir = os.path.join('results', save_dir)

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    logfile = os.path.join(save_dir, 'log')
    if args.test != 1:
        sys.stdout = Logger(logfile)
        pyfiles = [f for f in os.listdir('./') if f.endswith('.py')]
        for f in pyfiles:
            shutil.copy(f, os.path.join(save_dir, f))
    n_gpu = setgpu(args.gpu)
    args.n_gpu = n_gpu
    net = net.cuda()
    loss = loss.cuda()
    cudnn.benchmark = False  #True
    net = DataParallel(net)
    traindatadir = config_training['train_preprocess_result_path']
    valdatadir = config_training['val_preprocess_result_path']
    testdatadir = config_training['test_preprocess_result_path']
    trainfilelist = []
    for f in os.listdir(config_training['train_data_path']):
        if f.endswith('.mhd') and f[:-4] not in config_training['black_list']:
            trainfilelist.append(f[:-4])
    valfilelist = []
    for f in os.listdir(config_training['val_data_path']):
        if f.endswith('.mhd') and f[:-4] not in config_training['black_list']:
            valfilelist.append(f[:-4])
    testfilelist = []
    for f in os.listdir(config_training['test_data_path']):
        if f.endswith('.mhd') and f[:-4] not in config_training['black_list']:
            testfilelist.append(f[:-4])

    if args.test == 1:
        margin = 32
        sidelen = 144
        import data
        split_comber = SplitComb(sidelen, config['max_stride'],
                                 config['stride'], margin, config['pad_value'])
        dataset = data.DataBowl3Detector(testdatadir,
                                         testfilelist,
                                         config,
                                         phase='test',
                                         split_comber=split_comber)
        test_loader = DataLoader(dataset,
                                 batch_size=1,
                                 shuffle=False,
                                 num_workers=args.workers,
                                 collate_fn=data.collate,
                                 pin_memory=False)

        for i, (data, target, coord,
                nzhw) in enumerate(test_loader):  # check data consistency
            if i >= len(testfilelist) / args.batch_size:
                break

        test(test_loader, net, get_pbb, save_dir, config)
        return
    #net = DataParallel(net)
    import data
    dataset = data.DataBowl3Detector(traindatadir,
                                     trainfilelist,
                                     config,
                                     phase='train')
    train_loader = DataLoader(dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.workers,
                              pin_memory=True)

    dataset = data.DataBowl3Detector(valdatadir,
                                     valfilelist,
                                     config,
                                     phase='val')
    val_loader = DataLoader(dataset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=args.workers,
                            pin_memory=True)

    for i, (data, target,
            coord) in enumerate(train_loader):  # check data consistency
        if i >= len(trainfilelist) / args.batch_size:
            break

    for i, (data, target,
            coord) in enumerate(val_loader):  # check data consistency
        if i >= len(valfilelist) / args.batch_size:
            break

    optimizer = torch.optim.SGD(net.parameters(),
                                args.lr,
                                momentum=0.9,
                                weight_decay=args.weight_decay)

    def get_lr(epoch):
        if epoch <= args.epochs * 1 / 3:  #0.5:
            lr = args.lr
        elif epoch <= args.epochs * 2 / 3:  #0.8:
            lr = 0.1 * args.lr
        elif epoch <= args.epochs * 0.8:
            lr = 0.05 * args.lr
        else:
            lr = 0.01 * args.lr
        return lr

    for epoch in range(start_epoch, args.epochs + 1):
        train(train_loader, net, loss, epoch, optimizer, get_lr,
              args.save_freq, save_dir)
        validate(val_loader, net, loss)
Example #7
def main():
    global args
    start = time.time()
    print('start!')
    args = parser.parse_args()
    config_training = import_module(args.config)
    config_training = config_training.config
    # from config_training import config as config_training
    torch.manual_seed(0)
    # torch.cuda.set_device(0)
    model = import_module(args.model)
    config, net, loss, get_pbb = model.get_model()
    start_epoch = args.start_epoch  #0

    save_dir = args.save_dir  #res18/retrft960
    # print('args.resume!',args.resume, time() - start)
    if args.resume:
        checkpoint = torch.load(args.resume)
        print('args.resume', args.resume)
        # if start_epoch == 0:
        #     start_epoch = checkpoint['epoch'] + 1
        # if not save_dir:
        #     save_dir = checkpoint['save_dir']
        # else:
        #     save_dir = os.path.join('results',save_dir)
        net.load_state_dict(checkpoint['state_dict'])
    # else:
    # print('start_epoch',start_epoch, time() - start)
    if start_epoch == 0:
        start_epoch = 1
    if not save_dir:
        exp_id = time.strftime('%Y%m%d-%H%M%S', time.localtime())
        save_dir = os.path.join('results', args.model + '-' + exp_id)
    else:
        save_dir = os.path.join('results', save_dir)

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    logfile = os.path.join(save_dir, 'log')
    # print('args.test',args.test, time() - start)
    if args.test != 1:
        sys.stdout = Logger(logfile)
        pyfiles = [f for f in os.listdir('./') if f.endswith('.py')]
        # print('pyfiles', time() - start)
        for f in pyfiles:
            shutil.copy(f, os.path.join(save_dir, f))
            # print('pyfiles1', time() - start)
    n_gpu = setgpu(args.gpu)
    args.n_gpu = n_gpu

    # net = net.cuda()

    loss = loss.cuda()
    device = 'cuda'
    net = net.to(device)
    cudnn.benchmark = True  #False
    net = DataParallel(net).cuda()
    # print('net0', time.time() - start)
    traindatadir = config_training[
        'train_preprocess_result_path']  #'/home/zhaojie/zhaojie/Lung/data/luna16/LUNA16PROPOCESSPATH/'
    valdatadir = config_training[
        'val_preprocess_result_path']  #'/home/zhaojie/zhaojie/Lung/data/luna16/LUNA16PROPOCESSPATH/'
    testdatadir = config_training[
        'test_preprocess_result_path']  #'/home/zhaojie/zhaojie/Lung/data/luna16/LUNA16PROPOCESSPATH/'
    trainfilelist = []
    # print('data_path',config_training['train_data_path'])
    for folder in config_training['train_data_path']:
        print('folder', folder)
        for f in os.listdir(folder):
            if f.endswith(
                    '.mhd') and f[:-4] not in config_training['black_list']:
                trainfilelist.append(folder.split('/')[-2] + '/' + f[:-4])
    valfilelist = []
    for folder in config_training['val_data_path']:
        for f in os.listdir(folder):
            if f.endswith(
                    '.mhd') and f[:-4] not in config_training['black_list']:
                valfilelist.append(folder.split('/')[-2] + '/' + f[:-4])
    testfilelist = []
    for folder in config_training['test_data_path']:
        for f in os.listdir(folder):
            if f.endswith(
                    '.mhd') and f[:-4] not in config_training['black_list']:
                testfilelist.append(folder.split('/')[-2] + '/' + f[:-4])

    if args.test == 1:
        print('--------test-------------')
        print('len(testfilelist)', len(testfilelist))

        print('batch_size', args.batch_size)
        # margin = 32
        margin = 16
        # sidelen = 144
        sidelen = 128
        # sidelen = 208
        import data
        split_comber = SplitComb(sidelen, config['max_stride'],
                                 config['stride'], margin, config['pad_value'])
        dataset = data.DataBowl3Detector(testdatadir,
                                         testfilelist,
                                         config,
                                         phase='test',
                                         split_comber=split_comber)
        test_loader = DataLoader(dataset,
                                 batch_size=1,
                                 shuffle=False,
                                 num_workers=args.workers,
                                 collate_fn=data.collate,
                                 pin_memory=False)
        iter1, iter2, iter3, iter4 = next(iter(test_loader))
        # print("sample: ", len(iter1))
        # print("lable: ", iter2.size())
        # print("coord: ", iter3.size())
        for i, (data, target, coord,
                nzhw) in enumerate(test_loader):  # check data consistency
            if i >= len(testfilelist) // args.batch_size:
                break

        test(test_loader, net, get_pbb, save_dir, config)
        return

    import data
    print('len(trainfilelist)', len(trainfilelist))
    # print('trainfilelist',trainfilelist)
    print('batch_size', args.batch_size)
    dataset = data.DataBowl3Detector(traindatadir,
                                     trainfilelist,
                                     config,
                                     phase='train')
    # print('train_loader')
    train_loader = DataLoader(dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.workers,
                              pin_memory=True)

    dataset = data.DataBowl3Detector(valdatadir,
                                     valfilelist,
                                     config,
                                     phase='val')
    val_loader = DataLoader(dataset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=args.workers,
                            pin_memory=True)
    iter1, iter2, iter3 = next(iter(train_loader))
    # print("sample: ", iter1.size())
    # print("lable: ", iter2.size())
    # print("coord: ", iter3.size())
    for i, (data, target,
            coord) in enumerate(train_loader):  # check data consistency
        if i >= len(trainfilelist) / args.batch_size:
            break

    for i, (data, target,
            coord) in enumerate(val_loader):  # check data consistency
        if i >= len(valfilelist) / args.batch_size:
            break

    optimizer = torch.optim.SGD(net.parameters(),
                                args.lr,
                                momentum=0.9,
                                weight_decay=args.weight_decay)

    def get_lr(epoch):
        if epoch <= args.epochs * 1 / 3:  #0.5:
            lr = args.lr
        elif epoch <= args.epochs * 2 / 3:  #0.8:
            lr = 0.1 * args.lr
        elif epoch <= args.epochs * 0.8:
            lr = 0.05 * args.lr
        else:
            lr = 0.01 * args.lr
        return lr

    for epoch in range(start_epoch, args.epochs + 1):
        train(train_loader, net, loss, epoch, optimizer, get_lr,
              args.save_freq, save_dir)
        validate(val_loader, net, loss)
Example #8
def main():
    global args
    args = parser.parse_args()
    print(args.config)
    config_training = import_module(args.config)
    config_training = config_training.config
    # from config_training import config as config_training
    torch.manual_seed(0)
    torch.cuda.set_device(0)

    model = import_module(args.model)
    config, net, loss, get_pbb = model.get_model()
    start_epoch = args.start_epoch
    save_dir = args.save_dir

    model2 = torch.nn.Sequential()
    model2.add_module('linear', torch.nn.Linear(3, 6, bias=True))
    model2.linear.weight = torch.nn.Parameter(torch.randn(6, 3))
    model2.linear.bias = torch.nn.Parameter(torch.randn(6))
    loss2 = torch.nn.CrossEntropyLoss()
    optimizer2 = optim.SGD(model2.parameters(), lr=args.lr,
                           momentum=0.9)  #, weight_decay=args.weight_decay)

    if args.resume:
        print('resume from ', args.resume)
        checkpoint = torch.load(args.resume)
        # if start_epoch == 0:
        #     start_epoch = checkpoint['epoch'] + 1
        # if not save_dir:
        #     save_dir = checkpoint['save_dir']
        # else:
        #     save_dir = os.path.join('results',save_dir)
        # print(checkpoint.keys())
        net.load_state_dict(checkpoint['state_dict'])
        if start_epoch != 0:
            model2.load_state_dict(checkpoint['state_dict2'])
    # else:
    if start_epoch == 0:
        start_epoch = 1
    if not save_dir:
        exp_id = time.strftime('%Y%m%d-%H%M%S', time.localtime())
        save_dir = os.path.join('results', args.model + '-' + exp_id)
    else:
        save_dir = os.path.join('results', save_dir)

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    logfile = os.path.join(save_dir, 'log')
    if args.test != 1:
        sys.stdout = Logger(logfile)
        pyfiles = [f for f in os.listdir('./') if f.endswith('.py')]
        for f in pyfiles:
            shutil.copy(f, os.path.join(save_dir, f))
    n_gpu = setgpu(args.gpu)
    args.n_gpu = n_gpu
    net = net.cuda()
    loss = loss.cuda()
    cudnn.benchmark = False  #True
    net = DataParallel(net)
    traindatadir = config_training['train_preprocess_result_path']
    valdatadir = config_training['val_preprocess_result_path']
    testdatadir = config_training['test_preprocess_result_path']
    trainfilelist = []
    print(config_training['train_data_path'])
    for folder in config_training['train_data_path']:
        print(folder)
        for f in os.listdir(folder):
            if f.endswith(
                    '.mhd') and f[:-4] not in config_training['black_list']:
                if f[:-4] not in fnamedct:
                    trainfilelist.append(folder.split('/')[-2] + '/' + f[:-4])
                else:
                    trainfilelist.append(
                        folder.split('/')[-2] + '/' + fnamedct[f[:-4]])
    valfilelist = []
    for folder in config_training['val_data_path']:
        for f in os.listdir(folder):
            if f.endswith(
                    '.mhd') and f[:-4] not in config_training['black_list']:
                if f[:-4] not in fnamedct:
                    valfilelist.append(folder.split('/')[-2] + '/' + f[:-4])
                else:
                    valfilelist.append(
                        folder.split('/')[-2] + '/' + fnamedct[f[:-4]])
    testfilelist = []
    for folder in config_training['test_data_path']:
        for f in os.listdir(folder):
            if f.endswith(
                    '.mhd') and f[:-4] not in config_training['black_list']:
                if f[:-4] not in fnamedct:
                    testfilelist.append(folder.split('/')[-2] + '/' + f[:-4])
                else:
                    testfilelist.append(
                        folder.split('/')[-2] + '/' + fnamedct[f[:-4]])
    if args.test == 1:
        margin = 32
        sidelen = 144
        import data
        split_comber = SplitComb(sidelen, config['max_stride'],
                                 config['stride'], margin, config['pad_value'])
        dataset = data.DataBowl3Detector(testdatadir,
                                         testfilelist,
                                         config,
                                         phase='test',
                                         split_comber=split_comber)
        test_loader = DataLoader(dataset,
                                 batch_size=1,
                                 shuffle=False,
                                 num_workers=args.workers,
                                 collate_fn=data.collate,
                                 pin_memory=False)

        for i, (data, target, coord,
                nzhw) in enumerate(test_loader):  # check data consistency
            if i >= len(testfilelist) / args.batch_size:
                break

        test(test_loader, net, get_pbb, save_dir, config)
        return
    #net = DataParallel(net)
    import data
    print(len(trainfilelist))
    dataset = data.DataBowl3Detector(traindatadir,
                                     trainfilelist,
                                     config,
                                     phase='train')
    train_loader = DataLoader(dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.workers,
                              pin_memory=True)

    dataset = data.DataBowl3Detector(valdatadir,
                                     valfilelist,
                                     config,
                                     phase='val')
    val_loader = DataLoader(dataset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=args.workers,
                            pin_memory=True)
    # load weak data
    # weakdata = pd.read_csv(config_training['weaktrain_annos_path'], names=['fname', 'position', 'centerslice'])
    # weakfilename = weakdata['fname'].tolist()[1:]
    # weakfilename = list(set(weakfilename))
    # print('#weakdata', len(weakfilename))
    for i, (data, target,
            coord) in enumerate(train_loader):  # check data consistency
        if i >= len(trainfilelist) / args.batch_size:
            break

    for i, (data, target,
            coord) in enumerate(val_loader):  # check data consistency
        if i >= len(valfilelist) / args.batch_size:
            break
    optimizer = torch.optim.SGD(net.parameters(),
                                args.lr,
                                momentum=0.9,
                                weight_decay=args.weight_decay)
    npars = 0
    for par in net.parameters():
        curnpar = 1
        for s in par.size():
            curnpar *= s
        npars += curnpar
    print('network size', npars)

    def get_lr(epoch):
        if epoch <= args.epochs * 1 / 2:  #0.5:
            lr = args.lr
        elif epoch <= args.epochs * 3 / 4:  #0.8:
            lr = 0.5 * args.lr
        # elif epoch <= args.epochs * 0.8:
        #     lr = 0.05 * args.lr
        else:
            lr = 0.1 * args.lr
        return lr

    for epoch in range(start_epoch, args.epochs + 1):
        # if epoch % 10 == 0:
        import data
        margin = 32
        sidelen = 144
        split_comber = SplitComb(sidelen, config['max_stride'],
                                 config['stride'], margin, config['pad_value'])
        dataset = weakdatav2.DataBowl3Detector(
            config_training['weaktrain_data_path'],
            weakdct.keys(),
            config,
            phase='test',
            split_comber=split_comber)
        weaktest_loader = DataLoader(dataset,
                                     batch_size=1,
                                     shuffle=False,
                                     num_workers=args.workers,
                                     collate_fn=data.collate,
                                     pin_memory=False)
        print(len(weaktest_loader))
        for i, (data, target, coord,
                nzhw) in enumerate(weaktest_loader):  # check data consistency
            if i >= len(testfilelist) / args.batch_size:
                break
        srslst, cdxlst, cdylst, cdzlst, dimlst, prblst, poslst, lwzlst, upzlst = weaktest(
            weaktest_loader, model2, net, get_pbb, save_dir, config, epoch)
        config['ep'] = epoch
        config['save_dir'] = save_dir
        with open(
                config['save_dir'] + 'weakinferep' + str(config['ep']) + 'srs',
                'wb') as fp:
            pickle.dump(srslst, fp)
        with open(
                config['save_dir'] + 'weakinferep' + str(config['ep']) + 'cdx',
                'wb') as fp:
            pickle.dump(cdxlst, fp)
        with open(
                config['save_dir'] + 'weakinferep' + str(config['ep']) + 'cdy',
                'wb') as fp:
            pickle.dump(cdylst, fp)
        with open(
                config['save_dir'] + 'weakinferep' + str(config['ep']) + 'cdz',
                'wb') as fp:
            pickle.dump(cdzlst, fp)
        with open(
                config['save_dir'] + 'weakinferep' + str(config['ep']) + 'dim',
                'wb') as fp:
            pickle.dump(dimlst, fp)
        with open(
                config['save_dir'] + 'weakinferep' + str(config['ep']) + 'prb',
                'wb') as fp:
            pickle.dump(prblst, fp)
        with open(
                config['save_dir'] + 'weakinferep' + str(config['ep']) + 'pos',
                'wb') as fp:
            pickle.dump(poslst, fp)
        pdfrm = pd.read_csv(
            config['save_dir'] + 'weakinferep' + str(config['ep']) + '.csv',
            names=[
                'seriesuid', 'coordX', 'coordY', 'coordZ', 'diameter',
                'probability', 'position'
            ])
        if srslst:
            fnmlst = srslst  #pdfrm['seriesuid'].tolist()[1:]
        dataset = weakdatav2.DataBowl3Detector(
            config_training['weaktrain_data_path'],
            list(set(fnmlst)),
            config,
            phase='train',
            fnmlst=srslst,
            cdxlst=cdxlst,
            cdylst=cdylst,
            cdzlst=cdzlst,
            dimlst=dimlst,
            prblst=prblst,
            poslst=poslst,
            lwzlst=lwzlst,
            upzlst=upzlst)
        weaktrain_loader = DataLoader(dataset,
                                      batch_size=args.batch_size,
                                      shuffle=True,
                                      num_workers=args.workers,
                                      pin_memory=True)
        print(len(weaktrain_loader))
        for i, (data, target, coord, prob, pos,
                feat) in enumerate(weaktrain_loader):  # check data consistency
            # print(data.size(), target.size(), coord.size(), prob.size(), pos.size(), feat.size())
            if i >= len(trainfilelist) / args.batch_size:
                break
        weaktrain(weaktrain_loader, model2, loss2, optimizer2, net, loss,
                  epoch, optimizer, get_lr,
                  save_dir)  #, args.save_freq, save_dir)
        train(train_loader, net, loss, epoch, optimizer, get_lr,
              args.save_freq, save_dir)
        validate(val_loader, net, loss)
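
The seven pickle dumps inside the epoch loop above differ only in the filename suffix and the list being saved; under the same assumptions they can be collapsed into a single loop, for example:

# Equivalent, more compact form of the repeated pickle dumps in Example #8.
outputs = {'srs': srslst, 'cdx': cdxlst, 'cdy': cdylst, 'cdz': cdzlst,
           'dim': dimlst, 'prb': prblst, 'pos': poslst}
prefix = config['save_dir'] + 'weakinferep' + str(config['ep'])
for suffix, payload in outputs.items():
    with open(prefix + suffix, 'wb') as fp:
        pickle.dump(payload, fp)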
Example #9
def main():
    global args
    args = parser.parse_args()

    torch.manual_seed(0)
    torch.cuda.set_device(0)

    model = import_module(args.model)
    config, net, loss, get_pbb = model.get_model()
    start_epoch = args.start_epoch
    save_dir = args.save_dir

    if args.resume:
        checkpoint = torch.load(args.resume)
        if start_epoch == 0:
            start_epoch = checkpoint['epoch'] + 1
        if not save_dir:
            save_dir = checkpoint['save_dir']
        else:
            save_dir = os.path.join('results', save_dir)
        net.load_state_dict(checkpoint['state_dict'])
    else:
        if start_epoch == 0:
            start_epoch = 1
        if not save_dir:
            exp_id = time.strftime('%Y%m%d-%H%M%S', time.localtime())
            save_dir = os.path.join('results', args.model + '-' + exp_id)
        else:
            save_dir = os.path.join('results', save_dir)

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    logfile = os.path.join(save_dir, 'log')
    if args.test != 1:
        sys.stdout = Logger(logfile)
        pyfiles = [f for f in os.listdir('./') if f.endswith('.py')]
        for f in pyfiles:
            shutil.copy(f, os.path.join(save_dir, f))
    n_gpu = setgpu(args.gpu)
    args.n_gpu = n_gpu
    print("arg", args.gpu)
    print("num_gpu", n_gpu)

    net = net.cuda()
    loss = loss.cuda()
    cudnn.benchmark = True
    net = DataParallel(net)
    datadir = config_training['preprocess_result_path']
    print("datadir", datadir)
    print("anchor", config['anchors'])
    print("pad_val", config['pad_value'])
    print("th_pos_train", config['th_pos_train'])

    if args.test == 1:
        margin = 32
        sidelen = 144
        print("args.test True")
        split_comber = SplitComb(sidelen, config['max_stride'],
                                 config['stride'], margin, config['pad_value'])
        dataset = data.DataBowl3Detector(datadir,
                                         'val9.npy',
                                         config,
                                         phase='test',
                                         split_comber=split_comber)
        test_loader = DataLoader(dataset,
                                 batch_size=1,
                                 shuffle=False,
                                 num_workers=args.workers,
                                 collate_fn=data.collate,
                                 pin_memory=False)

        test(test_loader, net, get_pbb, save_dir, config, sidelen)
        return

    # net = DataParallel(net)

    train_dataset = data.DataBowl3Detector(datadir,
                                           'train_luna_9.npy',
                                           config,
                                           phase='train')
    print("len train_dataset", train_dataset.__len__())
    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.workers,
                              pin_memory=True)

    val_dataset = data.DataBowl3Detector(datadir,
                                         'val9.npy',
                                         config,
                                         phase='val')
    print("len val_dataset", val_dataset.__len__())

    val_loader = DataLoader(val_dataset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=args.workers,
                            pin_memory=True)

    margin = 32
    sidelen = 144

    split_comber = SplitComb(sidelen, config['max_stride'], config['stride'],
                             margin, config['pad_value'])
    test_dataset = data.DataBowl3Detector(datadir,
                                          'val9.npy',
                                          config,
                                          phase='test',
                                          split_comber=split_comber)

    test_loader = DataLoader(test_dataset,
                             batch_size=1,
                             shuffle=False,
                             num_workers=args.workers,
                             collate_fn=data.collate,
                             pin_memory=False)

    print("lr", args.lr)
    optimizer = torch.optim.SGD(net.parameters(),
                                args.lr,
                                momentum=0.9,
                                weight_decay=args.weight_decay)

    def get_lr(epoch):
        if epoch <= args.epochs * 0.5:
            lr = args.lr
        elif epoch <= args.epochs * 0.8:
            lr = 0.1 * args.lr
        else:
            lr = 0.01 * args.lr
        return lr

    best_val_loss = 100
    best_test_loss = 0

    for epoch in range(start_epoch, args.epochs + 1):
        print("epoch", epoch)
        train(train_loader, net, loss, epoch, optimizer, get_lr,
              args.save_freq, save_dir)
        best_val_loss = validate(val_loader, net, loss, best_val_loss, epoch,
                                 save_dir)
        if ((epoch > 150) and ((epoch + 1) % 10) == 0):
            best_test_loss = test_training(test_loader, net, get_pbb, save_dir,
                                           config, sidelen, best_test_loss,
                                           epoch, n_gpu)

        if ((epoch > 300) and ((epoch + 1) % 100) == 0):
            num_neg = train_dataset.get_neg_num_neg() + 800
            train_dataset.set_neg_num_neg(num_neg)
Example #10
def main():
    global args
    args = parser.parse_args()
    print('start!')

    torch.manual_seed(0)
    torch.cuda.set_device(0)

    model = import_module(args.model)
    config, net, loss, get_pbb = model.get_model()
    start_epoch = args.start_epoch
    save_dir = args.save_dir

    if args.resume:
        print('args.resume', args.resume)
        checkpoint = torch.load(args.resume)
        if start_epoch == 0:
            start_epoch = checkpoint['epoch'] + 1
        if not save_dir:
            save_dir = checkpoint['save_dir']
        else:
            save_dir = os.path.join('results', save_dir)
        net.load_state_dict(checkpoint['state_dict'])
    else:
        if start_epoch == 0:
            start_epoch = 1
        if not save_dir:
            exp_id = time.strftime('%Y%m%d-%H%M%S', time.localtime())
            save_dir = os.path.join('results', args.model + '-' + exp_id)
        else:
            save_dir = os.path.join('results', save_dir)

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    logfile = os.path.join(save_dir, 'log')
    if args.test != 1:
        sys.stdout = Logger(logfile)
        pyfiles = [f for f in os.listdir('./') if f.endswith('.py')]
        for f in pyfiles:
            shutil.copy(f, os.path.join(save_dir, f))
    n_gpu = setgpu(args.gpu)
    args.n_gpu = n_gpu
    net = net.cuda()
    loss = loss.cuda()
    cudnn.benchmark = True
    net = DataParallel(net)
    datadir = '/home/zhaojie/zhaojie/Lung/DSB_Code/DSB2017-master/training/Data/ForTest/Preprocess/'

    if args.test == 1:
        # margin = 32
        # sidelen = 96
        margin = 16
        sidelen = 128
        print('margin,sidelen', margin, sidelen)
        split_comber = SplitComb(sidelen, config['max_stride'],
                                 config['stride'], margin, config['pad_value'])
        dataset = data_test.DataBowl3Detector(datadir,
                                              'testnames.npy',
                                              config,
                                              phase='test',
                                              split_comber=split_comber)
        test_loader = DataLoader(dataset,
                                 batch_size=1,
                                 shuffle=False,
                                 num_workers=args.workers,
                                 collate_fn=data_test.collate,
                                 pin_memory=False)
        print('len(test_loader)', args.batch_size * len(test_loader))  #
        iter1, iter2, iter3, iter4 = next(iter(test_loader))
        # print("sample: ", len(iter1))
        # print("lable: ", iter2.size())
        # print("coord: ", iter3.size())
        # print("nzhw: ", iter4.size())
        test(test_loader, net, get_pbb, save_dir, config)
        return
Example #11
File: main.py Project: rahit/DSB2017
bbox_result_path = './bbox_result'

if not os.path.exists(bbox_result_path):
    os.mkdir(bbox_result_path)

# dirlist = [
#     f.split('_clean')[0] for f in os.listdir(prep_result_path) if '_clean' in f]

if not skip_detect:
    margin = 32
    sidelen = 144
    nodmodel_config['datadir'] = prep_result_path

    split_comber = SplitComb(sidelen,
                             nodmodel_config['max_stride'],
                             nodmodel_config['stride'],
                             margin,
                             pad_value=nodmodel_config['pad_value'])

    dataset = DataBowl3Detector(dirlist,
                                nodmodel_config,
                                phase='test',
                                split_comber=split_comber)

    test_loader = DataLoader(dataset,
                             batch_size=1,
                             shuffle=False,
                             num_workers=32,
                             pin_memory=False,
                             collate_fn=collate)
Example #12
class LNDetector(object):
    """
    The DeepLN Detector
    """
    def __init__(self, args):
        self.args = args
        # self.method = args.method
        self.config, self.net, self.loss, self.get_pbb = get_model()
        checkpoint = torch.load(args.resume_checkpoint)
        self.net.load_state_dict(checkpoint["state_dict"])
        self.split_comber = SplitComb(
            self.args.sidelen,
            config["max_stride"],
            config["stride"],
            self.args.margin,
            config["pad_value"],
        )
        if args.gpu is not None:
            self.device = torch.device("cuda")
            self.net = self.net.to(self.device)
            cudnn.benchmark = True
        pass

    def __call__(self, filename):
        # load_pkl_file = time.time()
        pkl_file = os.path.join("/tmp/data", filename, "preprocessed.pkl")
        with open(pkl_file, "rb") as handle:
            obj = pickle.load(handle)

        slicelim = obj["sliceim"]

        extend_box = obj["extendbox"]
        spacing = obj["spacing"]
        img_shape = obj["shape"]

        stride = config["stride"]
        pad_value = config["pad_value"]
        imgs = slicelim.copy()
        nz, nh, nw = imgs.shape[1:]
        # pad each spatial dimension up to a multiple of the network stride
        pz = int(np.ceil(float(nz) / stride)) * stride
        ph = int(np.ceil(float(nh) / stride)) * stride
        pw = int(np.ceil(float(nw) / stride)) * stride
        imgs = np.pad(
            imgs,
            [[0, 0], [0, pz - nz], [0, ph - nh], [0, pw - nw]],
            "constant",
            constant_values=pad_value,
        )

        # normalized coordinate grid matching the downsampled (stride-reduced) output resolution
        xx, yy, zz = np.meshgrid(
            np.linspace(-0.5, 0.5, int(imgs.shape[1] / stride)),
            np.linspace(-0.5, 0.5, int(imgs.shape[2] / stride)),
            np.linspace(-0.5, 0.5, int(imgs.shape[3] / stride)),
            indexing="ij",
        )
        coord = np.concatenate(
            [xx[np.newaxis, ...], yy[np.newaxis, ...], zz[np.newaxis, :]],
            0).astype("float32")
        imgs, nzhw = self.split_comber.split(imgs)
        coord2, nzhw2 = self.split_comber.split(
            coord,
            side_len=self.split_comber.side_len / stride,
            max_stride=self.split_comber.max_stride / stride,
            margin=self.split_comber.margin / stride,
        )
        assert np.all(nzhw == nzhw2), "split imgs not equal coords"
        imgs = torch.from_numpy((imgs.astype(np.float32) - 128) / 128)
        coord2 = torch.from_numpy(coord2)
        nzhw = np.array(nzhw)

        nodes = self.detect(imgs,
                            coord2,
                            nzhw,
                            extend_box,
                            spacing,
                            isfliped=False)

        return nodes, img_shape, spacing

    def detect(self, imgs, coord2, nzhw, extend_box, spacing, isfliped=False):
        net = self.net
        net.eval()
        outputlist = []
        # batch_size = 2
        # start = time.time()
        num_batches = int(imgs.shape[0] / self.args.batch_size)
        num_pass = (num_batches if num_batches *
                    self.args.batch_size == imgs.shape[0] else num_batches + 1)
        # start_num_pass = time.time()
        for i in range(num_pass):
            end_idxs = min((i + 1) * self.args.batch_size, imgs.shape[0])
            if self.args.gpu is not None:
                input_x = imgs[i * self.args.batch_size:end_idxs].to(
                    self.device)
                # input_coord = Variable(coord2[i*self.args.batch_size:end_idxs]).cuda()
                input_coord = coord2[i * self.args.batch_size:end_idxs].to(
                    self.device)
                output = net(input_x, input_coord)
                outputlist.append(output.data.cpu().numpy())
            else:
                input_x = Variable(imgs[i * self.args.batch_size:end_idxs])
                input_coord = Variable(coord2[i *
                                              self.args.batch_size:end_idxs])
                output = net(input_x, input_coord)
                outputlist.append(output.data.numpy())

        output = np.concatenate(outputlist, 0)
        output = self.split_comber.combine(output, nzhw=nzhw)
        thresh = -3
        pbb, mask = self.get_pbb(output, thresh, ismask=True)
        # if spacing[0] == 1.0:
        #     pbb1 = pbb[pbb[:,0]>3]
        # elif spacing[0] == 5.0:
        #     print("HC CT pbb")
        #     pbb1 = pbb[pbb[:,0]>3]
        pbb1 = pbb[pbb[:, 0] > 3]
        pbb1 = pbb1[np.argsort(-pbb1[:, 0])]
        # print(pbb1)
        nms_th = 0.05
        pbb1 = nms(pbb1, nms_th)
        if pbb1.shape[0] > 10:
            pbb1 = pbb1[0:10]
        # print("New in Jianyong")
        # print("The time for calculating ", time.time() - start)
        # start = time.time()
        # import pdb; pdb.set_trace()

        nodes = pbb2axis(pbb1, extend_box, spacing, isfliped=False)
        # print("The post processing ", time.time() - start)
        return nodes
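
detect() keeps proposals with a score above 3, sorts them by score, and removes overlaps with nms at a 0.05 threshold. A minimal sketch of a greedy 3D NMS consistent with that call, assuming each pbb row is [probability, z, y, x, diameter] (an assumption; the snippet does not show what get_pbb returns):

# Minimal sketch of a greedy 3D NMS over [prob, z, y, x, d] rows.
import numpy as np

def cube_iou(a, b):
    # intersection-over-union of two axis-aligned cubes centred at (z, y, x)
    ra, rb = a[4] / 2.0, b[4] / 2.0
    inter = 1.0
    for i in range(1, 4):
        lo = max(a[i] - ra, b[i] - rb)
        hi = min(a[i] + ra, b[i] + rb)
        if hi <= lo:
            return 0.0
        inter *= hi - lo
    union = a[4] ** 3 + b[4] ** 3 - inter
    return inter / union

def nms_sketch(pbb, nms_th):
    keep = []
    for cand in pbb:  # pbb is assumed already sorted by descending score
        if all(cube_iou(cand, k) < nms_th for k in keep):
            keep.append(cand)
    return np.asarray(keep)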
Example #13
def main():
    global args
    args = parser.parse_args()

    torch.manual_seed(0)
    torch.cuda.set_device(0)

    model = import_module(args.model)
    config, net, loss, get_pbb = model.get_model()
    net.load_state_dict(
        torch.load(
            "/data/wzeng/DSB_3/training/detector/results/res18_extend/048.ckpt"
        )['state_dict'])
    model_path = "/data/wzeng/DSB_3/training/detector/results/res18_extend/048.ckpt"
    print('loading model form ' + model_path)

    #model_dict = net.state_dict()
    #pretrained_dict = torch.load("/data/wzeng/DSB_3/training/detector/results/res18_extend/081.ckpt")['state_dict']
    #pretrained_dict = {k: v for k, v in model_dict.items() if k in pretrained_dict}
    #model_dict.update(pretrained_dict)
    #net.load_state_dict(model_dict)

    #model_path = "/data/wzeng/DSB_3/training/detector/results/res18_extend/081.ckpt"
    #print('loading model form ' + model_path)

    start_epoch = args.start_epoch
    save_dir = args.save_dir

    if args.resume:
        #         print('start resume')
        print('loading model form ' + args.resume)
        checkpoint = torch.load(args.resume)
        if start_epoch == 0:
            start_epoch = checkpoint['epoch'] + 1
        if not save_dir:
            save_dir = checkpoint['save_dir']
        else:
            save_dir = os.path.join('results', save_dir)
        net.load_state_dict(checkpoint['state_dict'])
        # print('resume end')
    else:
        if start_epoch == 0:
            start_epoch = 1
        if not save_dir:
            exp_id = time.strftime('%Y%m%d-%H%M%S', time.localtime())
            save_dir = os.path.join('results', args.model + '-' + exp_id)
        else:
            save_dir = os.path.join('results', save_dir)

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    logfile = os.path.join(save_dir, 'log')
    if args.test != 1:
        sys.stdout = Logger(logfile)
        pyfiles = [f for f in os.listdir('./') if f.endswith('.py')]
        for f in pyfiles:
            shutil.copy(f, os.path.join(save_dir, f))
    n_gpu = setgpu(args.gpu)
    args.n_gpu = n_gpu
    net = net.cuda()
    loss = loss.cuda()
    cudnn.benchmark = True
    net = DataParallel(net)
    datadir = config_training['preprocess_result_path']

    if args.test == 1:
        margin = 16
        sidelen = 128
        #margin = 32
        #sidelen = 144
        #         print('dataloader....')
        split_comber = SplitComb(sidelen, config['max_stride'],
                                 config['stride'], margin, config['pad_value'])
        dataset = data.DataBowl3Detector(datadir,
                                         'new_test.npy',
                                         config,
                                         phase='test',
                                         split_comber=split_comber)
        test_loader = DataLoader(dataset,
                                 batch_size=1,
                                 shuffle=False,
                                 num_workers=args.workers,
                                 collate_fn=data.collate,
                                 pin_memory=False)
        #         print('start testing.....')
        test(test_loader, net, get_pbb, save_dir, config)
        return

    #net = DataParallel(net)

    dataset = data.DataBowl3Detector(datadir,
                                     'train_val.npy',
                                     config,
                                     phase='train')
    train_loader = DataLoader(dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.workers,
                              pin_memory=True)

    dataset = data.DataBowl3Detector(datadir,
                                     'new_test.npy',
                                     config,
                                     phase='val')
    val_loader = DataLoader(dataset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=args.workers,
                            pin_memory=True)

    optimizer = torch.optim.SGD(net.parameters(),
                                args.lr,
                                momentum=0.9,
                                weight_decay=args.weight_decay)

    #optimizer = torch.optim.Adam(
    #    net.parameters(),
    #    args.lr,
    #    weight_decay = args.weight_decay)

    def get_lr(epoch):
        if epoch <= args.epochs * 0.5:
            lr = args.lr
        elif epoch <= args.epochs * 0.8:
            lr = 0.1 * args.lr
        else:
            lr = 0.01 * args.lr
        return lr

    for epoch in range(start_epoch, args.epochs + 1):
        train(train_loader, net, loss, epoch, optimizer, get_lr,
              args.save_freq, save_dir)
        validate(val_loader, net, loss)
Example #14
import os
import sys
os.environ['PYTHONPATH'] = '%s:%s' % ('/home/caffe/python', '/workspace/pai')
import sys
sys.path.append('/home/caffe/python')
sys.path.append('/workspace/pai')
import caffe

from data import DataBowl3Detector
from test_detect import test_detect
from split_combine import SplitComb
from test_config import test_config as config

process = 'test'
if config['detector']:
    net = caffe.Net(config['test_prototxt'], config['caffe_model'], caffe.TEST)
    split_comber = SplitComb(config)
    dataset = DataBowl3Detector(config,
                                process=process,
                                split_comber=split_comber)
    test_detect(dataset, net, config=config, process=process)
Example #15
def main():
    global args
    args = parser.parse_args()
    config_training = import_module(args.config)
    config_training = config_training.config
    # from config_training import config as config_training
    torch.manual_seed(0)
    torch.cuda.set_device(0)

    model = import_module(args.model)
    config, net, loss, get_pbb = model.get_model()
    start_epoch = args.start_epoch
    save_dir = args.save_dir
    
#    args.resume = True
    if args.resume:
        checkpoint = torch.load(args.resume)
        # if start_epoch == 0:
        #     start_epoch = checkpoint['epoch'] + 1
        # if not save_dir:
        #     save_dir = checkpoint['save_dir']
        # else:
        #     save_dir = os.path.join('results',save_dir)
        net.load_state_dict(checkpoint['state_dict'])
    # else:
    if start_epoch == 0:
        start_epoch = 1
    if not save_dir:
        exp_id = time.strftime('%Y%m%d-%H%M%S', time.localtime())
        save_dir = os.path.join('results', args.model + '-' + exp_id)
    else:
        save_dir = os.path.join('results',save_dir)
    
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    logfile = os.path.join(save_dir,'log')
    if args.test!=1:
        sys.stdout = Logger(logfile)
        pyfiles = [f for f in os.listdir('./') if f.endswith('.py')]
        for f in pyfiles:
            shutil.copy(f,os.path.join(save_dir,f))
    n_gpu = setgpu(args.gpu)
    args.n_gpu = n_gpu
    net = net.cuda()
    loss = loss.cuda()
    cudnn.benchmark = False                     #True
    net = DataParallel(net)
    traindatadir = config_training['train_preprocess_result_path']
    valdatadir = config_training['val_preprocess_result_path']
    testdatadir = config_training['test_preprocess_result_path']
    trainfilelist = []
    # with open("/home/mpadmana/anaconda3/envs/DeepLung_original/luna_patient_names/luna_train_list.pkl", 'rb') as f:
    #     trainfilelist = pickle.load(f)
    with open("/home/mpadmana/anaconda3/envs/DeepLung_original/methodist_patient_names/methodist_train.pkl", 'rb') as f:
        trainfilelist = pickle.load(f)

    valfilelist = []
    # with open("/home/mpadmana/anaconda3/envs/DeepLung_original/luna_patient_names/luna_val_list.pkl", 'rb') as f:
    #     valfilelist = pickle.load(f)
    with open("/home/mpadmana/anaconda3/envs/DeepLung_original/methodist_patient_names/methodist_val.pkl", 'rb') as f:
        valfilelist = pickle.load(f)

    testfilelist = []
    # with open("/home/mpadmana/anaconda3/envs/DeepLung_original/luna_patient_names/luna_test_list.pkl", 'rb') as f:
    #     testfilelist = pickle.load(f)
    with open("/home/mpadmana/anaconda3/envs/DeepLung_original/methodist_patient_names/methodist_test.pkl", 'rb') as f:
        testfilelist = pickle.load(f)
    testfilelist = ['download20180608140526download20180608140500001_1_3_12_30000018060618494775800001943']
    if args.test == 1:

        margin = 32
        sidelen = 144
        import data
        split_comber = SplitComb(sidelen,config['max_stride'],config['stride'],margin,config['pad_value'])
        dataset = data.DataBowl3Detector(
            testdatadir,
            testfilelist,
            config,
            phase='test',
            split_comber=split_comber)
        test_loader = DataLoader(
            dataset,
            batch_size = 1,
            shuffle = False,
            num_workers = 0,
            collate_fn = data.collate,
            pin_memory=False)

        for i, (data, target, coord, nzhw) in enumerate(test_loader): # check data consistency
            if i >= len(testfilelist)/args.batch_size:
                break
        
        test(test_loader, net, get_pbb, save_dir,config)

        return
    #net = DataParallel(net)
    from detector import data
    print(len(trainfilelist))
    dataset = data.DataBowl3Detector(
        traindatadir,
        trainfilelist,
        config,
        phase = 'train')
    train_loader = DataLoader(
        dataset,
        batch_size = args.batch_size,
        shuffle = True,
        num_workers = 0,
        pin_memory=True)

    dataset = data.DataBowl3Detector(
        valdatadir,
        valfilelist,
        config,
        phase = 'val')
    val_loader = DataLoader(
        dataset,
        batch_size = args.batch_size,
        shuffle = False,
        num_workers = 0,
        pin_memory=True)

    for i, (data, target, coord) in enumerate(train_loader): # check data consistency
        if i >= len(trainfilelist)/args.batch_size:
            break

    for i, (data, target, coord) in enumerate(val_loader): # check data consistency
        if i >= len(valfilelist)/args.batch_size:
            break

    optimizer = torch.optim.SGD(
        net.parameters(),
        args.lr,
        momentum = 0.9,
        weight_decay = args.weight_decay)
    
    def get_lr(epoch):
        if epoch <= args.epochs * 1/3:    # was 0.5
            lr = args.lr
        elif epoch <= args.epochs * 2/3:  # was 0.8
            lr = 0.1 * args.lr
        elif epoch <= args.epochs * 0.8:
            lr = 0.05 * args.lr
        else:
            lr = 0.01 * args.lr
        return lr
    

    for epoch in range(start_epoch, args.epochs + 1):
        train(train_loader, net, loss, epoch, optimizer, get_lr, args.save_freq, save_dir)
        validate(val_loader, net, loss)
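A note on the get_lr schedule above: it is only a lookup over epochs, and the train() routine (not shown in this snippet) is expected to push the returned value into the optimizer at the start of every epoch. Below is a minimal sketch of that pattern, assuming a standard torch.optim optimizer; with, say, args.lr = 0.01 and args.epochs = 100 it yields 0.01 for epochs 1-33, 0.001 for 34-66, 0.0005 for 67-80 and 0.0001 afterwards.

# Hypothetical helper (not part of the original script): apply the epoch-based
# schedule returned by get_lr to every parameter group of the optimizer.
def set_lr(optimizer, epoch, get_lr):
    lr = get_lr(epoch)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr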
예제 #16
0
def main():
    global args
    args = parser.parse_args()

    seed = 0
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.set_device(0)

    model = import_module(args.model)
    config, net, loss, get_pbb = model.get_model()
    start_epoch = args.start_epoch
    save_dir = args.save_dir

    if args.resume:
        checkpoint = torch.load(args.resume)
        if start_epoch == 0:
            start_epoch = checkpoint['epoch'] + 1
        if not save_dir:
            save_dir = checkpoint['save_dir']
        else:
            save_dir = os.path.join('results', save_dir)
        net.load_state_dict(checkpoint['state_dict'])
    else:
        if start_epoch == 0:
            start_epoch = 1
        if not save_dir:
            exp_id = time.strftime('%Y%m%d-%H%M%S', time.localtime())
            save_dir = os.path.join('results', args.model + '-' + exp_id)
        else:
            save_dir = os.path.join('results', save_dir)

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    logfile = os.path.join(save_dir, 'log')

    # if training, save files to know how training was done
    if args.test != 1:
        sys.stdout = Logger(logfile)
        # sys.stdout = logging.getLogger(logfile)
        print(sys.argv)
        pyfiles = [f for f in os.listdir('./') if f.endswith('.py')]
        for f in pyfiles:
            shutil.copy(f, os.path.join(save_dir, f))
        shutil.copy('config_training.py', save_dir)

    n_gpu = setgpu(args.gpu)
    args.n_gpu = n_gpu
    net = net.cuda()
    loss = loss.cuda()
    cudnn.benchmark = True
    net = DataParallel(net)
    datadir = config_training['preprocess_result_path'] if args.data is None else args.data

    if args.test == 1:
        margin = 32
        sidelen = 144

        split_comber = SplitComb(sidelen, config['max_stride'],
                                 config['stride'], margin, config['pad_value'])

        test_set_file = args.test_filename

        dataset = data.DataBowl3Detector(datadir,
                                         test_set_file,
                                         config,
                                         phase='test',
                                         split_comber=split_comber)
        test_loader = DataLoader(dataset,
                                 batch_size=1,
                                 shuffle=False,
                                 num_workers=args.workers,
                                 collate_fn=data.collate,
                                 pin_memory=False)

        test(test_loader, net, get_pbb, save_dir, config, args.test_set)
        return

    #net = DataParallel(net)

    dataset = data.DataBowl3Detector(datadir,
                                     args.train_filename,
                                     config,
                                     phase='train')
    train_loader = DataLoader(dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.workers,
                              pin_memory=True)

    dataset = data.DataBowl3Detector(datadir,
                                     args.val_filename,
                                     config,
                                     phase='val')
    val_loader = DataLoader(dataset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=args.workers,
                            pin_memory=True)

    if args.optim == 'adam':
        optimizer = torch.optim.Adam(net.parameters())
    elif args.optim == 'sgd':
        optimizer = torch.optim.SGD(net.parameters(),
                                    args.lr,
                                    momentum=0.9,
                                    weight_decay=args.weight_decay)

    def get_lr(epoch):
        if epoch <= args.epochs * 0.5:
            lr = args.lr
        elif epoch <= args.epochs * 0.8:
            lr = 0.1 * args.lr
        else:
            lr = 0.01 * args.lr
        return lr

    for epoch in range(start_epoch, args.epochs + 1):
        train(train_loader, net, loss, epoch, optimizer, get_lr,
              args.save_freq, save_dir)
        validate(val_loader, net, loss)
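The test branch above, like the other examples, builds a SplitComb with sidelen = 144 and margin = 32 so that a whole preprocessed CT volume can be pushed through the detector as overlapping cubes and the per-cube outputs stitched back together; test() does this internally. The following is only a rough sketch of that split/predict/combine cycle: the split()/combine() method names, the coords list of matching coordinate cubes, and the get_pbb call are assumptions based on DeepLung-style code, not lines from this script.

# Hypothetical sketch: run one preprocessed volume through the detector in pieces.
splits, nzhw = split_comber.split(volume)            # overlapping 144^3 cubes with a 32-voxel margin
outputs = []
for cube, coord in zip(splits, coords):              # coords: per-cube coordinate grids from the dataset
    inp = torch.from_numpy(cube[np.newaxis, ...]).float().cuda()
    crd = torch.from_numpy(coord[np.newaxis, ...]).float().cuda()
    with torch.no_grad():
        out = net(inp, crd)
    outputs.append(out.data.cpu().numpy())
output = split_comber.combine(np.concatenate(outputs, 0), nzhw=nzhw)  # reassemble the full-volume output
pbb, mask = get_pbb(output, -3, True)                # decode proposed bounding boxes above threshold -3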
예제 #17
0
def main():
    global args
    args = parser.parse_args()

    bestLoss = 1000

    torch.manual_seed(0)
    torch.cuda.set_device(0)

    model = import_module(args.model)
    config, net, loss, get_pbb = model.get_model()
    start_epoch = args.start_epoch
    save_dir = args.save_dir

    if args.resume:
        print("=> loading checkpoint '{}'".format(args.resume))
        checkpoint = torch.load(save_dir + 'detector_' + args.resume)
        start_epoch = checkpoint['epoch']
        net.load_state_dict(checkpoint['state_dict'])

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    logfile = os.path.join(save_dir, 'log')
    if args.test != 1:
        sys.stdout = Logger(logfile)
        pyfiles = [f for f in os.listdir('./') if f.endswith('.py')]
        for f in pyfiles:
            shutil.copy(f, os.path.join(save_dir, f))
    n_gpu = setgpu(args.gpu)
    args.n_gpu = n_gpu
    net = net.cuda()
    loss = loss.cuda()
    cudnn.benchmark = True
    net = DataParallel(net)
    datadir = config_training['preprocess_result_path']

    luna_train = np.load('./luna_train.npy')
    luna_test = np.load('./luna_test.npy')

    if args.test == 1:
        print("start test")
        margin = 32
        sidelen = 144

        split_comber = SplitComb(sidelen, config['max_stride'],
                                 config['stride'], margin, config['pad_value'])
        dataset = LungNodule3Ddetector(datadir,
                                       luna_test,
                                       config,
                                       phase='test',
                                       split_comber=split_comber)
        test_loader = DataLoader(dataset,
                                 batch_size=1,
                                 shuffle=False,
                                 num_workers=args.workers,
                                 collate_fn=collate,
                                 pin_memory=False)

        test(test_loader, net, get_pbb, save_dir, config)
        return

    dataset = LungNodule3Ddetector(datadir, luna_train, config, phase='train')
    train_loader = DataLoader(dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.workers,
                              pin_memory=True)

    dataset = LungNodule3Ddetector(datadir, luna_test, config, phase='val')
    val_loader = DataLoader(dataset,
                            batch_size=16,
                            shuffle=False,
                            num_workers=args.workers,
                            pin_memory=True)

    optimizer = torch.optim.SGD(net.parameters(),
                                args.lr,
                                momentum=0.9,
                                weight_decay=args.weight_decay)

    def get_lr(epoch):
        if epoch <= args.epochs * 0.2:
            lr = args.lr
        elif epoch <= args.epochs * 0.4:
            lr = 0.1 * args.lr
        elif epoch <= args.epochs * 0.6:
            lr = 0.05 * args.lr
        else:
            lr = 0.01 * args.lr
        return lr

    for epoch in range(start_epoch, args.epochs + 1):
        train(train_loader, net, loss, epoch, optimizer, get_lr, save_dir)
        print("finished epoch {}".format(epoch))
        valiloss = validate(val_loader, net, loss)

        if bestLoss > valiloss:
            bestLoss = valiloss
            state_dict = net.module.state_dict()
            for key in state_dict.keys():
                state_dict[key] = state_dict[key].cpu()

            torch.save(
                {
                    'epoch': epoch + 1,
                    'save_dir': save_dir,
                    'state_dict': state_dict,
                    'args': args
                }, os.path.join(save_dir, 'detector_%03d.ckpt' % epoch))
            print("save model on epoch %d" % epoch)
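The checkpoint written above stores the unwrapped module's state_dict (moved to the CPU) together with the epoch, save_dir and args, under the name detector_%03d.ckpt. A minimal sketch of loading such a file back for the resume branch at the top of this example follows; the best_epoch variable is an assumption, everything else mirrors the save call.

# Hypothetical sketch: restore the best checkpoint saved by the training loop above.
ckpt_path = os.path.join(save_dir, 'detector_%03d.ckpt' % best_epoch)  # best_epoch assumed known
checkpoint = torch.load(ckpt_path, map_location='cpu')
start_epoch = checkpoint['epoch']                  # stored as epoch + 1, so training resumes right after it
net.load_state_dict(checkpoint['state_dict'])      # load into the bare module, before DataParallel wrapping
net = net.cuda()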
예제 #18
0
def main():
    global args
    args = parser.parse_args()

    torch.manual_seed(0)  # fix the random seed

    model = import_module(args.model)  # dynamically import the net_res18.py module
    config, net, loss, get_pbb = model.get_model()  # network config, network, loss, bounding-box decoder
    start_epoch = args.start_epoch
    weights_save_dir = config_train['weights_path']  # directory for saving network weights (res18)
    val_result_dir = config_train['val_result_path']

    if args.resume:  # load previously trained weights and keep training; use this mode for fine-tuning
        checkpoint = torch.load(args.resume)
        if start_epoch == 0:
            start_epoch = checkpoint['epoch'] + 1  # keep counting from the previous epoch
        net.load_state_dict(checkpoint['state_dict'])  # load the model weights
    else:  # training from scratch
        if start_epoch == 0:
            start_epoch = 1  # start counting epochs from 1

    if not os.path.exists(weights_save_dir):
        os.makedirs(weights_save_dir)
    if not os.path.exists(val_result_dir):
        os.makedirs(val_result_dir)

    net = net.cuda()  # move the network to the GPU
    loss = loss.cuda()  # move the loss to the GPU
    cudnn.benchmark = True  # enable the cudnn autotuner for speed
    net = DataParallel(net)  # with multiple GPUs, each batch is split and processed in parallel
    train_datadir = config_train['train_preprocess_result_path']  # path to the preprocessed training data
    val_datadir = config_train['val_preprocess_result_path']  # path to the preprocessed validation data

    # this block is used only when args.test is set to 1
    if args.test == 1:  # the train split is used for training, so the val split serves here for evaluation and hyper-parameter tuning
        margin = 32
        sidelen = 144

        split_comber = SplitComb(sidelen, config['max_stride'],
                                 config['stride'], margin, config['pad_value'])
        val_dataset = DataBowl3Detector(val_datadir,
                                        config,
                                        phase='test',
                                        split_comber=split_comber)
        test_loader = DataLoader(  # standard PyTorch DataLoader
            val_dataset,
            batch_size=1,
            shuffle=False,
            num_workers=args.workers,
            collate_fn=collate,  # custom collate for the split test patches; see the sketch after this example
            pin_memory=False)

        test_detect(test_loader, net, get_pbb, val_result_dir, config,
                    args.ntest)
        return

    # prepare the training and validation data; swap in your own data when fine-tuning
    train_dataset = DataBowl3Detector(train_datadir, config, phase='train')

    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.workers,
                              pin_memory=True)

    if (args.validation == 1):
        val_dataset = DataBowl3Detector(  # validation data
            val_datadir, config, phase='val')
        val_loader = DataLoader(val_dataset,
                                batch_size=args.batch_size,
                                shuffle=False,
                                num_workers=args.workers,
                                pin_memory=True)

    optimizer = torch.optim.SGD(  # gradient descent, learning rate 0.01
        net.parameters(),
        args.lr,
        momentum=0.9,
        weight_decay=args.weight_decay)

    def get_lr(epoch):
        if epoch <= args.epochs * 0.5:  # the epochs fall into three stages, with lr 0.01, 0.001 and 0.0001
            lr = args.lr
        elif epoch <= args.epochs * 0.8:
            lr = 0.1 * args.lr
        else:
            lr = 0.01 * args.lr
        return lr

    # each epoch can use different parameters for training and validation
    for epoch in range(start_epoch, args.epochs + 1):
        train(train_loader, net, loss, epoch, optimizer, get_lr,
              args.save_freq, weights_save_dir)
        if (args.validation == 1):
            validate(val_loader, net, loss)
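The collate_fn noted above is needed because, in the test phase, each dataset item is a list of split sub-volumes plus bookkeeping arrays of different sizes, so PyTorch's default stacking collate would fail. The sketch below shows the kind of type-dispatching collate these DeepLung-style scripts typically use; it illustrates the idea and is not necessarily this repository's exact implementation.

# Hypothetical sketch of a collate for variable-size test items.
import collections.abc
import numpy as np
import torch

def collate(batch):
    if torch.is_tensor(batch[0]):
        return [b.unsqueeze(0) for b in batch]       # keep tensors separate instead of stacking them
    elif isinstance(batch[0], np.ndarray):
        return batch                                 # pass numpy arrays (e.g. nzhw) through untouched
    elif isinstance(batch[0], int):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], collections.abc.Iterable):
        transposed = zip(*batch)                     # regroup the (data, target, coord, nzhw) fields
        return [collate(samples) for samples in transposed]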
예제 #19
0
    valsplit = os.listdir(valpath)

#det_res = net.forward(prep_result_path)
#np.save('det_res.npy', det_res)

bbox_result_path = './bbox_result'
if not os.path.exists(bbox_result_path):
    os.mkdir(bbox_result_path)

if not skip_detect:
    margin = 32
    sidelen = 144
    config1['datadir'] = prep_result_path
    split_comber = SplitComb(sidelen,
                             config1['max_stride'],
                             config1['stride'],
                             margin,
                             pad_value=config1['pad_value'])

    dataset = DataBowl3Detector(testsplit,
                                config1,
                                phase='test',
                                split_comber=split_comber)
    test_loader = DataLoader(dataset,
                             batch_size=1,
                             shuffle=False,
                             num_workers=32,
                             pin_memory=False,
                             collate_fn=collate)

    test_detect(test_loader,
예제 #20
0
def main():
    global args
    args = parser.parse_args()

    torch.manual_seed(0)
    torch.cuda.set_device(0)

    model = import_module(args.model)
    config, net, loss, get_pbb = model.get_model()
    start_epoch = args.start_epoch
    save_dir = args.save_dir

    if args.resume:
        checkpoint = torch.load(args.resume)
        if start_epoch == 0:
            start_epoch = checkpoint['epoch'] + 1
        if not save_dir:
            save_dir = checkpoint['save_dir']
        else:
            save_dir = os.path.join('results', save_dir)
        net.load_state_dict(checkpoint['state_dict'])
    else:
        if start_epoch == 0:
            start_epoch = 1
        if not save_dir:
            exp_id = time.strftime('%Y%m%d-%H%M%S', time.localtime())
            save_dir = os.path.join('results', args.model + '-' + exp_id)
        else:
            save_dir = os.path.join('results', save_dir)

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    logfile = os.path.join(save_dir, 'log')
    if args.test != 1:
        sys.stdout = Logger(logfile)
        pyfiles = [f for f in os.listdir('./') if f.endswith('.py')]
        for f in pyfiles:
            shutil.copy(f, os.path.join(save_dir, f))
    n_gpu = setgpu(args.gpu)
    args.n_gpu = n_gpu
    net = net.cuda()
    loss = loss.cuda()
    cudnn.benchmark = True
    net = DataParallel(net)
    datadir = config_training['preprocess_path']

    if args.test == 1:
        margin = 32
        sidelen = 144

        split_comber = SplitComb(sidelen, config['max_stride'],
                                 config['stride'], margin, config['pad_value'])
        # the test branch is commented out for now: validation already covers it, and the original demo (main.py in the project root) is itself an inference run
        # dataset = data.DataBowl3Detector(
        #     datadir,
        #     'full.npy',
        #     config,
        #     phase='test',
        #     split_comber=split_comber)
        # test_loader = DataLoader(
        #     dataset,
        #     batch_size = 1,
        #     shuffle = False,
        #     num_workers = args.workers,
        #     collate_fn = data.collate,
        #     pin_memory=False)
        #
        # test(test_loader, net, get_pbb, save_dir,config)
        # return

    #net = DataParallel(net)

    dataset = data.DataBowl3Detector(
        datadir,
        config_training['preprocess_path'],  # fix 
        config,
        phase='train')
    train_loader = DataLoader(dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.workers,
                              pin_memory=True)

    # dataset = data.DataBowl3Detector(
    #     datadir,
    #     'valsplit.npy',
    #     config,
    #     phase = 'val')
    # val_loader = DataLoader(
    #     dataset,
    #     batch_size = args.batch_size,
    #     shuffle = False,
    #     num_workers = args.workers,
    #     pin_memory=True)

    optimizer = torch.optim.SGD(net.parameters(),
                                args.lr,
                                momentum=0.9,
                                weight_decay=args.weight_decay)

    def get_lr(epoch):
        if epoch <= args.epochs * 0.5:
            lr = args.lr
        elif epoch <= args.epochs * 0.8:
            lr = 0.1 * args.lr
        else:
            lr = 0.01 * args.lr
        return lr

    for epoch in range(start_epoch, args.epochs + 1):
        # TypeError: only size-1 arrays can be converted to Python scalars (arguments passed incorrectly)
        # IndexError: invalid index of a 0-dim tensor. Use tensor.item() to convert a 0-dim tensor to a Python number (PyTorch version issue)
        train(train_loader, net, loss, epoch, optimizer, get_lr,
              args.save_freq, save_dir)
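Several of these examples redirect sys.stdout to Logger(logfile) so that everything printed during training also ends up in the run's log file, but the Logger class itself never appears in the snippets. Below is a minimal tee-style sketch of what such a class usually looks like; it is an assumption, not the repository's actual implementation.

# Hypothetical sketch of a stdout "tee" logger like the Logger(logfile) used above.
import sys

class Logger(object):
    def __init__(self, logfile):
        self.terminal = sys.stdout
        self.log = open(logfile, 'a')

    def write(self, message):
        self.terminal.write(message)   # still echo to the console
        self.log.write(message)        # and append to the log file

    def flush(self):
        self.terminal.flush()
        self.log.flush()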