Code Example #1
import os

import h5py
import numpy as np
import torch
from torchvision.models import vgg16

# NOTE: create_image_list, create_loader, opt, and logger are assumed to be
# defined elsewhere in the project; this excerpt does not show them.


def deep_feature_extraction():
    """Run every frame through VGG16 and store the outputs per video in an HDF5 file."""
    batch_size = 400
    images_paths, cutoffs = create_image_list()
    logger.info('load image paths done. #video: {:5d}'.format(len(cutoffs)))

    dataloader = create_loader(images_paths, batch_size)
    logger.info('create data loader done')

    # ImageNet-pretrained VGG16; its outputs are saved as the per-frame features
    model = vgg16(pretrained=True).to(opt['device'])
    model.eval()
    logger.info('create cnn-model done')

    count = 0
    buffer = []
    filepath = os.path.join(opt['featurepath'], 'frames-features.h5')
    # open the HDF5 file with a context manager so it is closed even on the
    # early-return error paths below
    with h5py.File(filepath, mode='w') as fw, torch.no_grad():
        for batch in dataloader:
            images, ind = batch
            images, ind = images.to(opt['device']), ind.numpy()
            features = list(model(images).cpu().numpy())
            buffer += features

            # flush every video whose frames are now fully buffered
            while count < len(cutoffs) and len(buffer) >= cutoffs[count][0]:
                offset = cutoffs[count][0]

                # sanity check: the frame right after the cutoff must belong to a
                # different video, otherwise the split point is wrong
                next_video = ''
                try:
                    curr_ind = ind[0] - len(buffer) + len(features) + offset - 1
                    next_ind = curr_ind + 1

                    curr_video = images_paths[curr_ind].split('/')[-2]
                    next_video = images_paths[next_ind].split('/')[-2]
                    if curr_video == next_video:
                        logger.info('Split Error: {}/{}'.format(curr_video, next_video))
                        return
                except IndexError:
                    # the last video has no successor frame to compare against
                    pass

                save_features = buffer[:offset]
                if len(save_features) != cutoffs[count][0]:
                    logger.info('Length Error: {}/{}'.format(len(save_features), cutoffs[count][0]))
                    return
                save_features = np.array(save_features)
                video = cutoffs[count][1]
                fw.create_dataset(name=video, data=save_features)
                if opt['verbose'] and count % opt['output_period'] == 0:
                    logger.info('CNT: {:6d}/{:6d}, Video: {}, Next Video: {}'.format(
                            count, len(cutoffs), video, next_video
                        ))

                # drop the saved frames and advance to the next video
                buffer = buffer[offset:]
                count += 1
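
For reference, here is a minimal sketch (not part of the original source) of how the per-video features written above might be read back. It assumes the frames-features.h5 layout produced by deep_feature_extraction() and the same opt['featurepath'] setting; the helper name load_video_features is hypothetical.

import os

import h5py

# Hedged sketch: read one video's features back from the frames-features.h5
# file written by deep_feature_extraction() above; load_video_features is a
# hypothetical helper, not part of the original code.
def load_video_features(featurepath, video):
    filepath = os.path.join(featurepath, 'frames-features.h5')
    with h5py.File(filepath, mode='r') as fr:
        # each dataset holds the stacked frame features for one video
        return fr[video][()]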
Code Example #2
    # 'experiment/EfficientB4_parallel_data2203/checkpoint/best.ckpt',
    # 'experiment/ResneSt101_parallel_data2203/checkpoint/best.ckpt'
    ]

id_leaf = [2,4,5,6,7,8]
id_obs = [0,1,2,3,4,5,6,7,8,9,10,11,12]

# one prediction/label array per ensemble member will be collected here
preds_stack_val = []
labels_stack_val = []
preds_stack_test = []
labels_stack_test = []

# run each ensemble member once and collect its predictions on the
# validation and test sets
for i in range(len(list_batch)):
    # per-member batch size and input resolution
    cfg.batch_size = list_batch[i]
    cfg.long_side = list_res[i]
    train_loader = create_loader(cfg.train_csv, data_dir, cfg, mode='train', dicom=False, type=cfg.type)
    val_loader = create_loader(cfg.dev_csv, data_dir, cfg, mode='val', dicom=False, type=cfg.type)
    test_loader = create_loader(cfg.test_csv, data_dir, cfg, mode='test', dicom=False, type=cfg.type)

    print(f'{model_names[i]}-{ids[i]}:')
    # configure this ensemble member and restore its checkpoint
    cfg.backbone = model_names[i]
    cfg.id = ids[i]
    cfg.ckp_path = ckp_paths[i]
    chexpert_model = CheXpert_model(cfg, loss_func, metrics_dict)
    chexpert_model.load_ckp(cfg.ckp_path)
    # validation-set predictions
    preds, labels, _ = chexpert_model.predict_loader(
        val_loader, ensemble=False, cal_loss=False)
    preds_stack_val.append(preds)
    labels_stack_val.append(labels)
    # test-set predictions
    preds, labels, _ = chexpert_model.predict_loader(
        test_loader, ensemble=False, cal_loss=False)
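
The excerpt above breaks off before the test-set predictions are appended, so the sketch below only combines the validation stacks. It is an assumption, not the repository's actual ensembling code (which may, for example, use id_leaf/id_obs): a plain average over the per-member predictions, assuming every entry of preds_stack_val has the same shape.

import numpy as np

# Hedged sketch (assumption): simple mean over the per-member validation
# predictions collected above; assumes each entry has the same
# (num_samples, num_classes) shape.
ensemble_preds_val = np.mean(np.stack(preds_stack_val, axis=0), axis=0)
labels_val = labels_stack_val[0]  # ground truth is identical for every member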
Code Example #3
# warnings.simplefilter('always')
warnings.filterwarnings("ignore")

cfg_path = './config/test_config.json'
# cfg_path = './config/example2.json'

with open(cfg_path) as f:
    cfg = edict(json.load(f))

loss_func = BCEWithLogitsLoss()

# data_dir = '/home/tungthanhlee/bdi_xray/data/images'
data_dir = '/home/single1/BACKUP/thanhtt/assigned_jpeg'

# evaluate on the GPU selected in the config
torch.cuda.set_device(cfg.device)
val_loader = create_loader(cfg.dev_csv, data_dir, cfg, mode='val')
test_loader = create_loader(cfg.test_csv, data_dir, cfg, mode='test')

metrics_dict = {
    'auc': AUC(),
    'sensitivity': Recall(),
    'specificity': Specificity(),
    'f1': F1()
}

id_leaf = [2, 4, 5, 6, 7, 8]
id_obs = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]

#------------------------------- additional config for ensemble ---------------------------------------
model_names = [
    'dense',
Code Example #4
import json
import warnings

# NOTE (assumption): edict is taken here to be easydict.EasyDict, as is usual for
# this config pattern; BCEWithLogitsLoss and the metric classes (ACC, AUC, F1,
# Precision, Recall) come from project modules not shown in this excerpt.
from easydict import EasyDict as edict

from model.chexpert import CheXpert_model
from data.dataset import create_loader

# warnings.simplefilter('always')
warnings.filterwarnings("ignore")

# cfg_path = './config/chexmic_config.json'
cfg_path = './config/example.json'

with open(cfg_path) as f:
    cfg = edict(json.load(f))

data_dir = '/home/tungthanhlee/thanhtt/assigned_jpeg'

train_loader = create_loader(cfg.train_csv, data_dir, cfg, mode='train')
val_loader = create_loader(cfg.dev_csv, data_dir, cfg, mode='val')

loss_func = BCEWithLogitsLoss()

metrics_dict = {
    'acc': ACC(),
    'auc': AUC(),
    'f1': F1(),
    'precision': Precision(),
    'recall': Recall()
}
loader_dict = {'train': train_loader, 'val': val_loader}

chexpert_model = CheXpert_model(cfg, loss_func, metrics_dict)
# print(chexpert_model.model)
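
A short, hedged sketch of what might follow here: restoring a checkpoint and running inference with the predict_loader call shown in Code Example #2. The cfg.ckp_path field is hypothetical for this particular config file.

# Hedged sketch (assumption): cfg.ckp_path is hypothetical for example.json;
# predict_loader is called exactly as in Code Example #2.
chexpert_model.load_ckp(cfg.ckp_path)
preds, labels, _ = chexpert_model.predict_loader(
    val_loader, ensemble=False, cal_loss=False)
# preds and labels can then be fed to the metrics in metrics_dict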