loader_test = DataLoader(ds_test,
                         batch_size=args.batch_size_valid,
                         pin_memory=True,
                         num_workers=args.num_workers)

#endregion

#region load model for transfer learning from the m0 vs m1/m2 classifier
path_trained_models = (Path(__file__).resolve().parent.parent /
                       'trained_models' / '2022_4_28_64_64' /
                       'binary_class_m0_m1m2')
if args.model_name == 'cls_3d':
    model_file = path_trained_models / 'cls_3d.pth'
elif args.model_name == 'medical_net_resnet50':
    model_file = path_trained_models / 'medical_net_resnet50.pth'
else:
    raise ValueError(f'{args.model_name} error!')
model = get_model(args.model_name,
                  num_class=1,
                  model_file=model_file,
                  drop_prob=args.drop_prob)  #binary classification

#endregion

#region training
pos_weight = torch.tensor(args.pos_weight, dtype=torch.float32)
if torch.cuda.is_available():
    pos_weight = pos_weight.cuda()
criterion = nn.BCEWithLogitsLoss(pos_weight=pos_weight, reduction='mean')
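# For a single logit x and target y, BCEWithLogitsLoss computes
#   -(pos_weight * y * log(sigmoid(x)) + (1 - y) * log(1 - sigmoid(x))),
# so pos_weight > 1 up-weights positive samples (useful when the positive
# class is the minority); e.g. pos_weight = 3 counts each positive sample
# roughly like three samples.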
optimizer = optim.Adam(model.parameters(),
                       weight_decay=args.weight_decay,
                       lr=args.lr)
#from libs.neural_networks.optimizer_obsoleted.my_optimizer import Lookahead
# optimizer = Lookahead(optimizer=optimizer, k=5, alpha=0.5)
scheduler = StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
dir_preprocess = '/disk1/3D_OCT_DME/preprocess_128_128_128/'

threshold = 0.5

models_dicts = []

path_model_base = (Path(__file__).resolve().parent.parent.parent /
                   'trained_models' / '2022_4_28_64_64')
if args.task_type.startswith('3D_OCT_DME_M0_M1M2'):
    path_model = path_model_base / 'binary_class_m0_m1m2'
elif args.task_type.startswith('3D_OCT_DME_M1_M2'):
    path_model = path_model_base / 'binary_class_m1_m2'
else:
    raise ValueError(f'{args.task_type} error!')

model_name = 'cls_3d'
model_file = path_model / 'cls_3d.pth'
model = get_model(model_name, 1, model_file=model_file)
ds_test = Dataset_CSV_test(csv_file=csv_file,
                           image_shape=args.image_shape,
                           depth_start=0,
                           depth_interval=2,
                           test_mode=True)
loader_test = DataLoader(ds_test,
                         batch_size=32,
                         pin_memory=True,
                         num_workers=4)
model_dict = {'model': model, 'weight': 1, 'dataloader': loader_test}
models_dicts.append(model_dict)

model_name = 'medical_net_resnet50'
model_file = path_model / 'medical_net_resnet50.pth'
model = get_model(model_name, 1, model_file=model_file)
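# The snippet is truncated here; by analogy with the first entry, this second
# model would presumably also be registered in models_dicts, e.g.
# models_dicts.append({'model': model, 'weight': 1, 'dataloader': loader_test})
# (the weight value is an assumption).
#
# A minimal sketch of how such an ensemble could then be evaluated: average
# the models' sigmoid outputs, weighted by 'weight', and apply `threshold`.
# Illustrative only; it assumes each dataloader batch yields the image tensor
# as its first element, which may differ from Dataset_CSV_test.
def ensemble_predict(models_dicts, threshold=0.5):
    weighted_probs = []
    total_weight = sum(d['weight'] for d in models_dicts)
    for d in models_dicts:
        m = d['model']
        if torch.cuda.is_available():
            m = m.cuda()
        m.eval()
        probs = []
        with torch.no_grad():
            for batch in d['dataloader']:
                images = batch[0] if isinstance(batch, (list, tuple)) else batch
                if torch.cuda.is_available():
                    images = images.cuda()
                logits = m(images).view(-1)
                probs.append(torch.sigmoid(logits).cpu())
        weighted_probs.append(torch.cat(probs) * d['weight'])
    probs_mean = torch.stack(weighted_probs).sum(dim=0) / total_weight
    return (probs_mean > threshold).long(), probs_mean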

# Example #3
path_model_base = (Path(__file__).resolve().parent.parent.parent /
                   'trained_models' / '2022_4_28_64_64')
if args.task_type.startswith('3D_OCT_DME_M0_M1M2'):
    path_model = path_model_base / 'binary_class_m0_m1m2'
elif args.task_type.startswith('3D_OCT_DME_M1_M2'):
    path_model = path_model_base / 'binary_class_m1_m2'
else:
    raise ValueError(f'{args.task_type} error!')
model_name = args.model_name
if args.model_name == 'cls_3d':
    model_file = path_model / 'cls_3d.pth'
elif args.model_name == 'medical_net_resnet50':
    model_file = path_model / 'medical_net_resnet50.pth'
else:
    raise ValueError(f'{args.model_name} error!')

model = get_model(args.model_name, num_class=1, model_file=model_file)

layer_features = model.dense_1  # both Cls_3d and medical_net_resnet50 expose a dense_1 layer used as the feature layer
batch_size = 32
ds_test = Dataset_CSV_test(csv_file=csv_file,
                           image_shape=args.image_shape,
                           depth_start=0,
                           depth_interval=2,
                           test_mode=True)
loader_test = DataLoader(ds_test,
                         batch_size=batch_size,
                         pin_memory=True,
                         num_workers=4)
#endregion

features = compute_features_batches(model, layer_features, loader_test)
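# compute_features_batches is a helper from the project's my_tsne_helper module
# (its import is not shown in this snippet).  Purely as an illustration of the
# underlying idea, per-sample activations of a chosen layer can be collected
# with a forward hook, roughly like this (a sketch, not the project's code;
# the batch format is an assumption):
def extract_layer_features(model, layer, dataloader):
    feats = []

    def hook(_module, _inputs, output):
        feats.append(output.detach().cpu())

    handle = layer.register_forward_hook(hook)
    if torch.cuda.is_available():
        model = model.cuda()
    model.eval()
    with torch.no_grad():
        for batch in dataloader:
            images = batch[0] if isinstance(batch, (list, tuple)) else batch
            if torch.cuda.is_available():
                images = images.cuda()
            model(images)
    handle.remove()
    return torch.cat(feats, dim=0)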
from libs.neural_networks.model.my_get_model import get_model

#region load model and set some parameters
csv_file = os.path.join(os.path.abspath('../../../cls_3d/'), 'datafiles', 'v3',
                        '3D_OCT_DME_M0_M1M2_test.csv')
dir_dest = '/disk1/3D_OCT_DME/results/2021_7_31/heatmaps_multi_class/dimensionality_reduction/'
tsne_image_file = os.path.join(dir_dest, 't_sne_test.png')
save_features = False
npy_file_features = os.path.join(dir_dest, 't_sne_test.npy')

num_class = 2
model_name = 'cls_3d'
model_file = os.path.join(os.path.abspath('../../../cls_3d/'),
                          'trained_models', 'multi_class', 'cls_3d.pth')
image_shape = (64, 64)
model = get_model(model_name, num_class, model_file=model_file)
layer_features = model.dense_1  # both Cls_3d and medical_net_resnet50 expose a dense_1 layer used as the feature layer
batch_size = 32
ds_test = Dataset_CSV_test(csv_file=csv_file,
                           image_shape=image_shape,
                           depth_start=0,
                           depth_interval=2,
                           test_mode=True)
loader_test = DataLoader(ds_test,
                         batch_size=batch_size,
                         pin_memory=True,
                         num_workers=4)
#endregion

from libs.neural_networks.heatmaps.t_SNE.my_tsne_helper import compute_features_batches, gen_features_reduced, draw_tsne
features = compute_features_batches(model, layer_features, loader_test)
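# gen_features_reduced and draw_tsne (imported above) are project helpers whose
# exact signatures are not shown here.  For reference, the same reduce-and-plot
# step can be sketched with scikit-learn and matplotlib (an illustration only,
# not the project's implementation):
import numpy as np
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt

if torch.is_tensor(features):
    features_np = features.detach().cpu().numpy()
else:
    features_np = np.asarray(features)

os.makedirs(dir_dest, exist_ok=True)
if save_features:
    np.save(npy_file_features, features_np)

features_2d = TSNE(n_components=2, init='pca', random_state=0).fit_transform(features_np)
plt.figure(figsize=(6, 6))
plt.scatter(features_2d[:, 0], features_2d[:, 1], s=8)
plt.title('t-SNE of dense_1 features')
plt.savefig(tsne_image_file, bbox_inches='tight')
plt.close()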

# Example #5
    # NOTE: this snippet starts inside an enclosing `if` whose header is not
    # shown (it presumably decides whether pretrained weights are loaded).
    if args.model_name == 'cls_3d':
        model_file = path_trained_models / 'Genesis_Chest_CT.pt'
    elif args.model_name == 'ModelsGenesis':
        model_file = path_trained_models / 'Genesis_Chest_CT.pt'
    elif args.model_name == 'medical_net_resnet34':
        # model_file = '/disk1/MedicalNet_pytorch_files/pretrain/resnet_34.pth'
        model_file = path_trained_models / 'resnet_34.pth'
    elif args.model_name == 'medical_net_resnet50':
        model_file = path_trained_models / 'resnet_50.pth'
    elif args.model_name == 'medical_net_resnet101':
        model_file = path_trained_models / 'resnet_101.pth'
else:
    model_file = None

model = get_model(args.model_name,
                  num_class=1,
                  model_file=model_file,
                  drop_prob=args.drop_prob)

#endregion

#region training
pos_weight = torch.tensor(args.pos_weight, dtype=torch.float32)
if torch.cuda.is_available():
    pos_weight = pos_weight.cuda()
criterion = nn.BCEWithLogitsLoss(pos_weight=pos_weight, reduction='mean')
optimizer = optim.Adam(model.parameters(),
                       weight_decay=args.weight_decay,
                       lr=args.lr)
# from libs.neural_networks.optimizer_obsoleted.my_optimizer import Lookahead
# optimizer = Lookahead(optimizer=optimizer, k=5, alpha=0.5)
scheduler = StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
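
# The criterion / optimizer / scheduler above are typically driven by an epoch
# loop along the following lines (a minimal sketch, not the project's training
# helper; `loader_train` and the epoch count are assumed to be defined
# elsewhere in the script, and batches are assumed to be (images, labels)
# pairs):
def train_one_epoch(model, loader_train, criterion, optimizer):
    if torch.cuda.is_available():
        model = model.cuda()
    model.train()
    running_loss = 0.0
    for images, labels in loader_train:
        if torch.cuda.is_available():
            images, labels = images.cuda(), labels.cuda()
        optimizer.zero_grad()
        logits = model(images).view(-1)
        loss = criterion(logits, labels.float())
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    return running_loss / max(len(loader_train), 1)

# for epoch in range(epochs_num):
#     loss_train = train_one_epoch(model, loader_train, criterion, optimizer)
#     scheduler.step()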