# Test-time prediction for SENET154 (ten-crop positions 0/2/9,
# ImageNet per-channel normalization).
import numpy as np
import pandas
import torch

import predict
from data_augmentation import DataTransform


def run_test(path_model, path_csv, path_output):
    model_name = 'SENET154'
    batch_size = 16
    img_size = 256
    crop_size = 224
    target_mean = np.array([0.485, 0.456, 0.406])
    target_std = np.array([0.229, 0.224, 0.225])

    data_transform = DataTransform(no_bg=True, pad=True)
    data_transform_valid = data_transform.get_test(img_size=img_size, crop_size=crop_size,
                                                   target_mean=target_mean, target_std=target_std,
                                                   positions=[0, 2, 9])

    device = torch.device("cuda:0")
    output_lst = predict.predict(path_csv=path_csv, path_model=path_model, model_name=model_name,
                                 batch_size=batch_size, device=device,
                                 transform=data_transform_valid)
    pandas.DataFrame(output_lst).to_csv(path_output, header=False, index=False)
# Test-time prediction for DENSENET161-LARGE3 (ten-crop positions 6/3/4,
# scalar mean/std normalization).
import pandas
import torch

import predict
from data_augmentation import DataTransform


def run_test(path_model, path_csv, path_output):
    model_name = 'DENSENET161-LARGE3'
    batch_size = 16
    img_size = 366
    crop_size = 320
    target_mean = 0.456
    target_std = 0.225

    data_transform = DataTransform(no_bg=True, pad=True)
    data_transform_valid = data_transform.get_test(img_size=img_size, crop_size=crop_size,
                                                   target_mean=target_mean, target_std=target_std,
                                                   positions=[6, 3, 4])

    device = torch.device("cuda:0")
    output_lst = predict.predict(path_csv=path_csv, path_model=path_model, model_name=model_name,
                                 batch_size=batch_size, device=device,
                                 transform=data_transform_valid)
    pandas.DataFrame(output_lst).to_csv(path_output, header=False, index=False)
# Test-time prediction for VGG16-BN (ten-crop positions 5/6/4,
# identity normalization: mean 0, std 1).
import pandas
import torch

import predict
from data_augmentation import DataTransform


def run_test(path_model, path_csv, path_output):
    model_name = 'VGG16-BN'
    batch_size = 16
    img_size = 256
    crop_size = 224
    target_mean = 0.0
    target_std = 1.0

    data_transform = DataTransform(no_bg=True, pad=True)
    data_transform_valid = data_transform.get_test(img_size=img_size, crop_size=crop_size,
                                                   target_mean=target_mean, target_std=target_std,
                                                   positions=[5, 6, 4])

    device = torch.device("cuda:0")
    output_lst = predict.predict(path_csv=path_csv, path_model=path_model, model_name=model_name,
                                 batch_size=batch_size, device=device,
                                 transform=data_transform_valid)
    pandas.DataFrame(output_lst).to_csv(path_output, header=False, index=False)
# Multiview evaluation for PNASNET: tests the best valid-loss ("-L") and
# best valid-auroc ("-A") checkpoints if they exist on disk.
import getopt
import os
import sys

import torch

import train_multiview
from data_augmentation import DataTransform


def run_test(path_model):
    model_name = 'PNASNET'
    model_pretrained = True
    path_data = '../../MURA-v1.0/'
    path_root = '../../'
    batch_size = 6
    img_size = 354
    crop_size = 331
    target_mean = 0.5
    target_std = 0.5

    data_transform = DataTransform(no_bg=True, pad=True)
    data_transform_valid = data_transform.get_valid(img_size=img_size, crop_size=crop_size,
                                                    target_mean=target_mean, target_std=target_std)

    # Pick the GPU from the -d/--device flag, falling back to CPU.
    device = None
    opts, _ = getopt.getopt(sys.argv[1:], "d:", ["device="])
    for opt, arg in opts:
        if opt in ("-d", "--device") and torch.cuda.is_available():
            device = torch.device("cuda:" + str(arg))
    if device is None:
        print("GPU not found! Using CPU!")
        device = torch.device("cpu")

    print('NN architecture = ', model_name)
    print("using data transforms: " + str(data_transform))

    if os.path.exists(path_model + "-L.pth.tar"):
        print('Testing the model with best valid-loss')
        train_multiview.test(path_data=path_data, path_root=path_root,
                             path_model=path_model + "-L", model_name=model_name,
                             model_pretrained=model_pretrained, batch_size=batch_size,
                             device=device, transform=data_transform_valid)
    if os.path.exists(path_model + "-A.pth.tar"):
        print('Testing the model with best valid-auroc')
        train_multiview.test(path_data=path_data, path_root=path_root,
                             path_model=path_model + "-A", model_name=model_name,
                             model_pretrained=model_pretrained, batch_size=batch_size,
                             device=device, transform=data_transform_valid)
# Evaluates DENSENET201 on the validation split: best valid-loss ("-L")
# and best valid-auroc ("-A") checkpoints.
import getopt
import sys

import torch

import train
from data_augmentation import DataTransform


def run_test(path_model):
    model_name = 'DENSENET201'
    model_pretrained = True
    path_data_valid = '../../MURA_valid1_keras'
    batch_size = 16
    img_size = 256
    crop_size = 224
    target_mean = 0.0
    target_std = 1.0

    data_transform = DataTransform(revised=False, no_bg=True, pad=True, to_rgb=False)
    data_transform_valid = data_transform.get_valid(img_size=img_size, crop_size=crop_size,
                                                    target_mean=target_mean, target_std=target_std)

    # Pick the GPU from the -d/--device flag, falling back to CPU.
    device = None
    opts, _ = getopt.getopt(sys.argv[1:], "d:", ["device="])
    for opt, arg in opts:
        if opt in ("-d", "--device") and torch.cuda.is_available():
            device = torch.device("cuda:" + str(arg))
    if device is None:
        print("GPU not found! Using CPU!")
        device = torch.device("cpu")

    print('NN architecture = ', model_name)
    print("using data transforms: " + str(data_transform))

    print('Testing the model with best valid-loss')
    train.test(path_data=path_data_valid, path_model=path_model + "-L", model_name=model_name,
               model_pretrained=model_pretrained, batch_size=batch_size, device=device,
               transform=data_transform_valid)
    print('Testing the model with best valid-auroc')
    train.test(path_data=path_data_valid, path_model=path_model + "-A", model_name=model_name,
               model_pretrained=model_pretrained, batch_size=batch_size, device=device,
               transform=data_transform_valid)
# Test-time prediction for DUALPATHNET107_5k (ten-crop positions 0/1/8,
# DPN pretraining mean/std).
import numpy as np
import pandas
import torch

import predict
from data_augmentation import DataTransform


def run_test(path_model, path_csv, path_output):
    model_name = 'DUALPATHNET107_5k'
    batch_size = 16
    img_size = 256
    crop_size = 224
    target_mean = np.array([124 / 255, 117 / 255, 104 / 255])
    target_std = 1 / (.0167 * 255)

    data_transform = DataTransform(no_bg=True, pad=True)
    data_transform_valid = data_transform.get_test(img_size=img_size, crop_size=crop_size,
                                                   target_mean=target_mean, target_std=target_std,
                                                   positions=[0, 1, 8])

    device = torch.device("cuda:0")
    output_lst = predict.predict(path_csv=path_csv, path_model=path_model, model_name=model_name,
                                 batch_size=batch_size, device=device,
                                 transform=data_transform_valid)
    pandas.DataFrame(output_lst).to_csv(path_output, header=False, index=False)
# Test-time prediction for INCEPTIONV4-LARGE (ten-crop positions 1/2/9,
# 0.5/0.5 normalization).
import pandas
import torch

import predict
from data_augmentation import DataTransform


def run_test(path_model, path_csv, path_output):
    model_name = 'INCEPTIONV4-LARGE'
    batch_size = 16
    img_size = 378
    crop_size = 331
    target_mean = 0.5
    target_std = 0.5

    data_transform = DataTransform(no_bg=True, pad=True)
    data_transform_valid = data_transform.get_test(img_size=img_size, crop_size=crop_size,
                                                   target_mean=target_mean, target_std=target_std,
                                                   positions=[1, 2, 9])

    device = torch.device("cuda:0")
    output_lst = predict.predict(path_csv=path_csv, path_model=path_model, model_name=model_name,
                                 batch_size=batch_size, device=device,
                                 transform=data_transform_valid)
    pandas.DataFrame(output_lst).to_csv(path_output, header=False, index=False)
def main():
    configs = [
        Config(model_name="DUALPATHNET107_5k", file_model="m-03916-09049", batch_size=32,
               img_size=256, crop_size=224,
               target_mean=np.array([124 / 255, 117 / 255, 104 / 255]),
               target_std=1 / (.0167 * 255), positions=[0, 1, 8],
               data_transform=DataTransform(no_bg=True, pad=True)),
        Config(model_name="SENET154", file_model="m-03924-09074", batch_size=32,
               img_size=256, crop_size=224,
               target_mean=np.array([0.485, 0.456, 0.406]),
               target_std=np.array([0.229, 0.224, 0.225]), positions=[0, 2, 9],
               data_transform=DataTransform(no_bg=True, pad=True)),
        Config(model_name="INCEPTIONV4-LARGE", file_model="m-03879-09080", batch_size=32,
               img_size=378, crop_size=331, target_mean=0.5, target_std=0.5,
               positions=[1, 2, 9], data_transform=DataTransform(no_bg=True, pad=True)),
        Config(model_name="VGG16-BN", file_model="m-04051-09053", batch_size=48,
               img_size=256, crop_size=224, target_mean=0.0, target_std=1.0,
               positions=[5, 6, 4], data_transform=DataTransform(no_bg=True, pad=True)),
        Config(model_name="DENSENET201-LARGE3", file_model="m-04001-09065", batch_size=48,
               img_size=366, crop_size=320, target_mean=0.456, target_std=0.225,
               positions=[5, 7, 3], data_transform=DataTransform(no_bg=True, pad=True)),
        Config(model_name="DENSENET161-LARGE3", file_model="m-03988-09084", batch_size=32,
               img_size=366, crop_size=320, target_mean=0.456, target_std=0.225,
               positions=[6, 3, 4], data_transform=DataTransform(no_bg=True, pad=True)),
        Config(model_name="NASNETALARGE", file_model="m-03815-09099", batch_size=8,
               img_size=354, crop_size=331, target_mean=0.5, target_std=0.5,
               positions=[7, 8, 4], data_transform=DataTransform(no_bg=True, pad=True))
    ]

    keys, results = [], []
    for config in configs:
        print(config.model_name)
        keys, result = run_test(sys.argv[1], config)
        results.append(result)

    results = np.concatenate(results, axis=1)
    score = np.mean(results, axis=1)
    label = np.array(score >= 0.5, dtype=np.int32)
    pandas.DataFrame(label, index=keys).to_csv(sys.argv[2], header=False)
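# The ensembling step in main() concatenates each model's column of scores,
# averages across models, and thresholds at 0.5. A standalone sketch of that
# reduction; the probabilities below are illustrative, not model outputs:
import numpy as np

per_model_scores = [
    np.array([[0.91], [0.12], [0.55]]),  # model 1: one score per study
    np.array([[0.88], [0.20], [0.46]]),  # model 2
]
results = np.concatenate(per_model_scores, axis=1)  # shape (3, 2)
score = np.mean(results, axis=1)                    # mean over models
label = np.array(score >= 0.5, dtype=np.int32)      # binary abnormality call
print(label)  # [1 0 1]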
# Trains INCEPTIONV4-LARGE for 80 epochs from ImageNet weights, then
# evaluates the best valid-loss ("-L") and best valid-auroc ("-A") checkpoints.
import getopt
import sys
import time

import torch

import optimizers
import train
from data_augmentation import DataTransform


def run_train():
    timestamp = time.strftime("%Y%m%d") + '-' + time.strftime("%H%M%S")
    model_name = 'INCEPTIONV4-LARGE'
    model_pretrained = True
    path_data_train = '../../MURA_trainval_keras'
    path_data_valid = '../../MURA_valid1_keras'
    path_log = '../../trained_models/' + timestamp + '/tb'
    batch_size = 16
    epoch_num = 80
    img_size = 331
    crop_size = 331
    target_mean = 0.5
    target_std = 0.5
    path_model = '../../trained_models/' + timestamp + '/m-' + timestamp

    data_transform = DataTransform(aug="rot30", no_bg=True, pad=True, no_crop=True)
    data_transform_train = data_transform.get_train(img_size=img_size, crop_size=crop_size,
                                                    target_mean=target_mean, target_std=target_std)
    data_transform_valid = data_transform.get_valid(img_size=img_size, crop_size=crop_size,
                                                    target_mean=target_mean, target_std=target_std)

    # Pick the GPU from the -d/--device flag, falling back to CPU.
    device = None
    opts, _ = getopt.getopt(sys.argv[1:], "d:", ["device="])
    for opt, arg in opts:
        if opt in ("-d", "--device") and torch.cuda.is_available():
            device = torch.device("cuda:" + str(arg))
    if device is None:
        print("GPU not found! Using CPU!")
        device = torch.device("cpu")

    print('Training NN architecture = ', model_name)
    train.train(path_data_train=path_data_train, path_data_valid=path_data_valid,
                path_log=path_log, path_model=path_model, model_name=model_name,
                model_pretrained=model_pretrained, batch_size=batch_size, epoch_num=epoch_num,
                checkpoint=None, device=device, transform_train=data_transform_train,
                transform_valid=data_transform_valid, optimizer_fn=optimizers.adam_optimizers)

    print('NN architecture = ', model_name)
    print("using data transforms: " + str(data_transform))
    print('Testing the model with best valid-loss')
    print('timestamp = ' + timestamp)
    train.test(path_data=path_data_valid, path_model=path_model + "-L", model_name=model_name,
               model_pretrained=model_pretrained, batch_size=batch_size, device=device,
               transform=data_transform_valid)
    print('Testing the model with best valid-auroc')
    print('timestamp = ' + timestamp)
    train.test(path_data=path_data_valid, path_model=path_model + "-A", model_name=model_name,
               model_pretrained=model_pretrained, batch_size=batch_size, device=device,
               transform=data_transform_valid)
# Fine-tunes INCEPTIONRESNETV2 in multiview mode for 15 epochs, resuming
# from an existing checkpoint, then evaluates both checkpoints.
import getopt
import sys
import time

import torch

import optimizers
import train_multiview
from data_augmentation import DataTransform


def run_train():
    timestamp = time.strftime("%Y%m%d") + '-' + time.strftime("%H%M%S")
    model_name = 'INCEPTIONRESNETV2'
    model_pretrained = True
    path_data = '../../MURA-v1.0/'
    path_root = '../../'
    path_log = '../../trained_models/' + timestamp + '/tb'
    batch_size = 24
    epoch_num = 15
    img_size = 299
    crop_size = 299
    target_mean = 0.5
    target_std = 0.5
    checkpoint = ("../../trained_models/04037-09061-inceptionresnetv2-adam-nobgpad/"
                  "m-20180518-015825-L.pth.tar")
    path_model = '../../trained_models/' + timestamp + '/m-' + timestamp

    data_transform = DataTransform(no_bg=True, pad=True)
    data_transform_train = data_transform.get_train(img_size=img_size, crop_size=crop_size,
                                                    target_mean=target_mean, target_std=target_std)
    data_transform_valid = data_transform.get_valid(img_size=img_size, crop_size=crop_size,
                                                    target_mean=target_mean, target_std=target_std)

    # Pick the GPU from the -d/--device flag, falling back to CPU.
    device = None
    opts, _ = getopt.getopt(sys.argv[1:], "d:", ["device="])
    for opt, arg in opts:
        if opt in ("-d", "--device") and torch.cuda.is_available():
            device = torch.device("cuda:" + str(arg))
    if device is None:
        print("GPU not found! Using CPU!")
        device = torch.device("cpu")

    print('Training NN architecture = ', model_name)
    print('Training Multiview')
    train_multiview.train(path_data=path_data, path_root=path_root, path_log=path_log,
                          path_model=path_model, model_name=model_name,
                          model_pretrained=model_pretrained, batch_size=batch_size,
                          epoch_num=epoch_num, checkpoint=checkpoint, device=device,
                          transform_train=data_transform_train,
                          transform_valid=data_transform_valid,
                          optimizer_fn=optimizers.adam_optimizers_small)

    print('NN architecture = ', model_name)
    print("using data transforms: " + str(data_transform))
    print('Testing the model with best valid-loss')
    print('timestamp = ' + timestamp)
    train_multiview.test(path_data=path_data, path_root=path_root, path_model=path_model + "-L",
                         model_name=model_name, model_pretrained=model_pretrained,
                         batch_size=batch_size, device=device, transform=data_transform_valid)
    print('Testing the model with best valid-auroc')
    print('timestamp = ' + timestamp)
    train_multiview.test(path_data=path_data, path_root=path_root, path_model=path_model + "-A",
                         model_name=model_name, model_pretrained=model_pretrained,
                         batch_size=batch_size, device=device, transform=data_transform_valid)
def main():
    configs = [
        Config(model_name="SENET154", file_model="senet154", batch_size=32,
               img_size=256, crop_size=224,
               target_mean=np.array([0.485, 0.456, 0.406]),
               target_std=np.array([0.229, 0.224, 0.225]),
               data_transform=DataTransform(no_bg=True, pad=True),
               positions=[0, 6, 7, 3, 4]),
        Config(model_name="SENET154-LARGE", file_model="senet154large", batch_size=24,
               img_size=293, crop_size=256,
               target_mean=np.array([0.485, 0.456, 0.406]),
               target_std=np.array([0.229, 0.224, 0.225]),
               data_transform=DataTransform(no_bg=True, pad=True),
               positions=[5, 1, 2, 3, 9]),
        Config(model_name="INCEPTIONV4", file_model="inceptionv4", batch_size=32,
               img_size=341, crop_size=299, target_mean=0.5, target_std=0.5,
               data_transform=DataTransform(no_bg=True, pad=True),
               positions=[5, 6, 2, 8, 4]),
        Config(model_name="INCEPTIONV4-LARGE", file_model="inceptionv4large", batch_size=16,
               img_size=378, crop_size=331, target_mean=0.5, target_std=0.5,
               data_transform=DataTransform(no_bg=True, pad=True),
               positions=[0, 6, 2, 3, 9]),
        Config(model_name="INCEPTIONRESNETV2", file_model="inceptionresnetv2", batch_size=8,
               img_size=341, crop_size=299, target_mean=0.5, target_std=0.5,
               data_transform=DataTransform(no_bg=True, pad=True),
               positions=[0, 1, 7, 8, 9]),
        Config(model_name="DENSENET201-LARGE3", file_model="densenet201", batch_size=24,
               img_size=366, crop_size=320, target_mean=0.456, target_std=0.225,
               data_transform=DataTransform(no_bg=True, pad=True),
               positions=[5, 1, 7, 8, 4]),
        Config(model_name="DENSENET161-LARGE3", file_model="densenet161", batch_size=16,
               img_size=366, crop_size=320, target_mean=0.456, target_std=0.225,
               data_transform=DataTransform(no_bg=True, pad=True),
               positions=[0, 6, 7, 8, 4]),
        Config(model_name="DENSENET169-LARGE3", file_model="densenet169", batch_size=24,
               img_size=366, crop_size=320, target_mean=0.456, target_std=0.225,
               data_transform=DataTransform(no_bg=True, pad=True),
               positions=[5, 6, 2, 3, 4]),
        Config(model_name="NASNETALARGE", file_model="nasnetalarge", batch_size=8,
               img_size=354, crop_size=331, target_mean=0.5, target_std=0.5,
               data_transform=DataTransform(no_bg=True, pad=True),
               positions=[5, 1, 2, 8, 9]),
        Config(model_name="PNASNET", file_model="pnasnet", batch_size=8,
               img_size=354, crop_size=331, target_mean=0.5, target_std=0.5,
               data_transform=DataTransform(no_bg=True, pad=True),
               positions=[0, 1, 7, 3, 9])
    ]

    keys, results = [], []
    for config in configs:
        print(config.model_name)
        keys, result = run_test(sys.argv[1], config)
        results.append(result)

    results = np.concatenate(results, axis=1)
    score = np.mean(results, axis=1)
    label = np.array(score >= 0.5, dtype=np.int32)
    pandas.DataFrame(label, index=keys).to_csv(sys.argv[2], header=False)
import yaml
import numpy as np

from data_augmentation import DataTransform

if __name__ == "__main__":
    config_train = {
        "transform": DataTransform(no_bg=True, pad=True,
                                   aug_rotate=np.random.uniform(15.0, 30.0),
                                   aug_shear=np.random.uniform(0.0, 10.0),
                                   flip_h="random", crop_mode="random",
                                   random_crop_factor=1.0 / np.random.uniform(7.5, 15.0)),
        "batch_size": 16,
        "optimizer_name": "adam",
        "learning_rate": 1e-6 * (0.01 / 1e-6) ** (26 / 80),
        "differential_lr": 1,
        "weight_decay": np.power(10.0, np.random.uniform(-7.0, -4.0)).item(),
        "is_nesterov": False,
        "beta1": 0.9,
        "beta2": 0.999,
        # The excerpt breaks off here; the remaining entries (and the eventual
        # yaml.dump, as in the other generator scripts) are not shown.
    }
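# The learning-rate expression above reads as point 26 of an 80-step grid
# spaced evenly in log space between 1e-6 and 1e-2. This standalone sketch
# is an interpretation of that formula, not part of the original script.
import numpy as np

lo, hi, steps = 1e-6, 1e-2, 80
grid = lo * (hi / lo) ** (np.arange(steps + 1) / steps)
assert np.isclose(grid[26], 1e-6 * (0.01 / 1e-6) ** (26 / 80))
print(grid[26])  # ~2.0e-5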
# Fine-tunes SENET154-LARGE in multiview mode for 10 epochs, resuming
# from an existing checkpoint, then evaluates both checkpoints.
import getopt
import sys
import time

import numpy as np
import torch

import optimizers
import train_multiview
from data_augmentation import DataTransform


def run_train():
    timestamp = time.strftime("%Y%m%d") + '-' + time.strftime("%H%M%S")
    model_name = 'SENET154-LARGE'
    model_pretrained = True
    path_data = '../../MURA-v1.0/'
    path_root = '../../'
    path_log = '../../trained_models/' + timestamp + '/tb'
    batch_size = 16
    epoch_num = 10
    img_size = 256
    crop_size = 256
    target_mean = np.array([0.485, 0.456, 0.406])
    target_std = np.array([0.229, 0.224, 0.225])
    checkpoint = ("../../trained_models/03926-09052-senet154large-adam-nobgpad/"
                  "m-20180626-081034-L.pth.tar")
    path_model = '../../trained_models/' + timestamp + '/m-' + timestamp

    data_transform = DataTransform(no_bg=True, pad=True)
    data_transform_train = data_transform.get_train(img_size=img_size, crop_size=crop_size,
                                                    target_mean=target_mean, target_std=target_std)
    data_transform_valid = data_transform.get_valid(img_size=img_size, crop_size=crop_size,
                                                    target_mean=target_mean, target_std=target_std)

    # Pick the GPU from the -d/--device flag, falling back to CPU.
    device = None
    opts, _ = getopt.getopt(sys.argv[1:], "d:", ["device="])
    for opt, arg in opts:
        if opt in ("-d", "--device") and torch.cuda.is_available():
            device = torch.device("cuda:" + str(arg))
    if device is None:
        print("GPU not found! Using CPU!")
        device = torch.device("cpu")

    print('Training NN architecture = ', model_name)
    print('Training Multiview')
    train_multiview.train(path_data=path_data, path_root=path_root, path_log=path_log,
                          path_model=path_model, model_name=model_name,
                          model_pretrained=model_pretrained, batch_size=batch_size,
                          epoch_num=epoch_num, checkpoint=checkpoint, device=device,
                          transform_train=data_transform_train,
                          transform_valid=data_transform_valid,
                          optimizer_fn=optimizers.adam_optimizers_small)

    print('NN architecture = ', model_name)
    print("using data transforms: " + str(data_transform))
    print('Testing the model with best valid-loss')
    print('timestamp = ' + timestamp)
    train_multiview.test(path_data=path_data, path_root=path_root, path_model=path_model + "-L",
                         model_name=model_name, model_pretrained=model_pretrained,
                         batch_size=batch_size, device=device, transform=data_transform_valid)
    print('Testing the model with best valid-auroc')
    print('timestamp = ' + timestamp)
    train_multiview.test(path_data=path_data, path_root=path_root, path_model=path_model + "-A",
                         model_name=model_name, model_pretrained=model_pretrained,
                         batch_size=batch_size, device=device, transform=data_transform_valid)
import yaml
import numpy as np

from data_augmentation import DataTransform

if __name__ == "__main__":
    config_train = {
        "transform": DataTransform(no_bg=True, pad=True, aug_rotate=20, aug_shear=10,
                                   flip_h="random", crop_mode="random",
                                   random_crop_factor=0.08),
        "batch_size": 20,
        "optimizer_name": "adam",
        "differential_lr": 10,
        "is_nesterov": False,
        "beta1": 0.9,
        "beta2": 0.999,
        "epoch_num": 80
    }
    config_valid = {
        "model_name": "InceptionV4",
        "img_size": 378,
        "crop_size": 331
    }
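    # The script imports yaml yet the excerpt stops after the two dicts; a
    # plausible tail, mirroring the NASNet generator below, would serialize
    # both configs. This ending is an assumption: the combined layout and the
    # filename "config_inceptionv4.yaml" are not from the original script.
    with open("test_configs/config_inceptionv4.yaml", "w") as f:
        yaml.dump({"train": config_train, "valid": config_valid}, f,
                  default_flow_style=False)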
import yaml

from data_augmentation import DataTransform

if __name__ == "__main__":
    config = {
        "model_name": "NASNet",
        "img_size": 354,
        "crop_size": 331,
        "transform": DataTransform(no_bg=True, pad=True, crop_mode="ten",
                                   ten_crop_positions=[7, 8, 4], normalize=True),
        "batch_size": 8,
        "path_model": "models/NASNet.pt"
    }
    with open("test_configs/config07.yaml", "w") as f:
        yaml.dump(config, f, default_flow_style=False)
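# A minimal sketch of reading a generated config back at test time. The
# loader choice is an assumption: yaml.dump serializes the DataTransform
# instance as a python/object tag, which yaml.safe_load rejects, so an
# unsafe loader (PyYAML >= 5.1) is needed to reconstruct the object.
import yaml

with open("test_configs/config07.yaml") as f:
    config = yaml.unsafe_load(f)  # rebuilds the DataTransform instance

print(config["model_name"], config["img_size"], config["crop_size"])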