Code example #1
0
def main() -> None:
    """
    Experimentation entry point: loads a sample CBIS-DDSM DICOM image and saves
    (1) the original, (2) a naive 512x512 resize, and (3) an aspect-ratio
    preserving resize, then builds a stratified training split from the
    CBIS-DDSM training CSV.

    :return: None.
    """
    parse_command_line_arguments()

    # Load a single sample DICOM mammogram and save it as-is.
    path = "/cs/tmp/datasets/CBIS-DDSM/Calc-Training_P_00005_RIGHT_CC/08-07-2016-DDSM-23157/1.000000-full mammogram images-38548/1-1.dcm"
    dataset = pydicom.dcmread(path)
    plt.imshow(dataset.pixel_array, cmap=plt.cm.bone)
    plt.savefig("../output/image_original.png")

    # Naive resize (does NOT preserve aspect ratio). Start a fresh figure so
    # the previously drawn full-size image does not bleed into this plot.
    plt.figure()
    image = resize(dataset.pixel_array, [512, 512])
    plt.imshow(image, cmap=plt.cm.bone)
    plt.savefig("../output/image_resized.png")

    # Resize keeping the aspect ratio (done inside parse_function).
    df = pd.read_csv("../../data/CBIS-DDSM/training.csv")
    list_IDs = df['img_path'].values
    labels = df['label'].values

    X_train, X_val, y_train, y_val = dataset_stratified_split(split=0.25,
                                                              dataset=list_IDs,
                                                              labels=labels)

    # Built to exercise the dataset pipeline; the result is not used further
    # in this experiment.
    dataset_train = create_dataset(X_train, y_train)

    label = labels[0]
    list_ID = list_IDs[0]

    # parse_function returns a (image, label) pair; presumably image is a
    # tensor batch of one — TODO confirm against parse_function's definition.
    image = parse_function(list_ID, label)
    plt.figure()
    plt.imshow(np.squeeze(image[0].numpy()), cmap='gray')
    plt.savefig("../output/image_maintained_AR.png")
def main() -> None:
    """
    Program entry point. Parses command line arguments to decide which dataset
    and model to use, then either trains or tests a CNN model.
    Originally written as a group for the common pipeline. Later amended by Adam Jaamour.
    :return: None.
    """
    set_random_seeds()
    parse_command_line_arguments()
    print_num_gpus_available()

    # Create label encoder.
    l_e = create_label_encoder()

    # Run in training mode.
    if config.run_mode == "train":

        print("-- Training model --\n")

        # Start recording time.
        start_time = time.time()

        # Multi-class classification (mini-MIAS dataset)
        if config.dataset == "mini-MIAS":
            # Import entire dataset.
            images, labels = import_minimias_dataset(data_dir="../data/{}/images_processed".format(config.dataset),
                                                     label_encoder=l_e)

            # Split dataset into training/test/validation sets (80/20% split).
            X_train, X_test, y_train, y_test = dataset_stratified_split(split=0.20, dataset=images, labels=labels)

            # Create CNN model and split training/validation set (80/20% split).
            model = CnnModel(config.model, l_e.classes_.size)
            X_train, X_val, y_train, y_val = dataset_stratified_split(split=0.25,
                                                                      dataset=X_train,
                                                                      labels=y_train)

            # Calculate class weights.
            class_weights = calculate_class_weights(y_train, l_e)

            # Data augmentation.
            y_train_before_data_aug = y_train
            X_train, y_train = generate_image_transforms(X_train, y_train)
            y_train_after_data_aug = y_train

            # Shuffle the images and labels TOGETHER: the original code
            # shuffled y_train alone, which silently broke the image/label
            # pairing of the whole training set.
            shuffle_order = np.random.permutation(len(y_train))
            X_train = X_train[shuffle_order]
            y_train = y_train[shuffle_order]

            if config.verbose_mode:
                print("Before data augmentation:")
                print(Counter(list(map(str, y_train_before_data_aug))))
                print("After data augmentation:")
                print(Counter(list(map(str, y_train_after_data_aug))))

            # Fit model.
            if config.verbose_mode:
                print("Training set size: {}".format(X_train.shape[0]))
                print("Validation set size: {}".format(X_val.shape[0]))
                print("Test set size: {}".format(X_test.shape[0]))
            model.train_model(X_train, X_val, y_train, y_val, class_weights)

        # Binary classification (binarised mini-MIAS dataset)
        elif config.dataset == "mini-MIAS-binary":
            # Import entire dataset.
            images, labels = import_minimias_dataset(data_dir="../data/{}/images_processed".format(config.dataset),
                                                     label_encoder=l_e)

            # Split dataset into training/test/validation sets (80/20% split).
            X_train, X_val, y_train, y_val = dataset_stratified_split(split=0.20, dataset=images, labels=labels)

            # Create CNN model and split training/validation set (80/20% split).
            model = CnnModel(config.model, l_e.classes_.size)
            # model.load_minimias_weights()
            # model.load_minimias_fc_weights()

            # Fit model (no class weights for the binarised dataset).
            if config.verbose_mode:
                print("Training set size: {}".format(X_train.shape[0]))
                print("Validation set size: {}".format(X_val.shape[0]))
            model.train_model(X_train, X_val, y_train, y_val, None)

        # Binary classification (CBIS-DDSM dataset).
        elif config.dataset == "CBIS-DDSM":
            images, labels = import_cbisddsm_training_dataset(l_e)

            # Split training dataset into training/validation sets (75%/25% split).
            X_train, X_val, y_train, y_val = dataset_stratified_split(split=0.25, dataset=images, labels=labels)
            train_dataset = create_dataset(X_train, y_train)
            validation_dataset = create_dataset(X_val, y_val)

            # Calculate class weights.
            class_weights = calculate_class_weights(y_train, l_e)

            # Create and train CNN model.
            model = CnnModel(config.model, l_e.classes_.size)
            # model.load_minimias_fc_weights()
            # model.load_minimias_weights()

            # Fit model (data is fed via tf.data datasets, so the raw arrays
            # are passed as None).
            if config.verbose_mode:
                print("Training set size: {}".format(X_train.shape[0]))
                print("Validation set size: {}".format(X_val.shape[0]))
            model.train_model(train_dataset, validation_dataset, None, None, class_weights)

        else:
            # Fail fast instead of crashing later with a NameError on `model`.
            raise ValueError("Unsupported dataset: {}".format(config.dataset))

        # Save training runtime.
        runtime = round(time.time() - start_time, 2)

        # Save the model and its weights/biases.
        model.save_model()
        model.save_weights()

        # Evaluate training results.
        print_cli_arguments()
        if config.dataset == "mini-MIAS":
            model.make_prediction(X_val)
            model.evaluate_model(y_val, l_e, 'N-B-M', runtime)
        elif config.dataset == "mini-MIAS-binary":
            model.make_prediction(X_val)
            model.evaluate_model(y_val, l_e, 'B-M', runtime)
        elif config.dataset == "CBIS-DDSM":
            model.make_prediction(validation_dataset)
            model.evaluate_model(y_val, l_e, 'B-M', runtime)
        print_runtime("Training", runtime)

    # Run in testing mode.
    elif config.run_mode == "test":

        print("-- Testing model --\n")

        # Start recording time.
        start_time = time.time()

        # Test multi-class classification (mini-MIAS dataset).
        if config.dataset == "mini-MIAS":
            images, labels = import_minimias_dataset(data_dir="../data/{}/images_processed".format(config.dataset),
                                                     label_encoder=l_e)
            _, X_test, _, y_test = dataset_stratified_split(split=0.20, dataset=images, labels=labels)
            model = load_trained_model()
            predictions = model.predict(x=X_test)
            runtime = round(time.time() - start_time, 2)
            test_model_evaluation(y_test, predictions, l_e, 'N-B-M', runtime)

        # Test binary classification (binarised mini-MIAS dataset).
        elif config.dataset == "mini-MIAS-binary":
            # Not implemented yet; still record runtime so print_runtime below
            # does not raise a NameError.
            runtime = round(time.time() - start_time, 2)

        # Test binary classification (CBIS-DDSM dataset).
        elif config.dataset == "CBIS-DDSM":
            images, labels = import_cbisddsm_testing_dataset(l_e)
            test_dataset = create_dataset(images, labels)
            model = load_trained_model()
            predictions = model.predict(x=test_dataset)
            runtime = round(time.time() - start_time, 2)
            test_model_evaluation(labels, predictions, l_e, 'B-M', runtime)

        else:
            # Fail fast instead of crashing later with a NameError on `runtime`.
            raise ValueError("Unsupported dataset: {}".format(config.dataset))

        print_runtime("Testing", runtime)
Code example #3
0
def main() -> None:
    """
    Program entry point. Parses command line arguments to decide which dataset
    and model to use, trains or tests the chosen CNN, then evaluates results.
    :return: None.
    """
    parse_command_line_arguments()
    print_num_gpus_available()

    # Enable GPU memory growth only when a GPU is actually present; the
    # original unconditional gpu[0] access raised IndexError on CPU-only hosts.
    gpu = tf.config.experimental.list_physical_devices(device_type='GPU')
    if gpu:
        tf.config.experimental.set_memory_growth(gpu[0], True)

    # Start recording time.
    start_time = time.time()

    # Create label encoder.
    l_e = create_label_encoder()

    # Multiclass classification (mini-MIAS dataset)
    if config.dataset == "mini-MIAS":
        # Import entire dataset.
        images, chars, labels = import_minimias_dataset(
            data_dir="../data/{}/images".format(config.dataset),
            label_encoder=l_e)

        # Split dataset into training/test/validation sets (60%/20%/20% split).
        X_train, X_test, y_train, y_test = dataset_stratified_split(
            split=0.20, dataset=images, labels=labels)
        X_train, X_val, y_train, y_val = dataset_stratified_split(
            split=0.25, dataset=X_train, labels=y_train)

        if config.SAMPLING == 'x':
            # No rebalancing requested.
            X_train_rebalanced = X_train
            y_train_rebalanced = y_train
        else:
            print(len(y_train))
            print(l_e.classes_)
            print(y_train.sum(axis=0))

            # Safe fallback: the original code left these undefined (NameError)
            # when no sampling branch below matched (e.g. multiclass + 'down').
            X_train_rebalanced = X_train
            y_train_rebalanced = y_train

            # Binary class type: up- or down-sample.
            if len(config.CLASS_TYPE.split('-')) == 2:
                if config.SAMPLING == 'up':
                    X_train_rebalanced, y_train_rebalanced = generate_image_transforms_upsample(
                        X_train, y_train)
                elif config.SAMPLING == 'down':
                    X_train_rebalanced, y_train_rebalanced = generate_image_transforms_downsample(
                        X_train, y_train)

            # Multiclass type: only up-sampling is supported.
            if len(config.CLASS_TYPE.split(
                    '-')) != 2 and config.SAMPLING == 'up':
                X_train_rebalanced, y_train_rebalanced = generate_image_transforms(
                    X_train, y_train)

            print(len(y_train_rebalanced))
            print(l_e.classes_)
            print(y_train_rebalanced.sum(axis=0))

        # Create and train CNN model.
        # NOTE(review): ResNet with a non-'basic' model, or any other cnn
        # value, leaves `model` undefined and crashes below — confirm intended.
        if config.cnn == "ResNet":
            if config.model == 'basic':
                model = generate_resnet_model(l_e.classes_.size)
        elif config.cnn == "VGG":
            if config.model == 'basic':
                model = generate_vgg_model(l_e.classes_.size)
            else:
                model = generate_vgg_model_advance(l_e.classes_.size)

        if config.run_mode == "train":
            model = train_network(l_e.classes_.size, model, X_train_rebalanced,
                                  y_train_rebalanced, X_val, y_val,
                                  config.BATCH_SIZE, config.EPOCH_1,
                                  config.EPOCH_2)

    # Binary classification (CBIS-DDSM dataset).
    elif config.dataset == "CBIS-DDSM":
        images, labels, density, cc, mlo = import_cbisddsm_training_dataset(
            l_e)
        images_test, labels_test, density_test, cc_test, mlo_test = import_cbisddsm_testing_dataset(
            l_e)

        # Model names like "basic-3" request the 3-extra-input variant
        # (density + CC + MLO); otherwise only density is stacked in.
        if len(config.model.split('-')) > 1 and config.model.split(
                '-')[1] == '3':
            if config.run_mode == "train":
                X = np.vstack((images, density, cc, mlo))
            elif config.run_mode == "test":
                X_test = np.vstack(
                    (images_test, density_test, cc_test, mlo_test))
                X_test = X_test.transpose()
        else:
            if config.run_mode == "train":
                X = np.vstack((images, density))
            elif config.run_mode == "test":
                X_test = np.vstack((images_test, density_test))
                X_test = X_test.transpose()

        if config.run_mode == "test":
            y_test = labels_test

        if config.run_mode == "train":
            # Split training dataset into training/validation sets (75%/25% split).
            X_train, X_val, y_train, y_val = dataset_stratified_split(
                split=0.25, dataset=X.transpose(), labels=labels)

        if config.run_mode == "train":
            dataset_train = create_dataset(X_train, y_train)
            dataset_val = create_dataset(X_val, y_val)
        elif config.run_mode == "test":
            dataset_test = create_dataset(X_test, y_test)

        # Create and train CNN model: pick the generator matching the
        # cnn family (ResNet/VGG), basic/advanced variant, and whether the
        # extra density inputs are used.
        if config.cnn == "ResNet":
            if config.model.startswith('basic'):
                if len(config.model.split('-')) == 1:
                    model = generate_resnet_model(l_e.classes_.size)
                else:
                    model = generate_resnet_model_and_density(
                        l_e.classes_.size)
            else:
                if len(config.model.split('-')) == 1:
                    model = generate_resnet_model_advance(l_e.classes_.size)
                else:
                    model = generate_resnet_model_advance_and_density(
                        l_e.classes_.size)

        elif config.cnn == "VGG":
            if config.model.startswith('basic'):
                if len(config.model.split('-')) == 1:
                    model = generate_vgg_model(l_e.classes_.size)
                else:
                    model = generate_vgg_model_and_density(l_e.classes_.size)
            else:
                if len(config.model.split('-')) == 1:
                    model = generate_vgg_model_advance(l_e.classes_.size)
                else:
                    model = generate_vgg_model_advance_and_density(
                        l_e.classes_.size)

        if config.run_mode == "train":
            model = train_network(l_e.classes_.size, model, dataset_train,
                                  None, dataset_val, None, config.BATCH_SIZE,
                                  config.EPOCH_1, config.EPOCH_2)

    else:
        print_error_message()

    try:
        # Save (train) or restore (test) the model weights.
        if config.run_mode == "train":
            save_time = datetime.now().strftime("%Y%m%d%H%M")
            model.save_weights(
                "/cs/tmp/sjc29/saved_models/dataset-{}_model-{}-{}_{}.h5".
                format(config.dataset, config.model, config.cnn, save_time))
            print('save_time: ', save_time)
            print_runtime("Finish Training", round(time.time() - start_time,
                                                   2))
        elif config.run_mode == "test":
            model.load_weights(
                "/cs/tmp/sjc29/saved_models/dataset-{}_model-{}-{}_{}.h5".
                format(config.dataset, config.model, config.cnn,
                       config.MODEL_SAVE_TIME))
    except Exception as e:
        # The original handler concatenated str + exception type, which itself
        # raised a TypeError and masked the real error.
        print('save/load model error: {!r}'.format(e))

    # print config
    print_config()

    # Evaluate model results.
    if config.dataset == "mini-MIAS":
        if config.run_mode == "train":
            y_pred = make_predictions(model, X_val)
            evaluate(y_val, y_pred, l_e, config.dataset, config.CLASS_TYPE,
                     'output')
            print_runtime("Finish Prediction Validation Set",
                          round(time.time() - start_time, 2))
        elif config.run_mode == "test":
            y_pred_test = make_predictions(model, X_test)
            evaluate(y_test, y_pred_test, l_e, config.dataset,
                     config.CLASS_TYPE, 'output_test')
            print_runtime("Finish Prediction Testing Set",
                          round(time.time() - start_time, 2))
    elif config.dataset == "CBIS-DDSM":
        if config.run_mode == "train":
            y_pred = make_predictions(model, dataset_val)
            evaluate(y_val, y_pred, l_e, config.dataset, config.CLASS_TYPE,
                     'output')
            print_runtime("Finish Prediction Validation Set",
                          round(time.time() - start_time, 2))
        elif config.run_mode == "test":
            y_pred_test = make_predictions(model, dataset_test)
            evaluate(y_test, y_pred_test, l_e, config.dataset,
                     config.CLASS_TYPE, 'output_test')
            print_runtime("Finish Prediction Testing Set",
                          round(time.time() - start_time, 2))

    # Print total runtime.
    print_runtime("Total", round(time.time() - start_time, 2))
Code example #4
0
def main() -> None:
    """
    Program entry point. Parses command line arguments to decide which dataset
    and model to use, trains a VGG model or tests a previously saved one.
    :return: None.
    """
    parse_command_line_arguments()
    print_num_gpus_available()

    # Start recording time.
    start_time = time.time()

    # Create label encoder.
    l_e = create_label_encoder()

    # Run in training mode.
    if config.run_mode == "train":

        # Multiclass classification (mini-MIAS dataset)
        if config.dataset == "mini-MIAS":
            # Import entire dataset.
            images, labels = import_minimias_dataset(
                data_dir="../data/{}/images_processed".format(config.dataset),
                label_encoder=l_e)

            # Split dataset into training/test/validation sets (60%/20%/20% split).
            X_train, X_test, y_train, y_test = dataset_stratified_split(
                split=0.20, dataset=images, labels=labels)
            # Rebalance classes via augmentation before the train/val split.
            X_train_rebalanced, y_train_rebalanced = generate_image_transforms(
                X_train, y_train)
            X_train, X_val, y_train, y_val = dataset_stratified_split(
                split=0.25,
                dataset=X_train_rebalanced,
                labels=y_train_rebalanced)
            # Create and train CNN model.
            model = generate_vgg_model(l_e.classes_.size)
            model = train_network(model, X_train, y_train, X_val, y_val,
                                  config.BATCH_SIZE, config.EPOCH_1,
                                  config.EPOCH_2)

        # Binary classification (CBIS-DDSM dataset).
        elif config.dataset == "CBIS-DDSM":
            images, labels = import_cbisddsm_training_dataset(l_e)

            # Split training dataset into training/validation sets (75%/25% split).
            X_train, X_val, y_train, y_val = dataset_stratified_split(
                split=0.25, dataset=images, labels=labels)
            dataset_train = create_dataset(X_train, y_train)
            dataset_val = create_dataset(X_val, y_val)

            # Create and train CNN model (image size selects the variant).
            if config.imagesize == "small":
                model = generate_vgg_model(l_e.classes_.size)
            else:
                model = generate_vgg_model_large(l_e.classes_.size)

            model = train_network(model, dataset_train, None, dataset_val,
                                  None, config.BATCH_SIZE, config.EPOCH_1,
                                  config.EPOCH_2)

        else:
            # Unknown dataset: report and bail out. The original fell through
            # to model.save(...) and crashed with a NameError on `model`.
            print_error_message()
            return

        # Save the model
        model.save(
            "../saved_models/dataset-{}_model-{}_imagesize-{}.h5".format(
                config.dataset, config.model, config.imagesize))

        print_runtime("Total training time ", round(time.time() - start_time,
                                                    2))

        # Evaluate model results.
        if config.dataset == "mini-MIAS":
            y_pred = make_predictions(model, X_val)
            evaluate(y_val, y_pred, l_e, config.dataset, 'N-B-M')
        elif config.dataset == "CBIS-DDSM":
            y_pred = make_predictions(model, dataset_val)
            evaluate(y_val, y_pred, l_e, config.dataset, 'B-M')

    elif config.run_mode == "test":
        # NOTE(review): testing always loads this fixed CBIS-DDSM checkpoint
        # regardless of config.dataset/model — confirm intended.
        model = load_model(
            "../saved_models/classification/classification_basic_small.h5")

        images, labels = import_cbisddsm_testing_dataset(l_e)
        dataset_test = create_dataset(images, labels)

        testing_start_time = time.time()

        y_pred = make_predictions(model, dataset_test)
        print_runtime("Total testing time ",
                      round(time.time() - testing_start_time, 2))

        evaluate(labels, y_pred, l_e, config.dataset, 'B-M')