# NOTE: the third-party imports below are assumptions based on the calls in
# this file (imread/rgb2gray from scikit-image, train_test_split from
# scikit-learn, keras from TensorFlow); the project-specific helpers
# (default_img_set, full_prepare, count_blobs_with_all_methods,
# default_grain_classifier_model, network_cross_validation, find_blob_series,
# load_img_series, crop_ui, compare_detection,
# ratio_of_remaining_blobs_in_stages, plot_blob_stat, patch_plot_legend,
# patch_plot_legend_outside, mean_confusion_matrix) are assumed to come from
# the project's own modules.
import csv

import matplotlib.pyplot as plt
import numpy as np
import tikzplotlib
from skimage.color import rgb2gray
from skimage.io import imread
from sklearn.model_selection import train_test_split
from tensorflow import keras  # or `import keras`, depending on the setup


def neural_network_test_table_gen():
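    '''Write a CSV table with the test-set loss and accuracy of the grain
    classifier for each of the three blob-counting feature sets.'''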
    X, y = default_img_set()
    X = [[full_prepare(img) for img in same_sample] for same_sample in X]
    Xs = count_blobs_with_all_methods(X)
    Xs = [np.array(X_count) for X_count in Xs]
    y = np.array(y)

    row_names = ('Wszystkie detale', 'Śledzone detale',
                 'Stosunek śledzonych detali')

    with open('exports/neural_network_test.csv', 'w') as csvfile:
        filewriter = csv.writer(csvfile, delimiter=';')
        # Header
        filewriter.writerow(('Metoda zliczania detali', 'Błąd', 'Dokładność'))

        for X, name in zip(Xs, row_names):
            X = np.array(X)
            X_train, X_test, y_train, y_test = train_test_split(
                X, y, stratify=y, test_size=0.33, random_state=1)

            model = default_grain_classifier_model()
            model.compile(
                optimizer='adam',
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])
            model.fit(X_train, y_train, epochs=300, verbose=0)

            score = model.evaluate(X_test, y_test, verbose=0)
            filewriter.writerow((name, *score))


def neural_network_validation_table_gen():
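    '''Write a CSV table with the 3-fold cross-validated loss and accuracy
    of the grain classifier for each of the three blob-counting feature
    sets.'''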
    X, y = default_img_set()
    X = [[full_prepare(img) for img in same_sample] for same_sample in X]
    Xs = count_blobs_with_all_methods(X)
    Xs = [np.array(X_count) for X_count in Xs]
    y = np.array(y)

    row_names = ('Wszystkie detale', 'Śledzone detale',
                 'Stosunek śledzonych detali')

    with open('exports/neural_network_validation.csv', 'w') as csvfile:
        filewriter = csv.writer(csvfile, delimiter=';')
        # Header
        filewriter.writerow(('Metoda zliczania detali', 'Błąd', 'Dokładność'))

        for X, name in zip(Xs, row_names):
            model = default_grain_classifier_model()
            model.compile(
                optimizer='adam',
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])

            scores = np.array(network_cross_validation(model, X, y, 3))
            score = np.round(scores.mean(axis=0), 2)
            filewriter.writerow((name, *score))


def neural_network_trainig_plots_gen():
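    '''Plot the training history (accuracy and loss) of the grain classifier
    for each blob-counting feature set and export the plots with
    tikzplotlib.'''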
    X, y = default_img_set()
    X = [[full_prepare(img) for img in same_sample] for same_sample in X]
    Xs = count_blobs_with_all_methods(X)

    files_suffixes = ('all', 'remaining', 'ratio')

    for X, suffix in zip(Xs, files_suffixes):
        X = np.array(X)
        y = np.array(y)

        X_train, _, y_train, _ = train_test_split(
            X, y, stratify=y, test_size=0.33, random_state=1)

        model = default_grain_classifier_model()
        model.compile(
            optimizer='adam',
            loss='sparse_categorical_crossentropy',
            metrics=['accuracy'])
        history = model.fit(X_train, y_train, epochs=300, verbose=0)

        plt.figure()
        plt.title('Historia treningu modelu')
        plt.xlabel('Epoka')

        plt.plot(history.history['accuracy'], c='b')
        plt.plot(history.history['loss'], c='r')

        plt.legend(('Dokładność', 'Błąd'))

        tikzplotlib.save('exports/neural_network_trainig_' + suffix)


def blob_analysis_plots_gen():
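    '''Plot the blob statistics over time (all blobs, remaining blobs and
    the ratio of remaining blobs) and export the plots with tikzplotlib.'''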
    X, y = default_img_set()
    X = [[full_prepare(img) for img in same_sample] for same_sample in X]

    Xa, Xr, Xp = count_blobs_with_all_methods(X)

    colors = ('r', 'g', 'b', 'y')
    labels = ('E5R', 'E11R', 'E6R', 'E16R')

    plot_blob_stat(Xa, y, colors)
    plt.title('Liczba wszystkich detali')
    plt.xlabel('Minuty')
    plt.ylabel('Liczba detali')
    patch_plot_legend(colors, labels)
    tikzplotlib.save('exports/blob_analysis_all')

    plot_blob_stat(Xr, y, colors)
    plt.title('Liczba śledzonych detali')
    plt.xlabel('Minuty')
    plt.ylabel('Liczba detali')
    patch_plot_legend(colors, labels)
    tikzplotlib.save('exports/blob_analysis_remaining')

    plot_blob_stat(Xp, y, colors)
    plt.title('Pozostały procent śledzonych detali')
    plt.xlabel('Minuty')
    plt.ylabel('Procent śledzonych detali')
    patch_plot_legend(colors, labels)
    tikzplotlib.save('exports/blob_analysis_ratio')


def main():
    '''Plot the number of detected blobs using three ways of counting.'''
    X, y = default_img_set()
    X = [[full_prepare(img) for img in same_sample] for same_sample in X]

    Xa, Xr, Xp = count_blobs_with_all_methods(X)

    colors = ('r', 'g', 'b', 'y')
    labels = ('E5R', 'E6R', 'E11R', 'E16R')

    plot_blob_stat(Xa, y, colors)
    plt.title('Number of all blobs')
    plt.xlabel('minutes')
    plt.ylabel('number of all detected blobs')
    patch_plot_legend(colors, labels)

    plot_blob_stat(Xr, y, colors)
    plt.title('Number of remaining blobs')
    plt.xlabel('minutes')
    plt.ylabel('remaining blobs')
    patch_plot_legend(colors, labels)

    plot_blob_stat(Xp, y, colors)
    plt.title('Ratio of remaining blobs')
    plt.xlabel('minutes')
    plt.ylabel('Ratio of remaining blobs')
    patch_plot_legend(colors, labels)

    plt.show()


def blob_ratio_table_gen():
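    '''Write a CSV table with the ratio of remaining blobs per minute for
    each sample.'''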
    sample_names = ('104_E5R', '106_E11R', '107_E6R', '111_E16R')

    with open('exports/blob_ratio.csv', 'w') as csvfile:
        filewriter = csv.writer(csvfile, delimiter=';')
        # Header
        filewriter.writerow(('Próbka', 'Minuta 0', 'Minuta 1', 'Minuta 2',
                             'Minuta 3', 'Minuta 4'))
        for name in sample_names:
            imgs = load_img_series('img/' + name)
            imgs_prep = [full_prepare(img) for img in imgs]
            stages_rem = find_blob_series(imgs_prep)
            ratios = ratio_of_remaining_blobs_in_stages(stages_rem)
            ratios = (round(ratio, 2) for ratio in ratios)
            filewriter.writerow((name, *ratios))


def blob_detection_compare_plots_gen():
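    '''Compare LoG, DoG and DoH blob detection on a single image and export
    the annotated plots with tikzplotlib.'''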
    img = imread('img/104_E5R_0.jpg')
    img_crop = crop_ui(rgb2gray(img))
    img_prep = full_prepare(img)
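    # Run all three detectors (LoG, DoG and DoH) on the prepared image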
    blobs_list = compare_detection(img_prep)

    suffixes = ('LoG', 'DoG', 'DoH')

    for blobs, suffix in zip(blobs_list, suffixes):
        _, ax = plt.subplots()
        plt.title('Liczba wykrytych detali: {}'.format(len(blobs)))
        plt.imshow(img_crop, cmap=plt.get_cmap('gray'))
        for blob in blobs:
            y, x, r = blob
            c = plt.Circle((x, y), r, color='r', fill=False)
            ax.add_patch(c)

        ax.set_axis_off()
        tikzplotlib.save('exports/blob_detection_compare_' + suffix)


def confusion_matrix_table_gen():
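    '''Export the mean confusion matrix of the grain classifier trained on
    the ratio-of-remaining-blobs features.'''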
    X, y = default_img_set()
    X = [[full_prepare(img) for img in same_sample] for same_sample in X]
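    # Keep only the third feature set: the ratio of remaining blobs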
    X = count_blobs_with_all_methods(X)[2]
    X = np.array(X)
    y = np.array(y)

    model = default_grain_classifier_model()
    model.compile(
        optimizer='adam',
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'])

    mcm = mean_confusion_matrix(model, X, y, 3)
    np.savetxt(
        "exports/mean_confusion_matrix_ratio.csv",
        mcm,
        fmt='%.2f',
        delimiter=";")


def blob_count_plots_gen():
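    '''Map the tracked blobs of every stage onto the first image and, for
    each minute, overlay all detected blobs with the remaining ones, saving
    each figure to the exports directory.'''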
    imgs = load_img_series('img/104_E5R')
    imgs_prep = [full_prepare(img) for img in imgs]
    imgs_crop = [crop_ui(rgb2gray(img)) for img in imgs]

    stages_all = find_blob_series(imgs_prep, only_remaining=False)
    stages_rem = find_blob_series(imgs_prep)

    # Map stages on first image
    colors = ('blue', 'blueviolet', 'magenta', 'crimson', 'red')
    fig = plt.figure(frameon=False)
    ax = fig.add_axes([0, 0, 1, 1])
    plt.imshow(imgs_crop[0], cmap=plt.get_cmap('gray'))
    for stage, color in zip(stages_rem, colors):
        for blob in stage:
            y, x, r = blob
            c = plt.Circle((x, y), r, color=color, fill=False)
            ax.add_patch(c)
    ax.set_axis_off()
    plt.savefig('exports/blob_tracker', dpi=300)

    # Show the two counting methods combined for comparison
    loop_set = enumerate(zip(stages_rem, stages_all, imgs_crop))
    for i, (stage_rem, stage_all, img) in loop_set:
        fig = plt.figure(frameon=False)
        ax = fig.add_axes([0, 0, 1, 1])
        plt.imshow(img, cmap=plt.get_cmap('gray'))
        for blob_all in stage_all:
            y, x, r = blob_all
            c = plt.Circle((x, y), r, color='b', fill=False)
            ax.add_patch(c)
        for blob_rem in stage_rem:
            y, x, r = blob_rem
            c = plt.Circle((x, y), r, color='r', fill=False)
            ax.add_patch(c)

        ax.set_axis_off()
        plt.savefig('exports/blob_tracker_min_' + str(i))


def network_comparison_table_gen():
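    '''Write a CSV table comparing network configurations: activation
    functions, number of hidden layers, neurons per hidden layer and
    optimizers.'''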
    X, y = default_img_set()
    X = [[full_prepare(img) for img in same_sample] for same_sample in X]
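    # One feature vector per sample: the ratio of remaining blobs at each
    # stage of cooling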
    X = [
        ratio_of_remaining_blobs_in_stages(find_blob_series(img_series))
        for img_series in X
    ]
    X = np.array(X)
    y = np.array(y)

    with open('exports/neural_network_comparison.csv', 'w') as csvfile:
        filewriter = csv.writer(csvfile, delimiter=';')
        # Header
        filewriter.writerow(('Parametr', 'Wartość', 'Błąd', 'Dokładność'))

        # Activation functions
        activation_funcs = ('sigmoid', 'relu', 'elu', 'tanh')
        for func in activation_funcs:
            model = keras.Sequential([
                keras.layers.Dense(5, activation=func),
                keras.layers.Dense(256, activation=func),
                keras.layers.Dense(128, activation=func),
                keras.layers.Dense(4, activation='softmax')
            ])

            model.compile(
                optimizer='adam',
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])
            scores = np.array(network_cross_validation(model, X, y, 3))
            score = np.round(scores.mean(axis=0), 2)

            filewriter.writerow(('Funkcja aktywacji', func, *score))

        # Number of hidden layers
        models = []
        models.append(
            keras.Sequential([
                keras.layers.Dense(5, activation='tanh'),
                keras.layers.Dense(512, activation='tanh'),
                keras.layers.Dense(4, activation='softmax')
            ]))
        models.append(
            keras.Sequential([
                keras.layers.Dense(5, activation='tanh'),
                keras.layers.Dense(256, activation='tanh'),
                keras.layers.Dense(128, activation='tanh'),
                keras.layers.Dense(4, activation='softmax')
            ]))

        for model, i in zip(models, (1, 2)):
            model.compile(
                optimizer='adam',
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])
            scores = np.array(network_cross_validation(model, X, y, 3))
            score = np.round(scores.mean(axis=0), 2)

            filewriter.writerow(('Liczba warstw ukrytych', i, *score))

        # Number of neurons in hidden layers
        neurons_num = ((128, 64), (256, 128), (512, 126))

        for num in neurons_num:
            model = keras.Sequential([
                keras.layers.Dense(5, activation='tanh'),
                keras.layers.Dense(num[0], activation='tanh'),
                keras.layers.Dense(num[1], activation='tanh'),
                keras.layers.Dense(4, activation='softmax')
            ])

            model.compile(
                optimizer='adam',
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])
            scores = np.array(network_cross_validation(model, X, y, 3))
            score = np.round(scores.mean(axis=0), 2)

            filewriter.writerow(('Liczba neuronów w warstwach ukrytych',
                                 '{} i {}'.format(num[0], num[1]), *score))

        # Optimizer
        optimizers = ('sgd', 'adam')
        for opt in optimizers:
            # Build a fresh model for each optimizer so both runs start from
            # newly initialised weights
            model = default_grain_classifier_model()
            model.compile(
                optimizer=opt,
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])
            scores = np.array(network_cross_validation(model, X, y, 3))
            score = np.round(scores.mean(axis=0), 2)

            filewriter.writerow(('Algorytm uczenia', opt, *score))


def main():
    '''Demo blob tracking with various ways of counting blobs.'''
    # Load images
    imgs = load_img_series('img/104_E5R')
    # Prepare images for processing
    imgs_prep = [full_prepare(img) for img in imgs]
    # Prepare cropped images for displaying
    imgs_crop = [crop_ui(rgb2gray(img)) for img in imgs]

    # Find blobs for each stage of cooling, preserving only the remaining ones
    stages_rem = find_blob_series(imgs_prep)

    # Map stages on first image
    colors = ('blue', 'blueviolet', 'magenta', 'crimson', 'red')
    _, ax = plt.subplots(1)
    plt.title("Blob detection with DoH")
    plt.imshow(imgs_crop[0], cmap=plt.get_cmap('gray'))
    for stage, color in zip(stages_rem, colors):
        for blob in stage:
            y, x, r = blob
            c = plt.Circle((x, y), r, color=color, linewidth=0.75, fill=False)
            ax.add_patch(c)
    labels = ('Minute 0', 'Minute 1', 'Minute 2', 'Minute 3', 'Minute 4')
    patch_plot_legend_outside(colors, labels)
    print(ratio_of_remaining_blobs_in_stages(stages_rem))

    # Show stages on subplots
    _, ax = plt.subplots(2, 3, figsize=(12, 7))
    ax = ax.flatten()

    for idx, (stage, img) in enumerate(zip(stages_rem, imgs_crop)):
        ax[idx].imshow(img, cmap=plt.get_cmap('gray'))
        ax[idx].set_title("Minute: {}, blobs: {}".format(idx, len(stage)))
        for blob in stage:
            y, x, r = blob
            c = plt.Circle((x, y), r, color='r', linewidth=0.75, fill=False)
            ax[idx].add_patch(c)
    ax[-1].set_axis_off()
    plt.tight_layout()

    # Find all blobs for every stage of cooling
    stages_all = find_blob_series(imgs_prep, only_remaining=False)

    # Show stages on subplots
    _, ax = plt.subplots(2, 3, figsize=(12, 7))
    ax = ax.flatten()

    for idx, (stage, img) in enumerate(zip(stages_all, imgs_crop)):
        ax[idx].imshow(img, cmap=plt.get_cmap('gray'))
        ax[idx].set_title("Minute: {}, blobs: {}".format(idx, len(stage)))
        for blob in stage:
            y, x, r = blob
            c = plt.Circle((x, y), r, color='r', linewidth=0.75, fill=False)
            ax[idx].add_patch(c)
    ax[-1].set_axis_off()
    plt.tight_layout()

    # Show the two counting methods combined for comparison
    _, ax = plt.subplots(2, 3, figsize=(10, 7))
    ax = ax.flatten()

    # Show stages on subplots
    loop_set = enumerate(zip(stages_rem, stages_all, imgs_crop))
    for idx, (stage_rem, stage_all, img) in loop_set:
        ax[idx].imshow(img, cmap=plt.get_cmap('gray'))
        ax[idx].set_title("Minute: {}, all blobs: {}, rem blobs: {}".format(
            idx, len(stage_all), len(stage_rem)))
        for blob_all in stage_all:
            y, x, r = blob_all
            c = plt.Circle((x, y), r, color='b', linewidth=0.75, fill=False)
            ax[idx].add_patch(c)
        for blob_rem in stage_rem:
            y, x, r = blob_rem
            c = plt.Circle((x, y), r, color='r', linewidth=0.75, fill=False)
            ax[idx].add_patch(c)
    ax[-1].set_axis_off()
    plt.tight_layout()

    plt.show()