    # Crop the label image so that its shape matches the shape of the cropped image.
    labels_true_cropped = labels_true[0: img.shape[0] - img.shape[0] % 500,
                                      0: img.shape[1] - img.shape[1] % 500]
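
    # Illustrative sketch (assumption, not shown in this snippet): if 'img_cropped'
    # has not been created earlier, it can be derived the same way, so that the image
    # and the labels cover exactly the same area.
    img_cropped = img[0: img.shape[0] - img.shape[0] % 500,
                      0: img.shape[1] - img.shape[1] % 500]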

    # Get pixel-wise scores for labels 'building', 'highway' and 'no_label'.
    scores = lib.predict_pixelwise_class_scores_for_image_advanced_1(img=img_cropped,
                                                                     net=net,
                                                                     net_type='predicting',
                                                                     channel_means=channel_means,
                                                                     label_nr=3)
    # Predict the label for each pixel in 'img_cropped' by choosing the label with the highest score.
    labels_predicted = np.argmax(scores, 2).astype(np.uint8)
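
    # Toy illustration (hypothetical values, not part of the original example):
    # 'scores' has shape (height, width, n_classes), so argmax over axis 2 picks,
    # for every pixel, the index of the class with the highest score.
    _demo_scores = np.array([[[0.1, 0.7, 0.2]]])   # one pixel, three class scores
    assert np.argmax(_demo_scores, 2)[0, 0] == 1   # class index 1 has the highest score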

    # Calculate statistics for measuring the goodness of the prediction.
    stats_temp = lib.calculate_goodness_of_classification(labels_true=labels_true_cropped,
                                                          labels_predicted=labels_predicted,
                                                          labels_dict=labels_dict)

    # Cast the dict object 'stats_temp' to a pd.DataFrame object and append it
    # to 'stats_all', which stores statistics for all images of this test round.
    stats_all = stats_all.append(pd.DataFrame.from_dict(stats_temp, orient='index').T)
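    # Note: 'DataFrame.append' is deprecated and was removed in pandas 2.0; with a
    # newer pandas the same step could be written, for example, as:
    # stats_all = pd.concat([stats_all, pd.DataFrame.from_dict(stats_temp, orient='index').T])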

    # Plot predicted class labels over aerial view image.
    lib.plot_sat_image_and_labels(img=img_cropped,
                                  lab=labels_predicted,
                                  alpha=0.5,
                                  plot_img=False,
                                  store_img=True,
                                  plot_with='Image',
                                  dir_name=RESULTS_ROOT,
                                  file_name='test_image_' + area_name + RUN_NAME + str(number_of_training_steps))
Example #2
            img_test_FCN_ready = img_test_FCN_ready.transpose(2, 0, 1).reshape(1, 3, 500, 500)
            labels_test_FCN_ready = labels_test.reshape(1, 1, 500, 500)
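
            # Toy illustration (hypothetical array, not part of the original example):
            # transpose(2, 0, 1) reorders the image from HWC to CHW and the reshape adds
            # the batch axis, giving the NCHW layout (1, 3, 500, 500) that Caffe expects.
            _demo_hwc = np.arange(500 * 500 * 3).reshape(500, 500, 3)
            _demo_nchw = _demo_hwc.transpose(2, 0, 1).reshape(1, 3, 500, 500)
            assert _demo_nchw[0, 2, 0, 0] == _demo_hwc[0, 0, 2]   # channel axis now comes first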

            # Load image into FCN.
            solver.net.blobs['data'].data[...] = img_test_FCN_ready

            # Send test image through the FCN.
            solver.net.forward()

            # Get predicted class labels for each pixel of this round's test image.
            labels_predicted = solver.net.blobs['score'].data.argmax(1)[0, :, :].astype(np.uint8)
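
            # Toy illustration (hypothetical blob, not part of the original example):
            # the 'score' blob is NCHW, so argmax over axis 1 (the class axis) gives one
            # class index per pixel, and [0, :, :] drops the batch dimension.
            _demo_blob = np.zeros((1, 3, 2, 2))
            _demo_blob[0, 2, 0, 0] = 1.0          # class 2 scores highest at pixel (0, 0)
            assert _demo_blob.argmax(1)[0, 0, 0] == 2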

            # Get statistics for classification success of this test image.
            # Note that we will get an object of type 'dict' back.
            stats_new_dict = lib.calculate_goodness_of_classification(labels_true=labels_test.astype(np.uint8),
                                                                      labels_predicted=labels_predicted,
                                                                      labels_dict=labels_dict
                                                                      )

            # Cast this dict object to a pd.DataFrame object.
            stats_new_DataFrame = pd.DataFrame.from_dict(stats_new_dict, orient='index').T


            # Append this pd.DataFrame object to the pd.DataFrame object 'stats_current_training'
            # which stores the statistics of all test images of this test round.
            stats_current_training = stats_current_training.append(stats_new_DataFrame, ignore_index=True)

        # Average stats in 'stats_current_training' column-wise over all the images which were
        # used in this test round and append this average to 'stats_all'.
        stats_all = stats_all.append(stats_current_training.T.mean(axis=1), ignore_index=True)
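        # Note: 'stats_current_training.T.mean(axis=1)' equals 'stats_current_training.mean(axis=0)',
        # i.e. one column-wise mean per metric. With a pandas version that no longer has
        # 'DataFrame.append', an equivalent would be, for example:
        # stats_all = pd.concat([stats_all, stats_current_training.mean(axis=0).to_frame().T],
        #                       ignore_index=True)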

        # Save stats to disk.

Example #3
    labels_true_cropped = labels_true_cropped.astype(np.int64)

    # Get pixel-wise scores for labels 'building', 'highway' and 'no_label'.
    scores = lib.predict_pixelwise_class_scores_for_image_advanced_1(img=img_cropped,
                                                                     net=net,
                                                                     net_type='predicting',
                                                                     channel_means=channel_means)
    # Predict label for each pixel in 'img_cropped'.
    labels_predicted = np.argmax(scores, 2)

    # Calculate statistics for measuring the goodness of the prediction.
    stats_temp = lib.calculate_goodness_of_classification(labels_true=labels_true_cropped,
                                                          labels_predicted=labels_predicted,
                                                          return_precision=True,
                                                          return_recall=True,
                                                          return_f1=True,
                                                          return_pixel_accuracy=False,
                                                          return_mean_accuracy=False,
                                                          return_mean_iu=False,
                                                          return_frequency_weighted_iu=False)

    # Cast the dict object 'stats_temp' to a pd.DataFrame object and append it
    # to 'stats_all', which stores statistics for all images of this test round.
    stats_temp_Data_Frame = pd.DataFrame(stats_temp.values()).T
    stats_temp_Data_Frame.columns = stats_temp.keys()
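    # Note: this relies on 'stats_temp.keys()' and 'stats_temp.values()' iterating in the
    # same order (guaranteed for Python dicts); a more direct one-row construction would
    # be, for example:
    # stats_temp_Data_Frame = pd.DataFrame([stats_temp])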
    stats_all = stats_all.append(stats_temp_Data_Frame)

    # Plot predicted class labels over aerial view image.
    lib.plot_sat_image_and_labels(img=img_cropped,
                                  lab_1=labels_predicted,
                                  lab_2=None,
Example #4
            # Load image into FCN.
            solver.net.blobs['data'].data[...] = img_test_FCN_ready

            # Send test image through the FCN.
            solver.net.forward()

            # Get predicted class labels for each pixel of this round's test image.
            labels_predicted = solver.net.blobs['score'].data.argmax(1)[0, :, :]

            # Get statistics for classification success of this test image.
            # Note that we will get an object of type 'dict' back.
            stats_new_dict = lib.calculate_goodness_of_classification(labels_true=labels_test.astype(np.int64),
                                                                      labels_predicted=labels_predicted,
                                                                      return_precision=True,
                                                                      return_recall=True,
                                                                      return_f1=True,
                                                                      return_pixel_accuracy=False,
                                                                      return_mean_accuracy=False,
                                                                      return_mean_iu=False,
                                                                      return_frequency_weighted_iu=False
                                                                      )

            # Cast this dict object to a pd.DataFrame object.
            stats_new_DataFrame = pd.DataFrame(stats_new_dict.values()).T
            stats_new_DataFrame.columns = stats_new_dict.keys()

            # Append this pd.DataFrame object to the pd.DataFrame object 'stats_current_training'
            # which stores the statistics of all test images of this test round.
            stats_current_training = stats_current_training.append(stats_new_DataFrame, ignore_index=True)

        # Average stats in 'stats_current_training' column-wise over all the images which were
        # used in this test round and append this average to 'stats_all'.

Example #5

    labels_google_osm_bld = np.ones(shape=labels_google_osm.shape[0:2], dtype=bool)
    labels_google_osm_bld[labels_google_osm[:, :, 0] != 255] = False
    labels_google_osm_bld[labels_google_osm[:, :, 1] != 0] = False
    labels_google_osm_bld[labels_google_osm[:, :, 2] != 0] = False

    labels_google_osm_hgh = np.ones(shape=labels_google_osm.shape[0:2], dtype=bool)
    labels_google_osm_hgh[labels_google_osm[:, :, 0] != 0] = False
    labels_google_osm_hgh[labels_google_osm[:, :, 1] != 0] = False
    labels_google_osm_hgh[labels_google_osm[:, :, 2] != 255] = False

    labels_google_osm_out = np.zeros(shape=labels_google_osm.shape[0:2]).astype(np.uint8)
    labels_google_osm_out[labels_google_osm_bld] = 1
    labels_google_osm_out[labels_google_osm_hgh] = 2
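
    # Equivalent, more compact sketch (assuming the same colour convention in RGB order:
    # pure red marks buildings, pure blue marks highways):
    # labels_google_osm_out = np.zeros(labels_google_osm.shape[:2], dtype=np.uint8)
    # labels_google_osm_out[np.all(labels_google_osm == (255, 0, 0), axis=2)] = 1
    # labels_google_osm_out[np.all(labels_google_osm == (0, 0, 255), axis=2)] = 2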

    # Calculate some basic statistics.
    stats_temp = lib.calculate_goodness_of_classification(labels_true=labels_google_osm_out,
                                                          labels_predicted=labels_ISPRS,
                                                          labels_dict=labels_dict)

    stats_all = stats_all.append(pd.DataFrame.from_dict(stats_temp, orient='index').T)

    # lib.plot_true_and_pred_labels(true_lab=labels_ISPRS,
    #                               prd_lab=labels_google_osm_out,
    #                               plot_img=False,
    #                               store_img=True,
    #                               dir_name=STORE_DATA_ROOT + area_name + '/',
    #                               file_name=area_name + "_osm_isprs_labels_overlay"
    #                               )


# Calculate average statistics
stats_average = stats_all.T.mean(axis=1)
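# Equivalent, arguably clearer form (assuming all columns of 'stats_all' are numeric):
# stats_average = stats_all.mean(axis=0)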