Example #1
0
def choose_plot():
    """Show the plotting menu and dispatch to the chosen plot function.

    Re-prompts until the user picks a valid option (1-5); the chosen
    plot is drawn once and the function returns.
    """
    print("***** Hello World *****")
    print("Press (1) to plot an equation chart")
    print("Press (2) to plot bar chart")
    print("Press (3) to plot dispersion bar chart")
    print("Press (4) to plot a line chart")
    print("Press (5) to plot a scatter chart")
    number = 0
    # Bug fix: the original condition was range(1, 3), which treated
    # options 3-5 as invalid and kept re-prompting even after their
    # plots had been drawn.
    while number not in range(1, 6):
        try:
            number = int(input("Which one? "))
        except ValueError:
            # Non-numeric input: fall through to the "invalid" branch.
            number = 0
        if number == 1:
            print("Plotting math equation...")
            plot_math_function()
        elif number == 2:
            print("Plotting Moons...")
            plot_bar()
        elif number == 3:
            print("Plotting grades...")
            plot_grades()
        elif number == 4:
            print("Plotting line charts...")
            plot_line_chart()
        elif number == 5:
            print("Plotting scatter...")
            plot_scatter()
        else:
            print("Choose a valid option")
Example #2
0
def main():
    """Train the model on the configured data, plot diagnostics, and save it.

    Relies on module-level configuration: ``args``, ``DATA_PATH``,
    ``IMG_PATH``, ``MODEL_PATH``, ``model_name``, ``epoch_num`` and ``lr``.
    """
    train, val = load_data(DATA_PATH + 'natural_data.npz',
                           DATA_PATH + 'dot_data.npz',
                           mode=args.data_mode)

    x_train, y_train = train[0], train[1]
    x_val, y_val = val[0], val[1]

    # hyperparameters
    drop_ps = [0, 0, 0, 0, 0]  # dropout probabilities, one per layer
    epochs = epoch_num
    batch_size = 20
    print('Training {} for {} epochs'.format(model_name, epoch_num))

    # the model
    model, history = train_model(drop_ps, lr, x_train, y_train, x_val, y_val,
                                 epochs, batch_size)
    print(model.summary())

    # path for saving images.
    # Bug fix: os.mkdir raised FileExistsError when re-running an existing
    # model name; makedirs(..., exist_ok=True) is idempotent.
    os.makedirs(os.path.join(IMG_PATH, model_name), exist_ok=True)

    # plot the losses and velocity vectors
    plot_loss(history, epochs, model_name)
    plot_scatter(model, x_train, y_train, x_val, y_val, model_name)

    # Optional: save the training-set predictions. The original used a
    # dead `if False:` block; an explicit flag documents the toggle.
    save_predictions = False
    if save_predictions:
        # save the predictions
        y_pred_train = model.predict(x_train)
        np.save('y_pred_all.npy', y_pred_train)

    # save the model
    model.save(MODEL_PATH + model_name)
Example #3
0
    def input_corr(self, sample):
        """Scatter-plot and print the correlation between the first two
        modalities of *sample*.

        Parameters
        ----------
        sample : torch.Tensor
            Batch with modalities along axis 1 — assumes at least two
            modalities; only indices 0 and 1 are used. TODO confirm shape.
        """
        # Flattened (1-D iterator) views of modality 0 (x) and modality 1 (y).
        x = sample[:,0,::].detach().cpu().numpy().flat
        y = sample[:,1,::].detach().cpu().numpy().flat

        plot_scatter(x,y)

        # np.random.shuffle(x)  # (debug) break the pairing to sanity-check r≈0

        # Pearson correlation coefficient between the two modalities.
        print(np.corrcoef(y,x)[0, 1])
Example #4
0
def estimate_gradient_from_literature():
    """Compile literature metallicity-gradient measurements (CALIFA, SAMI,
    MaNGA) and plot them against stellar-mass range and radial extent."""

    # Per-survey compilations: (logM_lo, logM_hi, gradient, R_e extent).
    califa = (np.array([10.6, 10.5, 10.3]),
              np.array([11.8, 11.9, 11.9]),
              np.array([-0.1, -0.248, -0.2]),
              np.array([1, 1, 2]))

    sami = (np.array([9.6, 9.5]),
            np.array([11.7, 11.7]),
            np.array([-0.31, -0.275]),
            np.array([5, 2]))

    manga = (np.array(
        [9, 9, 8.4, 9.1, 9.9, 9.9, 10.9, 9.5, 8.8, 9, 9.4]),
             np.array(
        [11.9, 11.9, 11.9, 13.1, 10.8, 10.8, 12, 11.9, 11.3, 11.8, 12]),
             np.array([
                 -0.12, -0.11, -0.09, -0.102, -0.14, -0.104, -0.202, -0.112,
                 -0.106, -0.092, -0.18
             ]),
             np.array([1.5, 1.5, 2, 1, 1, 1, 1, 1.5, 1.5, 1, 1]))

    # Stack the three surveys column-wise: lower mass, upper mass,
    # gradient result, and radial extent.
    low_mass, high_mass, results, extents = (
        np.concatenate(cols) for cols in zip(califa, sami, manga))

    # Asymmetric x error bars: zero on the low side, mass span on the high.
    mass_span = high_mass - low_mass
    xerr = np.array([np.zeros(mass_span.shape), mass_span])

    plt.plot_scatter_err(low_mass,
                         results,
                         xerr,
                         extents,
                         'o',
                         cbar_label=r'$R_{\rm e}$ extent',
                         xlabel=r'Mass Range $(\log M/M_{\odot})$',
                         ylabel=r'$\nabla \log(Z/Z_{\odot})$')

    plt.plot_scatter(extents,
                     results,
                     low_mass,
                     '',
                     'o',
                     cbar_label=r'Lowest Mass $(\log M/M_{\odot})$',
                     xlabel=r'$R_{\rm e}$ extent',
                     ylabel=r'$\nabla \log(Z/Z_{\odot})$')

    return
Example #5
0
def metallicity_normalization_estimation():
    """Estimate per-galaxy metallicity-normalization offsets for the HFF
    sample and plot the residuals before and after applying the offset.

    Reads the Gallazzi-logZ table, runs ``metallicity_normalization_helper``
    per galaxy, and stores the offset-corrected value in ``centrallogZ``
    (the final table write is left disabled).
    """

    HFF = Table.read('output/tables/nbCGs_GallazzilogZ_from-Shipley-mass.fits')

    # First pass: helper output for each (cluster, ID) row, fed the scalar
    # catalog logZ of the first matching row.
    vals = []
    for cluster, ID in zip(HFF['cluster'], HFF['ID']):
        # Boolean row mask selecting this galaxy; [0] takes the first match.
        mask = (HFF['cluster'] == cluster) & (HFF['ID'] == ID)
        vals.append(
            metallicity_normalization_helper(cluster, ID,
                                             HFF['GallazzilogZ'][mask][0]))

    # Residual between catalog logZ and the helper's first-pass estimate.
    plt.plot_scatter(HFF['logM'],
                     HFF['GallazzilogZ'] - np.array(vals),
                     HFF['bins'],
                     'HFF',
                     'o',
                     cbar_label='Number of Annuli',
                     xlabel=r'$\log(M/M_{\odot})$',
                     ylabel=r'$\Delta \langle \log(Z/Z_{\odot}) \rangle$',
                     xmin=7.9,
                     xmax=11.5,
                     loc=2)

    # Per-row offset used to re-anchor the second pass below.
    offset = HFF['GallazzilogZ'] - np.array(vals)

    # Second pass: repeat with the offset-corrected input.
    # NOTE(review): offset[mask] is an array here, whereas the first pass
    # passed a scalar ([0]) — confirm the helper broadcasts as intended.
    finals = []
    for cluster, ID in zip(HFF['cluster'], HFF['ID']):
        mask = (HFF['cluster'] == cluster) & (HFF['ID'] == ID)
        finals.append(
            metallicity_normalization_helper(
                cluster, ID, HFF['GallazzilogZ'][mask][0] + offset[mask]))

    # Residual after correction; should cluster around zero if the offset
    # removed the normalization bias.
    plt.plot_scatter(HFF['logM'],
                     HFF['GallazzilogZ'] - np.array(finals),
                     HFF['bins'],
                     'HFF',
                     'o',
                     cbar_label='Number of Annuli',
                     xlabel=r'$\log(M/M_{\odot})$',
                     ylabel=r'$\Delta \langle \log(Z/Z_{\odot}) \rangle$',
                     xmin=7.9,
                     xmax=11.5,
                     loc=2)

    # Store the offset-corrected central logZ back into the table.
    HFF['centrallogZ'] = HFF['GallazzilogZ'] + offset
    # HFF.write('output/tables/nbCGs_GallazzilogZ_from-Shipley-mass_final.fits')

    return
Example #6
0
def print_mohajerani_all_metrics(settings, metrics):
    """Prints out metrics for CALFIN inputs and graphs the results.

    Aggregates per-point and per-image deviation statistics (pixels and
    meters) and IoU scores with 95% confidence intervals, optionally plots
    diagnostic figures, then writes the summary to both the log file and
    stdout.
    """
    dest_path_qa = settings['dest_path_qa']
    scaling = settings['scaling']
    saving = settings['saving']
    plotting = settings['plotting']
    validation_files = settings['validation_files']
    # Denominators for the 95% confidence intervals (1.96 * std / sqrt(n)).
    # Bug fix: these previously stored len(...) rather than sqrt(len(...)),
    # which understated the reported CIs by a factor of sqrt(n).
    sqrt_point_samples = np.sqrt(len(metrics['validation_distances_meters']))
    sqrt_image_samples = np.sqrt(len(metrics['mean_deviations_meters']))
    mean_deviation_points_pixels = np.nanmean(
        metrics['validation_distances_pixels'])
    mean_deviation_points_meters = np.nanmean(
        metrics['validation_distances_meters'])
    mean_deviation_images_pixels = np.nanmean(
        metrics['mean_deviations_pixels'])
    mean_deviation_images_meters = np.nanmean(
        metrics['mean_deviations_meters'])
    mean_edge_iou_score = np.nanmean(metrics['validation_edge_ious'])
    mean_mask_iou_score = np.nanmean(metrics['validation_mask_ious'])
    # 95% CI half-widths for each mean above.
    std_deviation_points_pixels = np.nanstd(
        metrics['validation_distances_pixels']) / sqrt_point_samples * 1.96
    std_deviation_points_meters = np.nanstd(
        metrics['validation_distances_meters']) / sqrt_point_samples * 1.96
    std_deviation_images_pixels = np.nanstd(
        metrics['mean_deviations_pixels']) / sqrt_image_samples * 1.96
    std_deviation_images_meters = np.nanstd(
        metrics['mean_deviations_meters']) / sqrt_image_samples * 1.96
    std_edge_iou_score = np.nanstd(
        metrics['validation_edge_ious']) / sqrt_image_samples * 1.96
    std_mask_iou_score = np.nanstd(
        metrics['validation_mask_ious']) / sqrt_image_samples * 1.96
    # Medians are reported alongside the means as outlier-robust summaries.
    median_mean_deviation_points_pixels = np.nanmedian(
        metrics['validation_distances_pixels'])
    median_mean_deviation_points_meters = np.nanmedian(
        metrics['validation_distances_meters'])
    median_mean_deviation_images_pixels = np.nanmedian(
        metrics['mean_deviations_pixels'])
    median_mean_deviation_images_meters = np.nanmedian(
        metrics['mean_deviations_meters'])
    median_edge_iou_score = np.nanmedian(metrics['validation_edge_ious'])
    median_mask_iou_score = np.nanmedian(metrics['validation_mask_ious'])

    if plotting:
        #Print histogram of all distance errors
        plot_histogram(metrics['validation_distances_meters'],
                       "all_mean_deviations_meters", dest_path_qa, saving,
                       scaling)

        #Print scatterplot of resolution errors
        plot_scatter(metrics['resolution_deviation_array'],
                     "Validation Resolution vs Deviations", dest_path_qa,
                     saving)
        plot_scatter(metrics['resolution_iou_array'],
                     "Validation Resolution vs IoU", dest_path_qa, saving)
        plt.show()

    number_no_front_images = len(
        settings['negative_image_names']
    )  #13 images are clouded/undetectable in the CALFIN validation set of 152 images
    number_valid_images = len(validation_files) - number_no_front_images
    percent_images_with_fronts = (
        1 - metrics['image_skip_count'] / len(validation_files)) * 100
    number_images_with_fronts = str(number_valid_images -
                                    metrics['image_skip_count'])
    total_images = str(number_valid_images)

    #Print output to file and to shell: the same report is written to each
    #stream by temporarily rebinding sys.stdout.
    default = sys.stdout
    log_file_name = os.path.join(settings['dest_root_path'],
                                 settings['log_file_name'])
    output_streams = [open(log_file_name, "a", encoding='utf-8'), default]
    for i in range(len(output_streams)):
        sys.stdout = output_streams[i]
        print("mean distance (averaged over points): {:.2f} ± {:.2f} meters".
              format(mean_deviation_points_meters,
                     std_deviation_points_meters))
        print("mean distance (averaged over images): {:.2f} ± {:.2f} meters".
              format(mean_deviation_images_meters,
                     std_deviation_images_meters))
        print("mean distance (averaged over points): {:.2f} ± {:.2f} pixels".
              format(mean_deviation_points_pixels,
                     std_deviation_points_pixels))
        print("mean distance (averaged over images): {:.2f} ± {:.2f} pixels".
              format(mean_deviation_images_pixels,
                     std_deviation_images_pixels))
        print("mean distance (median over points): {:.2f} meters".format(
            median_mean_deviation_points_meters))
        print("mean distance (median over images): {:.2f} meters".format(
            median_mean_deviation_images_meters))
        print("mean distance (median over points): {:.2f} pixels".format(
            median_mean_deviation_points_pixels))
        print("mean distance (median over images): {:.2f} pixels".format(
            median_mean_deviation_images_pixels))
        print(
            "mean front Jaccard index (Intersection over Union): {:.4f} ± {:.4f}"
            .format(mean_edge_iou_score, std_edge_iou_score))
        # NOTE(review): the mask IoU statistics (mean_mask_iou_score,
        # std_mask_iou_score, median_mask_iou_score) are computed above but
        # np.nan is printed here instead — confirm whether this placeholder
        # is intentional.
        print(
            "mean ice/ocean Jaccard index (Intersection over Union): {:.4f} ± {:.4f}"
            .format(np.nan, np.nan))
        print("median front Jaccard index (Intersection over Union): {:.4f}".
              format(median_edge_iou_score))
        print(
            "median ice/ocean Jaccard index (Intersection over Union): {:.4f}".
            format(np.nan))

        #Print final statistics
        print('mask_confidence_strength_threshold',
              settings['mask_confidence_strength_threshold'],
              'edge_confidence_strength_threshold',
              settings['edge_confidence_strength_threshold'])
        print(
            'image_skip_count:', metrics['image_skip_count'],
            'front skip count:', metrics['no_detection_skip_count'] +
            metrics['confidence_skip_count'], 'no_detection_skip_count:',
            metrics['no_detection_skip_count'], "confidence_skip_count:",
            metrics['confidence_skip_count'])
        print('total fronts:', metrics['front_count'])

        print(
            'True Positives: {}, False Positives: {}, False Negatives: {}, True Negatives: {}'
            .format(metrics['true_positives'], metrics['false_positive'],
                    metrics['false_negatives'], metrics['true_negatives']))
        print('% images with fronts: {:.2f} ({}/{})'.format(
            percent_images_with_fronts, number_images_with_fronts,
            total_images))
        # Close the log file after its pass; the real stdout (i == 1) is
        # restored by the final loop iteration and must stay open.
        if i == 0:
            output_streams[i].close()
Example #7
0
 def scatter_latent(self, proj, gname, x_test):
     """Encode *x_test* into latent space and scatter-plot it via the
     plotting helper, forwarding *proj* and *gname*."""
     latent = self.encode(x_test)
     plotting.plot_scatter(latent, proj=proj, gname=gname)
Example #8
0
 def report(self, x_test, y_test):
     """Plot the latent-space scatter of *x_test* (with *y_test*) and then
     the generator's learned manifold."""
     latent = self.encode(x_test)
     plotting.plot_scatter(latent, y_test)
     plotting.plot_manifold(self.generator)
def print_calfin_all_metrics(settings, metrics):
    """Print deviation and IoU statistics for the CALFIN validation set and
    plot the corresponding histogram/scatter diagnostics.

    Reads paths and flags from *settings* and the accumulated per-point /
    per-image arrays from *metrics*; writes the summary to stdout only.
    """
    dest_path_qa = settings['dest_path_qa']
    scaling = settings['scaling']
    saving = settings['saving']
    validation_files = settings['validation_files']
    # Means over all points vs. means of the per-image averages, in both
    # pixel and meter units; nan-aware to skip undetected cases.
    mean_deviation_points_pixels = np.nanmean(
        metrics['validation_distances_pixels'])
    mean_deviation_points_meters = np.nanmean(
        metrics['validation_distances_meters'])
    mean_deviation_images_pixels = np.nanmean(
        metrics['mean_deviations_pixels'])
    mean_deviation_images_meters = np.nanmean(
        metrics['mean_deviations_meters'])
    mean_edge_iou_score = np.nanmean(metrics['validation_ious'])
    # Medians as outlier-robust counterparts of the means above.
    median_mean_deviation_points_pixels = np.nanmedian(
        metrics['validation_distances_pixels'])
    median_mean_deviation_points_meters = np.nanmedian(
        metrics['validation_distances_meters'])
    median_mean_deviation_images_pixels = np.nanmedian(
        metrics['mean_deviations_pixels'])
    median_mean_deviation_images_meters = np.nanmedian(
        metrics['mean_deviations_meters'])
    median_edge_iou_score = np.nanmedian(metrics['validation_ious'])
    print("mean deviation (averaged over points): {:.2f} meters".format(
        mean_deviation_points_meters))
    print("mean deviation (averaged over images): {:.2f} meters".format(
        mean_deviation_images_meters))
    print("mean deviation (averaged over points): {:.2f} pixels".format(
        mean_deviation_points_pixels))
    print("mean deviation (averaged over images): {:.2f} pixels".format(
        mean_deviation_images_pixels))
    print("mean deviation (median over points): {:.2f} meters".format(
        median_mean_deviation_points_meters))
    print("mean deviation (median over images): {:.2f} meters".format(
        median_mean_deviation_images_meters))
    print("mean deviation (median over points): {:.2f} pixels".format(
        median_mean_deviation_points_pixels))
    print("mean deviation (median over images): {:.2f} pixels".format(
        median_mean_deviation_images_pixels))
    print("mean Jaccard index (Intersection over Union): {:.4f}".format(
        mean_edge_iou_score))
    print("median Jaccard index (Intersection over Union): {:.4f}".format(
        median_edge_iou_score))

    #Print histogram of all distance errors
    plot_histogram(metrics['validation_distances_meters'],
                   "all_mean_deviations_meters", dest_path_qa, saving, scaling)

    #Print scatterplot of resolution errors
    plot_scatter(metrics['resolution_deviation_array'],
                 "Validation Resolution vs Deviations", dest_path_qa, saving)
    plot_scatter(metrics['resolution_iou_array'],
                 "Validation Resolution vs IoU", dest_path_qa, saving)
    plt.show()

    #Print final statistics
    print('mask_confidence_strength_threshold',
          settings['mask_confidence_strength_threshold'],
          'edge_confidence_strength_threshold',
          settings['edge_confidence_strength_threshold'])
    print(
        'image_skip_count:', metrics['image_skip_count'], 'front skip count:',
        metrics['no_detection_skip_count'] + metrics['confidence_skip_count'],
        'no_detection_skip_count:', metrics['no_detection_skip_count'],
        "confidence_skip_count:", metrics['confidence_skip_count'])
    print('total fronts:', metrics['front_count'])

    # Image-count bookkeeping: negative images are excluded from the valid
    # total before computing the detection percentage.
    number_no_front_images = len(
        settings['negative_image_names']
    )  #13 images are clouded/undetectable in the CALFIN validation set of 152 images
    number_valid_images = len(validation_files) - number_no_front_images
    percent_images_with_fronts = (
        1 - metrics['image_skip_count'] / len(validation_files)) * 100
    number_images_with_fronts = str(number_valid_images -
                                    metrics['image_skip_count'])
    total_images = str(number_valid_images)

    print(
        'True Positives: {}, False Positives: {}, False Negatives: {}, True Negatives: {}'
        .format(metrics['true_positives'], metrics['false_positive'],
                metrics['false_negatives'], metrics['true_negatives']))
    print('% images with fronts: {:.2f} ({}/{})'.format(
        percent_images_with_fronts, number_images_with_fronts, total_images))