Example #1
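These test methods assume the imports below; the module path for `plots` is an assumption (`csep.utils.plots` in recent PyCSEP versions) and may differ.

# Imports assumed by the test snippets in these examples; the `plots` module
# path is an assumption and may vary across PyCSEP versions.
import random
import string
from unittest import mock

import matplotlib.pyplot
import numpy

from csep.utils import plots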
    def test_SingleNTestPlot(self):

        # Build a mock Poisson N-test result with random expected/observed counts
        expected_val = numpy.random.randint(0, 20)
        observed_val = numpy.random.randint(0, 20)
        Ntest_result = mock.Mock()
        Ntest_result.name = 'Mock NTest'
        Ntest_result.sim_name = 'Mock SimName'
        Ntest_result.test_distribution = ['poisson', expected_val]
        Ntest_result.observed_statistic = observed_val
        matplotlib.pyplot.close()
        ax = plots.plot_poisson_consistency_test(Ntest_result)

        # The plot should land on the current axes, with one y-tick label per
        # result and the test name as the title
        self.assertEqual(matplotlib.pyplot.gca().collections, ax.collections)
        self.assertEqual([i.get_text() for i in matplotlib.pyplot.gca().get_yticklabels()],
                         [i.sim_name for i in [Ntest_result]])
        self.assertEqual(matplotlib.pyplot.gca().get_title(), Ntest_result.name)
Example #2
    def test_MultiSTestPlot(self, show=False):

        s_plots = numpy.random.randint(1, 20)
        Stests = []
        for n in range(s_plots):
            Stest_result = mock.Mock()  # Mock class with random attributes
            Stest_result.name = 'Mock STest'
            Stest_result.sim_name = ''.join(random.choice(string.ascii_letters) for _ in range(8))
            Stest_result.test_distribution = numpy.random.uniform(-1000, 0, numpy.random.randint(3, 500)).tolist()
            Stest_result.observed_statistic = numpy.random.uniform(-1000, 0)  # random observed statistic
            if numpy.random.random() < 0.02:  # occasionally simulate an infinite value
                Stest_result.observed_statistic = -numpy.inf
            Stests.append(Stest_result)
        matplotlib.pyplot.close()
        ax = plots.plot_poisson_consistency_test(Stests)
        # y-tick labels run bottom-up, so reverse the list before comparing
        Stests.reverse()

        self.assertEqual(matplotlib.pyplot.gca().collections, ax.collections)
        self.assertEqual([i.get_text() for i in matplotlib.pyplot.gca().get_yticklabels()],
                         [i.sim_name for i in Stests])
        self.assertEqual(matplotlib.pyplot.gca().get_title(), Stests[0].name)
Example #3
    def test_MultiNTestPlot(self, show=False):

        n_plots = numpy.random.randint(1, 20)
        Ntests = []
        for n in range(n_plots):
            Ntest_result = mock.Mock()  # Mock class with random attributes
            Ntest_result.name = 'Mock NTest'
            Ntest_result.sim_name = ''.join(random.choice(string.ascii_letters) for _ in range(8))
            Ntest_result.test_distribution = ['poisson', numpy.random.randint(0, 20)]
            Ntest_result.observed_statistic = numpy.random.randint(0, 20)
            Ntests.append(Ntest_result)
        matplotlib.pyplot.close()
        ax = plots.plot_poisson_consistency_test(Ntests)
        # y-tick labels run bottom-up, so reverse the list before comparing
        Ntests.reverse()

        self.assertEqual(matplotlib.pyplot.gca().collections, ax.collections)
        self.assertEqual([i.get_text() for i in matplotlib.pyplot.gca().get_yticklabels()],
                         [i.sim_name for i in Ntests])
        self.assertEqual(matplotlib.pyplot.gca().get_title(), Ntests[0].name)
        if show:
            matplotlib.pyplot.show()
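For context, methods like these would live inside a `unittest.TestCase` subclass; a minimal sketch of the harness, with a hypothetical class name:

import unittest

class TestPoissonPlots(unittest.TestCase):  # hypothetical class name
    # ... the test_* methods above go here ...
    pass

if __name__ == '__main__':
    unittest.main()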
Example #4
# Store evaluation results
# ------------------------
#
# PyCSEP provides an easy way to store objects in JSON format using :func:`csep.write_json`. The evaluations can be read
# back into the program for plotting using :func:`csep.load_evaluation_result`.

csep.write_json(spatial_test_result, 'example_spatial_test.json')
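
# The stored result can be loaded back later; a minimal sketch, reusing the
# filename written above:
spatial_test_result = csep.load_evaluation_result('example_spatial_test.json')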

####################################################################################################################################
# Plot spatial test results
# -------------------------
#
# We provide the function :func:`csep.utils.plotting.plot_poisson_consistency_test` to visualize the evaluation results from
# consistency tests.

ax = plots.plot_poisson_consistency_test(spatial_test_result,
                                         plot_args={'xlabel': 'Spatial likelihood'})
plt.show()

####################################################################################################################################
# Plot ROC Curves
# -----------------------
#
# We can also plot Receiver Operating Characteristic (ROC) curves based on the forecast and the testing catalog.
# In the figure below, the false positive rate is the normalized cumulative forecast rate, after sorting cells in
# decreasing order of rate. The true positive rate is the normalized cumulative area. The dashed line is the ROC
# curve for a uniform forecast, meaning the likelihood of an earthquake occurring is the same at every location.
# The further a forecast's ROC curve is from the uniform forecast, the more specific the forecast is. By comparing
# the forecast ROC curve against a catalog, one can evaluate whether the forecast is more or less specific
# (or smooth) at different levels of seismic rate.
#
# Note: This figure just shows an example of plotting an ROC curve with a catalog forecast.
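
# A minimal sketch of such a call; it assumes `forecast` and `catalog` objects
# are already loaded, and that this PyCSEP version exposes `plots.plot_ROC`:
ax = plots.plot_ROC(forecast, catalog, show=True)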
Example #5

####################################################################################################################################
# Example 3: Plot multiple evaluation results
# -------------------------------------------

####################################################################################################################################
# Load L-test results from example .json files (See
# :doc:`gridded_forecast_evaluation` for information on calculating and storing evaluation results)

L_results = [csep.load_evaluation_result(i) for i in datasets.l_test_examples]
args = {'figsize': (6,5),
        'title': r'$\mathcal{L}-\mathrm{test}$',
        'title_fontsize': 18,
        'xlabel': 'Log-likelihood',
        'xticks_fontsize': 9,
        'ylabel_fontsize': 9,
        'linewidth': 0.8,
        'capsize': 3,
        'hbars': True,
        'tight_layout': True}

####################################################################################################################################
# Description of plot arguments can be found in :func:`plot_poisson_consistency_test`.
# We set ``one_sided_lower=True``, as is usual for an L-test, where the model is rejected if the observed
# statistic falls in the lower tail of the simulated distribution.
ax = plots.plot_poisson_consistency_test(L_results, one_sided_lower=True, plot_args=args)
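
# The returned matplotlib Axes can be customized further or saved; a minimal
# sketch using standard matplotlib calls (the filename is illustrative):
ax.figure.savefig('l_test_results.png', dpi=300)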



# Since we have two different grids here, we run the tests separately for each grid.

# For the multi-resolution grid, link the region to the catalog.
catalog.region = forecast_multi_grid.region
spatial_test_multi_res_result = poisson.spatial_test(forecast_multi_grid,
                                                     catalog)
number_test_multi_res_result = poisson.number_test(forecast_multi_grid,
                                                   catalog)

# For the single-resolution grid, link the region to the catalog.
catalog.region = forecast_single_grid.region
spatial_test_single_res_result = poisson.spatial_test(forecast_single_grid,
                                                      catalog)
number_test_single_res_result = poisson.number_test(forecast_single_grid,
                                                    catalog)

####################################################################################################################################
# Plot spatial test results
# -------------------------
#
# We provide the function :func:`csep.utils.plotting.plot_poisson_consistency_test` to visualize the evaluation results from
# consistency tests.

stest_result = [spatial_test_single_res_result, spatial_test_multi_res_result]
ax_spatial = plots.plot_poisson_consistency_test(
    stest_result, plot_args={'xlabel': 'Spatial likelihood'})

ntest_result = [number_test_single_res_result, number_test_multi_res_result]
ax_number = plots.plot_poisson_consistency_test(
    ntest_result, plot_args={'xlabel': 'Number of Earthquakes'})
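
# Both figures can then be displayed; this assumes matplotlib.pyplot is
# imported as plt, as in the earlier snippet:
plt.show()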