from plot import plot_contour, PlottedSample, plot_marginal_fit, plot_dependence_functions
from contour_statistics import points_outside, sort_points_to_form_continous_line
from read_write import read_dataset
from datetime import datetime
import matplotlib
from palettable.colorbrewer.qualitative import Set2_3 as my_colors
from palettable.colorbrewer.sequential import BuPu_5 as my_seq_colors
from scipy.stats import gaussian_kde
from virocon import get_OMAE2020_Hs_Tz, GlobalHierarchicalModel

# Read datasets A, B and C.
DATASET_CHARS = ["A", "B", "C"]
for dataset_char in DATASET_CHARS:
    file_path = "datasets/" + dataset_char + ".txt"
    hs, tz, label_hs, label_tz = read_dataset(file_path)
    label_hs = "Significant wave height (m)"
    label_tz = "Zero-up-crossing period (s)"
    every_x_point = 1
    hs = hs[0::every_x_point]
    tz = tz[0::every_x_point]

    # Create figure, start with scatter plot.
    matplotlib.rcParams["svg.fonttype"] = "none"  # To avoid outputting the font as a path.
    matplotlib.rcParams.update({"font.size": 6})
    matplotlib.rcParams["font.sans-serif"] = "Arial"
    matplotlib.rcParams["font.family"] = "sans-serif"
    matplotlib.rcParams["mathtext.fontset"] = "custom"
    matplotlib.rcParams["mathtext.rm"] = "Arial"
    matplotlib.rcParams["mathtext.it"] = "Arial:italic"
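# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the virocon and
# gaussian_kde imports above suggest that the predefined OMAE 2020 Hs-Tz model
# is fitted and that the scatter plot is colored by point density. The lines
# below show how that could look, assuming virocon's predefined-model API
# (get_OMAE2020_Hs_Tz returning distribution descriptions, fit descriptions
# and semantics); they would sit inside the dataset loop, where hs, tz,
# label_hs and label_tz are defined.
# ---------------------------------------------------------------------------
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Fit the predefined OMAE 2020 Hs-Tz model to the current dataset.
dist_descriptions, fit_descriptions, semantics = get_OMAE2020_Hs_Tz()
model = GlobalHierarchicalModel(dist_descriptions)
data = pd.DataFrame({"hs": hs, "tz": tz})  # first column Hs, second column Tz
model.fit(data, fit_descriptions=fit_descriptions)

# Scatter plot of the sample, colored by a kernel density estimate.
points = np.vstack([tz, hs])
density = gaussian_kde(points)(points)
fig, ax = plt.subplots(figsize=(5, 5), dpi=150)
ax.scatter(tz, hs, c=density, s=2, cmap=my_seq_colors.mpl_colormap)
ax.set_xlabel(label_tz)
ax.set_ylabel(label_hs)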
import numpy as np
import matplotlib.pyplot as plt

from viroconcom.fitting import Fit
from viroconcom.contours import IFormContour
from plot import plot_contour, PlottedSample, plot_marginal_fit, plot_dependence_functions
from contour_statistics import points_outside
from read_write import read_dataset, determine_file_name_e1, write_contour, read_contour

# Read dataset A, B or C.
DATASET_CHAR = 'A'
file_path = '../datasets/' + DATASET_CHAR + '.txt'
sample_hs, sample_tz, label_hs, label_tz = read_dataset(file_path)

# Define the structure of the probabilistic model that will be fitted to the
# dataset. We will use the model that is recommended in DNV-RP-C205 (2010) on
# page 38 and that is called 'conditional modeling approach' (CMA).
dist_description_hs = {'name': 'Weibull_3p',
                       'dependency': (None, None, None),
                       'width_of_intervals': 0.5}
dist_description_tz = {'name': 'Lognormal_SigmaMu',
                       'dependency': (0, None, 0),             # Shape, Location, Scale
                       'functions': ('exp3', None, 'power3')   # Shape, Location, Scale
                       }

# Fit the model to the data.
fit = Fit((sample_hs, sample_tz), (dist_description_hs, dist_description_tz))
dist0 = fit.mul_var_dist.distributions[0]
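# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original excerpt): IFormContour is
# imported above, but the excerpt ends right after the fit. One way the
# 20-year IFORM contour could be computed from the fitted joint distribution
# is shown below. The positional constructor arguments (return period in
# years, sea state duration in hours, number of contour points) and the
# 1-hour duration are assumptions; the layout of the coordinates attribute
# differs between viroconcom versions.
# ---------------------------------------------------------------------------
return_period = 20     # years
state_duration = 1     # hours (assumed sea state duration)
n_contour_points = 90
iform_contour = IFormContour(fit.mul_var_dist, return_period, state_duration,
                             n_contour_points)

# Older viroconcom releases nest the coordinates one level deeper,
# e.g. iform_contour.coordinates[0][0]; adjust to the installed version.
contour_hs = iform_contour.coordinates[0]
contour_tz = iform_contour.coordinates[1]

plt.scatter(sample_tz, sample_hs, s=2, c='lightgray')
plt.plot(contour_tz, contour_hs, color='blue')
plt.xlabel(label_tz)
plt.ylabel(label_hs)
plt.show()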
import numpy as np
import matplotlib.pyplot as plt

from viroconcom.fitting import Fit
from viroconcom.contours import IFormContour
from plot import PlottedSample, plot_contour, plot_marginal_fit, plot_dependence_functions
from contour_statistics import points_outside
from read_write import read_dataset, determine_file_name_e1, write_contour, read_contour

# Read dataset D, E or F.
DATASET_CHAR = 'D'
file_path = '../datasets/' + DATASET_CHAR + '.txt'
sample_v, sample_hs, label_v, label_hs = read_dataset(file_path)

# Define the structure of the probabilistic model that will be fitted to the
# dataset. We will use the model that is recommended in DNV-RP-C205 (2010) on
# page 38 and that is called 'conditional modeling approach' (CMA).
dist_description_hs = {'name': 'Weibull_3p',
                       'dependency': (None, None, None),
                       'width_of_intervals': 0.5}
dist_description_v = {'name': 'Weibull_2p',
                      'dependency': (0, None, 0),               # Shape, Location, Scale
                      'functions': ('power3', None, 'power3')   # Shape, Location, Scale
                      }

# Fit the model to the data.
fit = Fit((sample_hs, sample_v), (dist_description_hs, dist_description_v))
dist0 = fit.mul_var_dist.distributions[0]
print('First variable: ' + dist0.name + ' with '
      + ' scale: ' + str(dist0.scale) + ', '
      + ' shape: ' + str(dist0.shape) + ', '
      + ' location: ' + str(dist0.loc))  # Closing of the truncated statement assumed.
import numpy as np
import matplotlib.pyplot as plt

from viroconcom.fitting import Fit
from viroconcom.contours import IFormContour
from statistics import median
from plot import plot_contour, PlottedSample
from read_write import read_dataset

# Read dataset A.
DATASET_CHAR = 'A'
file_path = 'datasets/' + DATASET_CHAR + '.txt'
a_hs, a_tz, label_hs, label_tz = read_dataset(file_path)

# Define the structure of the probabilistic model that will be fitted to the
# dataset. We will use the model that is recommended in DNV-RP-C205 (2010) on
# page 38 and that is called 'conditional modeling approach' (CMA).
dist_description_hs = {'name': 'Weibull_3p',
                       'dependency': (None, None, None),
                       'width_of_intervals': 0.5}
dist_description_tz = {'name': 'Lognormal_SigmaMu',
                       'dependency': (0, None, 0),             # Shape, Location, Scale
                       'functions': ('exp3', None, 'power3')   # Shape, Location, Scale
                       }

# Fit the hs-tz model to the data.
fit = Fit((a_hs, a_tz), (dist_description_hs, dist_description_tz))
import numpy as np
import matplotlib.pyplot as plt

from plot import PlottedSample, plot_confidence_interval
from read_write import read_dataset, determine_file_name_e2, read_contour

NR_OF_YEARS_TO_DRAW = 1  # Must be 1, 5 or 25.

# Read dataset D.
file_path = '../datasets/D.txt'
dataset_d_v, dataset_d_hs, label_v, label_hs = read_dataset(file_path)

# Read the contours that have been computed previously from csv files.
folder_name = 'contour_coordinates/'
file_name_median = determine_file_name_e2('John', 'Doe', NR_OF_YEARS_TO_DRAW, 'median')
file_name_bottom = determine_file_name_e2('John', 'Doe', NR_OF_YEARS_TO_DRAW, 'bottom')
file_name_upper = determine_file_name_e2('John', 'Doe', NR_OF_YEARS_TO_DRAW, 'upper')
(contour_v_median, contour_hs_median) = read_contour(folder_name + file_name_median)
(contour_v_bottom, contour_hs_bottom) = read_contour(folder_name + file_name_bottom)
(contour_v_upper, contour_hs_upper) = read_contour(folder_name + file_name_upper)

# Plot the sample, the median contour and the confidence interval.
fig = plt.figure(figsize=(5, 5), dpi=150)
ax = fig.add_subplot(111)
plotted_sample = PlottedSample(x=np.asarray(dataset_d_v),
                               y=np.asarray(dataset_d_hs),  # Keyword arguments after x
                               ax=ax)                        # are assumed from context.
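# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original excerpt): the original script
# presumably draws the confidence interval with the plot_confidence_interval
# helper imported above, whose signature is not shown here. As a plain
# matplotlib alternative for orientation, the lines below draw the sample,
# the median contour and the bottom/upper contours on the axes created above.
# ---------------------------------------------------------------------------
ax.scatter(dataset_d_v, dataset_d_hs, s=2, c='lightgray', label='Dataset D')

# Close each contour polygon by repeating its first point.
ax.plot(np.append(contour_v_median, contour_v_median[0]),
        np.append(contour_hs_median, contour_hs_median[0]),
        color='blue', label='Median contour')
for cv, chs, lbl in [(contour_v_bottom, contour_hs_bottom, 'Bottom of CI'),
                     (contour_v_upper, contour_hs_upper, 'Upper end of CI')]:
    ax.plot(np.append(cv, cv[0]), np.append(chs, chs[0]),
            linestyle='--', color='blue', label=lbl)

ax.set_xlabel(label_v)
ax.set_ylabel(label_hs)
ax.legend()
plt.show()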
import numpy as np
import matplotlib.pyplot as plt

from plot import plot_contour, PlottedSample
from contour_statistics import points_outside
from read_write import read_dataset, read_contour, determine_file_name_e1, \
    determine_file_name_e2

DATASET_CHARS = ['A', 'B', 'C', 'D', 'E', 'F']
for dataset_char in DATASET_CHARS:
    # Read the dataset.
    file_path = 'datasets/' + dataset_char + '.txt'
    sample_x, sample_y, label_x, label_y = read_dataset(file_path)

    # Differentiate between sea state and wind-wave contours.
    if dataset_char in ('A', 'B', 'C'):
        return_period_long_tr = 20
    else:
        return_period_long_tr = 50

    # Read the contours from the csv files.
    folder_name = 'contour-coordinates/'
    file_name_1 = determine_file_name_e1('Andreas', 'Haselsteiner', dataset_char, 1)
    file_name_long_tr = determine_file_name_e1('Andreas', 'Haselsteiner', dataset_char,
                                               return_period_long_tr)
    (contour_x_1, contour_y_1) = read_contour(folder_name + file_name_1)
    (contour_x_long, contour_y_long) = read_contour(folder_name + file_name_long_tr)
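# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original excerpt): points_outside is
# imported above but not yet called in the visible part of the loop. The
# function below shows one way to count sample points outside a closed
# contour with matplotlib.path.Path; the repository's own points_outside
# helper may be implemented differently (and may return the points rather
# than a count). The usage lines assume they sit inside the dataset loop.
# ---------------------------------------------------------------------------
from matplotlib.path import Path

def n_points_outside_sketch(contour_x, contour_y, x, y):
    """Count how many (x, y) sample points lie outside the contour polygon."""
    polygon = Path(np.column_stack((contour_x, contour_y)), closed=True)
    inside = polygon.contains_points(np.column_stack((x, y)))
    return int(np.sum(~inside))

n_outside_1 = n_points_outside_sketch(contour_x_1, contour_y_1, sample_x, sample_y)
print(dataset_char + ': ' + str(n_outside_1) + ' points outside the 1-year contour.')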