# --- Figure rendering ---------------------------------------------------------
# Render figure text with LaTeX (requires a working LaTeX installation).
LATEX_IN_FIGURES = True
if LATEX_IN_FIGURES:
    enable_latex_infigures()

# --- Parallel processing ------------------------------------------------------
# Enable parallel processing (or not).
ENABLE_MULTI = True
# The original image is split into number_of_threads_rows x
# number_of_threads_columns sub-images processed in parallel. In general the
# optimal parameters satisfy:
#   number_of_threads_rows * number_of_threads_columns == number of cores
number_of_threads_rows = 4
number_of_threads_columns = 4
if os.cpu_count() != number_of_threads_rows * number_of_threads_columns:
    print('WARNING: not all cpu are used')

# --- Data ---------------------------------------------------------------------
image, ground_truth_original, X, Y = load_UAVSAR(PATH, DEBUG, FULL_TIME_SERIES)

# --- Parameters ---------------------------------------------------------------
# DEBUG mode shrinks the (window size, rank) grid for fast iteration.
if DEBUG:
    WINDOWS_SIZES = np.arange(5, 9, 2)
    RANKS_LIST = np.arange(1, 5, 2)
else:
    WINDOWS_SIZES = np.arange(5, 11, 2)
    RANKS_LIST = np.arange(1, 7, 2)
print('WINDOWS_SIZES=', WINDOWS_SIZES)
print('RANKS_LIST=', RANKS_LIST)

# Probability-of-false-alarm threshold used for detection.
PFA_THRESHOLD = 0.1
# NOTE(review): 'n_rc' is presumably the column count ('n_c' in the sibling
# script) — confirm downstream usage before renaming.
n_r, n_rc, p, T = image.shape
function_to_compute = compute_several_statistics
base_path = os.path.dirname(os.path.abspath(__file__)) base_path = os.path.join(base_path, 'intensities_maps') path_intensities = os.path.join(base_path, 'scene_' + str(SCENE_NUMBER) + '.npy') if not os.path.exists(path_intensities): print('ERROR with path of detection map') print('path=', path_intensities) sys.exit(1) # Load detection map intensities = np.load(path_intensities) # Load ground truth _, ground_truth_original, X, Y = load_UAVSAR(PATH, DEBUG, False, scene_number=SCENE_NUMBER, verbose=False) m_r = ground_truth_original.shape[0] - intensities.shape[0] m_c = ground_truth_original.shape[1] - intensities.shape[1] ground_truth = ground_truth_original[int(m_r / 2):-int(m_r / 2), int(m_c / 2):-int(m_c / 2)] # Compute threshold NUMBER_OF_POINTS = 500 thresholds = np.zeros(len(STATISTIC_NAMES)) pfas = np.zeros(len(STATISTIC_NAMES)) pds = np.zeros(len(STATISTIC_NAMES)) detection_maps = np.zeros(intensities.shape) for i_s, statistic in enumerate(STATISTIC_NAMES):
# --- Parallel-processing layout -------------------------------------------------
# The image is split into sub-images treated in parallel. In general the optimal
# parameters satisfy:
#   number_of_threads_rows * number_of_threads_columns == number of cores
number_of_threads_rows = 4
number_of_threads_columns = 4
if os.cpu_count() != number_of_threads_rows * number_of_threads_columns:
    print('WARNING: not all cpu are used')

# --- Data -----------------------------------------------------------------------
# DEBUG mode for fast debugging (use a small patch of 200x200 pixels)
DEBUG = False
PATH = 'data/UAVSAR/'
# if true: use the full time series, else: use only the first and last images
FULL_TIME_SERIES = False
SCENE_NUMBER = 1
image, _, X, Y = load_UAVSAR(PATH, DEBUG, FULL_TIME_SERIES, SCENE_NUMBER)

# --- Parameters ------------------------------------------------------------------
# NOTE(review): 'n_rc' is presumably the column count ('n_c' in the sibling
# script) — confirm downstream usage before renaming.
n_r, n_rc, p, T = image.shape
windows_mask = np.ones((5, 5))
m_r, m_c = windows_mask.shape
function_to_compute = compute_several_statistics

# AIC rule rank over: image 1, image 2, time series (image 1 and 2)
statistic_names = [r'EDC']
# One rank-estimation callable per statistic name.
statistic_list = [rank_estimation_reshape] * len(statistic_names)
# NOTE(review): the original '[(stat) for stat in statistic_names]' does NOT
# build tuples — '(stat)' is just 'stat' — so each args entry is the bare name
# string; list(statistic_names) produces the identical value.
args_list = list(statistic_names)
number_of_statistics = len(statistic_list)
function_args = [statistic_list, args_list]
import random

if __name__ == '__main__':
    # Activate latex in figures (or not)
    LATEX_IN_FIGURES = False
    if LATEX_IN_FIGURES:
        enable_latex_infigures()

    # data
    # DEBUG mode for fast debugging (use a small patch of 200x200 pixels)
    DEBUG = False
    PATH = 'data/UAVSAR/'
    # if true: use the full time series, else: use only the first and last
    # images of the time series
    FULL_TIME_SERIES = False
    image, _, X, Y = load_UAVSAR(PATH, DEBUG, FULL_TIME_SERIES)

    # Parameters
    # image shape: rows, columns, polarimetric channels, time samples
    n_r, n_c, p, T = image.shape
    # Sliding-window mask and its dimensions.
    windows_mask = np.ones((5, 5))
    m_r, m_c = windows_mask.shape
    # Number of pixels in the window (samples available for rank estimation).
    N = m_r * m_c
    # Model-order selection criteria to compare.
    METHODS = ['AIC', 'BIC', 'AICc', 'EDC']

    # Repeatedly pick a random pixel far enough from the border so that the
    # full mask fits around it.
    while True:
        i_r = random.choice(list(range(int(m_r / 2), n_r - int(m_r / 2))))
        i_c = random.choice(list(range(int(m_c / 2), n_c - int(m_c / 2))))
        # Obtaining data corresponding to the neighborhood defined by the mask
        # NOTE(review): this statement is truncated — it continues beyond this
        # excerpt.
        local_data = image[i_r - int(m_r / 2):i_r + int(m_r / 2) + 1,