# Optional data exploration: compute per-class mean/std spectra, dump the
# means to a text file, and plot both sets of curves.
if DATAVIZ:
    # Data exploration : compute and show the mean spectrums
    mean_spectrums, std_spectrums = explore_spectrums(
        img, gt, LABEL_VALUES, viz, ignored_labels=IGNORED_LABELS)
    # One mean spectrum per line; class labels are not written, only values.
    with open("mean_spectrum_Salinas.txt", 'w') as f:
        for spectrum in mean_spectrums.values():
            f.write(str(spectrum))
            f.write('\n')
    plot_spectrums(mean_spectrums, viz, title='Mean spectrum/class')
    plot_spectrums_(std_spectrums, viz, title='Std spectrum/class')

results = []
# run the experiment several times
for run in range(N_RUNS):
    if TRAIN_GT is not None and TEST_GT is not None:
        train_gt = open_file(TRAIN_GT)
        test_gt = open_file(TEST_GT)
    elif TRAIN_GT is not None:
        train_gt = open_file(TRAIN_GT)
        test_gt = np.copy(gt)
        w, h = test_gt.shape
        # Remove the training pixels from the test ground truth.
        test_gt[(train_gt > 0)[:w, :h]] = 0
    elif TEST_GT is not None:
        test_gt = open_file(TEST_GT)
        # BUGFIX: train_gt was left undefined on this path (NameError as soon
        # as it is used later in the run); derive it as the complement of the
        # test mask, mirroring the TRAIN_GT-only branch above.
        train_gt = np.copy(gt)
        w, h = train_gt.shape
        train_gt[(test_gt > 0)[:w, :h]] = 0
    else:
        # Sample random training spectra; crop the borders so every sampled
        # pixel keeps a full PATCH_SIZE neighborhood inside the image.
        gt_ = gt[(PATCH_SIZE // 2):-(PATCH_SIZE // 2),
                 (PATCH_SIZE // 2):-(PATCH_SIZE // 2)]
        train_gt, test_gt = sample_gt(gt_, SAMPLE_PERCENTAGE, mode=SAMPLING_MODE)
# Optional data exploration: compute and plot the per-class mean spectra.
if DATAVIZ:
    # Data exploration : compute and show the mean spectrums
    mean_spectrums = explore_spectrums(
        img, gt, LABEL_VALUES, viz, ignored_labels=IGNORED_LABELS)
    plot_spectrums(mean_spectrums, viz, title="Mean spectrum/class")

results = []
# run the experiment several times
for run in range(N_RUNS):
    if TRAIN_GT is not None and TEST_GT is not None:
        print("Using existing train/test split...")
        # NOTE(review): assumes the .mat files store the masks under the
        # 'train_gt' / 'test_gt' keys — confirm against the data files.
        train_gt = open_file(TRAIN_GT)['train_gt']
        test_gt = open_file(TEST_GT)['test_gt']
    elif TRAIN_GT is not None:
        train_gt = open_file(TRAIN_GT)
        test_gt = np.copy(gt)
        w, h = test_gt.shape
        # Remove the training pixels from the test ground truth.
        test_gt[(train_gt > 0)[:w, :h]] = 0
    elif TEST_GT is not None:
        test_gt = open_file(TEST_GT)
        # BUGFIX: train_gt was left undefined on this path; derive it as the
        # complement of the test mask, mirroring the TRAIN_GT-only branch.
        train_gt = np.copy(gt)
        w, h = train_gt.shape
        train_gt[(test_gt > 0)[:w, :h]] = 0
    else:
        # Sample random training spectra
        train_gt, test_gt = sample_gt(gt, SAMPLE_PERCENTAGE, mode=SAMPLING_MODE)
        # Persist the freshly sampled test split so the exact split can be
        # reused in later runs.
        scipy.io.savemat("test.mat", {'test_gt': test_gt})
args = parser.parse_args()

CUDA_DEVICE = get_device(args.cuda)
MODEL = args.model
# Testing file
MAT = args.mat
N_CLASSES = args.n_classes
INFERENCE = args.image
TEST_STRIDE = args.test_stride
CHECKPOINT = args.checkpoint

img_filename = os.path.basename(INFERENCE)
basename = MODEL + img_filename
dirname = os.path.dirname(INFERENCE)

# Load the image to run inference on; .mat containers need an explicit key.
img = open_file(INFERENCE)
if MAT is not None:
    img = img[MAT]

# Normalization: min-max rescale the whole cube into [0, 1].
img = np.asarray(img, dtype='float32')
lo, hi = img.min(), img.max()
img = (img - lo) / (hi - lo)

N_BANDS = img.shape[-1]

# Collect every CLI option plus the derived values into one hyperparameter
# dict, then drop the unset (None) entries.
hyperparams = vars(args)
hyperparams.update({
    'n_classes': N_CLASSES,
    'n_bands': N_BANDS,
    'device': CUDA_DEVICE,
    'ignored_labels': [0],
})
hyperparams = {k: v for k, v in hyperparams.items() if v is not None}
# Band counts for both modalities (e.g. HSI and a co-registered second image).
N_BANDS = (img1.shape[-1], img2.shape[-1])

# Instantiate the experiment based on predefined networks
hyperparams.update({
    "n_classes": N_CLASSES,
    "n_bands": N_BANDS,
    "ignored_labels": IGNORED_LABELS,
    "device": CUDA_DEVICE,
})
hyperparams = dict((k, v) for k, v in hyperparams.items() if v is not None)

results = []
# run the experiment several times
for run in range(N_RUNS):
    if TRAIN_GT is not None and TEST_GT is not None:
        # NOTE(review): assumes Houston-style .mat keys 'TRLabel'/'TSLabel' —
        # confirm against the data files.
        train_gt = open_file(TRAIN_GT)['TRLabel']
        test_gt = open_file(TEST_GT)['TSLabel']
    elif TRAIN_GT is not None:
        train_gt = open_file(TRAIN_GT)
        test_gt = np.copy(gt)
        w, h = test_gt.shape
        # Remove the training pixels from the test ground truth.
        test_gt[(train_gt > 0)[:w, :h]] = 0
    elif TEST_GT is not None:
        test_gt = open_file(TEST_GT)
        # BUGFIX: train_gt was left undefined on this path, so the print
        # below raised NameError; derive the training mask as the complement
        # of the test mask, mirroring the TRAIN_GT-only branch.
        train_gt = np.copy(gt)
        w, h = train_gt.shape
        train_gt[(test_gt > 0)[:w, :h]] = 0
    else:
        # Sample random training spectra
        train_gt, test_gt = sample_gt(gt, SAMPLE_PERCENTAGE, mode=SAMPLING_MODE)
    print("{} samples selected (over {})".format(np.count_nonzero(train_gt),
                                                 np.count_nonzero(gt)))
args = parser.parse_args()

CUDA_DEVICE = get_device(args.cuda)
MODEL = args.model
# Testing file
MAT = args.mat
MAT_LIDAR = args.mat_lidar
N_CLASSES = args.n_classes
INFERENCE = args.image_hsi
TEST_STRIDE = args.test_stride
CHECKPOINT = args.checkpoint

img_filename = os.path.basename(INFERENCE)
basename = MODEL + img_filename
dirname = os.path.dirname(INFERENCE)

# Load the hyperspectral cube; .mat containers need an explicit key.
img = open_file(INFERENCE)
if MAT is not None:
    img = img[MAT]
# Normalization: min-max rescale into [0, 1].
img = np.asarray(img, dtype="float32")
img = (img - img.min()) / (img.max() - img.min())

# Load the LiDAR image, add a singleton channel axis so it stacks with the
# hyperspectral cube, then min-max rescale it into [0, 1] as well.
img_lidar = open_file(args.image_lidar)
if MAT_LIDAR is not None:
    img_lidar = img_lidar[MAT_LIDAR]
img_lidar = np.asarray(img_lidar, dtype="float32")
img_lidar = np.expand_dims(img_lidar, axis=2)
img_lidar = (img_lidar - img_lidar.min()) / (img_lidar.max() - img_lidar.min())