Ejemplo n.º 1
0
def load_cached_model(filename: str):
    """Load and return the pickled model cached under the project cache dir.

    The model is expected at ``<project root>/cache/<filename>.pkl``.
    """
    template = "{root}/cache/{file}.pkl"
    model_path = template.format(root=get_project_root(), file=filename)
    return joblib.load(model_path)
Ejemplo n.º 2
0
def load_feature_cache(features: FeatureSet, image_name: str,
                       window_size: (int, int)) -> dict:
    """Collect any on-disk ``.npz`` caches for the features in *features*.

    Returns a dict mapping each feature's name to the loaded npz archive,
    or to ``{}`` when no cache file exists (or it cannot be read).
    """
    loaded = {}
    for feature_name, feature in features.items.items():
        # Cache key encodes feature, its windows, the query window and image.
        cache_key = "{feature}{fwindow}-window{window}-image-{image_name}".format(
            image_name=image_name,
            fwindow=str(feature.windows),
            window=window_size,
            feature=str(feature),
        )
        cache_file = "{root}/cache/features/{feature}/{cache_key}.npz".format(
            root=get_project_root(),
            feature=str(feature),
            cache_key=cache_key)

        # A missing or unreadable cache file simply yields an empty dict.
        try:
            if os.path.isfile(cache_file):
                loaded[feature_name] = np.load(cache_file)
            else:
                loaded[feature_name] = {}
        except OSError:
            loaded[feature_name] = {}

    return loaded
Ejemplo n.º 3
0
def load_cache(filename):
    """Return the cached numpy array for *filename*, or None when absent.

    Looks for ``<project root>/cache/<filename>.npy`` on disk.
    """
    path = get_project_root() + "/cache/" + filename + '.npy'

    if os.path.exists(path):
        print("Loaded cached: {}".format(filename))
        return np.load(path)

    return None
Ejemplo n.º 4
0
def cached_model_exists(filename):
    """Return True when a pickled model for *filename* exists in the cache.

    Checks for ``<project root>/cache/<filename>.pkl`` on disk.
    """
    full_path = "{root}/cache/{file}.pkl".format(
        root=get_project_root(),
        file=filename,
    )

    # os.path.exists already returns a bool — no if/True/False needed.
    return os.path.exists(full_path)
Ejemplo n.º 5
0
def cache_model(model, filename):
    """Pickle *model* to ``<project root>/cache/<filename>.pkl``.

    Creates the cache directory on first use.
    """
    dir_path = "{root}/cache".format(root=get_project_root(), )

    # makedirs(exist_ok=True) replaces the original isdir/mkdir pair: it is
    # race-free and also creates any missing parent directories.
    os.makedirs(dir_path, exist_ok=True)

    full_path = "{dir_path}/{file}.pkl".format(
        dir_path=dir_path,
        file=filename,
    )

    joblib.dump(model, full_path)
Ejemplo n.º 6
0
def cache_calculated_features(features, image_name, to_cache, window_size):
    """Persist computed feature arrays as ``.npz`` files in the feature cache.

    For each feature in *features*, the arrays in ``to_cache[name]`` are
    written to ``<root>/cache/features/<feature>/<cache key>.npz``; the cache
    key matches the one used by the corresponding load routine.
    """
    for name, feature in features.items.items():
        feature_cache_key = "{feature}{fwindow}-window{window}-image-{image_name}".format(
            image_name=image_name,
            fwindow=str(feature.windows),
            window=window_size,
            feature=str(feature),
        )

        dir_path = "{root}/cache/features/{feature}".format(
            feature=str(feature),
            root=get_project_root(),
        )
        full_path = "{dir_path}/{cache_key}.npz".format(
            dir_path=dir_path,
            cache_key=feature_cache_key,
        )
        # dirname(full_path) == dir_path, so a single race-free makedirs
        # suffices; the original follow-up isdir/mkdir was dead code.
        os.makedirs(dir_path, exist_ok=True)

        arrays = to_cache[name]
        np.savez(full_path, **arrays)
        'cv_test',
        'feature_name',
        'classifier_name',
        'classifier',
        'feature_set',
        'test_image',
        'feature_scale',
        'jaccard_index',
        'accuracy',
        'precision',
        'recall',
        'normalized_cnf',
    ]
    results_frame = pd.DataFrame(columns=results_cols)

    df_path = '{root}/results/jaccard'.format(root=get_project_root())
    csv_file = '{}/{}_df_optimization.csv'.format(df_path, feature_name)
    if os.path.exists(csv_file):
        results_frame = pd.read_csv(csv_file)

    # for feature_scale in generate_feature_scales(feature_scales):
    for main_window_size in ((
            10,
            10,
    ), ):
        # print("Running feature scale: {}".format(feature_scale))

        for test_image, train_images in cv_train_test_split_images(images):

            print("test im:{} train_ims:{}".format(test_image, train_images))
Ejemplo n.º 8
0
    return dataset, image, bands


# ---- Experiment configuration (module-level constants) ----

# Cluster count for clustering-based features (e.g. texton/SIFT vocabularies).
n_clusters = 32
# Window sizes evaluated for the Pantex feature.
pantex_window_sizes = ((25, 25), (50, 50), (100, 100), (150, 150))
# Integer label -> human-readable class name for the binary task.
class_names = {
    0: 'Non-Slum',
    1: 'Slum',
}
# Image sections making up the dataset.
images = [
    'section_1',
    'section_2',
    'section_3',
]
show_plots = True
# Result artifacts are written under <project root>/results.
results_path = '{root}/results'.format(root=get_project_root())
# UTC timestamp used to tag this run's outputs.
current_time = strftime("%Y-%m-%d_%H:%M:%S", gmtime())
base_path = data_path()
extension = 'tif'
# Sliding-window size used for feature extraction — presumably pixels; TODO confirm.
main_window_size = (10, 10)
# Threshold passed to get_y_vector when deriving labels — exact semantics
# not visible here; NOTE(review): confirm against get_y_vector.
percentage_threshold = 0.5
test_size = 0.2
class_ratio = 1.3
feature_set = FeatureSet()
pantex = Pantex(pantex_window_sizes)

for image_name in images:
    for lac_box_size in (10, 20, 30):
        for lac_window_size in ((50, 50), (300, 300), (500, 500),):
            lac_window_size = (lac_window_size,)
            image_file = "{base_path}/{image_name}.{extension}".format(
Ejemplo n.º 9
0
            texton_clusters = texton_cluster(map(load_image, train_images))
            texton = Texton(texton_clusters, windows=curr_feature_scales)
            feature_set.add(texton, "TEXTON")
            cached = False
        if feature_name == "PANTEX":
            pantex = Pantex(curr_feature_scales)
            feature_set.add(pantex, "PANTEX")
        if feature_name == "LACUNARITY":
            lacunarity = Lacunarity(windows=curr_feature_scales)
            feature_set.add(lacunarity, "LACUNARITY")

    # texton = create_texton_feature(sat_image, ((25, 25), (50, 50), (100, 100)), image_name, n_clusters=n_clusters, cached=True)

    plt.close('all')
    print("Running feature set {}, image {}".format(feature_set, image_name))
    results_path = '{root}/results/all/{fs}'.format(root=get_project_root(),
                                                    fs=str(feature_set))
    os.makedirs(os.path.dirname(results_path + '/'), exist_ok=True)

    X_test = get_x_matrix(test_image_loaded,
                          image_name=image_name,
                          feature_set=feature_set,
                          window_size=main_window_size,
                          cached=cached)
    y_test, real_mask = get_y_vector(mask_full_path,
                                     main_window_size,
                                     percentage_threshold,
                                     cached=False)

    # X, y = balance_dataset(X, y, class_ratio=class_ratio)
    # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42, stratify=None)
Ejemplo n.º 10
0
    sift = Sift(sift_clusters, windows=((25, 25), (50, 50), (100, 100)))

    # texton = create_texton_feature(sat_image, ((25, 25), (50, 50), (100, 100)), image_name, n_clusters=n_clusters, cached=True)
    texton_clusters = texton_cluster(map(load_image, train_images))
    texton = Texton(texton_clusters, windows=((25, 25), (50, 50), (100, 100)))

    feature_set.add(sift, "SIFT")
    feature_set.add(texton, "TEXTON")

    for feature_set, classifier in generate_tests(
        (texton, sift, pantex, lacunarity)):
        plt.close('all')
        print("Running feature set {}, image {}, classifier {}".format(
            feature_set, image_name, str(classifier)))
        results_path = '{root}/results/jaccard/{fs}'.format(
            root=get_project_root(), fs=str(feature_set))
        os.makedirs(os.path.dirname(results_path + '/'), exist_ok=True)

        X_test = get_x_matrix(test_image_loaded,
                              image_name=image_name,
                              feature_set=feature_set,
                              window_size=main_window_size,
                              cached=True)
        y_test, real_mask = get_y_vector(mask_full_path,
                                         main_window_size,
                                         percentage_threshold,
                                         cached=False)

        # X, y = balance_dataset(X, y, class_ratio=class_ratio)
        # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42, stratify=None)
Ejemplo n.º 11
0
def cache_path():
    """Return the path of the project's cache directory."""
    return get_project_root() + "/cache"
Ejemplo n.º 12
0
import pandas as pd

from satsense.util.path import get_project_root

# Location of the jaccard results written by the PANTEX optimization runs.
results_path = '{root}/results/jaccard'.format(root=get_project_root('../../'))
csv_path = "{}/{}_df_optimization.csv".format(results_path, "PANTEX")

optim = pd.read_csv(csv_path)
print(optim)

# Mean jaccard index per classifier across all recorded runs.
mean_jaccard = optim.groupby('classifier_name')['jaccard_index'].mean()