import os

import numpy as np
import tensorflow as tf
import umap

#collect the file paths of the dataset
image_set = []
for feature_idx, feature in enumerate(features):
    paths = list(absoluteFilePaths(baseline_path + feature))
    image_set += paths
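
#absoluteFilePaths is not defined on this page; a minimal sketch, assuming
#it walks a directory tree and yields an absolute path for every file in it:
def absoluteFilePaths(directory):
    for dirpath, _, filenames in os.walk(directory):
        for name in filenames:
            yield os.path.abspath(os.path.join(dirpath, name))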

#load the activations from cache or generate them
if generar_activaciones:
    activations = get_activations(model,
                                  image_set,
                                  target_layer_name,
                                  pooled=True)
    save_activations(activations, feature_type, target_layer_name,
                     "embedding_classification")
else:
    try:
        activations = load_activations(feature_type, target_layer_name,
                                       "embedding_classification")
    except Exception:
        print("Could not load the activations")
        print("Generating new activations")
        activations = get_activations(model,
                                      image_set,
                                      target_layer_name,
                                      pooled=True)
        save_activations(activations, feature_type, target_layer_name,
                         "embedding_classification")

umap_accuracies = []

#evaluate the embedding for several UMAP n_neighbors settings
for unn in umap_nneighbors:
    reducer = umap.UMAP(n_neighbors=unn)
    embedding = reducer.fit_transform(activations)
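    #(the rest of this loop body is cut off here) a hypothetical
    #continuation: score the 2-D embedding with a k-NN classifier, assuming
    #per-image class labels in 'targets' and scikit-learn being available
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.model_selection import cross_val_score
    knn_clf = KNeighborsClassifier(n_neighbors=5)
    umap_accuracies.append(
        cross_val_score(knn_clf, embedding, targets, cv=5).mean())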
Example #2

#collect the file paths of the dataset
image_set = []
for feature_idx, feature in enumerate(features):
    paths = list(absoluteFilePaths(baseline_path + feature))
    image_set += paths

#load the activations from cache or generate them
if generar_activaciones:
    activations = get_activations(model,
                                  weights,
                                  image_set,
                                  target_layer_name,
                                  pooled=True)
    save_activations(
        activations, feature_type + "_" + target_layer_name +
        "_embedding_activations_" + weights + ".npy")
else:
    try:
        activations = load_activations(feature_type + "_" + target_layer_name +
                                       "_embedding_activations_" + weights +
                                       ".npy")
    except Exception:
        print("Could not load the activations")
        print("Generating new activations")
        activations = get_activations(model,
                                      weights,
                                      image_set,
                                      target_layer_name,
                                      pooled=True)
        save_activations(
            activations, feature_type + "_" + target_layer_name +
            "_embedding_activations_" + weights + ".npy")

targets = [x // n_ejemplos for x in range(0, n_ejemplos * len(features))]
targets = np.array(targets)
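
#e.g. with n_ejemplos = 3 and two features this yields one integer class id
#per image, in the same order the paths were collected:
assert [x // 3 for x in range(3 * 2)] == [0, 0, 0, 1, 1, 1]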
Example #3

#split each feature's file paths into training and validation sets
training_set = []
validation_set = []
for feature_idx, feature in enumerate(features):
    paths = list(absoluteFilePaths(baseline_path + feature))
    feature_paths = divide_set(paths, training_fraction)
    feature_training_set = feature_paths[0]
    feature_validation_set = feature_paths[1]
    n_training_samples = len(feature_training_set)
    n_validation_samples = len(paths) - n_training_samples
    training_set += feature_training_set
    validation_set.append(feature_validation_set)
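
#divide_set (used above) is not defined on this page; a minimal sketch,
#assuming it splits a list into training/validation parts by the fraction:
def divide_set(items, fraction):
    cut = int(len(items) * fraction)
    return items[:cut], items[cut:]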

if generar_activaciones:
    activations = get_activations(model, training_set, target_layer_name, pooled=True)
    save_activations(activations, tipo_de_atributo, target_layer_name)
else:
    activations = load_activations(tipo_de_atributo, target_layer_name)

#inference using knn
series_results_by_feature = []

for feature_index, feature_set in enumerate(validation_set):
    feature_results = np.zeros(len(n_neighbors))
    print("calculando vecinos de atributo " + features[feature_index] + " - " + "{:.1f}".format(
        feature_index * len(feature_set) * 100 / (len(feature_validation_set) * len(features))) + "%")

    for file_index, file in enumerate(feature_set):
        #forward pass up to the target layer, then global-average-pool the
        #feature map into a single activation vector
        result = partial_forward(model, target_layer_name, file)
        result = np.expand_dims(result, axis=0)
        pooled_result = tf.keras.layers.GlobalAveragePooling2D()(result)

        for idx, k in enumerate(n_neighbors):
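            #the body of this loop is cut off here; a hypothetical
            #completion, assuming a majority vote over the k nearest
            #training activations (train_labels is assumed: one class id
            #per row of 'activations', with equally sized classes)
            query = pooled_result.numpy()[0]
            distances = np.linalg.norm(activations - query, axis=1)
            nearest = np.argsort(distances)[:k]
            train_labels = np.repeat(np.arange(len(features)), n_training_samples)
            votes = np.bincount(train_labels[nearest], minlength=len(features))
            feature_results[idx] += int(votes.argmax() == feature_index)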