Example no. 1
    n_examples.append(feature_examples)

# classify the examples using knn
file_number = 0
files = list(absoluteFilePaths(input_path))

for file in files:
    print("file " + str(file_number) + " of " + str(len(files)))
    result = partial_forward(model, target_layer_name, file)
    # average each channel's feature map: a manual global average pooling
    input_activation = np.zeros((1, channels))

    for channel in range(channels):
        feature_map = result[:, :, channel]
        avg = np.average(feature_map)
        input_activation[0][channel] = avg

    nearest_neighbors = knn(input_activation, activations, n_neighbors)
    # map each neighbor's flat index back to its feature class
    # (assumes every feature contributed the same number of examples)
    nearest_classes = [
        features[i // n_examples[0]]
        for i in [x[0] for x in nearest_neighbors]
    ]
    output_class = most_frequent(nearest_classes)

    output_path = label_files_path + output_class + "/"
    if not os.path.exists(output_path):
        os.makedirs(output_path)

    #copyfile(file, output_path + str(file_number + 8792) + ".jpg")
    copyfile(file, output_path + str(file_number) + ".jpg")
    file_number += 1
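A note on the helpers: knn and most_frequent are defined elsewhere in the repository and are not shown in these snippets. The sketch below is only an assumption inferred from the call sites (knn(query, data, k[, metric]) returning (index, distance) pairs sorted by distance, most_frequent returning the mode); the real implementations may differ.

# Hypothetical helpers, reconstructed from how they are called above.
import numpy as np
from collections import Counter


def knn(query, data, k, metric="euclid"):
    # query: vector (or 1 x d array); data: n x d matrix of stored activations.
    # Returns the k nearest rows of data as (index, distance) pairs, closest first.
    query = np.asarray(query).reshape(1, -1)
    data = np.asarray(data)
    if metric == "euclid":
        distances = np.linalg.norm(data - query, axis=1)
    else:
        # cosine distance as a fallback metric
        numerator = (data * query).sum(axis=1)
        denominator = np.linalg.norm(data, axis=1) * np.linalg.norm(query)
        distances = 1.0 - numerator / denominator
    order = np.argsort(distances)[:k]
    return [(int(i), float(distances[i])) for i in order]


def most_frequent(items):
    # majority vote over the neighbors' classes
    return Counter(items).most_common(1)[0][0]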
Example no. 2
    feature_results = np.zeros(len(n_neighbors))
    print("calculando vecinos de atributo " + features[feature_index] + " - " + "{:.1f}".format(
        feature_index * len(feature_set) * 100 / (len(feature_validation_set) * len(features))) + "%")

    for file_index, file in enumerate(feature_set):
        result = partial_forward(model, target_layer_name, file)
        result = np.expand_dims(result, axis=0)
        pooled_result = tf.keras.layers.GlobalAveragePooling2D()(result)

        for idx, k in enumerate(n_neighbors):
            specific_output_path = (output_path + tipo_de_atributo + "/" +
                                    features[feature_index] + "/" + str(k) +
                                    "_neighbors/" + target_layer_name + "/errores/")

            if not os.path.exists(specific_output_path):
                os.makedirs(specific_output_path)

            nearest_neighbors = knn(pooled_result, activations, k)
            nearest_classes = [features[i // n_training_samples] for i in [x[0] for x in nearest_neighbors]]
            output_class = most_frequent(nearest_classes)

            if output_class == features[feature_index]:
                feature_results[idx] += 1
            else:
                copyfile(file, specific_output_path + output_class + "_" +
                         str(feature_index) + "_" + str(file_index) + ".png")


    feature_results /= n_validation_samples
    series_results_by_feature.append(feature_results)



x = np.arange(len(n_neighbors))
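The snippet is cut off here; judging from x = np.arange(len(n_neighbors)) and the accumulated series_results_by_feature, the missing part presumably plots the per-feature validation accuracy against k. A hypothetical sketch of such a plot, reusing the snippet's variables (x, n_neighbors, features, series_results_by_feature), could be:

# Hypothetical continuation: one accuracy curve per feature over the tested k values.
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
for feature_name, accuracies in zip(features, series_results_by_feature):
    ax.plot(x, accuracies, marker="o", label=feature_name)
ax.set_xticks(x)
ax.set_xticklabels([str(k) for k in n_neighbors])
ax.set_xlabel("k (number of neighbors)")
ax.set_ylabel("validation accuracy")
ax.legend()
plt.show()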
        activations = get_activations(model,
                                      image_set,
                                      target_layer_name,
                                      pooled=True)
        save_activations(activations, feature_type, target_layer_name,
                         "embedding_classification")

umap_accuracies = []

for unn in umap_nneighbors:
    reducer = umap.UMAP(n_neighbors=unn)
    embedding = reducer.fit_transform(activations)
    correct = 0
    experiment_indexes = random.sample(range(0, len(embedding)),
                                       total_experiments)

    for nn_idx, embedding_idx in enumerate(experiment_indexes):
        nearest_neighbors = knn(embedding[embedding_idx], embedding,
                                knn_neighbor_qty, "euclid")

        if visualizations:
            plt.rcParams["axes.grid"] = False

            for i, x in enumerate(nearest_neighbors):
                path = image_set[x[0]]
                img = mpimg.imread(path)
                plt.imshow(img)
                if save_visualizations:
                    plt.savefig(output_path + str(i) + ".jpg")
                plt.show()

            plt.rcParams["axes.grid"] = True
            prop_iter = iter(plt.rcParams['axes.prop_cycle'])
            fig, ax = plt.subplots()
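Both this fragment and the retrieval snippet below cache pooled activations through save_activations and load_activations, which are also not shown. A minimal sketch, assuming they simply write and read .npy files named after the feature type, target layer, and task tag (the actual storage scheme may differ), is:

# Hypothetical cache helpers for the pooled activations.
import os
import numpy as np

CACHE_DIR = "activation_cache"  # assumed location of the cache files


def _cache_path(feature_type, layer_name, task):
    return os.path.join(CACHE_DIR, feature_type + "_" + layer_name + "_" + task + ".npy")


def save_activations(activations, feature_type, layer_name, task):
    # persist the activation matrix so later runs can skip the forward passes
    os.makedirs(CACHE_DIR, exist_ok=True)
    np.save(_cache_path(feature_type, layer_name, task), np.asarray(activations))


def load_activations(feature_type, layer_name, task):
    return np.load(_cache_path(feature_type, layer_name, task))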
    feature_validation_set = feature_paths[1]
    n_training_samples = len(feature_training_set)
    n_validation_samples = len(paths) - n_training_samples
    training_set += feature_training_set
    validation_set.append(feature_validation_set)

# load or generate the activations
if generar_activaciones:
    activations = get_activations(model,
                                  training_set,
                                  target_layer_name,
                                  pooled=True)
    save_activations(activations, tipo_de_atributo, target_layer_name,
                     "retrieval")
else:
    activations = load_activations(tipo_de_atributo, target_layer_name,
                                   "retrieval")

# inference using knn: retrieve the nearest training images for each input file
series_results_by_feature = []

for input_file in input_files:
    file_activations = get_activations(model, [input_file],
                                       target_layer_name,
                                       pooled=True)[0]
    result = knn(file_activations, activations, 5)
    for x in result:
        img = mpimg.imread(training_set[x[0]])
        plt.imshow(img)
        plt.show()
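Finally, partial_forward and get_activations are repository helpers that every example relies on. The sketch below is an assumption based on how they are used (partial_forward(model, layer, file) returning the target layer's H x W x C feature map for one image, get_activations(model, files, layer, pooled=True) returning one pooled vector per image); the input size and the omission of model-specific preprocessing are guesses, and the real code may differ.

# Hypothetical versions of the feature-extraction helpers used above.
import numpy as np
import tensorflow as tf


def partial_forward(model, target_layer_name, image_path, input_size=(224, 224)):
    # Forward pass up to target_layer_name for a single image file.
    # input_size and the lack of model-specific preprocessing are assumptions.
    submodel = tf.keras.Model(inputs=model.input,
                              outputs=model.get_layer(target_layer_name).output)
    img = tf.keras.preprocessing.image.load_img(image_path, target_size=input_size)
    batch = np.expand_dims(tf.keras.preprocessing.image.img_to_array(img), axis=0)
    return submodel.predict(batch)[0]  # (H, W, C) feature map


def get_activations(model, image_paths, target_layer_name, pooled=True):
    # One row per image; pooled=True averages each channel's feature map,
    # matching the manual averaging done in Example no. 1.
    activations = []
    for path in image_paths:
        feature_map = partial_forward(model, target_layer_name, path)
        if pooled:
            feature_map = feature_map.mean(axis=(0, 1))
        activations.append(feature_map)
    return np.stack(activations)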