Code example #1
File: saliency_generator.py Project: yabernar/watSOM
    def extract_image(self, input_path, output_path, supplements_path, image_nb, save=True):
        current = Image.open(os.path.join(input_path, "in{0:06d}.jpg".format(image_nb)))
        new_data = MosaicImage(current, self.image_parameters)
        self.som.set_data(new_data.get_data())
        winners = self.som.get_all_winners()
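        # How far each patch's best-matching unit has moved with respect to the background
        # reference map, upsampled back to pixel resolution to modulate the difference image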
        diff_winners = np.zeros(winners.shape)
        for j in range(len(winners)):
            diff_winners[j] = manhattan_distance(np.asarray(winners[j]), np.asarray(self.initial_map[j]))
        diff_winners = diff_winners.reshape(new_data.nb_pictures)
        diff_winners = np.kron(diff_winners, np.ones((self.pictures_dim[0], self.pictures_dim[1])))
        # diff_winners *= 30  # Use this parameter ?
        diff_winners = ImageOps.autocontrast(Image.fromarray(diff_winners).convert('L'))

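        # Reconstruct the frame from the SOM codebook; its difference with the current frame,
        # weighted by the BMU-displacement map, gives the saliency image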
        reconstructed = Image.fromarray(new_data.reconstruct(self.som.get_reconstructed_data(winners)))
        som_difference = ImageOps.autocontrast(ImageChops.difference(reconstructed, current).convert('L'))
        som_difference_modulated = ImageChops.multiply(som_difference, diff_winners)

        # Binarizing
        fn = lambda x: 255 if x > self.threshold else 0
        thresholded = som_difference_modulated.convert('L').point(fn, mode='1')

        result = ImageChops.multiply(thresholded, self.mask)

        # Saving
        if save:
            som_difference.save(os.path.join(supplements_path, "difference", "dif{0:06d}.png".format(image_nb)))
            diff_winners.save(os.path.join(supplements_path, "diff_winners", "win{0:06d}.png".format(image_nb)))
            som_difference_modulated.save(os.path.join(supplements_path, "saliency", "sal{0:06d}.png".format(image_nb)))
            thresholded.save(os.path.join(supplements_path, "thresholded", "thr{0:06d}.png".format(image_nb)))

            result.save(os.path.join(output_path, "bin{0:06d}.png".format(image_nb)))
Code example #2
File: saliency_generator.py Project: yabernar/watSOM
    def learning(self, bkg_image):
        # PARAMETERS
        self.image_parameters = Parameters({"pictures_dim": self.pictures_dim})
        data = MosaicImage(bkg_image, self.image_parameters)
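        # alpha and sigma (the SOM learning rate and neighbourhood radius) are scheduled
        # from their start to their end value over the training epochs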
        inputs_SOM = Parameters({"alpha": Variable(start=self.alpha_start, end=self.alpha_end, nb_steps=self.nb_epochs),
                                 "sigma": Variable(start=self.sigma_start, end=self.sigma_end, nb_steps=self.nb_epochs),
                                 "data": data.get_data(),
                                 "neurons_nbr": self.neurons_nb,
                                 "epochs_nbr": self.nb_epochs})
        self.som = SOM(inputs_SOM)

        # RUN
        for i in range(self.nb_epochs):
            # print('Epoch', i)
            self.som.run_epoch()
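        # BMUs of the background patches, kept as the reference map used by extract_image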
        self.initial_map = self.som.get_all_winners()
Code example #3
File: som_evaluation.py Project: yabernar/watSOM
def regenerate_database():
    global data_types
    data_types = {
        "spokenDigits": SpokenDigitsDataset("/users/yabernar/workspace/watSOM/Data/FSDD/recordings", 1000).get_data(),
        "images": MosaicImage(Image.open("/users/yabernar/workspace/watSOM/Code/fast_som/Elijah.png"),
                              Parameters({"pictures_dim": [10, 10]})).get_data(),
        "uniform3D": uniform(1000, 3),
        "pixel_colors": PixelsFromImage(Image.open("/users/yabernar/workspace/watSOM/Code/fast_som/Elijah.png"), 1000).get_data(),
        "catShape": GenerateFromShape(Image.open("/users/yabernar/workspace/watSOM/Code/fast_som/cat-silhouette.png"), 1000).get_data(),
        "uniform2D": uniform(1000, 2)
    }
Code example #4
File: execution.py Project: yabernar/watSOM
    def load_dataset(self):
        if self.dataset["type"] == "image":
            path = os.path.join("Data", "images", self.dataset["file"])
            img = Image.open(path)
            parameters = Parameters({"pictures_dim": [self.dataset["width"], self.dataset["height"]]})
            self.data = MosaicImage(img, parameters)
        elif self.dataset["type"] == "random_image":
            path = os.path.join("Data", "images", self.dataset["file"])
            img = Image.open(path)
            parameters = Parameters({"pictures_dim": [self.dataset["width"], self.dataset["height"]]})
            self.data = MosaicImage(img, parameters)
            self.training_data = RandomImage(img, parameters)
        elif self.dataset["type"] == "tracking":
            path = os.path.join("Data", "tracking", "dataset", self.dataset["file"], "back.jpg")
            #if self.metadata["seed"] % 2 == 1:
            #    path = os.path.join("Data", "tracking", "dataset", self.dataset["file"], "input", "bkg.jpg")
            #else:
            #    path = os.path.join("Data", "tracking", "dataset", self.dataset["file"], "input", "bkg2.jpg")
            img = Image.open(path)
            parameters = Parameters({"pictures_dim": [self.dataset["width"], self.dataset["height"]]})
            self.data = MosaicImage(img, parameters)
        else:
            print("Error : No dataset type specified !")
Code example #5
File: GNG.py Project: yabernar/watSOM
    #         global_error += spatial.distance.euclidean(observation, self.network.nodes[s_1]['vector'])**2
    #     return global_error

    def square_error(self, winners=None):
        if winners is None:
            winners = self.get_all_winners()
        error = np.zeros(winners.shape)
        for i in np.ndindex(winners.shape):
            error[i] = np.mean(
                (self.data[i] - self.network.nodes[winners[i]]['vector'])**2)
        return np.mean(error)


if __name__ == '__main__':
    bkg = MosaicImage(
        Image.open(
            os.path.join("Data", "tracking", "dataset", "baseline", "office",
                         "bkg.jpg")), Parameters({"pictures_dim": [16, 16]}))
    image = MosaicImage(
        Image.open(
            os.path.join("Data", "tracking", "dataset", "baseline", "office",
                         "input", "in001010.jpg")),
        Parameters({"pictures_dim": [16, 16]}))

    nb_epochs = 100
    inputs = Parameters({
        "epsilon_winner": 0.1,
        "epsilon_neighbour": 0.006,
        "maximum_age": 10,
        "error_decrease_new_unit": 0.5,
        "error_decrease_global": 0.995,
        "data": bkg.get_data(),
Code example #6
        return np.mean(error)


if __name__ == '__main__':
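    # Benchmark FastSOM training time and quantisation errors for square maps from 10x10 to 60x60 neurons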
    for n in range(10, 61, 2):
        start = time.time()

        nb_epochs = 10
        inputs = Parameters({
            "alpha": Variable(start=0.6, end=0.05, nb_steps=nb_epochs),
            "sigma": Variable(start=0.5, end=0.2, nb_steps=nb_epochs),
            "data": MosaicImage(Image.open("/users/yabernar/workspace/watSOM/Code/fast_som/Elijah.png"),
                                Parameters({"pictures_dim": [10, 10]})).get_data(),
            "neurons_nbr": (n, n),
            "epochs_nbr": nb_epochs
        })

        som = FastSOM(inputs)
        som.run()

        end = time.time()
        print("Executed in " + str(end - start) + " seconds.", n, "neurons")
        print("MSQE :", som.mean_square_quantization_error(), "MSDtN :",
              som.mean_square_distance_to_neighbour())
Code example #7
File: execution.py Project: yabernar/watSOM
class Execution:
    def __init__(self):
        self.metadata = {}
        self.dataset = {}
        self.model = {}
        self.codebooks = {}
        self.metrics = {}

        self.data = None
        self.training_data = None
        self.map = None

    def open(self, path):
        txt = codecs.open(path, 'r', encoding='utf-8').read()
        data = json.loads(txt)
        self.metadata = data["metadata"]
        self.dataset = data["dataset"]
        self.model = data["model"]
        self.codebooks = data["codebooks"]
        self.metrics = data["metrics"]

    def light_open(self, path):
        txt = codecs.open(path, 'r', encoding='utf-8').read()
        data = json.loads(txt)
        self.metadata = data["metadata"]
        self.dataset = data["dataset"]
        self.model = data["model"]
        self.metrics = data["metrics"]

    def save(self, path):
        data = {
            "metadata": self.metadata,
            "dataset": self.dataset,
            "model": self.model,
            "metrics": self.metrics,
            "codebooks": self.codebooks
        }
        json.dump(data,
                  codecs.open(os.path.join(path,
                                           self.metadata["name"] + ".json"),
                              'w',
                              encoding='utf-8'),
                  indent=2)

    def load_dataset(self):
        if self.dataset["type"] == "image":
            path = os.path.join("Data", "images", self.dataset["file"])
            img = Image.open(path)
            parameters = Parameters({"pictures_dim": [self.dataset["width"], self.dataset["height"]]})
            self.data = MosaicImage(img, parameters)
        elif self.dataset["type"] == "random_image":
            path = os.path.join("Data", "images", self.dataset["file"])
            img = Image.open(path)
            parameters = Parameters({"pictures_dim": [self.dataset["width"], self.dataset["height"]]})
            self.data = MosaicImage(img, parameters)
            self.training_data = RandomImage(img, parameters)
        elif self.dataset["type"] == "tracking":
            path = os.path.join("Data", "tracking", "dataset",
                                self.dataset["file"], "back.jpg")
            #if self.metadata["seed"] % 2 == 1:
            #    path = os.path.join("Data", "tracking", "dataset", self.dataset["file"], "input", "bkg.jpg")
            #else:
            #    path = os.path.join("Data", "tracking", "dataset", self.dataset["file"], "input", "bkg2.jpg")
            img = Image.open(path)
            parameters = Parameters({"pictures_dim": [self.dataset["width"], self.dataset["height"]]})
            self.data = MosaicImage(img, parameters)
        else:
            print("Error : No dataset type specified !")

    def run(self):
        #if self.metadata["seed"] % 2 == 1:
        np.random.seed(self.metadata["seed"])
        #else:
        #    np.random.seed(self.metadata["seed"]-1)
        if self.model["model"] == "gng":
            self.runGNG()
        else:
            self.runSOM()

    def runGNG(self):
        if self.data is None:
            self.load_dataset()
        inputs = Parameters({
            "epsilon_winner": self.model["epsilon_winner"],  # 0.1,
            "epsilon_neighbour": self.model["epsilon_neighbour"],  # 0.006,
            "maximum_age": self.model["maximum_age"],  # 10,
            "error_decrease_new_unit": self.model["error_decrease_new_unit"],  # 0.5,
            "error_decrease_global": self.model["error_decrease_global"],  # 0.995,
            "data": self.data.get_data(),
            "neurons_nbr": self.model["nb_neurons"],
            "epochs_nbr": self.model["nb_epochs"]
        })
        self.map = GrowingNeuralGas(inputs)
        self.map.run()

    def runSOM(self):
        if self.data is None:
            self.load_dataset()
        nb_epochs = self.model["nb_epochs"]
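        # Defaults for annealing and evaluation settings missing from the configuration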
        if "alpha_start" not in self.model: self.model["alpha_start"] = 0.2
        if "alpha_end" not in self.model: self.model["alpha_end"] = 0.05
        if "sigma_start" not in self.model: self.model["sigma_start"] = 0.7
        if "sigma_end" not in self.model: self.model["sigma_end"] = 0.015
        if "nb_images_evals" not in self.dataset:
            self.dataset["nb_images_evals"] = 75
        parameters = Parameters({
            "alpha": Variable(start=self.model["alpha_start"], end=self.model["alpha_end"], nb_steps=nb_epochs),
            "sigma": Variable(start=self.model["sigma_start"], end=self.model["sigma_end"], nb_steps=nb_epochs),
            "data": self.data.get_data(),
            "neurons_nbr": (self.model["width"], self.model["height"]),
            "epochs_nbr": nb_epochs
        })
        if self.model["model"] == "standard":
            self.map = SOM(parameters)
        elif self.model["model"] == "fast":
            self.map = FastSOM(parameters)
        elif self.model["model"] == "recursive":
            self.map = RecursiveSOM(parameters)
        else:
            print("Error : Unknown model !")

        # if "initialisation" not in self.codebooks:
        #     self.codebooks["initialisation"] = self.som.neurons.tolist()
        if "final" in self.codebooks:
            self.map.neurons = np.asarray(self.codebooks["final"])
        else:
            self.map.run()
            #for i in range(nb_epochs):
            #    self.map.run_epoch()
            #self.codebooks["final"] = copy.deepcopy(self.map.neurons.tolist())

        # for i in range(nb_epochs):
        #     print("Epoch "+str(i+1))
        #     if "Epoch "+str(i + 1) not in self.codebooks:
        #         if self.training_data is not None:
        #             self.som.data = self.training_data.get_data(self.som.data.shape[0])
        #         self.som.run_epoch()
        #         # self.codebooks["Epoch " + str(i + 1)] = copy.deepcopy(self.som.neurons.tolist())
        #     self.som.run_epoch()
        self.map.data = self.data.get_data()

    def compute_metrics(self):
        self.metrics["Square_error"] = self.map.square_error()
        if self.model["model"] == "gng":
            self.metrics["Neurons"] = len(self.map.network.nodes())
            self.metrics["Connections"] = len(self.map.network.edges())
        if self.dataset["type"] == "tracking":
            current_path = os.path.join("Data", "tracking", "dataset",
                                        self.dataset["file"])
            input_path = os.path.join(current_path, "input")
            roi_file = open(os.path.join(current_path, "temporalROI.txt"),
                            "r").readline().split()
            temporal_roi = (int(roi_file[0]), int(roi_file[1]))
            mask_roi = Image.open(os.path.join(current_path, "ROI.png"))

            nb_img_gen = self.dataset["nb_images_evals"]
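            # Choose step so that roughly nb_images_evals frames of the temporal ROI are evaluated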
            step = 1
            if nb_img_gen > 0:
                step = (temporal_roi[1] + 1 - temporal_roi[0]) // nb_img_gen

            base = os.path.join("Results", "GNGopti", self.metadata["name"],
                                "results")
            output_path = os.path.join(base, self.dataset["file"])
            supplements_path = os.path.join(base, "supplements")

            parameters = Parameters({
                "pictures_dim": [self.dataset["width"], self.dataset["height"]],
                "step": step
            })

            trackingMetric = TrackingMetrics(input_path,
                                             output_path,
                                             supplements_path,
                                             temporal_roi,
                                             mask_roi,
                                             parameters=parameters)
            trackingMetric.compute(self.map)
            cmp = Comparator()
            fmeasure, precision, recall = cmp.evaluate__folder_c(
                current_path, output_path, step)
            # print(fitness)
            self.metrics["fmeasure"] = fmeasure
            self.metrics["precision"] = precision
            self.metrics["recall"] = recall

    def compute_varying_threshold_metric(self):
        if self.dataset["type"] == "tracking":
            current_path = os.path.join("Data", "tracking", "dataset",
                                        self.dataset["file"])
            roi_file = open(os.path.join(current_path, "temporalROI.txt"),
                            "r").readline().split()
            temporal_roi = (int(roi_file[0]), int(roi_file[1]))

            base = os.path.join("Results", "Sizing", self.metadata["name"],
                                "results")
            output_path = os.path.join(base, self.dataset["file"])
            supplements_path = os.path.join(base, "supplements")
            difference_path = os.path.join(supplements_path, "saliency")

            nb_img_gen = self.dataset["nb_images_evals"]
            step = 1
            if nb_img_gen > 0:
                step = (temporal_roi[1] + 1 - temporal_roi[0]) // nb_img_gen

            res = []
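            # Threshold sweep: 1 to 19 in steps of 1, then 20 to 100 in steps of 5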
            ranges = list(range(1, 20)) + list(range(20, 101, 5))
            for threshold in ranges:
                for img in os.listdir(difference_path):
                    som_difference = Image.open(
                        os.path.join(difference_path, img))
                    # Binarizing
                    fn = lambda x: 255 if x > threshold else 0
                    thresholded = som_difference.convert('L').point(fn,
                                                                    mode='1')
                    result = Image.new("L", som_difference.size)
                    result.paste(thresholded, (0, 0))
                    result.save(os.path.join(output_path, "bin" + img[3:]))

                cmp = Comparator()
                fitness = cmp.evaluate__folder_c(current_path, output_path,
                                                 step)
                # print(fitness)
                res.append(fitness)
                #self.metrics["fmeasure-t" + str(threshold)] = fitness
            self.metrics["fmeasure_threshold"] = res

    def compute_steps_metrics(self):
        # self.metrics["Square_error"] = self.som.square_error()
        # self.metrics["Neurons"] = len(self.som.network.nodes())
        # self.metrics["Connections"] = len(self.som.network.edges())
        if self.dataset["type"] == "tracking":
            current_path = os.path.join("Data", "tracking", "dataset",
                                        self.dataset["file"])
            input_path = os.path.join(current_path, "input")
            roi_file = open(os.path.join(current_path, "temporalROI.txt"),
                            "r").readline().split()
            temporal_roi = (int(roi_file[0]), int(roi_file[1]))
            mask_roi = Image.open(os.path.join(current_path, "ROI.png"))

            base = os.path.join("Results", "GNGoptimisation",
                                self.metadata["name"], "results")
            output_path = os.path.join(base, self.dataset["file"])
            supplements_path = os.path.join(base, "supplements")

            parameters = Parameters({"pictures_dim": [self.dataset["width"], self.dataset["height"]]})

            # ranges = list(range(1, 5)) + list(range(5, 101, 5))
            #res = []
            #ranges = range(1,201)
            #for i in ranges:
            #    cmp = Comparator()
            #    fitness = cmp.evaluate__folder_c(current_path, output_path, i)
            #    res.append(fitness)
            #self.metrics["fmeasure-steps"] = res

            res = []
            nb_image_ranges = range(5, 201)
            for i in nb_image_ranges:
                step = (temporal_roi[1] + 1 - temporal_roi[0]) // i
                cmp = Comparator()
                fitness = cmp.evaluate__folder_c(current_path, output_path,
                                                 step)
                res.append(fitness)
            self.metrics["fmeasure-nbimgs"] = res

    def full_threshold_evaluation(self, path):
        self.compute_varying_threshold_metric()
        self.save(path)
        print("Simulation", self.metadata["name"], "ended")

    def full_step_evaluation(self, path):
        self.compute_steps_metrics()
        self.save(path)
        print("Simulation", self.metadata["name"], "ended")

    def full_simulation(self, path):
        if "fmeasure" not in self.metrics:
            self.run()
            self.compute_metrics()
            self.save(path)
Code example #8
File: Recursive_SOM.py Project: yabernar/watSOM
        for i in diff:
            error.append(np.mean(np.abs(i)))
        return np.mean(error)

    def square_error(self):
        rec = self.get_reconstructed_data()
        diff = rec - self.data
        error = []
        for i in diff:
            error.append(np.mean(i**2))
        return np.mean(error)


if __name__ == '__main__':
    start = time.time()
    img = MosaicImage(Image.open("Data/images/Elijah.png"),
                      Parameters({"pictures_dim": [10, 10]}))

    nb_epochs = 100
    inputs = Parameters({
        "alpha": Variable(start=0.6, end=0.05, nb_steps=nb_epochs),
        "sigma": Variable(start=0.5, end=0.001, nb_steps=nb_epochs),
        "data": img.get_data(),
        "neurons_nbr": (8, 8),
        "epochs_nbr": nb_epochs
    })
    som = RecursiveSOM(inputs)
    som.run()
Code example #9
import os
import numpy as np
from PIL import Image, ImageOps, ImageChops
from Code.Parameters import Parameters, Variable
from Code.SOM import SOM
from Data.Mosaic_Image import MosaicImage

pictures_dim = [10, 10]
name = "office"
path = os.path.join("Data", "spikes", "office", "src")
bkg = Image.open(os.path.join(path, name + "0.png"))
img_parameters = Parameters({"pictures_dim": pictures_dim})
data = MosaicImage(bkg, img_parameters)
nb_epochs = 100
parameters = Parameters({
    "alpha": Variable(start=0.6, end=0.05, nb_steps=nb_epochs),
    "sigma": Variable(start=0.5, end=0.001, nb_steps=nb_epochs),
    "data": data.get_data(),
    "neurons_nbr": (10, 10),
    "epochs_nbr": nb_epochs
})
map = SOM(parameters)
for i in range(nb_epochs):
    print("Epoch " + str(i + 1))
    map.run_epoch()

map.data = data.get_data()
Code example #10
print(categories)
print(elements)

chosen_path = path + "/dataset/" + categories[11] + "/" + elements[0]
temporal_ROI = (1, 200)
plot = None
bkg = Image.open(chosen_path + "/input/" + 'bkg.jpg')
# bkg = Image.open(path2 + video + '{0:05d}.png'.format(1))
# bkg = Image.open(path3 + "example_base.png")

############
# LEARNING #
############
pictures_dim = [10, 10]
parameters = Parameters({"pictures_dim": pictures_dim})
data = MosaicImage(bkg, parameters)
nb_epochs = 50
inputs_SOM = Parameters({
    "alpha": Variable(start=0.5, end=0.25, nb_steps=nb_epochs),
    "sigma": Variable(start=0.1, end=0.03, nb_steps=nb_epochs),
    "data": data.get_data(),
    "neurons_nbr": (10, 10),
    "epochs_nbr": nb_epochs
})
som = SOM(inputs_SOM)
for i in range(nb_epochs):
    print('Epoch ', i)
Code example #11
                vector - self.network.nodes[i]['vector'])

    def fully_random_vector(self):
        return np.random.randint(np.shape(self.data)[0])

    def unique_random_vector(self):
        self.current_vector_index = self.vector_list.pop(0)
        return self.current_vector_index

    def generate_random_list(self):
        self.vector_list = list(range(len(self.data)))
        np.random.shuffle(self.vector_list)


if __name__ == '__main__':
    img = MosaicImage(Image.open(os.path.join("Data", "Images", "Lenna.png")),
                      Parameters({"pictures_dim": [20, 20]}))
    data = img.get_data()

    start = time.time()

    nb_epochs = 20
    inputs = Parameters({
        "alpha": Variable(start=0.6, end=0.05, nb_steps=nb_epochs),
        "sigma": Variable(start=0.5, end=0.001, nb_steps=nb_epochs),
        "data": img.get_data(),
        "neurons_nbr": (5, 5),
        "epochs_nbr": nb_epochs
Code example #12
File: neural_modulation.py Project: yabernar/watSOM
print(elements)

chosen_path = path + "/dataset/" + categories[1] + "/" + elements[0]
temporal_ROI = (400, 1700)
plot = None
bkg = Image.open(chosen_path + "/input/" + 'in{0:06d}.jpg'.format(472))
# bkg = Image.open(path + 'ducks{0:05d}.png'.format(1))
# bkg = Image.open("/users/yabernar/workspace/watSOM/Data/color_test.png")

############
# LEARNING #
############
pictures_dim = [10, 10]
parameters = Parameters({"pictures_dim": pictures_dim})
data = SlidingWindow(bkg, parameters)
mosaic = MosaicImage(bkg, parameters)
nb_epochs = 5
inputs_SOM = Parameters({
    "alpha": Variable(start=0.5, end=0.25, nb_steps=nb_epochs),
    "sigma": Variable(start=0.1, end=0.03, nb_steps=nb_epochs),
    "data": data.get_data(),
    "neurons_nbr": (10, 10),
    "epochs_nbr": nb_epochs
})
som = SOM(inputs_SOM)
for i in range(nb_epochs):
    print('Epoch ', i)
Code example #13
        for i in np.ndindex(winners.shape):
            error[i] = np.mean((self.working_data[i] - self.network.nodes[winners[i]]['vector'])**2)
        return np.mean(error)

    def display(self):
        reconstructed = self.img.reconstruct(self.get_reconstructed_data())
        # som_image = mosaic.reconstruct(gng.get_neural_list(), size=som.neurons_nbr)
        print("Error : ", self.square_error())
        print("Neurons : ", len(self.network.nodes()))
        print("Connections : ", len(self.network.edges()))
        plt.imshow(reconstructed)
        plt.show()


if __name__ == '__main__':
    img = MosaicImage(Image.open(os.path.join("Data", "Tracking", "Dataset", "baseline", "highway", "bkg.jpg")), Parameters({"pictures_dim": [16, 16]}))
    data = img.get_data()
    gng = GrowingNeuralGas(data)
    gng.img = img
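    # GNG hyper-parameters, presumably in Fritzke's notation: e_b/e_n winner and neighbour
    # learning rates, a_max maximum edge age, l insertion interval, a/d error-decrease factors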
    gng.fit_network(e_b=0.1, e_n=0.006, a_max=10, l=200, a=0.5, d=0.995, passes=100, plot_evolution=False)
    reconstructed = img.reconstruct(gng.get_reconstructed_data())
    # som_image = mosaic.reconstruct(gng.get_neural_list(), size=som.neurons_nbr)
    # print(gng.get_all_winners())
    plt.imshow(reconstructed)
    plt.show()
    print(len(gng.network.nodes))

    img2 = MosaicImage(Image.open(os.path.join("Data", "Tracking", "Dataset", "baseline", "highway", "input", "in001010.jpg")), Parameters({"pictures_dim": [16, 16]}))
    data2 = img2.get_data()
    old_winners = gng.get_all_winners()
    gng.data = data2
Code example #14
categories = sorted(os.listdir(path + "/dataset"), key=str.lower)
elements = sorted(os.listdir(path + "/dataset/" + categories[1]), key=str.lower)
print(categories)
print(elements)

chosen_path = path + "/dataset/" + categories[1] + "/" + elements[1] + "/input/"
temporal_ROI = (570, 2050)
images_list = []
parameters = Parameters({"pictures_dim": pictures_dim})
plot = None
img = Image.open(chosen_path + 'in{0:06d}.jpg'.format(1)).convert('L')
bkg = Image.open(chosen_path + 'in{0:06d}.jpg'.format(60)).convert('L')
difference = ImageChops.difference(img, bkg)
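# The SOM below is trained on this frame difference rather than on a raw background image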

data = MosaicImage(difference, parameters)

nb_epochs = 50
inputs_SOM = Parameters({"alpha": Variable(start=0.6, end=0.05, nb_steps=nb_epochs),
                         "sigma": Variable(start=0.5, end=0.001, nb_steps=nb_epochs),
                         "data": data.get_data(),
                         "neurons_nbr": (10, 10),
                         "epochs_nbr": nb_epochs})
som = SOM(inputs_SOM)
for i in range(nb_epochs):
    print('Epoch ', i)
    som.run_epoch()
    original = data.image
    reconstructed = data.reconstruct(som.get_reconstructed_data())
    som_image = data.reconstruct(som.get_neural_list(), size=som.neurons_nbr)
    difference = ImageChops.difference(original, Image.fromarray(reconstructed))