Example #1
def create_forward(workflow, normalizer, labels_mapping, loader_config):
    # Disable plotters:
    workflow.plotters_are_enabled = False

    # Link downloader
    workflow.start_point.unlink_after()
    workflow.downloader = Downloader(
        workflow,
        url="https://s3-eu-west-1.amazonaws.com/veles.forge/MNIST/"
            "mnist_test.tar",
        directory=root.common.dirs.datasets,
        files=["mnist_test"])
    workflow.downloader.link_from(workflow.start_point)
    workflow.repeater.link_from(workflow.downloader)

    # Change MnistLoader to another Loader:
    new_loader = workflow.change_unit(
        workflow.loader.name,
        FileListImageLoader(workflow, **loader_config))

    # Link attributes:
    # TODO: remove these attribute links once change_unit() handles
    # data links transmission on its own
    workflow.forwards[0].link_attrs(
        new_loader, ("input", "minibatch_data"))

    workflow.evaluator.link_attrs(
        new_loader,
        "class_keys",
        ("batch_size", "minibatch_size"),
        ("labels", "minibatch_labels"),
        ("max_samples_per_epoch", "total_samples"),
        "class_lengths", ("offset", "minibatch_offset"))
    workflow.decision.link_attrs(
        new_loader, "minibatch_class", "last_minibatch",
        "minibatch_size", "class_lengths", "epoch_ended", "epoch_number")

    # Transfer the normalizer from the previous Loader to the new one:
    new_loader._normalizer = normalizer

    # Set labels_mapping and class_keys in the Evaluator so that the results
    # are written correctly:
    workflow.evaluator.labels_mapping = labels_mapping
    workflow.evaluator.class_keys = new_loader.class_keys
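
A possible invocation of the helper above, sketched under assumptions: the workflow is a trained MNIST network restored from a snapshot, the normalizer and labels mapping are taken from its original loader before create_forward() replaces it, and the loader_config keys are hypothetical placeholders rather than documented FileListImageLoader arguments.

# Hypothetical usage sketch; attribute and key names below are assumptions.
normalizer = workflow.loader.normalizer          # training-time normalizer
labels_mapping = workflow.loader.labels_mapping  # original label mapping
create_forward(workflow, normalizer, labels_mapping,
               loader_config={"minibatch_size": 10})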
Example #2
    def __init__(self, workflow, **kwargs):
        super(WineWorkflow, self).__init__(workflow, **kwargs)
        layers = kwargs["layers"]

        self.downloader = Downloader(
            self,
            url=root.wine.downloader.url,
            directory=root.wine.downloader.directory,
            files=root.wine.downloader.files,
        )
        self.downloader.link_from(self.start_point)

        self.repeater.link_from(self.downloader)

        self.loader = WineLoader(
            self,
            minibatch_size=root.wine.loader.minibatch_size,
            force_numpy=root.wine.loader.force_numpy,
            dataset_file=root.wine.loader.dataset_file,
            normalization_type=root.wine.loader.normalization_type,
        )
        self.loader.link_from(self.repeater)

        # Add forward units
        del self.forwards[:]
        for i, layer in enumerate(layers):
            if i < len(layers) - 1:
                aa = all2all.All2AllTanh(self, output_sample_shape=(layer,))
            else:
                aa = all2all.All2AllSoftmax(self, output_sample_shape=(layer,))
            self.forwards.append(aa)
            if i:
                self.forwards[-1].link_from(self.forwards[-2])
                self.forwards[-1].link_attrs(self.forwards[-2], ("input", "output"))
            else:
                self.forwards[-1].link_from(self.loader)
                self.forwards[-1].link_attrs(self.loader, ("input", "minibatch_data"))

        # Add an evaluator for a single minibatch
        self.evaluator = evaluator.EvaluatorSoftmax(self)
        self.evaluator.link_from(self.forwards[-1])
        self.evaluator.link_attrs(self.forwards[-1], "output", "max_idx")
        self.evaluator.link_attrs(
            self.loader,
            ("batch_size", "minibatch_size"),
            ("max_samples_per_epoch", "total_samples"),
            ("labels", "minibatch_labels"),
            ("offset", "minibatch_offset"),
            "class_lengths",
        )

        # Add decision unit
        self.decision = decision.DecisionGD(
            self, fail_iterations=root.wine.decision.fail_iterations, max_epochs=root.wine.decision.max_epochs
        )
        self.decision.link_from(self.evaluator)
        self.decision.link_attrs(
            self.loader,
            "minibatch_class",
            "minibatch_size",
            "last_minibatch",
            "class_lengths",
            "epoch_ended",
            "epoch_number",
        )
        self.decision.link_attrs(
            self.evaluator,
            ("minibatch_n_err", "n_err"),
            ("minibatch_confusion_matrix", "confusion_matrix"),
            ("minibatch_max_err_y_sum", "max_err_output_sum"),
        )

        self.snapshotter = NNSnapshotterToFile(
            self,
            prefix=root.wine.snapshotter.prefix,
            directory=root.common.dirs.snapshots,
            compression="",
            interval=root.wine.snapshotter.interval,
            time_interval=root.wine.snapshotter.time_interval,
        )
        self.snapshotter.link_from(self.decision)
        self.snapshotter.link_attrs(self.decision, ("suffix", "snapshot_suffix"))
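        # Run the snapshotter only once an epoch has ended, and write a
        # snapshot only when the decision reports an improvement.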
        self.snapshotter.gate_skip = ~self.loader.epoch_ended
        self.snapshotter.skip = ~self.decision.improved

        self.end_point.link_from(self.snapshotter)
        self.end_point.gate_block = ~self.decision.complete

        # Add gradient descent units
        self.gds[:] = (None,) * len(self.forwards)
        self.gds[-1] = (
            gd.GDSoftmax(self)
            .link_from(self.snapshotter)
            .link_attrs(self.evaluator, "err_output")
            .link_attrs(self.forwards[-1], "output", "input", "weights", "bias")
            .link_attrs(self.loader, ("batch_size", "minibatch_size"))
        )
        self.gds[-1].gate_skip = self.decision.gd_skip
        self.gds[-1].gate_block = self.decision.complete
        for i in range(len(self.forwards) - 2, -1, -1):
            self.gds[i] = (
                gd.GDTanh(self)
                .link_from(self.gds[i + 1])
                .link_attrs(self.gds[i + 1], ("err_output", "err_input"))
                .link_attrs(self.forwards[i], "output", "input", "weights", "bias")
                .link_attrs(self.loader, ("batch_size", "minibatch_size"))
            )
            self.gds[i].gate_skip = self.decision.gd_skip
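        # The first layer has nothing to propagate the error back to;
        # linking the repeater to it closes the training loop.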
        self.gds[0].need_err_input = False
        self.repeater.link_from(self.gds[0])
        self.loader.gate_block = self.decision.complete
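
A minimal entry-point sketch for running this workflow, assuming the usual Veles run(load, main) convention and a root.wine.layers configuration entry; both are assumptions and not part of the example above.

def run(load, main):
    # layers is assumed to hold the layer sizes, e.g. [8, 3]:
    # one tanh hidden layer and a three-class softmax output.
    load(WineWorkflow, layers=root.wine.layers)
    main()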
Example #3
    def __init__(self, workflow, **kwargs):
        kwargs["name"] = kwargs.get("name", "Kohonen")
        super(KohonenWorkflow, self).__init__(workflow, **kwargs)

        self.downloader = Downloader(
            self, url=root.kohonen.downloader.url,
            directory=root.kohonen.downloader.directory,
            files=root.kohonen.downloader.files)
        self.downloader.link_from(self.start_point)

        self.repeater.link_from(self.downloader)

        self.loader = KohonenLoader(
            self, name="Kohonen fullbatch loader",
            minibatch_size=root.kohonen.loader.minibatch_size,
            force_numpy=root.kohonen.loader.force_numpy)
        self.loader.link_from(self.repeater)

        # Kohonen training layer
        self.trainer = kohonen.KohonenTrainer(
            self, shape=root.kohonen.forward.shape,
            weights_filling=root.kohonen.forward.weights_filling,
            weights_stddev=root.kohonen.forward.weights_stddev,
            gradient_decay=root.kohonen.train.gradient_decay,
            radius_decay=root.kohonen.train.radius_decay)
        self.trainer.link_from(self.loader)
        self.trainer.link_attrs(self.loader, ("input", "minibatch_data"))

        # Loop decision
        self.decision = kohonen.KohonenDecision(
            self, max_epochs=root.kohonen.decision.epochs)
        self.decision.link_from(self.trainer)
        self.decision.link_attrs(self.loader,
                                 "minibatch_class",
                                 "last_minibatch",
                                 "class_lengths",
                                 "epoch_ended",
                                 "epoch_number")
        self.decision.link_attrs(self.trainer, "weights", "winners")

        self.ipython = Shell(self)
        self.ipython.link_from(self.decision)
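        # Skip the shell until an epoch has ended, so it runs once per epoch.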
        self.ipython.gate_skip = ~self.decision.epoch_ended

        self.repeater.link_from(self.ipython)
        self.ipython.gate_block = self.decision.complete

        self.end_point.link_from(self.decision)
        self.end_point.gate_block = ~self.decision.complete

        self.loader.gate_block = self.decision.complete

        # Kohonen map plotters
        self.plotters = [nn_plotting_units.KohonenHits(self),
                         nn_plotting_units.KohonenInputMaps(self),
                         nn_plotting_units.KohonenNeighborMap(self)]
        self.plotters[0].link_attrs(self.trainer, "shape") \
            .link_from(self.ipython)
        self.plotters[0].input = self.decision.winners_mem
        self.plotters[0].gate_block = ~self.decision.epoch_ended
        self.plotters[1].link_attrs(self.trainer, "shape") \
            .link_from(self.ipython)
        self.plotters[1].input = self.decision.weights_mem
        self.plotters[1].gate_block = ~self.decision.epoch_ended
        self.plotters[2].link_attrs(self.trainer, "shape") \
            .link_from(self.ipython)
        self.plotters[2].input = self.decision.weights_mem
        self.plotters[2].gate_block = ~self.decision.epoch_ended
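
The Kohonen workflow pulls all of its parameters from the root configuration tree; the sketch below shows one hedged way such a config could be populated. All values and file names are illustrative assumptions, not the sample's actual defaults.

root.kohonen.update({
    "downloader": {
        "url": "<kohonen dataset archive URL>",  # placeholder, not a real URL
        "directory": root.common.dirs.datasets,
        "files": ["kohonen.txt.gz"]},            # hypothetical file name
    "loader": {"minibatch_size": 10, "force_numpy": False},
    "forward": {"shape": (8, 8),
                "weights_filling": "uniform",
                "weights_stddev": 0.05},
    "train": {"gradient_decay": lambda t: 0.1 / (1.0 + t * 0.05),
              "radius_decay": lambda t: 1.0 / (1.0 + t * 0.05)},
    "decision": {"epochs": 160}})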
Example #4
    def __init__(self, workflow, **kwargs):
        kwargs["name"] = kwargs.get("name", "Kohonen")
        super(KohonenWorkflow, self).__init__(workflow, **kwargs)

        self.downloader = Downloader(
            self,
            url=root.kohonen.downloader.url,
            directory=root.kohonen.downloader.directory,
            files=root.kohonen.downloader.files)
        self.downloader.link_from(self.start_point)

        self.repeater.link_from(self.downloader)

        self.loader = KohonenLoader(
            self,
            name="Kohonen fullbatch loader",
            minibatch_size=root.kohonen.loader.minibatch_size,
            force_numpy=root.kohonen.loader.force_numpy)
        self.loader.link_from(self.repeater)

        # Kohonen training layer
        self.trainer = kohonen.KohonenTrainer(
            self,
            shape=root.kohonen.forward.shape,
            weights_filling=root.kohonen.forward.weights_filling,
            weights_stddev=root.kohonen.forward.weights_stddev,
            gradient_decay=root.kohonen.train.gradient_decay,
            radius_decay=root.kohonen.train.radius_decay)
        self.trainer.link_from(self.loader)
        self.trainer.link_attrs(self.loader, ("input", "minibatch_data"))

        # Loop decision
        self.decision = kohonen.KohonenDecision(
            self, max_epochs=root.kohonen.decision.epochs)
        self.decision.link_from(self.trainer)
        self.decision.link_attrs(self.loader, "minibatch_class",
                                 "last_minibatch", "class_lengths",
                                 "epoch_ended", "epoch_number")
        self.decision.link_attrs(self.trainer, "weights", "winners")

        self.ipython = Shell(self)
        self.ipython.link_from(self.decision)
        self.ipython.gate_skip = ~self.decision.epoch_ended

        self.repeater.link_from(self.ipython)
        self.ipython.gate_block = self.decision.complete

        self.end_point.link_from(self.decision)
        self.end_point.gate_block = ~self.decision.complete

        self.loader.gate_block = self.decision.complete

        # Kohonen map plotters
        self.plotters = [
            nn_plotting_units.KohonenHits(self),
            nn_plotting_units.KohonenInputMaps(self),
            nn_plotting_units.KohonenNeighborMap(self)
        ]
        self.plotters[0].link_attrs(self.trainer, "shape") \
            .link_from(self.ipython)
        self.plotters[0].input = self.decision.winners_mem
        self.plotters[0].gate_block = ~self.decision.epoch_ended
        self.plotters[1].link_attrs(self.trainer, "shape") \
            .link_from(self.ipython)
        self.plotters[1].input = self.decision.weights_mem
        self.plotters[1].gate_block = ~self.decision.epoch_ended
        self.plotters[2].link_attrs(self.trainer, "shape") \
            .link_from(self.ipython)
        self.plotters[2].input = self.decision.weights_mem
        self.plotters[2].gate_block = ~self.decision.epoch_ended
Example #5
    def __init__(self, workflow, **kwargs):
        super(WineWorkflow, self).__init__(workflow, **kwargs)
        layers = kwargs["layers"]

        self.downloader = Downloader(
            self, url=root.wine.downloader.url,
            directory=root.wine.downloader.directory,
            files=root.wine.downloader.files)
        self.downloader.link_from(self.start_point)

        self.repeater.link_from(self.downloader)

        self.loader = WineLoader(
            self, minibatch_size=root.wine.loader.minibatch_size,
            force_numpy=root.wine.loader.force_numpy,
            dataset_file=root.wine.loader.dataset_file,
            normalization_type=root.wine.loader.normalization_type)
        self.loader.link_from(self.repeater)

        # Add forward units
        del self.forwards[:]
        for i, layer in enumerate(layers):
            if i < len(layers) - 1:
                aa = all2all.All2AllTanh(
                    self, output_sample_shape=(layer,),
                    weights_stddev=0.05, bias_stddev=0.05)
            else:
                aa = all2all.All2AllSoftmax(
                    self, output_sample_shape=(layer,),
                    weights_stddev=0.05, bias_stddev=0.05)
            self.forwards.append(aa)
            if i:
                self.forwards[-1].link_from(self.forwards[-2])
                self.forwards[-1].link_attrs(
                    self.forwards[-2], ("input", "output"))
            else:
                self.forwards[-1].link_from(self.loader)
                self.forwards[-1].link_attrs(
                    self.loader, ("input", "minibatch_data"))

        # Add an evaluator for a single minibatch
        self.evaluator = evaluator.EvaluatorSoftmax(self)
        self.evaluator.link_from(self.forwards[-1])
        self.evaluator.link_attrs(self.forwards[-1], "output", "max_idx")
        self.evaluator.link_attrs(self.loader,
                                  ("batch_size", "minibatch_size"),
                                  ("max_samples_per_epoch", "total_samples"),
                                  ("labels", "minibatch_labels"),
                                  ("offset", "minibatch_offset"),
                                  "class_lengths")

        # Add decision unit
        self.decision = decision.DecisionGD(
            self, fail_iterations=root.wine.decision.fail_iterations,
            max_epochs=root.wine.decision.max_epochs)
        self.decision.link_from(self.evaluator)
        self.decision.link_attrs(self.loader,
                                 "minibatch_class", "minibatch_size",
                                 "last_minibatch", "class_lengths",
                                 "epoch_ended", "epoch_number")
        self.decision.link_attrs(
            self.evaluator,
            ("minibatch_n_err", "n_err"),
            ("minibatch_confusion_matrix", "confusion_matrix"),
            ("minibatch_max_err_y_sum", "max_err_output_sum"))

        self.snapshotter = NNSnapshotterToFile(
            self, prefix=root.wine.snapshotter.prefix,
            directory=root.common.dirs.snapshots, compression="",
            interval=root.wine.snapshotter.interval,
            time_interval=root.wine.snapshotter.time_interval)
        self.snapshotter.link_from(self.decision)
        self.snapshotter.link_attrs(self.decision,
                                    ("suffix", "snapshot_suffix"))
        self.snapshotter.gate_skip = ~self.loader.epoch_ended
        self.snapshotter.skip = ~self.decision.improved

        self.end_point.link_from(self.snapshotter)
        self.end_point.gate_block = ~self.decision.complete

        # Add gradient descent units
        self.gds[:] = (None,) * len(self.forwards)
        self.gds[-1] = gd.GDSoftmax(self) \
            .link_from(self.snapshotter) \
            .link_attrs(self.evaluator, "err_output") \
            .link_attrs(self.forwards[-1], "output", "input",
                        "weights", "bias") \
            .link_attrs(self.loader, ("batch_size", "minibatch_size"))
        self.gds[-1].gate_skip = self.decision.gd_skip
        self.gds[-1].gate_block = self.decision.complete
        for i in range(len(self.forwards) - 2, -1, -1):
            self.gds[i] = gd.GDTanh(self) \
                .link_from(self.gds[i + 1]) \
                .link_attrs(self.gds[i + 1], ("err_output", "err_input")) \
                .link_attrs(self.forwards[i], "output", "input",
                            "weights", "bias") \
                .link_attrs(self.loader, ("batch_size", "minibatch_size"))
            self.gds[i].gate_skip = self.decision.gd_skip
        self.gds[0].need_err_input = False
        self.repeater.link_from(self.gds[0])
        self.loader.gate_block = self.decision.complete
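
The layers argument taken from kwargs defines the topology: every entry except the last becomes an All2AllTanh layer and the final entry becomes the All2AllSoftmax classifier. A hedged construction sketch follows; the launcher parent and the layer sizes are illustrative assumptions.

# Hypothetical instantiation: one tanh hidden layer of 8 units and a
# 3-way softmax output, one unit per wine class.
workflow = WineWorkflow(launcher, layers=[8, 3])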