def __init__(self, to_combine, *args, **kwargs):
    super(OpSimpleCombiner, self).__init__(*args, **kwargs)

    # build one sub-operator per item and feed them all from the same input
    operators = [build_operator(item, parent=self) for item in to_combine]

    # stack every sub-operator's output along the channel axis
    combiner = OpMultiArrayStacker(parent=self)
    combiner.AxisFlag.setValue('c')
    combiner.Images.resize(len(operators))
    for index, operator in enumerate(operators):
        combiner.Images[index].connect(operator.Output)
        operator.Input.connect(self.Input)

    # stack the Valid slots of those sub-operators that provide one
    valid_combiner = OpMultiArrayStacker(parent=self)
    valid_combiner.AxisFlag.setValue('c')
    valid_operators = [op for op in operators if hasattr(op, "Valid")]
    valid_combiner.Images.resize(len(valid_operators))
    for index, operator in enumerate(valid_operators):
        valid_combiner.Images[index].connect(operator.Valid)

    self._combiner = combiner
    self._valid_combiner = valid_combiner
    self._operators = operators
    self.Output.connect(combiner.Output)
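
# Usage sketch for OpSimpleCombiner (the feature operator names below are
# hypothetical; the real entries of `to_combine` are whatever build_operator
# accepts, e.g. a bare class or a {"class": ...} dict as exercised in
# testbuild_operator further down):
#
#   to_combine = [{"class": OpGaussianSmoothing, "sigma": 1.0},
#                 {"class": OpLaplacianOfGaussian}]
#   combiner = OpSimpleCombiner(to_combine, graph=Graph())
#   combiner.Input.setValue(raw_image)
#   features = combiner.Output[...].wait()  # sub-outputs stacked along 'c'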
def _train(self):
    logger.info("============ TRAINING SUPERVISED ============")
    tds = self._opTrainData
    vds = self._opValidData

    # pick the monitor channel that drives model selection
    if self._regression:
        # channel = "valid_objective"
        channel = "train_objective"
    else:
        channel = "valid_output_misclass"

    ext = []
    if channel is not None:
        # keep the parameters of the best model seen so far
        keep = MonitorBasedSaveBest.build(dict(channel=channel))
        ext.append(keep)
        # adjust the learning rate based on the monitored channel
        lra = sgd.MonitorBasedLRAdjuster(channel_name=channel)
        ext.append(lra)

    for other in self._extensions:
        ext.append(build_operator(other, workingdir=self._workingdir))
    self.extensions_used = ext

    termination_channel = channel if self._terminate_early else None
    criteria = get_termination_criteria(epochs=self._max_epochs,
                                        channel=termination_channel)

    monitors = {'train': tds, 'valid': vds}
    algorithm = sgd.SGD(
        learning_rate=self._learning_rate,
        batch_size=self._batch_size,
        learning_rule=learning_rule.Momentum(init_momentum=self._init_momentum),
        termination_criterion=criteria,
        monitoring_dataset=monitors,
        monitor_iteration_mode="sequential",
        monitoring_batch_size=self._monitor_batch_size,
        seed=None,
        train_iteration_mode='sequential')

    trainer = train.Train(dataset=tds, model=self._nn,
                          algorithm=algorithm, extensions=ext)
    trainer.main_loop()

    # restore the best parameters found during training
    params = keep.best_params
    best_cost = keep.best_cost
    logger.info("Restoring model with cost {}".format(best_cost))
    self._nn.set_param_values(params)

    # give persistent extensions a chance to write their results to disk
    # (loop variable renamed from `ext` to avoid shadowing the list above)
    for extension in self.extensions_used:
        if isinstance(extension, PersistentTrainExtension):
            extension.store()
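
# A minimal sketch of what get_termination_criteria might look like, assembled
# from pylearn2's stock criteria. The real helper may differ; prop_decrease
# and N below are illustrative values, not taken from the original code.
from pylearn2.termination_criteria import And, EpochCounter, MonitorBased

def get_termination_criteria(epochs, channel=None):
    # always cap the number of training epochs
    criteria = [EpochCounter(max_epochs=epochs)]
    if channel is not None:
        # additionally stop once `channel` has not improved for N epochs
        criteria.append(MonitorBased(channel_name=channel,
                                     prop_decrease=0.0, N=10))
    # training continues only while *all* criteria want to continue
    return And(criteria=criteria)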
def __init__(self, workingdir=None, config=None):
    self._config = None
    self._graph = Graph()
    self._workingdir = workingdir
    self._start_time = None

    kwargs = dict(graph=self._graph)

    if config is None:
        return

    for key in config:
        assert isinstance(key, str)
        attr = "_" + key
        assert not hasattr(self, attr)
        if key == "preprocessing":
            # preprocessing is a list of operator configs, built as-is
            value = [build_operator(subdict, **kwargs)
                     for subdict in config[key]]
        else:
            # every other component gets its own subdirectory
            subdir = os.path.join(workingdir, key)
            try:
                os.mkdir(subdir)
            except OSError as err:
                # an already-existing directory is fine; check errno instead
                # of the fragile, locale-dependent "exists" string match
                # (requires `import errno`)
                if err.errno != errno.EEXIST:
                    raise
            kwargs["workingdir"] = subdir
            value = build_operator(config[key], **kwargs)
        setattr(self, attr, value)

    self._initialize()
    self._config = config
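
# Illustrative config for the constructor above (all class and key names are
# hypothetical; the real keys depend on what build_operator can construct).
# Each non-"preprocessing" entry gets its own subdirectory under workingdir:
#
#   config = {
#       "preprocessing": [{"class": OpNormalize}],   # list, built directly
#       "train_data": {"class": OpDataProvider, "path": "train.h5"},
#       "classifier": {"class": OpMLPClassifier},
#   }
#   wf = SomeWorkflowClass(workingdir="/tmp/run01", config=config)
#   # -> wf._preprocessing, wf._train_data, wf._classifier are set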
def testbuild_operator(self):
    class NotBuildable(OpArrayPiper):
        @classmethod
        def build(cls, config, parent=None, graph=None, workingdir=None):
            return cls(parent=parent, graph=graph)

    # build_operator must accept a config dict with a "class" key as well as
    # a plain operator class
    configs = ({"class": OpBuildableArrayPiper},
               {"class": NotBuildable},
               OpArrayPiper)
    kws = ({"graph": Graph()},
           {"graph": Graph(), "workingdir": "temp"})

    for config in configs:
        for kwargs in kws:
            op = build_operator(config, **kwargs)
            print(op.__class__)
            assert isinstance(op, Operator), str(op)
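
# A minimal sketch of the build_operator contract this test exercises (the
# real implementation lives elsewhere; the dispatch below is an assumption):
def build_operator_sketch(config, parent=None, graph=None, workingdir=None):
    # accept either a {"class": ...} dict or a bare operator class
    cls = config["class"] if isinstance(config, dict) else config
    if hasattr(cls, "build"):
        # buildable operators receive the whole config plus context
        return cls.build(config if isinstance(config, dict) else {},
                         parent=parent, graph=graph, workingdir=workingdir)
    # plain lazyflow operators only understand parent/graph
    return cls(parent=parent, graph=graph)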
def init_model(self, model):
    sub_inits = self._initializers
    if isinstance(sub_inits, LayerWeightInitializer):
        # a single initializer is reused for every layer
        sub_inits = repeat(sub_inits)

    last_dim = model.get_input_space().dim
    visited_layers = []
    for init, layer in izip(sub_inits, model.layers):
        if isinstance(init, dict):
            init = build_operator(init, parent=self)
        next_dim = layer.get_output_space().dim
        if isinstance(init, OperatorLayerWeightInitializer):
            # operator-based initializers see the data forwarded through all
            # previously initialized layers, plus the targets
            forward = OpForwardLayers(visited_layers, parent=self)
            forward.Input.connect(self.Data)
            init.Input.resize(2)
            init.Input[1].connect(self.Target)
            init.Input[0].connect(forward.Output)
        init.init_layer(layer, nvis=last_dim, nhid=next_dim)
        last_dim = next_dim
        visited_layers.append(layer)
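
# Usage sketch for init_model (the initializer classes are hypothetical; pass
# one entry per layer, or a single LayerWeightInitializer reused for all):
#
#   self._initializers = [NormalWeightInitializer(std=0.01),   # layer 0
#                         {"class": LeastSquaresInitializer}]  # layer 1
#   self.init_model(mlp)
#   # if layer 1's initializer is an OperatorLayerWeightInitializer, it is
#   # wired to the activations of layer 0 (via OpForwardLayers) and to Target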