class reactor_modeler(object):
    """Feed-forward neural-network model of a set of reactor sensors.

    Builds a PyBrain network whose input and output layers each have one
    unit per sensor pin, plus an importance-weighted dataset for training.
    """

    def __init__(self, sensorPins, hiddenLayerSize=10):
        """Create the network and an empty training dataset.

        Args:
            sensorPins: dict mapping sensor names to pins; only its size
                (the number of sensors) is used here.
            hiddenLayerSize: number of units in the single hidden layer.
        """
        assert isinstance(sensorPins, dict)
        # Bug fixes vs. original: len(sensorPins.keys) took len() of the
        # bound method object (TypeError) -- len(sensorPins) is the sensor
        # count; and `true` was a NameError for `True`.
        numSensors = len(sensorPins)
        self.net = buildNetwork(numSensors, hiddenLayerSize, numSensors,
                                bias=True)
        # Inputs and targets both have one dimension per sensor.
        self.dataset = ImportanceDataSet(numSensors, numSensors)

    def add_trained_datapoint(self, inputDataPoint, outputDataPoint):
        """Append one supervised (input, target) sample to the dataset.

        Bug fix vs. original: addSample() was called with no arguments,
        silently discarding both data points.
        """
        self.dataset.addSample(inputDataPoint, outputDataPoint)
def fit(self, inp, y, sample_weight=None):
    """Train the PyBrain network on (inp, y) in scikit-learn style.

    Builds an ImportanceDataSet from the samples (one sequence per
    sample), constructs a fresh network with reproducibly-initialized
    weights, and trains it with backprop until convergence.

    Args:
        inp: 2-D array of shape (n_samples, n_features).
        y: label array; mapped to contiguous class indices via
            numpy.unique(..., return_inverse=True).
        sample_weight: optional per-sample weights; defaults to all ones.

    Returns:
        self, per the scikit-learn fit() convention.
    """
    # Learn the class set and remap y to positions 0..n_classes_-1.
    self.classes_, y = numpy.unique(y, return_inverse=True)
    self.n_classes_ = len(self.classes_)
    n_features = inp.shape[1]
    random_state = check_random_state(self.random_state)
    # We need to build an ImportanceDataSet from inp, y and sample_weight
    dataset = ImportanceDataSet(n_features, self.n_classes_)
    if sample_weight is None:
        sample_weight = numpy.ones(len(y))
    # NOTE(review): izip is Python-2-only (itertools.izip); on Python 3
    # this would need plain zip -- confirm the target interpreter.
    for x, label_pos, weight in izip(inp, y, sample_weight):
        # One-hot target vector; the sample weight is broadcast across
        # all output dimensions as the per-target importance.
        target = numpy.zeros(self.n_classes_)
        target[label_pos] = 1
        weight = weight * numpy.ones(self.n_classes_)
        # Each sample is its own sequence so recurrent state (if any)
        # does not leak between samples.
        dataset.newSequence()
        dataset.addSample(x, target, weight)
    if self.hidden_neurons is None:
        # Heuristic default: midpoint of input and output layer sizes.
        # NOTE(review): on Python 3 `/` yields a float here; PyBrain
        # expects an int layer size -- confirm interpreter version.
        hidden_neurons = (n_features + self.n_classes_)/2
    else:
        hidden_neurons = self.hidden_neurons
    self.network_ = buildNetwork(
        n_features,
        hidden_neurons,
        self.n_classes_,
        outclass=self._get_output_class()
    )
    # Set the initial parameters in a repeatable way
    net_params = random_state.random_sample(self.network_.paramdim)
    self.network_.params[:] = net_params
    self.trainer_ = BackpropTrainer(
        self.network_,
        dataset=dataset,
        learningrate=self.learning_rate,
        lrdecay=self.lr_decay,
        momentum=self.momentum,
        weightdecay=self.weight_decay
    )
    # NOTE(review): stock PyBrain's trainUntilConvergence takes a dataset
    # as its first positional argument, not a RandomState -- presumably
    # this targets a patched/forked trainer; verify before changing.
    self.trainer_.trainUntilConvergence(
        random_state,
        maxEpochs=self.max_epochs,
        continueEpochs=self.continue_epochs,
        validationProportion=self.validation_percent
    )
    return self
def __init__(self, sensorPins, hiddenLayerSize=10):
    """Create a per-sensor network and an empty training dataset.

    Args:
        sensorPins: dict mapping sensor names to pins; only its size
            (the number of sensors) is used here.
        hiddenLayerSize: number of units in the single hidden layer.
    """
    assert isinstance(sensorPins, dict)
    # Bug fixes vs. original: len(sensorPins.keys) took len() of the
    # bound method object (TypeError) -- len(sensorPins) is the sensor
    # count; and `true` was a NameError for `True`.
    numSensors = len(sensorPins)
    self.net = buildNetwork(numSensors, hiddenLayerSize, numSensors,
                            bias=True)
    # Inputs and targets both have one dimension per sensor.
    self.dataset = ImportanceDataSet(numSensors, numSensors)