def process_example_dnn(self, example_batch):
    """Train one node's DNN on a single (features, label) example and log it.

    NOTE(review): dead code -- a later definition of ``process_example_dnn``
    in this module shadows this one, so this version is never called.
    """
    # Dump the model once, before any training has happened.
    if self.numberOfExamplesProcessed == 0:
        self._print_model()
    x_val, y_val = example_batch
    # Map a -1 label to 0 (presumably a {-1,1} -> {0,1} relabeling -- TODO confirm).
    y_val = 0 if y_val == -1 else y_val
    # Features arrive as (index, value) pairs; keep only the value, mapping -1 to 0.
    x_val = [0 if x == -1 else x for _, x in x_val]
    true_label = y_val
    # Select the node that trains on this example; ``idx`` is its index.
    node, idx = self.node_selector.get_node(self.nodes)
    predictionScore, loss = node.model.train((x_val, y_val))
    self.total_regression_error += abs(true_label-predictionScore)
    generate(Event(PREDICTION, self.parameters, self.current_round,
                   (true_label, predictionScore, idx),
                   node.model.getModelIdentifier()))
    self.total_error += loss
    self.numberOfExamplesProcessed += 1
    # Periodic progress trace (Python 2 print statement).
    if self.numberOfExamplesProcessed % 500 == 0:
        print predictionScore, loss
        self._print_model()
    # Synchronize the nodes once every ``numberOfNodes`` examples.
    if self.numberOfExamplesProcessed % self.numberOfNodes == 0:
        self.synchronize()
        self.current_round+=1
        # NOTE(review): ``self.computeUpdateMagnitude`` is a bound method,
        # which is always truthy, so this guard never skips the call; a
        # boolean flag was probably intended -- confirm.
        if self.computeUpdateMagnitude:
            self.computeUpdateMagnitude()
def computeUpdateMagnitude(self, quiet=False):
    """Measure how far node 0's model has drifted from the last snapshot.

    The absolute distance is reported via an UPDATE event unless ``quiet``
    is truthy; in either case the snapshot is refreshed afterwards so the
    next call measures drift relative to the current model.
    """
    reference_model = self.nodes[0].model
    drift = abs(self.last_hypothesis.distance(reference_model))
    should_report = not quiet
    if should_report:
        generate(Event(UPDATE, self.parameters, self.current_round, (drift, )))
    # Remember the current model as the baseline for the next measurement.
    self.last_hypothesis.clone(reference_model)
def logCommunication(self, env, nodesThatSentMessages):
    """Accumulate and report the communication cost of one round.

    Adds the combined model size of every sender to ``env``'s running
    totals, counts the messages, and emits a COMMUNICATION event carrying
    the round's payload size.
    """
    # Total payload shipped this round: one model per sending node.
    round_payload = sum(sender.model.getModelSize()
                        for sender in nodesThatSentMessages)
    env.total_message_size += round_payload
    env.total_communication += len(nodesThatSentMessages)
    generate(Event(COMMUNICATION, env.parameters, env.current_round,
                   (round_payload, env.current_round),
                   env.model_identifier))
def process_example(self, example):
    """Route one labeled example to a node, score it, and apply the update rule.

    DNN models are delegated to ``process_example_dnn``; otherwise the
    record is scored, losses and a PREDICTION event are logged, the update
    rule is applied, and the nodes are synchronized every
    ``numberOfNodes`` examples.
    """
    # NOTE(review): ``isinstance`` tests whether modelClass is an *instance*
    # of DNNModel; if modelClass holds a class (as its name suggests),
    # ``issubclass`` was probably intended -- confirm against callers.
    if isinstance(self.modelClass, dnn.model.DNNModel):
        self.process_example_dnn(example)
        return
    record, true_label = example
    # NOTE(review): here get_node returns a single node, while the DNN path
    # unpacks a (node, idx) pair -- verify both selector variants exist.
    node = self.node_selector.get_node(self.nodes)
    predictionScore = node.model.getPredictionScore(record)
    self.total_error += self.lossFunction(true_label, predictionScore)
    self.total_regression_error += abs(true_label-predictionScore)
    generate(Event(PREDICTION, self.parameters, self.current_round,
                   (true_label, predictionScore),
                   node.model.getModelIdentifier()))
    self.updateRule.update(node.model, record, predictionScore, true_label,
                           self.current_round)
    self.numberOfExamplesProcessed += 1
    # Synchronize the nodes once every ``numberOfNodes`` examples.
    if self.numberOfExamplesProcessed % self.numberOfNodes == 0:
        self.synchronize()
        self.current_round+=1
        # NOTE(review): ``self.computeUpdateMagnitude`` is a bound method,
        # which is always truthy, so this check always passes; a boolean
        # flag was probably intended -- confirm.
        if self.computeUpdateMagnitude:
            self.computeUpdateMagnitude()
def process_example_dnn(self, example_batch): x_val, y_val = example_batch node, idx = self.node_selector.get_node(self.nodes) predictionScore, loss = node.model.train((x_val, y_val)) # print predictionScore, loss self.total_regression_error += loss true_label = y_val[0] generate( Event(PREDICTION, self.parameters, self.current_round, (true_label, predictionScore, loss, idx), node.model.getModelIdentifier()) ) #, loss,self.current_round))) self.total_error += loss self.numberOfExamplesProcessed += len(x_val) if self.numberOfExamplesProcessed % 10000 == 0: print true_label, predictionScore, loss if self.numberOfExamplesProcessed % self.numberOfNodes == 0: self.synchronize() self.current_round += 1 if self.computeUpdateMagnitude: self.computeUpdateMagnitude()
def _generate_drift_event(self):
    """Mark the current round with a global concept-drift event."""
    drift_marker = Event("drift", 'global', self.current_round, (), "")
    events.generate(drift_marker)