def _evaluate_solution(self, encoded_solution):
    """Evaluate one encoded candidate solution, reusing cached metrics when available.

    Decodes the candidate, looks its hash up in the metrics cache, and only
    builds/evaluates the RNN on a cache miss. Returns the metrics dict.
    NOTE(review): the cache key covers look_back + weights only — presumably
    the weight values implicitly encode the architecture; confirm no collisions
    between distinct layer layouts are possible.
    """
    solution = self._decode_solution(encoded_solution)
    print('Evaluate: ' + str(solution['layers']) + ' ...')

    # Cache key: SHA-224 over the stringified look-back window and weights.
    digest_input = (str(solution['look_back']).encode('UTF-8')
                    + str(solution['weights']).encode('UTF-8'))
    model_hash = hashlib.sha224(digest_input).hexdigest()

    # upsert_cache with None acts as a pure lookup.
    metrics = self.cache.upsert_cache(model_hash, None)
    if metrics is not None:
        # Cache hit: nothing to evaluate.
        print('Metrics load from cache')
        print(metrics)
        self.memory_tracker.print_diff()
        return metrics

    network = nn.RNNBuilder(solution['layers'], solution['weights'],
                            dense_activation=self.config.dense_activation)
    if self.config.blind:
        # Blind mode: roll predictions forward over the test split.
        y_predicted = network.predict_blind(self.data['train'], self.data['test'],
                                            self.config.x_features,
                                            self.config.y_features,
                                            solution['look_back'])
        y_gt = self.data['test'][self.config.y_features].values[:,:]
    else:
        # Teacher-forced mode: ground truth is offset by the look-back window.
        y_predicted = network.predict(self.data[self.config.x_features],
                                      solution['look_back'])
        y_gt = self.data[self.config.y_features].values[solution['look_back']:,:]

    metrics = {
        'trainable_params': int(network.trainable_params),
        'num_hidden_layers': int(network.hidden_layers),
        'layers': '-'.join(map(str, solution['layers'])),
        'mse': ut.mse_loss(y_predicted, y_gt),
        'mae': ut.mae_loss(y_predicted, y_gt),
        # Hidden neurons exclude the input and output layers.
        'num_hidden_neurons': int(np.sum(solution['layers'][1:-1])),
        'look_back': int(solution['look_back']),
    }
    # Drop the model before caching to release its memory promptly.
    del network
    self.cache.upsert_cache(model_hash, metrics)

    self.memory_tracker.print_diff()
    return metrics
def _sample_architecture(self, layers, look_back):
    """Estimate an architecture's quality by evaluating randomly sampled weights.

    Builds one RNN for the given layer layout, then repeatedly (config.samples
    times) injects freshly generated random weights, predicts, and records the
    MAE against the ground truth offset by look_back. Returns a dict with the
    MAE mean, standard deviation, the raw MAE list, the architecture, and the
    look-back value.
    """
    network = nn.RNNBuilder(layers, dense_activation=self.config.dense_activation)
    errors = []
    for _ in range(self.config.samples):
        # Re-draw weights for the same topology and score the result.
        network.update_weights(self._generate_weights(layers))
        predicted = network.predict(self.data[self.config.x_features], look_back)
        observed = self.data[self.config.y_features].values[look_back:,:]
        errors.append(ut.mae_loss(predicted, observed))
    # Free the model before aggregating statistics.
    del network
    return {
        'mean': np.mean(errors),
        'sd': np.std(errors),
        'maes': errors,
        'arch': layers,
        'look_back': look_back,
    }