Example #1
import tensorflow as tf


def tf_nse(true, pred, name='NSE'):
    """Nash-Sutcliffe efficiency to be used as a loss function.

    NSE = 1 - sum((pred - true)^2) / sum((true - mean(true))^2); the NSE is
    subtracted from one before being returned, so a perfect prediction yields 0.
    """
    num = tf.reduce_sum(tf.square(tf.subtract(pred, true)))
    denom = tf.reduce_sum(
        tf.square(tf.subtract(true, tf.math.reduce_mean(true))))
    const = tf.constant(1.0, dtype=tf.float32)
    _nse = tf.subtract(const, tf.math.divide(num, denom), name=name)
    return tf.subtract(const, _nse, name=name + '_LOSS')
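
A minimal usage sketch for tf_nse, assuming eager TensorFlow 2.x; the toy tensors below are illustrative only and not part of the original example:

y_true = tf.constant([1.0, 2.0, 3.0, 4.0])
y_pred = tf.constant([1.1, 1.9, 3.2, 3.8])
loss = tf_nse(y_true, y_pred)  # equals 1 - NSE, so 0.0 for a perfect prediction
print(float(loss))

The val_step and train_step closures below come from inside a model-wrapper method: they refer to self.k_model, self.outs, self.tr_outs / self.val_outs, loss_fn and optimizer from the enclosing scope, which is why they are shown at their original indentation.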
        def val_step(_val_x, complete_y):
            masks = []
            y_trues = []
            for out in range(self.outs):
                mask = tf.greater(tf.reshape(complete_y[out], (-1,)), 0.0)  # (batch_size,) mask of valid (> 0) targets
                masks.append(mask)
                y_trues.append(complete_y[out][mask])

            predictions = self.k_model(_val_x, training=False)  # predictions for this minibatch

            val_step_losses = {}
            for out in range(self.outs):
                y_obj = predictions[out][masks[out]]

                val_step_losses['_' + str(out)] = keras.backend.mean(loss_fn(y_trues[out], y_obj))

            # Aggregate: keep only the outputs selected for validation and sum the finite per-output losses.
            _x = tf.stack(list(val_step_losses.values()))
            _x = tf.boolean_mask(_x, self.val_outs)
            loss_val = tf.reduce_sum(tf.boolean_mask(_x, tf.math.is_finite(_x)))
            val_step_losses.update({'loss': loss_val})

            return val_step_losses
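
        # val_step above returns the per-output losses plus their aggregated sum
        # under 'loss'; train_step below applies the same per-output masking
        # inside a GradientTape so that only finite losses of the selected
        # outputs drive the weight update.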
        def train_step(train__x, complete_y):

            with tf.GradientTape() as tape:
                _masks = []
                _y_trues = []
                for out in range(self.outs):
                    mask = tf.greater(tf.reshape(complete_y[out], (-1,)), 0.0)  # (batch_size,) mask of valid (> 0) targets
                    _masks.append(mask)
                    _y_trues.append(complete_y[out][mask])

                _predictions = self.k_model(train__x, training=True)  # predictions for this minibatch

                losses = {}
                for out in range(self.outs):
                    y_obj = _predictions[out][_masks[out]]
                    # If the mask removes every sample for this output, loss_fn sees
                    # empty tensors and the resulting loss is NaN; such non-finite
                    # losses are dropped by the is_finite mask below.
                    losses['_' + str(out)] = keras.backend.mean(loss_fn(_y_trues[out], y_obj))

                # Aggregate: keep only the outputs selected for training and sum the finite per-output losses.
                _x = tf.stack(list(losses.values()))
                _x = tf.boolean_mask(_x, self.tr_outs)
                loss_val = tf.reduce_sum(tf.boolean_mask(_x, tf.math.is_finite(_x)))

            losses.update({'loss': float(loss_val)})

            _grads = tape.gradient(loss_val, self.k_model.trainable_weights)  # list of gradients, one per trainable weight

            grads = [tf.clip_by_value(g, -1.0, 1.0) for g in _grads]  # clip gradients to [-1, 1] to stabilise training

            # Run one step of gradient descent by updating the value of the variables to minimize the loss.
            optimizer.apply_gradients(zip(grads, self.k_model.trainable_weights))

            return losses
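
A hedged sketch of an outer epoch loop around these step functions, written at module level for readability; train_dataset, val_dataset and epochs are assumptions, not part of the original code:

# Hypothetical driver loop (train_dataset, val_dataset and epochs are assumed names).
for epoch in range(epochs):
    train_losses, val_losses = [], []
    for batch_x, batch_y in train_dataset:
        step_losses = train_step(batch_x, batch_y)
        train_losses.append(step_losses['loss'])        # already a Python float
    for batch_x, batch_y in val_dataset:
        step_losses = val_step(batch_x, batch_y)
        val_losses.append(float(step_losses['loss']))    # tensor -> float
    print(f"epoch {epoch}: "
          f"train loss {sum(train_losses) / max(len(train_losses), 1):.4f}, "
          f"val loss {sum(val_losses) / max(len(val_losses), 1):.4f}")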