# Example 1
    def evaluate_full(self, x, context=None):
        """Shifted sphere: ||x - x_opt||^2 + f_opt, evaluated per row of x."""
        x = tf_atleast_2d(x)
        assert x.shape[1] == self.dim

        diff = x - self.x_opt
        return tf.linalg.norm(diff, axis=1)**2 + self.f_opt
    def evaluate_full(self, x, context=None):
        """Separable ellipsoid-style objective: per-coordinate weights `c`
        applied to the squared, osz-transformed, shifted input, plus f_opt."""
        x = tf_atleast_2d(x)
        assert x.shape[1] == self.dim

        transformed = self.t_osz(x - self.x_opt)
        weighted = self.c * transformed**2
        return tf.reduce_sum(weighted, axis=1) + self.f_opt
# Example 3
    def evaluate_full(self, x, context=None):
        """Discus-style objective: rotate and osz-transform the shifted input,
        then weight the first coordinate's square by 1e6; add f_opt."""
        x = tf_atleast_2d(x)
        assert x.shape[1] == self.dim

        rotated = self.r @ tf.transpose(x - self.x_opt)
        z = tf.transpose(self.t_osz(rotated))

        head = 1.e6 * z[:, 0]**2
        tail = tf.reduce_sum(z[:, 1:]**2, axis=1)
        return head + tail + self.f_opt
# Example 4
    def evaluate_full(self, x, context=None):
        """Rosenbrock-style sum over a scaled, rotated input offset by 1/2,
        plus f_opt."""
        x = tf_atleast_2d(x)
        assert x.shape[1] == self.dim

        z = tf.transpose(self.c * self.r @ tf.transpose(x) + 1 / 2)

        # Consecutive-coordinate Rosenbrock terms.
        quad = z[:, :-1]**2 - z[:, 1:]
        lin = z[:, :-1] - 1
        return tf.reduce_sum(100 * quad**2 + lin**2, axis=1) + self.f_opt
# Example 5
    def evaluate_full(self, x, context=None):
        """Ill-conditioned objective: rotate, apply the asymmetric transform,
        rotate again; all but the first squared coordinate are weighted by
        `self.condition`; add f_opt."""
        x = tf_atleast_2d(x)
        assert x.shape[1] == self.dim

        shifted = tf.transpose(x - self.x_opt)
        z = tf.transpose(self.r @ self.t_asy_beta(self.r @ shifted))

        tail = self.condition * tf.reduce_sum(z[:, 1:]**2, axis=1)
        return z[:, 0]**2 + tail + self.f_opt
# Example 6
    def evaluate_full(self, x, context=None):
        """Plain (unshifted, unrotated) Rosenbrock function, per row of x.

        NOTE(review): unlike sibling functions, no f_opt is added here —
        presumably intentional for this class; confirm.
        """
        x = tf_atleast_2d(x)
        assert x.shape[1] == self.dim

        quad = x[:, :-1]**2 - x[:, 1:]
        lin = x[:, :-1] - 1
        return tf.reduce_sum(100 * quad**2 + lin**2, axis=1)
# Example 7
    def evaluate_full(self, x, context=None):
        """Rastrigin-style objective on an osz/asy-transformed input scaled
        by `self.mat_fac`; adds f_opt."""
        x = tf_atleast_2d(x)
        assert x.shape[1] == self.dim

        # TODO: maybe flip data dimensions?
        inner = self.t_asy_beta(tf.transpose(self.t_osz(x - self.x_opt)))
        z = tf.transpose(self.mat_fac @ inner)

        cos_sum = tf.reduce_sum(tf.cos(2 * np.pi * z), axis=1)
        return 10 * (self.dim - cos_sum) + tflin.norm(z, axis=1)**2 + self.f_opt
# Example 8
    def evaluate_full(self, x, context=None):
        """Scaled, shifted Rosenbrock: z = c * (x - x_opt) + 1, then the
        usual consecutive-coordinate sum, plus f_opt."""
        x = tf_atleast_2d(x)
        assert x.shape[1] == self.dim

        z = self.c * (x - self.x_opt) + 1

        quad = z[:, :-1]**2 - z[:, 1:]
        lin = z[:, :-1] - 1
        return tf.reduce_sum(100 * quad**2 + lin**2, axis=1) + self.f_opt
    def evaluate_full(self, x, context=None):
        """Büche-Rastrigin-style objective (BBOB f4 form).

        Each coordinate of y = t_osz(x - x_opt) is scaled by
        s_i = sqrt(10)^(i / (dim - 1)); coordinates with even 0-based index
        (odd 1-based index in the BBOB definition) and positive y get an
        extra factor 10. Returns the Rastrigin value of z = s * y plus a
        boundary penalty (100 * f_pen) and f_opt.

        Bug fix: the original loop assigned s only for even i, leaving the
        odd-indexed scales at their zeros_like initialization and thereby
        zeroing out half the coordinates of z; per the BBOB definition every
        coordinate carries the base scale.
        """
        x = tf_atleast_2d(x)
        assert x.shape[1] == self.dim

        y = self.t_osz(x - self.x_opt)
        s = np.zeros_like(y)

        for i in range(self.dim):
            # Base scale for coordinate i; applied to every coordinate.
            scale = np.power(np.sqrt(10), i / (self.dim - 1))
            if i % 2 == 0:
                # Extra factor 10 only where y is positive.
                s[:, i] = np.where(y[:, i] > 0, 10 * scale, scale)
            else:
                s[:, i] = scale

        z = s * y

        out = 10 * (self.dim - np.sum(np.cos(2 * np.pi * z), axis=1)) + np.linalg.norm(z, axis=1) ** 2 \
              + 100 * self.f_pen(x) + self.f_opt

        return out
# Example 10
    def evaluate_full(self, x, context=None):
        """Bent-cigar-like objective: first coordinate squared plus 1e6 times
        the summed squares of the remaining coordinates.

        NOTE(review): no x_opt shift or f_opt offset here, unlike siblings —
        confirm this class intentionally omits them.
        """
        x = tf_atleast_2d(x)
        assert x.shape[1] == self.dim

        tail = tf.reduce_sum(x[:, 1:]**2, axis=1)
        return x[:, 0]**2 + 1e6 * tail