def test_array_param(self):

        # Check that the default initialization is finite.
        default_init = ArrayParam()
        self.assertTrue(np.isfinite(default_init.get()).all())

        lb = -0.1
        ub = 5.2
        shape = (3, 2)
        val = np.random.random(shape) * (ub - lb) + lb
        bad_val_ub = np.abs(val) + ub
        bad_val_lb = lb - np.abs(val)
        ap = ArrayParam('test', shape, lb=lb - 0.001, ub=ub + 0.001)

        # Check setting.
        ap_init = ArrayParam('test',
                             shape,
                             lb=lb - 0.001,
                             ub=ub + 0.001,
                             val=val)
        np_test.assert_array_almost_equal(val, ap_init.get())

        self.assertRaises(ValueError, ap.set, val[-1, :])

        # Bounds checking is disabled for now until it can be made optional.
        #self.assertRaises(ValueError, ap.set, bad_val_ub)
        #self.assertRaises(ValueError, ap.set, bad_val_lb)
        ap.set(val)

        # Check size.
        self.assertEqual(np.product(shape), ap.vector_size())
        self.assertEqual(np.product(shape), ap.free_size())

        # Check getting and free parameters.
        np_test.assert_array_almost_equal(val, ap.get())
        val_free = ap.get_free()
        ap.set(np.full(shape, 0.))
        ap.set_free(val_free)
        np_test.assert_array_almost_equal(val, ap.get())

        val_vec = ap.get_vector()
        ap.set(np.full(shape, 0.))
        ap.set_vector(val_vec)
        np_test.assert_array_almost_equal(val, ap.get())
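
In this test both vector_size() and free_size() equal np.product(shape), i.e. 3 * 2 = 6 entries for the (3, 2) array, and the round trips through set_free/get_free and set_vector/get_vector recover the original values.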
Example #2
    def dcov(x, y):
        dnx = d_n(x)
        dny = d_n(y)

        denom = np.product(dnx.shape)
        dc = (dnx * dny).sum() / denom
        dvx = (dnx**2).sum() / denom
        dvy = (dny**2).sum() / denom
        dr = dc / (np.sqrt(dvx) * np.sqrt(dvy))
        return dc  #, dr, dvx, dvy
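
A minimal, self-contained sketch of how this could be driven, assuming d_n (which is not shown in the snippet) returns the doubly-centered pairwise distance matrix of a 1-D sample, the usual ingredient of the sample distance covariance:

import numpy as np

def d_n(x):
    # Hypothetical stand-in for the helper used above: double-center the
    # pairwise distance matrix |x_j - x_k|.
    x = np.asarray(x, dtype=float).reshape(-1, 1)
    a = np.abs(x - x.T)
    return a - a.mean(axis=0) - a.mean(axis=1, keepdims=True) + a.mean()

rng = np.random.default_rng(0)
x = rng.normal(size=100)
y = 2.0 * x + rng.normal(size=100)

dnx, dny = d_n(x), d_n(y)
denom = np.product(dnx.shape)   # n * n entries; np.product is an alias of np.prod
dc = (dnx * dny).sum() / denom  # the quantity dcov() returns
print(dc)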
Example #3
def unpackage_params(d, shapes=[]):
    flat_params = unpack(d)

    params = []
    idx = 0
    for W_shape, b_shape in shapes:

        W_len = np.product(W_shape)
        b_len = np.product(b_shape)

        W = (flat_params[idx:idx + W_len]).reshape(*W_shape)
        idx += W_len

        b = (flat_params[idx:idx + b_len]).reshape(*b_shape)
        idx += b_len

        params.append((W, b))

    return params
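
A small round-trip sketch using the function above; the unpack helper is not shown in the snippet, so the version below is a hypothetical stand-in that simply returns the stored flat vector:

import numpy as np

def unpack(d):
    # Hypothetical stand-in for the real unpack() used by unpackage_params.
    return d["flat_params"]

shapes = [((4, 3), (3,)), ((3, 2), (2,))]
n_total = sum(np.product(W) + np.product(b) for W, b in shapes)
d = {"flat_params": np.arange(n_total, dtype=float)}

params = unpackage_params(d, shapes)
print([(W.shape, b.shape) for W, b in params])  # [((4, 3), (3,)), ((3, 2), (2,))]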
Example #4
    def loss_function_kernel(self, weights, x_in, ind):
        """Compute loss function with or without penalty factor."""
        net = self.mlp.network_output(x_in, weights)
        ref = self.reference_solution(ind)
        p_factor = 1.0
        # A point counts as a boundary point (of what is presumably the unit
        # square) if any coordinate is ~0, i.e. np.product(x_in) ~ 0, or if
        # either coordinate is ~1; such points get the penalty factor.
        is_boundary = (np.product(x_in) < 1.0E-12) or \
                      (abs(x_in[0] - 1.0) < 1.0E-12) or \
                      (abs(x_in[1] - 1.0) < 1.0E-12)
        if is_boundary:
            p_factor = self.penalty
        return ((ref - net) * p_factor)**2
Example #5
    def place_control_points(self,
                             method=[],
                             domain=[],
                             step_size=[],
                             growths=[]):
        """Places control points within a given domain."""
        [
            self.mlp.assert_input_length(input_list)
            for input_list in [method, domain, step_size, growths]
        ]

        print("\nPlacing control points for {} input variables.".format(
            len(domain)))
        print("------------------------------------------------")

        current_var = [domain[i][0] + step_size[i] for i in range(len(domain))]
        points = [[domain[i][0]] for i in range(len(domain))]
        lengths = []

        for i, method_i in enumerate(method):
            if method_i == 'uniform':
                while (current_var[i] + 1.0E-6) < domain[i][1]:
                    points[i].append(current_var[i])
                    current_var[i] += step_size[i]
            else:
                while (current_var[i] + 1.0E-6) < domain[i][1]:
                    points[i].append(current_var[i])
                    current_var[i] *= growths[i]
            points[i].append(domain[i][1])
            lengths.append(len(points[i]))
        total = np.product(np.asarray(lengths))
        all_points = []
        assert self.mlp.number_of_inputs <= 3
        if self.mlp.number_of_inputs == 1:
            all_points = np.asarray(points[0])
        elif self.mlp.number_of_inputs == 2:
            for p0 in points[0]:
                for p1 in points[1]:
                    all_points.append(np.asarray([p0, p1]))
        elif self.mlp.number_of_inputs == 3:
            for p0 in points[0]:
                for p1 in points[1]:
                    for p2 in points[2]:
                        all_points.append(np.asarray([p0, p1, p2]))
        self.control_points = np.asarray(all_points)
        self.loss_control_points = self.control_points
        self.loss_indices = range(len(self.control_points))

        print("Control points per input parameter: {}".format(lengths))
        print("The total number of control points is {}".format(total))
Example #6

    def __init__(self,
                 shape,
                 lb=-float("inf"),
                 ub=float("inf"),
                 default_validate=True,
                 free_default=None):
        """
        Parameters
        -------------
        shape: `tuple` of `int`
            The shape of the array.
        lb: `float`
            The (inclusive) lower bound for the entries of the array.
        ub: `float`
            The (inclusive) upper bound for the entries of the array.
        default_validate: `bool`, optional
            Whether or not the array is checked by default to lie within the
            specified bounds.
        free_default: `bool`, optional
            Whether the pattern is free by default.
        """
        self.default_validate = default_validate
        self._shape = tuple(shape)
        self._lb = lb
        self._ub = ub
        assert lb >= -float('inf')
        assert ub <= float('inf')
        if lb >= ub:
            raise ValueError(
                'Upper bound ub must strictly exceed lower bound lb')

        free_flat_length = flat_length = int(np.product(self._shape))

        super().__init__(flat_length,
                         free_flat_length,
                         free_default=free_default)

        # Cache arrays of indices for flat_indices
        # TODO: not sure this is a good idea or much of a speedup.
        self.__free_folded_indices = self.fold(np.arange(
            self.flat_length(free=True), dtype=int),
                                               validate_value=False,
                                               free=False)

        self.__nonfree_folded_indices = self.fold(np.arange(
            self.flat_length(free=False), dtype=int),
                                                  validate_value=False,
                                                  free=False)
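
With shape=(3, 2), for example, flat_length and free_flat_length are both int(np.product((3, 2))) = 6: the free (unconstrained) representation of this array pattern has one entry per array element.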
Example #7
def flatten(x):
    return np.reshape(x, np.product(x.shape))
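
A quick usage check; np.product(x.shape) is just the total element count, so the result matches x.ravel() for an ordinary array:

import numpy as np

x = np.arange(6).reshape(2, 3)
print(flatten(x))  # [0 1 2 3 4 5]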
Example #8
    def vector_size(self):
        return np.product(self.__shape)
Example #9
    def free_size(self):
        return np.product(self.__free_shape)
Example #10
    def initialize_weights(self, method):
        """Intialize weights and biases.

            Biases are always initialized with zero.
            The weights can be chosen randomly or with the Xavier-He
            initialization. The MLP has one input layer, at least one
            hidden layer, and one output layer. All layers except for
            the output have exactly one bias unit. All biases and weights
            (input and output) are stored in the array 'weights'.

        Parameters
        ----------
        method - string : Defines the initialization method;
                          Can be either 'random' or 'xavier_he'

        Set/Return
        ----------
        weights - array-like : 2D array containing all weights and biases;
                               It has the shape [rows, columns] with
                               rows = rows_input + rows_hidden + rows_output
                               (further explanation in the code)
                               columns = neurons_per_layer (all hidden layers
                               have the same number of neurons)
                               [0, :] accesses the input bias
                               [1, :] accesses the weights between the first
                               input neuron and all neurons of the first hidden
                               layer
                               ...
                               [number_of_inputs + 1, :] accesses the bias of
                               the first hidden layer
                               [number_of_inputs + 2, :] accesses the weights
                               between the first neuron of the first hidden
                               layer and all neurons of the second hidden layer
                               ...
                               [number_of_inputs + 1
                                + (neurons_per_layer + 1) * (layer - 1), :]
                               accesses the bias of hidden layer 'layer', with
                               layer=1 for the first hidden layer, and so on

        """
        # each input is connected to each neuron in the first hidden layer;
        # plus one for the bias
        input_rows = self.number_of_inputs + 1
        # each neuron in the hidden layer is connected to each neuron in the
        # next layer; plus one for the bias
        hidden_rows = self.neurons_per_layer + 1
        # +1 for the weights between last hidden layer and output neuron
        rows = input_rows + hidden_rows * (self.hidden_layers - 1) + 1
        weights = np.zeros([rows, self.neurons_per_layer]).astype(np.float64)
        np.random.seed(1111)

        if method == 'random':
            signs = self.random_signs(rows, self.neurons_per_layer)
            weights = np.multiply(np.random.rand(rows, self.neurons_per_layer),
                                  signs)
        elif method == 'xavier_he':
            stdv_in = np.sqrt(6 / self.number_of_inputs)
            stdv_rest = np.sqrt(6 / self.neurons_per_layer)

            # input weights
            signs = self.random_signs(self.number_of_inputs,
                                      self.neurons_per_layer)
            weights[1:self.number_of_inputs + 1] = np.multiply(
                np.random.rand(self.number_of_inputs, self.neurons_per_layer),
                signs) * stdv_in
            # weights of hidden layers
            start_hidden = self.number_of_inputs + 1
            for layer in range(self.hidden_layers - 1):
                signs = self.random_signs(self.neurons_per_layer,
                                          self.neurons_per_layer)
                start = start_hidden + (self.neurons_per_layer + 1) * layer + 1
                end = start + self.neurons_per_layer
                weights[start:end] = np.multiply(
                    np.random.rand(self.neurons_per_layer,
                                   self.neurons_per_layer), signs) * stdv_rest

            # output weights
            signs = self.random_signs(1, self.neurons_per_layer)
            weights[-1] = np.multiply(
                np.random.rand(1, self.neurons_per_layer), signs) * stdv_rest

        else:
            print(
                "Could not find method {} for initialization.".format(method))
            print("Possible options are 'random', 'xavier_he'")

        print("\nInitialized weights and biases for MLP with {} parameters.\n".
              format(rows * self.neurons_per_layer))
        print("MLP structure:")
        print("--------------")
        print("Input features:    {}".format(self.number_of_inputs))
        print("Hidden layers:     {}".format(self.hidden_layers))
        print("Neurons per layer: {}".format(self.neurons_per_layer))
        print("Number of weights: {}".format(np.product(weights.shape)))
        return weights
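
To make the layout concrete with made-up sizes: for number_of_inputs=2, hidden_layers=3 and neurons_per_layer=10, input_rows = 3 and hidden_rows = 11, so rows = 3 + 11 * 2 + 1 = 26 and the weight array holds 26 * 10 = 260 parameters, which is exactly what np.product(weights.shape) reports.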
Example #11
    def free_size(self):
        return int(np.product(self.__shape))
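
In contrast with Example #9, this version wraps the result in int(): np.product returns a NumPy scalar, and the cast gives callers a plain Python int.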