def initialize(deep_map, X,num_pseudo_params):
    smart_map = {}
    for layer, layer_map in deep_map.items():
        smart_map[layer] = {}
        for unit, gp_map in layer_map.items():
            smart_map[layer][unit] = {}
            cov_params = gp_map['cov_params']
            lengthscales = cov_params[1:]
            if layer == 0:
                pairs = itertools.combinations(X, 2)
                dists = np.array([np.abs(p1-p2) for p1,p2 in pairs])
                smart_lengthscales = np.array([np.log(np.median(dists[:,i])) for i in range(len(lengthscales))])
                kmeans = KMeans(n_clusters = num_pseudo_params, init = 'k-means++')
                fit = kmeans.fit(X)
                smart_x0 = fit.cluster_centers_
                #inds = npr.choice(len(X), num_pseudo_params, replace = False)
                #smart_x0 = np.array(X)[inds,:]
                smart_y0 = np.ndarray.flatten(smart_x0) 
                #smart_y0 = np.array(y)[inds]
                smart_noise_scale = np.log(np.var(smart_y0))
            else:
                smart_x0 = gp_map['x0']
                smart_y0 = np.ndarray.flatten(smart_x0[:,0])
                smart_lengthscales = np.array([np.log(1) for i in range(len(lengthscales))])
                smart_noise_scale = np.log(np.var(smart_y0))
            gp_map['cov_params'] = np.append(cov_params[0],smart_lengthscales)
            gp_map['x0'] = smart_x0
            gp_map['y0'] = smart_y0
            #gp_map['noise_scale'] = smart_noise_scale
            smart_map[layer][unit] = gp_map
    smart_params = pack_deep_params(smart_map)
    return smart_params
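A note on the snippet above: the first-layer initialization uses the log-median pairwise-distance heuristic for the lengthscales. A minimal, self-contained sketch of that heuristic on toy data (the names and data here are illustrative, not from the original project):

import itertools
import numpy as np

X = np.random.randn(50, 3)                   # 50 toy points, 3 input dimensions
pairs = itertools.combinations(X, 2)
dists = np.array([np.abs(p1 - p2) for p1, p2 in pairs])
log_lengthscales = np.log(np.median(dists, axis=0))  # one log-lengthscale per dimension
print(log_lengthscales.shape)                # (3,)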
Example 2
 def fitMulticlass(self,X,y):
     self.X = np.array(X) #converting X to np array 
     self.y = np.array(y) #converting y to np array
     self.X = np.append(np.ones((self.X.shape[0],1)),self.X,axis=1)
     self.totalFeatures = self.X.shape[1]
     self.theta= np.zeros((self.totalFeatures,y.shape[1]))
     for i in range(self.maxIterations):
         softP = self.softmax(self.X, self.theta)
         for j in range(self.y.shape[1]):
             val = np.exp(self.X.dot(self.theta[:,j]))/softP   
             error = -(self.y[:,j]-val)
             self.theta[:,j] = self.theta[:,j]- (self.learningRate * error.dot(self.X))/self.X.shape[0]
     return self.theta
Example 3
 def predict(self, X):
     """
         predicts according to the trained model
         X: samples
         return prediction
     """
     self.X = X
     bias = np.ones((self.X.shape[0], 1))
     self.X = np.append(bias, self.X, axis=1)
     self.X = np.array(self.X)
     Z = 1 / (1 + np.exp(-(self.X.dot(self.coef_))))
     Y = np.round(Z)
     return Y
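Several of the examples above prepend a bias column of ones with np.append(..., axis=1). A standalone illustration of the idiom:

import numpy as np

X = np.arange(6.0).reshape(3, 2)     # 3 samples, 2 features
bias = np.ones((X.shape[0], 1))
Xb = np.append(bias, X, axis=1)      # shape (3, 3), column of ones first
print(Xb)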
Example 4
    def step(self, action):
        #Y = self.obs(self.x)
        self.counter += 1
        self.x, self.P, K = self.EKF(self.x, self.P, action)
        self.L = np.linalg.cholesky(self.P)
        ind_tril = np.tril_indices(self.L.shape[0])
        done = self.terminal_state(self.x)
        reward = self.reward_func(action)

        self.state = np.append(self.x, self.L[ind_tril])
        if self.counter > 256:
            done = True
        return self.state, reward, done, {}
Example 5
    def fparams(self):
        """
        full parameter values

        Appends xi to params if the kernel uses xi

        Returns:
            np.array(float) - params with xi appended
        """
        if self.kernel.use_xi:
            return np.append(self.params, self.kernel.xi)
        else:
            return self.params
Example 6
    def dump_state(self, xk):
        '''
        callback to save the state to disk during optimization
        '''
        filename = 'state.txt'

        if not os.path.exists(filename):
            past = np.zeros((0, xk.shape[0]))
        else:
            past = np.loadtxt(filename)
            if past.ndim < 2:
                past = past.reshape(1, -1)
        np.savetxt(filename, np.append(past, xk.reshape(1, -1), axis=0))
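dump_state receives the current parameter vector xk, which matches the callback signature scipy.optimize.minimize expects, so each iterate is appended to state.txt as a new row. A hedged usage sketch with a free-function variant of the same callback (the objective and filename are illustrative):

import os
import numpy as np
from scipy import optimize

def dump_state(xk, filename='state.txt'):
    # append the current iterate as a new row of the state file
    if not os.path.exists(filename):
        past = np.zeros((0, xk.shape[0]))
    else:
        past = np.loadtxt(filename)
        if past.ndim < 2:
            past = past.reshape(1, -1)
    np.savetxt(filename, np.append(past, xk.reshape(1, -1), axis=0))

rosen = lambda x: 100.0 * (x[1] - x[0]**2)**2 + (1 - x[0])**2
optimize.minimize(rosen, np.array([-1.0, 1.0]), callback=dump_state)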
Example 7
 def predict_multi(self, X):
     """
         predict multiclass output
         X: samples
     """
     bias = np.ones((X.shape[0], 1))
     X = np.append(bias, X, axis=1)
     X = np.array(X)
     Z = 1 / (1 + np.exp(-(X.dot(self.coef_))))
     Y = []
     for i in Z:
         Y.append(np.argmax(i))
     return Y
Example 8
    def __init__(self):
        self.counter = 0
        self.targetUpdatefreq = 100 # Not being used

        self.max_action = 0.01
        self.world_box = np.array([[5.0, 5.0], [-5.0, -5.0]])
        #self.min_position = np.array([-5.0, -5.0])
        self.xlow = np.append(self.world_box[1], [0., -1., -1.])
        self.xhigh = np.append(self.world_box[0], [2*pi, 1., 1.])
        self.low_state = np.append(self.xlow, -10*np.ones(15))       #
        self.high_state = np.append(self.xhigh, 10*np.ones(15))       #

        self.action_space = spaces.Box(-np.ones(2), np.ones(2))
        self.observation_space = spaces.Box(self.low_state, self.high_state)

        self.viewer = None
        #self.state = self.observation_space.sample()

        self.noise = np.array([0.01]*5 + [0.2]*2) #std
        dt = 0.1
        self.Q = np.eye(5) * (0.01**2)
        self.R = np.eye(2) * (0.2**2)
        self.P = np.eye(5) * (0.0001**2)
        self.Id = np.eye(5)

        # initialize state
        pos = random.uniform(self.world_box[0], self.world_box[1])
        ang = math.atan2(pos[1], pos[0]) - pi + random.uniform(-pi/8, pi/8)  # atan2(y, x); pos[1] appeared twice, likely a typo
        ang %= 2*pi
        self.x = np.append(pos, [ang, 0., 0.])
        self.L = np.linalg.cholesky(self.P)

        self.goal_position = np.array([0., 0.])

        self.A = jacobian(self.dynamics)
        self.H = jacobian(self.obs)
        self.seed()
        self.reset()
Example 9
    def sdot(
        self, S, t, params, Cin
    ):  # X is population vector, t is time, R is intrinsic growth rate vector, C is the rate limiting nutrient vector, A is interaction matrix
        '''
        Calculates and returns derivatives for the numerical solver odeint

        Parameters:
            S: current state
            t: current time
            Cin: array of the concentrations of the auxotrophic nutrients and the
                common carbon source
            params: list parameters for all the equations
            num_species: the number of bacterial populations
        Returns:
            dsol: array of the derivatives for all state variables
        '''

        # extract variables

        # autograd may pass t as an ArrayBox; unwrap it before converting to int
        if type(t).__name__ == 'ArrayBox':
            t = t._value
        t = int(t)
        t = min(Cin.shape[0] - 1,
                t)  # to prevent solver from going past the max time

        C0in = Cin[t]

        #C0in = Cin
        N = S[0]
        C0 = S[1]
        # extract parameters
        q = self.ode_params[0]
        y, Rmax = params[0:2]
        Km = self.ode_params[3]
        R = self.monod(C0, Rmax, Km)

        # calculate derivatives
        dN = N * (R.astype(float) - q)  # q term takes account of the dilution
        dC0 = q * (C0in - C0) - 1 / y * R * N

        # construct derivative vector for odeint
        dC0 = np.array([dC0])
        dsol = np.append(dN, dC0)

        return tuple(dsol)
Example 10
File: firefly.py Project: svd3/DDPG
 def reward_func(self, action):
     R = np.eye(4) * 1.25
     P_reduced = np.delete(self.P, 2, 0)
     P_reduced = np.delete(P_reduced, 2, 1)
     #P_reduced = P_reduced + np.eye(4)*1e-4
     P_ = inv(P_reduced)
     S_ = inv(R) + P_
     S = inv(S_)
     mu = np.append(self.x[:2], self.x[3:])
     a = -0.5 * np.dot(mu.T.dot(P_ - np.dot(P_.dot(S), P_)), mu)
     reward = np.exp(a) * np.sqrt(np.linalg.det(S)/np.linalg.det(P_reduced)) #- np.sum(mu[:2]**2)
     #reward -= 1.
     reward -= -0.1*action[0]**2 + 1*action[1]**2 + 1.
     return reward
Example 11
 def gradient(self, params):
     # use the gradient if the model provides it.
     # if not, compute it using autograd.
     if not hasattr(self.mean, 'gradient'):
         _grad = lambda mean, argnum, params: jacobian(mean, argnum)(*params)
     else:
         _grad = lambda mean, argnum, params: mean.gradient(*params)[argnum]
     n_params = len(np.atleast_1d(params))
     grad_likelihood = np.array([])
     for i in range(n_params):
         grad = _grad(self.mean, i, params)
         grad_likelihood = np.append(grad_likelihood,
                                     np.nansum(grad * (1 - self.data / self.mean(*params))))
     return grad_likelihood
Example 12
    def __init__(self, breakpoints, *args, **kwargs):
        breakpoints = np.sort(breakpoints)
        if not (breakpoints[-1] < np.inf):
            raise ValueError("Do not add inf to the breakpoints.")

        if breakpoints[0] < 0:
            raise ValueError("First breakpoint must be greater than 0.")

        self.breakpoints = np.append(breakpoints, [np.inf])
        n_breakpoints = len(self.breakpoints)

        self._fitted_parameter_names = ["lambda_%d_" % i for i in range(n_breakpoints)]

        super(PiecewiseExponentialFitter, self).__init__(*args, **kwargs)
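Assuming this is lifelines' PiecewiseExponentialFitter, a typical call might look like the sketch below; the durations and breakpoints are made up:

import numpy as np
from lifelines import PiecewiseExponentialFitter

T = np.random.exponential(10.0, size=200)   # toy durations
E = np.ones_like(T)                         # all events observed
pwf = PiecewiseExponentialFitter(breakpoints=[5.0, 15.0]).fit(T, E)
pwf.print_summary()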
Example 13
    def __init__(self):
        self.counter = 0
        self.episode_len = 1000

        self.max_action = 1
        self.world_box = np.array([[5.0, 5.0], [-5.0, -5.0]])
        #self.min_position = np.array([-5.0, -5.0])
        #self.xlow = np.append(self.world_box[1], [0., -1., -1.])
        #self.xhigh = np.append(self.world_box[0], [2*pi, 1., 1.])
        self.xlow = np.array([0., -pi, -1., -1.])
        self.xhigh = np.array([8., pi, 1., 1.])

        self.low_state = np.append(self.xlow, -5 * np.ones(15))  #
        self.high_state = np.append(self.xhigh, 5 * np.ones(15))  #

        self.action_space = spaces.Box(-np.ones(2), np.ones(2))
        self.observation_space = spaces.Box(self.low_state, self.high_state)

        self.viewer = None
        #self.state = self.observation_space.sample()

        self.noise = np.array([0.01] * 5 + [0.1] * 2)  #std
        self.dt = 0.1
        self.Q = np.eye(5) * (0.01**2)
        self.R = np.eye(2) * (0.1**2)
        self.P = np.eye(5) * 0.
        self.Id = np.eye(5)

        self.A = jacobian(self.dynamics)
        self.H = jacobian(self.obs)

        self.goal_position = np.array([0., 0.])
        self.goal_radius = 0.8

        self.seed()
        self.reset()
Example 14
 def __init__(self, name='', length=1, matrix_size=2, diag_lb=0.0, val=None):
     self.name = name
     self.__matrix_size = int(matrix_size)
     self.__matrix_shape = np.array([ int(matrix_size), int(matrix_size) ])
     self.__length = int(length)
     self.__shape = np.append(self.__length, self.__matrix_shape)
     # vec_size is the size of a single matrix in vector form.
     self.__vec_size = int(matrix_size * (matrix_size + 1) / 2)
     self.__diag_lb = diag_lb
     assert diag_lb >= 0
     if val is None:
         default_val = np.diag(np.full(self.__matrix_size, diag_lb + 1.0))
         self.__val = np.broadcast_to(default_val, self.__shape)
     else:
         self.set(val)
Example 15
def l2_difference_minimisation(N, patterns, weights, biases, sc):
    '''
    minimisation of || lmbd*W @ Z - Z||
    '''
    Z = deepcopy(patterns).T.reshape(N, -1)
    W = np.array([])
    for i in range(N):
        A = Z.T
        b = Z[i, :]
        if sc == False:
            # add one more constraint to eliminate the diagonals
            tmp = np.zeros((1, N))
            tmp[0, i] = 1
            A = np.vstack([A, tmp])
            b = np.append(b, 0)
        elif sc == True:
            pass
        else:
            raise AttributeError(
                'The \'sc\' parameter can take only boolean values')
        w = lsqr(A, b)[0]
        W = np.append(W, w)
    weights = W.reshape(N, N)
    return weights, biases
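A hedged usage sketch of the function above, assuming lsqr is scipy.sparse.linalg.lsqr and patterns is an (n_patterns, N) array of stored patterns; the toy data is made up:

import numpy as np

patterns = np.sign(np.random.randn(5, 10))        # 5 random +/-1 patterns, N = 10
W, b = l2_difference_minimisation(10, patterns, None, np.zeros(10), sc=False)
print(W.shape)                                    # (10, 10); the extra constraint suppresses the diagonal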
Example 16
    def gradient_function(self, w):
        # separate w and w0
        w0 = w[-1]
        w = w[:-1]

        eta = np.dot(self.x, w.T) + w0

        grad_w = np.dot(
            np.where(eta > 30, self.y - 1,
                     self.y - (np.exp(eta) / (1 + np.exp(eta)))), self.x)
        grad_w_0 = np.dot(
            np.where(eta > 30, self.y - 1,
                     self.y - (np.exp(eta) / (1 + np.exp(eta)))), 1)
        print(grad_w_0)
        return np.append(grad_w, grad_w_0)
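The np.where(eta > 30, ...) guard above avoids overflow: for large eta, exp(eta)/(1 + exp(eta)) saturates at 1, so the residual reduces to y - 1. A standalone check of that stable residual (names are illustrative):

import numpy as np

def stable_residual(y, eta):
    # y - sigmoid(eta), with the saturated branch taken for large eta
    return np.where(eta > 30, y - 1.0,
                    y - np.exp(eta) / (1.0 + np.exp(eta)))

print(stable_residual(np.array([1.0, 0.0]), np.array([40.0, 0.0])))  # [ 0.  -0.5]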
Example 17
    def grouping(self, solution):
        # grouping different groups in solution into one group

        if self.number_group > 1:
            # solution_help = np.zeros((solution.shape[0], 10))
            # for i in range(10):
            #     solution_help[:, i] = np.sum(solution[:, i * self.number_group:(i+1) * self.number_group], axis=1)
            # solution = solution_help

            solution_help = []
            for i in range(10):
                solution_help = np.append(solution_help, np.sum(solution[:, i * self.number_group:(i+1) * self.number_group], axis=1))
            solution = np.reshape(solution_help, (10, solution.shape[0])).T

        return solution
Example 18
    def __init__(self, breakpoints, alpha=0.05, penalizer=0.0):
        super(PiecewiseExponentialRegressionFitter, self).__init__(alpha=alpha)

        breakpoints = np.sort(breakpoints)
        if len(breakpoints) and not (breakpoints[-1] < np.inf):
            raise ValueError("Do not add inf to the breakpoints.")

        if len(breakpoints) and breakpoints[0] < 0:
            raise ValueError("First breakpoint must be greater than 0.")

        self.breakpoints = np.append(breakpoints, [np.inf])
        self.n_breakpoints = len(self.breakpoints)

        self.penalizer = penalizer
        self._fitted_parameter_names = ["lambda_%d_" % i for i in range(self.n_breakpoints)]
Example 19
 def fit_autograd(self, X, y):
     self.X = np.array(X) #converting X to np array 
     self.y = np.array(y) #converting y to np array
     self.X = np.append(np.ones((self.X.shape[0],1)),self.X,axis=1) #appending columns of ones
     self.theta = np.random.rand(self.X.shape[1])  # alternative init: np.ones(self.X.shape[1])
     agrad = elementwise_grad(self.costFunctionUnregularised)
     agrad1 = elementwise_grad(self.costFunctionL1Regularised)
     agrad2 = elementwise_grad(self.costFunctionL2Regularised)
     for iterationNum in range(self.maxIterations):
         if self.regularization == 'l1':
             self.theta -= (self.learningRate*(agrad1(self.theta, self.X, self.y))) /(self.X.shape[0])
         elif self.regularization == 'l2':
             self.theta -= (self.learningRate*(agrad2(self.theta, self.X, self.y))) /(self.X.shape[0])
         else: 
             self.theta -= (self.learningRate*(agrad(self.theta, self.X, self.y))) /(self.X.shape[0])
     return self.theta
Example 20
def chebyshev_centre(A, b, gamma):
    rows, cols = A.shape
    c = np.zeros(cols + 1)
    c[-1] = -1
    A_ = np.hstack([A, np.sqrt(np.sum(np.power(A, 2), axis=1)).reshape(-1, 1)])
    A_ = np.vstack([A_, -c.reshape(1, -1)])
    b_ = np.append(b, 100).reshape(-1, 1)

    # l2 norm minimisation of w
    P = gamma * np.eye(cols + 1)
    P[:, -1] = P[-1, :] = 0

    res = solve_qp(P=P, q=c, G=A_, h=b_)
    x_c = np.array(res[:-1])
    R = float(res[-1])  # np.float was removed in NumPy 1.24
    return x_c, R
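For gamma = 0 the problem above reduces to the classic Chebyshev-centre linear program. A sketch of that LP with scipy.optimize.linprog (a substitution for the solve_qp call, not the original solver), computed for the unit box:

import numpy as np
from scipy.optimize import linprog

A = np.vstack([np.eye(2), -np.eye(2)])   # unit box: x <= 1, -x <= 1
b = np.ones(4)
norms = np.linalg.norm(A, axis=1, keepdims=True)
c = np.zeros(3)
c[-1] = -1                               # maximise the radius R
res = linprog(c, A_ub=np.hstack([A, norms]), b_ub=b,
              bounds=[(None, None), (None, None), (0, None)])
x_c, R = res.x[:-1], res.x[-1]
print(x_c, R)                            # centre ~ [0, 0], radius ~ 1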
Example 21
def run_nn(N, input_size, output_size):
    integer_part = int(np.floor(N[0]))
    alpha = N[0] - integer_part

    # Network parameters
    layer_sizes = [input_size]
    layer_sizes.extend([output_size for i in range(0, integer_part - 1)])
    L2_reg = 1.0

    # Training parameters
    param_scale = 0.1

    # Load and process wines data
    N_data, train_images, train_labels, test_images, test_labels = get_wine_data()
    batch_size = len(train_images)

    # Make neural net functions
    N_weights, pred_fun, loss_fun, frac_err = make_nn_funs(layer_sizes, L2_reg)

    # Gradient with respect to weights and alpha
    loss_grad_P = grad(loss_fun, 0)

    # Initialize weights
    rs = npr.RandomState(11)
    W = rs.randn(N_weights) * param_scale

    print("    Train err  |   Test err  |   Alpha")
    f_out = open(filename, 'w')
    f_out.write("    Train err  |   Test err  |   Alpha\n")
    f_out.close()

    def print_perf(params):
        f_out = open(filename, 'a')
        test_perf  = frac_err(params, test_images, test_labels)
        train_perf = frac_err(params, train_images, train_labels)
        print("{0:15}|{1:15}|{2:15}".format(train_perf, test_perf, params[-1]))
        f_out.write("{0:15}|{1:15}|{2:15}\n".format(train_perf, test_perf, params[-1]))
        f_out.close()

    # Minimize with BFGS
    res = optimize.minimize(loss_fun, np.append(W, alpha), jac=loss_grad_P, method='L-BFGS-B', \
        args=(train_images, train_labels), options={'disp': True, 'maxiter': 2})
    print(res)

    final_test_err = frac_err(res.x, train_images, train_labels)  # note: this evaluates on the training set despite the name
    print(N[0], final_test_err)
    return final_test_err
Example 22
 def __init__(self, name='', array_shape=(1), matrix_size=2, diag_lb=0.0, val=None):
     self.name = name
     self.__matrix_size = int(matrix_size)
     self.__matrix_shape = np.array([ int(matrix_size), int(matrix_size) ])
     self.__array_shape = array_shape
     self.__array_ranges = [ range(0, t) for t in self.__array_shape ]
     self.__array_length = np.prod(self.__array_shape)
     self.__shape = np.append(self.__array_shape, self.__matrix_shape)
     # __vec_size is the size of a single matrix in vector form.
     self.__vec_size = int(matrix_size * (matrix_size + 1) / 2)
     self.__diag_lb = diag_lb
     assert diag_lb >= 0
     if val is None:
         default_val = np.diag(np.full(self.__matrix_size, diag_lb + 1.0))
         self.__val = np.broadcast_to(default_val, self.__shape)
     else:
         self.set(val)
Example 23
    def __init__(self, breakpoints, *args, **kwargs):
        if (breakpoints is None) or (not list(breakpoints)):
            raise ValueError("Breakpoints must be provided.")

        if not (max(breakpoints) < np.inf):
            raise ValueError("Do not add inf to the breakpoints.")

        if min(breakpoints) <= 0:
            raise ValueError("First breakpoint must be greater than 0.")

        breakpoints = np.sort(breakpoints)
        self.breakpoints = np.append(breakpoints, [np.inf])
        n_breakpoints = len(self.breakpoints)

        self._fitted_parameter_names = ["lambda_%d_" % i for i in range(n_breakpoints)]

        super(PiecewiseExponentialFitter, self).__init__(*args, **kwargs)
Example 24
    def __init__(self, breakpoints, alpha=0.05, penalizer=0.0, fit_intercept=True):
        super(PiecewiseExponentialRegressionFitter, self).__init__(alpha=alpha)

        breakpoints = np.sort(breakpoints)
        if len(breakpoints) and not (breakpoints[-1] < np.inf):
            raise ValueError("Do not add inf to the breakpoints.")

        if len(breakpoints) and breakpoints[0] < 0:
            raise ValueError("First breakpoint must be greater than 0.")

        self.breakpoints = np.append(breakpoints, [np.inf])
        self.n_breakpoints = len(self.breakpoints)

        self._hazard = egrad(self._cumulative_hazard, argnum=1)  # pylint: disable=unexpected-keyword-arg
        self.penalizer = penalizer
        self.fit_intercept = fit_intercept
        self._fitted_parameter_names = ["lambda_%d_" % i for i in range(self.n_breakpoints)]
Example 25
  def compute_restricted_hessian_and_dParamsdWeights(self, dims, weights,
                                                     comp_dParams_dWeights=True):
    '''
    Computes the dims.shape[0] by dims.shape[0] Hessian only along the entries
    in dims (used when using l_1 regularization)
    '''
    theta0 = self.params.get_free()

    # Objective to differentiate just along the dimensions specified
    def lowDimObj(weights, thetaOnDims, thetaOffDims, invPerm):
      allDims = np.append(dims, offDims)
      thetaFull = np.append(thetaOnDims, thetaOffDims)[invPerm]
      return self.weighted_model_objective(weights, thetaFull)

    offDims = np.setdiff1d(np.arange(self.params.get_free().shape[0]), dims)
    thetaOnDims = theta0[dims]
    thetaOffDims = theta0[offDims]

    # lowDimObj will concatenate thetaOnDims, thetaOffDims, then needs to
    #  un-permute them into the original theta.
    allDims = np.append(dims, offDims)
    invPerm = np.zeros(theta0.shape[0], dtype=np.int32)
    for i, idx in enumerate(allDims):
      invPerm[idx] = i

    evalHess = autograd.hessian(lowDimObj, argnum=1)
    array_box_go_away = self.params.get_free().copy()

    restricted_hess = evalHess(weights,
                               thetaOnDims,
                               thetaOffDims,
                               invPerm)
    self.params.set_free(theta0)

    dObj_dParams = autograd.jacobian(lowDimObj, argnum=1)
    d2Obj_dParamsdWeights = autograd.jacobian(dObj_dParams, argnum=0)

    if comp_dParams_dWeights:
      restricted_dParamsdWeights = d2Obj_dParamsdWeights(weights,
                                                         thetaOnDims,
                                                         thetaOffDims,
                                                         invPerm)
      return restricted_hess, restricted_dParamsdWeights
    else:
      return restricted_hess
Example 26
    def predict_cumulative_hazard(self,
                                  df,
                                  times=None,
                                  conditional_after=None) -> pd.DataFrame:
        """
        Return the cumulative hazard rate of subjects in X at time points.

        Parameters
        ----------
        df: numpy array or DataFrame
            a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
            can be in any order. If a numpy array, columns must be in the
            same order as the training data.
        times: iterable, optional
            an iterable of increasing times to predict the cumulative hazard at. Default
            is the set of all durations (observed and unobserved). Uses a linear interpolation if
            points in time are not in the index.

        Returns
        -------
        cumulative_hazard_ : DataFrame
            the cumulative hazard of individuals over the timeline
        """

        if isinstance(df, pd.Series):
            return self.predict_cumulative_hazard(df.to_frame().T)

        if conditional_after is not None:
            raise NotImplementedError()

        times = np.atleast_1d(
            coalesce(times, self.timeline,
                     np.unique(self.durations))).astype(float)
        n = times.shape[0]
        times = times.reshape((n, 1))

        lambdas_ = self._prep_inputs_for_prediction_and_return_parameters(df)

        bp = np.append(self.breakpoints, [np.inf])
        M = np.minimum(np.tile(bp, (n, 1)), times)
        M = np.hstack([M[:, tuple([0])], np.diff(M, axis=1)])

        return pd.DataFrame(np.dot(M, (1 / lambdas_)),
                            columns=_get_index(df),
                            index=times[:, 0])
Example 27
def simple_contour(f, c=0, delta=0.01):
    '''Returns the requested coordinates on a frame of unit side length, in an optimised way'''
    x = np.array([])
    y = np.array([])

    def g(x):
        return (f(0, x))

    seed = find_seed(g, c)
    if seed is None:
        return x, y
    y = np.append(y, np.array([seed]))

    x = np.append(x, np.array([0]))

    grad = gradient(f, x[-1], y[-1])
    tang = [-grad[1], grad[0]]
    print(tang)
    if tang[0] < 0:
        tang = [grad[1], -grad[0]]
        while x[-1] <= 1 and y[-1] <= 1 and x[-1] >= 0 and y[-1] >= 0:
            grad = gradient(f, x[-1], y[-1])
            tang = [grad[1], -grad[0]]
            norme = math.sqrt(tang[0]**2 + tang[1]**2)
            x = np.append(x, np.array(x[-1] + (delta * tang[0]) / (norme)))
            y = np.append(y, np.array(y[-1] + (delta * tang[1]) / (norme)))

            def F(p, q):
                # Define F for the two most recently found points; parameters
                # renamed so they do not shadow the coordinate arrays x and y
                return np.array(
                    [f(p, q) - c, (p - x[-2])**2 + (q - y[-2])**2 - delta**2])

            res = newton(
                F, np.array([x[-1], y[-1]])
            )  # Apply Newton's method: WARNING, an unresolved problem appears when running autograd.jacobian
            x[-1], y[-1] = res[0], res[1]
    else:
        tang = [-grad[1], grad[0]]
        while x[-1] <= (1) and y[-1] <= (1) and x[-1] >= 0 and y[-1] >= 0:
            grad = gradient(f, x[-1], y[-1])
            tang = [-grad[1], grad[0]]
            norme = math.sqrt(tang[0]**2 + tang[1]**2)
            x = np.append(x, np.array(x[-1] + (delta * tang[0]) / (norme)))
            y = np.append(y, np.array(y[-1] + (delta * tang[1]) / (norme)))

            def F(p, q):  # parameters renamed to avoid shadowing the x, y arrays
                return np.array(
                    [f(p, q) - c, (p - x[-2])**2 + (q - y[-2])**2 - delta**2])

            res = newton(F, np.array([x[-1], y[-1]]))
            x[-1], y[-1] = res[0], res[1]
    return x, y
Example 28
    def forward(self, model, x_traj, u_traj, k_traj, K_traj, alpha):

        x_traj_new = np.array(x_traj)
        u_traj_new = np.array(u_traj)

        for t in range(len(u_traj)):
            control = alpha**t * k_traj[t] + np.matmul(
                K_traj[t], (x_traj_new[t] - x_traj[t]))
            u_traj_new[t] = np.clip(u_traj[t] + control, -2, 2)

            # Create the vector of observations X={x,u}
            observations = np.array([np.append(x_traj_new[t], u_traj_new[t])])
            # Comment for Task 4
            test_predict = x_traj_new[t] + model.predict(observations)
            # TODO Task 4: use the dynamics of the system instead of the learned model compute the next state -- DONE
            x_traj_new[t + 1] = test_predict
            # x_traj_new[t + 1] = self.f(x_traj_new[t], u_traj_new[t])

        return x_traj_new, u_traj_new
Example 29
def cost(S, v):
    # Unpack parameters
    nu = np.append(v, 0)

    logdetS = np.expand_dims(np.linalg.slogdet(S)[1], 1)
    y = np.concatenate([samples.T, np.ones((1, N))], axis=0)

    # Calculate log_q
    y = np.expand_dims(y, 0)

    # 'Probability' of y belonging to each cluster
    log_q = -0.5 * (np.sum(y * np.linalg.solve(S, y), axis=1) + logdetS)

    alpha = np.exp(nu)
    alpha = alpha / np.sum(alpha)
    alpha = np.expand_dims(alpha, 1)

    loglikvec = logsumexp(np.log(alpha) + log_q, axis=0)
    return -np.sum(loglikvec)
Example 30
    def init_yields(self, quiet=False):

        nf = len(self.KL_amplitude_files)
        npar = len(self.param_sets)
        total_yields = nf * npar

        if not quiet:
            print "Making yields for bias study:"
            print "  # of params  : {}".format(npar)
            print "  # of A(KL)'s : {}".format(nf)
            print "  # TOTAL yield sets : {}".format(total_yields)

        yield_n = 0
        if not quiet: print("  Yield gen: {}/{}".format(yield_n, total_yields))
        for fi, f in enumerate(self.KL_amplitude_files):
            self.generator_model.load_KL_amplitude(f)
            for pi, p in enumerate(self.param_sets):
                physics_param = p

                sys.stdout.write("\033[F")  #back to previous line
                sys.stdout.write("\033[K")  #clear line
                yield_n += 1
                if not quiet:
                    print("  Yield gen: {}/{}".format(yield_n, total_yields))

                if len(p) == 3:
                    # single channel setup
                    avg_yields = self.generator_model.predict_yields(
                        p, self.N_signal[0]
                    )  # Yields calculated using FullModel.py
                elif len(p) == 5:
                    # two channel channel setup
                    avg_yields_ch1 = self.generator_model.predict_yields(
                        p[0:3], self.N_signal[0]
                    )  # Yields calculated using FullModel.py
                    avg_yields_ch2 = self.generator_model.predict_yields(
                        [p[0], p[3], p[4]], self.N_signal[1]
                    )  # Yields calculated using FullModel.py
                    avg_yields = np.append(avg_yields_ch1, avg_yields_ch2)

                self.yields["{}_{}".format(fi, pi)] = avg_yields

        self.yields_init = True
Example 31
 def fit(self, X, y):
     """
         trains the model
         X: samples
         y: labels
         returns trained weights
     """
     self.X = X.copy()
     bias = np.ones((self.X.shape[0], 1))
      self.X = np.append(bias, self.X, axis=1)  # include bias column
     self.X = np.array(self.X)
     self.y = y
     self.nofFeatures = len(self.X[0])
     self.samples = len(self.X)
     self.coef_ = np.ones(self.nofFeatures)  # init features
     for i in range(self.epoch):
         err = self.sigmoid(self.X.dot(self.coef_)) - y  # error
         self.coef_ = self.coef_ - self.lr * (err).dot(self.X)  # coef
     return self.coef_
Example 32
    def yields_chi_square(self, xy_and_Fi, *args):
        ''' The function to be minimized when fitting x, y: the chi-square
            xy_and_Fi: a list with elements [{Fi}, xm, ym, xp, yp]
            data: the yields to which the fit is being made
        '''
        data = args[0]
        Nplus  = sum(data[0:len(data)//2]) # B+
        Nminus = sum(data[len(data)//2:])  # B-
        xy = xy_and_Fi[0:4]
        Fi = xy_and_Fi[4:]
        Fi = np.append(Fi, 1-sum(Fi))


        predictions = self.predict_yields(xy, Nplus, Nminus, Fi=Fi)
        uncertainty_squared = data
        for i, u in enumerate(uncertainty_squared):
            if u==0: uncertainty_squared[i]=1 # make sure there are no divisions by zero
        minLL = np.sum(0.5*(data - predictions)**2/uncertainty_squared) # least squares fit
        return minLL
Example 33
    f_out.close()

    def print_perf(params):
        f_out = open(filename, 'a')
        test_perf  = frac_err(params, test_images, test_labels)
        train_perf = frac_err(params, train_images, train_labels)
        print("{0:15}|{1:15}|{2:15}".format(train_perf, test_perf, params[-1]))
        f_out.write("{0:15}|{1:15}|{2:15}\n".format(train_perf, test_perf, params[-1]))
        f_out.close()

    # Minimize with BFGS
    num_iterations = []
    train_errors = []
    test_errors = [] 
    for i in range(0, 100):
        optimize.minimize(loss_fun, np.append(W, alpha), jac=loss_grad_P, method='L-BFGS-B', \
            args=(train_images, train_labels), options={'disp': True}, callback=print_perf)

        training_error = 0.
        test_error = 0.
        with open(filename, 'r') as input_file:
            next(input_file)
            curr_iteration = 0
            for line in input_file:
                data_as_string = line.split('|')
                data = list(map(float, data_as_string))

                training_error = data[0]
                test_error = data[1]
                curr_iteration += 1
            num_iterations.append(curr_iteration)
Example 34
def minConf_SPG(funObj, x, funProj, options=None):
    """ This function implements Mark Schmidt's MATLAB implementation of
    spectral projected gradient (SPG) to solve for projected quasi-Newton
    direction
                min funObj(x) s.t. x in C
    Parameters
    ----------
    funObj: function that returns objective function value and the gradient
    x: initial parameter value
    funProj: function that returns the projection of x onto C
    options:
        verbose: level of verbosity (0: no output, 1: final, 2: iter (default), 3:
            debug)
        optTol: tolerance used to check for optimality (default: 1e-5)
        progTol: tolerance used to check for lack of progress (default: 1e-9)
        maxIter: maximum number of calls to funObj (default: 500)
        numDiff: compute derivatives numerically (0: use user-supplied
            derivatives (default), 1: use finite differences, 2: use complex
            differentials)
        suffDec: sufficient decrease parameter in Armijo condition (default
            : 1e-4)
        interp: type of interpolation (0: step-size halving, 1: quadratic,
            2: cubic)
        memory: number of steps to look back in non-monotone Armijo
            condition
        useSpectral: use spectral scaling of gradient direction (default:
            1)
        curvilinear: backtrack along projection Arc (default: 0)
        testOpt: test optimality condition (default: 1)
        feasibleInit: if 1, then the initial point is assumed to be
            feasible
        bbType: type of Barzilai Borwein step (default: 1)
 
    Notes: 
        - if the projection is expensive to compute, you can reduce the
            number of projections by setting testOpt to 0
    """
    
    nVars = x.shape[0]
    options_default = {'verbose':2, 'numDiff':0, 'optTol':1e-5, 'progTol':1e-9,\
                'maxIter':500, 'suffDec':1e-4, 'interp':2, 'memory':10,\
                'useSpectral':1,'curvilinear':0,'feasibleInit':0,'testOpt':1,\
                'bbType':1}
    options = setDefaultOptions(options, options_default)

    if options['verbose'] >= 2:
        if options['testOpt'] == 1:
            print('{:10s}'.format('Iteration') +
                  '{:10s}'.format('FunEvals') +
                  '{:10s}'.format('Projections') +
                  '{:15s}'.format('StepLength') +
                  '{:15s}'.format('FunctionVal') +
                  '{:15s}'.format('OptCond'))
        else:
            print('{:10s}'.format('Iteration') +
                  '{:10s}'.format('FunEvals') +
                  '{:10s}'.format('Projections') +
                  '{:15s}'.format('StepLength') +
                  '{:15s}'.format('FunctionVal'))
    
    funEvalMultiplier = 1

    # evaluate initial point
    if options['feasibleInit'] == 0:
        x = funProj(x)
    [f, g] = funObj(x)
    projects = 1
    funEvals = 1

    # optionally check optimality
    if options['testOpt'] == 1:
        projects = projects + 1
        if np.max(np.abs(funProj(x-g)-x)) < options['optTol']:
            if options['verbose'] >= 1:
                print "First-order optimality conditions below optTol at initial point"
            return (x, f, funEvals, projects)
    
    i = 1
    while funEvals <= options['maxIter']:
        # compute step direction
        if i == 1 or options['useSpectral'] == 0:
            alpha = 1.
        else:
            y = g - g_old
            s = x - x_old
            if options['bbType'] == 1:
                alpha = np.dot(s,s)/np.dot(s,y)
            else:
                alpha = np.dot(s,y)/np.dot(y,y)
            if alpha <= 1e-10 or alpha >= 1e10:
                alpha = 1.
        
        d = -alpha * g
        f_old = f
        x_old = x
        g_old = g

        # compute projected step
        if options['curvilinear'] == 0:
            d = funProj(x+d) - x
            projects = projects + 1

        # check that progress can be made along the direction
        gtd = np.dot(g, d)
        if gtd > -options['progTol']:
            if options['verbose'] >= 1:
                print "Directional derivtive below progTol"
            break

        # select initial guess to step length
        if i == 1:
            t = np.minimum(1., 1./np.sum(np.abs(g)))
        else:
            t = 1.

        # compute reference function for non-monotone condition
        if options['memory'] == 1:
            funRef = f
        else:
            if i == 1:
                old_fvals = -np.inf * np.ones(options['memory'])
            
            if i <= options['memory']:
                old_fvals[i-1] = f
            else:
                old_fvals = np.append(old_fvals[1:], f)
            funRef = np.max(old_fvals)
        
        # evaluate the objective and gradient at the initial step
        if options['curvilinear'] == 1:
            x_new = funProj(x + t*d)
            projects = projects + 1
        else:
            x_new = x + t*d
        [f_new, g_new] = funObj(x_new)
        funEvals = funEvals + 1

        # Backtracking line search
        lineSearchIters = 1
        while f_new > funRef + options['suffDec']*np.dot(g,x_new-x) or \
                isLegal(f_new) == False:
            temp = t
            if options['interp'] == 0 or isLegal(f_new) == False:
                if options['verbose'] == 3:
                    print('Halving step size')
                t = t/2.
            elif options['interp'] == 2 and isLegal(g_new):
                if options['verbose'] == 3:
                    print("Cubic Backtracking")
                t = polyinterp(np.array([[0,f,gtd],\
                        [t,f_new,np.dot(g_new,d)]]))[0]
            elif lineSearchIters < 2 or isLegal(f_prev):
                if options['verbose'] == 3:
                    print("Quadratic Backtracking")
                t = polyinterp(np.array([[0, f, gtd],\
                        [t, f_new, 1j]]))[0]
            else:
                if options['verbose'] == 3:
                    print("Cubic Backtracking on Function Values")
                t = polyinterp(np.array([[0., f, gtd],\
                                         [t, f_new, 1j],\
                                         [t_prev, f_prev, 1j]]))[0]
            # adjust if change is too small
            if t < temp*1e-3:
                if options['verbose'] == 3:
                    print "Interpolated value too small, Adjusting"
                t = temp * 1e-3
            elif t > temp * 0.6:
                if options['verbose'] == 3:
                    print "Interpolated value too large, Adjusting"
                t = temp * 0.6

            # check whether step has become too small
            if np.max(np.abs(t*d)) < options['progTol'] or t == 0:
                if options['verbose'] == 3:
                    print "Line Search failed"
                t = 0.
                f_new = f
                g_new = g
                break
            
            # evaluate new point
            f_prev = f_new
            t_prev = temp
            if options['curvilinear'] == True:
                x_new = funProj(x + t*d)
                projects = projects + 1
            else:
                x_new = x + t*d
            [f_new, g_new] = funObj(x_new)
            funEvals = funEvals + 1
            lineSearchIters = lineSearchIters + 1
        
        # done with line search

        # take step
        x = x_new
        f = f_new
        g = g_new

        if options['testOpt'] == True:
            optCond = np.max(np.abs(funProj(x-g)-x))
            projects = projects + 1

        # output log
        if options['verbose'] >= 2:
            if options['testOpt'] == True:
                print('{:10d}'.format(i) +
                      '{:10d}'.format(funEvals*funEvalMultiplier) +
                      '{:10d}'.format(projects) +
                      '{:15.5e}'.format(t) +
                      '{:15.5e}'.format(f) +
                      '{:15.5e}'.format(optCond))
            else:
                print('{:10d}'.format(i) +
                      '{:10d}'.format(funEvals*funEvalMultiplier) +
                      '{:10d}'.format(projects) +
                      '{:15.5e}'.format(t) +
                      '{:15.5e}'.format(f))
        # check optimality
        if options['testOpt'] == True:
            if optCond < options['optTol']:
                if options['verbose'] >= 1:
                    print "First-order optimality conditions below optTol"
                break

        if np.max(np.abs(t*d)) < options['progTol']:
            if options['verbose'] >= 1:
                print "Step size below progTol"
            break
        
        if np.abs(f-f_old) < options['progTol']:
            if options['verbose'] >= 1:
                print "Function value changing by less than progTol"
            break

        if funEvals*funEvalMultiplier > options['maxIter']:
            if options['verbose'] >= 1:
                print "Function evaluation exceeds maxIter"
            break

        i = i + 1

    return (x, f, funEvals, projects)
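A hedged usage sketch of minConf_SPG: minimising a smooth quadratic over the nonnegative orthant, whose projection is a simple clip. This assumes the helpers the function references (setDefaultOptions, isLegal, polyinterp) are importable from the same module:

import numpy as np

a = np.array([1.0, -2.0, 3.0])

def funObj(x):
    # value and gradient of ||x - a||^2
    return np.sum((x - a)**2), 2.0 * (x - a)

funProj = lambda x: np.maximum(x, 0.0)   # projection onto {x >= 0}

x, f, funEvals, projects = minConf_SPG(funObj, np.zeros(3), funProj,
                                       options={'verbose': 0})
print(x)                                 # ~ [1., 0., 3.]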
Example 35
def polyinterp(points, doPlot=None, xminBound=None, xmaxBound=None):
    """ polynomial interpolation
    Parameters
    ----------
    points: shape(pointNum, 3), the three columns represent x, f, g
    doPlot: set to 1 to plot, default 0
    xminBound: min value that brackets minimum (default: min of points)
    xmaxBound: max value that brackets maximum (default: max of points)
    
    set f or g to sqrt(-1)=1j if they are not known
    the order of the polynomial is the number of known f and g values minus 1

    Returns
    -------
    minPos:
    fmin:
    """
    
    if doPlot is None:
        doPlot = 0

    nPoints = points.shape[0]
    order = np.sum(np.imag(points[:, 1:3]) == 0) -1
    
    # code for most common case: cubic interpolation of 2 points
    if nPoints == 2 and order == 3 and doPlot == 0:
        [minVal, minPos] = [np.min(points[:,0]), np.argmin(points[:,0])]
        notMinPos = 1 - minPos
        d1 = points[minPos,2] + points[notMinPos,2] - 3*(points[minPos,1]-\
                points[notMinPos,1])/(points[minPos,0]-points[notMinPos,0])

        t_d2 =  d1**2 - points[minPos,2]*points[notMinPos,2]
        if t_d2 > 0:
            d2 = np.sqrt(t_d2)
        else:
            d2 = np.sqrt(-t_d2) * 1j
        if np.isreal(d2):
            t = points[notMinPos,0] - (points[notMinPos,0]-points[minPos,0])*\
                    ((points[notMinPos,2]+d2-d1)/(points[notMinPos,2]-\
                    points[minPos,2]+2*d2))
            minPos = np.min([np.max([t,points[minPos,0]]), points[notMinPos,0]])
        else:
            minPos = np.mean(points[:,0])
        fmin = minVal
        return (minPos, fmin)
    
    xmin = np.min(points[:,0])
    xmax = np.max(points[:,0])

    # compute bounds of interpolation area
    if xminBound is None:
        xminBound = xmin
    if xmaxBound is None:
        xmaxBound = xmax

    # constraints based on available function values
    A = np.zeros((0, order+1))
    b = np.zeros((0, 1))
    for i in range(nPoints):
        if np.imag(points[i,1]) == 0:
            constraint = np.zeros(order+1)
            for j in np.arange(order,-1,-1):
                constraint[order-j] = points[i,0]**j
            A = np.vstack((A, constraint))
            b = np.append(b, points[i,1])
    
    # constraints based on available derivatives
    for i in range(nPoints):
        if np.isreal(points[i,2]):
            constraint = np.zeros(order+1)
            for j in range(1,order+1):
                constraint[j-1] = (order-j+1)* points[i,0]**(order-j)
            A = np.vstack((A, constraint))
            b = np.append(b,points[i,2])
    
    # find interpolating polynomial
    params = np.linalg.solve(A, b)

    # compute critical points
    dParams = np.zeros(order)
    for i in range(params.size-1):
        dParams[i] = params[i] * (order-i)
    
    if np.any(np.isinf(dParams)):
        cp = np.concatenate((np.array([xminBound, xmaxBound]), points[:,0]))
    else:
        cp = np.concatenate((np.array([xminBound, xmaxBound]), points[:,0], \
                np.roots(dParams)))
    
    # test critical points
    fmin = np.inf
    minPos = (xminBound + xmaxBound)/2.
    for xCP in cp:
        if np.imag(xCP) == 0 and xCP >= xminBound and xCP <= xmaxBound:
            fCP = np.polyval(params, xCP)
            if np.imag(fCP) == 0 and fCP < fmin:
                minPos = np.double(np.real(xCP))
                fmin = np.double(np.real(fCP))
    
    # plot situation (omit this part for now since we are not going to use it
    # anyway)

    return (minPos, fmin)
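A quick hedged check of polyinterp on a function whose minimiser is known, exercising the two-point cubic path (both f and g supplied at t = 0 and t = 1):

import numpy as np

f = lambda t: (t - 0.3)**2
g = lambda t: 2.0 * (t - 0.3)
points = np.array([[0.0, f(0.0), g(0.0)],
                   [1.0, f(1.0), g(1.0)]])
minPos, fmin = polyinterp(points)
print(minPos)                            # ~ 0.3, the true minimiser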
Example 36
 def pack_gp_params(mean, cov_params, noise_scale, x0, y0):
     params = np.append(mean,noise_scale)
     params = np.concatenate([params,cov_params])
     params = np.concatenate([params,np.ndarray.flatten(np.array(x0))])
     params = np.concatenate([params,np.ndarray.flatten(np.array(y0))])
     return params
Example 37
        layer_gp_params = unpack_layer_params[layer](layer_params)
        for dim in range(dimensions[layer+1]):
            gp_params = layer_gp_params[dim]
            mean, cov_params, noise_scale, x0, y0 = unpack_gp_params_all[layer][dim](gp_params)
            lengthscales = cov_params[1:]
            if layer == 0:
                pairs = itertools.combinations(X, 2)
                dists = np.array([np.abs(p1-p2) for p1,p2 in pairs])
                smart_lengthscales = np.array([np.log(np.median(dists[:,i])) for i in range(len(lengthscales))])
                smart_x0 = np.array(X)[rs.choice(len(X), num_pseudo_params, replace=False),:]
                smart_y0 = np.ndarray.flatten(smart_x0)
            else:
                smart_x0 = x0
                smart_y0 = np.ndarray.flatten(x0)
                smart_lengthscales = np.array([np.log(1) for i in range(len(lengthscales))])
            cov_params = np.append(cov_params[0],smart_lengthscales)
            params = pack_gp_params_all[layer][dim](mean, cov_params, noise_scale, smart_x0, smart_y0)
            smart_params = np.append(smart_params, params)

    init_params = smart_params



    print("Optimizing covariance parameters...")
    objective = lambda params: -log_likelihood(params)

    params = minimize(value_and_grad(objective), init_params, jac=True,
                          method='BFGS', callback=callback)

    plt.pause(10.0)
Example 38
#Correct
def KL_via_sampling(params,a2,b2,U):
    a1 = params[0]
    b1 = params[1]
    theta = generate_kumaraswamy(params,U)
    E = np.log(kumaraswamy_pdf(theta,params)/kumaraswamy_pdf(theta,np.array([a2,b2])))
    E = np.mean(E)
    return E


if __name__=='__main__':
    n = 100
    k = 80
    params = np.random.uniform(10,100,2)
    params = np.append(params,1.)
    m = np.array([0.,0.,0.])
    v = np.array([0.,0.,0.])
    for i in range(50000):
        params,m,v = iterate(params,n,k,i,m,v)
        if i%100==0:
            print(params)
            #print m,v
            #U1=np.random.uniform(0,1,100)
            #U2=np.random.uniform(0,1,100)
            #U3=np.random.uniform(0,1,n)
            #print lower_bound(params,n,k,U1,U2,U3)
    print(params)
    plt.clf()
    print("true mean")
    print((k + 1.) / (n + 2.))
Example 39
def pack_gp_params(gp_details):
    params = np.append(gp_details["mean"], gp_details["noise_scale"])
    params = np.concatenate([params, gp_details["cov_params"]])
    params = np.concatenate([params, np.ndarray.flatten(np.array(gp_details["x0"]))])
    params = np.concatenate([params, np.ndarray.flatten(np.array(gp_details["y0"]))])
    return params
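Finally, a behaviour worth keeping in mind across all of the examples above: np.append always returns a copy, and without an axis argument it flattens both inputs; with axis it behaves like np.concatenate:

import numpy as np

a = np.zeros((2, 2))
print(np.append(a, [9, 9]).shape)            # (6,)  -- flattened copy
print(np.append(a, [[9, 9]], axis=0).shape)  # (3, 2) -- like concatenate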