Example #1
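    # Record the starting mean squared error; the delta seeds the convergence check below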
    mse = (error * error).mean()
    mse_delta = mse

    print('Initial mean squared error: {0}'.format(mse))
    
    # Iterate until convergence is reached
    iteration = 1
    while mse_delta >= CONVERGENCE_THRESHOLD:
        # Re-estimate each feature in turn, holding the other features at their current values
        for i, x in enumerate(X):
            # Get the partial residual
            error = y - np.sum(features[0:i], axis=0) - np.sum(features[i+1:], axis=0)

            # Update the kernel hyperparameters for this feature from the partial residual
            sqexp = partial(squared_exponential, BANDWIDTHS[i], 0.5 * (error.max() - error.min()), TAU2)
            gp.cov = sqexp

            # Get the new feature
            f_i = gp.predict(x, x, error, error.std(), percentile=None)[0]
            
            # Update the feature list
            features[i] = f_i

        # Calculate the total error
        error = y - np.sum(features, axis=0)

        # Track the mean squared error and how much it changed this iteration
        prev_mse = mse
        mse = (error * error).mean()
        mse_delta = np.abs(prev_mse - mse)