def evaluate(self, theta, extra=False):
    """Log-likelihood of a sum-of-sinusoids model under a flat unit-cube prior.

    ``theta`` is a flat vector of 3*m unit-interval parameters, interpreted in
    triples as (amplitude A, amplitude B, frequency f) for each of the m
    sinusoid components.  Each parameter is mapped linearly onto its physical
    range (``[Amin, Amax]`` for amplitudes, ``[fmin, fmax]`` for frequencies).

    :param theta: 1-D array of 3*m parameters, each expected in [0, 1].
    :param extra: If True, also return the mapped A, B, f vectors and the
        model signal g in addition to the log-likelihood.
    :return: ``logL`` (float), or ``(logL, A, B, f, g)`` when ``extra``.
        Returns ``-np.inf`` when any parameter falls outside [0, 1].
    """
    # Flat prior: reject any point outside the unit hypercube.
    if np.any(theta < 0.0) or np.any(theta > 1.0):
        return -np.inf

    sigma = 0.1  # fixed noise standard deviation of the Gaussian likelihood
    # BUGFIX: was `theta.size / 3` (float under Python 3), which makes the
    # subsequent reshape to [m, 1] raise TypeError. Integer division is needed.
    m = theta.size // 3
    n = self.data.size

    # Map unit-interval parameters onto their physical ranges; column vectors
    # of shape (m, 1) so they broadcast against the (1, n) time grid below
    # (broadcasting replaces the original repmat tiling with identical results).
    A = ((self.Amax - self.Amin) * theta[0::3] + self.Amin).reshape(m, 1)
    B = ((self.Amax - self.Amin) * theta[1::3] + self.Amin).reshape(m, 1)
    f = ((self.fmax - self.fmin) * theta[2::3] + self.fmin).reshape(m, 1)
    t = np.reshape(self.t, (1, n))

    # Model: sum over the m components of A*cos(2*pi*f*t) + B*sin(2*pi*f*t).
    phase = 2 * np.pi * f * t
    g = np.sum(A * np.cos(phase) + B * np.sin(phase), 0)
    g = np.reshape(g, self.data.shape)

    # Gaussian log-likelihood up to an additive constant.
    logL = -np.sum((g - self.data) ** 2) / (2 * sigma ** 2)
    if extra:
        return logL, A[:, 0], B[:, 0], f[:, 0], g
    else:
        return logL
def resizem(A_in, row_new, col_new):
    """
    This function resizes a regular data grid by copying and pasting parts of
    the original array (nearest-neighbour replication: each cell of *A_in* is
    expanded into a ``row_rep`` x ``col_rep`` tile).

    :param A_in: Input matrix.
    :type A_in: numpy array
    :param row_new: New number of rows. Must be an integer multiple of
        ``A_in.shape[0]``.
    :type row_new: integer
    :param col_new: New number of columns. Must be an integer multiple of
        ``A_in.shape[1]``.
    :type col_new: integer

    :return A_out: Resized matrix of shape ``(row_new, col_new)``.
    :rtype: numpy array

    :raises ValueError: If the new dimensions are not integer multiples of
        the original ones.
    """
    A_in = np.asarray(A_in)
    row_rep = row_new // A_in.shape[0]
    col_rep = col_new // A_in.shape[1]
    # The original implementation silently produced a reshape error for
    # non-multiple sizes; make the contract explicit instead.
    if row_rep * A_in.shape[0] != row_new or col_rep * A_in.shape[1] != col_new:
        raise ValueError(
            'resizem: new shape ({0}, {1}) must be integer multiples of the '
            'input shape {2}'.format(row_new, col_new, A_in.shape))
    # np.repeat along both axes is exactly what the original chain of
    # repmat/reshape/transpose calls computed, without the dead `A_inf`
    # intermediate the old code also built and discarded.
    return np.repeat(np.repeat(A_in, row_rep, axis=0), col_rep, axis=1)
def __init__(self, t, data, fs, Amin, Amax):
    """Store the time grid, observations and search bounds for the model.

    :param t: Sample times; reshaped to a 1 x n row vector.
    :param data: Observed signal; reshaped to a 1 x n row vector.
    :param fs: Sampling frequency.
    :param Amin: Lower amplitude bound.
    :param Amax: Upper amplitude bound.
    """
    n_samples = data.size
    # Force both series into 1 x n row vectors so later matrix ops line up.
    self.t = matlib.reshape(t, [1, n_samples])
    self.data = matlib.reshape(data, [1, n_samples])
    self.fs, self.Amin, self.Amax = fs, Amin, Amax
    # Frequency search window: from DC up to a tenth of the sampling rate.
    self.fmin = 0.0
    self.fmax = fs / 10
def ccprmod(supports, idx_correct_label, B=20):
    """Python implementation of the ccprmod.m (Classifier competence based on
    probabilistic modelling) function. Matlab code is available at:
    http://www.mathworks.com/matlabcentral/mlc-downloads/downloads/submissions/28391/versions/6/previews/ccprmod.m/index.html

    Parameters
    ----------
    supports: array of shape = [n_samples, n_classes]
        containing the supports obtained by the base classifier for each class.

    idx_correct_label: array of shape = [n_samples]
        containing the index of the correct class.

    B : int (Default = 20)
        number of points used in the calculation of the competence, higher
        values result in a more accurate estimation.

    Returns
    -------
    C_src : array of shape = [n_samples]
        representing the classifier competences at each data point

    Examples
    --------
    >>> supports = [[0.3, 0.6, 0.1],[1.0/3, 1.0/3, 1.0/3]]
    >>> idx_correct_label = [1,0]
    >>> ccprmod(supports,idx_correct_label)
    ans = [0.784953394056843, 0.332872292262951]

    References
    ----------
    T.Woloszynski, M. Kurzynski, A probabilistic model of classifier competence
    for dynamic ensemble selection, Pattern Recognition 44 (2011) 2656-2668.
    """
    if not isinstance(B, int):
        raise TypeError(
            'Parameter B should be an integer. Currently B is {0}'.format(
                type(B)))
    # NOTE: the previous `B <= 0 or B is None` check was dead code — a None
    # value is already rejected by the isinstance() test above, and `B <= 0`
    # would raise TypeError for None before the identity test ran.
    if B <= 0:
        raise ValueError(
            'The parameter B should be higher than 0. Currently B is {0}'.
            format(B))

    # Copy explicitly: np.asarray does not copy an existing ndarray, so the
    # clamping below used to mutate the caller's array in place.
    supports = np.array(supports, dtype=float)
    idx_correct_label = np.asarray(idx_correct_label)
    supports[supports > 1] = 1

    N, C = supports.shape

    # Evaluation grid on [0, 1], repeated once per class: shape (N, C*B).
    x = np.tile(np.linspace(0, 1, B), (N, C))
    a = np.zeros(x.shape)
    for c in range(C):
        # Beta distribution parameters per class, derived from the supports.
        a[:, c * B:(c + 1) * B] = C * supports[:, c:c + 1]
    b = C - a

    # For extreme cases, with a or b equal to 0, add a small constant:
    eps = 1e-20
    a[a == 0] = eps
    b[b == 0] = eps
    betaincj = betainc(a, b, x)

    C_src = np.zeros(N)
    all_cols = np.arange(C * B)
    for n in range(N):
        # Columns belonging to the correct class vs. all remaining classes.
        # setdiff1d returns the complement in sorted order; the previous
        # list(set(...) - set(...)) relied on unspecified set iteration order.
        correct_cols = np.arange(idx_correct_label[n] * B,
                                 (idx_correct_label[n] + 1) * B)
        bc = betaincj[n, correct_cols]
        other = betaincj[n, np.setdiff1d(all_cols, correct_cols)]
        # One row per wrong class, B grid points per row.
        bi = other.reshape((B, C - 1), order='F').T
        # Trapezoid-style accumulation of the competence integral.
        C_src[n] = np.sum(
            (bc[1:] - bc[:-1]) *
            np.prod((bi[:, :-1] + bi[:, 1:]) / 2, axis=0))

    return C_src