def MSE(self, X, Y, delimiter=" "):
    """Mean Squared Error (or mis-classification error)."""
    MSE = 0
    genX, _, N = batchX(X, self.batch, delimiter)
    genT, _ = batchT(Y, self.batch, delimiter, self.C_dict)
    for X, T in zip(genX, genT):
        # hidden layer outputs: project the batch and apply neuron functions
        H = np.dot(X, self.W)
        for i in xrange(H.shape[1]):
            H[:, i] = self.ufunc[i](H[:, i])
        Th1 = H.dot(self.B)
        # weight the batch error by its share of the N samples
        p = float(X.shape[0]) / N
        MSE += mse(T, Th1, self.classification, self.multiclass) * p
    return MSE
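# A minimal standalone check (not part of the original code; plain NumPy with
# made-up shapes) of why the p-weighting in MSE() recovers the full-dataset
# error: each batch mean is scaled by batch_size / N, so the weighted sum
# equals the mean over all N samples.
#
#   import numpy as np
#   T = np.arange(10.0).reshape(-1, 1)                # all targets, N = 10
#   Y = 1.1 * T                                       # all predictions
#   full = np.mean((T - Y) ** 2)                      # MSE over the dataset
#   batches = zip(np.split(T, [3, 7]), np.split(Y, [3, 7]))
#   acc = sum(np.mean((t - y) ** 2) * (len(t) / 10.0) for t, y in batches)
#   assert np.isclose(full, acc)                      # batch-weighted == full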
def train(self, X, T, delimiter=" "):
    """Trains ELM, can use any X and T(=Y), and specify neurons.

    Neurons: (number, type, [W], [B])
    """
    # get parameters of new data and add neurons
    self.Xmean, self.Xstd = meanstdX(X, self.batch, delimiter)
    if self.classification:
        self.C_dict = c_dictT(T, self.batch)
    # get data iterators
    genX, self.inputs, N = batchX(X, self.batch, delimiter)
    genT, self.targets = batchT(T, self.batch, delimiter, self.C_dict)
    # get mean value of targets
    if self.classification or self.multiclass:
        self.Tmean = np.zeros((self.targets,))  # for any classification
    else:
        self.Tmean, _ = meanstdX(T, self.batch, delimiter)
    # project data
    nn = len(self.ufunc)
    HH = np.zeros((nn, nn))
    HT = np.zeros((nn, self.targets))
    for X, T in zip(genX, genT):
        # get hidden layer outputs
        H = np.dot(X, self.W)
        for i in xrange(H.shape[1]):
            H[:, i] = self.ufunc[i](H[:, i])
        H, T = semi_Tikhonov(H, T, self.Tmean)  # add Tikhonov regularization
        # least squares solution - multiply both sides by H'
        p = float(X.shape[0]) / N
        HH += np.dot(H.T, H) * p
        HT += np.dot(H.T, T) * p
    # solve ELM model
    HH += self.cI * np.eye(nn)  # enhance solution stability
    self.B = lstsq(HH, HT)[0]
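# A minimal standalone sketch (not part of the original code; plain NumPy with
# hypothetical shapes, and assuming the unqualified lstsq above is
# scipy.linalg.lstsq) of the batched least-squares step in train():
# accumulating the batch-weighted normal equations HH = H'H and HT = H'T and
# then solving (HH + c*I) B = HT, so the full hidden-layer matrix never has
# to be held in memory at once.
#
#   import numpy as np
#   from scipy.linalg import lstsq
#   rng = np.random.RandomState(0)
#   H = rng.randn(100, 5); T = rng.randn(100, 2); c = 1e-6; N = 100.0
#   HH = c * np.eye(5)                       # start from the ridge term
#   HT = np.zeros((5, 2))
#   for h, t in zip(np.split(H, 4), np.split(T, 4)):
#       p = len(h) / N                       # batch's share of the samples
#       HH += np.dot(h.T, h) * p
#       HT += np.dot(h.T, t) * p
#   B = lstsq(HH, HT)[0]                     # output weights, shape (5, 2)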