import numpy as np
import numpy.random as npr
import gnumpy as gp


def trte_split(X, Y, tr_frac):
    """Split the data in X/Y into training and testing portions."""
    if gp.is_garray(X):
        X = X.as_numpy_array()
    else:
        X = np.array(X)
    if gp.is_garray(Y):
        Y = Y.as_numpy_array()
    else:
        Y = np.array(Y)
    obs_count = X.shape[0]
    tr_count = int(round(tr_frac * obs_count))
    te_count = obs_count - tr_count
    Xtr = np.zeros((tr_count, X.shape[1]))
    Ytr = np.zeros((tr_count, Y.shape[1]))
    Xte = np.zeros((te_count, X.shape[1]))
    Yte = np.zeros((te_count, Y.shape[1]))
    idx = npr.permutation(obs_count)
    # Basic manual iteration: the first tr_count shuffled indices fill the
    # training arrays, the remaining indices fill the testing arrays.
    for i in range(obs_count):
        if i < tr_count:
            Xtr[i, :] = X[idx[i], :]
            Ytr[i, :] = Y[idx[i], :]
        else:
            Xte[(i - tr_count), :] = X[idx[i], :]
            Yte[(i - tr_count), :] = Y[idx[i], :]
    return [gp.garray(Xtr), gp.garray(Ytr), gp.garray(Xte), gp.garray(Yte)]
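# Hypothetical usage sketch for trte_split (not from the original source);
# it assumes gnumpy is importable as gp and exercises a simple 80/20 split
# on toy data.
def _demo_trte_split():
    X = npr.randn(100, 5)
    Y = npr.randn(100, 2)
    Xtr, Ytr, Xte, Yte = trte_split(X, Y, 0.8)
    # With 100 observations and tr_frac=0.8: 80 training rows, 20 testing rows.
    assert Xtr.shape[0] == 80 and Xte.shape[0] == 20
    return [Xtr, Ytr, Xte, Yte]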
def vector_weights(self, Wm=gp.garray(())):
    """Return the weights in Wm, or self.W, vectorized."""
    if Wm.size == 0:
        Wm = self.W
    if not gp.is_garray(Wm):
        Wm = gp.garray(Wm)
    Wv = Wm.reshape((Wm.size, 1))
    return Wv
import gnumpy as gnp


def array(x, dtype=None, **kwargs):
    """Construct either a numpy array or a gnumpy garray from x.

    This dispatcher assumes `gpu_float32` is a sentinel dtype, defined
    elsewhere, that requests a GPU-resident garray instead of a numpy array.
    """
    if gnp.is_garray(x):
        if dtype is gpu_float32:
            # Already on the GPU with the only dtype garrays support.
            return x
        else:
            # Pull the data back to the CPU and cast to the requested dtype.
            return np.array(gnp.as_numpy_array(x), dtype=dtype, **kwargs)
    else:
        if dtype is gpu_float32:
            # Build a numpy array first, then push it to the GPU.
            return gnp.as_garray(np.array(x, **kwargs))
        else:
            return np.array(x, dtype=dtype, **kwargs)
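# Hypothetical round-trip sketch for array() (an illustration, not from the
# original source); it assumes gpu_float32 is the GPU-dtype sentinel used above
# and that a gnumpy-compatible GPU backend is available.
def _demo_array_dispatch():
    x = [[1.0, 2.0], [3.0, 4.0]]
    x_gpu = array(x, dtype=gpu_float32)      # list input -> GPU garray
    x_cpu = array(x_gpu, dtype=np.float64)   # garray -> numpy float64 array
    assert gnp.is_garray(x_gpu) and (x_cpu.dtype == np.float64)
    return x_cpu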
def matrix_weights(self, Wv=gp.garray(())):
    """Return the weights in Wv, or self.W, in matrix form."""
    if Wv.size == 0:
        # Fall back to the layer's stored weight matrix.
        Wm = self.W
    else:
        if not gp.is_garray(Wv):
            Wv = gp.garray(Wv)
        if Wv.size != self.weight_count():
            raise Exception('Wrong-sized Wv.')
        Wm = Wv.reshape((self.dim_output, self.dim_input))
    return Wm
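# Standalone numpy sketch (not part of the original class) illustrating the
# vectorize/matrize convention shared by vector_weights and matrix_weights: a
# (dim_output x dim_input) matrix flattens row-major into a column vector and
# reshapes back to the same matrix.
def _demo_weight_reshape(dim_output=3, dim_input=4):
    Wm = np.arange(dim_output * dim_input, dtype=np.float64)
    Wm = Wm.reshape((dim_output, dim_input))
    Wv = Wm.reshape((Wm.size, 1))                   # as in vector_weights
    Wm_back = Wv.reshape((dim_output, dim_input))   # as in matrix_weights
    assert np.array_equal(Wm, Wm_back)
    return Wv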
def set_weights(self, Ws):
    """Set weights for this network to those in the array Ws.

    Each weight array Wi = Ws[i] should be of the proper size for
    parameterizing the LNLayer at self.layers[i].
    """
    for i in range(self.layer_count):
        if not gp.is_garray(Ws[i]):
            Ws[i] = gp.garray(Ws[i])
        self.layers[i].set_weights(Ws[i])
    return
def set_weights(self, Wm):
    """Set weights in this layer to the given values.

    This performs a copy, so modifications of the given Wm, e.g. during
    network training, won't affect the values set for self.W.
    """
    if (Wm.shape[0] != self.dim_output) or (Wm.shape[1] != self.dim_input):
        raise Exception('Wrong-sized Wm.')
    if not gp.is_garray(Wm):
        Wm = gp.garray(Wm)
    self.W = Wm.copy()
    return
def class_cats(Yi):
    """Change a +1/-1 class indicator matrix to a categorical vector."""
    if not gp.is_garray(Yi):
        Yi = gp.garray(Yi)
    Yc = Yi.argmax(axis=1)
    return Yc
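# Hypothetical example for class_cats (not from the original source): each row
# of a +1/-1 indicator matrix maps to the column index holding its +1.
def _demo_class_cats():
    Yi = np.array([[+1, -1, -1],
                   [-1, -1, +1],
                   [-1, +1, -1]])
    Yc = class_cats(Yi)
    # Expected categorical labels: [0, 2, 1].
    return Yc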