def load(backend="numpy", binary=False, want_dense=False):
    """Load the MNIST dataset from the NAS share.

    Parameters
    ----------
    backend : "numpy" or "gnumpy" -- array type of the returned data.
    binary : if True, threshold pixel intensities to {0, 1} at 0.5.
    want_dense : if False, images are reshaped to (N, 1, 28, 28);
        if True they stay flat with shape (N, 784).

    Returns
    -------
    X, T, X_test, T_test, T_train_labels, T_labels
        Train/test images in [0, 1], one-hot targets, integer labels.
    """
    root = nn.nas_address() + '/PSI-Share-no-backup/Ali/Dataset/MNIST/'

    def _read_idx(name, header_bytes):
        # Open in binary mode ('rb', not 'r') so raw bytes survive on
        # every platform, and close the handle deterministically.
        with open(root + name, 'rb') as f:
            data = f.read()
        # np.frombuffer replaces the deprecated np.fromstring; copy()
        # keeps the result writable like fromstring's output was.
        return np.frombuffer(data[header_bytes:], dtype='uint8').copy()

    s = 60000
    X = np.reshape(_read_idx('train-images.idx3-ubyte', 16), (s, 784)) / 255.0
    T_train_labels = _read_idx('train-labels.idx1-ubyte', 8)
    T = np.zeros((s, 10))
    # Vectorized one-hot encoding (same result as the per-row loop).
    T[np.arange(s), T_train_labels] = 1

    s_test = 10000
    X_test = np.reshape(_read_idx('t10k-images.idx3-ubyte', 16),
                        (s_test, 784)) / 255.0
    T_labels = _read_idx('t10k-labels.idx1-ubyte', 8).astype("float32")
    T_test = np.zeros((s_test, 10))
    T_test[np.arange(s_test), T_labels.astype('int')] = 1

    if binary:
        # Pixels are k/255, so a value exactly equal to .5 cannot occur;
        # thresholding at > .5 is therefore a complete binarization.
        X = np.where(X > .5, 1.0, 0.0)
        X_test = np.where(X_test > .5, 1.0, 0.0)

    if want_dense == False:
        X = X.reshape(60000, 1, 28, 28)
        X_test = X_test.reshape(10000, 1, 28, 28)

    if backend == "numpy":
        X = nn.array(X)
        T = nn.array(T)
        X_test = nn.array(X_test)
        T_test = nn.array(T_test)
        T_train_labels = nn.array(T_train_labels)
        T_labels = nn.array(T_labels)
    if backend == "gnumpy":
        X = nn.garray(X)
        T = nn.garray(T)
        X_test = nn.garray(X_test)
        T_test = nn.garray(T_test)
        T_train_labels = nn.garray(T_train_labels)
        T_labels = nn.garray(T_labels)
    return X, T, X_test, T_test, T_train_labels, T_labels
def load(backend="numpy", bias=None, raw=False):
    """Load the Frey faces, contrast-normalize them, optionally
    ZCA-whiten, and return images cropped to shape (1965, 1, 20, 20).

    backend : "numpy" returns an ndarray, anything else a gnumpy garray.
    bias : eigenvalue regularizer for whitening; None/0 skips whitening.
    raw : when True, only subtract the per-image mean (no std scaling
        and no whitening).
    """
    mat = scipy.io.loadmat(
        nn.nas_address() +
        "/PSI-Share-no-backup/Ali/Dataset/Frey/frey_rawface.mat")
    X = mat['ff'].T
    row_mean = X.mean(axis=1)[:, np.newaxis]
    if raw:
        X = X - row_mean
    else:
        # +10 keeps the divisor away from zero for near-flat images.
        row_std = (X.var(1) + 10) ** .5
        X = (X - row_mean) / row_std[:, np.newaxis]
    if not raw and bias:
        # ZCA whitening.  The original cov() helper centered the columns
        # of X in place before taking X^T X; that centering is kept.
        X -= X.mean(axis=0)
        sigma = np.dot(X.T, X) / (1.0 * X.shape[0] - 1)
        u, s, v = np.linalg.svd(sigma)
        P = np.dot(np.dot(u, np.diag(np.sqrt(1. / (s + bias)))), u.T)
        X = np.dot(X, P)
    X = X.reshape(1965, 1, 28, 20)[:, :, 3:-5, :]
    if backend == "numpy":
        return X
    return nn.garray(X)
def extract_patch(backend="numpy",num_patch=10000,size=7,want_desne=True): img = Natural.load() assert(img.ndim)==4 # img = img.reshape(50000,3,32,32) # print img.max() # nn.show_images(img[:9,:,:,:],(3,3)) X = np.zeros((num_patch,img.shape[1],size,size)) # extract random patches for index in xrange(num_patch): if index%100000==0: print index x = random.randint(0,img.shape[2]-size) y = random.randint(0,img.shape[3]-size) patch = img[index%img.shape[0],:,x:x+size,y:y+size] X[index,:,:,:] = patch if want_desne: X = X.reshape(num_patch,size*size) if backend=="numpy": return X if backend=="gnumpy": return nn.garray(X)
def extract_patch(img, backend="numpy", num_patch=10000, size=7): # work_address = os.environ["WORK"] # assert whitened == True # if whitened: # f=np.load(work_address+"./Dataset/CIFAR10/cifar_bias_.1.npz") # img=f['X'];T=f['T'];X_test=f['X_test'];T_test=f['T_test'];T_train_labels=f['T_train_labels'];T_labels=f['T_labels'] # else: # img,T,X_test,T_test,T_train_labels,T_labels = dataset.load_cifar10(raw=True) assert (img.ndim) == 4 # img = img.reshape(50000,3,32,32) # print img.max() # nn.show_images(img[:9,:,:,:],(3,3)) X = np.zeros((num_patch, img.shape[1], size, size)) # extract random patches for index in xrange(num_patch): if index % 10000 == 0: print index x = random.randint(0, img.shape[2] - size) y = random.randint(0, img.shape[3] - size) patch = img[index % img.shape[0], :, x:x + size, y:y + size] X[index, :, :, :] = patch if backend == "numpy": return X if backend == "gnumpy": return nn.garray(X)
def extract_patch(backend="numpy", num_patch=10000, size=7, want_desne=True): img = Natural.load() assert (img.ndim) == 4 # img = img.reshape(50000,3,32,32) # print img.max() # nn.show_images(img[:9,:,:,:],(3,3)) X = np.zeros((num_patch, img.shape[1], size, size)) # extract random patches for index in xrange(num_patch): if index % 100000 == 0: print index x = random.randint(0, img.shape[2] - size) y = random.randint(0, img.shape[3] - size) patch = img[index % img.shape[0], :, x : x + size, y : y + size] X[index, :, :, :] = patch if want_desne: X = X.reshape(num_patch, size * size) if backend == "numpy": return X if backend == "gnumpy": return nn.garray(X)
def load(backend="numpy", bias=None, raw=False):
    """Frey faces loader: per-image contrast normalization, optional
    ZCA whitening, cropped output of shape (1965, 1, 20, 20).

    backend : "numpy" gives an ndarray; anything else a gnumpy garray.
    bias : whitening eigenvalue regularizer; falsy values skip whitening.
    raw : True means mean-subtraction only (no scaling, no whitening).
    """
    data = scipy.io.loadmat(
        nn.nas_address() +
        "/PSI-Share-no-backup/Ali/Dataset/Frey/frey_rawface.mat")
    X = data['ff'].T
    mu = X.mean(axis=1)[:, np.newaxis]
    if not raw:
        # +10 regularizes the per-image variance before scaling.
        sd = (X.var(1) + 10) ** .5
        X = (X - mu) / sd[:, np.newaxis]
    else:
        X = X - mu
    if not raw and bias:
        # ZCA whitening; the column centering below reproduces the
        # in-place mutation done by the original cov() helper.
        X -= X.mean(axis=0)
        sigma = np.dot(X.T, X) / (1.0 * X.shape[0] - 1)
        u, s, v = np.linalg.svd(sigma)
        P = np.dot(np.dot(u, np.diag(np.sqrt(1. / (s + bias)))), u.T)
        X = np.dot(X, P)
    X = X.reshape(1965, 1, 28, 20)
    X = X[:, :, 3:-5, :]  # crop 28x20 -> 20x20
    if backend == "numpy":
        return X
    return nn.garray(X)
def extract_patch(img,backend="numpy",num_patch=10000,size=7): # work_address = os.environ["WORK"] # assert whitened == True # if whitened: # f=np.load(work_address+"./Dataset/CIFAR10/cifar_bias_.1.npz") # img=f['X'];T=f['T'];X_test=f['X_test'];T_test=f['T_test'];T_train_labels=f['T_train_labels'];T_labels=f['T_labels'] # else: # img,T,X_test,T_test,T_train_labels,T_labels = dataset.load_cifar10(raw=True) assert(img.ndim)==4 # img = img.reshape(50000,3,32,32) # print img.max() # nn.show_images(img[:9,:,:,:],(3,3)) X = np.zeros((num_patch,img.shape[1],size,size)) # extract random patches for index in xrange(num_patch): if index%10000==0: print index x = random.randint(0,img.shape[2]-size) y = random.randint(0,img.shape[3]-size) patch = img[index%img.shape[0],:,x:x+size,y:y+size] X[index,:,:,:] = patch if backend=="numpy": return X if backend=="gnumpy": return nn.garray(X)
def load_cifar10_filter(backend="gnumpy", size=5):
    """Load CIFAR-10, contrast-normalize it, and high-pass filter every
    image with a (center delta minus box average) convolution kernel.

    Parameters
    ----------
    backend : must be "gnumpy" -- the filtering runs through nn.ConvUp.
        (Fix: the old default of "numpy" always tripped the assert, so
        the default is now "gnumpy"; explicit callers are unaffected.)
    size : odd side length of the square filter kernel.

    Returns
    -------
    X_filter, T, X_filter_test, T_test, T_train_labels, T_labels
    """
    assert backend == "gnumpy"
    X, T, X_test, T_test, T_train_labels, T_labels = load_cifar10(raw=True)
    # Per-image contrast normalization; +10 regularizes the variance.
    X_mean = X.mean(axis=1)
    X_test_mean = X_test.mean(axis=1)
    X_std = (X.var(1) + 10)**.5
    X_test_std = (X_test.var(1) + 10)**.5
    X = (X - X_mean[:, np.newaxis]) / X_std[:, np.newaxis]
    X_test = (X_test - X_test_mean[:, np.newaxis]) / X_test_std[:, np.newaxis]
    X = X.reshape(50000, 3, 32, 32)
    X_test = X_test.reshape(10000, 3, 32, 32)
    T = nn.garray(T)
    T_test = nn.garray(T_test)
    T_train_labels = nn.garray(T_train_labels)
    T_labels = nn.garray(T_labels)
    # Per-channel kernel: identity (delta at the center) minus the box
    # mean -- i.e. subtract the local average from each pixel.
    filter = np.zeros((3, 3, size, size))
    for i in xrange(3):
        b = np.ones((size, size)) * (1.0 / size**2)
        a = np.zeros((size, size))
        a[(size - 1) / 2, (size - 1) / 2] = 1
        filter[i, i, :, :] = a - b
    # Convolve in mini-batches of 200 images to bound GPU memory use.
    X_filter = nn.GnumpyBackend.zeros((50000, 3, 32, 32))
    for i in range(250):
        X_filter[i * 200:(i + 1) * 200, :, :, :] = nn.ConvUp(
            nn.garray(X[i * 200:(i + 1) * 200, :, :, :]), filter,
            moduleStride=1, paddingStart=(size - 1) / 2)
    X_filter_test = nn.GnumpyBackend.zeros((10000, 3, 32, 32))
    for i in range(50):
        X_filter_test[i * 200:(i + 1) * 200, :, :, :] = nn.ConvUp(
            nn.garray(X_test[i * 200:(i + 1) * 200, :, :, :]), filter,
            moduleStride=1, paddingStart=(size - 1) / 2)
    return X_filter, T, X_filter_test, T_test, T_train_labels, T_labels
def load_whiten(backend="numpy", bias=.1):
    """CIFAR-10 with per-image contrast normalization followed by ZCA
    whitening; the whitening matrix is estimated on the training set
    and applied to both splits.

    bias : eigenvalue regularizer added inside the inverse square root.
    backend : "numpy" or "gnumpy" output arrays.
    """
    X, T, X_test, T_test, T_train_labels, T_labels = CIFAR10.load(
        want_mean=False, want_dense=True)
    # Per-image contrast normalization (mean 0, std 1 per row).
    X = (X - X.mean(axis=1)[:, np.newaxis]) / X.std(1)[:, np.newaxis]
    X_test = (X_test - X_test.mean(axis=1)[:, np.newaxis]) \
        / X_test.std(1)[:, np.newaxis]
    # Center on the training-set pixel means (test uses train stats).
    pixel_mean = X.mean(axis=0)
    X -= pixel_mean
    X_test -= pixel_mean
    # Covariance of X.  The original cov() helper re-centered X in
    # place here (a near-zero shift after the subtraction above); that
    # extra centering is reproduced to keep results bit-compatible.
    X -= X.mean(axis=0)
    sigma = np.dot(X.T, X) / (1.0 * X.shape[0] - 1)
    # ZCA whitening transform.
    u, s, v = np.linalg.svd(sigma)
    P = np.dot(np.dot(u, np.diag(np.sqrt(1. / (s + bias)))), u.T)
    X = np.dot(X, P)
    X_test = np.dot(X_test, P)
    X = X.reshape(50000, 3, 32, 32)
    X_test = X_test.reshape(10000, 3, 32, 32)
    if backend == "numpy":
        return X, T, X_test, T_test, T_train_labels, T_labels
    if backend == "gnumpy":
        return (nn.garray(X), nn.garray(T), nn.garray(X_test),
                nn.garray(T_test), nn.garray(T_train_labels),
                nn.garray(T_labels))
def load_cifar10_filter(backend="gnumpy", size=5):
    """Contrast-normalize CIFAR-10 and high-pass filter each image by
    convolving with a per-channel (center delta - box mean) kernel.

    Parameters
    ----------
    backend : must be "gnumpy"; nn.ConvUp does the filtering on GPU.
        (Fix: the previous default "numpy" always failed the assert,
        making the default call unusable, so it is now "gnumpy".)
    size : odd side length of the square kernel.

    Returns
    -------
    X_filter, T, X_filter_test, T_test, T_train_labels, T_labels
    """
    assert backend == "gnumpy"
    X, T, X_test, T_test, T_train_labels, T_labels = load_cifar10(raw=True)
    # Per-image contrast normalization; +10 regularizes the variance.
    X_mean = X.mean(axis=1)
    X_test_mean = X_test.mean(axis=1)
    X_std = (X.var(1) + 10)**.5
    X_test_std = (X_test.var(1) + 10)**.5
    X = (X - X_mean[:, np.newaxis]) / X_std[:, np.newaxis]
    X_test = (X_test - X_test_mean[:, np.newaxis]) / X_test_std[:, np.newaxis]
    X = X.reshape(50000, 3, 32, 32)
    X_test = X_test.reshape(10000, 3, 32, 32)
    T = nn.garray(T)
    T_test = nn.garray(T_test)
    T_train_labels = nn.garray(T_train_labels)
    T_labels = nn.garray(T_labels)
    # Build the kernel: identity minus local average, one per channel.
    filter = np.zeros((3, 3, size, size))
    for i in xrange(3):
        b = np.ones((size, size)) * (1.0 / size**2)
        a = np.zeros((size, size))
        a[(size - 1) / 2, (size - 1) / 2] = 1
        filter[i, i, :, :] = a - b
    # Filter in batches of 200 images to limit GPU memory pressure.
    X_filter = nn.GnumpyBackend.zeros((50000, 3, 32, 32))
    for i in range(250):
        X_filter[i * 200:(i + 1) * 200, :, :, :] = nn.ConvUp(
            nn.garray(X[i * 200:(i + 1) * 200, :, :, :]),
            filter,
            moduleStride=1,
            paddingStart=(size - 1) / 2)
    X_filter_test = nn.GnumpyBackend.zeros((10000, 3, 32, 32))
    for i in range(50):
        X_filter_test[i * 200:(i + 1) * 200, :, :, :] = nn.ConvUp(
            nn.garray(X_test[i * 200:(i + 1) * 200, :, :, :]),
            filter,
            moduleStride=1,
            paddingStart=(size - 1) / 2)
    return X_filter, T, X_filter_test, T_test, T_train_labels, T_labels
def load_whiten(backend="numpy", bias=.1):
    """Return ZCA-whitened CIFAR-10: per-image contrast normalization,
    training-set mean subtraction, then whitening with eigenvalue
    regularizer `bias`.  Output arrays follow `backend`.
    """
    X, T, X_test, T_test, T_train_labels, T_labels = CIFAR10.load(
        want_mean=False, want_dense=True)
    # Row-wise (per-image) standardization.
    mu_tr = X.mean(axis=1)
    mu_te = X_test.mean(axis=1)
    sd_tr = X.std(1)
    sd_te = X_test.std(1)
    X = (X - mu_tr[:, np.newaxis]) / sd_tr[:, np.newaxis]
    X_test = (X_test - mu_te[:, np.newaxis]) / sd_te[:, np.newaxis]
    # Subtract the training pixel mean from both splits.
    col_mean = X.mean(axis=0)
    X -= col_mean
    X_test -= col_mean
    # Covariance; the original helper centered X in place a second time
    # (shift is ~0 after the subtraction above) -- kept for fidelity.
    X -= X.mean(axis=0)
    sigma = np.dot(X.T, X) / (1.0 * X.shape[0] - 1)
    u, s, v = np.linalg.svd(sigma)
    # ZCA transform P = U diag((s + bias)^-1/2) U^T.
    P = np.dot(np.dot(u, np.diag(np.sqrt(1. / (s + bias)))), u.T)
    X = np.dot(X, P)
    X_test = np.dot(X_test, P)
    X = X.reshape(50000, 3, 32, 32)
    X_test = X_test.reshape(10000, 3, 32, 32)
    if backend == "numpy":
        return X, T, X_test, T_test, T_train_labels, T_labels
    if backend == "gnumpy":
        return (nn.garray(X), nn.garray(T), nn.garray(X_test),
                nn.garray(T_test), nn.garray(T_train_labels),
                nn.garray(T_labels))
def load_cifar10_adam_patch(num_patch, size, backend="numpy", want_dense=True): img, T, X_test, T_test, T_train_labels, T_labels = CIFAR10.load( want_mean=False, want_dense=True) img = img.reshape(50000, 3, 32, 32) # print img.max() # nn.show_images(img[:9,:,:,:],(3,3)) X = np.zeros((num_patch, 3, size, size)) # extract random patches # for index in xrange(num_patch): # x = random.randint(0,31-size) # y = random.randint(0,31-size) # patch = img[index%50000,:,x:x+size,y:y+size] # X[index,:,:,:] = patch for index in xrange(num_patch): if index % 10000 == 0: print index x = random.randint(0, img.shape[2] - size) y = random.randint(0, img.shape[3] - size) patch = img[index % img.shape[0], :, x:x + size, y:y + size] X[index, :, :, :] = patch X = X.reshape(num_patch, -1) #normalize for contrast X_mean = X.mean(axis=1) print X_mean.shape # X_std = (X.std(1)+10)**.5 # X_std = X.std(1) X_std = (X.var(1) + 10)**.5 X = (X - X_mean[:, np.newaxis]) / X_std[:, np.newaxis] #covariance def cov(X): X_mean = X.mean(axis=0) X -= X_mean return np.dot(X.T, X) / (1.0 * X.shape[0] - 1) #whiten sigma = cov(X) + .1 * np.identity(X.shape[1]) M = X.mean(axis=0) X -= M u, s, v = np.linalg.svd(sigma) P = np.dot(np.dot(u, np.diag(np.sqrt(1. / s))), u.T) X = np.dot(X, P) # X=nn.garray(X) # print X.shape # np.savez("cifar10_adam", X = X) # print X.min(0) # print (X**2).sum(1)[:100] if not want_dense: X = X.reshape(num_patch, 3, size, size) if backend == "numpy": return X, M, P if backend == "gnumpy": return nn.garray(X)
def load(backend="numpy", binary=False, want_dense=False):
    """Load MNIST (images + labels, train and test) from the NAS share.

    Parameters
    ----------
    backend : "numpy" or "gnumpy" -- array type of the returned data.
    binary : if True, binarize pixels at the 0.5 threshold.
    want_dense : if False, images become (N, 1, 28, 28); if True they
        remain flat (N, 784).

    Returns
    -------
    X, T, X_test, T_test, T_train_labels, T_labels
        Images scaled to [0, 1], one-hot targets, integer labels.
    """
    base = nn.nas_address() + '/PSI-Share-no-backup/Ali/Dataset/MNIST/'

    def _read(name, skip):
        # 'rb' (not 'r') keeps the bytes intact on every platform, and
        # the with-block closes the handle (the old code leaked it).
        with open(base + name, 'rb') as fh:
            raw = fh.read()
        # frombuffer supersedes the deprecated np.fromstring; copy()
        # restores writability, matching fromstring's behavior.
        return np.frombuffer(raw[skip:], dtype='uint8').copy()

    s = 60000
    X = np.reshape(_read('train-images.idx3-ubyte', 16), (s, 784)) / 255.0
    T_train_labels = _read('train-labels.idx1-ubyte', 8)
    T = np.zeros((s, 10))
    T[np.arange(s), T_train_labels] = 1  # one-hot encode

    s_test = 10000
    X_test = np.reshape(_read('t10k-images.idx3-ubyte', 16),
                        (s_test, 784)) / 255.0
    T_labels = _read('t10k-labels.idx1-ubyte', 8).astype("float32")
    T_test = np.zeros((s_test, 10))
    T_test[np.arange(s_test), T_labels.astype('int')] = 1

    if binary:
        # No pixel can equal exactly .5 (values are k/255), so this
        # threshold binarizes every entry.
        X = np.where(X > .5, 1.0, 0.0)
        X_test = np.where(X_test > .5, 1.0, 0.0)

    if want_dense == False:
        X = X.reshape(60000, 1, 28, 28)
        X_test = X_test.reshape(10000, 1, 28, 28)

    if backend == "numpy":
        X = nn.array(X)
        T = nn.array(T)
        X_test = nn.array(X_test)
        T_test = nn.array(T_test)
        T_train_labels = nn.array(T_train_labels)
        T_labels = nn.array(T_labels)
    if backend == "gnumpy":
        X = nn.garray(X)
        T = nn.garray(T)
        X_test = nn.garray(X_test)
        T_test = nn.garray(T_test)
        T_train_labels = nn.garray(T_train_labels)
        T_labels = nn.garray(T_labels)
    return X, T, X_test, T_test, T_train_labels, T_labels
def load_cifar10_adam_patch(num_patch,size,backend="numpy",want_dense = True): img,T,X_test,T_test,T_train_labels,T_labels=CIFAR10.load(want_mean = False, want_dense = True) img = img.reshape(50000,3,32,32) # print img.max() # nn.show_images(img[:9,:,:,:],(3,3)) X = np.zeros((num_patch,3,size,size)) # extract random patches # for index in xrange(num_patch): # x = random.randint(0,31-size) # y = random.randint(0,31-size) # patch = img[index%50000,:,x:x+size,y:y+size] # X[index,:,:,:] = patch for index in xrange(num_patch): if index%10000==0: print index x = random.randint(0,img.shape[2]-size) y = random.randint(0,img.shape[3]-size) patch = img[index%img.shape[0],:,x:x+size,y:y+size] X[index,:,:,:] = patch X = X.reshape(num_patch,-1) #normalize for contrast X_mean=X.mean(axis=1) print X_mean.shape # X_std = (X.std(1)+10)**.5 # X_std = X.std(1) X_std = (X.var(1)+10)**.5 X = (X-X_mean[:, np.newaxis])/X_std[:, np.newaxis] #covariance def cov(X): X_mean = X.mean(axis=0) X -= X_mean return np.dot(X.T,X)/(1.0*X.shape[0]-1) #whiten sigma = cov(X)+.1*np.identity(X.shape[1]) M = X.mean(axis=0) X -= M u,s,v=np.linalg.svd(sigma) P = np.dot(np.dot(u,np.diag(np.sqrt(1./s))),u.T) X=np.dot(X,P) # X=nn.garray(X) # print X.shape # np.savez("cifar10_adam", X = X) # print X.min(0) # print (X**2).sum(1)[:100] if not want_dense: X = X.reshape(num_patch,3,size,size) if backend=="numpy": return X,M,P if backend=="gnumpy": return nn.garray(X)