def test(self):
    """Check the student's p2 module against NumPy/SciPy reference results.

    Covers matmult (matrix-vector product), hadmat (Hadamard construction)
    and hadmatmult (Hadamard-vector product) for sizes 2, 4, 8, 16.
    """
    # Random square test matrices of sizes 2, 4, 8, 16 (entries in [-5, 50)).
    nxn = [np.random.randint(-5,50,[2,2]), np.random.randint(-5,50,[4,4]), np.random.randint(-5,50,[8,8]), np.random.randint(-5,50,[16,16])]
    # Reference Hadamard matrices of matching sizes.
    had = [linalg.hadamard(2), linalg.hadamard(4), linalg.hadamard(8), linalg.hadamard(16)]
    # Random test vectors of matching lengths.
    vec = [np.random.randint(-5,50,[2]), np.random.randint(-5,50,[4]), np.random.randint(-5,50,[8]), np.random.randint(-5,50,[16])]
    for t in range(4):
        #Problem 1: matmult(A,x). Multiply a matrix with a vector.
        nt.assert_equal(p2.matmult(nxn[t],vec[t]),nxn[t].dot(vec[t]))
        #Problem 2: hadmat(k). Create hadamard of size 2^k
        # calls student hadmat with t=1,2,3,4
        # it is supposed to generate hadmat of size 2,4,8,16
        nt.assert_equal(p2.hadmat(t+1),had[t])
        #Problem 4: hadmatmult(H,x). Takes hadamart H and vector x, multiplies.
        nt.assert_equal(p2.hadmatmult(had[t],vec[t]),had[t].dot(vec[t]))
def MakeBinaryHadamard(n):
    """Return the order-n Hadamard matrix as a list of lists with -1 mapped to 0.

    Args:
        n: Hadamard order; must be a power of two (scipy.linalg.hadamard
           requirement).

    Returns:
        list[list[int]]: entries are 1 where the Hadamard matrix is 1, else 0.
    """
    # Build the matrix once instead of calling hadamard(n) twice per loop pass
    # as the original did.
    h = hadamard(n)
    return [[0 if entry == -1 else entry for entry in row] for row in h]
def initialize_cdma(n=None):
    """Initialize_cdma program and assign codes to each name in members.

    Args:
        n: number of codes to generate; defaults to len(members) evaluated at
           call time.  (The original default ``n=len(members)`` was bound once
           at import time, so names added to ``members`` later were silently
           ignored.)
    """
    from scipy.linalg import hadamard
    if n is None:
        n = len(members)
    # Hadamard orders must be powers of two; round up via the project helper
    # otherwise.
    code_list = hadamard(n) if is_power_of_two(n) else hadamard(closest_power_of_two(n))
    code_list = code_list.tolist()
    # Assign one Hadamard row (spreading code) per member name.
    for index, name in enumerate(members):
        codes[name] = code_list[index]
def test_basic(self): y = hadamard(1) assert_array_equal(y, [[1]]) y = hadamard(2, dtype=float) assert_array_equal(y, [[1.0, 1.0], [1.0, -1.0]]) y = hadamard(4) assert_array_equal(y, [[1, 1, 1, 1], [1, -1, 1, -1], [1, 1, -1, -1], [1, -1, -1, 1]]) assert_raises(ValueError, hadamard, 0) assert_raises(ValueError, hadamard, 5)
def generate_contrast(problem):
    """Build a fractional-factorial (foldover) contrast matrix.

    Args:
        problem: dict containing 'num_vars', the number of factors.

    Returns:
        np.ndarray: hadamard(k) stacked on -hadamard(k), where k is
        2 ** find_smallest(num_vars) (the smallest power of two that fits
        all variables).
    """
    num_vars = problem['num_vars']
    # Find the smallest n, such that num_vars < k.
    # (The original also built an unused list of powers of two; removed.)
    k_chosen = 2 ** find_smallest(num_vars)
    # Generate the fractional factorial contrast: foldover of the Hadamard
    # matrix and its negation.
    contrast = np.vstack([hadamard(k_chosen), -hadamard(k_chosen)])
    return contrast
def test_basic(self): y = hadamard(1) assert_array_equal(y, [[1]]) y = hadamard(2, dtype=float) assert_array_equal(y, [[1.0, 1.0], [1.0, -1.0]]) y = hadamard(4) assert_array_equal(y, [[1,1,1,1], [1,-1,1,-1], [1,1,-1,-1], [1,-1,-1,1]]) assert_raises(ValueError, hadamard, 0) assert_raises(ValueError, hadamard, 5)
def __init__(self, epsilon, hash_funcs, m):
    """Store privacy/hashing parameters and pre-build the Hadamard matrix.

    Args:
        epsilon: privacy budget.
        hash_funcs: family of hash functions; k is derived as its length.
        m: sketch width / Hadamard order (must be a power of two for
           scipy.linalg.hadamard).
    """
    self.epsilon = epsilon
    self.hash_funcs = hash_funcs
    self.k = len(hash_funcs)
    self.m = m
    # Randomized-response flip probability: 1 / (1 + e^(eps/2)).
    self.prob = 1 / (1 + math.pow(math.e, self.epsilon / 2))
    # Pre-compute the Hadamard matrix once.
    self.had = hadamard(self.m)
def __init__(self, input_size, output_size, bias=True, fixed_weights=True, fixed_scale=None):
    """Linear projection whose weight matrix is a (fixed or learned) Hadamard matrix.

    Args:
        input_size: dimensionality of incoming features.
        output_size: dimensionality of the projection output.
        bias: if True, add a learnable bias initialised uniformly in
            [-1/sqrt(output_size), 1/sqrt(output_size)].
        fixed_weights: if True, the Hadamard matrix is a constant (no
            gradient); otherwise it becomes a trainable nn.Parameter.
        fixed_scale: optional constant output scale; when None a trainable
            scale initialised to 1/sqrt(output_size) is used.
    """
    super(HadamardProj, self).__init__()
    self.output_size = output_size
    self.input_size = input_size
    # Hadamard order: smallest power of two >= max(input_size, output_size).
    sz = 2**int(math.ceil(math.log(max(input_size, output_size), 2)))
    mat = torch.from_numpy(hadamard(sz))
    if fixed_weights:
        # Variable(..., requires_grad=False) is the legacy (pre-0.4) PyTorch
        # idiom for a non-trainable tensor.
        self.proj = Variable(mat, requires_grad=False)
    else:
        self.proj = nn.Parameter(mat)
    init_scale = 1. / math.sqrt(self.output_size)
    if fixed_scale is not None:
        self.scale = Variable(torch.Tensor([fixed_scale]), requires_grad=False)
    else:
        self.scale = nn.Parameter(torch.Tensor([init_scale]))
    if bias:
        self.bias = nn.Parameter(
            torch.Tensor(output_size).uniform_(-init_scale, init_scale))
    else:
        self.register_parameter('bias', None)
    # Small numerical-stability constant — presumably used in forward(),
    # which is not visible here.
    self.eps = 1e-8
def hadamard(k, eps):
    """
    The hadamard mechanism for general privacy regime
    :param k: the domain size
    :param eps: the privacy budget
    :return: column-stochastic channel matrix, one column per input symbol
             (only the first k columns are kept)
    """
    e_eps = np.exp(eps)
    # B: number of Hadamard blocks — the power of two just below min(e^eps, 2k).
    temp = min(e_eps, 2 * k)
    B = 2**(np.ceil(np.log(temp) / np.log(2)) - 1)
    # b: per-block Hadamard order — smallest power of two >= k/B + 1.
    temp1 = (k / B) + 1
    b = 2**(np.ceil(np.log(temp1) / np.log(2)))
    K = int(b * B)
    # Start from all -1 and place a Hadamard block on each diagonal slot.
    M = -np.ones((K, K))
    H = linalg.hadamard(int(b))
    ll = []
    for i in range(int(B)):
        s = i * int(b)
        f = (i + 1) * int(b)
        M[s:f, s:f] = H
        ll.append(s)  # remember each block's first column for deletion below
    # Drop the first column of every block (the all-ones Hadamard column).
    M = np.delete(M, ll, 1)
    # Turn the sign pattern into unnormalised response weights:
    # +1 entries get weight e^eps, everything else weight 1.
    M[np.where(M == 1)] = e_eps
    M[np.where(M == -1)] = 1
    # Normalise columns so each input's output distribution sums to 1.
    for i in range(M.shape[1]):
        M[:, i] = M[:, i] / np.sum(M[:, i])
    # Keep only the first k input columns (the domain size).
    return M[:, :k]
def randomSubspace(subspaceDimension, ambientDimension, method="gaussian", q=3.0): print(method) if method == "gaussian": return np.random.normal(0, 1, size=(subspaceDimension, ambientDimension)) elif method == "circulant": cmatrix = circulant(np.random.normal( 0, 1, ambientDimension))[:subspaceDimension] custm = stats.rv_discrete(values=([-1, 1], [1.0 / 2, 1.0 / 2])) dmatrix = np.diag(custm.rvs(size=ambientDimension)) return cmatrix.dot(dmatrix) elif method == "sparse": custm = stats.rv_discrete( values=([-1, 0, 1], [1.0 / (2 * q), 1 - (1.0 / q), 1.0 / (2 * q)])) return math.sqrt(q) * custm.rvs(size=(subspaceDimension, ambientDimension)) elif method == "hadamard": P = np.random.normal(0, 1, size=(subspaceDimension, ambientDimension)) H = hadamard(ambientDimension) custm = stats.rv_discrete(values=([-1, 1], [1.0 / 2, 1.0 / 2])) D = np.diag(custm.rvs(size=ambientDimension)) return (1 / math.sqrt(ambientDimension)) * P.dot(H.dot(D))
def test_fwht(seed_rng):
    """Check sparse.fwht against an explicit Hadamard-matrix product (L = 5)."""
    L=5
    ar1=np.random.normal(size=(2**L,))
    ar2=ar1.copy()
    # In-place fast Walsh-Hadamard transform under test.
    sparse.fwht(ar2)
    # Reference: dense Hadamard matrix applied to the original vector.
    ar1=scla.hadamard(2**L)@ar1
    assert ar1==pytest.approx(ar2)
def walsh(N):
    """Return the N x N Walsh matrix (Hadamard rows in sequency order).

    Args:
        N: matrix order; must be a power of two.

    Returns:
        np.ndarray: hadamard(N) with its rows permuted into sequency order.
    """
    H = hadamard(N)
    B = copy.copy(H)
    ind = []
    for x in range(N):
        # Precedence: (N + x) ^ (x // 2).  Adding N fixes the bit width and
        # bin(...)[:2:-1] reverses the bits (dropping the '0b' prefix).
        # Integer division (//) keeps this valid on Python 3, where the
        # original's x / 2 produced a float and made bin() raise TypeError.
        ind.append(int(bin(N + x ^ x // 2)[:2:-1], 2))
    for x in range(0, N):
        B[x, :] = H[ind[x], :]
    return B
def nonlinearity(box): rows = 256 #256 cols = 8 #8 boolarr = [[0] * 8 for i in range(256)] for hh in range(rows): for jj in range(cols): boolarr[hh][cols - jj - 1] = (-1)**(bitlist(box[hh])[-(jj + 1)]) # print(boolarr) h = hadamard(rows) asum = 0 nn = [0 * i for i in range(cols)] count = 0 for hh in range(cols): maximum = 0 for kk in range(rows): temp = 0 for jj in range(rows): if boolarr[jj][hh] == h[kk][jj]: temp += 1 else: temp -= 1 temp = abs(temp) count += 1 if temp > maximum: maximum = temp nn[cols - 1 - hh] = 128 - maximum / 2 asum = asum + nn[cols - 1 - hh] avg = asum / cols print(nn) return avg
def efficient(X, r=2, l=5, d=2):
    """Randomized low-rank approximation of the degree-d polynomial kernel of X.

    Args:
        X: (n, features) data matrix.
        r: target rank of the approximation.
        l: oversampling parameter; r + l columns are sampled.
        d: polynomial kernel degree.

    Returns:
        (K, X_out): the approximated kernel matrix and the rank-r embedding.
    """
    K_origin = (X.dot(X.T))**d
    n = X.shape[0]
    # Pad to a power of two so a Hadamard matrix of that order exists.
    # power_of_2 is a project helper — presumably the next power of two >= n;
    # TODO confirm.
    size = power_of_2(n)
    samples = r + l
    p = size - n
    # R: zero-padded random column sampler (samples columns of the identity).
    R = np.concatenate([
        np.zeros((p, samples)),
        np.identity(n)[:, np.random.permutation(n)[:samples]]
    ])
    # K: kernel matrix zero-padded to (size, size).
    K = np.concatenate(
        [np.concatenate([K_origin, np.zeros((p, n))]), np.zeros((size, p))],
        axis=1)
    # Random sign diagonal and Hadamard mixing (SRHT-style sketch of K).
    D = np.diag(np.random.randint(2, size=size) * 2 - 1)
    H = hadamard(size)
    W = R.T.dot(H.dot(D.dot(K)))
    # Orthonormal basis for the sketch's range.
    U, _, _ = np.linalg.svd(W.T)
    Q = U[:, :samples]
    A = D.dot(H.dot(R))
    B = (np.linalg.inv(A.T.dot(Q))).dot(W.dot(Q))
    U, E, _ = np.linalg.svd(B.T)
    U = U[:, :r]
    E = E[:r]
    # Map back through the basis and drop the zero-padding rows.
    X_out = ((np.sqrt(E) * U).T.dot(Q.T)).T[:X.shape[0]]
    K = X_out.dot(X_out.T)
    return K, X_out
def __init__(self,
             inputs: Union[str, List[str]],
             outputs: Union[str, List[str]],
             n_classes: int,
             code_length: Optional[int] = None,
             mode: Union[None, str, Iterable[str]] = None,
             ds_id: Union[None, str, Iterable[str]] = None) -> None:
    """Set up Hadamard error-correcting output codes for n_classes labels.

    Args:
        inputs: input key(s) to transform.
        outputs: output key(s); must match inputs in number.
        n_classes: number of target classes.
        code_length: length of each class code; defaults to the smallest
            power of two >= n_classes, and must itself be a power of two
            no smaller than n_classes.
        mode: forwarded to the base Op.
        ds_id: forwarded to the base Op.

    Raises:
        ValueError: if input/output key counts differ, or code_length is not
            a positive power of two >= n_classes.
    """
    super().__init__(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id)
    self.in_list, self.out_list = True, True
    if len(self.inputs) != len(self.outputs):
        raise ValueError(
            "Hadamard requires the same number of input and output keys.")
    self.n_classes = n_classes
    if code_length is None:
        # Smallest power of two >= n_classes.
        code_length = 1 << (n_classes - 1).bit_length()
    if code_length <= 0 or (code_length & (code_length - 1) != 0):
        raise ValueError(
            f"code_length must be a positive power of 2, but got {code_length}."
        )
    if code_length < n_classes:
        raise ValueError(
            f"code_length must be >= n_classes, but got {code_length} and {n_classes}."
        )
    self.code_length = code_length
    # One +/-1 code row per class, taken from the Hadamard matrix.
    labels = hadamard(self.code_length).astype(np.float32)
    labels[np.arange(0, self.code_length, 2), 0] = -1  # Make first column alternate
    labels = labels[:self.n_classes]
    self.labels = labels
def theta(pi1, pi2, data): """ This function generates theta, a matrix used in the Fast Hadamard Transform. This matrix is later multiplied by a scalar to obtain the overview chromatogram. :param pi1: The first permutation matrix used in the Fast Hadamard Transform. Should be a column vector of length n, with values ranging from 1 to n. Can be generated using the function pi_one. :param pi2: The second permutation matrix used in the Fast Hadamard Transform. Should be a column vector of length n, with values ranging from 0 to n-1. Can be generated using the function pi_two. :param data: The raw data or measurements output by the GC, after being divided into the appropriate time bins. Should be a column vector of length n. :return: The matrix theta, which is used directly in the Fast Hadamard Transform. Should be a column vector of length n. """ n = len(pi1) W = np.zeros((n, 1)) for i in range(n): W[pi1[i] - 1] = data[i] X = np.zeros((n + 1, 1)) for i in range(n): X[i + 1] = W[i] H = hadamard((n + 1)) Y = np.dot(H, X) Y = np.delete(Y, (0), axis=0) theta = np.zeros((n, 1)) for i in range(n): theta[pi2[i]] = Y[i] theta = theta * -1 # print theta, "<-- theta \n" return theta
def create_Spreadcodes_Transmitter():
    """Build per-user Hadamard spread codes from the transmitter config file.

    Reads 'ConfigTransmitter.json' for 'order' (spread-code length, must be a
    power of two) and 'aantal_Users' (number of users), and returns one
    unipolar (0/1) spread code per user.
    """
    # This function makes Hadamard spread codes with the information of the
    # amount of users and the length of the spread code, given in the config
    # file.
    #                  __________________
    # OUTPUT: User 1  | Spreadcode user 1 |  0 0 0 0 0 0 0 0
    #         User 2  | Spreadcode user 2 |  0 1 0 1 0 1 0 1
    #                  -------------------
    # ------------------------------------------------------------------------
    # JSON file as config file
    with open('ConfigTransmitter.json') as json_file:
        config = json.load(json_file)
    # Generate spread code matrix: rows = columns = config['order'].
    spreadcode_Matrix = hadamard(config['order'])
    # not necessary to transpose the matrix (symmetric matrix)
    # Select spread codes for the users (e.g. 2 users --> 2 spreadcodes),
    # skipping row 0 (the all-ones Hadamard row).
    spreadcode_Users_Matrix_Bip = spreadcode_Matrix[1:config['aantal_Users'] + 1]
    # Make the spreadcode polar: '1' --> '1', '-1' --> '0'.
    # NOTE(review): this mutates a view into spreadcode_Matrix in place.
    spreadcode_Users_Matrix_Bip[spreadcode_Users_Matrix_Bip == -1] = 0
    return spreadcode_Users_Matrix_Bip
def test_numerical_fuzzing_fht():
    """Fuzz cyfht (in-place fast Hadamard transform) against a dense matrix product."""
    for length in [2, 4, 8, 16, 32, 64]:
        input_ = np.random.normal(size=length)
        copy = input_.copy()
        H = hadamard(length)
        # cyfht transforms in place; compare against the explicit
        # vector-matrix product on the saved copy.
        cyfht(input_)
        npt.assert_array_almost_equal(np.dot(copy, H), input_)
def compute_embedded(btsize, zdim, trgs, is_cond=False): ''' Computes the embedded vector for the generator. @param btsize batch size. @param zdim embedded vector dimension. @param trgs target inputs for conditional GAN. @param is_cond conditional flag. ''' # Computing random noise zvar = torch.randn(btsize, zdim) # Testing for conditional info if is_cond: # Computing Hadamard matrix tdim = trgs.size(1) had_mat = hadamard(zdim)[:tdim, :] / 128.0 had_mat = torch.from_numpy(had_mat).float() # Converting targets to embedded dimension zvar = zvar + torch.mm(trgs, had_mat) # Return variables return torch.autograd.Variable(zvar.cuda())
def srht(lin_op, m, indices=None, signs=None):
    """
    Subsampled Randomized Hadamard Transform

    Parameters
    ----------
    lin_op : user-defined linear operator (see LinearOperator class for requirements), with shape (n,d)
    m : sketch size, np.int
    indices : optional pre-drawn sorted row indices into the padded dimension
    signs : optional pre-drawn +/-1 column vector of shape (n, 1)

    Returns
    -------
    np.ndarray with shape (m,d)
    """
    lin_op = LinearOperator(lin_op)
    n = lin_op.shape[0]
    # Pad the row dimension up to the next power of two for the Hadamard step.
    new_dim = 2**(np.int(np.ceil(np.log(n) / np.log(2))))
    if indices is None:
        indices = np.sort(
            np.random.choice(np.arange(new_dim), m, replace=False))
    if signs is None:
        signs = np.random.choice([-1, 1], n, replace=True).reshape((-1, 1))
    if hasattr(lin_op, 'row_slice'):
        # Fast path: materialise the sign-flipped rows and run the implicit
        # transform (_srht, a project helper) on them.
        matrix = signs * lin_op.row_slice(range(n)).copy()
        if n & (n - 1) != 0:
            # n is not a power of two: zero-pad up to new_dim rows.
            matrix = np.vstack(
                [matrix, np.zeros((new_dim - n, matrix.shape[1]))])
        return 1. / np.sqrt(m) * _srht(indices, matrix)
    else:
        # Dense fallback: build the sampled, sign-flipped Hadamard sketch
        # explicitly and apply it through the operator's adjoint.
        RHD = 1. / np.sqrt(m) * hadamard(
            new_dim)[:, :n][indices] * signs.squeeze()
        return lin_op.hmul(RHD.T).T
def firstEval(nm):
    """Propagate the 2-qubit state `nm` through (H ⊗ I), CNOT, (H ⊗ I),
    printing the state after each stage."""
    # Normalisation factor for the Hadamard gate: 1/sqrt(2).
    escalar = 1. / (mt.sqrt(2))
    hada = hadamard(2) * escalar
    # Hadamard on the first qubit, identity on the second.
    hada2 = np.kron(hada, np.eye(2))
    # First state
    r1 = hada2 @ nm
    print("Evaluamos el primer estado")
    print(r1)
    print()
    # Second state: apply the CNOT gate.
    cnot = np.array([[1., 0., 0., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 1., 0.]])
    r2 = cnot@r1
    print("Evaluamos el segundo estado")
    print(r2)
    #gatex(r2)
    print()
    # Third state: second Hadamard on the first qubit.
    r3 = hada2@r2
    print("Evaluamos el tercera estado")
    print(r3)
    print()
def get_ruggedness_function(metric, N, gt_code):
    """
    Return the correct ruggedness calculating function according to
    the input "metric"
    """
    # Some metrics require module-level precomputation (phi, idx_1) before
    # the returned function is invoked.
    global phi, idx_1
    if metric == 'N_max':
        # NOTE(review): both branches return get_N_max — the N == 15 special
        # case is currently a no-op; confirm whether a different function was
        # intended here.
        if N == 15:
            return get_N_max
        else:
            return get_N_max
    elif metric == 'r_s':
        return cal_r_s
    elif metric == 'epi':
        return cal_epi
    elif metric == 'open_ratio':
        return cal_open_ratio
    elif metric == 'E':
        # Walsh/Hadamard basis used by the E metric, plus a mask of rows of
        # gt_code containing exactly one 1 (single-mutant genotypes).
        phi = hadamard(2**N, dtype='float32')
        idx_1 = gt_code.sum(axis=1) == 1
        return cal_E
    elif metric == 'gamma':
        return cal_gamma
    elif metric == 'adptwalk_steps':
        return cal_adptwalk_steps
    elif metric == 'adptwalk_probs':
        return cal_adptwalk_probs
def marginalQ(d, k, eps): """ Fourier mechanism :param d: number of dimensions :param k: the order of the marginals of interest e.g., 3 for 3 way marginals, d for all marginals, etc. :param eps: the privacy budget """ #d=4 #k=2 T = combo(d, k) #Set of putput index T_int = [int(r, 2) for r in T] # int value of the output index Input_item = combo(d, d) U = 2**d O = len(T) Q = np.zeros((2 * O, U)) H = linalg.hadamard(2**d) #eps=1.0 p = np.exp(eps) / (1 + np.exp(eps)) for i in range(O): for j in range(2**d): if H[T_int[i], j] == 1: Q[i, j] = p * (1 / O) Q[O + i, j] = (1 - p) * (1 / O) else: Q[i + O, j] = p * (1 / O) Q[i, j] = (1 - p) * (1 / O) return Q
def get_hash_targets(self, n_class, bit):
    """Build n_class binary (+/-1) hash-target codes of length `bit`.

    Rows of [H; -H] (H the order-`bit` Hadamard matrix) are used first.  If
    more classes than 2*bit are needed, the extras are random balanced +/-1
    codes, resampled up to 20 times until the pairwise distances pass the
    min/mean test below.
    """
    H_K = hadamard(bit)
    H_2K = np.concatenate((H_K, -H_K), 0)
    hash_targets = torch.from_numpy(H_2K[:n_class]).float()
    if H_2K.shape[0] < n_class:
        hash_targets.resize_(n_class, bit)
        for k in range(20):
            for index in range(H_2K.shape[0], n_class):
                ones = torch.ones(bit)
                # Bernouli distribution: flip exactly half the positions.
                sa = random.sample(list(range(bit)), bit // 2)
                ones[sa] = -1
                hash_targets[index] = ones
            # to find average/min pairwise distance
            c = []
            for i in range(n_class):
                for j in range(n_class):
                    if i < j:
                        TF = sum(hash_targets[i] != hash_targets[j])
                        c.append(TF)
            c = np.array(c)
            # choose min(c) in the range of K/4 to K/3
            # see in https://github.com/yuanli2333/Hadamard-Matrix-for-hashing/issues/1
            # but it is hard when bit is small
            if c.min() > bit / 4 and c.mean() >= bit / 2:
                print(c.min(), c.mean())
                break
    return hash_targets
def cake_cutting(n):
    """Reorder the rows of the order-n Hadamard matrix by connected-component
    count and return the reordered matrix in +/-1 form.

    Each binary (0/1) Hadamard row is laid out as an (n/8, 8) image and its
    connected components counted with 4-connectivity; rows are then ranked by
    that count (ties broken by original row index).  n must be a power of two
    divisible by 8.
    """
    # Binary (0/1) version of the Hadamard rows.
    H = (hadamard(n) + np.ones(n)) / 2
    # 4-connectivity structuring element for scipy.ndimage.label.
    structure = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    nComp = []
    for i in range(n):
        basis = H[i, :]
        # Not all orders are perfect squares (e.g. 2^5 is not), so reshape to
        # (n/8, 8) instead of (sqrt(n), sqrt(n)).
        #basis=basis.reshape((int(math.sqrt(n)),int(math.sqrt(n))))
        basis = basis.reshape(int(n / 8), 8)
        labeled, nC = label(basis, structure)
        nComp.append(nC)
    # Stable rank of each row by component count (ties broken by row index).
    order = np.zeros((n, 1)) - 1
    min_prev = -1
    for i in range(n):
        prev = 0
        eq = 0
        for j in range(n):
            if nComp[j] < nComp[i]:
                prev += 1
            elif nComp[j] == nComp[i]:
                if i > j:
                    eq += 1
        order[i] = prev + eq
    # Permute the rows into ranked order and map 0/1 back to -1/+1.
    H1 = np.zeros((n, n))
    for i in range(n):
        H1[int(order[i]), :] = H[i, :]
    H1 = 2 * H1 - 1
    return H1
def generateSensingMatrix(self, m, n, type):
    """Generate a column-normalized m x n sensing matrix of the given type.

    The matrix is stored in self.sensing_matrix (nothing is returned).
    type: one of 'sdnormal', 'uniform01', 'bernoulli', 'hadamard',
    'subsampled_haar'.  NOTE: the parameter name `type` shadows the builtin
    but is kept for interface compatibility.
    """
    #Generate sensing matrix of dim m by n of given type
    if type == 'sdnormal':
        self.sensing_matrix = np.random.randn(m,n)
        #column normalize the matrix:
        for i in range(n):
            self.sensing_matrix[:,i] = self.sensing_matrix[:,i]/np.linalg.norm(self.sensing_matrix[:,i])
    if type == 'uniform01':
        self.sensing_matrix = np.random.rand(m,n)
        for i in range(n):
            self.sensing_matrix[:,i] = self.sensing_matrix[:,i]/np.linalg.norm(self.sensing_matrix[:,i])
    if type == 'bernoulli':
        #For small m and n, s columns has high prob. of being linearly dependent. This causes x_S feature to have components
        #which blow up, which causes the output of the neural network to output a deterministic prob. dist. of all zeros and a single 1, and v to be nan(because it is so large)
        self.sensing_matrix = np.random.binomial(1,1/2,(m,n))
        self.sensing_matrix = self.sensing_matrix.astype(float)
        for i in range(n):
            self.sensing_matrix[:,i] = self.sensing_matrix[:,i]/np.linalg.norm(self.sensing_matrix[:,i])
    if type == 'hadamard':
        #n must be a power of 2 here!!! For small m and n, s columns has high prob. of being linearly dependent. This causes x_S feature to have components
        #which blow up, which causes the output of the neural network to output a deterministic prob. dist. of all zeros and a single 1, and v to be nan(because it is so large)
        A = hadamard(n)
        S = sample(range(1, n), m) #sample m indices randomly (row 0 is never chosen)
        self.sensing_matrix = A[S,:]
        self.sensing_matrix = self.sensing_matrix.astype(float)
        for i in range(n):
            self.sensing_matrix[:,i] = self.sensing_matrix[:,i]/np.linalg.norm(self.sensing_matrix[:,i])
    if type == 'subsampled_haar':
        # Random orthogonal (Haar-distributed) matrix with m sampled rows.
        A = ortho_group.rvs(n)
        S = sample(range(1, n), m)
        self.sensing_matrix = A[S,:]
        for i in range(n):
            self.sensing_matrix[:,i] = self.sensing_matrix[:,i]/np.linalg.norm(self.sensing_matrix[:,i])
def Apply(self, matrixA, applyLeft=True):
    """Apply a subsampled randomized Hadamard transform (SRHT) sketch to matrixA.

    Args:
        matrixA: (m, n) array to sketch.
        applyLeft: if True, sketch the rows (returns S @ A of shape
            (sketchdim, n)); otherwise sketch the columns (returns A @ S).

    Returns:
        The sketched matrix.

    Fixes over the original: the global NumPy RNG state is now restored on
    BOTH paths (the applyLeft path previously leaked the seeded state), the
    applyLeft=False path now returns its result instead of discarding it,
    and two leftover debug prints were removed.
    """
    m, n = matrixA.shape
    # Temporarily swap in this sketcher's private RNG state so the sketch is
    # reproducible regardless of the caller's RNG usage.
    temp_state = np.random.get_state()
    np.random.set_state(self.randstate)
    try:
        dim = m if applyLeft else n
        # Augmented dimension d (must be a power of two for hadamard()).
        d = 1 << int(np.ceil(np.log2(dim)))
        H = hadamard(d)
        # Diagonal sign matrix (random +/-1 flips).
        signs = 2 * np.random.binomial(n=1, p=.5, size=d) - 1  # Random signs
        D = np.diag(signs)
        # Uniform sampling matrix:
        # each column has a single 1; each row has at most one 1.
        inds = np.random.choice(a=d, size=self.sketchdim, replace=False)
        P = csc_matrix(
            (np.ones(self.sketchdim), (inds, range(self.sketchdim))),
            shape=(d, self.sketchdim))
        matrixS = P.T @ H @ D * math.sqrt(1 / self.sketchdim)
        if applyLeft:
            return matrixS @ matrixA
        # NOTE(review): as in the original, this right-multiplication only
        # conforms when n == self.sketchdim; confirm intended shape.
        return matrixA @ matrixS
    finally:
        # Always hand the global RNG state back to the caller.
        np.random.set_state(temp_state)
def __init__(self, vec_dim=512, vec_norm=10, L=10, K=8, bucket_limit=None): self.vec_dim = vec_dim # must be power of 2 self.vec_norm = vec_norm self.bucket_limit = bucket_limit self.L = L self.K = K self.H_dim = linalg.hadamard(vec_dim).astype(np.float32) self.reset_params()
def setUp(self):
    """Create test fixtures: a 32x32 Hadamard matrix and a 64x77 Gaussian matrix."""
    fixtures = {
        'hadamard': hadamard(32),
        'random matrix': np.random.randn(64, 77),
    }
    self.data = fixtures
def getH(s): n_by_s = int(n / s) # print(n_by_s) Is = np.eye(s) Hs = np.multiply(1 / np.sqrt(s), hadamard(s)) Gs = np.vstack([Hs, Is]) H_bar = block_diag(*([Gs] * n_by_s)) print("Normalized H shape", H_bar.shape) return H_bar
def __init__(self, epsilon, hash_funcs, m):
    """Store LDP sketch parameters and (when possible) cache a Hadamard matrix.

    Args:
        epsilon: privacy budget.
        hash_funcs: hash family; k is derived as its length.
        m: sketch width.
    """
    self.epsilon = epsilon
    self.hash_funcs = hash_funcs
    self.k = len(hash_funcs)
    self.m = m
    # Only power-of-two widths admit a Hadamard matrix.
    # NOTE(review): when m is NOT a power of two, self.hadamard_matrix is
    # never set, so later reads would raise AttributeError — confirm callers
    # guarantee a power-of-two m.
    if (self.m & (self.m - 1)) == 0:
        self.hadamard_matrix = hadamard(
            self.m)  # Cache hadamard for performance
def fjlt_derive(n, N):
    """Derive a Fast Johnson-Lindenstrauss-style transform matrix T S H.

    Args:
        n: target (sketch) dimension.
        N: ambient dimension; must be a power of two for hadamard().

    Returns:
        np.matrix of shape (n, N): T @ S @ H, where S places one rescaled 1
        per row at a random column and T is a random Bernoulli matrix
        (project helper random_bernoulli).
    """
    S = np.zeros((n, N))
    # One randomly positioned 1 per row.  (Python 3: range, not the removed
    # Python 2 builtin xrange, which made this function crash with NameError.)
    for i in range(n):
        S[i, np.random.randint(0, N)] = 1
    S = S * np.sqrt(N) / np.sqrt(n)
    T = random_bernoulli(n, n)
    H = hadamard(N)
    return np.matrix(T) * np.matrix(S) * np.matrix(H)
def gen_walsh_masks(rank, invert=False):
    """Return all 2**(2*rank) binary Walsh masks, each of shape (2**rank, 2**rank).

    Entries are 0/1 (Hadamard -1 mapped to 0); invert=True flips every mask.
    """
    side = 2 ** rank
    total = side ** 2  # volume: number of masks and Hadamard order
    # Binary Hadamard: map -1 -> 0.
    basis = hadamard(total)
    basis[basis == -1] = 0
    if invert:
        basis = 1 - basis
    return [basis[:, col].reshape(side, side) for col in range(total)]
def masks(self):
    """Return all 2**(2*rank) binary Walsh masks of shape (2**rank, 2**rank).

    Hadamard -1 entries are mapped to 0; self.invert flips every mask.
    """
    side = 2 ** self.rank
    total = side ** 2
    walsh = hadamard(total)
    # Map the Hadamard -1 entries to 0, then optionally invert all masks.
    walsh[walsh == -1] = 0
    if self.invert:
        walsh = 1 - walsh
    return [walsh[:, idx].reshape(side, side) for idx in range(total)]
def walsh(N):
    """Return the N x N Walsh matrix: hadamard(N) rows permuted into sequency order.

    N must be a power of two.  The target row index for row x is the
    bit-reversal of (N + x) ^ (x // 2) (Gray code with a fixed bit width).
    """
    H = hadamard(N)
    B = copy.copy(H)
    ind = []
    for x in range(N):
        # Precedence: (N + x) ^ (x // 2); bin(...)[:2:-1] reverses the bits,
        # skipping the '0b' prefix.  Integer division (//) is required on
        # Python 3 — the original's x / 2 yields a float and bin() raises.
        ind.append(int(bin(N + x ^ x // 2)[:2:-1], 2))
    for x in range(0, N):
        B[x, :] = H[ind[x], :]
    return B
def generate_contrast(problem):
    """Generates the raw sample from the problem file

    Arguments
    =========
    problem : dict
        The problem definition; must contain 'num_vars'.

    Returns
    =======
    np.ndarray
        A foldover contrast matrix: hadamard(k) stacked on -hadamard(k),
        where k = 2 ** find_smallest(num_vars).
    """
    num_vars = problem['num_vars']
    # Find the smallest n, such that num_vars < k.
    # (An unused list of powers of two was removed here.)
    k_chosen = 2 ** find_smallest(num_vars)
    # Generate the fractional factorial contrast
    contrast = np.vstack([hadamard(k_chosen), -hadamard(k_chosen)])
    return contrast
def setUp(self):
    """Build fixtures for sizes 2, 4, 8, 16: random matrices, Hadamard
    matrices and random vectors."""
    sizes = [2, 4, 8, 16]
    # Random square matrices with entries in [-5, 50).
    self.nxn = [np.random.randint(-5, 50, [s, s]) for s in sizes]
    # Reference Hadamard matrices of matching orders.
    self.had = [linalg.hadamard(s) for s in sizes]
    # Random vectors of matching lengths.
    self.vec = [np.random.randint(-5, 50, [s]) for s in sizes]
def multiresolutional_walsh_hadamard_transform(grayscale_image):
    """Apply a Walsh-Hadamard transform to a square grayscale image.

    Args:
        grayscale_image: 2-D array, square, with power-of-two side length.

    Returns:
        numpy.dot(H, grayscale_image), H being the Hadamard matrix of
        matching order.

    Raises:
        AssertionError: if the image is not square with a power-of-two side.

    NOTE(review): despite the "MR" name, the multiresolution row/column
    averaging stages were entirely commented out in the original and never
    executed; only the plain WHT remained, so the dead code was removed.
    (Original header: "MR-WHT implementation. Outputs 1-level MR-WHT
    transformed image. 23-July-2015")
    """
    def is_power2(num):
        # True for positive powers of two (bit trick).
        return num != 0 and ((num & (num - 1)) == 0)

    side = grayscale_image.shape[0]
    assert is_power2(side) and (
        side == grayscale_image.shape[1]), 'Grayscale Image size is not a power of 2 or not square!'
    # Plain WHT: left-multiply by the Hadamard matrix.
    h = hadamard(side)
    transformed_grayscale_image = numpy.dot(h, grayscale_image)
    return transformed_grayscale_image
def inverse_multiresolutional_walsh_hadamard_transform(transformed_grayscale_image):
    """Invert the Walsh-Hadamard transform of a square image.

    Args:
        transformed_grayscale_image: 2-D square array produced by the forward
            WHT (power-of-two side length).

    Returns:
        H @ X / n — since the Hadamard matrix satisfies H H = n I, this
        recovers the original image (as a float array).

    NOTE(review): the multiresolution un-averaging stages were entirely
    commented out in the original and never executed; only the plain inverse
    WHT remained, so the dead code was removed.  (Original header: "MR-WHT
    implementation. Outputs 1-level inverse MR-WHT transformed image.
    23-July-2015")
    """
    n = transformed_grayscale_image.shape[0]
    # Plain inverse WHT.
    h = hadamard(n)
    inverse_transformed_grayscale_image = numpy.dot(h, transformed_grayscale_image) / n
    return inverse_transformed_grayscale_image
if((i*i)%p==v): f=1; break; i+=1 return f; #for key in keys: #print ("%d %d",key*key,len(s[key])) t=int(raw_input()) for q in range(t): n=int(raw_input()) if(is_power2(n)): print "YES" mat=linalg.hadamard(n) #print mat for j in range(n): for k in range(n): print mat[j][k], print "" elif(n in keys): print "YES" for symb in s[n]: if symb=='+': print 1, else: print -1, print "" elif(n in keys1): print "YES"
def run( defaultVolt, maxVolt, offsetChannel, listChannels, orderChannel, activateZeroSlopes, log, repeatMeas, fracBits):
    """Drive mirror actuators through Hadamard voltage patterns and record
    Shack-Hartmann wavefront-sensor (SHWFS) slope responses via a comedi device.

    Python 2 code (print statements; set_printoptions(threshold='nan')).

    defaultVolt / maxVolt: base and peak actuation voltages.
    offsetChannel / listChannels / orderChannel: comedi channel mapping.
    activateZeroSlopes: also record a zero-pattern reference before each set.
    log: writable log file.  repeatMeas: measurements to average.
    fracBits: fixed-point fraction bits for slope decoding.
    """
    # Hadamard dimension: next power of two >= number of driven channels;
    # rows beyond the channel count are zeroed out.
    dimHadamard = int( 2**(_numpy.ceil(_numpy.log2( len(orderChannel) ))) )
    hadamardMat = hadamard( dimHadamard )
    hadamardMat[len(orderChannel):dimHadamard,:] = 0
    log.write("_hadamardMat:\n%s\n" % ( hadamardMat ))
    _numpy.set_printoptions(threshold='nan')
    #print hadamardMat
    assert (defaultVolt-maxVolt) >= 0, \
        "defaultVolt-maxVolt must be greater zero for hadamard application"
    # open comedi device
    device = _device(filename='/dev/comedi0')
    device.open()
    ao_subdevice = device.find_subdevice_by_type( _constant.SUBDEVICE_TYPE.ao)
    mem_subdevice = device.find_subdevice_by_type( _constant.SUBDEVICE_TYPE.memory)
    #print ao_subdevice
    # Instruction buffers for reading slope data from the memory subdevice.
    insnR = mem_subdevice.insn()
    insnR2 = mem_subdevice.insn()
    insnR.data = 256*[0]
    insnR2.data = 256*[0]
    # SHWFS trigger sequence: timestamp, config, timestamp.
    insnSHWFSTrigger = [mem_subdevice.insn(), mem_subdevice.insn(), mem_subdevice.insn()]
    insnSHWFSTrigger[0].insn = insnSHWFSTrigger[2].insn = _constant.INSN.gtod
    insnSHWFSTrigger[0].data = insnSHWFSTrigger[2].data = [0, 0]
    insnSHWFSTrigger[1].data = [1]
    insnSHWFSTrigger[1].insn = _constant.INSN.config
    # deactivate repositioning if enabled!
    insnDeactivateRePos = mem_subdevice.insn()
    insnDeactivateRePos.data = [3, 0, 0]
    device.do_insn(insnDeactivateRePos)
    time.sleep(0.1)
    # set selected channels to defaultVoltage
    #print listChannels
    setInsn = setDefaultVoltage( ao_subdevice, offsetChannel, listChannels, defaultVolt )
    device.do_insn(setInsn)
    time.sleep(1)
    # average slopeZero measurement
    slopeZero = averageMeasurements( device, repeatMeas, insnSHWFSTrigger, insnR, fracBits )
    print "-------------------------------"
    # create empty matrix for slopes measured from now on
    shwfsData = _numpy.uint32(_numpy.zeros( (dimHadamard,2*len(insnR.data)) ))
    shwfsData_ref_previous = _numpy.uint32(_numpy.zeros( (dimHadamard,2*len(insnR.data)) ))
    #print shwfsData.shape
    # NOTE(review): kk makes this outer loop run exactly once — it looks like
    # a leftover retry mechanism; confirm before removing.
    kk = True
    while kk == True:
        for k in range( dimHadamard ):
            insnsW = setDefaultVoltage( ao_subdevice, offsetChannel, listChannels, defaultVolt )
            time.sleep(1)
            # average zeroSlopes for each set
            ret2 = averageMeasurements( device, repeatMeas, insnSHWFSTrigger, insnR, fracBits )
            # Ramp the k-th Hadamard pattern up in 10 voltage steps.
            for l in range(0,11):
                tmp = _numpy.floor( defaultVolt + hadamardMat[:,k] * maxVolt/10*l )
                dat = insnsW.data
                dat[orderChannel] = tmp
                insnsW.data = dat
                device.do_insn(insnsW)
                # wait 50 ms
                time.sleep(0.05)
            time.sleep(1)
            # average max slope for set
            slope = averageMeasurements( device, repeatMeas, insnSHWFSTrigger, insnR, fracBits )
            slopeX = slope.slopeX
            slopeY = slope.slopeY
            if activateZeroSlopes == True:
                # Keep the per-pattern zero reference alongside the data.
                shwfsData_ref_previous[k,:] = _numpy.append(_numpy.uint32(ret2.slopeX),_numpy.uint32(ret2.slopeY))
                print " activateZeroSlopes true"
                print " captured wavefront"
            # store captured slope data into matrix
            shwfsData[k,:] = _numpy.append(slopeX,slopeY)
            insnsW = setDefaultVoltage( ao_subdevice, offsetChannel, listChannels, defaultVolt )
            # Ramp the pattern back down to the default voltage.
            for l in reversed(range(0,11)):
                tmp = _numpy.floor( defaultVolt + hadamardMat[:,k] * maxVolt/10*l )
                dat = insnsW.data
                dat[orderChannel] = tmp
                insnsW.data = dat
                device.do_insn(insnsW)
                # wait 50 ms
                time.sleep(0.05)
            print "number %d" % k+" of %d" % dimHadamard
        kk = False
    print "-------------------------------"
    time.sleep(1)
    # average slopeZero2 measurement
    slopeZero2 = averageMeasurements( device, repeatMeas, insnSHWFSTrigger, insnR, fracBits )
    # set previous selected channels to zero volt
    clearInsn = setDefaultVoltage( ao_subdevice, offsetChannel, listChannels, 0)
    device.do_insn(clearInsn)
    timeD = datetime.now()
    c = ao_subdevice.channel(index=0)
    chanVolt = c.get_range(index=0)
    log.write("_comediVoltMin:\n%s\n" % ( chanVolt.min ))
    log.write("_comediVoltMax:\n%s\n" % ( chanVolt.max ))
    log.write("_time:\n%s\n" % ( timeD.strftime("%Y-%m-%d_%H-%M") ) )
    # close comedi device
    device.close()
    #print(shwfsData)
    saveData( time, shwfsData, slopeZero, slopeZero2, shwfsData_ref_previous, activateZeroSlopes, log, hadamardMat )
def hadet(n):
    """Return (determinant, matrix, inverse) for the order-n Hadamard matrix."""
    matrix = hadamard(n)
    determinant = det(matrix)
    inverse = inv(matrix)
    return determinant, matrix, inverse
def hadamard_transform(n): H = hadamard(n) d = np.random.choice([-1, 1], size=n) D = np.zeros((n, n)) np.fill_diagonal(D, d) return H, D
def alvarez_embed(grayscale_container_path, binary_watermark_path, watermarked_image_path):
    """
    Alvarez embedding method implementation.
    Outputs the resulting watermarked image to watermarked_image_path
    23-July-2015

    Python 2 code (print statements; integer division drives the block
    indexing below).
    """
    # Watermark as a flat 0/1 array; m is the side of the (square) watermark.
    binary_watermark_1darray = numpy.asarray(genetic_algorithm_pretreatment(binary_watermark_path))
    m = int(math.sqrt(binary_watermark_1darray.shape[0]))
    grayscale_container_2darray = numpy.asarray(Image.open(grayscale_container_path).convert("L"))
    # Zero-pad the container with rows/columns until it is square.
    while grayscale_container_2darray.shape[0] != grayscale_container_2darray.shape[1]:
        if grayscale_container_2darray.shape[0] > grayscale_container_2darray.shape[1]:
            grayscale_container_2darray = numpy.c_[
                grayscale_container_2darray, numpy.zeros(grayscale_container_2darray.shape[0])]
        elif grayscale_container_2darray.shape[0] < grayscale_container_2darray.shape[1]:
            grayscale_container_2darray = numpy.r_[
                grayscale_container_2darray, numpy.zeros(grayscale_container_2darray.shape[1])[numpy.newaxis]]
    n = grayscale_container_2darray.shape[0]
    # Now try to find a normalized Hadamard matrix of size 4t closest to floor(n/m)
    hadamard_matrix_size = int(math.floor(float(n) / float(m)))
    hadamard_matrix_size_right = hadamard_matrix_size
    hadamard_matrix_size_left = hadamard_matrix_size
    # Find hadamard_matrix_size as an integer, divisible by 4 and a power of 2 and bigger than m/n
    while (hadamard_matrix_size_right % 4 != 0) or (
            (hadamard_matrix_size_right & (hadamard_matrix_size_right - 1)) != 0):
        hadamard_matrix_size_right += 1
    # Find hadamard_matrix_size as an integer, divisible by 4 and a power of 2 and less than m/n
    while (hadamard_matrix_size_left % 4 != 0) or ((hadamard_matrix_size_left & (hadamard_matrix_size_left - 1)) != 0):
        hadamard_matrix_size_left -= 1
    # Pick the closest or the least if equally distant
    if hadamard_matrix_size_right - hadamard_matrix_size < hadamard_matrix_size - hadamard_matrix_size_left:
        hadamard_matrix_size = hadamard_matrix_size_right
    else:
        hadamard_matrix_size = hadamard_matrix_size_left
    print 'Hadamard matrix size: ', hadamard_matrix_size
    h = hadamard(hadamard_matrix_size)
    # print 'Hadamard matrix h: ', h
    block_size = int(math.floor(float(n) / float(m)))
    watermarked_image_2darray = numpy.copy(grayscale_container_2darray)
    # Embed one watermark bit per block.
    for i in range(0, m * m - 1):
        col_index = i % (n / block_size)
        row_index = i / (n / block_size)
        a = grayscale_container_2darray[col_index * block_size:col_index * block_size + hadamard_matrix_size,
                                        row_index * block_size:row_index * block_size + hadamard_matrix_size]
        # 2-D Hadamard transform of the block.
        _b = numpy.dot(numpy.dot(h, a), h.transpose()) / hadamard_matrix_size
        b1 = _b[3][3]
        b2 = _b[3][5]
        # let b equal hadamard_matrix_size/4, as proposed by authors in 3.1 -> 1
        b = hadamard_matrix_size / 4
        d = abs((b1 - b2) / 2)
        # Push the two coefficients apart in a direction that encodes the bit.
        if binary_watermark_1darray[i]:
            _b[3][3] = b1 - d - b
            _b[3][5] = b2 + d + b
        else:
            _b[3][3] = b1 + d + b
            _b[3][5] = b2 - d - b
        # Inverse 2-D Hadamard transform.
        a = numpy.dot(numpy.dot(h.transpose(), _b), h) / hadamard_matrix_size
        # After HT, some values are more than 255 and less than 0, so fix it
        a[a > 255] = 255
        a[a < 0] = 0
        watermarked_image_2darray[col_index * block_size:col_index * block_size + hadamard_matrix_size,
                                  row_index * block_size:row_index * block_size + hadamard_matrix_size] = a
    watermarked_image = Image.fromarray(numpy.uint8(watermarked_image_2darray))
    # watermarked_image.show()
    # Write image to file
    watermarked_image.save(watermarked_image_path)
    return
def time_hadamard(self, size):
    # Benchmark body: time construction of an order-`size` Hadamard matrix
    # (scipy.linalg imported as `sl`); the result is intentionally discarded.
    sl.hadamard(size)
print "Norm of inverse is %g" % (inv_norm) print "Norm of residual is %g" % (res_norm) sgn, det = slogdet(Rkk) print "Log-determinant of selected columns is %g with sign %g" %\ (det, sgn) print "Conditioning of selected columns is %g" %(cond(Rkk)) p = p[:k] return Q, R, p if __name__ == '__main__': from scipy.linalg import hadamard A = np.random.randn(100,30) k = 30 Q, R, p = srrqr(A, k, verbose = True) print p print np.allclose(A[:,p],np.dot(Q,R[:,:k])) A = hadamard(32, dtype = 'd') k = 5 Q, R, p = srrqr(A, k, verbose = True) print p print np.allclose(A[:,p],np.dot(Q,R[:,:k])) A = np.random.randn(30,100) k = 30 Q, R, p = srrqr(A, k, verbose = True) print p print np.allclose(A[:,p],np.dot(Q,R[:,:k]))
def alvarez_extract(grayscale_stego_path, extracted_watermark_path, watermark_size):
    """
    Alvarez extracting method implementation.
    Outputs the extracted watermark to extracted_watermark_path
    23-July-2015

    Python 2 code (integer division drives the block indexing below).
    """
    grayscale_stego_2darray = numpy.asarray(Image.open(grayscale_stego_path).convert("L"))
    assert grayscale_stego_2darray.shape[0] == grayscale_stego_2darray.shape[1], 'Grayscale Stego is not square!'
    n = grayscale_stego_2darray.shape[0]
    m = watermark_size
    # Now try to find a normalized Hadamard matrix of size 4t closest to floor(n/m)
    hadamard_matrix_size = int(math.floor(float(n) / float(m)))
    hadamard_matrix_size_right = hadamard_matrix_size
    hadamard_matrix_size_left = hadamard_matrix_size
    # Find hadamard_matrix_size as an integer, divisible by 4 and a power of 2 and bigger than m/n
    while (hadamard_matrix_size_right % 4 != 0) or (
            (hadamard_matrix_size_right & (hadamard_matrix_size_right - 1)) != 0):
        hadamard_matrix_size_right += 1
    # Find hadamard_matrix_size as an integer, divisible by 4 and a power of 2 and less than m/n
    while (hadamard_matrix_size_left % 4 != 0) or ((hadamard_matrix_size_left & (hadamard_matrix_size_left - 1)) != 0):
        hadamard_matrix_size_left -= 1
    # Pick the closest or the least if equally distant
    if hadamard_matrix_size_right - hadamard_matrix_size < hadamard_matrix_size - hadamard_matrix_size_left:
        hadamard_matrix_size = hadamard_matrix_size_right
    else:
        hadamard_matrix_size = hadamard_matrix_size_left
    # print 'Hadamard matrix size: ', hadamard_matrix_size
    h = hadamard(hadamard_matrix_size)
    # print 'Hadamard matrix h: ', h
    block_size = int(math.floor(float(n) / float(m)))
    extracted_watermark = numpy.zeros(m * m)
    # Recover one bit per block by comparing the same two Hadamard-domain
    # coefficients that embedding modified.
    for i in range(0, m * m - 1):
        col_index = i % (n / block_size)
        row_index = i / (n / block_size)
        a = grayscale_stego_2darray[col_index * block_size:col_index * block_size + hadamard_matrix_size,
                                    row_index * block_size:row_index * block_size + hadamard_matrix_size]
        # 2-D Hadamard transform of the block.
        b = numpy.dot(numpy.dot(h, a), h.transpose()) / hadamard_matrix_size
        b1 = b[3][3]
        b2 = b[3][5]
        extracted_watermark[i] = 255 if b1 > b2 else 0
    extracted_watermark_image = Image.fromarray(numpy.uint8(extracted_watermark.reshape(m, m)))
    # Write image to file
    extracted_watermark_image.save(extracted_watermark_path)
    return