def solve(self, target, all=False):
    """Solve oracle(x) == target for x (over self.field).

    :param target: desired oracle output (coerced to a field vector).
    :param all: if False, return one particular solution vector;
        if True, return an iterator over ALL solutions (the particular
        solution shifted by every kernel element).
    :raises ValueError: if no solution exists (from ``solve_right``).

    BUGFIX: the original body mixed ``return x`` with a trailing
    ``yield`` loop, which made the whole function a generator function —
    the ``all=False`` path therefore returned a generator object, never
    ``x`` itself (``x`` ended up hidden in StopIteration.value). Using a
    generator expression for the ``all=True`` path keeps this a plain
    function so ``return x`` really returns the solution.
    """
    # Subtract the affine constant so we solve the purely linear part.
    target = vector(self.field, target) - self.zero
    x = self.matrix.solve_right(target)
    if not all:
        return x
    # Full solution set: particular solution + every kernel vector.
    # NOTE(review): self.kernel is presumably a lazy property backed by
    # self._kernel (set in __init__) — defined outside this chunk.
    return (z + x for z in self.kernel)
def __init__(self, oracle, n_in, field=F2, ntests=10):
    """Recover an affine map from a black-box oracle by probing it.

    The oracle is assumed to compute ``x -> M*x + c`` over *field*.
    The constant ``c`` is read off from the all-zero input, and each
    column of ``M`` from the corresponding unit vector.

    :param oracle: callable taking a length-``n_in`` list of field
        elements and returning an output sequence.
    :param n_in: number of oracle input coordinates.
    :param field: base field (defaults to F2).
    :param ntests: number of randomized consistency checks to run
        via ``test_oracle``.
    """
    self.oracle = oracle
    self.field = field
    self.n_in = int(n_in)
    # Affine constant: the oracle's value at the zero vector.
    self.zero = vector(field, oracle([0] * self.n_in))
    self.n_out = len(self.zero)

    def probe(index):
        # Column of the linear part: oracle(e_i) minus the constant.
        unit = [0] * self.n_in
        unit[index] = 1
        return vector(field, oracle(unit)) - self.zero

    columns = [probe(i) for i in range(self.n_in)]
    self.matrix = matrix(field, columns).transpose()
    self._kernel = None  # computed lazily elsewhere
    self.rank = self.matrix.rank()
    self.kernel_dimension = self.matrix.ncols() - self.rank
    self.test_oracle(ntests)
def matrix_mult_int_rev(mat, x):
    """
    LSB to MSB vector

    >>> matrix_mult_int_rev( \
        matrix(GF(2), [[1, 0, 1], [1, 0, 0]]), \
        0b110)  # read as 6 -> 0,1,1 -> 1,0 -> 1
    1
    """
    assert mat.base_ring() == GF(2)
    # Decode the integer into bits, least-significant bit first.
    width = mat.ncols()
    bits_lsb_first = Bin(x, width).tuple[::-1]
    product = mat * vector(GF(2), bits_lsb_first)
    # Re-encode the result back to an integer in the same LSB-first order.
    return Bin(product[::-1]).int
def matrix_mult_int(mat, x):
    """
    MSB to LSB vector

    >>> matrix_mult_int( \
        matrix(GF(2), [[1, 0, 1], [1, 0, 0]]), \
        0b110)  # read as 6 -> 1,1,0 -> 1,1 -> 3
    3
    """
    assert mat.base_ring() == GF(2)
    # Decode the integer into bits, most-significant bit first.
    width = mat.ncols()
    bit_vector = vector(GF(2), Bin(x, width).tuple)
    # Multiply and re-encode the GF(2) result vector as an integer.
    return Bin(mat * bit_vector).int
def oracle(x): nonlocal m return m * vector(fld, x)