Example #1
def put_cpp_tensor(name, values, dimension, fractional=1):
    if values is None:
        # If values is None, store None in the dict
        _set_tensor(name, None, dimension)
    elif fractional != 1 or not all(isinstance(v, int) for v in values):
        # If fractional is not 1, or the list does not consist solely of
        # integers, scale each value and reduce it into the ring
        tmp = [modulo_pmax(int(fractional * v)) for v in values]
        _set_tensor(name, minionn.VectorInt(tmp), dimension)
    else:
        # Otherwise, store the integer values directly
        _set_tensor(name, minionn.VectorInt(values), dimension)
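For intuition, here is a minimal self-contained sketch of the fixed-point encoding this function applies. The modulus and fractional base below are placeholders; the real values come from config, and modulo_pmax_sketch only mirrors what the module-level modulo_pmax is assumed to do:

PMAX = 2**31 - 1  # placeholder modulus; the real value is config.PMAX

def modulo_pmax_sketch(v):
    # Reduce into [-(PMAX//2), PMAX//2], mirroring the assumed behavior
    # of modulo_pmax for the signed ring representation
    v %= PMAX
    return v - PMAX if v > PMAX // 2 else v

# Scaling floats by a fractional base of 1000 turns them into ring integers
print([modulo_pmax_sketch(int(1000 * v)) for v in [0.5, -1.25]])  # [500, -1250]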
Example #2
def matrix_mult_client(inp, outp, v_in, order_w_x=True):
    """
    Client side of a matrix multiplication (Gemm):
    computes the client's output share from its v tensor.
    """
    tmp = minionn.VectorInt([])
    cpp_v = _get_tensor(v_in)

    # Calculate dimensions as a [m,n,o] list
    dims = [
        _get_tensor_dim(inp[0])[0],  # first dimension of w
        _get_tensor_dim(inp[0])[1],  # second dimension of w
        _get_tensor_dim(inp[1])[1]   # second dimension of x
    ]

    logger.debug("Client Gemm with dimensions " + str(dims) +
                 " and actual dims " + str(_get_tensor_dim(inp[0])) + " and " +
                 str(_get_tensor_dim(inp[1])))

    # Compute and store
    minionn.matrixmul_simple(cpp_v, dims[1], dims[2], dims[0], tmp)

    # Floor the resulting vector to reverse the fractional shifting
    # (one fractional multiplication took place, hence the exponent 1)
    minionn.vector_floor(
        tmp,
        pow(config.fractional_base, 1) * config.fractional_downscale)

    my_outp = outp
    # If the order of operations is reversed, the result is stored
    # transposed, so mark the output name with a trailing "T"
    if not order_w_x:
        my_outp += "T"

    _set_tensor(my_outp, tmp, [dims[0], dims[2]])
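For intuition, a plain-numpy sketch (not the minionn bindings) of the dimension bookkeeping above: dims is [m, n, o] and the output is stored as an m x o tensor. The concrete shapes are hypothetical:

import numpy as np

W = np.zeros((3, 4))  # m x n
x = np.zeros((4, 2))  # n x o
dims = [W.shape[0], W.shape[1], x.shape[1]]  # [m, n, o] == [3, 4, 2]
assert (W @ x).shape == (dims[0], dims[2])   # output shape [m, o]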
Example #3
def server_prepare_w(w_list, pkey):
    """
    Prepares the W to send over to the client.
    This W contains all w from every matrix multiplication
     and is encrypted with the server's public key.
    Arranging the Ws is done doing the following:
     For each m x n * n x o matrix multiplication,
      this multiplication's W has every row of w repeated o times.
     Each multiplication's W is then attached to the overall W.

     Input: 
      - w_list: List of tuples:(name of W, dimensions of matrix multiplication [m,n,o])
      - public key of server
    """

    # We use numpy to properly arrange the Ws.
    # In the future, this would perform much better if numpy were
    #  the primary library in use
    overall_w = []
    for (w, dim) in w_list:
        # Get list as reshaped numpy array
        tensor = get_cpp_tensor(w, reshape=True)

        # Repeat every row of w o times (once per client column)
        for dm in range(0, dim[0]):
            for do in range(0, dim[2]):
                overall_w.extend(tensor[dm].tolist())

    if config.debug_mode:
        logger.debug("W has size " + str(len(overall_w)))
        logger.debug("W starts with " +
                     str(overall_w[:config.debug_print_length_long]) +
                     " and ends with " +
                     str(overall_w[-config.debug_print_length_long:]))

    return minionn.encrypt_w(minionn.VectorInt(overall_w), pkey)
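To illustrate the arrangement, a small numpy-only sketch with a hypothetical 2x2 * 2x3 multiplication (m=2, n=2, o=3), showing how each row of w is repeated o times:

import numpy as np

w = np.array([[1, 2], [3, 4]])  # m x n = 2 x 2
m, n, o = 2, 2, 3
overall_w = []
for dm in range(m):
    for _ in range(o):  # repeat row dm once per client column
        overall_w.extend(w[dm].tolist())
print(overall_w)  # [1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4]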
Example #4
def matrix_mult(inp, outp, instance_u, order_w_x=True):
    """
    calculates W*x + U + b or
    if order_w_x is False, calculates (W' * X' + U)' + b
    """
    tmp = minionn.VectorInt([])

    cpp_w = _get_tensor(inp[0])
    cpp_x = _get_tensor(inp[1])
    cpp_b = _get_tensor(inp[2])

    my_outp = outp

    # Calculate dimensions as a [m,n,o] list
    dims = [
        _get_tensor_dim(inp[0])[0],  # first dimension of w
        _get_tensor_dim(inp[0])[1],  # second dimension of w
        _get_tensor_dim(inp[1])[1]   # second dimension of x
    ]

    if config.debug_mode:
        logger.debug("U is " + str(instance_u))

    b_string = "(b ROW wise)"
    if not order_w_x:
        b_string = "(b COLUMN wise)"

    logger.debug("Performing cpp matrix multiplication " + b_string +
                 " with " + str(inp) + " to " + my_outp +
                 " with the following dimensions " +
                 str(_get_tensor_dim(inp[0])[0]) + "x" +
                 str(_get_tensor_dim(inp[0])[1]) + " * " +
                 str(_get_tensor_dim(inp[1])[0]) + "x" +
                 str(_get_tensor_dim(inp[1])[1]))

    # Compute based on the order of W and x
    if order_w_x:
        # Normal order, calculate W*x + U + b
        minionn.matrixmul(cpp_w, cpp_b, instance_u, cpp_x, dims[1], dims[2],
                          dims[0], tmp)
        # Output dimensions are the ones that we were given:
        #  first dimension of w and second dimension of x
    else:
        # Reversed order: (W' * x' + U + b')'
        minionn.matrixmul_b_columns(cpp_w, cpp_b, instance_u, cpp_x, dims[1],
                                    dims[2], dims[0], tmp)

        # As we received W' and x', the output now has the dimensions of the
        #  first dimension of x and the second dimension of w (both transposed).
        # Keep in mind that the order is reversed because we store the
        #  transpose of the final output.
        my_outp = my_outp + "T"

    # Floor the resulting vector to reverse the fractional shifting and store it
    # (one fractional multiplication took place, hence the exponent 1)
    minionn.vector_floor(
        tmp,
        pow(config.fractional_base, 1) * config.fractional_downscale)

    _set_tensor(my_outp, tmp, [dims[0], dims[2]])
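The reversed order relies on the transposition identity (W*x + b)' = x'*W' + b'. A numpy-only sanity check of that identity (U omitted, shapes hypothetical):

import numpy as np

W = np.arange(6).reshape(2, 3)   # m x n
x = np.arange(12).reshape(3, 4)  # n x o
b = np.arange(2).reshape(2, 1)   # one bias per output row

normal = W @ x + b               # b added row-wise
reversed_ = (x.T @ W.T + b.T).T  # b' added column-wise, result transposed
assert (normal == reversed_).all()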
Example #5
def relu_server(inp, outp):
    """Server side of the ReLU activation on the tensor named inp."""
    # Prepare vectors
    xs = _get_tensor(inp)
    ys = minionn.VectorInt([])
    dims = _get_tensor_dim(inp)

    # Calculate num of elements in vector
    num = reduce(mul, dims, 1)

    # Execute relu
    minionn.relu_server(num, xs, ys)

    # Store ys. Dims did not change
    _set_tensor(outp, ys, dims)
Example #6
def relu_client(inp, outp, responsible_r):
    """Client side of the ReLU activation, using the r tensor named responsible_r."""
    # Prepare vectors
    xc = _get_tensor(inp)
    yc = minionn.VectorInt([])
    dims = _get_tensor_dim(inp)
    rc = _get_tensor(responsible_r)

    # Calculate num of elements in vector
    num = reduce(mul, dims, 1)

    # Execute relu
    minionn.relu_client(num, xc, rc, yc)

    # Store yc. Dims did not change
    _set_tensor(outp, yc, dims)
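Both ReLU wrappers compute the element count by multiplying all dimensions; the imports below match the reduce/mul calls above, which are assumed to come from functools and operator at module level. A minimal sketch:

from functools import reduce
from operator import mul

dims = [2, 3, 4]            # example tensor shape
num = reduce(mul, dims, 1)  # 2 * 3 * 4 = 24 elements when flattened
assert num == 24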
Example #7
def extract_sum(inp, dimensions, offset):
    """
    Extracts the sum of the tensor of shape dimension (beginning
    at offset) and returns it.
    dim is assuming a list for [m, n, o] for the matrix calculation mxn * nxo
    This is equal to crow, ccol, srow where server matrix gets multiplied with client matrix
    """
    tmp = minionn.VectorInt([])
    minionn.extract_sum(inp, tmp, dimensions[1], dimensions[2], dimensions[0],
                        offset)

    logger.debug("Extract sum: Extracted with offset " + str(offset) +
                 " and dimensions " + str(dimensions))
    if config.debug_mode:
        logger.debug("Extracted U starts with " +
                     str(list(tmp)[:config.debug_print_length_long]) +
                     " and ends with " +
                     str(list(tmp)[-config.debug_print_length_long:]))
    return tmp
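A hedged sketch of how a caller might walk the big decrypted U with offsets; gemm_list is a hypothetical list of (name, dims) tuples, and the assumption that each Gemm consumes m*n*o slots mirrors the W arrangement in server_prepare_w:

def walk_u(decrypted_u, gemm_list):
    # gemm_list: hypothetical list of (name, [m, n, o]) tuples
    offset = 0
    for name, (m, n, o) in gemm_list:
        u = extract_sum(decrypted_u, [m, n, o], offset)
        offset += m * n * o  # assumed slot count per Gemm (m*o rows of n)
        yield name, u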
Example #8
def _transpose(inp):
    """
    Takes the input vector from cpp_vectors, reshapes it into 
    the dimensions given, transposes the matrix, and creates a new,
    flattened, cpp vector as "<inp>T" with <inp> being the input string.
    """
    # Calculate new name (to prevent double T namings)
    new_name = _tensor_get_base_name(inp)
    if not _tensor_is_transposed(inp):
        new_name += "T"
    logger.debug("Transposing " + inp + " to output " + new_name)

    # Get vec and dim
    vec_in = list(cpp_tensors[inp][0])
    dim_in = cpp_tensors[inp][1]

    # Transpose the reshaped matrix
    reshaped = np.reshape(vec_in, dim_in)
    transposed = np.transpose(reshaped)
    dim_out = list(transposed.shape)

    # Flatten and store
    _set_tensor(new_name, minionn.VectorInt(transposed.flatten().tolist()),
                dim_out)
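A numpy-only round trip of the reshape/transpose/flatten dance above, for a hypothetical 2x3 input:

import numpy as np

vec_in = [1, 2, 3, 4, 5, 6]
dim_in = [2, 3]
transposed = np.transpose(np.reshape(vec_in, dim_in))
print(list(transposed.shape))         # [3, 2]
print(transposed.flatten().tolist())  # [1, 4, 2, 5, 3, 6]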
Example #9
def vector_sub(vec_a, vec_b):
    cpp_a = minionn.VectorInt(vec_a)
    cpp_b = minionn.VectorInt(vec_b)
    return minionn.vector_sub(cpp_a, cpp_b)
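Presumably this wraps an element-wise subtraction in the ring; a plain-Python equivalent sketch (assuming modulo_pmax reduces into the signed ring, as in put_cpp_tensor):

def vector_sub_sketch(vec_a, vec_b):
    # Element-wise a - b, reduced like the cpp side presumably does
    return [modulo_pmax(a - b) for a, b in zip(vec_a, vec_b)]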
Example #10
def client_precomputation(encW, slot_size, w_list):
    """
    Performs the client precomputation.
    This takes the encrypted W from the server and generates
     a v and r for each matrix multiplication.
     r has the shape of x in the W*x multiplication (n x o)
     v has the shape of m x n x o (which gets summed up to n x o later during the client matrix multiplication)
    
    As the r and v values are needed later, they are stored as r0,v0,r1,v1,.. tensors in the tensor dictionary.

    Input:
     - encrypted W
     - slot size
     - w_list: List of tuples:(name of W, dimensions of matrix multiplication [m,n,o])
    Output:
     - encrypted U that can be sent back to the server
    """
    logger.info("Started Client Precomputation.")

    # Use numpy to generate r and v
    client_randoms = []
    for (w, dim) in w_list:
        # Generate v
        v = np.random.randint(config.PMAX,
                              dtype='uint64',
                              size=(dim[0], dim[1], dim[2]))

        if not config.random_v:
            # Allow the construction of a static v in debug mode
            v = np.zeros((dim[0], dim[1], dim[2]), dtype='uint64')

        # Generate r in column-major order
        # We will need to transpose r before using it later, but for the
        #  precomputation, column-major order is required
        r = np.random.randint(config.PMAX,
                              dtype='uint64',
                              size=(dim[2], dim[1]))

        if not config.random_r:
            # Allow the construction of a static r in debug mode
            r = np.ones((dim[2], dim[1]), dtype='uint64')

        client_randoms.append((r, v))

    logger.debug(" - Generated r and v values:")
    for (r, v) in client_randoms:
        logger.debug(" -- r size " + str(r.shape) + " v size " + str(v.shape))

    # Now assemble the big r and v that are used for precomputation
    assembled_R = []
    assembled_V = []
    for i in range(0, len(w_list)):  # For every Gemm
        # Assemble R by repeating the rows of r_i for every row of W (m times)
        for dm in range(0, w_list[i][1][0]):      # For every server row m (W row)
            for do in range(0, w_list[i][1][2]):  # For every client column o (x col)
                # Append a row of r (a column of the logical r, since r is
                #  stored transposed; the matrix multiplication takes a row
                #  times a column)
                assembled_R.extend(client_randoms[i][0][do].tolist())

        # Assemble V by just appending all v's after each other
        assembled_V.extend(client_randoms[i][1].flatten().tolist())

    if config.debug_mode:
        logger.debug(" - Assembled big R: Size " + str(len(assembled_R)) +
                     "; starts with " +
                     str(assembled_R[:config.debug_print_length_long]))
        logger.debug(" - Assembled big V: Size " + str(len(assembled_V)) +
                     "; starts with " +
                     str(assembled_V[:config.debug_print_length_long]))

    # Now we need to transpose the r matrices so that they can be used later
    #  (remember, we used r as columns earlier for the matrix multiplication
    #  with W)
    logger.debug(" - Transposing r values:")
    for i in range(0, len(client_randoms)):
        # Transpose r
        client_randoms[i] = (client_randoms[i][0].T, client_randoms[i][1])

        # And convert the uint numpy arrays to int cpp vectors for later use.
        #  NOTE: We use a modulo with PMAX here to convert from uint to int.
        #  This is the same conversion the cpp side performs for the
        #  homomorphic encryption: the precomputation needs uint64, and
        #  everything afterwards needs int64.
        iR = minionn.VectorInt(
            [modulo_pmax(r) for r in client_randoms[i][0].flatten().tolist()])
        _set_tensor("initial_r" + str(i), iR, list(client_randoms[i][0].shape))

        iV = minionn.VectorInt(
            [modulo_pmax(v) for v in client_randoms[i][1].flatten().tolist()])
        _set_tensor("v" + str(i), iV, list(client_randoms[i][1].shape))

        logger.debug(" -- r" + str(i) + " now has size " +
                     str(client_randoms[i][0].shape) + " v" + str(i) +
                     " size " + str(client_randoms[i][1].shape))

    # Generate assembled uint vectors
    uR = minionn.VectorUInt(assembled_R)
    uV = minionn.VectorUInt(assembled_V)

    # Use them for the client precomputation
    encU = minionn.client_precomputation(encW, uR, uV)

    logger.info("Client Precomputation success.")

    # return U
    return encU
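A numpy-only sketch of the R assembly for one hypothetical Gemm with m=2, n=2, o=3, mirroring the loop above: the o rows of the column-major r are cycled once per row of W:

import numpy as np

m, n, o = 2, 2, 3
r = np.arange(o * n).reshape(o, n)  # column-major r, shape o x n
assembled_R = []
for dm in range(m):      # for every row of W
    for do in range(o):  # for every column of x
        assembled_R.extend(r[do].tolist())
print(assembled_R)  # [0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5]: m copies of r's rows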
Example #11
def server_decrypt_u(encU, skey):
    """Decrypts the U received from the client, using the server's secret key."""
    tmp = minionn.VectorInt([])
    minionn.decrypt_w(encU, skey, tmp)
    return tmp
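Hedged usage sketch of the server-side round trip; receive_from_client and server_secret_key are hypothetical stand-ins for the application's transport and key handling:

# Hypothetical transport helper returning the client's encrypted U
encU = receive_from_client()
u_values = server_decrypt_u(encU, server_secret_key)
# u_values is a plain minionn.VectorInt holding all U values, ready to be
# sliced per Gemm with extract_sum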