Example 1
def sum(*summands: List[Variable], axes=None):
    '''
    This function can compute an elementwise or axiswise sum of
    one or more inputs.

    Parameters
    ----------
    summands: Variable(s)
        The Variable(s) over which to take the sum
       
    axes: tuple[int]
        The axes along which the sum is taken

    '''

    out = Variable()
    for expr in summands:
        if not isinstance(expr, Variable):
            raise TypeError(expr, " is not a Variable object")
        out.add_dependency_node(expr)

    if axes is None:
        if len(summands) == 1:
            out.build = lambda: SingleTensorSumComp(
                in_name=summands[0].name,
                shape=summands[0].shape,
                out_name=out.name,
                val=summands[0].val,
            )
        else:
            out.shape = expr.shape
            out.build = lambda: MultipleTensorSumComp(
                in_names=[expr.name for expr in summands],
                shape=expr.shape,
                out_name=out.name,
                vals=[expr.val for expr in summands],
            )
    else:
        output_shape = np.delete(expr.shape, axes)
        out.shape = tuple(output_shape)

        if len(summands) == 1:
            out.build = lambda: SingleTensorSumComp(
                in_name=expr.name,
                shape=expr.shape,
                out_name=out.name,
                out_shape=out.shape,
                axes=axes,
                val=summands[0].val,
            )
        else:
            out.build = lambda: MultipleTensorSumComp(
                in_names=[expr.name for expr in summands],
                shape=expr.shape,
                out_name=out.name,
                out_shape=out.shape,
                axes=axes,
                vals=[expr.val for expr in summands],
            )
    return out
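For reference, a minimal NumPy sketch of the two main behaviors this wrapper builds (elementwise sum of several same-shaped tensors, or an axiswise sum of one tensor); the shapes and values below are made up for illustration:

import numpy as np

a = np.arange(24.0).reshape(2, 3, 4)
b = np.ones((2, 3, 4))

elementwise = a + b                 # like sum(a, b) with axes=None
axiswise = np.sum(a, axis=(0, 2))   # like sum(a, axes=(0, 2)); result shape (3,)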
Example 2
def expand(expr: Variable, shape: tuple, indices=None):

    if not isinstance(expr, Variable):
        raise TypeError(expr, " is not a Variable object")

    if indices is not None:
        if not isinstance(indices, str):
            raise TypeError(indices, " is not a str or None")

        if '->' not in indices:
            raise ValueError(indices, " is invalid")

    if indices is not None:
        in_indices, out_indices = indices.split('->')
        expand_indices = []
        for i in range(len(out_indices)):
            index = out_indices[i]

            if index not in in_indices:
                expand_indices.append(i)

    out = Variable()
    out.shape = shape
    out.add_dependency_node(expr)

    if expr.shape != (1, ):
        if indices is None:
            raise ValueError('If expanding something other than a scalar ' +
                             'indices must be given')
        (
            _,
            _,
            _,
            in_shape,
            _,
            _,
        ) = decompose_shape_tuple(shape, expand_indices)

        if in_shape != expr.shape:
            raise ValueError('Shape or indices is invalid')

        out.build = lambda: ArrayExpansionComp(
            shape=shape,
            expand_indices=expand_indices,
            in_name=expr.name,
            out_name=out.name,
            val=expr.val,
        )
    else:
        out.build = lambda: ScalarExpansionComp(
            shape=shape,
            in_name=expr.name,
            out_name=out.name,
        )
    return out
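A rough NumPy analogue of the two branches (expansion along new indices such as 'ij->ijk', and scalar expansion); the shapes here are illustrative only:

import numpy as np

x = np.arange(6.0).reshape(2, 3)
# 'ij->ijk': the new index k is broadcast to the requested length (4 here)
expanded = np.broadcast_to(x[:, :, np.newaxis], (2, 3, 4))

s = np.array([7.0])
scalar_expanded = np.broadcast_to(s, (2, 3))   # the scalar branch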
Example 3
def einsum_new_api(*operands: List[Variable],
                   operation: List[tuple],
                   partial_format='dense'):
    '''
    The Einstein Summation function performs the equivalent of numpy.einsum using a new API.

    Parameters
    ----------
    operands: Variable(s)
        The Variable(s) which you would like to perform an einsum with.
       
    operation: list[tuple]
        Specifies the subscripts for summation as a list of tuples
    
    partial_format: str
        Denotes whether to compute 'dense' partials or 'sparse' partials 
        
    '''
    out = Variable()
    for expr in operands:
        if not isinstance(expr, Variable):
            raise TypeError(expr, " is not a Variable object")
        out.add_dependency_node(expr)
    scalar_output = False
    if len(operands) == len(operation):
        scalar_output = True
    operation_aslist, operation_string = new_einsum_subscripts_to_string_and_list(
        operation, scalar_output=scalar_output)

    shape = compute_einsum_shape(operation_aslist,
                                 [expr.shape for expr in operands])
    out.shape = shape

    if partial_format == 'dense':
        out.build = lambda: EinsumComp(
            in_names=[expr.name for expr in operands],
            in_shapes=[expr.shape for expr in operands],
            out_name=out.name,
            operation=operation_string,
            out_shape=shape,
            in_vals=[expr.val for expr in operands],
        )
    elif partial_format == 'sparse':
        out.build = lambda: SparsePartialEinsumComp(
            in_names=[expr.name for expr in operands],
            in_shapes=[expr.shape for expr in operands],
            out_name=out.name,
            operation=operation_string,
            out_shape=shape,
            in_vals=[expr.val for expr in operands],
        )
    else:
        raise Exception('partial_format should be either dense or sparse')
    return out
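As a sanity check, the tuple-based operation maps onto an ordinary NumPy subscript string; for example, a plain matrix product corresponds to 'ij,jk->ik'. The exact tuple convention is defined by new_einsum_subscripts_to_string_and_list, which is not shown in this snippet:

import numpy as np

A = np.random.rand(3, 4)
B = np.random.rand(4, 5)
C = np.einsum('ij,jk->ik', A, B)   # NumPy equivalent of the generated operation string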
Example 4
def dot(expr1: Variable, expr2: Variable, axis=None):
    '''
    This function computes the dot product between two inputs.

    Parameters
    ----------
    expr1: Variable
        The first input for the dot product.
    
    expr2: Variable
        The second input for the dot product.     

    axis: int
        The axis along which the dot product is taken. The size of the
        inputs along this axis must be 3.
    '''

    if not (isinstance(expr1, Variable) and isinstance(expr2, Variable)):
        raise TypeError("Arguments must both be Variable objects")
    out = Variable()
    out.add_dependency_node(expr1)
    out.add_dependency_node(expr2)

    if expr1.shape != expr2.shape:
        raise Exception("The shapes of the inputs must match!")

    if len(expr1.shape) == 1:
        out.build = lambda: VectorInnerProductComp(
            in_names=[expr1.name, expr2.name],
            out_name=out.name,
            in_shape=expr1.shape[0],
            in_vals=[expr1.val, expr2.val],
        )
    else:
        if expr1.shape[axis] != 3:
            raise Exception(
                "The size of the inputs along the specified axis must be 3"
            )
        else:
            out.shape = tuple(np.delete(list(expr1.shape), axis))

            out.build = lambda: TensorDotProductComp(
                in_names=[expr1.name, expr2.name],
                out_name=out.name,
                in_shape=expr1.shape,
                axis=axis,
                out_shape=out.shape,
                in_vals=[expr1.val, expr2.val],
            )
    return out
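An illustrative NumPy version of the two cases (1-D inner product, or a dot product taken along a length-3 axis of matching tensors):

import numpy as np

u = np.array([1.0, 2.0, 3.0])
v = np.array([4.0, 5.0, 6.0])
inner = np.dot(u, v)               # scalar result for 1-D inputs

A = np.random.rand(5, 3)
B = np.random.rand(5, 3)
rowwise = np.sum(A * B, axis=1)    # dot product along axis=1; result shape (5,)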
Example 5
def pnorm(expr, pnorm_type=2, axis=None):
    '''
    This function computes the pnorm of the input.

    Parameters
    ----------
    expr: Variable
        The Variable over which to take the pnorm
       
    pnorm_type: int
        Specifies which pnorm to compute. The value must be a positive, even integer.
    
    axis: int
        Specifies the axis over which to take the pnorm
    '''

    if not isinstance(expr, Variable):
        raise TypeError(expr, " is not a Variable object")
    if axis is not None:
        if not isinstance(axis, int) and not isinstance(axis, tuple):
            raise TypeError("axis must be an integer or tuple of integers")
    out = Variable()
    out.add_dependency_node(expr)

    if pnorm_type % 2 != 0 or pnorm_type <= 0:
        raise Exception(pnorm_type, " must be a positive, even number")

    else:
        if axis is None:
            out.build = lambda: VectorizedPnormComp(
                shape=expr.shape,
                in_name=expr.name,
                out_name=out.name,
                pnorm_type=pnorm_type,
                val=expr.val,
            )
        else:
            output_shape = np.delete(expr.shape, axis)
            out.shape = tuple(output_shape)

            out.build = lambda: VectorizedAxisWisePnormComp(
                shape=expr.shape,
                in_name=expr.name,
                out_shape=out.shape,
                out_name=out.name,
                pnorm_type=pnorm_type,
                axis=axis if isinstance(axis, tuple) else (axis, ),
                val=expr.val,
            )
    return out
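A small NumPy sketch of the computation for an even p, over the whole tensor or along one axis:

import numpy as np

x = np.random.rand(4, 5)
p = 4
whole = np.sum(x ** p) ** (1.0 / p)             # p-norm over all entries
axiswise = np.sum(x ** p, axis=1) ** (1.0 / p)  # p-norm along axis 1; shape (4,)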
Example 6
def rotmat(expr: Variable, axis: str):
    '''
    This function creates a rotation matrix depending on the input value and the axis.

    Parameters
    ----------
    expr: Variable
        The value that determines by how much the rotation matrix rotates
       
    axis: str
        The axis about which the rotation matrix rotates. Can be specified
        as 'x', 'y', or 'z'.

    '''
    if not isinstance(expr, Variable):
        raise TypeError(expr, " is not a Variable object")
    out = Variable()
    out.add_dependency_node(expr)

    if expr.shape == (1, ):
        out.shape = (3, 3)

    else:
        out.shape = expr.shape + (3, 3)

    out.build = lambda: RotationMatrixComp(
        shape=expr.shape,
        in_name=expr.name,
        out_name=out.name,
        axis=axis,
        val=expr.val,
    )
    return out
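For orientation, below is the standard 3x3 rotation matrix about the 'z' axis for a scalar angle. The exact sign convention used by RotationMatrixComp is not visible in this snippet, so treat this as an assumption:

import numpy as np

theta = np.pi / 6          # rotation angle in radians (illustrative)
c, s = np.cos(theta), np.sin(theta)
Rz = np.array([[c, -s, 0.0],
               [s,  c, 0.0],
               [0.0, 0.0, 1.0]])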
Example 7
def reorder_axes(expr: Variable, operation: str):
    '''
    The function reorders the axes of the input.

    Parameters
    ----------
    expr: Variable
        The Variable that will have its axes reordered. 
       
    operation: str
        Specifies the reordering in einsum-style subscript notation.
        Ex: 'ijk->kij'
    
    '''
    if not isinstance(expr, Variable):
        raise TypeError(expr, " is not a Variable object")
    out = Variable()
    out.add_dependency_node(expr)

    # Computing out_shape
    new_axes_locations = compute_new_axes_locations(expr.shape, operation)
    out.shape = tuple(expr.shape[i] for i in new_axes_locations)

    out.build = lambda: ReorderAxesComp(
        in_name=expr.name,
        in_shape=expr.shape,
        out_name=out.name,
        out_shape=out.shape,
        operation=operation,
        new_axes_locations=new_axes_locations,
        val=expr.val,
    )
    return out
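The operation string behaves like a NumPy einsum/transpose axis permutation, for example:

import numpy as np

x = np.random.rand(2, 3, 4)
y = np.einsum('ijk->kij', x)   # same as np.transpose(x, (2, 0, 1))
assert y.shape == (4, 2, 3)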
Example 8
def reshape(expr: Variable, new_shape: tuple):
    '''
    This function reshapes the input into a new shape. 

    Parameters
    ----------
    expr: Variable
        The Variable which you want to reshape 
       
    new_shape: tuple[int] 
        A tuple of ints specifying the new shape desired
    '''
    if not isinstance(expr, Variable):
        raise TypeError(expr, " is not a Variable object")
    out = Variable()
    out.shape = new_shape
    out.add_dependency_node(expr)
    out.build = lambda: ReshapeComp(
        shape=expr.shape,
        in_name=expr.name,
        out_name=out.name,
        new_shape=out.shape,
        val=expr.val,
    )
    return out
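A NumPy one-liner showing the same idea; the total number of elements must be unchanged:

import numpy as np

x = np.arange(12.0).reshape(3, 4)
y = x.reshape((2, 6))   # new_shape must hold the same 12 elements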
Example 9
def matvec(mat1, vec1):
    '''
    This function can compute a matrix-vector multiplication, similar to the 
    numpy counterpart.

    Parameters
    ----------
    mat1: Variable
        The matrix needed for the matrix-vector multiplication
       
    vec1: Variable
        The vector needed for the matrix-vector multiplication

    '''
    if not (isinstance(mat1, Variable) and isinstance(vec1, Variable)):
        raise TypeError("Arguments must both be Variable objects")
    out = Variable()
    out.add_dependency_node(mat1)
    out.add_dependency_node(vec1)

    if mat1.shape[1] == vec1.shape[0] and len(vec1.shape) == 1:

        out.shape = (mat1.shape[0], )

        out.build = lambda: MatVecComp(
            in_names=[mat1.name, vec1.name],
            out_name=out.name,
            in_shapes=[mat1.shape, vec1.shape],
            in_vals=[mat1.val, vec1.val],
        )

    else:
        raise Exception("Cannot multiply: ", mat1.shape, "by", vec1.shape)
    return out
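The shape rule mirrors NumPy matrix-vector multiplication:

import numpy as np

M = np.random.rand(3, 4)
v = np.random.rand(4)
y = M @ v   # valid because M.shape[1] == v.shape[0]; result shape (3,)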
Example 10
def einsum(*operands: List[Variable], subscripts: str, partial_format='dense'):
    '''
    The Einstein Summation function performs the equivalent of numpy.einsum

    Parameters
    ----------
    operands: Variable(s)
        The Variable(s) which you would like to perform an einsum with.
       
    subscripts: str
        Specifies the subscripts for summation as a comma-separated list of subscript labels
    
    partial_format: str
        Denotes whether to compute 'dense' partials or 'sparse' partials 
        
    '''
    out = Variable()
    for expr in operands:
        if not isinstance(expr, Variable):
            raise TypeError(expr, " is not a Variable object")
        out.add_dependency_node(expr)
    operation_aslist = einsum_subscripts_tolist(subscripts)
    shape = compute_einsum_shape(operation_aslist,
                                 [expr.shape for expr in operands])
    out.shape = shape

    if partial_format == 'dense':
        out.build = lambda: EinsumComp(
            in_names=[expr.name for expr in operands],
            in_shapes=[expr.shape for expr in operands],
            out_name=out.name,
            operation=subscripts,
            out_shape=shape,
            in_vals=[expr.val for expr in operands],
        )
    elif partial_format == 'sparse':
        out.build = lambda: SparsePartialEinsumComp(
            in_names=[expr.name for expr in operands],
            in_shapes=[expr.shape for expr in operands],
            out_name=out.name,
            operation=subscripts,
            out_shape=shape,
            in_vals=[expr.val for expr in operands],
        )
    else:
        raise Exception('partial_format should be either dense or sparse')
    return out
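The subscripts argument is the usual NumPy einsum string; for instance, a matrix-vector product looks like:

import numpy as np

A = np.random.rand(2, 3)
x = np.random.rand(3)
y = np.einsum('ij,j->i', A, x)   # result shape (2,)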
Example 11
def matmat(mat1, mat2):
    '''
    This function can compute a matrix-matrix multiplication, similar to the 
    numpy counterpart.

    Parameters
    ----------
    mat1: Variable
        The first input for the matrix-matrix multiplication
       
    mat2: Variable
        The second input for the matrix-matrix multiplication

    '''

    if not (isinstance(mat1, Variable) and isinstance(mat2, Variable)):
        raise TypeError("Arguments must both be Variable objects")
    out = Variable()
    out.add_dependency_node(mat1)
    out.add_dependency_node(mat2)

    if mat1.shape[1] == mat2.shape[0] and len(mat2.shape) == 2:
        # Compute the output shape if both inputs are matrices
        out.shape = (mat1.shape[0], mat2.shape[1])

        out.build = lambda: MatMatComp(
            in_names=[mat1.name, mat2.name],
            out_name=out.name,
            in_shapes=[mat1.shape, mat2.shape],
            in_vals=[mat1.val, mat2.val],
        )

    elif mat1.shape[1] == mat2.shape[0] and len(mat2.shape) == 1:
        out.shape = (mat1.shape[0], 1)

        mat2_shape = (mat2.shape[0], 1)

        out.build = lambda: MatMatComp(
            in_names=[mat1.name, mat2.name],
            out_name=out.name,
            in_shapes=[mat1.shape, mat2_shape],
            in_vals=[mat1.val, mat2.val.reshape(mat2_shape)],
        )
    else:
        raise Exception("Cannot multiply: ", mat1.shape, "by", mat2.shape)
    return out
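An illustrative NumPy version of the two branches (matrix-matrix, and matrix times a 1-D vector promoted to a column):

import numpy as np

A = np.random.rand(3, 4)
B = np.random.rand(4, 2)
C = A @ B                     # shape (3, 2)

v = np.random.rand(4)
Cv = A @ v.reshape(4, 1)      # vector branch: treated as a (4, 1) matrix; shape (3, 1)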
Example 12
def outer(expr1: Variable, expr2: Variable):
    '''
    This function computes the outer product between two inputs.

    Parameters
    ----------
    expr1: Variable
        The first input for the outer product.
    
    expr2: Variable
        The second input for the outer product.     

    '''
    if not isinstance(expr1, Variable):
        raise TypeError(expr1, " is not a Variable object")
    elif not isinstance(expr2, Variable):
        raise TypeError(expr2, " is not a Variable object")
    out = Variable()
    out.add_dependency_node(expr1)
    out.add_dependency_node(expr2)

    if len(expr1.shape) == 1 and len(expr2.shape) == 1:
        out.shape = tuple(list(expr1.shape) + list(expr2.shape))

        out.build = lambda: VectorOuterProductComp(
            in_names=[expr1.name, expr2.name],
            out_name=out.name,
            in_shapes=[expr1.shape[0], expr2.shape[0]],
            in_vals=[expr1.val, expr2.val],
        )

    else:
        out.shape = tuple(list(expr1.shape) + list(expr2.shape))

        out.build = lambda: TensorOuterProductComp(
            in_names=[expr1.name, expr2.name],
            out_name=out.name,
            in_shapes=[expr1.shape, expr2.shape],
            in_vals=[expr1.val, expr2.val],
        )
    return out
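In NumPy terms the two branches correspond to np.outer for vectors and a rank-0 tensordot for general tensors:

import numpy as np

u = np.array([1.0, 2.0, 3.0])
v = np.array([4.0, 5.0])
vec_outer = np.outer(u, v)                  # shape (3, 2)

A = np.random.rand(2, 3)
B = np.random.rand(4)
tensor_outer = np.tensordot(A, B, axes=0)   # shape (2, 3, 4)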
Example 13
def sinh(expr):
    '''
    This function computes the elementwise hyperbolic sine of the input.
    '''
    if not isinstance(expr, Variable):
        raise TypeError(expr, " is not a Variable object")
    out = Variable()
    out.shape = expr.shape
    out.add_dependency_node(expr)
    out.build = lambda: SinhComp(
        shape=expr.shape,
        in_name=expr.name,
        out_name=out.name,
        val=expr.val,
    )
    return out
Example 14
def cross(in1, in2, axis: int):
    '''
    This function computes the cross product between two inputs.

    Parameters
    ----------
    in1: Variable
        The first input for the cross product.
    
    in2: Variable
        The second input for the cross product.     

    axis: int
        The axis along which the cross product is taken. The size of the
        inputs along the specified axis must be 3.
    '''

    if not (isinstance(in1, Variable) and isinstance(in2, Variable)):
        raise TypeError("Arguments must both be Variable objects")
    out = Variable()
    out.add_dependency_node(in1)
    out.add_dependency_node(in2)

    if in1.shape != in2.shape:
        raise Exception("The shapes of the inputs must match!")
    else:
        out.shape = in1.shape

    if in1.shape[axis] != 3:
        raise Exception(
            "The size of the inputs along the specified axis must be 3")

    out.build = lambda: CrossProductComp(
        shape=in1.shape,
        in1_name=in1.name,
        in2_name=in2.name,
        out_name=out.name,
        axis=axis,
        in1_val=in1.val,
        in2_val=in2.val,
    )
    return out
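NumPy's cross product accepts the same kind of axis argument, e.g.:

import numpy as np

a = np.random.rand(5, 3)
b = np.random.rand(5, 3)
c = np.cross(a, b, axis=1)   # cross product along the length-3 axis; shape (5, 3)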
Example 15
def if_else(
    condition: Variable,
    expr_true: Variable,
    expr_false: Variable,
):
    if expr_true.shape != expr_false.shape:
        raise ValueError(
            "Variable shapes must be the same for Variable objects for both branches of execution"
        )

    out = Variable()
    out.add_dependency_node(condition)
    out.add_dependency_node(expr_true)
    out.add_dependency_node(expr_false)
    out.build = lambda: ConditionalComponent(
        out_name=out.name,
        condition=condition,
        expr_true=expr_true,
        expr_false=expr_false,
    )
    return out
Example 16
def transpose(expr: Variable):
    '''
    This function can perform the transpose of an input 

    Parameters
    ----------
    expr: Variable
        The input which will be transposed
       
    '''
    if not isinstance(expr, Variable):
        raise TypeError(expr, " is not a Variable object")
    out = Variable()
    out.add_dependency_node(expr)
    out.shape = expr.shape[::-1]
    out.build = lambda: TransposeComp(
        in_name=expr.name,
        in_shape=expr.shape,
        out_name=out.name,
        out_shape=out.shape,
        val=expr.val,
    )
    return out
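Since out.shape = expr.shape[::-1], this matches NumPy's transpose with no axes argument, which reverses all axes:

import numpy as np

x = np.random.rand(2, 3, 4)
y = np.transpose(x)   # shape (4, 3, 2)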
Example 17
def max(*exprs, axis=None, rho=20.):
    '''
    This function can compute an elementwise or axiswise maximum of 
    a single or multiple inputs.

    Parameters
    ----------
    exprs: Variable(s)
        The Variable(s) over which to take the maximum
       
    axis: int
        The axis along which the maximum is taken
    
    rho: float
        This is a smoothing parameter, which dictates how smooth or sharp  
        the maximum is 
    '''

    out = Variable()
    for expr in exprs:
        if not isinstance(expr, Variable):
            raise TypeError(expr, " is not a Variable object")
        out.add_dependency_node(expr)

    if len(exprs) == 1 and axis is not None:
        output_shape = np.delete(exprs[0].shape, axis)
        out.shape = tuple(output_shape)

        out.build = lambda: AxisMaxComp(
            shape=exprs[0].shape,
            in_name=exprs[0].name,
            axis=axis,
            out_name=out.name,
            rho=rho,
            val=exprs[0].val,
        )

    elif len(exprs) > 1 and axis is None:

        shape = exprs[0].shape
        for expr in exprs:
            if shape != expr.shape:
                raise Exception("The shapes of the inputs must match!")

        out.shape = expr.shape

        out.build = lambda: ElementwiseMaxComp(
            shape=expr.shape,
            in_names=[expr.name for expr in exprs],
            out_name=out.name,
            rho=rho,
            vals=[expr.val for expr in exprs],
        )

    elif len(exprs) == 1 and axis is None:

        out.build = lambda: ScalarExtremumComp(
            shape=exprs[0].shape,
            in_name=exprs[0].name,
            out_name=out.name,
            rho=rho,
            lower_flag=False,
            val=exprs[0].val,
        )

    else:
        raise Exception("Do not give multiple inputs and an axis")
    return out
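The rho parameter suggests a differentiable smooth maximum; one common formulation is a log-sum-exp (KS-style) aggregate, sketched below for reference. Whether AxisMaxComp/ElementwiseMaxComp use exactly this expression is not visible in this snippet, so treat it as an assumption:

import numpy as np

x = np.array([1.0, 3.0, 2.0])
rho = 20.0
m = x.max()
# Shifting by m keeps the exponentials numerically stable without changing the result;
# the approximation sharpens toward the true maximum as rho grows.
smooth_max = m + np.log(np.sum(np.exp(rho * (x - m)))) / rho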
Example 18
def average(*operands: List[Variable], axes=None):
    '''
    This function can compute the average of a single input, of multiple
    inputs, or along specified axes.

    Parameters
    ----------
    
    operands: Variable(s)
        The Variable(s) over which to take the average
        

    axes: tuple[int]
        Axes along which to take the average, default value is None

    '''

    out = Variable()
    for expr in operands:
        if not isinstance(expr, Variable):
            raise TypeError(expr, " is not a Variable object")
        out.add_dependency_node(expr)

    if axes is None:
        if len(operands) == 1:
            out.build = lambda: SingleTensorAverageComp(
                in_name=operands[0].name,
                shape=operands[0].shape,
                out_name=out.name,
                val=operands[0].val,
            )
        else:
            out.shape = expr.shape
            out.build = lambda: MultipleTensorAverageComp(
                in_names=[expr.name for expr in operands],
                shape=expr.shape,
                out_name=out.name,
                vals=[expr.val for expr in operands],
            )
    else:
        output_shape = np.delete(expr.shape, axes)
        out.shape = tuple(output_shape)

        if len(operands) == 1:
            out.build = lambda: SingleTensorAverageComp(
                in_name=operands[0].name,
                shape=operands[0].shape,
                out_name=out.name,
                out_shape=out.shape,
                axes=axes,
                val=operands[0].val,
            )
        else:
            out.build = lambda: MultipleTensorAverageComp(
                in_names=[expr.name for expr in operands],
                shape=expr.shape,
                out_name=out.name,
                out_shape=out.shape,
                axes=axes,
                vals=[expr.val for expr in operands],
            )
    return out
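A brief NumPy sketch of the three behaviors (whole-tensor average, elementwise average of several inputs, and axiswise average):

import numpy as np

a = np.arange(6.0).reshape(2, 3)
b = np.ones((2, 3))

whole = np.mean(a)               # average over every entry
multiple = (a + b) / 2.0         # elementwise average of several same-shaped inputs
axiswise = np.mean(a, axis=0)    # average along axis 0; shape (3,)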