Example #1
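The snippets on this page omit their imports. A minimal set they appear to rely on is sketched below; the exact import paths are assumptions based on the names used (np, pytest, UserDefinedFunction, ProcessingMechanism, Logistic, pnlvm) and may differ from the actual test modules.

# Assumed imports for the examples below; paths are best guesses, not taken from the source.
import numpy as np
import pytest

from psyneulink import Logistic, ProcessingMechanism, UserDefinedFunction
from psyneulink.core import llvm as pnlvm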
def test_user_def_func_numpy(op, variable, expected, func_mode, benchmark):
    if op == "TANH":
        def myFunction(variable):
            return np.tanh(variable)
    elif op == "EXP":
        def myFunction(variable):
            return np.exp(variable)
    elif op == "SHAPE":
        def myFunction(variable):
            return variable.shape
    elif op == "ASTYPE_FLOAT":
        def myFunction(variable):
            return variable.astype(float)
    elif op == "ASTYPE_INT":
        # return types cannot be integers, so we cast back to float and check for truncation
        def myFunction(variable):
            return variable.astype(int).astype(float)
    elif op == "NP_MAX":
        def myFunction(variable):
            return np.max(variable)
    elif op == "FLATTEN":
        def myFunction(variable):
            return variable.flatten()

    U = UserDefinedFunction(custom_function=myFunction, default_variable=variable)
    e = pytest.helpers.get_func_execution(U, func_mode)

    val = benchmark(e, variable)
    assert np.allclose(val, expected, equal_nan=True)
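The op, variable, and expected arguments come from a pytest parametrization that is not shown here (func_mode and benchmark are fixtures supplied by the project's conftest and pytest-benchmark). A hypothetical decorator with a few cases, only to illustrate the shape of the inputs:

# Hypothetical parametrization for test_user_def_func_numpy above;
# the real value sets live in the project's test suite.
@pytest.mark.parametrize("op, variable, expected", [
    ("TANH", np.asarray([[1.0, 3.0]]), np.tanh([[1.0, 3.0]])),
    ("EXP", np.asarray([[1.0, 3.0]]), np.exp([[1.0, 3.0]])),
    ("FLATTEN", np.asarray([[1.0, 3.0]]), [1.0, 3.0]),
])
def test_user_def_func_numpy(op, variable, expected, func_mode, benchmark):
    ...  # body as shown above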
Example #2
    def test_user_def_func_builtin(self, op, variable, expected, bin_execute,
                                   benchmark):
        if op == "SUM":

            def myFunction(variable):
                return sum(variable)
        elif op == "LEN":

            def myFunction(variable):
                return len(variable)
        elif op == "LEN_TUPLE":

            def myFunction(variable):
                return len((1, 2))
        elif op == "MAX":

            def myFunction(variable):
                return max(variable)
        elif op == "MAX_MULTI":
            # special cased, since passing in multiple variables without a closure is hard
            def myFunction(_):
                return max(1, 2, 3, 4, 5, 6, -1, -2)

        U = UserDefinedFunction(custom_function=myFunction,
                                default_variable=variable)
        if bin_execute == 'LLVM':
            e = pnlvm.execution.FuncExecution(U).execute
        elif bin_execute == 'PTX':
            e = pnlvm.execution.FuncExecution(U).cuda_execute
        else:
            e = U
        val = benchmark(e, variable)
        assert np.allclose(val, expected)
Example #3
def test_user_def_bin_arith(param1, param2, func, func_mode, benchmark):

    U = UserDefinedFunction(custom_function=func, param1=param1, param2=param2)
    e = pytest.helpers.get_func_execution(U, func_mode)

    val = benchmark(e, 0)
    assert np.allclose(val, func(0, param1=param1, param2=param2))
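Here func itself is parametrized: any callable that takes the variable plus the two keyword parameters works, and the test checks that the Python/compiled execution matches calling the function directly. A hypothetical callable of that shape:

# Hypothetical example of a callable test_user_def_bin_arith could receive.
def add_and_scale(variable, param1, param2):
    return (variable + param1) * param2

# With param1=2 and param2=3 the assertion reduces to
# np.allclose(e(0), add_and_scale(0, param1=2, param2=3)), i.e. the result is 6.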
Example #4
def test_user_def_func_branching(func, var, expected, func_mode, benchmark):

    U = UserDefinedFunction(custom_function=func, default_variable=var, param2=3)
    e = pytest.helpers.get_func_execution(U, func_mode)

    val = benchmark(e, var)
    assert np.allclose(val, expected)
Example #5
def test_user_def_func_cmpop_numpy(op, var1, var2, expected, func_mode, benchmark):
    # we explicitly use np here to ensure that the result is castable to float in the scalar-scalar case
    if op == "Eq":
        def myFunction(variable, var1, var2):
            return np.equal(var1, var2).astype(float)
    elif op == "NotEq":
        def myFunction(variable, var1, var2):
            return np.not_equal(var1, var2).astype(float)
    elif op == "Lt":
        def myFunction(variable, var1, var2):
            return np.less(var1, var2).astype(float)
    elif op == "LtE":
        def myFunction(variable, var1, var2):
            return np.less_equal(var1, var2).astype(float)
    elif op == "Gt":
        def myFunction(variable, var1, var2):
            return np.greater(var1, var2).astype(float)
    elif op == "GtE":
        def myFunction(variable, var1, var2):
            return np.greater_equal(var1, var2).astype(float)

    U = UserDefinedFunction(custom_function=myFunction, default_variable=[0], var1=var1, var2=var2)
    e = pytest.helpers.get_func_execution(U, func_mode)

    val = benchmark(e, [0])
    assert np.allclose(expected, val)
Example #6
    def test_user_def_func_boolop(self, op, bin_execute, benchmark):
        if op == "AND":

            def myFunction(variable):
                var1 = True
                var2 = False
                # compiled UDFs don't support python bool type outputs
                if var1 and var2:
                    return 0.0
                else:
                    return 1.0
        elif op == "OR":

            def myFunction(variable):
                var1 = True
                var2 = False
                # compiled UDFs don't support python bool type outputs
                if var1 or var2:
                    return 1.0
                else:
                    return 0.0

        U = UserDefinedFunction(custom_function=myFunction,
                                default_variable=[0])
        if bin_execute == 'LLVM':
            e = pnlvm.execution.FuncExecution(U).execute
        elif bin_execute == 'PTX':
            e = pnlvm.execution.FuncExecution(U).cuda_execute
        else:
            e = U
        val = benchmark(e, [0])
        assert val == 1.0
Example #7
def test_udf_runtime_params_reset():
    def myFunction(variable, x):
        return variable + x

    U = UserDefinedFunction(custom_function=myFunction, x=0)
    assert U.function(0) == 0
    assert U.function(0, params={'x': 1}) == 1
    assert U.function(0) == 0
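The same reset behavior holds for non-zero defaults: a value passed through params applies only to that call, after which the stored parameter value is used again. A small sketch reusing myFunction from above, with values chosen purely for illustration:

# Assumes the reset behavior demonstrated by the test above.
U2 = UserDefinedFunction(custom_function=myFunction, x=10)
assert U2.function(5) == 15                      # uses the stored x=10
assert U2.function(5, params={'x': -10}) == -5   # x overridden for this call only
assert U2.function(5) == 15                      # stored x=10 is back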
Example #8
    def test_user_def_func_cmpop(self, op, var1, var2, expected, bin_execute,
                                 benchmark):
        # we explicitly use np here to ensure that the result is castable to float in the scalar-scalar case
        if op == "Eq":

            def myFunction(variable, var1, var2):
                if var1 == var2:
                    return 1.0
                else:
                    return 0.0
        elif op == "NotEq":

            def myFunction(variable, var1, var2):
                if var1 != var2:
                    return 1.0
                else:
                    return 0.0
        elif op == "Lt":

            def myFunction(variable, var1, var2):
                if var1 < var2:
                    return 1.0
                else:
                    return 0.0
        elif op == "LtE":

            def myFunction(variable, var1, var2):
                if var1 <= var2:
                    return 1.0
                else:
                    return 0.0
        elif op == "Gt":

            def myFunction(variable, var1, var2):
                if var1 > var2:
                    return 1.0
                else:
                    return 0.0
        elif op == "GtE":

            def myFunction(variable, var1, var2):
                if var1 >= var2:
                    return 1.0
                else:
                    return 0.0

        U = UserDefinedFunction(custom_function=myFunction,
                                default_variable=[0],
                                var1=var1,
                                var2=var2)
        if bin_execute == 'LLVM':
            e = pnlvm.execution.FuncExecution(U).execute
        elif bin_execute == 'PTX':
            e = pnlvm.execution.FuncExecution(U).cuda_execute
        else:
            e = U
        val = benchmark(e, [0])
        assert np.allclose(expected, val)
Example #9
def test_user_def_reward_func(func_mode, benchmark):
    variable = [[1,2,3,4]]
    def myFunction(x,t0=0.48):
        return (x[0][0]>0).astype(float) * (x[0][2]>0).astype(float) / (np.max([x[0][1],x[0][3]]) + t0)
    U = UserDefinedFunction(custom_function=myFunction, default_variable=variable, param=variable)
    e = pytest.helpers.get_func_execution(U, func_mode)

    val = benchmark(e, variable)
    assert np.allclose(val, 0.2232142857142857)
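The expected constant follows directly from the inputs: with variable = [[1, 2, 3, 4]] and t0 = 0.48, both threshold terms are 1.0 and max(2, 4) = 4, so the result is 1.0 * 1.0 / (4 + 0.48).

# Where 0.2232142857142857 comes from.
assert abs(1.0 / 4.48 - 0.2232142857142857) < 1e-12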
Example #10
    def test_user_def_func(self):
        def myFunction(variable, param1, param2):
            return variable * 2 + 3
        U = UserDefinedFunction(custom_function=myFunction, default_variable=[[0, 0]], param2=0)
        myMech = ProcessingMechanism(function=U, size=2, name='myMech')
        # assert 'param1' in myMech.parameter_ports.names # <- FIX reinstate when problem with function params is fixed
        assert 'param2' in myMech.parameter_ports.names
        val = myMech.execute([1, 3])
        assert np.allclose(val, [[5, 9]])
Example #11
    def test_udf_creates_parameter_ports(self):
        def func(input=[[0], [0]], p=0, q=1):
            return (p + q) * input

        m = ProcessingMechanism(default_variable=[[0], [0]],
                                function=UserDefinedFunction(func))

        assert len(m.parameter_ports) == 2
        assert 'p' in m.parameter_ports.names
        assert 'q' in m.parameter_ports.names
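Arguments of the custom function other than the variable (and the reserved params/context arguments) are exposed as parameter ports on the owning mechanism. A hypothetical follow-up check, assuming the ports are initialized from the function defaults p=0 and q=1:

# Hypothetical check, not from the source: port values mirror the function defaults.
assert np.allclose(m.parameter_ports['p'].value, 0)
assert np.allclose(m.parameter_ports['q'].value, 1)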
Example #12
def test_user_def_func_variable_index(func_mode, benchmark):
    def myFunction(variable):
        variable[0][0] = variable[0][0] + 5
        variable[0][1] = variable[0][1] + 7
        return variable

    U = UserDefinedFunction(custom_function=myFunction, default_variable=[[0, 0]])
    e = pytest.helpers.get_func_execution(U, func_mode)

    val = benchmark(e, [[1, 3]])
    assert np.allclose(val, [[6, 10]])
Example #13
    def test_udf_with_pnl_func(self):
        L = Logistic(gain=2)

        def myFunction(variable, params, context):
            return L(variable) + 2

        U = UserDefinedFunction(custom_function=myFunction, default_variable=[[0, 0, 0]])
        myMech = ProcessingMechanism(function=myFunction, size=3, name='myMech')
        val1 = myMech.execute(input=[1, 2, 3])
        val2 = U.execute(variable=[[1, 2, 3]])
        assert np.allclose(val1, val2)
        assert np.allclose(val1, L([1, 2, 3]) + 2)
Example #14
    def test_user_def_func(self, bin_execute, benchmark):
        def myFunction(variable, param1, param2):
            return variable * 2 + param2

        U = UserDefinedFunction(custom_function=myFunction,
                                default_variable=[[0, 0]],
                                param2=3)
        if bin_execute == 'LLVM':
            e = pnlvm.execution.FuncExecution(U).execute
        elif bin_execute == 'PTX':
            e = pnlvm.execution.FuncExecution(U).cuda_execute
        else:
            e = U
        val = benchmark(e, [1, 3])
        assert np.allclose(val, [[5, 9]])
Example #15
    def test_user_def_func_usub(self, variable, bin_execute, benchmark):
        def myFunction(variable, param):
            return -param

        U = UserDefinedFunction(custom_function=myFunction,
                                default_variable=variable,
                                param=variable)
        if bin_execute == 'LLVM':
            e = pnlvm.execution.FuncExecution(U).execute
        elif bin_execute == 'PTX':
            e = pnlvm.execution.FuncExecution(U).cuda_execute
        else:
            e = U
        val = benchmark(e, variable)
        assert np.allclose(val, -variable)
Example #16
    def test_user_def_func_variable_index(self, bin_execute, benchmark):
        def myFunction(variable):
            variable[0][0] = variable[0][0] + 5
            variable[0][1] = variable[0][1] + 7
            return variable

        U = UserDefinedFunction(custom_function=myFunction,
                                default_variable=[[0, 0]])
        if bin_execute == 'LLVM':
            e = pnlvm.execution.FuncExecution(U).execute
        elif bin_execute == 'PTX':
            e = pnlvm.execution.FuncExecution(U).cuda_execute
        else:
            e = U
        val = benchmark(e, [[1, 3]])
        assert np.allclose(val, [[6, 10]])
Example #17
    def test_user_def_func_mul(self, param1, param2, bin_execute, benchmark):
        # default val is same shape as expected output
        def myFunction(_, param1, param2):
            # we only use param1 and param2 to avoid automatic shape changes of the variable
            return param1 * param2

        U = UserDefinedFunction(custom_function=myFunction,
                                param1=param1,
                                param2=param2)
        if bin_execute == 'LLVM':
            e = pnlvm.execution.FuncExecution(U).execute
        elif bin_execute == 'PTX':
            e = pnlvm.execution.FuncExecution(U).cuda_execute
        else:
            e = U
        val = benchmark(e, 0)
        assert np.allclose(val, param1 * param2)
Example #18
    def test_user_def_reward_func(self, bin_execute, benchmark):
        variable = [[1, 2, 3, 4]]

        def myFunction(x, t0=0.48):
            return (x[0][0] > 0).astype(float) * (
                x[0][2] > 0).astype(float) / (np.max([x[0][1], x[0][3]]) + t0)

        U = UserDefinedFunction(custom_function=myFunction,
                                default_variable=variable,
                                param=variable)
        if bin_execute == 'LLVM':
            e = pnlvm.execution.FuncExecution(U).execute
        elif bin_execute == 'PTX':
            e = pnlvm.execution.FuncExecution(U).cuda_execute
        else:
            e = U
        val = benchmark(e, variable)
        assert np.allclose(val, 0.2232142857142857)
Example #19
    def test_user_def_func_branching(self, bin_execute, benchmark):
        def myFunction(variable, param1, param2):
            if variable[0][0] > 0 and variable[0][1] > 0:
                return variable * 2 + param2
            else:
                return variable * -2 + param2

        U = UserDefinedFunction(custom_function=myFunction,
                                default_variable=[[0, 0]],
                                param2=3)
        if bin_execute == 'LLVM':
            e = pnlvm.execution.FuncExecution(U).execute
        elif bin_execute == 'PTX':
            e = pnlvm.execution.FuncExecution(U).cuda_execute
        else:
            e = U
        val = benchmark(e, [[1, 3]])
        assert np.allclose(val, [[5, 9]])
        val2 = e([[-1, 3]])
        assert np.allclose(val2, [[5, -3]])
Example #20
    def test_user_def_func_numpy(self, op, variable, expected, bin_execute,
                                 benchmark):
        if op == "TANH":

            def myFunction(variable):
                return np.tanh(variable)
        elif op == "EXP":

            def myFunction(variable):
                return np.exp(variable)
        elif op == "SHAPE":

            def myFunction(variable):
                return variable.shape
        elif op == "ASTYPE_FLOAT":

            def myFunction(variable):
                return variable.astype(float)
        elif op == "ASTYPE_INT":
            # return types cannot be integers, so we cast back to float and check for truncation
            def myFunction(variable):
                return variable.astype(int).astype(float)
        elif op == "NP_MAX":

            def myFunction(variable):
                return np.max(variable)
        elif op == "FLATTEN":

            def myFunction(variable):
                return variable.flatten()

        U = UserDefinedFunction(custom_function=myFunction,
                                default_variable=variable)
        if bin_execute == 'LLVM':
            e = pnlvm.execution.FuncExecution(U).execute
        elif bin_execute == 'PTX':
            e = pnlvm.execution.FuncExecution(U).cuda_execute
        else:
            e = U
        val = benchmark(e, variable)
        assert np.allclose(val, expected)
Example #21
    def test_user_def_func_numpy(self, op, expected, bin_execute, benchmark):
        variable = [[1, 3]]
        if op == "TANH":

            def myFunction(variable):
                return np.tanh(variable)
        elif op == "EXP":

            def myFunction(variable):
                return np.exp(variable)

        U = UserDefinedFunction(custom_function=myFunction,
                                default_variable=[[0, 0]])
        if bin_execute == 'LLVM':
            e = pnlvm.execution.FuncExecution(U).execute
        elif bin_execute == 'PTX':
            e = pnlvm.execution.FuncExecution(U).cuda_execute
        else:
            e = U
        val = benchmark(e, variable)
        assert np.allclose(val, expected)
Example #22
    def test_user_def_func_cmpop_numpy(self, op, var1, var2, expected,
                                       bin_execute, benchmark):
        # we explicitly use np here to ensure that the result is castable to float in the scalar-scalar case
        if op == "Eq":

            def myFunction(variable, var1, var2):
                return np.equal(var1, var2).astype(float)
        elif op == "NotEq":

            def myFunction(variable, var1, var2):
                return np.not_equal(var1, var2).astype(float)
        elif op == "Lt":

            def myFunction(variable, var1, var2):
                return np.less(var1, var2).astype(float)
        elif op == "LtE":

            def myFunction(variable, var1, var2):
                return np.less_equal(var1, var2).astype(float)
        elif op == "Gt":

            def myFunction(variable, var1, var2):
                return np.greater(var1, var2).astype(float)
        elif op == "GtE":

            def myFunction(variable, var1, var2):
                return np.greater_equal(var1, var2).astype(float)

        U = UserDefinedFunction(custom_function=myFunction,
                                default_variable=[0],
                                var1=var1,
                                var2=var2)
        if bin_execute == 'LLVM':
            e = pnlvm.execution.FuncExecution(U).execute
        elif bin_execute == 'PTX':
            e = pnlvm.execution.FuncExecution(U).cuda_execute
        else:
            e = U
        val = benchmark(e, [0])
        assert np.allclose(expected, val)
Example #23
def test_user_def_func_return(dtype, expected, func_mode, benchmark):
    if dtype == "SCALAR_VAR":
        def myFunction(variable):
            var = 1.0
            return var
    elif dtype == "VECTOR_VAR":
        def myFunction(variable):
            var = [1,2]
            return var
    elif dtype == "MATRIX_VAR":
        def myFunction(variable):
            var = [[1,2],[3,4]]
            return var
    elif dtype == "BOOL_VAR":
        def myFunction(variable):
            var = True
            return 1.0
    elif dtype == "TUPLE_VAR":
        def myFunction(variable):
            var = (1, 2, 3, 4)
            return var
    elif dtype == "SCALAR_LIT":
        def myFunction(variable):
            return 1.0
    elif dtype == "VECTOR_LIT":
        def myFunction(variable):
            return [1,2]
    elif dtype == "MATRIX_LIT":
        def myFunction(variable):
            return [[1,2],[3,4]]
    elif dtype == "TUPLE_LIT":
        def myFunction(variable):
            return (1, 2, 3, 4)

    U = UserDefinedFunction(custom_function=myFunction, default_variable=0)
    e = pytest.helpers.get_func_execution(U, func_mode)

    val = benchmark(e, 0)
    assert np.allclose(val, expected)
Example #24
def test_user_def_func_builtin(op, variable, expected, func_mode, benchmark):
    if op == "SUM":
        def myFunction(var):
            return sum(var) + sum((var[0], var[1])) + sum([var[0], var[1]])
    elif op == "LEN":
        def myFunction(var):
            return len(var) + len((var[0], var[1])) + len([var[0], var[1]]) + len((1.0, (1,2)))
    elif op == "MAX":
        def myFunction(variable):
            return max(variable)
    elif op == "MAX_MULTI":
        # special cased, since passing in multiple variables without a closure is hard
        def myFunction(variable):
            return max(variable[0], variable[1], variable[2], -5, 6)
    elif op == "MAX_TUPLE":
        def myFunction(variable):
            return max((variable[0], variable[1], variable[2], -5, 6))

    U = UserDefinedFunction(custom_function=myFunction, default_variable=variable)
    e = pytest.helpers.get_func_execution(U, func_mode)

    val = benchmark(e, variable)
    assert np.allclose(val, expected)
Example #25
    def test_user_def_func_assign(self, dtype, expected, bin_execute,
                                  benchmark):
        if dtype == "SCALAR":

            def myFunction(variable):
                var = 1.0
                return var
        elif dtype == "VECTOR":

            def myFunction(variable):
                var = [1, 2]
                return var
        elif dtype == "MATRIX":

            def myFunction(variable):
                var = [[1, 2], [3, 4]]
                return var
        elif dtype == "BOOL":

            def myFunction(variable):
                var = True
                return 1.0
        elif dtype == "TUPLE":

            def myFunction(variable):
                var = (1, 2, 3, 4)
                return var

        U = UserDefinedFunction(custom_function=myFunction, default_variable=0)
        if bin_execute == 'LLVM':
            e = pnlvm.execution.FuncExecution(U).execute
        elif bin_execute == 'PTX':
            e = pnlvm.execution.FuncExecution(U).cuda_execute
        else:
            e = U
        val = benchmark(e, 0)
        assert np.allclose(val, expected)
Example #26
def test_user_def_func_builtin_direct(func, args, expected, benchmark):
    func = UserDefinedFunction(func)

    val = benchmark(func, *args)
    assert np.allclose(val, expected)
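In this test the callable is passed to UserDefinedFunction positionally and invoked with positional arguments. A hypothetical parametrization matching the (func, args, expected) signature; whether these particular builtins are among the real cases is an assumption:

# Hypothetical cases; the real parametrization lives in the test suite.
@pytest.mark.parametrize("func, args, expected", [
    (sum, ([1.0, 2.0, 3.0],), 6.0),
    (max, ([1.0, 5.0, 3.0],), 5.0),
])
def test_user_def_func_builtin_direct(func, args, expected, benchmark):
    ...  # body as shown above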
Example #27
def test_user_def_func_cmpop(func, var1, var2, expected, func_mode, benchmark):
    U = UserDefinedFunction(custom_function=func, default_variable=[0], var1=var1, var2=var2)
    e = pytest.helpers.get_func_execution(U, func_mode)

    val = benchmark(e, [0])
    assert np.allclose(expected, val)
Example #28
def test_user_def_func_unary(func, variable, func_mode, benchmark):
    U = UserDefinedFunction(custom_function=func, default_variable=variable, param=variable)
    e = pytest.helpers.get_func_execution(U, func_mode)

    val = benchmark(e, variable)
    assert np.allclose(val, func(variable, param=variable))
Example #29
def test_user_def_func_boolop(op, var, expected, func_mode, benchmark):
    U = UserDefinedFunction(custom_function=op, default_variable=var)
    e = pytest.helpers.get_func_execution(U, func_mode)

    val = benchmark(e, var)
    assert val == expected
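As with the comparison tests, op here is a parametrized callable; because compiled UDFs do not support Python bool outputs, the boolean result is encoded as 1.0 or 0.0. A hypothetical callable of that shape:

# Hypothetical example of a callable test_user_def_func_boolop could receive.
def any_positive(variable):
    return 1.0 if variable[0] > 0 or variable[1] > 0 else 0.0

# e.g. op=any_positive, var=[1.0, -1.0], expected=1.0 would satisfy the test.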
Example #30
    def test_user_def_func_cmpop(self, op, bin_execute, benchmark):
        if op == "Eq":

            def myFunction(variable):
                var1 = 1.0
                var2 = 1.0
                if var1 == var2:
                    return 1.0
                else:
                    return 0.0
        elif op == "NotEq":

            def myFunction(variable):
                var1 = 1.0
                var2 = 2.0
                if var1 != var2:
                    return 1.0
                else:
                    return 0.0
        elif op == "Lt":

            def myFunction(variable):
                var1 = 1.0
                var2 = 2.0
                if var1 < var2:
                    return 1.0
                else:
                    return 0.0
        elif op == "LtE":

            def myFunction(variable):
                var1 = 1.0
                var2 = 2.0
                var3 = 1.0
                if var1 <= var2 and var1 <= var3:
                    return 1.0
                else:
                    return 0.0
        elif op == "Gt":

            def myFunction(variable):
                var1 = 2.0
                var2 = 1.0
                if var1 > var2:
                    return 1.0
                else:
                    return 0.0
        elif op == "GtE":

            def myFunction(variable):
                var1 = 3.0
                var2 = 2.0
                var3 = 3.0
                if var1 >= var2 and var1 >= var3:
                    return 1.0
                else:
                    return 0.0

        U = UserDefinedFunction(custom_function=myFunction,
                                default_variable=[0])
        if bin_execute == 'LLVM':
            e = pnlvm.execution.FuncExecution(U).execute
        elif bin_execute == 'PTX':
            e = pnlvm.execution.FuncExecution(U).cuda_execute
        else:
            e = U
        val = benchmark(e, [0])
        assert val == 1.0