Example #1
def find_peaks(x,
               mph=None,
               mpd=1,
               threshold=0,
               edge='rising',
               kpsh=False,
               valley=False,
               show=False,
               ax=None):
    import cppimport
    m = cppimport.imp('PyFindPeaks')

    edge_type = m.EdgeRising
    if edge is None:
        edge_type = m.EdgeNone
    elif edge == 'falling':
        edge_type = m.EdgeFalling
    elif edge == 'both':
        edge_type = m.EdgeBoth

    x = np.atleast_1d(x).astype('float64')
    ind = m.FindPeaks(x.tolist(), math.nan if mph is None else mph, mpd,
                      threshold, edge_type, kpsh, valley)
    ind = np.array(ind)

    if show:
        _plot(x, mph, mpd, threshold, edge, valley, ax, np.array(ind),
              'FindPeaks(C++)')

    return ind
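A quick usage sketch for the wrapper above, assuming PyFindPeaks.cpp is on sys.path so cppimport can build it (the toy signal and thresholds are illustrative):

import numpy as np

# Toy signal with local maxima at indices 1, 3 and 5.
signal = np.array([0.0, 1.0, 0.5, 2.0, 0.3, 1.5, 0.1])
peaks = find_peaks(signal, mph=0.8, mpd=1)  # peak indices with minimum height 0.8
print(peaks)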
Example #2
def test_rebuild_header_after_change():
    mymodule = cppimport.imp("mymodule")
    test_code = '''
import cppimport; cppimport.set_quiet(False); mymodule = cppimport.imp("mymodule"); mymodule.Thing().cheer()
'''
    with appended('tests/thing.h', add_to_thing):
        subprocess_check(test_code)
Example #3
def test_rebuild_header_after_change():
    mymodule = cppimport.imp("mymodule")
    test_code = '''
import cppimport; cppimport.set_quiet(False); mymodule = cppimport.imp("mymodule"); mymodule.Thing().cheer()
'''
    with appended('tests/thing.h', add_to_thing):
        subprocess_check(test_code)
Example #4
    def compile_module():
        cpp_code_header = f"""
/*
<%
setup_pybind11(cfg)
cfg['include_dirs'] += {dolfinx_pc["include_dirs"] + [petsc4py.get_include()] + [str(pybind_inc())]}
cfg['compiler_args'] += {["-D" + dm for dm in dolfinx_pc["define_macros"]]}
cfg['libraries'] += {dolfinx_pc["libraries"]}
cfg['library_dirs'] += {dolfinx_pc["library_dirs"]}
%>
*/
"""

        cpp_code = """
#include <pybind11/pybind11.h>
#include <petscvec.h>
#include <caster_petsc.h>

void PETSc_exp(Vec x)
{
    assert(x);
    VecExp(x);
}
PYBIND11_MODULE(petsc_casters_cppimport, m)
{
    m.def("PETSc_exp", &PETSc_exp);
}
"""

        path = pathlib.Path(tempdir)
        open(pathlib.Path(tempdir, "petsc_casters_cppimport.cpp"),
             "w").write(cpp_code + cpp_code_header)
        rel_path = path.relative_to(pathlib.Path(__file__).parent)
        p = str(rel_path).replace("/", ".") + ".petsc_casters_cppimport"
        return cppimport.imp(p)
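The same write-then-import pattern works without the DOLFINx-specific build flags. A minimal sketch, where the module name square_mod and its C++ body are assumptions rather than part of the example above:

import pathlib
import sys

import cppimport

cpp_source = """
/*
<%
setup_pybind11(cfg)
%>
*/
#include <pybind11/pybind11.h>

int square(int x) { return x * x; }

PYBIND11_MODULE(square_mod, m)
{
    m.def("square", &square);
}
"""


def build_square_mod(workdir):
    # Write square_mod.cpp into workdir, make it importable, and let
    # cppimport compile and load it on first use.
    path = pathlib.Path(workdir)
    (path / "square_mod.cpp").write_text(cpp_source)
    if str(path) not in sys.path:
        sys.path.append(str(path))
    return cppimport.imp("square_mod")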
Example #5
def test_rebuild_after_failed_compile():
    mymodule = cppimport.imp("mymodule")
    test_code = '''
import cppimport; mymodule = cppimport.imp("mymodule");assert(mymodule.add(1,2) == 3)
'''
    with appended('tests/mymodule.cpp', ";asdf;"):
        subprocess_check(test_code, 1)
    subprocess_check(test_code, 0)
Example #6
def test_pybind11(x=np.load('numpy_arrays/z.npy'), y=np.load('numpy_arrays/w.npy')):
    import cppimport
    code = cppimport.imp("wrap7")
    x = np.unpackbits(x)
    y = np.unpackbits(y)
    vector_a = _n_apply_weights(x)
    vector_b = _n_apply_weights(y)
    return code.ruzicka(vector_a, vector_b)
Example #7
def test_rebuild_after_failed_compile():
    mymodule = cppimport.imp("mymodule")
    test_code = '''
import cppimport; mymodule = cppimport.imp("mymodule");assert(mymodule.add(1,2) == 3)
'''
    with appended('tests/mymodule.cpp', ";asdf;"):
        subprocess_check(test_code, 1)
    subprocess_check(test_code, 0)
Example #8
def test_no_rebuild_if_no_deps_change():
    mymodule = cppimport.imp("mymodule")
    test_code = '''
import cppimport;
mymodule = cppimport.imp("mymodule");
assert(not hasattr(mymodule, 'Thing'))
'''
    with appended('tests/thing2.h', add_to_thing):
        subprocess_check(test_code)
Example #9
def mesh2volume(mesh_file, scaling, exterior_band=1, interior_band=1000, spacing=None):
    global _vdb_meshing
    if not _vdb_meshing:
        _vdb_meshing = cppimport.imp('volume2mesh.internal.vdb_meshing')

    spacing = spacing or 1/scaling
    if isinstance(spacing, (float, int)):
        spacing = [spacing] * 3
    return _vdb_meshing.meshToVolume(mesh_file, spacing, exterior_band, interior_band)
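A hedged usage sketch for the wrapper above (the mesh file name is illustrative):

# Voxelize a surface mesh at 10 voxels per unit length.
volume = mesh2volume('bunny.obj', scaling=10.0)

# The same call with an explicit, anisotropic voxel spacing.
volume = mesh2volume('bunny.obj', scaling=10.0, spacing=[0.1, 0.1, 0.2])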
Example #10
def test_no_rebuild_if_no_deps_change():
    mymodule = cppimport.imp("mymodule")
    test_code = '''
import cppimport;
mymodule = cppimport.imp("mymodule");
assert(not hasattr(mymodule, 'Thing'))
'''
    with appended('tests/thing2.h', add_to_thing):
        subprocess_check(test_code)
Example #11
def mesh2volume_known_dimensions(mesh_file, origin, spacing, shape, exterior_band=1, interior_band=1000):
    global _vdb_meshing
    if not _vdb_meshing:
        _vdb_meshing = cppimport.imp('volume2mesh.internal.vdb_meshing')
    return _vdb_meshing.meshToVolumeKnownDimensions(mesh_file,
                                                    origin,
                                                    spacing,
                                                    shape[::-1],
                                                    exterior_band,
                                                    interior_band)
Example #12
def calc_vertex_data_from_scalar_field(mesh_file, scalar_field, origin, spacing, exterior_band=1, interior_band=1000):
    global _vdb_meshing
    if not _vdb_meshing:
        _vdb_meshing = cppimport.imp('volume2mesh.internal.vdb_meshing')
    assert len(scalar_field.shape) == 3
    return _vdb_meshing.calcVertexDataFromScalarField(mesh_file,
                                                      scalar_field,
                                                      origin,
                                                      spacing,
                                                      exterior_band,
                                                      interior_band)
Example #13
def generate_data(n_samples=1000,
                  max_runs=1000,
                  write_data_dir=None,
                  write_data_suffix=None):
    print("Generating data")
    cpp_calculator = cppimport.imp("tools.montecarlo_cpp.pymontecarlo")
    cpp_equity_func = cpp_calculator.montecarlo
    get_equity = cpp_equity_func

    table = HoldemTable()
    rank_enc, suit_enc = make_one_hot_encoders()
    n = 0
    x_data = []
    y_data = []
    for i in range(n_samples):
        # Create deck
        table._create_card_deck()

        # Sample player cards
        p1_cards = sample_cards(table.deck, 2)
        p2_cards = sample_cards(table.deck, 2)

        # Sample table cards from either preflop,
        # flop, river or turn
        stage_card_nums = [0, 3, 4, 5]
        num_table_samples = np.random.choice(stage_card_nums)
        table_cards = sample_cards(table.deck, num_table_samples)

        equity = get_equity(set(p1_cards), set(table_cards), 2, max_runs)

        encoded_state = preprocess_data_state(p1_cards, table_cards, rank_enc,
                                              suit_enc)

        x_data.append(encoded_state)
        y_data.append(equity)

    x_data = np.array(x_data)
    y_data = np.array(y_data)

    if write_data_dir and write_data_suffix:
        X_data_path = write_data_dir + 'X_' + write_data_suffix
        Y_data_path = write_data_dir + 'Y_' + write_data_suffix

        with open(X_data_path, 'wb') as handle:
            pickle.dump(x_data, handle, protocol=pickle.HIGHEST_PROTOCOL)

        with open(Y_data_path, 'wb') as handle:
            pickle.dump(y_data, handle, protocol=pickle.HIGHEST_PROTOCOL)

    return x_data, y_data
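A hedged usage sketch for the generator above; the directory and suffix are illustrative and the directory must already exist. The two pickle files can be read back with pickle.load:

import pickle

x_data, y_data = generate_data(n_samples=200,
                               max_runs=500,
                               write_data_dir='data/',
                               write_data_suffix='train.pkl')

# Reload the persisted arrays later.
with open('data/X_train.pkl', 'rb') as handle:
    x_loaded = pickle.load(handle)
with open('data/Y_train.pkl', 'rb') as handle:
    y_loaded = pickle.load(handle)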
Example #14
def speed_and_error_comparison(number_of_samples):
    # python_equity_calculator = montecarlo_python.MonteCarlo()
    # py_equity_func = python_equity_calculator.run_montecarlo

    cpp_calculator = cppimport.imp("tools.montecarlo_cpp.pymontecarlo")
    cpp_equity_func = cpp_calculator.montecarlo

    players = 2

    load_model = "equity_optuna_4_17"
    nn_equity_calculator = nn_equity.PredictEquity(load_model_name=load_model, load_model_dir='./tools/nn_equity_model/')
    nn_equity_func = nn_equity_calculator.get_equity

    model_data_dict = {
        'cpp_montecarlo_10k': {'equity_function': cpp_equity_func, 'runs': 10000, 'error_data': [], 'time_data': []},
        'cpp_montecarlo_1k': {'equity_function': cpp_equity_func, 'runs': 1000, 'error_data': [], 'time_data': []},
        # 'py_montecarlo_1k': {'equity_function': py_get_equity, 'runs': 1000, 'error_data': [], 'time_data': []},
        'neural_network': {'equity_function': nn_equity_func, 'runs': 1, 'error_data': [], 'time_data': []},
    }

    for i in range(number_of_samples):
        print("Sample number {}".format(i))

        my_cards, cards_on_table = sample_scenario()
        base_line_res, base_line_time = test_model(cpp_equity_func, my_cards, cards_on_table, players, runs=100000)
        

        for key in model_data_dict.keys():
            equity_func = model_data_dict[key]['equity_function']
            runs = model_data_dict[key]['runs']
            res, ex_time = test_model(equity_func, my_cards, cards_on_table, players, runs=runs)
            error = abs(res - base_line_res)

            if error > 0.1 and key=='neural_network':
                print(my_cards)
                print(cards_on_table)

            # Don't save the first results; TensorFlow is slow on the first
            # prediction with a model.
            if i == 0:
                continue

            print("Name: {}, \tError: {}, \tTime: {}".format(key, error, ex_time))


            model_data_dict[key]['error_data'].append(error)
            model_data_dict[key]['time_data'].append(ex_time)
    

    print(model_data_dict['neural_network']['time_data'])
    return model_data_dict
Example #15
    def compile(self):
        try:
            import cppimport
        except ImportError:
            try:
                from torch.utils.cpp_extension import load
            except Exception:
                assert False, 'cppimport or torch is required for compiling pybind11 modules'

        assert not self.is_cuda

        source_code = self.CPP_IMPORT_PREFIX + str(self)
        hash_str = _hash(source_code.encode()).hexdigest()
        source_code_with_hash = source_code.replace(
            f'PYBIND11_MODULE({self.module_name}',
            f'PYBIND11_MODULE(cppimport_{hash_str}')

        cache_dir = join(get_cache_config()['object_cache'])
        file_name = join(cache_dir, f'cppimport_{hash_str}.cpp')

        os.makedirs(cache_dir, exist_ok=True)
        if not exists(file_name):
            write_file(file_name, source_code_with_hash)
        # TODO: propagate extra headers
        if cache_dir not in sys.path:
            sys.path.append(cache_dir)

        # Torch reads the compiler from the CXX environment variable
        os.environ['CXX'] = get_compiler_config()['command']

        try:
            torch_extension = cppimport.imp(f'cppimport_{hash_str}')
        except Exception as e:
            print(e)
            torch_extension = load(f'cppimport_{hash_str}',
                                   [file_name],
                                   with_cuda=self.is_cuda,
                                   extra_cflags=['--std=c++14',
                                                 get_compiler_config()['flags'].replace('--std=c++11', '')],
                                   extra_cuda_cflags=['-std=c++14', '-ccbin', get_compiler_config()['command']],
                                   build_directory=cache_dir,
                                   extra_include_paths=[get_pycuda_include_path(),
                                                        get_pystencils_include_path()])
        return torch_extension
Example #16
def find_peaks(x, mph=None, mpd=1, threshold=0, edge='rising',
    kpsh=False, valley=False, show=False, ax=None):
    import cppimport
    m = cppimport.imp('PyFindPeaks')
    
    edge_type = m.EdgeRising
    if edge is None:
        edge_type = m.EdgeNone
    elif edge == 'falling':
        edge_type = m.EdgeFalling
    elif edge == 'both':
        edge_type = m.EdgeBoth

    x = np.atleast_1d(x).astype('float64')
    ind = m.FindPeaks(x.tolist(), math.nan if mph is None else mph, mpd, threshold, edge_type, kpsh, valley)
    ind = np.array(ind)
    
    if show:
       detect_peaks._plot(x, mph, mpd, threshold, edge, valley, ax, np.array(ind))

    return ind
Example #17
def bucket_size_benchmark():
    #bucket_sizes = 2048, 2039
    bucket_sizes = (2039, )

    fig = plt.figure("Atom string hashing distribution")

    for plot_number, size in enumerate(bucket_sizes):
        m = cppimport.imp('cii_atom_cppimport')

        with open(same_dir('wordlist'), encoding='utf-8') as f:
            for word in f:
                word = word.strip()
                if word:
                    m.Atom_new_from_string(word)

        buckets_hist = tuple(map(m.Atom_bench_bucket_len, range(m.Atom_bench_buckets_size())))
        plt.subplot(len(bucket_sizes), 1, plot_number+1)
        plt.bar(tuple(range(size)), buckets_hist)
        plt.title('bucket size: {}'.format(size))

    plt.show()
Example #18
def volume2mesh(file,
                volume,
                threshold,
                adaptivity=0.,
                spacing=[1., 1., 1.],
                origin=[0., 0., 0.],
                binary_file=True,
                only_write_biggest_components=False,
                max_component_count=1):
    global _vdb_meshing
    if not _vdb_meshing:
        _vdb_meshing = cppimport.imp('volume2mesh.internal.vdb_meshing')
    _vdb_meshing.writeMeshFromVolume(
        file,
        volume,
        -threshold,
        adaptivity,
        spacing,
        origin,
        binary_file,
        only_write_biggest_components,
        max_component_count)
Example #19
def bucket_size_benchmark():
    #bucket_sizes = 2048, 2039
    bucket_sizes = (2039, )

    fig = plt.figure("Atom string hashing distribution")

    for plot_number, size in enumerate(bucket_sizes):
        m = cppimport.imp('cii_atom_cppimport')

        with open(same_dir('wordlist'), encoding='utf-8') as f:
            for word in f:
                word = word.strip()
                if word:
                    m.Atom_new_from_string(word)

        buckets_hist = tuple(
            map(m.Atom_bench_bucket_len, range(m.Atom_bench_buckets_size())))
        plt.subplot(len(bucket_sizes), 1, plot_number + 1)
        plt.bar(tuple(range(size)), buckets_hist)
        plt.title('bucket size: {}'.format(size))

    plt.show()
Example #20
def cpp_run():
    parser = argparse.ArgumentParser(
        description='Run a C++ file with cppimport')
    parser.add_argument('filename', help='The file to run.')
    parser.add_argument(
        '--add_main_caller',
        '-m',
        action='store_true',
        help='Add a pybind11 function that will call your main()')
    parser.add_argument('--verbose',
                        '-v',
                        action='store_true',
                        help='Tell me everything!')
    args = parser.parse_args()

    if args.verbose:
        cppimport.set_quiet(False)

    filename = args.filename
    filedir = os.path.dirname(filename)
    filebasename = os.path.basename(filename)
    module_name, file_extension = os.path.splitext(filebasename)

    if args.add_main_caller:
        cpprun_dir = '.cpprunfiles'
        if not os.path.exists(cpprun_dir):
            os.makedirs(cpprun_dir)
        src = os.path.join(cpprun_dir, filebasename)
        open(src, 'w').write(open(filename, 'r').read() + footer)
        sys.path.append(cpprun_dir)
    else:
        sys.path.append(filedir)

    module = cppimport.imp(module_name)

    if args.verbose:
        print("Launching!")
    module.main()
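The entry point is written as a console script, but it can also be exercised directly by faking sys.argv; a minimal sketch in which somecode.cpp is an assumed file name that must expose a main() binding:

import sys

# Equivalent to running:  cpp_run -v somecode.cpp
sys.argv = ['cpp_run', '--verbose', 'somecode.cpp']
cpp_run()  # appends the file's directory to sys.path, compiles somecode.cpp, then calls module.main()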
Example #21
def cpp_run():
    parser = argparse.ArgumentParser(description='Run a C++ file with cppimport')
    parser.add_argument('filename', help = 'The file to run.')
    parser.add_argument(
        '--add_main_caller', '-m',
        action = 'store_true',
        help = 'Add a pybind11 function that will call your main()'
    )
    parser.add_argument(
        '--verbose', '-v',
        action = 'store_true',
        help = 'Tell me everything!'
    )
    args = parser.parse_args()

    if args.verbose:
        cppimport.set_quiet(False)

    filename = args.filename
    filedir = os.path.dirname(filename)
    filebasename = os.path.basename(filename)
    module_name, file_extension = os.path.splitext(filebasename)

    if args.add_main_caller:
        cpprun_dir = '.cpprunfiles'
        if not os.path.exists(cpprun_dir):
            os.makedirs(cpprun_dir)
        src = os.path.join(cpprun_dir, filebasename)
        open(src, 'w').write(open(filename, 'r').read() + footer)
        sys.path.append(cpprun_dir)
    else:
        sys.path.append(filedir)

    module = cppimport.imp(module_name)

    if args.verbose:
        print("Launching!")
    module.main()
Example #22
def read_vdb(file, grid_name='', dense_shape=[0]*3, array=None, return_spacing_origin=False):
    global _vdb_io
    if not _vdb_io:
        _vdb_io = cppimport.imp('volume2mesh.internal.vdb_io')

    if grid_name == '':
        try:
            tuple_dict = _vdb_io.readFloatVdbGrid(file, dense_shape)
        except Exception:
            tuple_dict = _vdb_io.readIntVdbGrid(file, dense_shape)

        rtn = {k: v[0] for k, v in tuple_dict.items()}
        spacing = {k: v[1] for k, v in tuple_dict.items()}
        origin = {k: v[2] for k, v in tuple_dict.items()}

    elif array is None:
        rtn, spacing, origin = _vdb_io.readFloatVdbGrid(file, grid_name, dense_shape)
    else:
        rtn, spacing, origin = _vdb_io.readFloatVdbGrid(file, array, grid_name, dense_shape)

    if return_spacing_origin:
        return rtn, spacing, origin
    else:
        return rtn
Example #23
    def compile_module():
        cpp_code_header = f"""
/*
<%
setup_pybind11(cfg)
cfg['compiler_args'] = ['-std=c++17']
cfg['include_dirs'] += {dolfinx_pc["include_dirs"]
                        + [mpi4py.get_include()]
                        + [str(wrappers.get_include_path())]}
%>
*/
"""

        cpp_code = """
#include <pybind11/pybind11.h>
#include <caster_mpi.h>

dolfinx_wrappers::MPICommWrapper
test_comm_passing(const dolfinx_wrappers::MPICommWrapper comm)
{
    MPI_Comm c = comm.get();
    return dolfinx_wrappers::MPICommWrapper(c);
}

PYBIND11_MODULE(mpi_comm_wrapper, m)
{
    m.def("test_comm_passing", &test_comm_passing);
}
"""

        path = pathlib.Path(tempdir)
        open(pathlib.Path(tempdir, "mpi_comm_wrapper.cpp"),
             "w").write(cpp_code + cpp_code_header)
        rel_path = path.relative_to(pathlib.Path(__file__).parent)
        p = str(rel_path).replace("/", ".") + ".mpi_comm_wrapper"
        return cppimport.imp(p)
Example #24
__author__ = "Mikael Mortensen <*****@*****.**>"
__date__ = "2013-12-13"
__copyright__ = "Copyright (C) 2013 " + __author__
__license__ = "GNU Lesser GPL version 3 or any later version"
import cppimport
import petsc4py
from dolfin import info, Function, FunctionSpace, assemble, TrialFunction, TestFunction, dx, Matrix, as_backend_type

compiled_gradient_module = cppimport.imp('fenicstools.fem.gradient_weight')


def weighted_gradient_matrix(mesh,
                             i,
                             family='CG',
                             degree=1,
                             constrained_domain=None):
    """Compute weighted gradient matrix

    The matrix allows you to compute the gradient of a P1 Function
    through a simple matrix vector product

    CG family:
        p_ is the pressure solution on CG1
        dPdX = weighted_gradient_matrix(mesh, 0, 'CG', degree)
        V = FunctionSpace(mesh, 'CG', degree)
        dpdx = Function(V)
        dpdx.vector()[:] = dPdX * p_.vector()

        The space for dpdx must be continuous Lagrange of some order

    CR family:
Example #25
import cppimport
cppimport.set_quiet(False)

wrapper = cppimport.imp("taskloaf.wrapper").wrapper
task = wrapper.task
ready = wrapper.ready
Future = wrapper.Future
Config = wrapper.Config

def launch_local(n_cores, cfg = Config()):
    return wrapper.launch_local(n_cores, cfg)

# def when_all(*args):
#     def make_split_args(f):
#         def split_args(x):
#             return f(*x)
#         return split_args
#
#     def flatten_tuple(t):
#         return sum(t, ())
#
#     def when_all_helper(*args):
#         stage = []
#         for i in range(0, len(args) - 1, 2):
#             stage.append(when_both(args[i], args[i + 1]))
#         if len(args) % 2 == 1:
#             stage.append(args[-1])
#         if len(stage) > 1:
#             return when_all_helper(*stage).then(flatten_tuple)
#         else:
#             return stage[0]
Example #26
"""Test numpy based equity calculator"""

import cppimport
import pytest

calculator = cppimport.imp("tools.montecarlo_cpp.pymontecarlo")


def _runner(my_cards,
            cards_on_table,
            players,
            expected_result,
            iterations=5000):
    """Montecarlo test"""
    if len(cards_on_table) < 3:
        cards_on_table = {'null'}
    equity = calculator.montecarlo(my_cards, cards_on_table, players,
                                   iterations) * 100
    assert equity == pytest.approx(expected_result, abs=3)


def test_montecarlo1():
    """Montecarlo test"""
    my_cards = {'3H', '3S'}
    cards_on_table = {'8S', '4S', 'QH', '8C', '4H'}
    expected_results = 40.2
    players = 2
    _runner(my_cards, cards_on_table, players, expected_results)


def test_montecarlo2():
Example #27
def write_vdb(file, array, grid_name, spacing=[1., 1., 1.], origin=[0., 0., 0.], clipping_tolerance=0.):
    global _vdb_io
    if not _vdb_io:
        _vdb_io = cppimport.imp('volume2mesh.internal.vdb_io')
    array = np.ascontiguousarray(array, np.float32)
    _vdb_io.writeFloatVdbGrid(file, array, grid_name, spacing, origin,  clipping_tolerance)
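Together with read_vdb from Example #22, this gives a simple round trip; a hedged sketch in which the array contents and file name are illustrative:

import numpy as np

density = np.random.rand(32, 32, 32).astype(np.float32)
write_vdb('density.vdb', density, 'density', spacing=[0.5, 0.5, 0.5])

# Read every grid back, including spacing and origin metadata.
grids, spacing, origin = read_vdb('density.vdb', return_spacing_origin=True)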
Example #28
# import wrap
import cppimport

funcs = cppimport.imp('wrap')


def test_add():
    print(funcs.add(5, 4))
    assert (funcs.add(3, 4) == 7)


if __name__ == '__main__':
    test_add()
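The test assumes a wrap.cpp next to it. The exact source is not shown on this page, so the string below is only an assumption modeled on the other examples here:

wrap_cpp = """
/*
<%
setup_pybind11(cfg)
%>
*/
#include <pybind11/pybind11.h>

int add(int i, int j) { return i + j; }

PYBIND11_MODULE(wrap, m)
{
    m.def("add", &add);
}
"""

# e.g. pathlib.Path('wrap.cpp').write_text(wrap_cpp)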
Example #29
__author__ = "Mikael Mortensen <*****@*****.**>"
__date__ = "2011-12-19"
__copyright__ = "Copyright (C) 2011 " + __author__
__license__  = "GNU Lesser GPL version 3 or any later version"
"""
This module contains functionality for efficiently probing a Function many times.
"""
from dolfin import *
from numpy import zeros, array, squeeze, reshape, save
import os, inspect
from mpi4py.MPI import COMM_WORLD as comm
import cppimport

probe11 = cppimport.imp('fenicstools.probe.probe11')

# Give the compiled classes some additional pythonic functionality
class Probe(probe11.Probe):

    def __call__(self, *args):
        return self.eval(*args)

    def __len__(self):
        return self.value_size()

    def __getitem__(self, i):
        return self.get_probe_at_snapshot(i)


class Probes(probe11.Probes):

    def __call__(self, *args):
Example #30
import cppimport
cppimport.set_quiet(False)
# A hack required to get relative imports working
# with cppimport
import sys
import os
sys.path.insert(0, os.path.dirname(__file__))
try:
    cppimport.imp('marker_ba')
#    cppimport.imp('_theia')
finally:
    sys.path.pop(0)
from marker_ba import *
#import _theia as theia
Example #31

def test_cython3_pac(x=np.load('numpy_arrays/z.npy'),
                     y=np.load('numpy_arrays/w.npy')):
    from ruzicka import retrial
    x = np.unpackbits(x)
    y = np.unpackbits(y)
    vector_a = _n_apply_weights(x)
    vector_b = _n_apply_weights(y)
    return retrial(vector_a, vector_b)


import cppimport
cppimport.set_quiet(False)
cppimport.force_rebuild()
code = cppimport.imp("wrap7")


def test_pybind11(x=np.load('numpy_arrays/z.npy'),
                  y=np.load('numpy_arrays/w.npy')):
    x = np.unpackbits(x)
    y = np.unpackbits(y)
    vector_a = _n_apply_weights(x)
    vector_b = _n_apply_weights(y)
    return code.ruzicka(vector_a, vector_b)


def calculate_n(num):
    #time_base = timeit.timeit("test_base()", number=num, setup="from __main__ import test_base")
    #time_test_numpy = timeit.timeit("test_numpy()", number=num, setup="from __main__ import test_numpy")
    time_test_numpy_v2 = timeit.timeit(
Example #32
    def __init__(self,
                 initial_stacks=100,
                 small_blind=1,
                 big_blind=2,
                 render=False,
                 funds_plot=True,
                 max_raising_rounds=2,
                 use_cpp_montecarlo=False):
        """
        The table needs to be initialized once at the beginning

        Args:
            num_of_players (int): number of players that need to be added
            initial_stacks (real): initial stacks per player
            small_blind (real)
            big_blind (real)
            render (bool): render table after each move in graphical format
            funds_plot (bool): show plot of funds history at end of each episode
            max_raising_rounds (int): max raises per round per player

        """
        if use_cpp_montecarlo:
            import cppimport
            calculator = cppimport.imp("tools.montecarlo_cpp.pymontecarlo")
            get_equity = calculator.montecarlo
        else:
            from tools.montecarlo_python import get_equity
        self.get_equity = get_equity
        self.use_cpp_montecarlo = use_cpp_montecarlo
        self.num_of_players = 0
        self.small_blind = small_blind
        self.big_blind = big_blind
        self.render_switch = render
        self.players = []
        self.table_cards = None
        self.dealer_pos = None
        self.player_status = []  # one hot encoded
        self.current_player = None
        self.player_cycle = None  # cycle iterator
        self.stage = None
        self.last_player_pot = None
        self.viewer = None
        self.player_max_win = None  # used for side pots
        self.second_round = False
        self.last_caller = None
        self.last_raiser = None
        self.raisers = []
        self.callers = []
        self.played_in_round = None
        self.min_call = None
        self.community_data = None
        self.player_data = None
        self.stage_data = None
        self.deck = None
        self.action = None
        self.winner_ix = None
        self.initial_stacks = initial_stacks
        self.acting_agent = None
        self.funds_plot = funds_plot
        self.max_round_raising = max_raising_rounds

        # pots
        self.community_pot = 0
        self.current_round_pot = 9
        self.player_pots = None  # individual player pots

        self.observation = None
        self.reward = None
        self.info = None
        self.done = False
        self.funds_history = None
        self.array_everything = None
        self.legal_moves = None
        self.illegal_move_reward = -1000000
        self.action_space = Discrete(len(Action) - 2)
        self.first_action_for_hand = None
Example #33
def test_inner_package_mymodule():
    apackage = cppimport.imp("apackage.inner.mymodule")
    module_tester(apackage.inner.mymodule)
Example #34
def test_with_file_in_syspath():
    orig_sys_path = copy.copy(sys.path)
    sys.path.append(os.path.join(os.path.dirname(__file__), 'mymodule.cpp'))
    mymodule = cppimport.imp("mymodule")
    sys.path = orig_sys_path
Example #35
__author__ = "Mikael Mortensen <*****@*****.**>"
__date__ = "2013-12-13"
__copyright__ = "Copyright (C) 2013 " + __author__
__license__  = "GNU Lesser GPL version 3 or any later version"
from dolfin import Function
import cppimport

compiled_fem_module = cppimport.imp('fenicstools.fem.interpolation')

def interpolate_nonmatching_mesh(u0, V):
    """Interpolate from GenericFunction u0 to FunctionSpace V.

    The FunctionSpace V can have a different mesh than that of u0, if u0
    has a mesh.

    """
    u = Function(V)
    compiled_fem_module.interpolate(u0, u)
    return u

def interpolate_nonmatching_mesh_any(u0, V):
    """Interpolate from GenericFunction u0 to FunctionSpace V.

    The FunctionSpace V can have a different mesh than that of u0, if u0
    has a mesh.

    This function works for any finite element space, not just Lagrange.

    """
    u = Function(V)
    compiled_fem_module.interpolate_any(u0, u)
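A hedged usage sketch with legacy DOLFIN; the mesh resolutions and the expression are illustrative:

from dolfin import Expression, FunctionSpace, UnitSquareMesh, interpolate

coarse = UnitSquareMesh(8, 8)
fine = UnitSquareMesh(13, 13)  # deliberately non-matching
V0 = FunctionSpace(coarse, 'CG', 1)
V1 = FunctionSpace(fine, 'CG', 2)

u0 = interpolate(Expression('x[0]*x[1]', degree=2), V0)
u1 = interpolate_nonmatching_mesh(u0, V1)  # lives on the finer mesh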
Example #36
__author__ = "Miroslav Kuchta <*****@*****.**>"
__date__ = "2014-04-05"
__copyright__ = "Copyright (C) 2013 " + __author__
__license__  = "GNU Lesser GPL version 3 or any later version"

from dolfin import TensorFunctionSpace, VectorFunctionSpace, FunctionSpace,\
    Function, interpolate, assemble, \
    TrialFunction, TestFunction, dx, Matrix, dot, div
from fenicstools import SetMatrixValue
from os.path import abspath, join
import cppimport

compiled_cr_module = cppimport.imp('fenicstools.fem.cr_divergence')

def gauss_divergence(u, mesh=None):
    '''
    This function uses the Gauss divergence theorem to compute the divergence
    of u inside a cell by integrating the normal fluxes across the cell
    boundary. If u is a vector or tensor field, the result of the computation
    is the divergence of u at the cell center = a DG0 scalar/vector function.
    For scalar fields, the result is grad(u) = a DG0 vector function. The
    fluxes are computed by the midpoint rule, and as such the computed
    divergence is exact for linear fields.
    '''

    # Require u to be Function
    assert isinstance(u, Function)

    # Require u to be scalar/vector/rank 2 tensor
    rank = u.value_rank()
    assert rank in [0, 1, 2]
Example #37
def test_mymodule():
    mymodule = cppimport.imp("mymodule")
    module_tester(mymodule)
Example #38
    def compile_eigen_csr_assembler_module():
        cpp_code_header = f"""
<%
setup_pybind11(cfg)
cfg['include_dirs'] = {dolfinx_pc["include_dirs"] + [petsc4py.get_include()] + [str(pybind_inc())]}
cfg['compiler_args'] = {["-D" + dm for dm in dolfinx_pc["define_macros"]]}
cfg['compiler_args'] = ['-std=c++17']
cfg['libraries'] = {dolfinx_pc["libraries"]}
cfg['library_dirs'] = {dolfinx_pc["library_dirs"]}
%>
"""

        cpp_code = """
#include <pybind11/pybind11.h>
#include <pybind11/eigen.h>
#include <pybind11/stl.h>
#include <vector>
#include <Eigen/Sparse>
#include <petscsys.h>
#include <dolfinx/fem/assembler.h>
#include <dolfinx/fem/DirichletBC.h>
#include <dolfinx/fem/Form.h>

template<typename T>
Eigen::SparseMatrix<T, Eigen::RowMajor>
assemble_csr(const dolfinx::fem::Form<T>& a,
        const std::vector<std::shared_ptr<const dolfinx::fem::DirichletBC<T>>>& bcs)
{
std::vector<Eigen::Triplet<T>> triplets;
const auto mat_add
    = [&triplets](std::int32_t nrow, const std::int32_t* rows,
                std::int32_t ncol, const std::int32_t* cols, const T* v)
    {
    for (int i = 0; i < nrow; ++i)
        for (int j = 0; j < ncol; ++j)
        triplets.emplace_back(rows[i], cols[j], v[i * ncol + j]);
    return 0;
    };

dolfinx::fem::assemble_matrix<T>(mat_add, a, bcs);

auto map0 = a.function_space(0)->dofmap()->index_map;
auto map1 = a.function_space(1)->dofmap()->index_map;
Eigen::SparseMatrix<T, Eigen::RowMajor> mat(
    map0->block_size() * (map0->size_local() + map0->num_ghosts()),
    map1->block_size() * (map1->size_local() + map1->num_ghosts()));
mat.setFromTriplets(triplets.begin(), triplets.end());
return mat;
}

PYBIND11_MODULE(eigen_csr, m)
{
m.def("assemble_matrix", &assemble_csr<PetscScalar>);
}
"""

        path = pathlib.Path(tempdir)
        open(pathlib.Path(tempdir, "eigen_csr.cpp"),
             "w").write(cpp_code + cpp_code_header)
        rel_path = path.relative_to(pathlib.Path(__file__).parent)
        p = str(rel_path).replace("/", ".") + ".eigen_csr"
        return cppimport.imp(p)
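For context, pybind11's Eigen caster exposes Eigen::SparseMatrix as a scipy.sparse matrix on the Python side, so the compiled helper can be used roughly as below; a_form and bcs are assumed to come from a DOLFINx problem set up elsewhere:

module = compile_eigen_csr_assembler_module()

# a_form: dolfinx.fem.Form, bcs: list of dolfinx.fem.DirichletBC
A = module.assemble_matrix(a_form, bcs)
print(A.shape, A.nnz)  # scipy.sparse matrix assembled from the Eigen triplets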
Example #39
def test_extra_sources():
    mod = cppimport.imp("extra_sources")
    assert(mod.square_sum(3, 4) == 25)
Example #40
import cv2
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import win32api
import win32con
import win32ui
import win32gui
import time
import cppimport

zoom = 1  # display scaling factor
img = None

if __name__ == "__main__":
    somecode = cppimport.imp("somecode")
    hwnd = win32gui.FindWindow(None, '跃动方块')  # window handle; 0 means the currently active window
    # Get the window's device context (DC) from the window handle
    hwndDC = win32gui.GetWindowDC(hwnd)
    # Create an mfcDC from the window's DC
    mfcDC = win32ui.CreateDCFromHandle(hwndDC)
    # Create a compatible DC from the mfcDC
    saveDC = mfcDC.CreateCompatibleDC()
    # Create a bitmap to hold the captured image
    saveBitMap = win32ui.CreateBitmap()
    # Get the window geometry
    left, top, right, bottom = win32gui.GetWindowRect(hwnd)
    w = int((right - left) * zoom)
    h = int((bottom - top) * zoom)
    # print("Window size:", w, h)  # image size
    # Allocate space for the bitmap
Example #41
def test_raw_extensions():
    raw_extension = cppimport.imp("raw_extension")
    assert(raw_extension.add(1,2) == 3)
Example #42
import cppimport

cppimport.imp('agents.cppmodule.agent')
cppimport.imp('agents.cppmodule.core')
Example #43
This code is released under Apache 2.0 license
http://www.apache.org/licenses/LICENSE-2.0
"""


def test_two_vectors(vector, expected_list):
    assert len(list(vector)) == len(expected_list), "Length wrong: " + str(
        len(list(vector))) + " instead of " + str(len(expected_list))
    assert list(vector) == expected_list, "Wrong result: " + str(
        list(vector)) + " instead of expected " + str(expected_list)


import cppimport
# This will pause for a moment while the module compiles
cppimport.set_quiet(False)
m = cppimport.imp("minionn")
print("Successfully imported C++ code\n")

print("Testing MPC functions, server side...")
m.init_aby("127.0.0.1", 5000, True)
print("Connected to client, testing ReLu.")

num = 5
xs = m.VectorInt([-5, -4, -3, -2, 1])

ys = m.VectorInt([])
m.relu_server(num, xs, ys)

print("Relu done, testing correctness.")
#print("Num is " + str(num))
#print("Xs is " + str(xs))
Example #44
import cppimport
apss = cppimport.imp("apss_py")

vec = [[(10, 0.1), (20, 0.9), (30, 0.5)], [(10, 0.99), (40, 0.99)],
       [(30, 2.0), (40, 0.6)]]
threshold = 0.5
for u, v in apss.all_pairs2(vec, threshold):
    print(u, v)
Example #45
__author__ = "Mikael Mortensen <*****@*****.**>"
__date__ = "2014-01-08"
__copyright__ = "Copyright (C) 2014 " + __author__
__license__  = "GNU Lesser GPL version 3 or any later version"

import cppimport

compiled_module = cppimport.imp('fenicstools.fem.common')

def getMemoryUsage(rss=True):
    return compiled_module.getMemoryUsage(rss)

def SetMatrixValue(A, val):
    compiled_module.SetMatrixValue(A, val)