Пример #1
0
def llh_time():
    """Benchmark the GARCH log-likelihood on NumPy arrays; returns the timing."""
    returns, params, expected_llh = garch_data()
    returns, params = np.array(returns), np.array(params)
    return benchmark(lambda: garchLLH(returns, params), args.n, expected_llh)
Пример #2
0
def llh_time():
    """Benchmark the GARCH log-likelihood on float32 CuPy arrays.

    Prints the measured time and returns it.
    """
    returns, params, expected_llh = garch_data()
    returns = cp.array(returns, dtype='float32')
    params = cp.array(params, dtype='float32')
    t = benchmark(lambda: garchLLH(returns, params), args.n, expected_llh)
    print('time:', t)
    return t
Пример #3
0
def run_models(modelpath, run_sat=False, run_maxsat_one=False, run_maxsat_all=False, run_maxsat_part=False,
               run_maxsat_part_auto=False, timeout=180, repeat=5):
  """Benchmark every sat_*.als model in modelpath with its table_* counterpart.

  modelpath: directory holding the generated Alloy models.
  run_sat: when True, benchmark the plain SAT models first.
  timeout, repeat: forwarded to benchmark().
  """
  models = [m for m in os.listdir(modelpath)
            if m.startswith("sat") and m.endswith(".als")]

  # Problem name is the filename with the "sat_" prefix and ".als" suffix stripped.
  problems = [m[len("sat_"):-len(".als")] for m in models]
  sat_files = [path.join(modelpath, m) for m in models]
  table_files = [path.join(modelpath, m.replace("sat", "table")) for m in models]
  # NOTE: the original also accumulated tag_* paths into a tag_files list that
  # was never read; that dead accumulator has been removed.

  if run_sat:
    benchmark(problems, sat_files, timeout=timeout, repeat=repeat)
  # print("====================\nTable-based\n====================")
  benchmark(problems, None, table_files, run_maxsat_one, run_maxsat_all,
            run_maxsat_part, run_maxsat_part_auto, timeout, repeat)
Пример #4
0
def main():
    """Load the datasets, then time each workload and print a timing table."""
    # TBD use sys.argv[0] for num_iter or dataset
    print("loading..")
    dfs = unix_time(load)["return"]

    print("name,real,user,sys")
    print("------------------")

    # Same workloads, same order as before — just driven by one loop.
    for workload in (analytics, search, search_concat, discovery):
        benchmark(workload, init_pd, dfs, num_iter=3)
Пример #5
0
def run(outpath, run_sat=False, run_maxsat_one=False, run_maxsat_all=False, run_maxsat_part=False,
        run_maxsat_part_auto=False, timeout=180, repeat=5):
  """Generate SAT / table-based / tag-based Alloy models for each parameter
  tuple, write them under outpath, and benchmark them.

  outpath: directory receiving the generated .als files.
  run_sat: when True, also benchmark the plain SAT encodings first.
  timeout, repeat: forwarded to benchmark().
  """
  def write_model(kind, problem, content):
    # Write one model to <outpath>/<kind>_<problem>.als; return the path
    # so callers can collect it for benchmarking.
    filename = path.join(outpath, f"{kind}_{problem}.als")
    with open(filename, "w") as f:
      f.write(content)
    return filename

  params = [
    (7, 28, 6),
    (7, 30, 6),
    (8, 30, 7),
    (8, 32, 7),
    # (9, 34, 8),
  ]
  min_p = 3
  max_p = 7

  problems = []
  table_files = []
  sat_files = []

  for tag, p, tab in params:
    problem = f"{tag}_{p}_{tag}_{tab}_{min_p}_{max_p}"
    problems.append(problem)

    sat, table_based, tag_based = generate(tag, p, tag, tab, min_p, max_p)

    sat_files.append(write_model("sat", problem, sat))
    table_files.append(write_model("table", problem, table_based))
    # The tag-based model is written but never benchmarked below, so its
    # path is not collected (the original kept a dead tag_files list).
    write_model("tag", problem, tag_based)

  if run_sat:
    benchmark(problems, sat_files, timeout=timeout, repeat=repeat)
  # print("====================\nTable-based\n====================")
  benchmark(problems, None, table_files, run_maxsat_one, run_maxsat_all,
            run_maxsat_part, run_maxsat_part_auto, timeout, repeat)
Пример #6
0
def run(outpath,
        run_sat=False,
        run_maxsat_one=False,
        run_maxsat_all=False,
        run_maxsat_part=False,
        run_maxsat_part_auto=False,
        timeout=180,
        repeat=5):
    """Generate task-scheduling SAT/MaxSAT Alloy models for each parameter
    tuple, write them under outpath, and benchmark them."""
    params = [
        # (30, 20, 6, 3, 3, 2),
        (34, 20, 6, 3, 3, 2),
        (38, 20, 6, 3, 3, 2),
        (42, 20, 6, 3, 3, 2),
        (46, 20, 6, 3, 3, 2),
        (50, 20, 6, 3, 3, 2),
        (52, 20, 6, 3, 3, 2),
    ]
    problems = []
    maxsat_files = []
    sat_files = []

    for spec in params:
        # Problem id is the underscore-joined parameter tuple.
        problem = "_".join(str(v) for v in spec)
        problems.append(problem)

        sat, maxsat = generate(*spec)

        # Write the SAT model first, then the MaxSAT model, collecting paths.
        for prefix, content, bucket in (("sat", sat, sat_files),
                                        ("maxsat", maxsat, maxsat_files)):
            filename = path.join(outpath, f"{prefix}_{problem}.als")
            bucket.append(filename)
            with open(filename, "w") as f:
                f.write(content)

    benchmark(problems, sat_files if run_sat else None, maxsat_files,
              run_maxsat_one, run_maxsat_all, run_maxsat_part,
              run_maxsat_part_auto, timeout, repeat)
Пример #7
0
def select_features(data,
                    strategy: Strategy,
                    num_features_to_select: int,
                    iterations: int,
                    plot_step: int,
                    true_relevances: List[float] = None,
                    alpha=.001) -> Tuple[List[str], List[float]]:
    """Iteratively estimate feature relevances with HiCS contrasts, letting
    `strategy` pick which feature to sample next.

    Returns the finally selected features and the per-iteration share of
    relevant features selected. Assumes feature column names are numeric
    strings indexing into true_relevances — TODO confirm against callers.
    """
    with benchmark(strategy.name):
        features_with_target = data.columns.values  # type:List[str]
        features = [f for f in features_with_target if f != 'target']

        hics = HiCS(data,
                    alpha=alpha,
                    iterations=1,
                    categorical_features=features_with_target)

        relevance_by_feature = {
            feature: RandomVariableSamples() for feature in features
        }  # type:Dict[str, RandomVariableSamples]

        if true_relevances is None:
            true_relevances = [0] * len(features)

        true_relevance_by_feature = {
            feature: true_relevances[int(feature)] for feature in features
        }
        selected_relevant_feature_shares = []

        for iteration in range(iterations):
            items = Items(relevance_by_feature=relevance_by_feature,
                          num_features_to_select=num_features_to_select,
                          iteration=iteration,
                          true_relevance_by_feature=true_relevance_by_feature,
                          name=strategy.name)

            selected_relevant_feature_shares.append(
                items.share_selected_relevant_features)

            # Only the chosen feature matters here; its sampled value is unused.
            feature, _ = strategy.choose(items)

            relevance_by_feature[feature].append(
                hics.calculate_contrast([feature], 'target'))

            if iteration % plot_step == plot_step - 1:
                items.save_plot()

            print(
                f"Iteration {iteration}, share of relevant features selected: {items.share_selected_relevant_features}"
            )

        return items.selected_features, selected_relevant_feature_shares
Пример #8
0
def run_models(modelpath,
               run_sat=False,
               run_maxsat_one=False,
               run_maxsat_all=False,
               run_maxsat_part=False,
               run_maxsat_part_auto=False,
               timeout=180,
               repeat=5):
    """Benchmark every maxsat_*.als model in modelpath, optionally together
    with the matching sat_*.als models when run_sat is set."""
    models = [name for name in os.listdir(modelpath)
              if name.startswith("maxsat") and name.endswith(".als")]

    # Problem id is the filename minus the "maxsat_" prefix and ".als" suffix.
    problems = [name[len("maxsat_"):-len(".als")] for name in models]
    maxsat_files = [path.join(modelpath, name) for name in models]
    sat_candidates = [path.join(modelpath, name.replace("maxsat", "sat"))
                      for name in models]
    sat_files = sat_candidates if run_sat else None

    benchmark(problems, sat_files, maxsat_files, run_maxsat_one,
              run_maxsat_all, run_maxsat_part, run_maxsat_part_auto, timeout,
              repeat)
Пример #9
0
def run(outpath,
        run_sat=False,
        run_maxsat_one=False,
        run_maxsat_all=False,
        run_maxsat_part=False,
        run_maxsat_part_auto=False,
        timeout=180,
        repeat=5):
    """Generate course-assignment SAT/MaxSAT Alloy models for each
    (num_courses, num_stu) pair, write them under outpath, and benchmark them."""
    max_core = 3
    max_interests = 6
    params = [(30, 40), (40, 50), (50, 60), (60, 70), (70, 80), (80, 90),
              (90, 100)]
    problems = []
    maxsat_files = []
    sat_files = []

    for num_courses, num_stu in params:
        problem = f"{num_courses}_{num_stu}_{max_core}_{max_interests}"
        problems.append(problem)

        sat, maxsat = generate(num_courses, num_stu, max_core, max_interests)

        sat_filename = path.join(outpath, f"sat_{problem}.als")
        with open(sat_filename, "w") as out_file:
            out_file.write(sat)
        sat_files.append(sat_filename)

        maxsat_filename = path.join(outpath, f"maxsat_{problem}.als")
        with open(maxsat_filename, "w") as out_file:
            out_file.write(maxsat)
        maxsat_files.append(maxsat_filename)

    benchmark(problems, sat_files if run_sat else None, maxsat_files,
              run_maxsat_one, run_maxsat_all, run_maxsat_part,
              run_maxsat_part_auto, timeout, repeat)
Пример #10
0
import subprocess
import os
from os import path
import shutil
import signal
import sys

sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))

from util import benchmark, options

if __name__ == "__main__":
    # Parse the CLI options shared by all benchmark drivers (see util.options).
    (run_sat, run_maxsat_one, run_maxsat_all, run_maxsat_part,
     run_maxsat_part_auto, timeout, repeat, model, from_file) = options()

    # Fixed problem set; only the MaxSAT encodings are benchmarked here.
    problems = ["10_2_1", "20_3_1", "26_2_1", "26_2_2"]
    sat_files = None
    maxsat_files = ["maxsat_" + p + ".als" for p in problems]

    benchmark(problems, sat_files, maxsat_files, run_maxsat_one,
              run_maxsat_all, run_maxsat_part, run_maxsat_part_auto, timeout,
              repeat, from_file)
Пример #11
0
    return (
        -0.5 * (tf.cast(y.shape[0], tf.float32) - 1) * tf.math.log(2 * np.pi) -
        0.5 * tf.reduce_sum(tf.math.log(hts) + tf.square(y / tf.sqrt(hts))))


@tf.function
def llh_unroll(y, params):
    """GARCH log-likelihood with the variance recursion unrolled in Python.

    params holds (constant, coefficient on y^2, coefficient on previous h).
    """

    def step(ht, y2_t):
        return params[0] + params[1] * y2_t + params[2] * ht

    y2 = tf.square(y)
    # Initial conditional variance: mean of the squared series.
    ht0 = tf.reduce_mean(y2)
    variances = []
    h = ht0
    for t in range(len(y2) - 1):
        h = step(h, y2[t])
        variances.append(h)
    hts = tf.concat(([ht0], tf.stack(variances)), 0)
    n = tf.cast(y.shape[0], tf.float32)
    return (-0.5 * (n - 1) * tf.math.log(2 * np.pi) -
            0.5 * tf.reduce_sum(tf.math.log(hts) + tf.square(y / tf.sqrt(hts))))


# Time both log-likelihood implementations (llh is defined earlier in the
# file) and record the results under mode-specific keys; benchmark is
# presumably given the iteration count args.n and the expected value
# val_llh for validation — see util. Results are emitted as JSON.
t = benchmark(lambda: llh(ret, x0).numpy(), args.n, val_llh)
out['tensorflow-' + args.mode] = t

t = benchmark(lambda: llh_unroll(ret, x0).numpy(), args.n, val_llh)
out['tensorflow-' + args.mode + '-unroll'] = t

print(json.dumps(out))
Пример #12
0
def paths(S, tau, r, q, v, M, N):
    """Generate GBM price paths (TF1 graph ops) as an (M, N) tensor:
    M time steps by N simulated paths."""
    dt = tau / M
    drift = (r - q - v / 2) * dt
    vol_step = tf.sqrt(v * dt)
    # Cumulate log-increments down the time axis, then exponentiate.
    increments = drift + vol_step * tf.random_normal((M, N))
    return tf.exp(tf.log(S) + tf.cumsum(increments))


def barrier(S0, K, B, tau, r, q, v, M, N):
    """Monte-Carlo price of a knock-out barrier option (TF1 graph ops):
    paths whose minimum falls to B or below contribute zero payoff."""
    S = paths(S0, tau, r, q, v, M, N)
    alive = tf.to_float(tf.greater(tf.reduce_min(S, 0), B))
    terminal_payoff = tf.maximum(S[-1, :] - K, 0)
    discount = tf.exp(-r * tau)
    return discount * tf.reduce_mean(alive * terminal_payoff)


# TF1 session setup: in CPU mode, hide all GPUs from the session.
config = tf.ConfigProto()
if args.mode == 'cpu':
    config = tf.ConfigProto(device_count={'GPU': 0}, allow_soft_placement=True)

# Build the pricing graph once, then benchmark repeated session runs.
# benchmark is presumably given the expected value (data['val']) and a
# tolerance for validating the stochastic result — see util.
with tf.Session(config=config) as sess:
    data = barrier_data()
    barr = barrier(data['price'], data['strike'], data['barrier'], data['tau'],
                   data['rate'], data['dy'], data['vol'], data['time_steps'],
                   data['n_rep'])
    t = benchmark(lambda: sess.run(barr), data['val'], tol=data['tol'])
    out['tensorflow-v1-' + args.mode] = t

print(json.dumps(out))
Пример #13
0
    return params[0] + params[1] * y + params[2] * ht


# Squared returns drive the variance recursion.
y2 = T.square(y)
# theano.scan threads the conditional variance (seeded with ht0) through
# garch_like for each element of y2.
hts, updates = theano.scan(fn=garch_like,
                           sequences=[y2],
                           n_steps=y2.size - 1,
                           outputs_info=ht0)

# Prepend the initial variance so hts aligns with y.
hts = T.concatenate(([ht0], hts))
# Gaussian log-likelihood of y given the variance series hts.
tllh = (-0.5 * (y.size - 1) * T.log(2 * np.pi) -
        0.5 * T.sum(T.log(hts) + T.square(y / T.sqrt(hts))))
gllh = T.grad(tllh, wrt=params)  # symbolic gradient w.r.t. the parameters
llh = theano.function(inputs=[y, params], outputs=tllh)

t = benchmark(lambda: llh(ret, x0), args.n, val_llh)
out['theano-' + args.mode] = t


def unroll(tx, th0, n):
    """Unroll n steps of the garch_like recursion into an explicit symbolic
    expression, returning the stacked per-step states (initial state th0
    itself is not included)."""
    state = th0
    states = []
    for step in range(n):
        state = garch_like(tx[step], state)
        states.append(state)
    return T.stack(states)


t_unroll0 = unroll(y2, ht0, len(ret) - 1)
t_unroll = T.concatenate(([ht0], t_unroll0))
t_unroll_out = (-0.5 * (y.size - 1) * T.log(2 * np.pi) -
hermite = np.polynomial.hermite.hermgauss(M)
hermite_e = np.polynomial.hermite_e.hermegauss(M)

# coordinate-space delta shell = l/(2mu) sin(k'b)sin(kb)/(k'k)
#        alpha a          l: strength  b:displacement   alpha*delta(r-a)
LECs = [[1.5, 10], 1]

grids = [[textbookgaussgrid, LECs, 'Gauss'], [legendre, LECs, 'Legendre'],
         [hermite, LECs, 'Hermite'], [cheb, LECs, 'Chebyshev'],
         [laguerre, LECs, 'Laguerre'], [hermite_e, LECs, 'Hermite_e']]

# visualization of the Gauss mesh as a function of the number
# of points on it; lim_m->inf(<p>)=inf *BUT* lim_m->inf(median[p])<inf
#plot_grids([gauss(m, 2, 0., midpoint)[0] for m in range(10, 500, 15)])

with benchmark("Matrix inversion on Gaussian grid:"):

    # calculate phase shifts for each quadrature polynomial, i.e., momentum grid
    for grd in grids[:2]:

        k, w = grd[0][0], grd[0][1]
        pot = V_grid_k0(k, k0range, grd[1])
        tmpres = []
        kstep = 0

        for k0 in k0range:

            k[-1] = k0

            # D matrix '=' (free propagator)*k^2
            for i in range(M - 1):
Пример #15
0
    """Generate GBM price paths"""
    dt = tau / M
    g1 = (r - q - v / 2) * dt
    g2 = tf.sqrt(v * dt)
    return tf.exp(
        tf.math.log(S) + tf.cumsum(g1 + g2 * tf.random.normal((M, N))))


@tf.function
def barrier(S0, K, B, tau, r, q, v, M, N):
    """Monte-Carlo price of a knock-out barrier option: paths whose minimum
    falls to B or below contribute zero payoff."""
    S = paths(S0, tau, r, q, v, M, N)
    survived = tf.cast(tf.greater(tf.reduce_min(S, 0), B), dtype=tf.float32)
    terminal_payoff = tf.maximum(S[-1, :] - K, 0)
    return tf.exp(-r * tau) * tf.reduce_mean(survived * terminal_payoff)


data = barrier_data()


def barrier_fun():
    """Zero-argument wrapper binding the benchmark inputs to barrier()."""
    keys = ('price', 'strike', 'barrier', 'tau', 'rate', 'dy', 'vol',
            'time_steps', 'n_rep')
    return barrier(*(data[k] for k in keys))


# Benchmark against the reference value within the supplied tolerance.
t = benchmark(barrier_fun, data['val'], tol=data['tol'])
out['tensorflow-' + args.mode] = t

print(json.dumps(out))
Пример #16
0
# processor n fills rows [mpi_row_offset , mpi_row_offset + mpi_nbrrows]
mpi_row_offset = mpi_rank * int(dv / (mpi_size - 1))

# calculate only the Nev lowest eigenvalues
Nev = 4

# initialization of the two components of the Hamiltonian:
# H = mkinetic + mpotential
mpotential = np.zeros((dv, dv))
mkinetic = np.zeros((dv, dv))
mhamilton = np.zeros((dv, dv))

# writing of the Hamilton matrix contains two basic loops
# column index b and row index a; e.g.:2 particles: a=(x1,y1,z1,x2,y2,y3) and
# b=(x1',y1',z1',x2',y2',y3')
with benchmark("cpu%d: Matrix filling" % mpi_rank):
    # column index
<<<<<<< HEAD
    colidx = mpi_row_offset
    print('cpu%d: rows %d -> %d' % (mpi_rank, mpi_row_offset,
                                    mpi_row_offset + mpi_nbrrows))
    # row loop; each grid point specifies <spacedims> coordinates per particle
    for a in list(
            product(np.arange(1, N), repeat=spacedims *
                    partnbr))[mpi_row_offset:mpi_row_offset + mpi_nbrrows]:
=======
    colidx = 0
    # row loop; each grid point specifies <SPACEDIMS> coordinates per particle
    for a in product(np.arange(1, N), repeat=SPACEDIMS * PARTNBR):
>>>>>>> 925e980509de94dc4a8879b8eefe0800f1d175b7
        # row index
Пример #17
0
def test_benchmark():
    # Smoke test: the benchmark context manager should wrap a ~0.1 s sleep
    # without raising.
    with benchmark('foo'):
        time.sleep(0.1)
Пример #18
0
                    help='')
args = parser.parse_args()
out = {}


def pricepaths(S, tau, r, q, v, M, N):
    """Simulate geometric-Brownian-motion price paths on the GPU,
    returning an (M, N) CuPy array (float32 shocks)."""
    dt = tau / M
    drift = (r - q - v / 2) * dt
    vol_step = math.sqrt(v * dt)
    shocks = cp.random.randn(M, N, dtype=cp.float32)
    # Cumulate log-increments down the time axis, then exponentiate.
    log_paths = math.log(S) + cp.cumsum(drift + vol_step * shocks, 0)
    return cp.exp(log_paths)


def barrier(S0, K, B, tau, r, q, v, M, N):
    """Monte-Carlo knock-out barrier price on the GPU; the result is pulled
    back to the host and returned as a flattened NumPy array."""
    S = pricepaths(S0, tau, r, q, v, M, N)
    survived = cp.min(S, 0) > B
    terminal_payoff = cp.maximum(S[-1, :] - K, 0)
    discounted_mean = math.exp(-r * tau) * cp.mean(survived * terminal_payoff)
    return discounted_mean.get().flatten()


data = barrier_data()

# Time the full GPU pricing pipeline. The trailing [0] unwraps the
# flattened array returned by barrier(); benchmark is presumably given the
# reference price (data['val']) and a tolerance for the stochastic result.
t = benchmark(lambda: barrier(data['price'], data['strike'], data[
    'barrier'], data['tau'], data['rate'], data['dy'], data['vol'], data[
        'time_steps'], data['n_rep'])[0],
              data['val'],
              tol=data['tol'])
out['cupy'] = t

print(json.dumps(out))
Пример #19
0
    h = np.zeros_like(ret2)
    h[0] = np.mean(ret2)
    for i in range(1, len(ret)):
        h[i] = p[0] + p[1] * ret2[i - 1] + p[2] * h[i - 1]
    return h


def garchLLH(y, par):
    """Gaussian log-likelihood of the return series y under the conditional
    variance series produced by garchSim(y^2, par)."""
    h = garchSim(np.square(y), par)
    n = len(y)
    const_term = -0.5 * (n - 1) * np.log(2 * math.pi)
    return const_term - 0.5 * np.sum(np.log(h) + (y / np.sqrt(h)) ** 2)


# In numba mode, JIT-compile both the simulator and the likelihood in place.
if args.mode == 'numba':
    garchSim = jit(garchSim)
    garchLLH = jit(garchLLH)

out = {}
ret, x0, val_llh = garch_data()

# The c++ path benchmarks a cppimport-compiled extension module instead of
# the Python implementation; all other modes time garchLLH directly.
if args.mode == 'c++':
    import cppimport
    cpp_llh = cppimport.imp('cppllh')
    out['c++'] = benchmark(lambda: cpp_llh.garchLLH(ret, x0), args.n, val_llh)
    print('c++ time:', out['c++'])
else:
    out['numpy-' + args.mode] = benchmark(lambda: garchLLH(ret, x0), args.n,
                                          val_llh)
print(json.dumps(out))
Пример #20
0
                "city":
                data['informacion_general']['entidad_federativa_nacimiento']
                ['nom_agee'],
                "state_code":
                str(data['informacion_general']
                    ['entidad_federativa_nacimiento']['cve_agee'])
            }
            # CURP and RFC Fix
            data['informacion_general']['curp'] = generate.GenerateCURP(
                **kwargs).data
            # RFC
            data['informacion_general']['rfc'] = generate.GenerateRFC(
                **kwargs).data
            return data

        with benchmark('data faker'):

            document_samples = Seed(
                name='samples',
                props={'s': number_of_samples * years},
                state={'count1': 0},
                next_state=lambda seed: {'count1': seed.state.count + 1},
                initial_value=lambda seed, node: number_of_samples * years,
                next_value=lambda seed, node: seed.value
                if seed.state.count1 % seed.props.s != 0 else seed.reset(node))

            personal_info = Seed(
                name='informacion_personal',
                props={'y': years + 1},
                state={'count2': 1},
                next_state=lambda seed: {'count2': seed.state.count2 + 1},
Пример #21
0
    # STATUS: the potential matrix is assumed to be diagonal (future: OPE+B => potential has non-zero offdiagonal elements)
    mpotential = np.diag(
        calc_potential(n_part, dim_space, spec_pot, coordOP_evSYS))

    mhamilton = (mkinetic + mpotential)
    return mhamilton


""" main section of the program
    1. set up the Hamiltonian
    2. full Diagonalization
    3. approximate Diagonalization (extract only the N_EIGENV lowest EV's)
"""

ham = []
with benchmark("Matrix filling"):
    ham = calc_mhamilton(PARTNBR, SPACEDIMS, BASIS_DIM, BASIS_SINE,
                         POT_HO_INTER)

sparsimonius = True  # False '=' full matrix diagonalization; True '=' approximate determination of the lowest <N_EIGEN> eigenvalues

if sparsimonius:
    with benchmark("Diagonalization -- sparse matrix structure (DVR)"):
        # calculate the lowest N eigensystem of the matrix in sparse format
        evals_small, evecs_small = eigsh(coo_matrix(ham),
                                         N_EIGENV,
                                         which='SA',
                                         maxiter=5000)
        print(
            'Hamilton ( %d X %d ) matrix: %d/%d = %3.2f%% non-zero entries\n' %
            (np.shape(ham)[0], np.shape(ham)[1], coo_matrix(ham).nnz,