Esempio n. 1
0
def _append_or_create(csv_path, values):
    """Append `values` as an 'output' column to csv_path, creating the file if absent.

    Bug fix: the original used a bare `except:` here, which silently swallowed
    every error (including KeyboardInterrupt); only the "file missing / empty"
    cases are expected, so catch exactly those.
    """
    print(csv_path)
    new_df = pd.DataFrame(values, columns=['output'])
    try:
        prev = pd.read_csv(csv_path)
    except (FileNotFoundError, pd.errors.EmptyDataError):
        # First run for this level: start a fresh file.
        new_df.to_csv(csv_path, index=False)
    else:
        combined = pd.concat([new_df, prev])
        combined.to_csv(csv_path, index=False)
        print(len(combined))


def _parallel_mc(processes, path_stem, calc_formula, l, M, build_output, variable_name, sigma_function, interp_fn, angles_fn, Lmin, Lmax, wavetype, dictionary_type, iteration, normalisation_factor):
    """
    Split the tasks so the algorithm can be parallelised, then collect the output.

    Spawns `processes` workers, each running
    `calc_formula(path, in_queue, out_queue)` in its own scratch directory
    `path_stem + str(i) + '/'`.  Workers read task tuples from `in_queue`
    until they see the 'stop' sentinel and push (fine, coarse) result pairs
    onto `out_queue`.

    Returns a numpy array with the six MLMC sums over the non-NaN samples:
    [sum(Pf-Pc), sum((Pf-Pc)**2), sum((Pf-Pc)**3), sum((Pf-Pc)**4),
     sum(Pf), sum(Pf**2)].

    NOTE(review): relies on module-level `logfile` and the `mlmc` helper
    module; `dictionary_type` is accepted for interface compatibility but is
    not used in this variant.
    """
    # Launch the worker pool; each worker gets its own working directory.
    in_queue = mp.Queue()
    out_queue = mp.Queue()
    future_res = []
    for i in range(processes):
        path = path_stem + str(i) + '/'
        # exist_ok avoids the check-then-create race of os.path.exists + makedirs
        os.makedirs(path, exist_ok=True)
        future_res.append(mp.Process(target=calc_formula, args=(path, in_queue, out_queue)))
        future_res[-1].start()

    # Enqueue one task tuple per requested sample ...
    for j in range(iteration):
        in_queue.put((l, M, build_output, sigma_function, variable_name, Lmin, Lmax, wavetype, interp_fn, angles_fn, normalisation_factor))
    # ... followed by one stop sentinel per worker so every process exits.
    for i in range(processes):
        in_queue.put('stop')

    # Collect output, logging progress every 1000 samples.
    results = []
    for i in range(iteration):
        if (i + 1) % 1000 == 0:
            print(i)
            mlmc.write(logfile, "%f" % (i))
            mlmc.write(logfile, "\n")
        results.append(out_queue.get())

    outputf = [f[0] for f in results]  # fine-level samples
    outputc = [f[1] for f in results]  # coarse-level samples

    # record output values in csv (necessary for doing inverse sampling)
    _append_or_create('output' + str(l + 1) + '.csv', outputf)
    _append_or_create('output' + str(l) + '.csv', outputc)

    # Accumulate the MLMC moment sums, skipping NaN samples.
    outputfc = 0
    outputfc2 = 0
    outputfc3 = 0
    outputfc4 = 0
    outputfsum = 0
    outputf2sum = 0
    for pf, pc in zip(outputf, outputc):
        if np.isnan(pf) or np.isnan(pc):
            print("nan")
            continue
        diff = pf - pc
        outputfc += diff
        outputfc2 += diff ** 2
        outputfc3 += diff ** 3
        outputfc4 += diff ** 4
        outputfsum += pf
        outputf2sum += pf ** 2

    sums = np.array([outputfc, outputfc2, outputfc3, outputfc4, outputfsum, outputf2sum])
    return sums
Esempio n. 2
0
def _parallel_mc(processes, path_stem, calc_formula, l, M, build_output, variable_name, sigma_function, interp_fn, angles_fn, Lmin, Lmax, wavetype, dictionary_type, iteration, normalisation_factor):
    """
    Fan the Monte Carlo work out across `processes` worker processes and
    gather their (fine, coarse) sample pairs back into the six MLMC sums.

    Workers run `calc_formula(path, task_queue, result_queue)` in their own
    scratch directory and stop when they read the 'stop' sentinel.

    NOTE(review): depends on module-level `mlmc`, `logfile` and `filename`;
    `dictionary_type` and `normalisation_factor` are accepted for interface
    compatibility but unused in this variant.
    """
    # Launch the workers, each with its own scratch directory.
    task_queue = mp.Queue()
    result_queue = mp.Queue()
    workers = []
    for worker_id in range(processes):
        workdir = path_stem + str(worker_id) + '/'
        if not os.path.exists(workdir):
            os.makedirs(workdir)
        proc = mp.Process(target=calc_formula, args=(workdir, task_queue, result_queue))
        proc.start()
        workers.append(proc)

    # One task tuple per sample, then one 'stop' sentinel per worker.
    task = (l, M, build_output, sigma_function, variable_name, Lmin, Lmax, wavetype, interp_fn, angles_fn)
    for _ in range(iteration):
        task_queue.put(task)
    for _ in range(processes):
        task_queue.put('stop')

    # Drain the result queue, logging progress every 1000 samples.
    results = []
    for i in range(iteration):
        if (i + 1) % 1000 == 0:
            print(i)
            mlmc.write(logfile, "%f" % (i))
            mlmc.write(logfile, "\n")
        results.append(result_queue.get())

    outputf = [pair[0] for pair in results]  # fine-level samples
    outputc = [pair[1] for pair in results]  # coarse-level samples

    # record output to csv file
    pd.DataFrame(outputf).to_csv(filename[:-4] + '.csv')

    # Accumulate the MLMC moment sums, skipping NaN samples.
    outputfc = 0
    outputfc2 = 0
    outputfc3 = 0
    outputfc4 = 0
    outputfsum = 0
    outputf2sum = 0
    for fine, coarse in zip(outputf, outputc):
        if np.isnan(fine):
            print("nan")
        elif np.isnan(coarse):
            print("nan")
        else:
            delta = fine - coarse
            outputfc += delta
            outputfc2 += delta ** 2
            outputfc3 += delta ** 3
            outputfc4 += delta ** 4
            outputfsum += fine
            outputf2sum += fine ** 2

    return np.array([outputfc, outputfc2, outputfc3, outputfc4, outputfsum, outputf2sum])
Esempio n. 3
0
# Accumulators for per-run MLMC results.
dellist = []
varlist = []  # NOTE(review): never appended to in this chunk -- possibly vestigial

# Timestamp used to give each run a unique log file name.
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')

# set up a unique file to store outputs
filename = "mlmc_xbeach_" + st + ".txt"
logfile = open(filename, "w+")
logfile.close()

# run MLMC algorithm
for i in range(no_runs):
    t0 = time.time()
    # Reopened in append mode each run.  NOTE(review): this handle is never
    # closed inside the loop -- confirm that is intentional.
    logfile = open(filename, "a+")#, 0)
    mlmc.write(logfile, str(no_parallel_processes))
    if init_test == True:
        # Full MLMC convergence test: returns the fitted rate parameters
        # (alpha_mod, beta, gamma), per-level costs, variances and means.
        alpha_mod, beta, gamma, cost, var1, var2, del1 = mlmc.mlmc_test(no_parallel_processes, path_stem, _parallel_mc, multilevel, wavetype, dictionary_type, M, L, N0, Lmin, Lmax, logfile, build_output, variable_name, sample, interp_fn = interp_fn, normalisation_factor = init_vol)
        # NOTE(review): vardifflist, varsing, costlist and expectedlist are not
        # initialised in this chunk (only dellist/varlist are) -- confirm they
        # are defined earlier in the file, otherwise this raises NameError.
        vardifflist.append(var1)
        varsing.append(var2)
        costlist.append(cost)
        dellist.append(del1)
        expectedlist.append(sum(del1))
    if eps_test == True:
        # these values come from the results of init_test and thus must be replaced if a new init_test run is performed
        alpha_mod = 0.921053
        beta  = 1.970078  
        gamma = 2.400694
        var1 = [5.9081e-07, 6.3646e-08, 1.9078e-08, 1.0362e-08, 1.5862e-09]
        del1 = [1.0026e-03, 1.2740e-04, 1.0285e-4, 9.3601e-05, 4.8057e-05]
        # Complexity test only: computes sample counts Ns and estimates per
        # target accuracy epsilon, reusing the hard-coded rates above.
        Ns, varlevel, Plist, P_seq_list, cost_per_epsilon = mlmc.eps_only(alpha_mod, beta, gamma, var1, del1, no_parallel_processes, path_stem, _parallel_mc, multilevel, wavetype, dictionary_type, M, L, 2, epsilon, Lmin, Lmax, logfile, build_output, variable_name, sample, interp_fn = interp_fn, angles_fn = 0, normalisation_factor = init_vol)
Esempio n. 4
0
variable_name = 'Hm0' # name of uncertain variable
dictionary_type = 'w' # dictionary where uncertain variable lives

# Accumulators for the cost and expectation of this run.
costlist = []
dellist = []

# Set up a unique, timestamped output txt file for this run.
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')

filename = "mc_xbeach_" + st + ".txt"
# Create (truncate) the log file; it is reopened in append mode below.
logfile = open(filename, "w+")
logfile.close()

t0 = time.time()
logfile = open(filename, "a+")
mlmc.write(logfile, str(no_parallel_processes))
# Bug fix: this handle was previously left open (leaked) while a second
# handle to the same file was opened further down.
logfile.close()

# run monte carlo algorithm
cost, del1 = mlmc.mlmc_test(no_parallel_processes, path_stem, _parallel_mc, multilevel, 'jonstable', dictionary_type, M, L, N0, Lmin, Lmax, logfile, build_output, variable_name, sample, interp_fn = interp_fn, angles_fn = angle_dict)
# Bug fix: t1 was never assigned in the original, so the timing lines
# below raised NameError.  Record the end time of the MC run here.
t1 = time.time()
costlist.append(cost)
dellist.append(del1)

# record cost and expectation from this one run
logfile = open(filename, "a+")
print('total time: ' + str(t1 - t0))
mlmc.write(logfile, "total time: %f" % (t1-t0))
mlmc.write(logfile, "\n")