Example 1
import lhsmdu

def LHS_for_guesses(sample_count):

  # we use Latin hypercube sampling to obtain initial guesses for curve fitting
  lhsmdu.setRandomSeed(None)
  alpha_tau_sample = lhsmdu.sample(2,sample_count)
  epsilon_mu_sample = lhsmdu.sample(2,sample_count)
  alpha_tau_sample = alpha_tau_sample.tolist()
  epsilon_mu_sample = epsilon_mu_sample.tolist()

  # we then adjust the variables to the correct ranges
  adjusted_sample = []
  # for AT, we adjust to between 1 and 1/30
  for var_dist in alpha_tau_sample:
    var_dist = [(1 + var*(1/30-1)) for var in var_dist]
    adjusted_sample.append(var_dist)
  # for EM, we work with theta where EM = 10^theta, which prevents
  # overweighting towards the top end of the spectrum
  # theta is adjusted to between 1 and -5, i.e. EM between 10 and 1e-5
  for var_dist in epsilon_mu_sample:
    var_dist = [(1 + var*(-5-1)) for var in var_dist]
    adjusted_sample.append(var_dist)

  # then, for each set of 4 variables, we run it through the fit and determine its cost
  # for every guess, we check to see if that's the lowest cost generated thus far
  # if it is, we store it, and at the end, that's our result
  lowest_cost = [10000000000000, [0,0,0,0]]

  for i in range(sample_count):
    test_guesses = []
    for var_dist in adjusted_sample:
      test_guesses.append(var_dist[i])
    try:
      # we have to rearrange test_guesses so that it goes alpha, epsilon, tau, mu
      tau = test_guesses[1]
      test_guesses[1] = test_guesses[2]
      test_guesses[2] = tau
      # we then adjust epsilon and mu to be 10^[their value], because it's currently theta
      test_guesses[1] = 10**(test_guesses[1])
      test_guesses[3] = 10**(test_guesses[3])

      cost = SD_curve_fit(test_guesses).cost
      print(f"{test_guesses} cost: {cost}")

      if cost < lowest_cost[0]:
        lowest_cost = [cost, test_guesses]
    except OverflowError as e:
      print("Overflow while running LHS for initial guesses: ", e)
    except ValueError as e:
      print("Residual error while running LHS for initial guesses: ", e)
  
  print(f"LHS suggests that {lowest_cost[1]} is the best set of guesses")

  return lowest_cost[1]
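SD_curve_fit is defined elsewhere in this project; all the function above relies on is that its return value exposes a .cost attribute. As an illustration of how the guesses produced above could be consumed, here is a minimal stand-in that wraps scipy.optimize.least_squares (whose result object does expose .cost); the model and data below are toy placeholders, not the project's actual model.

import numpy as np
from scipy.optimize import least_squares

# toy data standing in for the real observations (illustrative only)
x_data = np.linspace(0.0, 10.0, 50)
y_data = 2.0 * np.exp(-0.3 * x_data) + 0.5

def SD_curve_fit(initial_guesses):
  # toy four-parameter model in the order expected above: alpha, epsilon, tau, mu
  def residuals(params):
    alpha, epsilon, tau, mu = params
    return alpha * np.exp(-epsilon * x_data) + tau * x_data + mu - y_data

  # least_squares returns an OptimizeResult whose .cost is
  # 0.5 * sum(residuals**2), which is what LHS_for_guesses compares
  return least_squares(residuals, x0=initial_guesses)

best_guess = LHS_for_guesses(sample_count=20)
print(SD_curve_fit(best_guess).cost)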
Example 2
import lhsmdu

lhsmdu.setRandomSeed(None)
samples = lhsmdu.sample(1, 100)
# initially, it is generated between 0 and 1
# we need to change it to a distribution between -100 and 100

samples = samples.tolist()
print(samples[0])

# rescale each value from [0, 1] to [-100, 100]
adj = [(-100 + 200 * s) for s in samples[0]]
print(adj)
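The same rescaling can also be done directly on the array returned by lhsmdu.sample before converting to a list; a minimal sketch of that variant, where the lo/hi values are just the ones from this example and the affine map lo + x*(hi - lo) generalises to any range:

import numpy as np
import lhsmdu

lhsmdu.setRandomSeed(None)
samples = np.asarray(lhsmdu.sample(1, 100))  # shape (1, 100), values in [0, 1]

lo, hi = -100.0, 100.0
adj = lo + samples * (hi - lo)  # affine map from [0, 1] onto [lo, hi]
print(adj[0])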
Example 3
        squared_sum += 100
    # penalise model outputs that fall outside the observed range of M
    if np.any(M_model > np.max(range_M)):
        squared_sum += 100
    if np.any(M_model < np.min(range_M)):
        squared_sum += 100

    # Step 5: add the sum of squared residuals to the penalty terms
    squared_sum += np.sum((yG_model - yG_vec)**2) + np.sum((yI_model - yI_vec)**2)

    return squared_sum


## Hypercube set up
randSeed = 2  # random seed of choice
# Latin Hypercube Sampling with multi-dimensional uniformity
lhsmdu.setRandomSeed(randSeed)
start = np.array(lhsmdu.sample(18, 4))  # shape: (parameters, samples)

para, samples = start.shape

## intervals for the parameters
para_int = [0, 500]

minimum = (np.inf, None)

# Bounds for the model
bound_low = np.array(
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0.1])
bound_upp = np.repeat(np.inf, para)
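The fragment above stops before the optimisation loop itself. A minimal sketch of how the LHS starting points, the common parameter interval, and the bounds could be tied together with scipy.optimize.minimize, continuing from the variables defined above; cost_function is a stand-in name for the (partially shown) cost function, and its signature cost_function(params) is an assumption:

from scipy.optimize import minimize

for j in range(samples):
    # scale the j-th LHS sample from [0, 1] into the common parameter interval
    guess = para_int[0] + start[:, j] * (para_int[1] - para_int[0])

    # local optimisation from this start point, respecting the parameter bounds
    res = minimize(cost_function, guess, method='L-BFGS-B',
                   bounds=list(zip(bound_low, bound_upp)))

    # keep the lowest-cost result found across all starting points
    if res.fun < minimum[0]:
        minimum = (res.fun, res.x)

print("best cost:", minimum[0])
print("best parameters:", minimum[1])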
Example 4
# Perform Latin hypercube sampling across the desired parameter space of disc properties.
# Specify the desired params and their corresponding upper and lower bounds.
# Resultant file setups will be written into a 'fresh_batch/' subdirectory, along with a
# corresponding csv containing the setups' params.

import numpy as np
import matplotlib.pyplot as plt
import lhsmdu
import os
import shutil

#set random seed
lhsmdu.setRandomSeed(111)

#param limits
Rp_lims = np.asarray([20.0, 35.0])  # initial planet semi-major axis, AU
iRp = 0
Mpl_lims = np.asarray([1.0, 15.0]) * 0.0009543  # initial planet mass, Solar masses
iMpl = 1
a_lims = np.asarray([100.0, 250.0])  # initial binary separation, AU
ia = 2
Ms1_lims = np.asarray([0.85, 1.2])  # Primary star mass, Solar masses
iMs1 = 3
Ms2_lims = np.asarray([0.08, 0.4])  # Secondary star mass, Solar masses
iMs2 = 4
alpha_lims = np.asarray([1e-4, 1e-2])  # alpha viscosity limits
ialpha = 5
pindex_lims = np.asarray([0.0, 1.5])  # Surface density profile exponent
ipindex = 6
H_lims = np.asarray([0.025, 0.1])  # Disc aspect ratio