Example #1
import os
import time

import numpy as np

import scheduler  # project module providing ModelScheduler (import path assumed)
# createBatch is likewise assumed to be importable from the surrounding project


def run_batch(problem,
              sample,
              nprocs=1,
              sleep_time=0.2,
              path="run_output",
              keep_output=True):

    # Get base working directory
    baseDir = os.getcwd()

    # Initialize model run scheduler
    runscheduler = scheduler.ModelScheduler(nprocs=nprocs,
                                            sleep_time=sleep_time,
                                            keep_output=keep_output)

    setup = problem["setup"]
    measure = problem["measure"]

    # Unpack parameter and design variable names
    params = problem["params"]
    design_vars = problem["design_vars"]

    # Unpack design variable input data
    x = np.array(problem["input_data"])

    if len(x.shape) < 2:
        x = x[:, None]

    # One dictionary of design variables per input point
    var_dicts = [{design_vars[m]: x[n, m]
                  for m in range(x.shape[1])} for n in range(x.shape[0])]
    param_dict = {params[m]: sample[m] for m in range(len(params))}

    print("Evaluating...")

    tmax = var_dicts[0]["tmax"]
    tmeas = var_dicts[0]["tmeas"]

    # Initialize output buffers
    qoi = np.empty((np.arange(tmeas, tmax + 1, tmeas).shape[0], x.shape[0]))
    c_err = np.empty((np.arange(tmeas, tmax + 1, tmeas).shape[0], x.shape[0]))

    batch = createBatch(setup, "hemocell_batch", var_dicts, param_dict, path,
                        measure)
    runscheduler.enqueueBatch(batch)

    # Run batch and retrieve quantities of interest
    evaluated = False
    while not evaluated:
        if runscheduler.pollBatch(batch) is not None:
            for m, run in enumerate(batch):
                qoi[:, m], c_err[:, m] = run.output

            evaluated = True

        runscheduler.pushQueue()
        time.sleep(sleep_time)

    return qoi, c_err
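A minimal usage sketch for run_batch, assuming a hypothetical problem dictionary: the key names mirror those read inside the function, while the setup and measure callables, parameter names, and numeric values are placeholders.

# Hypothetical problem definition; all callables and values are placeholders.
problem = {
    "setup": my_setup,          # placeholder callable forwarded to createBatch
    "measure": my_measure,      # placeholder callable forwarded to createBatch
    "params": ["k_link", "k_bend"],
    "design_vars": ["shear_rate", "tmax", "tmeas"],  # must include tmax and tmeas
    "input_data": [[50.0, 10000, 2000]],
}

qoi, c_err = run_batch(problem, sample=[15.0, 80.0], nprocs=4)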
Example #2
    # INITIALIZE CLIENT
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    while True:
        try:
            s.connect((server_host, port))
            break
        except socket.error as err:
            print(err)
            print("Will try to connect again in 5 seconds")
            time.sleep(5)

    print("Successfully connected to server")

    runscheduler = scheduler.ModelScheduler(nprocs, keep_output=keep_output)

    # Send client data to server
    data = {}
    data["nprocs"] = nprocs

    # Serialize the client info with dill and append the message delimiter
    message = dill.dumps(data)
    message += b"\r\n\r\n\r\n"
    # Block until the socket is writable, then send the full message
    _, wlist, _ = select.select([], [s], [])
    s.sendall(message)

    # One slot per local process to keep track of active runs
    runs = [None for n in range(nprocs)]

    # Buffers for incoming socket data and parsed messages
    buf = b''
    messages = []
Example #3
import time

import numpy as np
import tqdm

import scheduler  # project module providing ModelScheduler/ClusterScheduler (import path assumed)
# createBatch is likewise assumed to be importable from the surrounding project


def run_external(problem,
                 samples,
                 nprocs=1,
                 sleep_time=0.2,
                 path="run_output",
                 keep_output=True):

    model_type = problem.get("model_type", None)

    if model_type == "external":
        # Initialize model run scheduler
        runscheduler = scheduler.ModelScheduler(nprocs=nprocs,
                                                sleep_time=sleep_time,
                                                keep_output=keep_output)
    elif model_type == "external_cluster":
        runscheduler = scheduler.ClusterScheduler(nprocs=nprocs,
                                                  sleep_time=sleep_time,
                                                  keep_output=keep_output)

        # Initialize the server and listen for connection requests
        runscheduler.server_bind()
        runscheduler.server_listen()

    else:
        print("No valid model_type specified in problem.")
        return None

    setup = problem["setup"]
    measure = problem["measure"]

    # Unpack parameter and design variable names
    params = problem["params"]
    design_vars = problem["design_vars"]

    # Unpack design variable data
    x = np.array(problem["input_data"])

    if len(x.shape) < 2:
        x = x[:, None]

    # One dictionary of design variables per input point
    var_dicts = [{design_vars[m]: x[n, m]
                  for m in range(x.shape[1])} for n in range(x.shape[0])]

    param_dicts = [{params[m]: samples[n, m]
                    for m in range(len(params))}
                   for n in range(samples.shape[0])]

    print("Evaluating...")

    # Initialize output buffers
    qoi = np.empty((samples.shape[0], x.shape[0]))
    c_err = np.empty((samples.shape[0], x.shape[0]))

    evaluated = np.zeros(samples.shape[0], dtype=bool)

    batches = [
        createBatch(setup, "batch_%i" % (n), var_dicts, param_dicts[n], path,
                    measure) for n in range(samples.shape[0])
    ]

    # Enqueue all sample batches
    for batch in batches:
        runscheduler.enqueueBatch(batch)

    # Initialize progress bar
    pbar = tqdm.tqdm(total=len(batches))

    # Run all batches and retrieve quantities of interest
    while not np.all(evaluated):
        for n, batch in enumerate(batches):
            if evaluated[n] or runscheduler.pollBatch(batch) is None:
                continue

            for m, run in enumerate(batch):
                qoi[n, m], c_err[n, m] = run.output

            evaluated[n] = True
            pbar.update(1)

        runscheduler.pushQueue()
        time.sleep(sleep_time)

    if model_type == "external_cluster":
        runscheduler.close()

    # Close progress bar
    pbar.close()

    return qoi, c_err
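A sketch of a corresponding call to run_external, again with hypothetical keys and callables; samples holds one parameter vector per row, and qoi and c_err come back with shape (n_samples, n_inputs).

# Hypothetical problem definition for the "external" model type.
problem = {
    "model_type": "external",
    "setup": my_setup,          # placeholder callable forwarded to createBatch
    "measure": my_measure,      # placeholder callable forwarded to createBatch
    "params": ["k_link", "k_bend"],
    "design_vars": ["shear_rate"],
    "input_data": [50.0, 100.0],
}

samples = np.array([[15.0, 80.0],
                    [20.0, 90.0]])

qoi, c_err = run_external(problem, samples, nprocs=4)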
Example #4
    def __init__(self,
                 problem,
                 invP0=5,
                 invPa=10,
                 alpha_goal=0.44,
                 xi_init=1.0,
                 lamb_init=0.6,
                 tol=0.1,
                 max_stages=20,
                 nprocs=1,
                 sleep_time=0.2,
                 logstep=50,
                 logpath="ABCSubSim_log.pkl"):

        # Store keyword arguments
        self.invP0 = invP0
        self.invPa = invPa
        self.alpha_goal = alpha_goal
        self.xi_init = xi_init
        self.lamb_init = lamb_init
        self.tol = tol
        self.max_stages = max_stages
        self.sleep_time = sleep_time
        self.logstep = logstep
        self.logpath = logpath

        self.model_type = problem.get("model_type", None)

        # Unpack model functions according to model type
        if self.model_type == "external":
            # Initialize model run scheduler
            self.runscheduler = scheduler.ModelScheduler(nprocs=nprocs,
                                                         sleep_time=sleep_time)

            self.setup = problem["setup"]
            self.measure = problem["measure"]
        elif self.model_type == "external_cluster":
            # Initialize model run scheduler
            self.runscheduler = scheduler.ClusterScheduler(
                nprocs=nprocs, sleep_time=sleep_time)

            self.sleep_time = sleep_time

            self.setup = problem["setup"]
            self.measure = problem["measure"]

            # Initialize the server and listen for connection requests
            self.runscheduler.server_bind()
            self.runscheduler.server_listen()
        elif self.model_type == "python":
            self.evaluate = problem["evaluate"]
        else:
            print("No valid model_type specified in problem.")
            return None

        # Unpack parameter and design variable names
        self.model_params = problem["model_params"]
        self.design_vars = problem["design_vars"]

        # Unpack distance measure
        self.distance = problem["distance"]

        # Unpack experimental data and measurement errors
        self.x = np.array(problem["input_data"])

        if len(self.x.shape) < 2:
            self.x = self.x[:, None]

        self.y = np.array(problem["output_data"])
        self.y_err = np.array(problem["data_errors"])

        # Unpack prior functions and samplers
        self.priors = problem["priors"]
        self.samplers = problem["samplers"]

        # One dictionary of design variables per input point
        self.var_dicts = [{
            self.design_vars[m]: self.x[n, m]
            for m in range(self.x.shape[1])
        } for n in range(self.x.shape[0])]

        # Full prior function
        #self.full_prior = lambda sample: np.prod([self.priors[n](sample[n]) for n in range(len(self.priors))])

        return
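A hypothetical construction of the sampler this __init__ belongs to (the class name ABCSubSim is only inferred from the default log path); the problem keys mirror those unpacked above, with placeholder callables and values.

problem = {
    "model_type": "python",
    "evaluate": my_model,               # placeholder evaluation callable
    "model_params": ["k_link", "k_bend"],
    "design_vars": ["shear_rate"],
    "distance": my_distance,            # placeholder distance measure
    "input_data": [50.0, 100.0],
    "output_data": [0.31, 0.42],
    "data_errors": [0.02, 0.03],
    "priors": [prior_1, prior_2],       # one prior per model parameter (assumed)
    "samplers": [sampler_1, sampler_2], # one prior sampler per model parameter (assumed)
}

abc = ABCSubSim(problem, alpha_goal=0.44, tol=0.1, nprocs=4)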
Example #5
    def __init__(self,
                 problem,
                 likelihood_function=normal_likelihood,
                 p_scheduler=adaptive_p,
                 cov_scale=0.2,
                 COVtol=1.0,
                 nburn=0,
                 lmax=np.inf,
                 nprocs=1,
                 sleep_time=0.2,
                 logstep=50,
                 logpath="TMCMC_log.pkl",
                 output_directory="TMCMC_output",
                 keep_output=True):

        # Store keyword arguments
        self.likelihood_function = likelihood_function
        self.p_scheduler = p_scheduler
        self.cov_scale = cov_scale
        self.COVtol = COVtol
        self.nburn = nburn
        self.lmax = lmax
        self.logstep = logstep
        self.logpath = logpath
        self.output_directory = output_directory

        self.model_type = problem.get("model_type", None)

        # Unpack model functions according to model type
        if self.model_type == "external":
            # Initialize model run scheduler
            self.runscheduler = scheduler.ModelScheduler(
                nprocs=nprocs, sleep_time=sleep_time, keep_output=keep_output)

            self.sleep_time = sleep_time

            self.setup = problem["setup"]
            self.measure = problem["measure"]
        elif self.model_type == "external_cluster":
            # Initialize model run scheduler
            self.runscheduler = scheduler.ClusterScheduler(
                nprocs=nprocs, sleep_time=sleep_time, keep_output=keep_output)

            self.sleep_time = sleep_time

            self.setup = problem["setup"]
            self.measure = problem["measure"]

            # Initialize the server and listen for connection requests
            self.runscheduler.server_bind()
            self.runscheduler.server_listen()
        elif self.model_type == "python":
            self.evaluate = problem["evaluate"]
        else:
            print("No valid model_type specified in problem.")
            return None

        # Unpack parameter and design variable names
        self.model_params = problem["model_params"]
        self.error_params = problem["error_params"]
        self.design_vars = problem["design_vars"]

        # Unpack experimental data and measurement errors
        self.x = np.array(problem["input_data"])

        if len(self.x.shape) < 2:
            self.x = self.x[:, None]

        self.y = np.array(problem["output_data"])
        self.y_err = np.array(problem["data_errors"])

        # Modelling error per data point
        self.model_errors = problem["error_mapping"]

        # Unpack prior functions and samplers
        self.model_prior = problem["model_prior"]
        self.model_sampler = problem["model_sampler"]

        self.error_prior = problem["error_prior"]
        self.error_sampler = problem["error_sampler"]

        # Construct full prior function
        self.full_prior = lambda sample: self.model_prior(sample[:len(self.model_params)]) * \
                                         self.error_prior(sample[len(self.model_params):])

        # One dictionary of design variables per input point
        self.var_dicts = [{
            self.design_vars[m]: self.x[n, m]
            for m in range(self.x.shape[1])
        } for n in range(self.x.shape[0])]
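Similarly, a hypothetical construction of the TMCMC sampler (class name inferred from the default log path); every key shown is read in the __init__ above, and all callables and values are placeholders.

problem = {
    "model_type": "python",
    "evaluate": my_model,                 # placeholder evaluation callable
    "model_params": ["k_link", "k_bend"],
    "error_params": ["sigma"],
    "design_vars": ["shear_rate"],
    "input_data": [50.0, 100.0],
    "output_data": [0.31, 0.42],
    "data_errors": [0.02, 0.03],
    "error_mapping": ["sigma", "sigma"],  # error parameter assigned to each data point (assumed)
    "model_prior": model_prior,           # placeholder prior and sampler callables
    "model_sampler": model_sampler,
    "error_prior": error_prior,
    "error_sampler": error_sampler,
}

tmcmc = TMCMC(problem, nburn=100, nprocs=4)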