Example #1 (score: 0)
def get_slurm_dask_client(n_workers, n_cores, n_processes,
                          memory='80GB', interface='ib0', queue='standard',
                          walltime='72:00:00', wait_seconds=30):
    """Create a dask ``Client`` backed by a SLURM-managed cluster.

    Parameters
    ----------
    n_workers : int
        Number of workers to scale the cluster to.
    n_cores : int
        Cores requested per SLURM job.
    n_processes : int
        Dask worker processes per job.
    memory : str, optional
        Memory request per job (default ``'80GB'``).
    interface : str, optional
        Network interface workers communicate over (default ``'ib0'``).
    queue : str, optional
        SLURM partition/queue name (default ``'standard'``).
    walltime : str, optional
        SLURM time limit passed via ``--time`` (default ``'72:00:00'``).
    wait_seconds : int, optional
        Seconds to sleep while SLURM starts the workers (default 30).

    Returns
    -------
    distributed.Client
        Client connected to the scaled cluster.
    """
    cluster = SLURMCluster(cores=n_cores,
                           processes=n_processes,
                           memory=memory,
                           interface=interface,
                           queue=queue,
                           job_extra=['-e slurm-%j.err', '-o slurm-%j.out',
                                      '--time={} --requeue'.format(walltime)])

    # Remove one line from the auto-generated job header; find_mem_pos
    # (defined elsewhere in this file) locates it.  Presumably this is the
    # '#SBATCH --mem=...' directive -- TODO confirm against find_mem_pos.
    header_lines = cluster.job_header.split('\n')
    mem_pos = find_mem_pos(header_lines)
    header_lines = header_lines[:mem_pos] + header_lines[mem_pos + 1:]
    cluster.job_header = '\n'.join(header_lines)
    print(cluster.job_script())

    # Request n_workers workers, then give SLURM a grace period to start
    # them before connecting the client.
    cluster.scale(n_workers)
    time.sleep(wait_seconds)

    client = Client(cluster)
    print(client.scheduler_info())

    return client
Example #2 (score: 0)
    # NOTE(review): this is the tail of a function whose `def` line is not
    # visible in this chunk -- `solver` and `true_model` are presumably its
    # parameters or enclosing-scope objects; confirm against the full file.
    # Generate synthetic receiver data from true model.
    # `solver.forward` appears to return a 3-tuple of which only the first
    # element (the receiver data object) is kept.
    true_d, _, _ = solver.forward(m=true_model.m)

    # Return the raw array backing the receiver-data object.
    return true_d.data


# Build a SLURM-backed dask cluster: 2 cores / 10 GB per job, on the
# 'standard' queue, communicating over the 'ib0' (InfiniBand) interface.
cluster = SLURMCluster(cores=2,
                       memory="10GB",
                       queue="standard",
                       interface="ib0",
                       job_extra=['--time=72:00:00 --requeue'])
# Remove one line from the generated job header; find_mem_pos (defined
# elsewhere) locates it -- presumably the '#SBATCH --mem=...' directive,
# TODO confirm.
header_lines = cluster.job_header.split('\n')
mem_pos = find_mem_pos(header_lines)
header_lines = header_lines[:mem_pos] + header_lines[mem_pos + 1:]
cluster.job_header = '\n'.join(header_lines)
print(cluster.job_script())
print("Waiting for workers")
# Request 18 workers and connect a client to the cluster.
cluster.scale(18)
client = Client(cluster)

# Get parameters.
# NOTE(review): file handle is never closed here -- a `with` block would be
# safer, left unchanged to keep this chunk byte-identical.
js = open('parameters.json')
par = json.load(js)

# Load the data
# Raw float32 shot records; the (6559, 369, 50) shape is hard-coded --
# presumably (samples, receivers, shots) or similar, verify against the
# data-generation step.
shots = numpy.fromfile('shots.file', dtype=numpy.float32)
shots = numpy.reshape(shots, (6559, 369, 50))

# Set up source/receiver data and geometry.
# One coordinate row per shot, with as many columns as model dimensions.
src_coordinates = numpy.empty((par['nshots'], len(par['shape'])))