Example #1
def multi_run(runs):
    """
    Run several simulations

    Arg:
    runs - A list of dictionaries.
        Dic; "processes"; The number of processes
             "sdp"; The event control file parameters, as attributes on an
                    object

    """
    for run in runs:
        # get a temporary file
        (handle, control_file) = tempfile.mkstemp(
            '.py', 'multi_run_generated_')
        os.close(handle)

        # Build the base eqrm_flags.
        flags = create_parameter_data(run["sdp"])
        num_nodes = run["processes"]
        # Write an EQRM control file, then do an mpi run call
        eqrm_flags_to_control_file(control_file, flags)
        (cluster, _) = util.get_hostname()

        cmd = mpi_command(cluster, num_nodes, control_file)
        subprocess.call(cmd)

        # clean up
        os.remove(control_file)
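
A minimal usage sketch for this variant; `setdata` below is an assumed module (or any object) exposing EQRM control-file parameters as attributes:

# Hypothetical usage: each dictionary describes one simulation.
import setdata   # assumed EQRM control module; any object with the attributes works

runs = [
    {"processes": 4, "sdp": setdata},    # first simulation on 4 processes
    {"processes": 16, "sdp": setdata},   # second simulation on 16 processes
]
multi_run(runs)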
Example #2
def multi_run(runs):
    """
    Run several simulations
    
    Arg:
    runs - A list of dictionaries. 
        Dic; "processes"; The number of processes
             "sdp"; The event control file parameters
             
    """
    for run in runs:
        control_file = 'temp.py'
        # Build the base eqrm_flags.
        flags = create_parameter_data(run["sdp"])
        num_nodes = run["processes"]
        # Write an EQRM control file, then do an mpi run call
        eqrm_flags_to_control_file(control_file, flags)
        (cluster, _) = util.get_hostname()
    
        cmd = mpi_command(cluster, num_nodes, control_file)
        subprocess.call(cmd)
Example #3
    def save_analysis_objects(self, output_dir, site_tag):
        (event_set, event_activity, source_model, sites, motion,
         eqrm_flags) = self.create_analysis_objects()

        # 2. Save test objects to file
        event_set.save(os.path.join(output_dir, '%s_event_set' % site_tag))
        event_activity.save(os.path.join(output_dir,
                                         '%s_event_set' % site_tag))
        source_model.save(os.path.join(output_dir, '%s_event_set' % site_tag))
        sites.save(os.path.join(output_dir, '%s_sites' % site_tag))
        # Motion is a numpy.ndarray, so save it manually
        os.mkdir(os.path.join(output_dir, '%s_motion' % site_tag))
        save(
            os.path.join(output_dir, '%s_motion' % site_tag, 'bedrock_SA.npy'),
            motion)
        save(
            os.path.join(output_dir, '%s_motion' % site_tag,
                         'atten_periods.npy'), eqrm_flags['atten_periods'])
        # ... and eqrm_flags
        eqrm_flags_to_control_file(os.path.join(output_dir, 'eqrm_flags.py'),
                                   eqrm_flags)
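
A hedged sketch of how this method might be called and what it writes; `builder` and the argument values are assumptions, while the paths follow directly from the save calls above:

# Hypothetical usage: 'builder' is assumed to be an instance of the class
# that defines save_analysis_objects / create_analysis_objects.
builder.save_analysis_objects(output_dir='./saved', site_tag='demo')

# Per the code above, './saved' then contains:
#   demo_event_set    - event set, event activity and source model saves
#   demo_sites        - sites save
#   demo_motion/      - bedrock_SA.npy and atten_periods.npy
#   eqrm_flags.py     - written by eqrm_flags_to_control_file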
Example #4
    def save_analysis_objects(self, output_dir, site_tag):
        (event_set,
         event_activity,
         source_model,
         sites,
         motion,
         eqrm_flags) = self.create_analysis_objects()

        # 2. Save test objects to file
        event_set.save(os.path.join(output_dir, '%s_event_set' % site_tag))
        event_activity.save(os.path.join(output_dir, '%s_event_set' % site_tag))
        source_model.save(os.path.join(output_dir, '%s_event_set' % site_tag))
        sites.save(os.path.join(output_dir, '%s_sites' % site_tag))
        # Motion is a numpy.ndarray, so save it manually
        os.mkdir(os.path.join(output_dir, '%s_motion' % site_tag))
        save(os.path.join(output_dir, '%s_motion' % site_tag, 'bedrock_SA.npy'),
             motion)
        save(os.path.join(output_dir, '%s_motion' % site_tag,
                          'atten_periods.npy'),
             eqrm_flags['atten_periods'])
        # ... and eqrm_flags
        eqrm_flags_to_control_file(os.path.join(output_dir, 'eqrm_flags.py'),
                                   eqrm_flags)
Example #5
def create_nci_job(nodes, param_file):
    """
    Creates an NCI job package from the given parameter file and the number of
    nodes specified.
    """
    # Initial node number validation
    if nodes > 8 and nodes % 8 != 0:
        raise Exception('Nodes must be a multiple of 8 if greater than 8.')
    if nodes > LIMIT_NODES:
        raise Exception('The node limit is %s' % LIMIT_NODES)

    # Parse param_file to eqrm_flags
    eqrm_flags = create_parameter_data(param_file)

    # Some validation based on the event_set_handler value
    if eqrm_flags.event_set_handler == 'save':
        raise Exception(
            'Please ensure that event_set_handler is load or generate')
    if eqrm_flags.event_set_handler != 'load':
        log.info('')
        log.info('event_set_handler not load. Generating event set for NCI.')
        log.info('')

    # Calculate parameters required for job
    params = calc_params(eqrm_flags)
    req_memory = calc_memory(nodes, params)
    req_jobfs = calc_jobfs(nodes)
    req_walltime = calc_walltime(nodes)

    # Validation based on parameters
    msg = ''
    if req_memory > nodes * LIMIT_MEMORY_MULTIPLIER:
        msg = '%sRequired memory %sMB greater than limit %sMB.\n' % (
            msg, req_memory, nodes * LIMIT_MEMORY_MULTIPLIER)
    if req_jobfs > nodes * LIMIT_JOBFS_MULTIPLIER:
        msg = '%sRequired jobfs %sMB greater than limit %sMB\n' % (
            msg, req_jobfs, nodes * LIMIT_JOBFS_MULTIPLIER)
    if req_walltime > LIMIT_WALLTIME_MULTIPLIER(nodes):
        msg = '%sRequired walltime %ssecs greater than limit %ssecs\n' % (
            msg, req_walltime, LIMIT_WALLTIME_MULTIPLIER(nodes))
    if len(msg) > 0:
        msg += 'Consider reducing the size of your simulation.'
        raise Exception(msg)

    # Create directory to package into
    nci_dir = os.path.join('.', 'nci_job')
    if os.path.exists(nci_dir):
        rmtree(nci_dir)
    os.makedirs(nci_dir)

    log.info('')
    log.info('Saving package to %s' % nci_dir)
    log.info('(replaces the directory if it already exists)')

    # Copy input, output and save data to the packaged directory
    input_dir = os.path.join(nci_dir, 'input')
    copytree(eqrm_flags.input_dir, input_dir)

    output_dir = os.path.join(nci_dir, 'output')
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    save_dir = os.path.join(nci_dir, 'save')
    copytree(
        os.path.join(eqrm_flags.output_dir,
                     '%s_event_set' % eqrm_flags.site_tag), save_dir)

    # Modify eqrm_flags directories for NCI
    eqrm_flags['input_dir'] = os.path.join('.', 'input')
    eqrm_flags['output_dir'] = os.path.join('.', 'output')
    eqrm_flags['data_array_storage'] = "getenv('PBS_JOBFS')"
    eqrm_flags['event_set_load_dir'] = os.path.join('.', 'save')

    # We always want a load job
    eqrm_flags['event_set_handler'] = "load"

    # Write new setdata file
    eqrm_flags_to_control_file(os.path.join(nci_dir, param_file), eqrm_flags)

    # Write NCI job file
    job_file = open(os.path.join(nci_dir, '%s_job' % param_file), 'w')
    job_file.write('#!/bin/bash\n')
    job_file.write('#PBS -wd\n')
    job_file.write('#PBS -q normal\n')
    job_file.write('#PBS -l ncpus=%s\n' % nodes)
    job_file.write('#PBS -l walltime=%s\n' % req_walltime)
    job_file.write('#PBS -l vmem=%sMB\n' % req_memory)
    job_file.write('#PBS -l jobfs=%sMB\n' % req_jobfs)
    job_file.write('\n')
    job_file.write('mpirun python %s\n' % param_file)
    job_file.close()

    log.info('')
    log.info('Now tar gzip %s and copy to NCI. e.g.' % nci_dir)
    log.info('tar czvf nci_job.tar.gz %s' % nci_dir)
    log.info('scp nci_job.tar.gz <username>@<nci_host>:/short/<project>/jobs/')
    log.info('')
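
A hedged usage sketch; 'setdata.py' is an assumed control-file name, and the node count must pass the checks at the top of the function:

# Hypothetical call: package a 16-node NCI job from an existing control file.
# The package is written to ./nci_job, ready to tar and scp as logged above.
create_nci_job(16, 'setdata.py')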
Example #6
def multi_run(base_control,
              nodes, 
              sites=None,
              max_site_index=None,
              input_dir=None,
              output_dir_funct=None,
              total_events=None, 
              #execute=True, 
              **kwargs):
    """
    Run several simulations
    
    Arg:
    base_control -  An instance with attributes of EQRM paramters.
    input_dir_funct -  a function that can make the input_dir string, 
                       **kwargs is passed in.
    output_dir_funct -  a function that can make the output_dir string, 
                       **kwargs is passed in.
    nodes - a list of the number of nodes to run each simulation on.
            The first value is for the first simulation ect.
    sites - a list of number of sites to run each simulation on.
            The first value is for the first simulation ect.
            The sites chosen will be spread out.
    max_site_index - The number of sites in the site file.
    total_events - a list of the number of events to run each simulation on.
            The first value is for the first simulation ect.
    **kwargs - Each key is an eqrm control file attribute. The value is
            a lists of vaules to go into each simulation.
             The first value is for the first simulation ect.
    """
    control_file = 'temp.py'
    # Build the base eqrm_flags.
    flags = create_parameter_data(base_control)
    
    # First check that all the array sizes are correct
    runs = len(nodes)
    for k, v in kwargs.items():
        if len(v) != runs:
            msg = '%s list is length %s, %s was expected.' % (k, len(v), runs)
            raise ListLengthError(msg)
     
    # Start Looping
    for i, num_nodes in enumerate(nodes):       
        
        new_flags = {}
        for k, v in kwargs.items():
            new_flags[k] = v[i]

        # Add the kwargs
        flags.update(new_flags)
        
        # Add the directories
        if output_dir_funct is not None:
            flags['output_dir'] = output_dir_funct(**flags)
        if input_dir is not None:
            flags['input_dir'] = input_dir   
        
        # Write an EQRM control file, then do an mpi run call
        eqrm_flags_to_control_file(control_file, flags)
        (cluster, _) = util.get_hostname()
    
        cmd = mpi_command(cluster, num_nodes, control_file)
        subprocess.call(cmd)
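
A hedged usage sketch of this variant; `setdata` and `make_output_dir` below are assumptions standing in for a real EQRM control module and a directory-naming helper, and each keyword argument supplies one value per simulation:

# Hypothetical usage sketch (module and helper names are assumptions).
import setdata as base_control   # assumed module exposing EQRM control attributes

def make_output_dir(**flags):
    # Assumed helper: derive the output directory from the current flags.
    return './output/%s' % flags['site_tag']

multi_run(base_control,
          nodes=[4, 8, 16],                      # simulation i runs on nodes[i]
          input_dir='./input',
          output_dir_funct=make_output_dir,
          site_tag=['run_a', 'run_b', 'run_c'])  # one value per simulation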
Example #7
def create_nci_job(nodes, param_file):
    """
    Creates an NCI job package from the given parameter file and the number of
    nodes specified.
    """
    # Initial node number validation
    if nodes > 8 and nodes % 8 != 0:
        raise Exception('Nodes must be a multiple of 8 if greater than 8.')
    if nodes > LIMIT_NODES:
        raise Exception('The node limit is %s' % LIMIT_NODES)
        
    # Parse param_file to eqrm_flags
    eqrm_flags = create_parameter_data(param_file)
    
    # Some validation based on the event_set_handler value
    if eqrm_flags.event_set_handler == 'save':
        raise Exception(
            'Please ensure that event_set_handler is load or generate')
    if eqrm_flags.event_set_handler != 'load':
        log.info('')
        log.info('event_set_handler not load. Generating event set for NCI.')
        log.info('')
        
    
    # Calculate parameters required for job
    params = calc_params(eqrm_flags)
    req_memory = calc_memory(nodes, params)
    req_jobfs = calc_jobfs(nodes)
    req_walltime = calc_walltime(nodes)
    
    # Validation based on parameters
    msg = ''
    if req_memory > nodes * LIMIT_MEMORY_MULTIPLIER:
        msg = '%sRequired memory %sMB greater than limit %sMB.\n' % (
            msg, req_memory, nodes * LIMIT_MEMORY_MULTIPLIER)
    if req_jobfs > nodes * LIMIT_JOBFS_MULTIPLIER:
        msg = '%sRequired jobfs %sMB greater than limit %sMB\n' % (
            msg, req_jobfs, nodes * LIMIT_JOBFS_MULTIPLIER)
    if req_walltime > LIMIT_WALLTIME_MULTIPLIER(nodes):
        msg = '%sRequired walltime %ssecs greater than limit %ssecs\n' % (
            msg, req_walltime, LIMIT_WALLTIME_MULTIPLIER(nodes))
    if len(msg) > 0:
        msg += 'Consider reducing the size of your simulation.'
        raise Exception(msg)
    
    # Create directory to package into
    nci_dir = os.path.join('.', 'nci_job')
    if os.path.exists(nci_dir):
        rmtree(nci_dir)
    os.makedirs(nci_dir)
    
    log.info('')
    log.info('Saving package to %s' % nci_dir)
    log.info('(replaces the directory if it already exists)')
    
    # Copy input, output and save data to the packaged directory
    input_dir = os.path.join(nci_dir, 'input')
    copytree(eqrm_flags.input_dir, input_dir)
    
    output_dir = os.path.join(nci_dir, 'output')
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    
    save_dir = os.path.join(nci_dir, 'save')
    copytree(os.path.join(eqrm_flags.output_dir, 
                          '%s_event_set' % eqrm_flags.site_tag), save_dir)
    
    # Modify eqrm_flags directories for NCI
    eqrm_flags['input_dir'] = os.path.join('.', 'input')
    eqrm_flags['output_dir'] = os.path.join('.', 'output')
    eqrm_flags['data_array_storage'] = "getenv('PBS_JOBFS')"
    eqrm_flags['event_set_load_dir'] = os.path.join('.', 'save')
    
    # We always want a load job
    eqrm_flags['event_set_handler'] = "load"
    
    # Write new setdata file
    eqrm_flags_to_control_file(os.path.join(nci_dir, param_file), eqrm_flags)
    
    # Write NCI job file
    job_file = open(os.path.join(nci_dir, '%s_job' % param_file), 'w')
    job_file.write('#!/bin/bash\n')
    job_file.write('#PBS -wd\n')
    job_file.write('#PBS -q normal\n')
    job_file.write('#PBS -l ncpus=%s\n' % nodes)
    job_file.write('#PBS -l walltime=%s\n' % req_walltime)
    job_file.write('#PBS -l vmem=%sMB\n' % req_memory)
    job_file.write('#PBS -l jobfs=%sMB\n' % req_jobfs)
    job_file.write('\n')
    job_file.write('mpirun python %s\n' % param_file)
    job_file.close()
    
    log.info('')
    log.info('Now tar gzip %s and copy to NCI. e.g.' % nci_dir)
    log.info('tar czvf nci_job.tar.gz %s' % nci_dir)
    log.info('scp nci_job.tar.gz <username>@<nci_host>:/short/<project>/jobs/')
    log.info('')