Example #1
def load_sites(parallel, load_dir):
    """
    Load the Sites object from file in load_dir
    """
    log.info('P%s: Loading site from %s' % (parallel.rank, load_dir))

    sites = Sites.load(load_dir)

    return sites
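
A minimal usage sketch (hypothetical names: './save' stands in for a directory holding a previously saved Sites object, and parallel is the EQRM parallel object with a rank attribute used above):

# Hypothetical call: load the Sites object saved by an earlier run.
sites = load_sites(parallel, './save')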
Example #2
def log_analysis():
    '''log_analysis - analyses the current log and returns a list of tuples,
    each containing:
    - the memory usage 'event' key
    - the memory usage data value

    It makes the assumption that the current pattern for logging memory usage
    is consistent. e.g.

    log.debug('Memory: event_set_zone created')
    log.resource_usage()

    This will produce lines in the log file that look like
    2012-01-12 13:12:29,694 DEBUG                     analysis:228 |Memory: event_set_zone created
    2012-01-12 13:12:29,695 DEBUG                     analysis:229 |Resource usage: memory=1151.0MB resident=758.2MB stacksize=0.3MB

    What we want from this example is a tuple that looks like
    ('event_set_zone created', 'memory=1151.0MB resident=758.2MB stacksize=0.3MB')

    Where the 'event' key is the 'Memory:' log reference and the data value is the
    'Resource usage:' log reference.
    '''

    results = []

    # Only do this if the logfile exists
    if os.path.exists(log.log_filename):
        # 1. Determine logfile for the previous run
        # Note: analysis.main changes the value of log.log_filename
        # per setdata file
        log.info('log_analysis - using log file %s' % log.log_filename)
        log_file = open(log.log_filename)

        # 2. Grab the Memory/Resource lines in a tuple
        for line in log_file:
            if 'Memory:' in line:
                key_line = line
                value_line = next(log_file)

                # This assumes the logging pattern described in the docstring
                # always holds
                _, key = key_line.split('Memory: ', 1)
                _, value = value_line.split('Resource usage: ', 1)

                # We want to preserve order so cannot use a dict
                results.append((key.strip(), value.strip()))

        log_file.close()

    # 3. Return the list of tuples
    return results
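
The pairing logic above can be exercised without a real log file. A self-contained sketch of the same technique, using the two sample lines from the docstring (the variable names here are illustrative, not part of the module):

# Pair each 'Memory:' line with the 'Resource usage:' line that follows,
# mirroring the loop in log_analysis above.
sample = [
    '2012-01-12 13:12:29,694 DEBUG analysis:228 |Memory: event_set_zone created\n',
    '2012-01-12 13:12:29,695 DEBUG analysis:229 |Resource usage: '
    'memory=1151.0MB resident=758.2MB stacksize=0.3MB\n',
]
lines = iter(sample)
pairs = []
for line in lines:
    if 'Memory:' in line:
        key = line.split('Memory: ', 1)[1]
        value = next(lines).split('Resource usage: ', 1)[1]
        pairs.append((key.strip(), value.strip()))
print(pairs)
# [('event_set_zone created', 'memory=1151.0MB resident=758.2MB stacksize=0.3MB')]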
Example #3
        events = Event_Set.generate_synthetic_events(
            file_name,
            fault_width,
            azi,
            dazi,
            fault_dip,
            prob_min_mag_cutoff,
            override_xml,
            prob_number_of_events_in_zones)
#        print "events.trace_start_lat", events.trace_start_lat
#        print "events.trace_start_lon", events.trace_start_lon
#        print "events.trace_end_lat", events.trace_end_lat
#        print "events.trace_end_lon", events.trace_end_lon
#        print "events.rupture_centroid_lat", events.rupture_centroid_lat
#        print "events.rupture_centroid_lon", events.rupture_centroid_lon
#        print "events.rupture_centroid_x", events.rupture_centroid_x
#        print "events.rupture_centroid_y", events.rupture_centroid_y

        os.remove(file_name)

#-------------------------------------------------------------
if __name__ == "__main__":
    event_num = 100000
    eqrmlog.console_logging_level = eqrmlog.INFO
    eqrmlog.info('Memory: before creating ' + str(event_num) + ' events')
    eqrmlog.resource_usage(level=eqrmlog.INFO)
    create_event_set(event_num)
    eqrmlog.info('Memory: after')
    eqrmlog.resource_usage(level=eqrmlog.INFO)
Example #4
def create_nci_job(nodes, param_file):
    """
    Creates an NCI job package from the given parameter file and the number of
    nodes specified.
    """
    # Initial node number validation
    if nodes > 8 and nodes % 8 != 0:
        raise Exception('Nodes must be a multiple of 8 if greater than 8.')
    if nodes > LIMIT_NODES:
        raise Exception('The node limit is %s' % LIMIT_NODES)

    # Parse param_file to eqrm_flags
    eqrm_flags = create_parameter_data(param_file)

    # Some validation based on the event_set_handler value
    if eqrm_flags.event_set_handler == 'save':
        raise Exception(
            'Please ensure that event_set_handler is load or generate')
    if eqrm_flags.event_set_handler != 'load':
        log.info('')
        log.info('event_set_handler not load. Generating event set for NCI.')
        log.info('')

    # Calculate parameters required for job
    params = calc_params(eqrm_flags)
    req_memory = calc_memory(nodes, params)
    req_jobfs = calc_jobfs(nodes)
    req_walltime = calc_walltime(nodes)

    # Validation based on parameters
    msg = ''
    if req_memory > nodes * LIMIT_MEMORY_MULTIPLIER:
        msg = '%sRequired memory %sMB greater than limit %sMB.\n' % (
            msg, req_memory, nodes * LIMIT_MEMORY_MULTIPLIER)
    if req_jobfs > nodes * LIMIT_JOBFS_MULTIPLIER:
        msg = '%sRequired jobfs %sMB greater than limit %sMB\n' % (
            msg, req_jobfs, nodes * LIMIT_JOBFS_MULTIPLIER)
    if req_walltime > LIMIT_WALLTIME_MULTIPLIER(nodes):
        msg = '%sRequired walltime %ssecs greater than limit %ssecs\n' % (
            msg, req_walltime, LIMIT_WALLTIME_MULTIPLIER(nodes))
    if len(msg) > 0:
        msg += 'Consider reducing the size of your simulation.'
        raise Exception(msg)

    # Create directory to package into
    nci_dir = os.path.join('.', 'nci_job')
    if os.path.exists(nci_dir):
        rmtree(nci_dir)
    os.makedirs(nci_dir)

    log.info('')
    log.info('Saving package to %s' % nci_dir)
    log.info('(replaces the current directory if it exists)')

    # Copy input, output and save data to the packaged directory
    input_dir = os.path.join(nci_dir, 'input')
    copytree(eqrm_flags.input_dir, input_dir)

    output_dir = os.path.join(nci_dir, 'output')
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    save_dir = os.path.join(nci_dir, 'save')
    copytree(
        os.path.join(eqrm_flags.output_dir,
                     '%s_event_set' % eqrm_flags.site_tag), save_dir)

    # Modify eqrm_flags directories for NCI
    eqrm_flags['input_dir'] = os.path.join('.', 'input')
    eqrm_flags['output_dir'] = os.path.join('.', 'output')
    eqrm_flags['data_array_storage'] = "getenv('PBS_JOBFS')"
    eqrm_flags['event_set_load_dir'] = os.path.join('.', 'save')

    # We always want a load job
    eqrm_flags['event_set_handler'] = "load"

    # Write new setdata file
    eqrm_flags_to_control_file(os.path.join(nci_dir, param_file), eqrm_flags)

    # Write NCI job file
    job_file = open(os.path.join(nci_dir, '%s_job' % param_file), 'w')
    job_file.write('#!/bin/bash\n')
    job_file.write('#PBS -wd\n')
    job_file.write('#PBS -q normal\n')
    job_file.write('#PBS -l ncpus=%s\n' % nodes)
    job_file.write('#PBS -l walltime=%s\n' % req_walltime)
    job_file.write('#PBS -l vmem=%sMB\n' % req_memory)
    job_file.write('#PBS -l jobfs=%sMB\n' % req_jobfs)
    job_file.write('\n')
    job_file.write('mpirun python %s\n' % param_file)
    job_file.close()

    log.info('')
    log.info('Now tar gzip %s and copy to NCI. e.g.' % nci_dir)
    log.info('tar czvf nci_job.tar.gz %s' % nci_dir)
    log.info('scp nci_job.tar.gz <username>@<nci_host>:/short/<project>/jobs/')
    log.info('')
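
A hypothetical invocation (assumes this module's EQRM imports are available and that a control file named setdata.py sits in the current directory; the file name is illustrative):

# Package an 8-node job. Afterwards ./nci_job/ holds input/, output/,
# save/, the rewritten control file and a 'setdata.py_job' PBS script
# ready to tar, gzip and scp to NCI as logged above.
create_nci_job(8, 'setdata.py')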
Example #5
def logMessages(self):
    log.debug('test at level DEBUG')
    log.info('test at level INFO')
    log.warning('test at level WARNING')
    log.error('test at level ERROR')
    log.critical('test at level CRITICAL')