Example #1
0
    def __load_job_raw_data(self, job_dirs=None):
        """
        Parse all output.out files and extract all kind of job output information

        Return a dictionary with content from the job directories
        {
            [name of job directory] :
            {
                #
                # Dictionary with data from job generation
                # (read from [jobdir]/jobgeneration.pickle)
                #
                'jobgeneration':
                {
                    'compile': [...],
                    'runtime': [...],
                    'parallelization': [...],
                    'platforms_platform': [...],
                    'platform_resources': [...],
                },
                'output':
                {
                    'SimulationBenchmarkTimings.main': [value],
                    'SimulationBenchmarkTimings.main_simulationLoop': [value],
                    [...]
                }
            }
        }

        :param job_dirs: Iterable of job directory paths to load; defaults to
                         no directories when omitted.
        """
        # Use None as sentinel instead of a mutable default argument ([] would
        # be shared across all calls of this method).
        if job_dirs is None:
            job_dirs = []

        self.__jobs_data = {}
        for job_dir in job_dirs:
            if self.verbosity > 5:
                self.info("")
                self.info("Processing '" + job_dir + "'")

            job = JobData(job_dir, verbosity=self.verbosity)

            # Fetch the flattened data once (was previously fetched twice)
            flattened_data = job.get_flattened_data()
            if 'jobgeneration.job_unique_id' not in flattened_data:
                # Be backward compatible: key by directory name when no
                # unique job id is available
                self.__jobs_data[job_dir] = job
            else:
                self.__jobs_data[flattened_data['jobgeneration.job_unique_id']] = job
    def compute_spectrum(
        self,
        filename_phys,
        params,
    ):
        """
        Compute an energy spectrum from a physical-space plane data file.

        Loads the field from `filename_phys`, computes its normalized 2D FFT,
        and accumulates the squared spectral amplitudes into radial wavenumber
        shells. Results are stored on the instance:

            self.spectrum               -- energy per wavenumber shell
            self.spectrum_mode          -- shell wavenumber indices
            self.spectrum_wavelength    -- wavelength per shell, or None when
                                           no job metadata could be loaded
            self.job_metadata_available -- True if job metadata was loaded

        :param filename_phys: Path to the physical-space data file; its
                              directory is assumed to be a job directory.
        :param params: Not used inside this method (kept for interface
                       compatibility -- NOTE(review): confirm with callers).
        """

        #
        # Get meta information about job
        #

        self.job_metadata_available = False

        try:
            # Step 1) Determine job directory name
            jobdir = os.path.dirname(filename_phys)

            print("Loading data from job directory '" + jobdir + "'")

            # Step 2) Load job meta information
            j = JobData(jobdir=jobdir)

            self.job_metadata_available = True

        except Exception as e:
            # Best effort: a missing job directory only disables the
            # wavelength output below, it does not abort the spectrum.
            print(str(e))
            self.job_metadata_available = False

        if self.job_metadata_available:
            data = j.get_flattened_data()

            if 'runtime.plane_domain_size' not in data:
                raise Exception(
                    "Physical domain size must be specified via runtime parameters in MULE"
                )

            domain_size = data['runtime.plane_domain_size']

            # Assumes a square domain: only the first entry is used
            if isinstance(domain_size, list):
                domain_size = domain_size[0]

        #
        # The following code is converted from a development of Pedro Peixoto to fit into the MULE framework
        #

        # some parameter
        # mmin == 0 means "no externally imposed cap on the shell count"
        mmin = 0

        # Load file
        udata_ = PlaneDataPhysical(filename_phys)
        udata = udata_.data

        #Calculate spectrum
        #-----------------------------------

        print("Physical shape")
        print(udata.shape)

        # Normalize the FFT by the number of grid points
        uspec = np.fft.fft2(udata) / (udata.shape[0] * udata.shape[1])

        print("Spectral shape")
        print(uspec.shape)

        # Calculate amplitude spectrum |u|^2 = u * conj(u)
        data = np.multiply(uspec, np.conjugate(uspec))
        data = data.real

        n = data.shape[0]

        # Since data u,v data is real, the spectrum has a symmetry and all that matters is the 1st quadrant
        # we multiply by 2 to account for the symmetry
        data = 2 * data[0:int(n / 2) - 1, 0:int(n / 2) - 1]

        # Adjust data size
        n = data.shape[0]

        # m=int(n/2)+1
        m = int(2 * n / 3) + 1  #anti-aliasing cut
        if mmin == 0:
            mmin = m
        else:
            if m > mmin:
                m = mmin

        print("Anti-aliased spectrum region:", m)

        #Calculate energy per shell
        # TODO: convert to linspace
        r = np.arange(0, m + 1, 1)  # radius
        energy = np.zeros(m + 1)
        shell_pattern = np.zeros((m + 1, m + 1))

        # Bin each mode (i,j) into the shell given by the integer part of its
        # radial wavenumber sqrt(i^2 + j^2); modes beyond shell m are dropped.
        print("Generating energy in shells (Each . is 1/", m, ")")
        for i in range(0, m):
            for j in range(0, m):
                k = np.sqrt(pow(float(i), 2) + pow(float(j), 2))
                intk = int(k)
                if intk < m:
                    energy[intk] = energy[intk] + data[i, j]
                    shell_pattern[i, j] = intk
            print(".", end='', flush=True)
            #print(i, j, k, intk, data[i,j], energy[intk], data.shape, energy.shape)

        print(".")

        #Quick check to see if things match
        #print("Energy in shells: ", energy[0:10])
        #print("Energy in first column of data: ", data[0:10,0])
        #print("Shell modes: ", r[0:10])
        #print("Pattern:\n", shell_pattern[0:10,0:10])

        self.spectrum = energy[:]
        self.spectrum_mode = r[:]

        if self.job_metadata_available:
            # Convert wavenumber to wavelength
            # (factor 1000: presumably converts the domain size to meters
            #  or km -- TODO confirm units against runtime.plane_domain_size)
            self.spectrum_wavelength = np.zeros(m + 1)
            self.spectrum_wavelength[1:] = domain_size * 1000 / r[1:]

        else:
            # No metadata => no physical length scale available
            self.spectrum_wavelength = None
# Plot configuration
fontsize = 18
figsize = (9, 7)

# Require at least one physical-data file on the command line,
# otherwise print usage information and abort.
if len(sys.argv) < 2:
    usage_lines = [
        "",
        "Usage:",
        "",
        " $ " + sys.argv[0] + " [physdata1] [physdata2] [...]",
        "",
    ]
    for usage_line in usage_lines:
        print(usage_line)
    sys.exit(1)
for filename in sys.argv[1:]:

    jd = JobData(os.path.split(filename)[0])
    jobdata = jd.get_flattened_data()

    #Load data
    print(filename)
    data = np.loadtxt(filename)

    if 'prog_h' in filename:
        depth = jobdata['runtime.h0']

        print("Summing full depth")
        print(depth)

        data = data + depth

        #Define in km
Example #4
0

from mule.postprocessing.JobData import *

# Parse the command line: requires an output filename (argv[1]) and a
# job directory (argv[2], read further below); otherwise print usage and exit.
if len(sys.argv) > 2:
	output_filename = sys.argv[1]
else:
	print("")
	print("Usage:")
	print("")
	print("	"+sys.argv[0]+" [output_filename.pdf] [jobdir]")
	print("")
	sys.exit(1)

#List all parameters of job
jd = JobData(sys.argv[2])
jd_flat = jd.get_flattened_data()
#for key in jd_flat:
	#print(key, '->', jd_flat[key])		
jd_raw = jd.get_job_raw_data()

# Split the raw job data into simulation output and runtime configuration
output=jd_raw['output']
runtime=jd_raw['jobgeneration']
runtime=runtime['runtime']

# Normal-mode benchmark: case name and number of waves
nm_name=output['simulation_benchmark_normal_modes.case']
nwaves=int(output['simulation_benchmark_normal_modes.nwaves'])

# Accumulators, filled later in the script
k0=[]
k1=[]
d0=[]