Example #1
    def _setup(
        self,
        ref_file_ending=None,
        jobdir_pattern=None,
        params=None,
    ):
        # Avoid a mutable default argument; params is an optional list of flags
        if params is None:
            params = []

        if jobdir_pattern is None:
            jobdir_pattern = './job_bench*'

        j = JobsData(jobdir_pattern, verbosity=0)
        jobs = j.get_flattened_data()

        if len(jobs) == 0:
            raise Exception("No jobs found!")

        for key, job in jobs.items():
            print("Processing " + key)

            # job directory path
            job_dirpath = job['jobgeneration.job_dirpath']

            # files
            data_files = glob.glob(job_dirpath + '/output*prog*.csv')

            if len(data_files) == 0:
                self.info("WARNING")
                self.info("WARNING: No files found")
                self.info(
                    "WARNING: However, there should be at least one file (the one at t=0)"
                )
                self.info("WARNING")

            # Iterate over all files
            for data_file in data_files:

                # Skip files stored in spectral space
                if '_spec' in data_file:
                    continue

                try:
                    s = PlaneData_Spectrum(data_file, params)

                except OSError as e:
                    # FileNotFoundError and IOError are both OSError in Python 3.
                    # Ignoring missing files should be configured via the
                    # "ignore_missing_file" parameter, see above
                    if "ignore_missing_file" in params:
                        self.info("Ignoring Error:")
                        self.info(str(e))
                        continue

                    raise

                # Derive the pickle output filename from the CSV filename
                pickle_filename = data_file.replace('.csv', '_spectrum.pickle')

                print("Writing file " + pickle_filename)
                s.write_file(pickle_filename)
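
The pickle written above can be inspected with the standard library. A minimal sketch, assuming the file holds a plain dict carrying the 'spectrum' and 'spectrum_wavelength' fields that Example #2 below reads from the flattened job data (the filename is hypothetical; if write_file() stores a different structure, adapt accordingly):

import pickle

# Hypothetical filename; any '*_spectrum.pickle' written by _setup() works
with open('output_prog_h_t00000000000.00000000_spectrum.pickle', 'rb') as f:
    spectrum = pickle.load(f)

print(type(spectrum))  # inspect what write_file() actually stored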
Example #2
    print("")
    print("Usage:")
    print("")
    print("	" + sys.argv[0] +
          " [output_filename.pdf] [jobdir1] [jobdir2] ... [jobdirN]")
    print("")
    sys.exit(1)

if len(sys.argv) >= 4:
    # Load Jobs specified via program parameters
    jd = JobsData(job_dirs=sys.argv[3:])
else:
    # Load all Jobs
    jd = JobsData()

data = jd.get_flattened_data()


def label(d):
    # Escape underscores for LaTeX and append the time step size
    val = d['runtime.timestepping_method'].replace('_', '\\_')
    val += r', $\Delta t = ' + str(d['runtime.timestep_size']) + '$'
    return val


def x_values(d):
    return d[muletag + '.spectrum_wavelength']


def y_values(d):
    return d[muletag + '.spectrum']
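
The three accessors above are all a plot needs. A minimal sketch of how they might be wired into matplotlib (an assumption; the original script's plotting code is not shown in this excerpt, and muletag is defined outside it):

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
for key, d in data.items():
    # One curve per job, log-log axes as usual for spectra
    ax.loglog(x_values(d), y_values(d), label=label(d))

ax.set_xlabel("wavelength")
ax.set_ylabel("spectrum")
ax.legend()
fig.savefig(sys.argv[1])  # per the usage text above, argv[1] is the output PDF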
Example #3
	def _setup(
			self,
			ref_file_ending=None,
			jobdir_pattern=None,
			job_dirs=None,
			params=None,
		):
		# Avoid a mutable default argument; params is an optional list of flags
		if params is None:
			params = []

		if job_dirs is not None:
			j = JobsData(job_dirs=job_dirs, verbosity=0)

		else:
			if jobdir_pattern is None:
				jobdir_pattern = './job_bench*'

			j = JobsData(jobdir_pattern, verbosity=0)
		jobs = j.get_flattened_data()

		if len(jobs) == 0:
			raise Exception("No jobs found!")

		for key, job in jobs.items():
			print("Processing "+key)

			# job directory path
			job_dirpath = job['jobgeneration.job_dirpath']

			# u-velocity files
			u_vel_files = glob.glob(job_dirpath+'/output_*_u_*.csv')

			if len(u_vel_files) == 0:
				self.info("WARNING")
				self.info("WARNING: No velocity files found")
				self.info("WARNING: However, there should be at least one velocity file (the one at t=0)")
				self.info("WARNING")


			# only process the very last file
			if 'only_last_file' in params:

				u_vel_files.sort()
				u_vel_files = [u_vel_files[-1]]

				# Validate that the (time-dependent) file ending can be determined
				pos = u_vel_files[0].rfind('_')
				if pos < 0:
					raise Exception("File ending not found for reference file '"+u_vel_files[0]+"'")


			# Iterate over all velocity files for different time stamps
			for u_vel_file in u_vel_files:
				v_vel_file = u_vel_file.replace('_u_', '_v_')

				try:
					s = PlaneData_KineticEnergy(
							u_vel_file,
							v_vel_file,
							params
					)

				except OSError as e:
					# FileNotFoundError and IOError are both OSError in Python 3.
					# Ignoring missing files should be configured via the
					# "ignore_missing_file" parameter, see above
					if "ignore_missing_file" in params:
						self.info("Ignoring Error:")
						self.info(str(e))
						continue

					raise

				if 'only_last_file' in params:
					pickle_filename = 'plane_data_kinetic_energy.pickle'

				else:

					# determine the (time-dependent) ending of this file
					pos = u_vel_file.rfind('_')
					if pos < 0:
						raise Exception("File ending not found for file '"+u_vel_file+"'")

					pickle_file_ending = u_vel_file[pos:]
					pickle_file_ending = pickle_file_ending.replace('.csv', '')
					print("pickle_file_ending: "+pickle_file_ending)

					pickle_filename = 'plane_data_kinetic_energy'+pickle_file_ending+'.pickle'

				print("Writing file "+pickle_filename)
				s.write_file(job['jobgeneration.job_dirpath']+'/'+pickle_filename)
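
The suffix handling above can be sanity-checked in isolation. A small sketch with a hypothetical file name (the '_t...' ending format is taken from the comments in Example #4 below):

# Hypothetical CSV name produced by a SWEET job output
u_vel_file = 'output_prog_u_t00000864000.00000000.csv'

pos = u_vel_file.rfind('_')                       # start of the time stamp part
pickle_file_ending = u_vel_file[pos:].replace('.csv', '')
# -> '_t00000864000.00000000'

pickle_filename = 'plane_data_kinetic_energy' + pickle_file_ending + '.pickle'
# -> 'plane_data_kinetic_energy_t00000864000.00000000.pickle'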
Example #4
	def _setup(
			self,
			ref_file_ending=None,
			jobdir_pattern=None,
			job_dirs=None,
			params=None,
		):
		# Avoid a mutable default argument; params is an optional list of flags
		if params is None:
			params = []

		if job_dirs is not None:
			j = JobsData(job_dirs=job_dirs, verbosity=0)

		else:
			if jobdir_pattern is None:
				jobdir_pattern = './job_bench*'

			j = JobsData(jobdir_pattern, verbosity=0)

		jobs = j.get_flattened_data()


		no_reference_job_unique_id_found = True

		if len(jobs) == 0:
			raise Exception("No jobs found!")

		for key, job in jobs.items():
			print("Processing "+key)

			# Sort out jobs which don't have a reference job id
			# These jobs are likely the reference jobs themselves
			if 'jobgeneration.reference_job_unique_id' not in job:
				continue

			no_reference_job_unique_id_found = False

			reference_job_unique_id = job['jobgeneration.reference_job_unique_id']
			print(" + ref job id: "+reference_job_unique_id)

			ref_key = None
			for skey, sjob in jobs.items():
				if sjob['jobgeneration.job_unique_id'] == reference_job_unique_id:
					if ref_key is not None:
						raise Exception("FATAL: Found a second job matching this reference job id; there are probably two reference jobs")

					ref_key = skey

			if ref_key is None:
				print("")
				print("FATAL: missing reference job with id "+reference_job_unique_id)
				print("")
				print("FATAL: reference job was intended for job with dirpath: "+job['jobgeneration.job_dirpath'])
				print("")
				print("FATAL: Hint: If specifying job directories manually, reference job *MUST* be included in the provided job directories!")
				print("")
				raise Exception("Reference job not found!")

			# Load reference job
			ref_job = jobs[ref_key]

			#
			# Load
			#   ref_files:            list of reference files
			#   use_ref_file_ending:  file ending for the pickle output file
			#
			# Were the reference filenames provided by the reference job?
			if 'output.reference_filenames' in ref_job:
				# ... then we use the reference files

				# They are available in 'output.reference_filenames', separated by ';'
				ref_files = ref_job['output.reference_filenames'].split(";")

				#
				# Now we have to find the file ending without the time stamp
				# We do this to generate a unique pickle file which is independent of the time
				#
				# We guess that this starts at the last '_' character in the filename
				# E.g. '_t00000864000.00000000.csv'
				pos = ref_files[0].rfind('_')
				if pos < 0:
					raise Exception("File ending not found for reference file '"+ref_files[0]+"'")

				use_ref_file_ending = ref_files[0][pos:]
				print("use_ref_file_ending: "+use_ref_file_ending)

			else:

				if ref_file_ending is not None:
					use_ref_file_ending = ref_file_ending
				else:
					# Time stamp part of "output_%s_t%020.8f.csv";
					# max_simulation_time is given in seconds, the time stamp in hours
					use_ref_file_ending = "_t{:020.8f}.csv".format(float(ref_job['runtime.max_simulation_time'])/(60*60))

				if use_ref_file_ending == "":
					raise Exception("No reference file ending provided / found")

				# Load reference files
				ref_files = []
				files = os.listdir(ref_job['jobgeneration.job_dirpath'])
				for f in files:
					if use_ref_file_ending in f:
						ref_files.append(f)


			if len(ref_files) == 0:
				print("No reference files found!")
				print("*"*80)
				print("Reference directory: "+ref_job['jobgeneration.job_dirpath'])
				print("Reference file endings: "+use_ref_file_ending)
				print("*"*80)
				raise Exception("Reference files not found!")


			for ref_file in ref_files:
				print("")
				print("Reference file: "+ref_file)

				if '_spec_' in ref_file:
					self.info("WARNING: Skipping '"+ref_file+"', since this is spectral data")

				else:
					s = None

					try:
						s = PlaneDataPhysicalDiff(
								ref_job['jobgeneration.job_dirpath']+'/'+ref_file,
								job['jobgeneration.job_dirpath']+'/'+ref_file,
								params
						)

					except OSError as e:
						# FileNotFoundError and IOError are both OSError in Python 3.
						# Ignoring missing files should be configured via the
						# "ignore_missing_file" parameter, see above
						if "ignore_missing_file" in params:
							self.info("Ignoring Error:")
							self.info(str(e))
							continue

						raise


					s.print()

					pickle_filename = 'plane_data_diff_'+ref_file.replace('output_', '').replace(use_ref_file_ending, '')+'.pickle'

					print("Writing file "+pickle_filename)
					s.write_file(job['jobgeneration.job_dirpath']+'/'+pickle_filename)

			print(ref_key)
			print("")



		if no_reference_job_unique_id_found:
			print("*"*80)
			print("Warning: No data generated")
			print("No job with a reference_job_unique_id found!")
			print("*"*80)
Example #5
    def _setup(
        self,
        ref_file_ending=None,
        jobdir_pattern=None,
    ):

        if jobdir_pattern is None:
            jobdir_pattern = './job_bench*'

        j = JobsData(jobdir_pattern, verbosity=0)
        jobs = j.get_flattened_data()

        no_reference_job_unique_id_found = True

        if len(jobs) == 0:
            raise Exception("No jobs found!")

        for key, job in jobs.items():
            print("Processing " + key)

            # Sort out jobs which don't have a reference job id
            # These jobs are likely the reference jobs themselves
            if 'jobgeneration.reference_job_unique_id' not in job:
                continue

            no_reference_job_unique_id_found = False

            reference_job_unique_id = job[
                'jobgeneration.reference_job_unique_id']
            print(" + ref job id: " + reference_job_unique_id)

            ref_key = None
            for skey, sjob in jobs.items():
                if sjob['jobgeneration.job_unique_id'] == reference_job_unique_id:
                    ref_key = skey

            if ref_key is None:
                print("Fatal: missing reference job with id " +
                      reference_job_unique_id)
                print(
                    "Fatal: reference job was intended for job with dirpath: "
                    + job['jobgeneration.job_dirpath'])
                raise Exception("Reference job not found!")

            # Load reference job
            ref_job = jobs[ref_key]

            if ref_file_ending is not None:
                use_ref_file_ending = ref_file_ending
            else:
                # Time stamp part of "output_%s_t%020.8f.csv";
                # max_simulation_time is in seconds, the time stamp in hours
                use_ref_file_ending = "_t{:020.8f}.csv".format(
                    float(ref_job['runtime.max_simulation_time']) / (60 * 60))

            if use_ref_file_ending == "":
                raise Exception("No reference file ending provided / found")

            # Load reference files
            ref_files = []
            files = os.listdir(ref_job['jobgeneration.job_dirpath'])
            for f in files:
                if use_ref_file_ending in f:
                    ref_files.append(f)

            if len(ref_files) == 0:
                print("No reference files found!")
                print("*" * 80)
                print("Reference directory: " +
                      ref_job['jobgeneration.job_dirpath'])
                print("Reference file endings: " + use_ref_file_ending)
                print("*" * 80)
                raise Exception("Reference files not found!")

            for ref_file in ref_files:
                s = None
                try:
                    s = SphereDataPhysicalDiff(
                        ref_job['jobgeneration.job_dirpath'] + '/' + ref_file,
                        job['jobgeneration.job_dirpath'] + '/' + ref_file)
                except Exception as e:
                    print("Error occurred and is being ignored")
                    print(str(e))
                    # Ignore missing or broken files and continue with the next one
                    continue

                s.print()

                pickle_filename = 'sphere_data_diff_' + ref_file.replace(
                    'output_', '').replace(use_ref_file_ending, '') + '.pickle'
                print("Writing file " + pickle_filename)
                s.write_file(job['jobgeneration.job_dirpath'] + '/' +
                             pickle_filename)

            print(ref_key)
            print("")

        if no_reference_job_unique_id_found:
            print("*" * 80)
            print("Warning: No data generated")
            print("No job with a reference_job_unique_id found!")
            print("*" * 80)
Example #6
    def _setup(
            self,
            job_directories=None,
            cmp_file_ending=None,
            jobdir_pattern=None,
        ):

        if job_directories is not None:
            j = JobsData(job_dirs=job_directories, verbosity=0)

        else:
            if jobdir_pattern is None:
                jobdir_pattern = './job_bench*'

            j = JobsData(jobdir_pattern, verbosity=0)


        jobs = j.get_flattened_data()

        no_reference_job_unique_id_found = True

        if len(jobs) == 0:
            raise Exception("No jobs found!")


        def proc_job(key, job):
            print("Processing "+key)

            # Sort out jobs which don't have a reference job id
            # These jobs are likely the reference jobs themselves
            if 'jobgeneration.reference_job_unique_id' not in job:
                return

            # This flag lives in the enclosing _setup() scope; without the
            # nonlocal declaration the assignment would create a local variable
            nonlocal no_reference_job_unique_id_found
            no_reference_job_unique_id_found = False

            reference_job_unique_id = job['jobgeneration.reference_job_unique_id']
            print(" + ref job id: "+reference_job_unique_id)

            ref_key = None
            for skey, sjob in jobs.items():
                if sjob['jobgeneration.job_unique_id'] == reference_job_unique_id:
                    ref_key = skey

            if ref_key is None:
                print("Fatal: missing reference job with id "+reference_job_unique_id)
                print("Fatal: reference job was intended for job with dirpath: "+job['jobgeneration.job_dirpath'])
                raise Exception("Reference job not found!")

            # Load reference job
            ref_job = jobs[ref_key]

            if cmp_file_ending is not None:
                use_cmp_file_ending = cmp_file_ending
            else:
                # Time stamp part of "output_%s_t%020.8f.sweet";
                # max_simulation_time is in seconds, the time stamp in hours
                use_cmp_file_ending = "_t{:020.8f}.sweet".format(float(ref_job['runtime.max_simulation_time'])/(60*60))

            if use_cmp_file_ending == "":
                raise Exception("No comparison file ending provided / found")

            # Load comparison files
            cmp_files = []
            files = os.listdir(ref_job['jobgeneration.job_dirpath'])
            for f in files:
                if use_cmp_file_ending in f:
                    cmp_files.append(f)

            if len(cmp_files) == 0:
                print("No comparison files found!")
                print("*"*80)
                print("Reference directory: "+ref_job['jobgeneration.job_dirpath'])
                print("Comparison file ending: "+use_cmp_file_ending)
                print("*"*80)
                raise Exception("Comparison files not found!")

            for cmp_file in cmp_files:
                s = None
                try:
                    s = SphereDataSpectralDiff(
                            ref_job['jobgeneration.job_dirpath']+'/'+cmp_file,
                            job['jobgeneration.job_dirpath']+'/'+cmp_file
                    )

                except Exception as e:
                    print(str(e))
                    # Ignore missing or broken files and continue with the next file
                    continue

                s.print()

                pickle_filename = 'sphere_data_diff_'+cmp_file.replace('output_', '').replace(use_cmp_file_ending, '')+'.pickle'
                print("Writing file "+pickle_filename)
                s.write_file(job['jobgeneration.job_dirpath']+'/'+pickle_filename)

            print(ref_key)
            print("")


        k = list(jobs.keys())
        v = list(jobs.values())

        n = len(jobs)

        def fun(i):
            proc_job(k[i], v[i])

        if False:
            # Parallel processing doesn't really provide any speedup :-(
            parhelper(n, fun, None, use_tqdm=True, verbose=1)

        else:
            for i in range(n):
                fun(i)


        if no_reference_job_unique_id_found:
            print("*"*80)
            print("Warning: No data generated")
            print("No job with a reference_job_unique_id found!")
            print("*"*80)