Example #1
    def get_latest_ouput_file_name(self):
        """Return latest output file name (latest .out file in working folder)"""
        candidates = glob1(self.workingdir, self.jobname + '*.out')
        if len(candidates) > 0:
            # sort newest first (the file names carry a date & time stamp):
            candidates.sort(reverse=True)
            self.out_file = path.join(self.workingdir, candidates[0])
            log.info('Post-processing output file from previous '
                     'simulation run: {0}'.format(self.out_file))
        else:
            # try again using only the file suffix, in case the folder or file was renamed:
            candidates = glob1(self.workingdir, '*.out')
            if len(candidates) > 0:
                # sort newest first (the file names carry a date & time stamp):
                candidates.sort(reverse=True)
                self.out_file = path.join(self.workingdir, candidates[0])
                log.info('Post-processing output file from previous '
                         'simulation run: {0}'.format(self.out_file))
                if len(candidates) > 1:
                    log.warning(
                        "Other output files were found in same folder: {0}".
                        format(candidates))
            else:
                log.exception('Cannot post-process, no simulation output '
                              'file found!')
                return
        return self.out_file
Example #2
    def write_ctl_file(self, where='./'):
        filename = path.join(where, self.ctl_file)
        log.info("writing ctl file to %s" % filename)
        log.info("### ctl file for reference: ###\n" + str(self) +
                 '\n### end of ctl file ###\n\n')
        with open(filename, 'w') as input_file:
            input_file.write(str(self))
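The ctl text written here is whatever `str(self)` returns; none of these excerpts show that method. A toy sketch of the likely mechanism (an assumption based on Example #13, where extra kwargs are stored in `__dict__` "to be used in ctl_template"), not the project's actual code:

# Toy stand-in, not the project's actual implementation: str(self) plausibly
# renders ctl_template with the attributes stored in the instance's __dict__.
ctl_template = '(set-param! resolution {resolution})'
print(ctl_template.format(**dict(resolution=12)))
# -> (set-param! resolution 12)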
Example #3
    def plot_fluxes(self,
                    interactive=False,
                    only_fluxplanes=None,
                    norm_with_influx=True,
                    gap=None,
                    xlim=None,
                    ylim=None):

        if interactive:
            fig_filename = None
        else:
            fig_filename = path.join(self.workingdir,
                                     self.jobname + '_fluxes.png')
            log.info('saving flux diagram to file %s' % fig_filename)

        self.fluxdata = postprocess.load_flux_data(
            self.get_latest_ouput_file_name())

        fig = plt.figure(figsize=(13.8, 6))
        X = self.fluxdata[:, 0]
        #X = range(len(fluxes[:, 0]))
        if only_fluxplanes is None:
            # plot all:
            only_fluxplanes = range(1, self.fluxdata.shape[1])

        refl = self.fluxdata[:, 1]
        trans = self.fluxdata[:, 2]
        influx = trans - refl
        #plt.plot(X, influx)
        for i in only_fluxplanes:
            if norm_with_influx:
                plt.plot(X, self.fluxdata[:, i] / influx, label=i)
            else:
                plt.plot(X, self.fluxdata[:, i], label=i)

        if xlim is not None:
            plt.xlim(*xlim)
        if ylim is not None:
            plt.ylim(*ylim)

        ymin, ymax = plt.ylim()
        if gap is not None:
            plt.fill([gap[0], gap[0], gap[1], gap[1]],
                     [ymin, ymax, ymax, ymin],
                     'b',
                     alpha=0.15)
        plt.ylim(ymin, ymax)

        plt.legend()
        plt.tight_layout()
        if fig_filename is not None:
            fig.savefig(fig_filename,
                        transparent=False,
                        bbox_inches='tight',
                        pad_inches=0)
        else:
            plt.show()

        return
Example #4
def h5topng_field_series(field_component,
                         epsfile,
                         workingdir,
                         x_tiles=1,
                         y_tiles=1,
                         sliceparam='-0z0'):
    """ Run mpbdata and then h5topng on multiple files using dkbluered color scale. Return 0 if successful. 
  
    field_component: e.g. 'hz';
    dataset: e.g. 'hz.r';

    """
    h5files = glob1(workingdir, field_component + '-*.h5')
    if len(h5files) == 0:
        log.warning("h5topng: Could not open file: %s" % field_component +
                    '-*.h5')
        return 1
    # The dataset must be supplied, otherwise mpb_data does nothing.
    # If only real values were used in sim, then the dataset is e.g. 'hz', otherwise 'hz.r' or 'hz.i'.
    # Get first dataset of first file, other files should be the same:
    dataset = sp.check_output(["h5ls",
                               path.join(workingdir, h5files[0])
                               ]).decode('utf-8').split()[0]
    # Export the real part (works for any field component, not just 'hz'):
    if dataset.endswith('.i'):
        dataset = dataset[:-1] + 'r'
    for h5file in h5files:
        # call mpbdata for each file:
        error = mpb_data(h5file,
                         dataset,
                         'temp_' + h5file,
                         workingdir,
                         x_tiles=x_tiles,
                         y_tiles=y_tiles)
        if error:
            return 1
    # and now h5topng on all files in one call, so that we can use -R:
    callstr = defaults.fieldh5topng_call % dict(h5_file=' '.join(
        ['temp_' + f for f in h5files]),
                                                eps_file=epsfile,
                                                sliceparam=sliceparam)
    log.info("running: %s (in working dir: %s)" % (callstr, workingdir))
    error = sp.call(callstr.split(), cwd=workingdir)
    if error:
        return error
    # move pngs into subdirectory and remove temporary files again:
    dname = path.join(workingdir, sliceparam.lstrip('-'))
    if not path.exists(dname):
        mkdir(dname)
    for h5file in h5files:
        rename(path.join(workingdir, 'temp_' + h5file.rstrip('h5') + 'png'),
               path.join(dname,
                         h5file.rstrip('h5') + 'png'))
        remove(path.join(workingdir, 'temp_' + h5file))
    return 0
Example #5
def h5topng_pwr(h5file, outputfile, epsfile, workingdir, sliceparam='-0z0'):
    """ Run h5topng using hot color scale. Return 0 if successful. """
    if has_magic(h5file):
        names = glob1(workingdir, h5file)
        if len(names) == 0:
            log.warning("h5topng: Could not open file: %s" % h5file)
            return 1
        elif len(names) > 1:
            log.warning(
                'h5topng: Warning: Globbing found multiple'
                ' matching filenames, but will only use first one: %s' %
                names[0])
        # only load the first one found:
        h5file = names[0]
    callstr = defaults.pwrh5topng_call % dict(h5_file=h5file,
                                              output_file=outputfile,
                                              eps_file=epsfile,
                                              sliceparam=sliceparam)
    log.info("running: %s (in working dir: %s)" % (callstr, workingdir))
    return sp.call(callstr.split(), cwd=workingdir)
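For reference, `defaults.pwrh5topng_call` is a %-style template string. A plausible value, shown only as an assumption (the project's real defaults module is not part of these excerpts), built from standard h5topng options:

# Hypothetical template; the real defaults.pwrh5topng_call may use other options.
pwrh5topng_call = ('h5topng %(sliceparam)s -c hot -A %(eps_file)s -a gray '
                   '-o %(output_file)s %(h5_file)s')
print(pwrh5topng_call % dict(h5_file='pwr.h5', output_file='pwr.png',
                             eps_file='epsilon.h5', sliceparam='-0z0'))
# -> h5topng -0z0 -c hot -A epsilon.h5 -a gray -o pwr.png pwr.h5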
Example #6
    def plot_raw_fluxes(self, interactive=False):
        """Plot all recorded flux data

        :return: None

        """
        self.fluxdata = postprocess.load_flux_data(
            self.get_latest_ouput_file_name())

        #if 'fcen' in self.__dict__ and 'df' in self.__dict__:
        #    y_range = (self.fcen - self.df / 2.0, self.fcen + self.df / 2.0)
        #else:
        #    y_range = None

        if interactive:
            fig_filename = None
        else:
            fig_filename = path.join(self.workingdir,
                                     self.jobname + '_fluxes_raw.png')
            log.info('saving flux diagram to file %s' % fig_filename)

        fig = plt.figure(figsize=(14, 5))
        for i in range(self.fluxdata.shape[1] - 1):
            plt.plot(self.fluxdata[:, 0],
                     self.fluxdata[:, i + 1],
                     label='flux plane %i' % (i + 1))

        plt.legend()
        #plt.xlim(0.43, 0.45)
        #plt.ylim(1.5, 2.5)

        plt.tight_layout()
        if fig_filename is not None:
            fig.savefig(fig_filename,
                        transparent=False,
                        bbox_inches='tight',
                        pad_inches=0)
        else:
            plt.show()

        return
Example #7
def mpb_data(h5file, dataset, outputfile, workingdir, x_tiles=1, y_tiles=1):
    """ Run mpb-data. Return 0 if successful. """
    if has_magic(h5file):
        names = glob1(workingdir, h5file)
        if len(names) == 0:
            log.warning("mpb_data: Could not open file: %s" % h5file)
            return 1
        elif len(names) > 1:
            log.warning(
                'mpb_data: Warning: Globbing found multiple'
                ' matching filenames, but will only use first one: %s' %
                names[0])
        # only load the first one found:
        h5file = names[0]
    callstr = defaults.mpbdata_call % dict(h5_file=h5file,
                                           dataset=dataset,
                                           output_file=outputfile,
                                           tiles_x=x_tiles,
                                           tiles_y=y_tiles)
    log.info("running: %s (in working dir: %s)" % (callstr, workingdir))
    return sp.call(callstr.split(), cwd=workingdir)
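`defaults.mpbdata_call` works the same way. A plausible template and the command line it expands to (both assumptions for illustration, not taken from the project):

# Hypothetical template; the real defaults.mpbdata_call may differ.
mpbdata_call = ('mpb-data -r -x %(tiles_x)d -y %(tiles_y)d '
                '-d %(dataset)s -o %(output_file)s %(h5_file)s')
print(mpbdata_call % dict(h5_file='hz-000100.00.h5', dataset='hz.r',
                          output_file='temp_hz-000100.00.h5',
                          tiles_x=3, tiles_y=1))
# -> mpb-data -r -x 3 -y 1 -d hz.r -o temp_hz-000100.00.h5 hz-000100.00.h5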
Example #8
    def run_simulation(self, num_processors=2):
        self.write_ctl_file(self.workingdir)

        meep_call_str = defaults.meep_call % dict(num_procs=num_processors)

        with open(self.out_file, 'w') as outputFile:
            log.info("Using MEEP " + defaults.meepversion)
            log.info("Running the MEEP-computation using the following "
                     "call:\n" + " ".join([meep_call_str, self.ctl_file]))
            log.info("Writing MEEP output to %s" % self.out_file)
            # write Time and ctl as reference:
            outputFile.write("This is a simulation started by pyMEEP\n")
            outputFile.write("Run on: " + uname()[1] + "\n")
            starttime = datetime.now()
            outputFile.write("Date: " + str(starttime) + "\n")
            outputFile.write("\n=================================\n")
            outputFile.write("=========== CTL INPUT ===========\n")
            outputFile.write("=================================\n\n")
            outputFile.write(str(self))
            outputFile.write("\n\n==================================\n")
            outputFile.write("=========== MEEP OUTPUT ===========\n")
            outputFile.write("==================================\n\n")
            outputFile.flush()
            log.info('MEEP simulation is running... To see progress, please '
                     'check the output file %s' % self.out_file)
            # run MEEP, write output to outputFile:
            # TODO can we also pipe MEEP output to stdout, so the user can
            # see progress?
            p = sp.Popen(meep_call_str.split() + [self.ctl_file],
                         stdout=outputFile,
                         stderr=sp.STDOUT,
                         cwd=self.workingdir)
            retcode = p.wait()
            endtime = datetime.now()
            outputFile.write("finished on: %s (duration: %s)\n" %
                             (str(endtime), str(endtime - starttime)))
            outputFile.write("returncode: " + str(retcode))
            log.info("Simulation finished, returncode: " + str(retcode))

        return retcode
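`defaults.meep_call` only receives the number of processes. A plausible value for an MPI build of MEEP (an assumption, not the project's actual default) and the resulting call:

# Hypothetical template; the real defaults.meep_call may use a different
# MPI launcher or executable name.
meep_call = 'mpirun -np %(num_procs)d meep-mpi'
print(meep_call % dict(num_procs=2) + ' myjob.ctl')
# -> mpirun -np 2 meep-mpi myjob.ctl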
Example #9
def main():
    ownname = path.splitext(path.basename(sys.argv[0]))[0].capitalize()
    if len(sys.argv) > 1:
        runmode = sys.argv[1][0]
    else:
        print(
            'please provide mode: "sim" / "s" or "display" / "d" or "post-process" / "p"'
        )
        return

    if len(sys.argv) > 2:
        onlystep = float(sys.argv[2])
    else:
        onlystep = None

    resolution = 12
    fcen = 0.5
    df = 0.5
    #    dpml = 3
    #    sx = 22
    #    sy = 12
    #    sz = 10
    thickness = 0.8
    containing_folder = './'

    minstep = 1
    stepsize = 1
    maxstep = 8.0  #minstep + stepsize * 6
    numsteps = int((maxstep - minstep) / stepsize + 1.5)
    steps = np.linspace(minstep, maxstep, num=numsteps, endpoint=True)
    #steps = [0.8]

    if runmode == 'd' and numsteps > 1 and onlystep is None:
        print(
            'will only plot first step: %f. Alternatively, specify step as 2nd parameter'
            % minstep)
        steps = [minstep]
    elif onlystep is not None:
        steps = [onlystep]

    numsteps = len(steps)

    # save previous step's data, needed for comparison with current data:
    prev_step_data = None

    for i, step in enumerate(steps):
        dpml = step
        sx = 16 + 2 * dpml
        sy = 6 + 2 * dpml
        sz = 4 + 2 * dpml
        log.info("running simulation with {0:n} steps:\n{1}".format(
            numsteps, steps) +
                 '\n  ### step: #{0} ({1}) ###\n'.format(i + 1, step))

        ### create and run simulation ###

        jobname = ownname + '_res{0:03.0f}'.format(resolution)
        jobname_suffix = '_dpml{0:03.0f}'.format(dpml * 10)

        sim = Simulation(
            jobname=jobname + jobname_suffix,
            ctl_template=template,
            resolution=resolution,
            work_in_subfolder=path.join(containing_folder,
                                        jobname + jobname_suffix),
            clear_subfolder=runmode.startswith('s'),
            sx=sx,
            sy=sy,
            sz=sz,
            thickness=thickness,
            dpml=dpml,
            fcen=fcen,
            df=df,
        )

        if runmode == 's':
            error = sim.run_simulation(num_processors=g_num_processors)
            if error:
                log.error(
                    'an error occurred during simulation. See the .out file')
                return
        if runmode in ['s', 'd', 'p']:
            sim.plot_fluxes(interactive=runmode == 'd', only_fluxplanes=None)

        log.info(' ##### step={0} - success! #####\n\n'.format(step))

        # reset logger; the next stuff logged is going to next step's file:
        log.reset_logger()
Example #10
def main():
    ownname = path.splitext(path.basename(sys.argv[0]))[0].capitalize()
    if len(sys.argv) > 1:
        runmode = sys.argv[1][0]
    else:
        print(
            'please provide mode: "sim" / "s" or "display" / "d" or "post-process" / "p"'
        )
        return

    if len(sys.argv) > 2:
        onlystep = float(sys.argv[2])
    else:
        onlystep = None

    radius = 0.38
    thickness = 0.8
    resolution = 12
    sz = 4
    dpml = 1
    containing_folder = './'
    T = 200
    fcen = 0.5
    df = 0.3
    nx = 7
    k_interp = 15

    minstep = 0.20
    stepsize = 0.02
    maxstep = 0.48  #minstep + stepsize * 6
    numsteps = int((maxstep - minstep) / stepsize + 1.5)
    steps = np.linspace(minstep, maxstep, num=numsteps, endpoint=True)
    #steps = [0.8]

    if runmode == 'd' and numsteps > 1 and onlystep is None:
        print(
            'will only plot first step: %f. Alternatively, specify step as 2nd parameter'
            % minstep)
        steps = [minstep]
    elif onlystep is not None:
        steps = [onlystep]

    numsteps = len(steps)

    # save previous step's data, needed for comparison with current data:
    #prev_step_data = None

    for i, step1 in enumerate(steps):
        for j, step2 in enumerate(steps[1::2]):
            log.info(
                "running simulation with "
                "{0:n} first row radius steps:\n{1}".format(numsteps, steps) +
                "{0:n} second row radius steps:\n{1}".format(numsteps, steps) +
                '\n  ### current step: row1: #{0} ({1}); row2: #{2} ({3}) ###\n'
                .format(i + 1, step1, j + 1, step2))

            r1 = step1
            r2 = step2

            ### create and run simulation ###

            jobname = ownname + '_r{0:03.0f}_t{1:03.0f}_res{2:03.0f}'.format(
                radius * 1000, thickness * 100, resolution)
            jobname_suffix = '_1r{0:03.0f}_2r{1:03.0f}'.format(
                r1 * 1000, r2 * 1000)

            sim = Simulation(jobname=jobname + jobname_suffix,
                             ctl_template=template,
                             resolution=resolution,
                             work_in_subfolder=path.join(
                                 containing_folder, jobname + jobname_suffix),
                             clear_subfolder=runmode.startswith('s'),
                             radius=radius,
                             thickness=thickness,
                             sz=sz,
                             dpml=dpml,
                             harminv_time_steps=T,
                             fcen=fcen,
                             df=df,
                             nx=nx,
                             k_interp=k_interp,
                             wg_row1rad=r1,
                             wg_row2rad=r2)

            if runmode == 's':
                error = sim.run_simulation(num_processors=g_num_processors)
                if error:
                    log.error(
                        'an error occurred during simulation. See the .out file'
                    )
                    return
            if runmode in ['s', 'd', 'p']:
                sim.post_process_bands(interactive=runmode == 'd',
                                       gap=[0.44090, 0.52120])

            log.info(' ##### step={0}-{1} - success! #####\n\n'.format(
                step1, step2))

            # reset logger; the next stuff logged is going to next step's file:
            log.reset_logger()
Example #11
    def onclick(self, event):
        """This is the function called if the bands are plotted with a
        picker supplied and the user clicks on a vertex in the plot. It then
        prints some information about the vertex(ices) clicked on to stdout,
        including the mode, the k-vector and -index and the frequency(ies).

        The k-index, the frequency and a default bandwidth (should be adjusted
        manually later) is added to a file ('patterns_to_simulate') which can
        be used later to selectively excite a single mode and save the mode
        pattern in another simulation.
        On the other hand, if the mode pattern was already simulated in this
        way, there should exist a subfolder with the name
        'pattern_k{0:03.0f}_f{1:.4f}'.format(kindex, frequency).replace('.', 'p').
        Then a new figure is displayed with all pngs found in this subfolder.

        """
        try:
            thisline = event.artist
            xdata = thisline.get_xdata()
            ydata = thisline.get_ydata()
            ind = event.ind
            print('event.ind:', ind, thisline.indexes.shape)
            #xaxisformatter = event.mouseevent.inaxes.xaxis.major.formatter
        except AttributeError:
            print('error getting event data')
            return

        print()
        for i in ind:
            kindex = thisline.indexes[0, i]
            bandindex = thisline.indexes[1, i]
            kvec = thisline.kdata[kindex]
            freq = thisline.hdata[kindex, bandindex, 0]
            #xaxispos = xdata[i]
            #freq = ydata[i]
            s = 'picker_index={0}, band_index={1}, k_index={2:.0f}, k_vec={3}, freq={4}'.format(
                i, bandindex, kindex, kvec, freq)
            print(s + '; ')

        ## display mode pattern if it was exported;
        patterndir = path.join(
            self.workingdir,
            'pattern_k{0:03.0f}_f{1:.4f}'.format(kindex,
                                                 freq).replace('.', 'p'))

        ## calculate it if not exported yet:
        if not path.exists(patterndir):
            log.info(
                'Folder "%s" not found; added to list of modes to be simulated.'
                % patterndir)
            with open(path.join(self.workingdir, 'patterns_to_simulate'),
                      'a') as f:
                f.write("{0:.0f}\t{1:.4f}\t{2:.4f}\n".format(
                    kindex, freq, defaults.mode_pattern_sim_df))
            # mark the mode in the plot:
            s = plt.scatter([kindex], [freq],
                            facecolors='none',
                            edgecolors='r',
                            s=100)
            plt.show()

        if path.exists(patterndir):
            # display all pngs in folder
            pngs = glob1(patterndir, '*.png')
            cnt = len(pngs)
            if not cnt:
                return
            print('displaying all pngs in folder: %s' % patterndir)
            # print the frequencies found in the simulation to stdout, to make sure only one mode was excited:
            with open(path.join(patterndir,
                                glob1(patterndir, '*.out')[0])) as f:
                for line in f:
                    if line.startswith('harminv'):
                        print(line.rstrip())

            # Start interactive mode:
            plt.ion()
            # create a new popup figure:
            fig, axes = plt.subplots(ncols=cnt,
                                     num='mode pattern',
                                     figsize=(min(16, cnt * 2), 2),
                                     sharey=True)
            plt.subplots_adjust(left=0, right=1, bottom=0, top=0.92, wspace=0)

            maxx = 0
            maxy = 0
            for i, fname in enumerate(pngs):
                img = mpimg.imread(path.join(patterndir, fname))
                axes[i].imshow(img, )
                #origin='upper',
                #extent=(xl, xr, yb, yt),
                #interpolation='none')
                maxx = max(maxx, img.shape[1])
                maxy = max(maxy, img.shape[0])
                axes[i].set_title(fname, {'fontsize': 6})
                # remove ticks:
                axes[i].set_xticks([])
                axes[i].set_yticks([])

            print(maxx, maxy)
            for ax in axes:
                ax.set_xlim(0, maxx)
                ax.set_ylim(0, maxy)
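For context, `onclick` only fires if the band plot was drawn with a picker and the handler is registered for matplotlib pick events; in these excerpts that wiring happens inside `postprocess.plot_bands` via its `onpick` argument (see Example #12). A rough, hypothetical sketch of such wiring with dummy data (`sim` is assumed to be an existing Simulation instance):

# Hypothetical wiring with dummy data; in the project the connection is made
# inside postprocess.plot_bands via its onpick argument.
import numpy as np
import matplotlib.pyplot as plt

nk = 16
kdata = np.linspace(0, 0.5, nk)                        # dummy k-vectors
hdata = np.linspace(0.3, 0.6, nk).reshape(nk, 1, 1)    # dummy frequencies, one band
fig, ax = plt.subplots()
line, = ax.plot(range(nk), hdata[:, 0, 0], 'o', picker=5)  # picker=5: 5-point pick radius
line.indexes = np.vstack([range(nk), [0] * nk])    # custom attrs onclick reads:
line.kdata, line.hdata = kdata, hdata              # k/band indexes, k-vectors, frequencies
fig.canvas.mpl_connect('pick_event', sim.onclick)  # sim: an existing Simulation (assumed)
plt.show()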
Example #12
    def post_process_bands(self,
                           interactive=False,
                           gap=None,
                           maxq=None,
                           coldataindex=1):
        """Make csv files for all band information.

        :return: None

        """

        # load all frequencies, even if two harminv outputs present:
        self.kdata, self.hdata = postprocess.load_bands_data(
            self.get_latest_ouput_file_name(),
            phase_tolerance=100,
            freq_tolerance=-1)  # 0.01)

        #        x_axis_formatter = axis_formatter.KVectorAxisFormatter(5)
        #        # add hover data:
        #        if x_axis_formatter._hover_func_is_default:
        #            x_axis_formatter.set_hover_data(self.kdata)
        #
        #        plotter = BandPlotter(figure_size=defaults.fig_size)
        #        print (self.kdata.shape, self.hdata.shape)
        #        plotter.plot_bands(
        #            self.hdata[:, :, 0], self.kdata,
        #            formatstr='o'
        #            x_axis_formatter=x_axis_formatter)
        #        plotter.show()

        if 'fcen' in self.__dict__ and 'df' in self.__dict__:
            y_range = (self.fcen - self.df / 2.0, self.fcen + self.df / 2.0)
        else:
            y_range = None

        if interactive:
            fig_filename = None
            self.patts_simulated = self.find_modes_with_calculated_patterns()
            try:
                self.patts_to_sim = np.loadtxt(path.join(
                    self.workingdir, 'patterns_to_simulate'),
                                               ndmin=2)
            except IOError:
                self.patts_to_sim = None
        else:
            fig_filename = path.join(self.workingdir,
                                     self.jobname + '_bands.png')
            log.info('saving band diagram to file %s' % fig_filename)
            self.patts_simulated = None
            self.patts_to_sim = None
        postprocess.plot_bands(
            self.kdata,
            self.hdata,
            draw_light_line=True,
            maxq=maxq,
            filename=fig_filename,
            onpick=self.onclick,
            modes_with_calculated_patterns=self.patts_simulated,
            mode_patterns_to_be_calculated=self.patts_to_sim,
            y_range=y_range,
            gap=gap,
            coldataindex=coldataindex)

        return
Example #13
    def __init__(self,
                 jobname,
                 ctl_template,
                 resolution=defaults.default_resolution,
                 work_in_subfolder=True,
                 clear_subfolder=True,
                 logger=True,
                 quiet=defaults.isQuiet,
                 **kwargs):

        self.jobname = jobname
        self.ctl_template = ctl_template
        self.resolution = resolution
        self.quiet = quiet
        # All extra kwargs are going into this object's __dict__.
        # They are supposed to be used in ctl_template:
        self.__dict__.update(kwargs)

        self.work_in_subfolder = work_in_subfolder
        self.clear_subfolder = clear_subfolder
        if isinstance(work_in_subfolder, bool):
            if work_in_subfolder:
                # create default subfolder from jobname:
                self.workingdir = path.abspath(path.join(path.curdir, jobname))
            else:
                # work here, no subfolder:
                self.workingdir = path.abspath(path.curdir)
        else:
            # hopefully a string
            self.workingdir = path.abspath(
                path.join(path.curdir, work_in_subfolder))

        # the .ctl file that MEEP will use:
        self.ctl_file = jobname + '.ctl'
        # a date & time stamp added to log and output filenames:
        dtstamp = ('_{0.tm_year}-{0.tm_mon:02}-{0.tm_mday:02}'
                   '_{0.tm_hour:02}-{0.tm_min:02}-'
                   '{0.tm_sec:02}').format(time.localtime())
        # the output file, where all MEEP output will go:
        self.out_file = path.join(self.workingdir, jobname + dtstamp + '.out')
        # a log file, where information from pyMEEP will go:
        self.log_file = path.join(self.workingdir, jobname + dtstamp + '.log')
        # the file where MEEP usually saves the dielectric:
        self.eps_file = path.join(self.workingdir, 'epsilon.h5')

        # logger is not setup yet, because the log file might be placed in a
        # subfolder that still needs to be created. But, I want to log that
        # I created a new directory. So make a simple log buffer:
        to_log = []

        to_log.append('Working in directory ' + self.workingdir)
        if self.work_in_subfolder:
            if path.exists(self.workingdir):
                to_log.append('directory exists already: ' + self.workingdir)
                if self.clear_subfolder:
                    # directory exists, make backup
                    backupdir = self.workingdir + '_bak'
                    if path.exists(backupdir):
                        # previous backup exists already, remove old
                        # backup, but keep .log and .out files (they have
                        # unique names):
                        keepers = (glob1(self.workingdir + '_bak', '*.log') +
                                   glob1(self.workingdir + '_bak', '*.out'))
                        to_log.append(
                            ('removing existing backup {0}, but keeping {1}'
                             ' old log and output files').format(
                                 backupdir, len(keepers)))
                        for f in keepers:
                            rename(path.join(backupdir, f),
                                   path.join(self.workingdir, f))
                        rmtree(backupdir)
                        to_log.append(backupdir + ' removed')
                    # rename current (old) dir to backup:
                    rename(self.workingdir, backupdir)
                    to_log.append('existing ' + self.workingdir +
                                  ' renamed to ' + backupdir)
                    # make new empty working directory:
                    mkdir(self.workingdir)
                    to_log.append('created directory ' + self.workingdir +
                                  '\n')
                else:
                    to_log.append('working in existing directory.')
            else:
                # make new empty working directory:
                mkdir(self.workingdir)
                to_log.append('created directory ' + self.workingdir + '\n')

        if logger:
            if hasattr(logger, 'log') and callable(logger.log):
                # a custom logger was given as parameter, use it:
                log.logger = logger
            else:
                # Create the logger. Afterwards, we can also use
                # log.info() etc. in other modules. All status, logging
                # and stderr output will go through this logger (except
                # MEEP's output during simulation):
                log.setup_logger('root.' + self.jobname,
                                 self.log_file,
                                 self.quiet,
                                 redirect_stderr=True)

        # now we can log the stuff from before:
        if to_log:
            log.info('\n' + '\n'.join(to_log))
        del to_log

        new_environ_dict = {'GUILE_WARN_DEPRECATED': 'no'}
        environ.update(new_environ_dict)
        log.info('added to environment:' + ''.join([
            '\n  {0}={1}'.format(key, environ[key])
            for key in new_environ_dict.keys()
        ]))

        log.info('pymeep Simulation created with following properties:' +
                 ''.join([
                     '\npymeepprop: {0}={1!r}'.format(key, val)
                     for key, val in self.__dict__.items()
                 ]) + '\n\n')
Example #14
def main():
    ownname = path.splitext(path.basename(sys.argv[0]))[0].capitalize()
    if len(sys.argv) > 1:
        runmode = sys.argv[1][0]
    else:
        print(
            'please provide mode: "sim" / "s" or "display" / "d" or "post-process" / "p"'
        )
        return

    if len(sys.argv) > 2:
        onlystep = float(sys.argv[2])
    else:
        onlystep = None

    gdsfile = './design.gds'
    gdscell = 'bend_optS'
    gdsunitlength = 0.27e-6
    resolution = 12
    fcen = 0.5
    df = 0.5
    dpml = 3
    dpml_thick = 6  # thicker pml where phc protrudes into pml
    swgwidth = '(sqrt 3)'
    swglength = 4 + dpml
    holerad = 0.32
    phcterm = 0
    sx = np.ceil((swglength + dpml_thick + 9) / 2.0) * 2
    sy = np.ceil((5 + dpml_thick) * np.sqrt(3) / 2.0) * 2
    sz = 4 + 2 * dpml
    thickness = 0.8
    containing_folder = './'

    minstep = 0
    stepsize = 0.1
    maxstep = 0  #minstep + stepsize * 6
    numsteps = int((maxstep - minstep) / stepsize + 1.5)
    steps = np.linspace(minstep, maxstep, num=numsteps, endpoint=True)
    #steps = [0.8]

    if runmode == 'd' and numsteps > 1 and onlystep is None:
        print(
            'will only plot first step: %f. Alternatively, specify step as 2nd parameter'
            % minstep)
        steps = [minstep]
    elif onlystep is not None:
        steps = [onlystep]

    numsteps = len(steps)

    # save previous step's data, needed for comparison with current data:
    prev_step_data = None

    for i, step in enumerate(steps):
        log.info("running simulation with {0:n} steps:\n{1}".format(
            numsteps, steps) +
                 '\n  ### step: #{0} ({1}) ###\n'.format(i + 1, step))

        ### create and run simulation ###

        gdsfilebase = path.splitext(path.basename(gdsfile))[0]
        pointsfilename = gdsfilebase + '_' + gdscell + '.dat'
        jobname = ownname + '_r{0:03.0f}_res{1:03.0f}'.format(
            holerad * 1000, resolution)
        jobname_suffix = '_opt0'  #_term{0:03.0f}'.format(step*1000)

        sim = Simulation(
            jobname=jobname + jobname_suffix,
            ctl_template=template,
            resolution=resolution,
            work_in_subfolder=path.join(containing_folder,
                                        jobname + jobname_suffix),
            clear_subfolder=runmode.startswith('s'),
            sx=None,  # will be set later
            sy=None,  # will be set later
            sz=sz,
            thickness=thickness,
            pointsfilename=pointsfilename,
            dpml=dpml,
            dpmlthick=dpml_thick,
            swgwidth=swgwidth,
            swglength=swglength,
            swg_y_pos=None,  # will be set later
            holerad=holerad,
            phcterm=phcterm,
            fcen=fcen,
            df=df,
        )

        # prepare circles list:
        bb = export_gds_elements_locations_and_radii(gdsfile,
                                                     gdscell,
                                                     path.join(
                                                         sim.workingdir,
                                                         pointsfilename),
                                                     unit=gdsunitlength,
                                                     layers=[1])

        # late setting of important grid size:
        sim.sx = np.ceil((swglength + bb[1, 0] - bb[0, 0]) * resolution /
                         2) / resolution * 2.0
        sim.sy = np.ceil(
            (bb[1, 1] - bb[0, 1]) * resolution / 2) / resolution * 2.0
        sim.swg_y_pos = (bb[0, 1] + bb[1, 1]) / -2.0

        if runmode == 's':
            error = sim.run_simulation(num_processors=g_num_processors)
            if error:
                log.error(
                    'an error occurred during simulation. See the .out file')
                return
        if runmode in ['s', 'd', 'p']:
            sim.plot_fluxes(
                interactive=runmode == 'd',
                only_fluxplanes=None,
                gap=[0.411115, 0.473015],
            )  #xlim=(0.4, 0.5), ylim=(0.3, 1))

        log.info(' ##### step={0} - success! #####\n\n'.format(step))

        # reset logger; the next stuff logged is going to next step's file:
        log.reset_logger()
Example #15
def distribute_pattern_images(imgfolder,
                              dstfile_prefix,
                              dstfile_type='pdf',
                              borderpixel=5,
                              vertical_complex_pairs=False,
                              only_k=None,
                              title='',
                              show=False):
    """Read all pngs (from MPB simulation) from *imgfolder* and distribute
    them according to bandnumber and k vector number.

    The filenames must be in a format like h.k55.b06.z.r.zeven.png,
    where the mode is optional. The field (here: 'h'), direction (here:
    '.z') and mode (here: '.zeven') will be added to *dstfile_prefix* to
    make up the destination file name (with extension '.' +
    *dstfile_type*)

    *borderpixel* is the number of pixels that the border around the
    images will take up. (between r and i parts; border between bands
    and kvecs will take up 3*borderpixel)

    If *vertical_complex_pairs* is False (default), real and imaginary
    parts of the field patterns will be next to each other, otherwise
    on top of each other.

    Specify *only_k* to limit the k-vecs to be included in the field
    pattern diagram. *only_k* can be a list with k-vector indexes to be
    included OR a tuple with length 2, in which case *only_k* is
    interpreted as a slice, e.g. (0, 2) meaning the first, second and
    third exported k-vector will be included. Please note that with
    these indexes, the index of exported k-vectors are meant, not the
    index of all k-vectors simulated.  If it is None (default), all
    k-vectors where field patterns were exported will be added to the
    diagram.

    """
    if not path.isdir(imgfolder):
        return 0
    # make list of all field pattern png files:
    filenames = glob1(imgfolder, "*.png")
    if not filenames:
        return 0

    # Build the regular expression pattern for parsing file names:

    # re that matches the output, i.e. field (e, d or h) or 'dpwr' etc.:
    f = r'(?P<field>[edh]|hpwr|dpwr)'
    # re that matches the k number part, starting with '.':
    k = r'[.]k(?P<knum>\d+)'
    # re that matches the band number part, starting with '.':
    b = r'[.]b(?P<bandnum>\d+)'
    # re that matches the dataset:
    d = r'(?:[.](?P<data>[xyz][.][ri]|data))?'
    # re that matches anything following '.', which does not contain
    # another period (this should be the mode: te, tm, zodd etc.):
    m = r'(?:[.](?P<mode>[^.]+))?'
    # The final re pattern matches field pattern PNG file names:
    retest = re.compile(''.join([f, k, b, d, m, '[.]png']))
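    # Illustration (using the docstring's example name): 'h.k55.b06.z.r.zeven.png'
    # parses into field='h', knum='55', bandnum='06', data='z.r', mode='zeven'.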

    # Analyze files in folder and make dictionary with data for each
    # destination file:
    dst_dict = dict()
    for fname in filenames:
        m = retest.match(fname)
        if m is None:
            # found png file with unknown format
            log.warning('Distribute field pattern images: Could not '
                        'parse the file name: {0}'.format(fname))
            continue
        redict = m.groupdict()
        field = redict['field']
        data = redict['data']
        if data is not None and data != 'data':
            field += '_' + data.split('.')[0]
            ri = data.split('.')[1]
        else:
            ri = ''
        mode = redict.get('mode', '')
        if mode:
            dstfile = '.'.join([dstfile_prefix, field, mode, dstfile_type])
        else:
            dstfile = '.'.join([dstfile_prefix, field, dstfile_type])
        if dstfile not in dst_dict:
            axtitle = '${0}{1}$ field pattern{2}'.format(
                field[0].upper(), field[1:],
                ', {0} mode'.format(mode) if mode else '')
            # dst_dict is a dictionary which keys are the unique
            # destination file names. The values are lists. The first
            # four items in these lists are a set of band numbers, a
            # set of k-vectors and a set of complex components (.r
            # and/or .i) occurring in the png file names and the
            # (second) title that will be printed below the major title.
            # Additional items in the value-lists are tuples, one for
            # each png file going to the destination file. The tuple
            # items are the png-file name, the band number, the k-vector
            # index and ['r' or 'i'] for the real or imaginary part.
            dst_dict[dstfile] = [set(), set(), set(), axtitle]

        bandnum = int(redict['bandnum'])
        knum = int(redict['knum'])

        # append to the sets of knum and ri of all png files going
        # to dstfile:
        dst_dict[dstfile][0].add(bandnum)
        dst_dict[dstfile][1].add(knum)
        dst_dict[dstfile][2].add(ri)
        # append a tuple for this png file
        dst_dict[dstfile].append((fname, bandnum, knum, ri))

    # now, for each destination file, make a figure and distribute the
    # pngs belonging there:
    for dstfile_name, dst_list in dst_dict.items():

        # convert sets to sorted lists:
        bnums = sorted(dst_list[0])
        knums = sorted(dst_list[1])
        if only_k is not None:
            if isinstance(only_k, tuple) and len(only_k) == 2:
                # interpret only_k as a slice:
                knums = knums[only_k[0]:only_k[1] + 1]
            else:
                knums = [knums[i] for i in only_k]
        bnums = sorted(bnums)
        # reverse, because I want real part first:
        ris = sorted(dst_list[2], reverse=True)
        num_cmplx_comps = len(ris)

        axtitle = dst_list[3]

        log.info('Distributing following field patterns to'
                 ' file {0}:'.format(dstfile_name))
        log.info(', '.join([tpl[0] for tpl in dst_list[4:]
                            if tpl[2] in knums]))

        # prepare the figure:

        # read img size from first file, all images should be the same!
        img = mpimg.imread(path.join(imgfolder, dst_list[4][0]))
        imgsize = (img.shape[1], img.shape[0])
        # img_aspect = imgsize[1] / imgsize[0]

        # calc pixelsize in data units: I want to force the individual
        # pngs into areas of 1x1 data units, including a half-border
        # around the pngs. That way, the pngs will be placed at integer
        # values of the axes. Because the pngs are generally not
        # rectangular, we will have a different pixelsize in x and y.

        if vertical_complex_pairs:
            # border belonging to one png in x: (1.5 + 1.5) * bordersize:
            pixelsize_x = 1.0 / (imgsize[0] + 3 * borderpixel)
            # border belonging to one png in y: (0.5 + 1.5) * bordersize:
            pixelsize_y = 1.0 / (imgsize[1] + 2 * borderpixel)
            # print 'pixel sizes (in data space)', pixelsize_x, pixelsize_y
        else:
            # border belonging to one png in x: (0.5 + 1.5) * bordersize:
            pixelsize_x = 1.0 / (imgsize[0] + 2 * borderpixel)
            # border belonging to one png in y: (1.5 + 1.5) * bordersize:
            pixelsize_y = 1.0 / (imgsize[1] + 3 * borderpixel)
            # print 'pixel sizes (in data space)', pixelsize_x, pixelsize_y

        # the aspect ratio for the subplot so the pixels turn out
        # rectangular:
        ax_aspect = pixelsize_x / pixelsize_y

        # calc extents of the individual pngs: These values are given in
        # data units. They denote the distance between an integer value
        # of an axis (near png center) to where the boundaries of the
        # pngs will be placed in data space, thereby
        # stretching/shrinking the pngs and leaving an empty border
        # between adjacent pngs.
        ext_thin_border_x = 0.5 - 0.5 * pixelsize_x * borderpixel
        ext_thick_border_x = 0.5 - 1.5 * pixelsize_x * borderpixel
        ext_thin_border_y = 0.5 - 0.5 * pixelsize_y * borderpixel
        ext_thick_border_y = 0.5 - 1.5 * pixelsize_y * borderpixel

        # size in data units:
        if vertical_complex_pairs:
            w_dataunits = len(knums)
            h_dataunits = len(bnums) * num_cmplx_comps
        else:
            w_dataunits = len(knums) * num_cmplx_comps
            h_dataunits = len(bnums)

        # now we have all data, so start plotting

        fig = plt.figure(
            figsize=(1 * w_dataunits / ax_aspect, 1 * h_dataunits)
            # note: do not play with dpi here, it does not change
            # the point size for fonts, so the graphics sizes change
            # while label sizes stay constant!
        )
        # 'facecolor' replaces the deprecated 'axisbg' keyword of older matplotlib:
        ax = fig.add_subplot(111, facecolor='0.5', aspect=ax_aspect)

        # now, we can place each image on the subplot:
        for src_tuple in dst_list[4:]:
            fname = path.join(imgfolder, src_tuple[0])
            bandnum = src_tuple[1]
            knum = src_tuple[2]
            ri = src_tuple[3]

            # where must the image go?
            try:
                ic = ris.index(ri)
                if vertical_complex_pairs:
                    x0 = knums.index(knum)
                    y0 = bnums.index(bandnum) * num_cmplx_comps + ic
                else:
                    x0 = knums.index(knum) * num_cmplx_comps + ic
                    y0 = bnums.index(bandnum)
            except ValueError:
                # kvec was excluded from distribution
                continue
            if vertical_complex_pairs:
                xl = x0 - ext_thick_border_x
                xr = x0 + ext_thick_border_x
                yb = y0 - ext_thin_border_y if ic else y0 - ext_thick_border_y
                yt = y0 + ext_thick_border_y if ic else y0 + ext_thin_border_y
            else:
                xl = x0 - ext_thin_border_x if ic else x0 - ext_thick_border_x
                xr = x0 + ext_thick_border_x if ic else x0 + ext_thin_border_x
                yb = y0 - ext_thick_border_y
                yt = y0 + ext_thick_border_y
            img = mpimg.imread(fname)
            ax.imshow(img,
                      origin='upper',
                      extent=(xl, xr, yb, yt),
                      interpolation='none')

        # set aspect; must be done after ax.imshow, as the latter changes it:
        ax.set_aspect(ax_aspect)

        # set ticks, labels etc.:
        if vertical_complex_pairs:
            klabelform = 'k{knum}'
            xticks = [klabelform.format(knum=k) for k in knums]
            bandlabelform = '{bandnum} ({ri})'
            yticks = [
                bandlabelform.format(bandnum=b, ri={
                    'r': 're',
                    'i': 'im'
                }[c]) for b in bnums for c in ris
            ]
        else:
            ris = ['.' + c if c else c for c in ris]
            klabelform = 'k{knum}{ri}'
            xticks = [
                klabelform.format(knum=k, ri=c) for k in knums for c in ris
            ]
            yticks = [str(b) for b in bnums]
        ax.set_xticks(range(len(xticks)))
        ax.set_xticklabels(xticks, rotation=45)
        ax.set_yticks(range(len(yticks)))
        ax.set_yticklabels(yticks)
        ax.tick_params(which='both', direction='out', length=2)
        ax.set_xlabel('Wave vector index', size='x-large')
        ax.set_ylabel('Band number', size='x-large')
        if title:
            fig.suptitle(title, size='x-large')
            ax.set_title(axtitle, size='large')

        # choose proper data region:
        # ax.autoscale_view(tight=True)
        ax.set_xlim(-0.5, w_dataunits - 0.5)
        ax.set_ylim(-0.5, h_dataunits - 0.5)

        # width of single png in data units:
        w_png_dataunits = imgsize[0] * pixelsize_x
        h_png_dataunits = imgsize[1] * pixelsize_y
        # print 'size of png in data units:', w_png_dataunits, h_png_dataunits

        # read here about transformations:
        # http://matplotlib.org/users/transforms_tutorial.html

        # transform sizes in (axis') data units to pixels:
        w_png_currentpixels, h_png_currentpixels = (
            ax.transData.transform([w_png_dataunits, h_png_dataunits]) -
            ax.transData.transform((0, 0)))
        # print 'size of png transformed to pixel:', w_png_currentpixels, \
        #                                            h_png_currentpixels

        # transformation to transform pixel sizes to figure units: i.e.,
        # what percentage of the whole figure takes up a single png?
        pixel_to_figcoords = fig.transFigure.inverted()
        w_png_figunits, h_png_figunits = pixel_to_figcoords.transform(
            (w_png_currentpixels, h_png_currentpixels))
        # print 'size of png in figure units:', w_png_figunits, h_png_figunits

        # how many pixels should the whole figure contain, so that the
        # individual pngs have their original resolution?
        w_fig_pixel = imgsize[0] / w_png_figunits
        h_fig_pixel = imgsize[1] / h_png_figunits
        # print 'goal size of whole figure in pixel:', w_fig_pixel, h_fig_pixel

        # current size of figure in inches:
        wfig, hfig = fig.get_size_inches()

        # figure dpi, so that resulting pixel size is same than original
        # images:
        wdpi = w_fig_pixel / wfig
        hdpi = h_fig_pixel / hfig
        # print 'dpi to save figure:', wdpi, hdpi
        # note: I used here that 1 figure unit is 1 inch, but this is
        # not correct since I needed to keep the aspect ratio of the
        # individual images while I forced them (with extent parameter
        # in imshow) on areas of approx. 1x1 figure units. Matplotlib
        # then scales everything so it fits, leading to different x and
        # y DPIs. I believe the biggest DPI is correct, because the
        # smaller axis (smaller figure width in figure units than is
        # actually returned by get_size_inches) will just be padded in
        # the image, leading to a smaller DPI as calculated above.
        dpi = max(wdpi, hdpi)

        fig.savefig(dstfile_name, dpi=dpi, bbox_inches='tight', pad_inches=0)

        if show:
            if show == 'block':
                plt.show(block=True)
            else:
                plt.show(block=False)
        else:
            del fig
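A minimal usage sketch for `distribute_pattern_images` (the folder name and prefix below are hypothetical placeholders): it collects all MPB pattern PNGs found in one folder into one PDF sheet per field/mode combination, keeping only the first three exported k-vectors.

# Hypothetical call; 'mode_patterns' must contain PNGs named like
# h.k55.b06.z.r.zeven.png for the filename parsing above to succeed.
distribute_pattern_images('mode_patterns',
                          dstfile_prefix='bands',   # -> bands.h.zeven.pdf etc.
                          dstfile_type='pdf',
                          only_k=(0, 2),            # length-2 tuple = slice: first three exported k-vectors
                          title='Field patterns')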