Example #1
0
File: grating.py  Project: shanham/spins-b
def view_opt_quick(save_folder: str) -> None:
    """Prints the current result of the optimization.

    Unlike `view_opt`, which plots fields and optimization trajectories,
    `view_opt_quick` prints out scalar monitors in the latest log file. This
    is useful for having a quick look into the state of the optimization.

    Args:
        save_folder: Location where the log files are saved.
    """
    with open(workspace.get_latest_log_file(save_folder), "rb") as fp:
        log_data = pickle.load(fp)
    # `pickle.load` reads the whole file, so iterate outside the `with`.
    for key, data in log_data["monitor_data"].items():
        if np.isscalar(data):
            # `np.isscalar` is True only for plain Python scalars, which
            # have no `.squeeze()` method (the original `data.squeeze()`
            # raised AttributeError here). `np.squeeze` accepts scalars
            # and arrays alike.
            print("{}: {}".format(key, np.squeeze(data)))
Example #2
0
def gen_gds(save_folder: str, sim_width: float) -> None:
    """Generates a GDS file of the grating.

    Args:
        save_folder: Location where log files are saved. It is assumed that
            the optimization plan is also saved there.
        sim_width: Width of the simulation.

    Raises:
        ValueError: If the latest log file does not belong to the final
            transformation, i.e. the optimization did not run to completion.
    """
    # Load the optimization plan to recover the grid spacing `dx`.
    with open(os.path.join(save_folder, "optplan.json")) as fp:
        plan = optplan.loads(fp.read())
    dx = plan.transformations[-1].parametrization.simulation_space.mesh.dx

    # Load the data from the latest log file.
    with open(workspace.get_latest_log_file(save_folder), "rb") as fp:
        log_data = pickle.load(fp)
    if log_data["transformation"] != plan.transformations[-1].name:
        raise ValueError("Optimization did not run until completion.")

    # Convert the parametrization values (in grid units) to lengths.
    coords = log_data["parametrization"]["vector"] * dx

    # NOTE(review): disabled handling for inverted parametrizations kept
    # from the original; re-enable if inverted gratings are needed.
    # if plan.transformations[-1].parametrization.inverted:
    #     coords = np.insert(coords, 0, 0, axis=0)
    #     coords = np.insert(coords, -1, sim_width, axis=0)

    # `coords` holds the grating edge locations in consecutive
    # (start, end) pairs. Draw one rectangle per pair.
    grating_poly = []
    for i in range(0, len(coords), 2):
        # Pair edge `i` with edge `i + 1`. The original used `i - 1`,
        # which on the first iteration (i == 0) wrapped around to the
        # last edge and produced a degenerate polygon.
        grating_poly.append(
            ((coords[i], -sim_width / 2), (coords[i], sim_width / 2),
             (coords[i + 1], sim_width / 2), (coords[i + 1], -sim_width / 2)))

    # Save the grating to `annulus.gds` on layer 100.
    grating = gdspy.Cell("ANNULUS", exclude_from_current=True)
    grating.add(gdspy.PolygonSet(grating_poly, 100))
    gdspy.write_gds(os.path.join(save_folder, "annulus.gds"), [grating],
                    unit=1.0e-9,
                    precision=1.0e-9)
Example #3
0
File: solver.py  Project: kwadwo00/spins-b
def restore_workspace(plan: optplan.OptimizationPlan,
                      work: workspace.Workspace, save_folder: str,
                      console_logger) -> Tuple[int, Optional[Dict]]:
    """Restores the workspace state for resuming optimization plans.

    This function resumes the state of workspace (all parametrization values
    and parameter values) based on the saved data. This is done in the following
    steps:
    1) The checkpoint file corresponding to the last completed transformation
        is found.
    2) All parametrization and parameter values are restored according to the
       checkpoint file.
    3) The last log file is found. If the last log file corresponds to the
        the next transformation that should be executed, the parametrization
        value is restored and the event data from the log file is extracted.

    Args:
        plan: Optimization plan that saved the log data previously.
        work: Workspace to restore.
        save_folder: Folder containing saved log files.
        console_logger: A logging object for logging restoring info.

    Returns:
        A tuple `(transform_index, event_data)` where `transform_index` is the
        index of the transformation in the optimization plan that should be
        executed next and `event_data` is a dictionary containing the event
        data of the last saved log file.
    """
    transform_index = 0
    event_data = None

    # Find the latest transformation with a checkpoint file.
    # Set `transform_index` to the index of the transformation that should
    # be run next (i.e. there exists a checkpoint for the
    # `transform_index - 1` transformation.
    for i, transform in enumerate(plan.transformations):
        if os.path.exists(
                os.path.join(save_folder,
                             "{}.chkpt.pkl".format(transform.name))):
            transform_index = i + 1

    # Load the checkpoint data.
    # If `transform_index` is zero, this means that there is no previous
    # checkpoint so we're on the first transformation in the plan and
    # there is no need to restore any previous parametrizations or
    # parameters.
    if transform_index > 0:
        chkpt_file = os.path.join(
            save_folder, "{}.chkpt.pkl".format(
                plan.transformations[transform_index - 1].name))
        console_logger.info("Restoring from checkpoint {}".format(chkpt_file))
        with open(chkpt_file, "rb") as fp:
            chkpt_data = pickle.load(fp)

        # Iterate through all the previous transformations, restoring any
        # parametrizations and parameters along the way. It is not strictly
        # necessary to restore parametrizations/parameters along the way,
        # but it was done out of implementation convenience.
        for transform in plan.transformations[:transform_index]:
            # Restore any parametrizations.
            work.get_object(transform.parametrization).deserialize(
                chkpt_data["parametrizations"][transform.parametrization.name])

            # Add any parameter descriptions. Actual values restored below.
            if transform.parameter_list:
                for set_param in transform.parameter_list:
                    work._add_node(set_param.parameter)

        # Now restore all parameter values.
        for param, param_value in chkpt_data["parameters"].items():
            work.get_object(param).set_parameter_value(param_value)

    if transform_index >= len(plan.transformations):
        # Optimization plan is complete already.
        return transform_index, None

    # At this point, the state should be exactly the same as when
    # `plan.transformations[transform_index]` started.
    # Now we see if we should restore to the middle of the next
    # transformation.

    # Load the log file with largest step.
    log_file = workspace.get_latest_log_file(save_folder)
    if not log_file:
        return transform_index, None

    console_logger.info("Restoring from log {}".format(log_file))
    with open(log_file, "rb") as fp:
        log_data = pickle.load(fp)

    if log_data["transformation"] == plan.transformations[
            transform_index].name:
        # The log file is in the next transformation so restore the
        # current parametrization value.
        # Use the transformation at `transform_index` here: the original
        # referenced the stale loop variable `transform` (the last
        # transformation iterated above), which restored the wrong
        # parametrization when resuming mid-plan.
        work.get_object(
            plan.transformations[transform_index].parametrization).deserialize(
                log_data["parametrization"])
        event_data = log_data["event"]

    # TODO(logansu): Remove hack.
    work.logger._log_counter = log_data["log_counter"]

    return transform_index, event_data