Example #1
import logging

import lyse
import numpy as np
import runmanager.remote as rm  # assumed: runmanager's remote client, providing get_globals()

logger = logging.getLogger(__name__)  # assumed logger setup for this excerpt


def verify_globals(config):
    logger.debug('Verifying globals...')

    # Get the current runmanager globals
    logger.debug('Getting values of globals from runmanager.')
    rm_globals = rm.get_globals()
    current_values = [rm_globals[name] for name in config['mloop_params']]

    # Retrieve the parameter values requested by M-LOOP on this iteration
    logger.debug('Getting requested globals values from lyse.routine_storage.')
    requested_values = lyse.routine_storage.params
    requested_dict = dict(zip(config['mloop_params'], requested_values))

    # Get the parameter values for the shot we just computed the cost for
    logger.debug('Getting lyse dataframe.')
    df = lyse.data()
    shot_values = [df[name].iloc[-1] for name in config['mloop_params']]

    # Verify integrity by cross-checking against what was requested
    if not np.array_equal(current_values, requested_values):
        message = (
            'Cost requested for values different to those in runmanager.\n'
            'Please add an executed shot to lyse with: {requested_dict}'
        ).format(requested_dict=requested_dict)
        logger.error(message)
        return False
    if not np.array_equal(shot_values, requested_values):
        message = (
            'Cost requested for values different to those used to compute the cost.\n'
            'Please add an executed shot to lyse with: {requested_dict}'
        ).format(requested_dict=requested_dict)
        logger.error(message)
        return False
    logger.debug('Globals verified.')
    return True
Example #2
def cost_analysis(cost_key=(None, ), maximize=True, x=None):
    """Return a cost dictionary to M-LOOP with at least:
      {'bad': True} or {'cost': float}.
    - Look for the latest cost in the cost_key column of the lyse DataFrame,
      along with an uncertainty ('u_' prefix at the lowest level of the column key).
    - Report a bad shot to M-LOOP if the cost is nan or inf.
    - Negate the value from the DataFrame if maximize=True.
    - Fall back to reporting a constant or fake cost (computed from x).
    """
    logger.debug('Getting cost...')
    cost_dict = {'bad': False}

    # Retrieve current lyse DataFrame
    logger.debug('Getting lyse dataframe.')
    df = lyse.data()

    # Use the most recent shot
    ix = -1

    # Retrieve cost from specified column
    if len(df) and cost_key in df:
        cost = (df[cost_key].astype(float).values)[ix]
        if np.isnan(cost) or np.isinf(cost):
            cost_dict['bad'] = True
            logger.info('Got bad cost: {cost}'.format(cost=cost))
        else:
            cost_dict['cost'] = (1 - 2 * maximize) * cost
            logger.info('Got cost: {cost}'.format(cost=cost_dict['cost']))
        u_cost_key = cost_key[:-1] + ('u_' + cost_key[-1], )
        if u_cost_key in df:
            cost_dict['uncer'] = df[u_cost_key].iloc[ix]
            logger.info(
                'Got uncertainty: {uncer}'.format(uncer=cost_dict['uncer']))

    # If it doesn't exist, generate a fake cost
    elif x is not None:
        from fake_result import fake_result

        cost_dict['cost'] = (1 - 2 * maximize) * fake_result(x)
        logger.info('Faked cost: {cost}'.format(cost=cost_dict['cost']))

    # Or just use a constant cost (for debugging)
    else:
        cost_dict['cost'] = 1.2
        logger.info(
            'Faked constant cost: {cost}'.format(cost=cost_dict['cost']))

    return cost_dict
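
In the analysislib-mloop workflow, the two helpers above are typically driven by a lyse multishot routine that hands the cost back to whatever is running the M-LOOP interface. A minimal sketch of such a driver, assuming mloop_config.get() returns a dict containing at least 'mloop_params' and 'cost_key', and that a setup script has stored the requested parameters and a result queue on lyse.routine_storage:

import lyse
import mloop_config  # assumed helper module (also imported in Example #5)

if __name__ == '__main__':
    config = mloop_config.get()
    cost_dict = cost_analysis(
        cost_key=config['cost_key'],
        maximize=config.get('maximize', True),
        x=getattr(lyse.routine_storage, 'params', None),
    )
    # Only report the cost if the shot really used the globals M-LOOP requested
    if not cost_dict['bad'] and not verify_globals(config):
        cost_dict['bad'] = True
    # Hand the result back to the M-LOOP side, e.g. via a queue placed on
    # lyse.routine_storage by a setup script (an assumption of this sketch)
    if hasattr(lyse.routine_storage, 'queue'):
        lyse.routine_storage.queue.put(cost_dict)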
Example #3
    def __init__(self, h5_paths, n_rows=3, **kwargs):
        self.h5_paths = h5_paths

        super().__init__(**kwargs)

        # Ensure a QApplication instance exists (pyqtgraph creates one if needed)
        pg.mkQApp()

        self.n_rows = n_rows

        # Main window: a pyqtgraph DockArea, with the title-bar close button removed
        self.setWindowFlag(QtCore.Qt.WindowCloseButtonHint, False)
        self.area = DockArea()
        self.setCentralWidget(self.area)
        self.resize(1000, 500)

        # Shot selector dock along the bottom of the window
        self.dshotselector = Dock("Shot selector")
        self.shotselector = ShotSelector()
        self.dshotselector.addWidget(self.shotselector)
        self.area.addDock(self.dshotselector, 'bottom')

        # Quick plot generator dock
        self.qpg_dock = Dock("Quick Plot Generator")
        self.qpg_dock.addWidget(QuickPlotGenerator(self))
        self.qpg_dock.setMinimumSize(self.qpg_dock.minimumSizeHint())
        self.area.addDock(self.qpg_dock)

        self.show()

        self.plots = {}
        self.data_extractor_manager = DataExtractorManager()

        # Redraw whenever the shot selection changes
        self.shotselector.valueChanged.connect(self.refresh)
        self.shotselector.selectionChanged.connect(self.refresh)

        # Cache the current lyse dataframe
        self.df = lyse.data()
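
The class this __init__ belongs to is not shown in the excerpt; a hypothetical usage sketch, assuming it is a QtWidgets.QMainWindow subclass called ShotViewer here:

import pyqtgraph as pg

# Hypothetical: ShotViewer stands in for the (unnamed) class whose __init__ is shown above
app = pg.mkQApp()
viewer = ShotViewer(h5_paths=[], n_rows=3)  # h5_paths: shot files, e.g. taken from the lyse dataframe
app.exec_()  # start the Qt event loop; the window was already shown in __init__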
Example #4
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 28 15:23:09 2022

@author: rubidium
"""

import lyse
import numpy as np
import matplotlib.pyplot as plt

import lmfit

# Dataframe for the most recently completed sequence of shots in lyse
df = lyse.data(n_sequences=1)

FIT = False
X_LABEL = 'cMOT time'
scan = df['scan']

# Single-shot results saved by the MOT_Basic_Image_Process analysis routine:
Counts = df['MOT_Basic_Image_Process', 'Counts']
xWidth = df['MOT_Basic_Image_Process', 'xWidth']
yWidth = df['MOT_Basic_Image_Process', 'yWidth']
x0 = df['MOT_Basic_Image_Process', 'x0']
y0 = df['MOT_Basic_Image_Process', 'y0']

if FIT:
    # Now fit to an exponential
    model = lmfit.models.ExpressionModel("amp * (1 - exp(-x / tau))")
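    # --- Illustrative continuation, not part of the original excerpt ---
    # Assumes we fit Counts against the scanned variable `scan`; the amp and
    # tau starting values are rough guesses taken from the data.
    params = model.make_params(amp=float(Counts.max()), tau=float(scan.max()) / 3)
    result = model.fit(Counts.values, params, x=scan.values)
    print(result.fit_report())

    plt.plot(scan, Counts, 'o', label='data')
    plt.plot(scan, result.best_fit, label='fit')
    plt.xlabel(X_LABEL)
    plt.ylabel('Counts')
    plt.legend()
    plt.show()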
Example #5
import lyse
import numpy as np
import matplotlib.pyplot as plt
import mloop_config
from fake_result import fake_result

try:
    df = lyse.data()
    config = mloop_config.get()
    x = list(config['mloop_params'])[0]
    y = config['cost_key']
    try:
        # Try to use the most recent mloop_session ID
        gb = df.groupby('mloop_session')
        mloop_session = list(gb.groups.keys())[-1]
        subdf = gb.get_group(mloop_session)
    except Exception:
        # Fallback to the entire lyse DataFrame
        subdf = df
        mloop_session = None
    subdf.plot(x=x, y=y, kind='scatter')
    x_p = np.linspace(df[x].min(), df[x].max(), 200)
    plt.plot(x_p, fake_result(x_p, s=0))
    plt.axis(ymin=0, ymax=1.1)
    plt.title('M-LOOP session: {:}'.format(mloop_session))
    plt.show()
except Exception:
    pass
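
For context, the config returned by mloop_config.get() is only ever indexed with 'mloop_params' and 'cost_key' in these examples. A purely illustrative dictionary with the same shape (all names and values invented) could look like:

# Illustrative only: mirrors how the examples on this page index the config dict
example_config = {
    # Optimisation parameters, keyed by the runmanager global they control
    'mloop_params': {'example_global': {'min': 0.0, 'max': 1.0, 'start': 0.5}},
    # Lyse dataframe column holding the cost, as a (routine, result) key
    'cost_key': ('fake_result', 'y'),
}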
Example #6
    # Normal-equation matrix of the weighted least-squares fit: P = (B W) B^T
    P = np.dot(BW, B.T)

    # The RHS of the least squares equation:
    orig_projs = np.dot(BW, a.T)

    # Solve for x:
    x = np.linalg.solve(P, orig_projs)

    # Reconstruct the target image
    recon = np.dot(x.T, B)

    return recon.reshape(ny, nx)
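
The fragment above solves a weighted least-squares problem: with the basis images stacked as rows of B (and BW presumably the basis with per-pixel weights applied, defined earlier in the original function), it solves (B W B^T) x = B W a^T for the coefficients x and rebuilds the projection x^T B of the target image a. A self-contained sketch of the same computation on random data, with all shapes and names illustrative:

import numpy as np

ny, nx = 20, 30                       # image shape
n_basis = 5                           # number of basis (reference) images
B = np.random.rand(n_basis, ny * nx)  # basis images, one flattened image per row
w = np.random.rand(ny * nx)           # per-pixel weights
a = np.random.rand(1, ny * nx)        # flattened target image

BW = B * w                            # weighted basis (assumed meaning of BW above)
P = np.dot(BW, B.T)                   # normal-equation matrix B W B^T
rhs = np.dot(BW, a.T)                 # projections of the target onto the weighted basis
x = np.linalg.solve(P, rhs)           # least-squares coefficients
recon = np.dot(x.T, B).reshape(ny, nx)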


run = lyse.Run(lyse.path)
dataset = lyse.data(lyse.path)

try:
    lyse.routine_storage.refs
except AttributeError:
    lyse.routine_storage.refs = None
    lyse.routine_storage.dark_frames = None

with h5py.File(lyse.path, 'r') as h5file:
    for orientation, images in h5file["images"].items():
        if orientation == "bottom":
            for image_type, frameset in images.items():
                if image_type.startswith("absorption"):
                    element = image_type.split("_")[1]
                    isat_name = "_".join(
                        [orientation, element, "saturation_intensity"])

"""
@author: dng5
"""
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
import lyse
from fancy_cost_functions import *
import pandas as pd

try:
    lyse.path
except AttributeError:
    # Running outside lyse: take the shot file from the command line
    import sys
    lyse.path = sys.argv[1]

series = lyse.data(lyse.path)
param1 = series['dummy1']
param2 = series['dummy2']

run_instance = lyse.Run(lyse.path)
print(lyse.path)

sigma0 = (3 / 2 / np.pi) * 780e-9**2

# atom_number = np.nansum(naive_OD*(5.6e-6/6.0)**2/sigma0)
params = np.array([param1, param2])
fitness = sphere(params)

# run_instance.save_result('average_counts', atom_avg)
run_instance.save_result('fitness', fitness)
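
sphere comes from the star import of fancy_cost_functions above; if that module is unavailable, the standard sphere benchmark it presumably implements is just a sum of squares:

def sphere(x):
    """Standard sphere benchmark: sum of squared coordinates, minimised at the origin."""
    x = np.asarray(x, dtype=float)
    return float(np.sum(x ** 2))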
    worksheet_early.write(1, head_i, heading, bold)
    worksheet_late.write(1, head_i, heading, bold)
early_late_worksheets = [worksheet_early, worksheet_late]

worksheet_spectra.write(0, 0,
                        'Early times average energy spectra for each grid',
                        bold)
worksheet_spectra.write(1, 0, 'Grid radius (micron):', boldshaded)
worksheet_spectra.write(2, 0, 'k vector', bold)

## We'll plot everything in units of microns, so convert units here
pixel_conversion = dmd_pixel_size * 1e6
xi = xi * 1e6

## Get the dataframe from lyse
df = lyse.data(timeout=30)

## Throw out the bad shots
df = df[df['review_vortex_locations', 'Good'] == True]

## Segregate the dataframe into "early times" and "late times" (df_l and df_h for low and high hold times)
df_l = df[df['vortex_spoon_wait_time'] < 2]
df_h = df[df['vortex_spoon_wait_time'] > 3.9]

### Set up the figure windows and axes for this grid ###
### Fig. 2:
fig = figure("Grid size", figsize=(2.25, 3.45), facecolor=(1, 1, 1, 0))

## We use gridspec to arrange the subfigures
gs = GridSpec(6, 1, height_ratios=[0.6, 0.8, 0.8, 0.8, 0.8, 0.8])
from analysislib.johnstone_vortices_2018.parameters import *

## Check where to save output:
exp_config = LabConfig()
results_path = exp_config.get('paths', 'analysis_output_folder')

sequence_list = [
    '20171004T093812',  #4.2
    '20171004T121555',  #6.0
    '20171005T090729',  #7.9
    '20171004T144649',  #9.7
    '20171006T101525'  #11.5
]

## Get the dataframe from lyse
full_df = lyse.data(timeout=10)

## Setup a spreadsheet for saving data to
workbook = xlsxwriter.Workbook(os.path.join(results_path, 'time_data.xlsx'))
bold = workbook.add_format({'bold': True})
bold_center = workbook.add_format({'bold': True, 'align': 'center'})
boldshaded = workbook.add_format({'bold': True, 'bg_color': '#CCCCCC'})
shaded = workbook.add_format({
    'bg_color': '#CCCCCC',
    'align': 'center',
    'left': 1,
    'right': 1
})
leftborder = workbook.add_format({'left': 1})
rightborder = workbook.add_format({'right': 1})
boldleftborder = workbook.add_format({'bold': True, 'left': 1})
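
The excerpt stops at the cell formats; in the full script the worksheets referenced earlier are created from this workbook and filled, and xlsxwriter only writes time_data.xlsx to disk once the workbook is closed. A minimal illustrative ending (worksheet name and cell contents invented):

worksheet_example = workbook.add_worksheet('example')
worksheet_example.write(0, 0, 'Grid radius (micron):', boldshaded)
worksheet_example.write(0, 1, 'Hold time (s)', bold_center)
workbook.close()  # nothing is written to disk until the workbook is closed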