def difference(out, req_vars):

    # Per-series first difference of y with respect to sorted x, padded
    # with NaN so the output length matches the input length.
    x_id = req_vars[-1]['x_id']
    y_id = req_vars[-1]['y_id']
    forward_diff = req_vars[-1].get('forward_diff', False)

    x = dict_from_list(req_vars, {'id': x_id})
    y = dict_from_list(req_vars, {'id': y_id})

    x_vals = x['vals']
    y_vals = y['vals']

    srs_vals = get_srs_vals(out, req_vars[-1]['series_id'])
    unique_srs, unique_srs_idx = get_unique_idx(srs_vals)

    x_arr = np.array(x_vals, dtype=float)
    y_arr = np.array(y_vals, dtype=float)

    # Avoid shadowing the `out` argument (still needed by get_srs_vals
    # above); collect the result in a separate NaN-initialised array.
    diff_vals = np.full(len(x_arr), np.nan)
    for usi in unique_srs_idx:

        xi = x_arr[usi]
        yi = y_arr[usi]

        srt_idx = np.argsort(xi)

        xis = xi[srt_idx]
        yis = yi[srt_idx]

        yis_d = np.diff(yis)

        if forward_diff:
            concat_lst = [yis_d, [np.nan]]
        else:
            concat_lst = [[np.nan], yis_d]

        diff = np.concatenate(concat_lst)
        diff = diff[np.argsort(srt_idx)]  # go back to original order

        diff_vals[usi] = diff

    req_vars[-1]['vals'] = utils.nan_to_none(diff_vals)
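# A minimal, self-contained sketch (illustrative values only, assuming
# `numpy` is imported as `np` as elsewhere in this file) of the per-series
# operation above: sort by x, difference y, pad with NaN to preserve the
# length, then restore the original ordering:
def _demo_difference_core():
    x = np.array([3.0, 1.0, 2.0])
    y = np.array([9.0, 1.0, 4.0])
    srt_idx = np.argsort(x)                              # x sorted: [1, 2, 3]
    d = np.concatenate([[np.nan], np.diff(y[srt_idx])])  # backward difference
    return d[np.argsort(srt_idx)]                        # [5.0, nan, 3.0]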
def energy_per_atom(out, sim, sim_idx, energy_src, opt_step=None):

    rv_args = {
        'compute_name': 'energy_per_atom',
        'inc_id': False,
        'inc_val': False,
        'energy_src': energy_src,
        'opt_step': opt_step,
    }
    req_vars_defn = get_depends(**rv_args)
    vrs = out['variables']
    req_vars = [dict_from_list(vrs, i) for i in req_vars_defn]
    n = req_vars[0]['vals'][sim_idx]  # 'num_atoms' dependency
    e = req_vars[1]['vals'][sim_idx]  # 'energy' dependency
    return e / n
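# Hedged usage sketch: energy_per_atom() expects the collated `out` dict to
# already hold its resolved dependencies ('num_atoms' first, then 'energy')
# with one value per simulation. All values below are hypothetical, and
# 'final_energy' stands in for whatever energy sources this codebase allows:
def _demo_energy_per_atom():
    out = {'variables': [
        {'type': 'compute', 'name': 'num_atoms', 'id': 'num_atoms',
         'vals': [64, 128]},
        {'type': 'compute', 'name': 'energy', 'id': 'energy',
         'energy_src': 'final_energy', 'vals': [-512.0, -1024.0]},
    ]}
    # -512.0 / 64 == -8.0 (per atom, in whatever units the source uses):
    return energy_per_atom(out, sim=None, sim_idx=0,
                           energy_src='final_energy')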
Example #3
def resolve_var_id_conflict(ordered_vars, vr_id, modify_existing=True):

    # Resolve any ID conflicts: search for the same ID in ordered_vars and
    # rename the existing ordered_vars variable if a conflict is found. A
    # conflict should only arise with an ordered_vars variable added as a
    # dependency, since duplicate user-specified IDs are checked separately.

    trial_id = vr_id
    id_match_idx, id_match = dict_from_list(ordered_vars, {'id': trial_id},
                                            ret_index=True)
    count = 1
    while id_match is not None:
        # Suffix the *original* ID so candidates are 'foo_1', 'foo_2', ...
        # rather than cumulative ('foo_1', 'foo_1_2', ...):
        trial_id = '{}_{:d}'.format(vr_id, count)
        id_match = dict_from_list(ordered_vars, {'id': trial_id})
        count += 1

    if id_match_idx is not None:
        if modify_existing:
            ordered_vars[id_match_idx]['id'] = trial_id
        else:
            return trial_id

    elif not modify_existing:
        return trial_id
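# Hedged sketch (hypothetical IDs): with the suffix fix above, candidate IDs
# are 'pos_1', 'pos_2', ... rather than cumulative ('pos_1_2'). Here 'pos'
# and 'pos_1' are both taken, so the existing 'pos' entry is renamed:
def _demo_resolve_var_id_conflict():
    ordered_vars = [{'id': 'pos'}, {'id': 'pos_1'}]
    resolve_var_id_conflict(ordered_vars, 'pos')
    return ordered_vars  # [{'id': 'pos_2'}, {'id': 'pos_1'}]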
def atoms_gb_dist_change(out, sim, sim_idx):

    rv_args = {
        'compute_name': 'atoms_gb_dist_change',
        'inc_id': False,
        'inc_val': False,
    }
    req_vars_defn = get_depends(**rv_args)
    vrs = out['variables']
    req_vars = [dict_from_list(vrs, i) for i in req_vars_defn]

    atoms_gb_dist_initial = req_vars[0]['vals'][sim_idx]
    atoms_gb_dist_final = req_vars[1]['vals'][sim_idx]
    sup_type = req_vars[2]['vals'][sim_idx]  # declared dependency; unused here

    # Only defined for bicrystal supercells; implicitly returns None
    # otherwise:
    if is_bicrystal(sim):
        return np.array(atoms_gb_dist_final) - np.array(atoms_gb_dist_initial)
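# Minimal sketch (illustrative values): for a bicrystal, the compute is just
# an element-wise difference of per-atom GB distances:
def _demo_gb_dist_change():
    initial = [1.0, 2.0]
    final = [1.5, 1.5]
    return np.array(final) - np.array(initial)  # array([ 0.5, -0.5])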
def get_srs_vals(out, series_id):

    srs_vals = []
    series_names = out['series_name']

    for i in series_id:
        if i in series_names:
            i_idx = series_names.index(i)
            i_vals = utils.get_col(out['series_id']['val'], i_idx)
        else:
            i_vals = dict_from_list(out['variables'], {'id': i})['vals']
        srs_vals.append(i_vals)

    srs_vals = utils.transpose_list(srs_vals)

    sesh_ids = np.array(out['session_id'])[out['session_id_idx']]
    num_sims = len(sesh_ids)

    # Fall back to a single dummy series value per simulation if no series
    # were requested, so that downstream grouping still works:
    if len(srs_vals) == 0:
        srs_vals = [[0] for _ in range(num_sims)]

    return srs_vals
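# Shape sketch: get_srs_vals() turns per-series columns into one row of
# series values per simulation. A minimal stand-in for utils.transpose_list,
# assuming it performs a plain list transpose:
def _demo_transpose(columns=([0, 0, 1], ['a', 'b', 'b'])):
    # [[0, 0, 1], ['a', 'b', 'b']] -> [[0, 'a'], [0, 'b'], [1, 'b']]
    return [list(row) for row in zip(*columns)]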
Example #6
def split_by_series(vrs, pl, num_sims):

    # Partition simulation indices into figure, subplot and trace groups
    # according to the requested series:
    fig_srs = pl.get('fig_series', [])
    subplot_srs = pl.get('subplot_series', [])
    trace_srs = pl.get('trace_series', [])

    all_types_srs = [fig_srs, subplot_srs, trace_srs]
    all_types_srs_vals = [[], [], []]

    for srs_type_idx, srs_type in enumerate(all_types_srs):

        for i in srs_type:
            i_vals = utils.dict_from_list(vrs, {'id': i})['vals']
            all_types_srs_vals[srs_type_idx].append(i_vals)

    all_types_srs_vals = [utils.transpose_list(i) for i in all_types_srs_vals]

    fig_srs_vals = all_types_srs_vals[0]
    subplot_srs_vals = all_types_srs_vals[1]
    trace_srs_vals = all_types_srs_vals[2]

    if len(fig_srs_vals) == 0:
        fig_srs_vals = [[0] for _ in range(num_sims)]

    if len(subplot_srs_vals) == 0:
        subplot_srs_vals = [[0] for _ in range(num_sims)]

    if len(trace_srs_vals) == 0:
        trace_srs_vals = [[0] for _ in range(num_sims)]

    unique_fsv, unique_fsv_idx = utils.get_unique_idx(fig_srs_vals)
    unique_ssv, unique_ssv_idx = utils.get_unique_idx(subplot_srs_vals)
    unique_tsv, unique_tsv_idx = utils.get_unique_idx(trace_srs_vals)

    # For each (figure, subplot, trace) series-value combination, collect
    # the simulation indices common to all three groups:
    all_f = []
    for f in unique_fsv_idx:
        all_s = []
        for s in unique_ssv_idx:
            all_t = []
            for t in unique_tsv_idx:
                all_i = []
                for i in t:
                    if i in f and i in s:
                        all_i.append(i)
                all_t.append(all_i)
            all_s.append(all_t)
        all_f.append(all_s)

    all_traces = {}
    for f_idx, f in enumerate(all_f):
        for s_idx, s in enumerate(f):
            for t_idx, t in enumerate(s):

                if all_traces.get(f_idx) is None:
                    all_traces.update({f_idx: []})

                all_traces[f_idx].append({
                    'subplot_idx': s_idx,
                    'subplot_name': subplot_srs,
                    'subplot_val': unique_ssv[s_idx],
                    'fig_idx': f_idx,
                    'fig_name': fig_srs,
                    'fig_val': unique_fsv[f_idx],
                    'trace_name': trace_srs,
                    'trace_val': unique_tsv[t_idx],
                    'vals_idx': t
                })

    # Convert to a list
    all_traces_lst = [None] * len(all_traces)
    for k, v in all_traces.items():
        all_traces_lst[k] = v

    return all_traces_lst
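# Grouping sketch: split_by_series() relies on a get_unique_idx-style helper
# that maps each unique series value to the indices holding it; the nested
# loops above then intersect those index groups. A minimal stand-in, under
# the assumption that this is what utils.get_unique_idx does:
def _demo_get_unique_idx(vals=([0], [1], [0], [1])):
    unique, unique_idx = [], []
    for i, v in enumerate(vals):
        if v in unique:
            unique_idx[unique.index(v)].append(i)
        else:
            unique.append(v)
            unique_idx.append([i])
    return unique, unique_idx  # ([[0], [1]], [[0, 2], [1, 3]])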
Example #7
def main(plots_defn):

    res_id = plots_defn['results_id']
    res_dir = os.path.join(RES_PATH, res_id)
    res_json_path = os.path.join(res_dir, 'results.json')
    with open(res_json_path, 'r') as f:
        results = json.load(f)

    # Save a copy of the input makeplots options
    src_path = os.path.join(SET_UP_PATH, OPT_FILE_NAMES['makeplots'])
    dst_path = os.path.join(res_dir, OPT_FILE_NAMES['makeplots'])
    shutil.copy(src_path, dst_path)

    series_names = results['series_name']
    sesh_ids = results['session_id_idx']
    num_sims = len(sesh_ids)

    vrs = results['variables']

    for pl_idx, pl in enumerate(plots_defn['plots']):

        figs = []
        lib = pl['lib']
        fn = pl['filename']
        all_data_defn = pl['data']
        axes = pl['axes']
        axes_props = pl['axes_props']
        subplot_rows = pl.get('subplot_rows')

        # Get data values from variable IDs
        all_data = []
        for ii_idx, i in enumerate(all_data_defn):

            if i['type'] == 'poly':

                coeffs = i['coeffs']
                coeffs_defn = utils.dict_from_list(vrs, {'id': coeffs['id']})
                coeffs['vals'] = coeffs_defn['vals']

                if coeffs.get('idx') is not None:
                    for subidx in coeffs['idx']:
                        coeffs['vals'] = coeffs['vals'][subidx]
                d = {
                    'coeffs': coeffs,
                    **{k: v
                       for k, v in i.items() if k not in ['coeffs']}
                }

            elif i['type'] in ['line', 'marker', 'contour']:

                y_defn = utils.dict_from_list(vrs, {'id': i['y']['id']})
                # print("y_defn['vals']: {}".format(y_defn['vals']))

                # Allow setting x data to be an integer list if no x.id specified
                if i.get('x') is not None:
                    x_id = i['x']['id']
                    x_defn = utils.dict_from_list(vrs, {'id': x_id})
                else:
                    i['x'] = {}
                    if isinstance(y_defn['vals'][0], list):
                        x_defn_vals = [
                            list(range(len(i))) for i in y_defn['vals']
                        ]
                    else:
                        x_defn_vals = list(range(len(y_defn['vals'])))
                    x_defn = {'vals': x_defn_vals}
                    print('Setting x data to integer ranges matching the '
                          'length of the y data.')

                x, y = i['x'], i['y']
                x['vals'], y['vals'] = x_defn['vals'], y_defn['vals']

                if x.get('idx') is not None:
                    for subidx in x['idx']:
                        x['vals'] = x['vals'][subidx]

                if y.get('idx') is not None:
                    for subidx in y['idx']:
                        y['vals'] = y['vals'][subidx]

                d = {
                    'x': x,
                    'y': y,
                    **{k: v
                       for k, v in i.items() if k not in ['x', 'y', 'z']}
                }

                if i['type'] == 'contour':
                    z_defn = utils.dict_from_list(vrs, {'id': i['z']['id']})
                    z = i['z']
                    z['vals'] = z_defn['vals']

                    if z.get('idx') is not None:
                        for subidx in z['idx']:
                            z['vals'] = z['vals'][subidx]

                    row_idx = utils.dict_from_list(vrs,
                                                   {'id': i['row_idx_id']})
                    col_idx = utils.dict_from_list(vrs,
                                                   {'id': i['col_idx_id']})
                    grid_shape = utils.dict_from_list(vrs,
                                                      {'id': i['shape_id']})

                    d.update({
                        'z': z,
                        'row_idx': row_idx,
                        'col_idx': col_idx,
                        'grid_shape': grid_shape,
                    })

            all_data.append(d)

        # Split up data according to figure, subplot and trace series
        all_traces = split_by_series(vrs, pl, num_sims)

        # Assign data to each trace in each figure
        all_traces_2 = []
        for f in all_traces:

            f_traces = []

            for tt in f:

                for d in all_data:

                    trc = copy.deepcopy(tt)

                    if d['type'] == 'poly':

                        coeffs = copy.deepcopy(d['coeffs'])
                        cv = utils.index_lst(coeffs['vals'], tt['vals_idx'])

                        non_none_idx = [
                            idx for idx in range(len(cv))
                            if cv[idx] is not None
                        ]

                        if len(non_none_idx) > 1:
                            raise ValueError(
                                'Multiple poly coeffs found for current trace.'
                            )
                        elif not non_none_idx:
                            # Fail with a clear message rather than an
                            # IndexError:
                            raise ValueError(
                                'No poly coeffs found for current trace.')
                        else:
                            cv = cv[non_none_idx[0]]

                        coeffs['vals'] = cv

                        trc.update({
                            'coeffs': coeffs,
                            'xmin': d['xmin'],
                            'xmax': d['xmax'],
                            'sub_subplot_idx': d['axes_idx'][0],
                            'axes_idx': d['axes_idx'],
                            'type': d['type'],
                            'name': d['name'],
                            'legend': d.get('legend', True),
                        })

                    elif d['type'] in ['marker', 'line', 'contour']:

                        x = copy.deepcopy(d['x'])
                        y = copy.deepcopy(d['y'])

                        xv = utils.index_lst(x['vals'], tt['vals_idx'])
                        yv = utils.index_lst(y['vals'], tt['vals_idx'])

                        y_non_none_idx = [
                            idx for idx in range(len(yv))
                            if yv[idx] is not None
                        ]

                        if isinstance(xv[y_non_none_idx[0]], list):
                            if len(y_non_none_idx) == 1:
                                xv = xv[y_non_none_idx[0]]
                                yv = yv[y_non_none_idx[0]]
                            else:
                                raise ValueError('Multiple traces found.')
                        else:
                            xv = utils.index_lst(xv, y_non_none_idx)
                            yv = utils.index_lst(yv, y_non_none_idx)

                        xv = np.array(xv)
                        yv = np.array(yv)

                        if d.get('sort', False):
                            if d['type'] == 'contour':
                                raise ValueError(
                                    'Cannot sort `contour` `type` data.')
                            srt_idx = np.argsort(xv)
                            xv = xv[srt_idx]
                            yv = yv[srt_idx]

                        x['vals'] = xv
                        y['vals'] = yv

                        trc.update({
                            'x': x,
                            'y': y,
                            'sub_subplot_idx': d['axes_idx'][0],
                            'axes_idx': d['axes_idx'],
                            'type': d['type'],
                            'name': d['name'],
                            'legend': d.get('legend', True),
                        })

                        if d['type'] == 'marker':
                            trc.update({'marker': d.get('marker', {})})

                        elif d['type'] == 'line':
                            trc.update({'line': d.get('line', {})})

                        elif d['type'] == 'contour':

                            z = copy.deepcopy(d['z'])
                            row_idx = copy.deepcopy(d['row_idx'])
                            col_idx = copy.deepcopy(d['col_idx'])
                            grid_shape = copy.deepcopy(d['grid_shape'])

                            zv = utils.index_lst(z['vals'], tt['vals_idx'])
                            row_idx_vals = utils.index_lst(
                                row_idx['vals'], tt['vals_idx'])
                            col_idx_vals = utils.index_lst(
                                col_idx['vals'], tt['vals_idx'])
                            grid_shape_vals = utils.index_lst(
                                grid_shape['vals'], tt['vals_idx'])

                            zv = utils.index_lst(zv, y_non_none_idx)
                            row_idx_vals = utils.index_lst(
                                row_idx_vals, y_non_none_idx)
                            col_idx_vals = utils.index_lst(
                                col_idx_vals, y_non_none_idx)
                            grid_shape_vals = utils.index_lst(
                                grid_shape_vals, y_non_none_idx)

                            zv = np.array(zv)
                            row_idx_vals = np.array(row_idx_vals)
                            col_idx_vals = np.array(col_idx_vals)
                            grid_shape_vals = np.array(grid_shape_vals)

                            z['vals'] = zv
                            row_idx['vals'] = row_idx_vals
                            col_idx['vals'] = col_idx_vals
                            grid_shape['vals'] = grid_shape_vals

                            trc.update({
                                'z': z,
                                'contour': {
                                    'row_idx': row_idx['vals'],
                                    'col_idx': col_idx['vals'],
                                    'grid_shape': grid_shape['vals']
                                }
                            })

                    f_traces.append(trc)

            all_traces_2.append(f_traces)

        # Sort out subplots series
        all_traces_3 = copy.deepcopy(all_traces_2)
        for f_idx in range(len(all_traces_3)):

            f = all_traces_3[f_idx]

            # Trace indices for which the final subplot index has been
            # resolved:
            resolved_idx = []
            for t_idx in range(len(f)):

                if t_idx not in resolved_idx:

                    si = f[t_idx]['subplot_idx']
                    ssi = f[t_idx]['sub_subplot_idx']
                    si_new = si + ssi

                    for t2_idx in range(t_idx, len(f)):

                        if f[t2_idx]['subplot_idx'] == si and f[t2_idx][
                                'sub_subplot_idx'] == ssi:
                            f[t2_idx]['subplot_idx'] = si_new
                            resolved_idx.append(t2_idx)

                        elif f[t2_idx]['subplot_idx'] >= si_new:
                            f[t2_idx]['subplot_idx'] += ssi

        # Add subplots to figures list
        for f in all_traces_3:

            num_subplots = utils.get_key_max(f, 'subplot_idx') + 1
            subplots = []
            for sidx in range(num_subplots):

                new_sp = {
                    'axes_props': axes_props,
                }

                # Collect all traces at this subplot idx
                sidx_traces = []
                for t in f:
                    if t['subplot_idx'] == sidx:

                        axidx = t['axes_idx']
                        t.update({
                            'axes_idx': axidx[1],
                            'name': t['name'] + format_title(t['trace_name'],
                                                             t['trace_val']),
                        })

                        sidx_traces.append(t)

                        if new_sp.get('axes') is None:
                            new_sp.update({'axes': axes[axidx[0]]})

                        if new_sp.get('title') is None:
                            new_sp.update({
                                'title': format_title(t['subplot_name'],
                                                      t['subplot_val'])
                            })

                new_sp.update({'traces': sidx_traces})
                subplots.append(new_sp)

            f_d = {
                'subplots': subplots,
                'title': fn + format_title(subplots[0]['traces'][0]['fig_name'],
                                           subplots[0]['traces'][0]['fig_val']),
                'subplot_rows': subplot_rows,
            }
            if f[0].get('save', True):
                f_d['save'] = True
                f_d['save_path'] = f[0].get('save_path', res_dir)

            figs.append(f_d)

        if lib == 'mpl':
            plotting.plot_many_mpl(figs)
        elif lib == 'plotly':
            plotting.plot_many_plotly(figs)
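# Hedged sketch of the minimal `plots_defn` shape main() consumes, based
# only on the keys read above; all IDs and values are hypothetical:
_EXAMPLE_PLOTS_DEFN = {
    'results_id': '2020-01-01-0000_00000',
    'plots': [{
        'lib': 'plotly',
        'filename': 'energy_vs_vol',
        'data': [{'type': 'marker', 'name': 'E(V)', 'axes_idx': [0, 0],
                  'x': {'id': 'vol'}, 'y': {'id': 'energy'}}],
        'axes': [{}],
        'axes_props': {},
        'fig_series': [],
        'subplot_series': [],
        'trace_series': [],
    }],
}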
def get_depends(compute_name, inc_id=True, inc_val=True, **kwargs):
    """
    For a given compute, check if it has any dependencies. If it does,
    return a list of those as new definitions, in addition to the specified
    compute, in the correct dependency order.

    Parameters
    ----------


    """
    # Validation:
    allowed_computes = (list(SINGLE_COMPUTE_LOOKUP.keys()) +
                        list(MULTI_COMPUTE_LOOKUP.keys()))

    if compute_name not in allowed_computes:
        raise ValueError('Compute "{}" is not allowed.'.format(compute_name))

    d = {
        'type': 'compute',
        'name': compute_name,
    }
    if inc_id:
        if compute_name != 'gamma_surface_info':
            d.update({'id': compute_name})
        else:
            d.update({'id': kwargs['info_name']})

    out = []
    if compute_name == 'gb_energy':

        d.update({
            'energy_src': kwargs['energy_src'],
            'opt_step': kwargs['opt_step'],
            'series_id': kwargs['series_id'],
            'unit': kwargs['unit'],
        })
        out = (get_depends('energy',
                           inc_id=inc_id,
                           inc_val=inc_val,
                           energy_src=kwargs['energy_src'],
                           opt_step=kwargs['opt_step']) +
               get_depends('num_atoms', inc_id=inc_id, inc_val=inc_val) +
               [PREDEFINED_VARS['gb_area'], PREDEFINED_VARS['sup_type']]) + out

    elif compute_name == 'energy_per_atom':

        d.update({
            'energy_src': kwargs['energy_src'],
            'opt_step': kwargs['opt_step']
        })
        out = (get_depends('num_atoms', inc_id=inc_id, inc_val=inc_val) +
               get_depends('energy',
                           inc_id=inc_id,
                           inc_val=inc_val,
                           energy_src=kwargs['energy_src'],
                           opt_step=kwargs['opt_step']) + out)

    elif compute_name == 'gamma_surface_info':

        d.update({
            'info_name': kwargs['info_name'],
        })

        csi_idx = {
            'name': 'csi_idx',
            'type': 'series_id',
            'col_id': 'relative_shift',
        }
        grid_idx = {
            'type': 'series_id',
            'name': 'grid_idx',
            'col_id': 'relative_shift'
        }
        point_idx = {
            'type': 'series_id',
            'name': 'point_idx',
            'col_id': 'relative_shift'
        }

        add_out = [csi_idx, grid_idx, point_idx]
        ids = ['csi_idx', 'grid_idx', 'point_idx']
        for i, j in zip(add_out, ids):
            if inc_id:
                i.update({'id': j})

        out = add_out + out

    elif compute_name == 'gb_minimum_expansion':

        d.update({
            'energy_src': kwargs['energy_src'],
            'opt_step': kwargs['opt_step'],
            'series_id': kwargs['series_id'],
        })

        out = (get_depends(
            'energy',
            inc_id=inc_id,
            inc_val=inc_val,
            energy_src=kwargs['energy_src'],
            opt_step=kwargs['opt_step'],
        ) + get_depends('gb_boundary_vac', inc_id=inc_id, inc_val=inc_val) +
               out)

    elif compute_name == 'master_gamma':

        d.update({
            'energy_src': kwargs['energy_src'],
            'opt_step': kwargs['opt_step'],
            'series_id': kwargs['series_id'],
            'unit': kwargs['unit'],
            'use_gb_energy': kwargs['use_gb_energy'],
        })

        if kwargs.get('use_gb_energy', False):
            energy_depends = get_depends('gb_energy',
                                         inc_id=inc_id,
                                         inc_val=inc_val,
                                         energy_src=kwargs['energy_src'],
                                         opt_step=kwargs['opt_step'],
                                         series_id=kwargs['series_id'],
                                         unit=kwargs['unit'])
        else:
            energy_depends = get_depends(
                'energy',
                inc_id=inc_id,
                inc_val=inc_val,
                energy_src=kwargs['energy_src'],
                opt_step=kwargs['opt_step'],
            )

        out = (energy_depends + get_depends('gamma_surface_info',
                                            inc_id=inc_id,
                                            inc_val=inc_val,
                                            info_name='row_idx') +
               get_depends('gamma_surface_info',
                           inc_id=inc_id,
                           inc_val=inc_val,
                           info_name='col_idx') +
               get_depends('gamma_surface_info',
                           inc_id=inc_id,
                           inc_val=inc_val,
                           info_name='x_std_vals') +
               get_depends('gamma_surface_info',
                           inc_id=inc_id,
                           inc_val=inc_val,
                           info_name='y_std_vals') +
               get_depends('gamma_surface_info',
                           inc_id=inc_id,
                           inc_val=inc_val,
                           info_name='x_frac_vals') +
               get_depends('gamma_surface_info',
                           inc_id=inc_id,
                           inc_val=inc_val,
                           info_name='y_frac_vals') +
               get_depends('gamma_surface_info',
                           inc_id=inc_id,
                           inc_val=inc_val,
                           info_name='grid_shape')) + [
                               PREDEFINED_VARS['gb_boundary_vac']
                           ] + out

    elif compute_name == 'energy':

        d.update({
            'energy_src': kwargs['energy_src'],
        })
        opt_step = kwargs.get('opt_step')
        if opt_step is not None:
            d.update({
                'opt_step': opt_step,
            })

    elif compute_name == 'rms_forces':

        d.update({
            'forces_src': kwargs['forces_src'],
        })
        opt_step = kwargs.get('opt_step')
        if opt_step is not None:
            d.update({
                'opt_step': opt_step,
            })

    elif compute_name == 'atoms_gb_dist_change':

        out = [
            PREDEFINED_VARS['gb_dist_initial'],
            PREDEFINED_VARS['gb_dist_final'],
            PREDEFINED_VARS['sup_type'],
        ] + out

    elif compute_name == 'difference':

        d.update({
            'x_id': kwargs['x_id'],
            'y_id': kwargs['y_id'],
            'x_args': kwargs['x_args'],
            'y_args': kwargs['y_args'],
            'series_id': kwargs['series_id'],
        })

        if kwargs.get('forward_diff') is not None:
            d.update({'forward_diff': kwargs['forward_diff']})

        out = (get_depends(
            kwargs['x_id'], **kwargs['x_args'], inc_id=inc_id, inc_val=inc_val)
               + get_depends(kwargs['y_id'],
                             **kwargs['y_args'],
                             inc_id=inc_id,
                             inc_val=inc_val) + out)

    # If the d dict is not in out, add it:
    d_out = dict_from_list(out, d)
    if d_out is None:
        out += [d]

    # Add a vals key to each dict if inc_val is True:
    for v_idx, v in enumerate(out):
        if v.get('vals') is None and inc_val:
            out[v_idx].update({'vals': []})
        elif v.get('vals') is not None and not inc_val:
            del out[v_idx]['vals']

    return out
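# Hedged usage sketch: asking for 'energy_per_atom' should yield its
# dependencies first ('num_atoms', then 'energy'), followed by the compute
# itself. 'final_energy' is a hypothetical energy source:
def _demo_get_depends():
    defns = get_depends('energy_per_atom',
                        energy_src='final_energy',
                        opt_step=None)
    return [d['name'] for d in defns]
    # expected: ['num_atoms', 'energy', 'energy_per_atom']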
Example #9
def collate_results(res_opt, skip_idx=None, debug=False):
    """
    Save a JSON file containing the results of one of more simulation series.

    Idea is to build a dict (saved as a JSON file) which has results from
    simulations in flat lists.

    """

    rs_date, rs_num = utils.get_date_time_stamp(split=True)
    rs_id = rs_date + '_' + rs_num
    if debug:
        rs_id = '0000-00-00-0000_00000'

    def append_series_items(series_items, series_id, num_series, sim_idx,
                            srs_names):

        out = {'path': []}
        for i in series_id:
            path = []
            for j in i:
                srs_idx = srs_names.index(j['name'])
                for k, v in j.items():

                    if k == 'path':
                        path.append(v)
                        continue
                    if k not in out:
                        out.update({k: [None] * num_series})
                    if isinstance(v, np.ndarray):
                        v = v.tolist()
                    out[k][srs_idx] = v

            path_join = '_'.join([str(i) for i in path])
            out['path'].append(path_join)

        for k, v in out.items():
            if k in series_items:
                series_items[k].extend([v])
            else:
                blank = [None] * num_series
                # Pad with blanks for all previously collated sims. Note
                # `sim_idx` (the running index across all series) is used
                # here, not the caller's per-series loop index:
                series_items.update({k: [blank] * sim_idx + [v]})

        for k, v in series_items.items():
            if k not in out:
                blank = [None] * num_series
                series_items[k].append(blank)

        return series_items

    computes = []
    add_vars = []

    # Make a list of all variable ids
    var_ids = []

    # Variables ordered such that dependencies are listed before the
    # variables that depend on them
    ordered_vars = []

    # Loop through variables: do validation
    for vr_idx, vr in enumerate(res_opt['variables']):

        vr_type = vr['type']
        vr_name = vr['name']
        vr_name_idx = vr.get('idx')
        vr_id = vr['id']

        # Check type is allowed:
        if vr_type not in VAR_ALLOWED_REQUIRED:
            raise ValueError('"{}" is not an allowed variable type: {}'.format(
                vr_type, VAR_ALLOWED_REQUIRED.keys()))

        # Check all required keys are given:
        for rk in VAR_ALLOWED_REQUIRED[vr_type]:
            if rk not in vr:
                rk_error = 'Variable #{} must have key: {}'.format(vr_idx, rk)
                raise ValueError(rk_error)

        # Check `id` is not repeated
        if vr_id not in var_ids:
            var_ids.append(vr_id)
        else:
            raise ValueError('Variable #{} id is not unique.'.format(vr_idx))

        ordered_vars = get_reduced_depends(ordered_vars,
                                           vr,
                                           inc_id=True,
                                           inc_val=True)

    # Start building output dict, which will be saved as a JSON file:
    out = {
        'session_id': [],
        'session_id_idx': [],
        'idx': [],
        'series_name': [],
        'variables': ordered_vars,
        'rid': rs_id,
        'output_path': res_opt['output']['path'],
    }

    # Get a list of lists of sims:
    all_sims = []
    for sid in res_opt['sid']:
        path = os.path.join(res_opt['archive']['path'], sid)
        pick_path = os.path.join(path, 'sims.pickle')
        pick = read_pickle(pick_path)
        all_sims.append(pick['all_sims'])

    # Get a flat list of series names for this sim series and get all sims:
    all_srs_name = []
    for series_sims in all_sims:
        sm_0 = series_sims[0]
        sm_0_opt = sm_0.options

        srs_id = sm_0_opt.get('series_id')
        if srs_id is not None:

            # (legacy compatibility)
            if isinstance(srs_id, dict) and len(srs_id) == 1:

                new_srs_id = []
                for k, v in srs_id.items():
                    new_srs_id.append([{'name': k, **v}])
                srs_id = new_srs_id

            for series_id_list in srs_id:
                for series_id_sublist in series_id_list:
                    nm = series_id_sublist['name']
                    if nm not in all_srs_name:
                        all_srs_name.append(nm)

    # Need better logic later to avoid doing this:
    if 'gamma_surface' in all_srs_name:
        all_srs_name[all_srs_name.index('gamma_surface')] = 'relative_shift'
    out['series_name'] = all_srs_name

    # Collect common series info list for each simulation series:
    all_csi = []

    # Loop through each simulation series to append vals to `result`,
    # `parameter` and single `compute` variable types:
    all_ids = {}
    all_sim_idx = 0
    for sid_idx, sid in enumerate(res_opt['sid']):

        # skip_idx may be None (the default); treat that as "skip nothing":
        skips = skip_idx[sid_idx] if skip_idx is not None else []
        path = os.path.join(res_opt['archive']['path'], sid)

        # Open the pickle file associated with this simulation series:
        pick_path = os.path.join(path, 'sims.pickle')
        pick = read_pickle(pick_path)
        sims = pick['all_sims']
        # Get options from first sim if they don't exist (legacy compatibility)
        base_opt = pick.get('base_options', sims[0].options)
        all_csi.append(pick.get('common_series_info'))

        # Loop through each simulation for this series
        for sm_idx, sm in enumerate(sims):

            if sm_idx in skips:
                continue

            # Note: `sid_idx` is reused here, shadowing the outer loop index;
            # `skips` has already been fetched above, so this is safe.
            if sid in out['session_id']:
                sid_idx = out['session_id'].index(sid)
            else:
                out['session_id'].append(sid)
                sid_idx = len(out['session_id']) - 1

            out['session_id_idx'].append(sid_idx)
            out['idx'].append(sm_idx)

            srs_id = sm.options.get('series_id')
            if srs_id is not None:

                # (legacy compatibility)
                if isinstance(srs_id, dict) and len(srs_id) == 1:

                    new_srs_id = []
                    for k, v in srs_id.items():
                        new_srs_id.append([{'name': k, **v}])
                    srs_id = new_srs_id

            if srs_id is None:
                srs_id = [[]]

            all_ids = append_series_items(all_ids, srs_id, len(all_srs_name),
                                          all_sim_idx, all_srs_name)

            # Loop through requested variables:
            for vr_idx, vr in enumerate(out['variables']):

                vr_name = vr['name']
                vr_type = vr['type']
                args = {k: v for k, v in vr.items() if k not in VAR_STD_KEYS}

                if vr_type not in ['result', 'parameter', 'compute']:
                    continue

                if vr_type == 'result':
                    val = sm.results[vr_name]
                elif vr_type == 'parameter':
                    val = sm.options[vr_name]
                elif vr_type == 'compute':
                    func_name = SINGLE_COMPUTE_LOOKUP.get(vr_name)
                    if func_name is not None:
                        val = func_name(out, sm, all_sim_idx, **args)
                    else:
                        # Must be a multi compute
                        continue

                all_sub_idx = vr.get('idx')
                if all_sub_idx is not None:
                    for sub_idx in all_sub_idx:
                        if vr_type == 'parameter':
                            try:
                                val = val[sub_idx]
                            except KeyError:
                                val = vr.get('default')
                                break

                        else:
                            val = val[sub_idx]

                # To ensure the data is JSON compatible:
                if isinstance(val, np.ndarray):
                    val = val.tolist()
                elif isinstance(val, np.generic):
                    # np.asscalar() is deprecated (removed in NumPy 1.23):
                    val = val.item()

                out['variables'][vr_idx]['vals'].append(val)

            all_sim_idx += 1

    all_ids = {k: v for k, v in all_ids.items() if k != 'name'}
    out['series_id'] = all_ids

    all_vrs = out['variables']
    # Now calculate variables which are multi `compute`s and `series_id`s:
    for vr_idx, vr in enumerate(all_vrs):

        vr_type = vr['type']
        vr_name = vr['name']

        if vr_type == 'series_id':
            cid = all_srs_name.index(vr['col_id'])
            vals = utils.get_col(all_ids[vr_name], cid)
            if vr.get('col_idx') is not None:
                vals = utils.get_col_none(vals, vr['col_idx'])
            all_vrs[vr_idx]['vals'] = vals

        elif vr_type == 'compute' and SINGLE_COMPUTE_LOOKUP.get(
                vr_name) is None:
            func = MULTI_COMPUTE_LOOKUP[vr_name]
            req_vars_defn = get_reduced_depends([],
                                                vr,
                                                inc_id=False,
                                                inc_val=False)
            req_vars = [dict_from_list(all_vrs, i) for i in req_vars_defn]

            if vr_name in REQUIRES_CSI:
                func(out, req_vars, common_series_info=all_csi)
            else:
                func(out, req_vars)

    return out
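# Shape sketch of the collated output (all values hypothetical, and the
# 'series_id' layout partly inferred from append_series_items above): flat,
# JSON-serialisable lists with one entry per simulation, alongside the
# resolved variables:
_EXAMPLE_COLLATED = {
    'session_id': ['2020-01-01-0000_00000'],
    'session_id_idx': [0, 0],   # per sim, index into session_id
    'idx': [0, 1],              # per sim, index within its series
    'series_name': ['kpoint'],
    'series_id': {'val': [[20], [30]], 'path': [['kpoint_20'], ['kpoint_30']]},
    'variables': [{'type': 'result', 'name': 'energy', 'id': 'energy',
                   'vals': [-512.0, -514.2]}],
    'rid': '2020-01-01-0000_00001',
    'output_path': '/path/to/output',
}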
Example #10
def var_in_list(lst, var, ret_idx=False):
    # Match on all keys except 'id', so equivalent definitions that differ
    # only by ID are detected:
    var_cnd = {k: v for k, v in var.items() if k not in ['id']}
    return dict_from_list(lst, var_cnd, ret_index=ret_idx)
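# Minimal sketch: var_in_list() matches on everything except 'id', so two
# definitions that differ only by ID are treated as the same variable
# (assuming dict_from_list matches on the given key/value pairs):
def _demo_var_in_list():
    lst = [{'id': 'energy_a', 'type': 'compute', 'name': 'energy'}]
    var = {'id': 'energy_b', 'type': 'compute', 'name': 'energy'}
    return var_in_list(lst, var)  # returns the existing entry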