Example #1
def parse_file(lattice_text, maxId=0):
    parser = LineParser(maxId)
    lines = lattice_text.replace('\r', '').split('\n')
    prev_line = ''
    models = {
        'beamlines': [],
        'elements': [],
        'default_beamline_name': None,
        'rpnVariables': {},
    }
    for line in lines:
        parser.increment_line_number()
        if re.search(r'^\s*\!', line):
            continue
        if re.search(r'\&\s*$', line):
            prev_line += re.sub(r'(\s*\&\s*)$', '', line)
            continue
        if not _parse_line(parser, prev_line + line, models):
            break
        prev_line = ''
    models['rpnVariables'] = map(
        lambda x: {
            'name': x,
            'value': models['rpnVariables'][x]
        }, models['rpnVariables'].keys())
    return models
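For reference, a minimal standalone sketch of the line handling in Example #1: '!' comment lines are skipped and lines ending in '&' are joined with the following line. It uses only the standard library; LineParser and _parse_line are project internals and are not reproduced here.

import re

def join_continued_lines(lattice_text):
    # Sketch only: mirrors the comment-skipping and '&' continuation logic above.
    joined = []
    prev_line = ''
    for line in lattice_text.replace('\r', '').split('\n'):
        if re.search(r'^\s*\!', line):
            # skip '!' comment lines
            continue
        if re.search(r'\&\s*$', line):
            # strip the trailing '&' and hold the fragment for the next line
            prev_line += re.sub(r'(\s*\&\s*)$', '', line)
            continue
        if prev_line + line:
            joined.append(prev_line + line)
        prev_line = ''
    return joined

# 'Q1: QUAD, &' and '  L=0.5' collapse into one logical statement:
print(join_continued_lines('Q1: QUAD, &\n  L=0.5\n! a comment\n'))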
Example #2
def parse_file(command_text):
    parser = LineParser(0)
    lines = command_text.replace('\r', '').split('\n')
    prev_line = ''
    commands = []

    for line in lines:
        parser.increment_line_number()
        if re.search(r'^#', line):
            continue
        line = re.sub(r'\!.*$', '', line)
        if not line:
            continue
        if re.search(r'\&end', line):
            if not _parse_line(parser, prev_line + ' ' + line, commands):
                break
            prev_line = ''
        elif re.search(r'\&', line) or len(prev_line):
            prev_line += ' ' + line
        else:
            # ignoring lines between command markers
            pass
    if prev_line and re.search(r'\&', prev_line):
        parser.raise_error('missing &end for command: {}'.format(prev_line))
    _update_lattice_names(commands)
    return commands
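A minimal standalone sketch of the accumulation pattern in Example #2: lines are buffered in prev_line until an '&end' terminator appears, then handed off as one command (here simply collected as strings; _parse_line and _update_lattice_names are project internals and are omitted).

import re

def collect_commands(command_text):
    commands = []
    prev_line = ''
    for line in command_text.replace('\r', '').split('\n'):
        if re.search(r'^#', line):
            continue
        # drop trailing '!' comments
        line = re.sub(r'\!.*$', '', line)
        if not line:
            continue
        if re.search(r'\&end', line):
            commands.append((prev_line + ' ' + line).strip())
            prev_line = ''
        elif re.search(r'\&', line) or prev_line:
            prev_line += ' ' + line
    if prev_line and re.search(r'\&', prev_line):
        raise ValueError('missing &end for command: {}'.format(prev_line))
    return commands

print(collect_commands('&run_setup\n  lattice = "lte",\n&end\n'))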
Example #3
def parse_file(zgoubi_text, max_id=0):
    parser = LineParser(max_id)
    lines = zgoubi_text.replace('\r', '').split('\n')
    elements = []
    # skip first documentation line
    title = lines.pop(0)
    parser.increment_line_number()
    current_command = None
    for line in lines:
        parser.increment_line_number()
        line = re.sub(r'\!.*$', '', line)
        line = re.sub(r'^\s+', '', line)
        line = re.sub(r'\s+$', '', line)
        if not line:
            continue
        keyword = _parse_keyword(line)
        if keyword:
            if current_command:
                _add_command(parser, current_command, elements)
            if keyword == 'END':
                current_command = None
                break
            line = _strip_command_index(line)
            current_command = [line.split()]
            current_command[0][0] = keyword
        else:
            line = line.lstrip()
            current_command.append(line.split())
    assert current_command is None, 'missing END element'
    return title, elements
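A minimal standalone sketch of the grouping strategy in Example #3: each keyword line starts a new command, the data lines that follow are appended to it, and parsing stops at END. The quoted keyword form assumed here (e.g. 'DRIFT') is only an illustration; _parse_keyword, _strip_command_index and _add_command are project internals.

import re

def group_commands(zgoubi_text):
    commands = []
    current = None
    for line in zgoubi_text.split('\n')[1:]:  # first line is the title
        line = re.sub(r'\!.*$', '', line).strip()
        if not line:
            continue
        m = re.match(r"^'(\w+)'", line)  # assumed keyword form, e.g. 'DRIFT'
        if m:
            if current:
                commands.append(current)
            if m.group(1) == 'END':
                break
            current = [m.group(1)]
        elif current:
            current.append(line.split())
    return commands

print(group_commands("title line\n'DRIFT' D1\n50.0\n'END'\n"))  # [['DRIFT', ['50.0']]]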
Example #4
def parse_file(self, lattice_text):
    from sirepo import simulation_db
    self.data = simulation_db.default_data(self.sim_data.sim_type())
    self.parser = LineParser(100)
    self.data.models.rpnVariables = {}
    self.data.models.sequences = []
    # None | sequence | track | match | edit
    self.container = None
    self.elements_by_name = PKDict()
    lines = lattice_text.replace('\r', '').split('\n')
    self.__parse_lines(lines)
    return self.data
Example #5
def parse_file(command_text):
    parser = LineParser(0)
    lines = command_text.replace('\r', '').split('\n')
    prev_line = ''
    commands = []

    for line in lines:
        parser.increment_line_number()
        if re.search(r'^#', line):
            continue
        line = re.sub(r'\!.*$', '', line)
        if not line:
            continue
        if re.search(r'\&end', line):
            if not _parse_line(parser, prev_line + ' ' + line, commands):
                break
            prev_line = ''
        elif re.search(r'\&', line) or prev_line:
            prev_line += ' ' + line
        else:
            # ignoring lines between command markers
            pass
    if prev_line and re.search(r'\&', prev_line):
        parser.raise_error('missing &end for command: {}'.format(prev_line))
    _update_lattice_names(commands)
    return commands
Example #6
def parse_file(zgoubi_text, max_id=0):
    parser = LineParser(max_id)
    lines = zgoubi_text.replace('\r', '').split('\n')
    elements = []
    # skip first documentation line
    title = lines.pop(0)
    parser.increment_line_number()
    unhandled_elements = {}
    current_command = None
    for line in lines:
        parser.increment_line_number()
        line = re.sub(r'\!.*$', '', line)
        line = re.sub(r'^\s+', '', line)
        line = re.sub(r'\s+$', '', line)
        if not line:
            continue
        keyword = _parse_keyword(line)
        if keyword:
            if current_command:
                _add_command(parser, current_command, elements,
                             unhandled_elements)
            if keyword == 'END' or keyword == 'FIN':
                current_command = None
                break
            line = _strip_command_index(line)
            current_command = [line.split()]
            current_command[0][0] = keyword
        else:
            line = line.lstrip()
            current_command.append(line.split())
    assert current_command is None, 'missing END element'
    return title, elements, sorted(unhandled_elements.keys())
Example #7
def parse_file(lattice_text, maxId=0):
    parser = LineParser(maxId)
    lines = lattice_text.replace('\r', '').split('\n')
    prev_line = ''
    models = {
        'beamlines': [],
        'elements': [],
        'default_beamline_name': None,
        'rpnVariables': {},
    }
    for line in lines:
        parser.increment_line_number()
        if re.search(r'^\s*\!', line):
            continue
        if re.search(r'\&\s*$', line):
            prev_line += re.sub(r'(\s*\&\s*)$', '', line)
            continue
        if not _parse_line(parser, prev_line + line, models):
            break
        prev_line = ''
    models['rpnVariables'] = [
        {'name': k, 'value': v} for k, v in models['rpnVariables'].items()
    ]
    return models
Example #8
def parse_file(lattice_text, rpn_variables, maxId=0):
    parser = LineParser(maxId)
    lines = lattice_text.replace('\r', '').split('\n')
    prev_line = ''
    models = PKDict(
        beamlines=[],
        elements=[],
        default_beamline_name=None,
        rpnVariables=PKDict(),
    )
    for line in lines:
        parser.increment_line_number()
        if re.search(r'^\s*\!', line):
            continue
        if re.search(r'\&\s*$', line):
            prev_line += re.sub(r'(\s*\&\s*)$', '', line)
            continue
        if not _parse_line(parser, prev_line + line, models):
            break
        prev_line = ''
    models['rpnVariables'] = [PKDict(name=k, value=v) for k, v in models.rpnVariables.items()] \
        + rpn_variables
    return models
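Example #8 differs from the earlier variants mainly in its use of PKDict instead of plain dicts. PKDict (from the pykern package) is a dict subclass that allows attribute-style access, which is why the code can write models.rpnVariables while still treating it as a mapping. A small hedged illustration:

from pykern.pkcollections import PKDict

models = PKDict(rpnVariables=PKDict(x='1.5'))
models.rpnVariables.y = '2 * x'  # attribute access and key access are interchangeable
print([PKDict(name=k, value=v) for k, v in models.rpnVariables.items()])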
Example #9
class LatticeParser(object):
    COMMAND_PREFIX = 'command_'

    def __init__(self, sim_data):
        self.sim_data = sim_data
        self.schema = sim_data.schema()

    def parse_file(self, lattice_text):
        from sirepo import simulation_db
        self.data = simulation_db.default_data(self.sim_data.sim_type())
        self.parser = LineParser(100)
        self.data.models.rpnVariables = {}
        self.data.models.sequences = []
        # None | sequence | track | match | edit
        self.container = None
        self.elements_by_name = PKDict()
        lines = lattice_text.replace('\r', '').split('\n')
        self.__parse_lines(lines)
        return self.data

    def _code_variables_to_float(self, code_var):
        for v in self.data.models.rpnVariables:
            if not code_var.is_var_value(v.value):
                v.value = float(v.value)
        for container in ('elements', 'commands'):
            for el in self.data.models[container]:
                model_name = LatticeUtil.model_name_for_data(el)
                for f in self.schema.model[model_name]:
                    if f in el and self.schema.model[model_name][f][1] == 'RPNValue':
                        if not code_var.is_var_value(el[f]):
                            el[f] = float(el[f])

    def _compute_drifts(self, code_var):
        drifts = PKDict()
        for el in self.data.models.elements:
            if el.type == 'DRIFT':
                length = self._format_length(self._eval_var(code_var, el.l))
                if length not in drifts:
                    drifts[length] = el._id
        return drifts

    def _downcase_variables(self, code_var):
        for v in self.data.models.rpnVariables:
            v.name = v.name.lower()
            if code_var.is_var_value(v.value):
                v.value = v.value.lower()
        for container in ('elements', 'commands'):
            for el in self.data.models[container]:
                model_name = LatticeUtil.model_name_for_data(el)
                for f in self.schema.model[model_name]:
                    if f in el and self.schema.model[model_name][f][1] == 'RPNValue':
                        if code_var.is_var_value(el[f]):
                            el[f] = el[f].lower()

    def _eval_var(self, code_var, value):
        return code_var.eval_var_with_assert(value)

    @classmethod
    def _format_command(cls, name):
        return f'{cls.COMMAND_PREFIX}{name}'

    def _format_length(self, length):
        res = '{:.8E}'.format(length)
        res = re.sub(r'(\.\d+?)(0+)E', r'\1e', res)
        res = re.sub(r'e\+00$', '', res)
        return res

    def _get_drift(self, drifts, length, allow_negative_drift=False):
        if length == 0:
            return None
        if length < 0 and not allow_negative_drift:
            pkdlog('warning: negative drift: {}', length)
            return None
        length = self._format_length(length)
        if length not in drifts:
            name = 'D{}'.format(length)
            name = re.sub(r'\+', '', name)
            name = re.sub(r'e?-', 'R', name)
            drift = PKDict(
                _id=self.parser.next_id(),
                l=float(length),
                name=name,
                type='DRIFT',
            )
            self.sim_data.update_model_defaults(drift, 'DRIFT')
            self.data.models.elements.append(drift)
            drifts[length] = drift._id
        return drifts[length]

    def _set_default_beamline(self, cmd_type, field1, field2=None):
        name = None
        for cmd in self.data.models.commands:
            if cmd._type == cmd_type:
                name = None
                if field1 in cmd:
                    name = cmd.get(field1)
                elif field2 and field2 in cmd:
                    name = cmd.get(field2)
                if name and name.upper() in self.elements_by_name:
                    name = name.upper()
                    break
                name = None
        beamline_id = None
        if name:
            beamline_id = self.elements_by_name[name].id
        elif self.data.models.beamlines:
            beamline_id = self.data.models.beamlines[-1].id
        self.data.models.simulation.activeBeamlineId = \
            self.data.models.simulation.visualizationBeamlineId = beamline_id

    def __model_name(self, cmd):
        res = cmd
        while res not in self.schema.model:
            parent = self.elements_by_name[res]
            assert parent and parent.type
            res = parent.type
        return res

    def __parse_beamline(self, label, values):
        assert label
        values[-1] = re.sub(r'\s*\)$', '', values[-1])
        values[0] = re.sub(r'^.*?=\s*\(\s*', '', values[0])
        res = PKDict(
            name=label,
            id=self.parser.next_id(),
            items=[],
        )
        for v in values:
            v = self.__remove_quotes(v)
            count = 1
            m = re.match(r'^(\d+)\s*\*\s*\(?([\w.]+)\)?$', v)
            if m:
                count = int(m.group(1))
                v = m.group(2)
            reverse = False
            if v[0] == '-':
                reverse = True
                v = v[1:]
            el = self.elements_by_name[v.upper()]
            assert el, 'line: {} element not found: {}'.format(label, v)
            el_id = el._id if '_id' in el else el.id
            for _ in range(count):
                res['items'].append(-el_id if reverse else el_id)
        assert label.upper() not in self.elements_by_name
        self.elements_by_name[label.upper()] = res
        self.data.models.beamlines.append(res)

    def __parse_element(self, cmd, label, values):
        res = self.__parse_fields(
            self.__model_name(cmd), values,
            PKDict(
                name=label,
                _id=self.parser.next_id(),
            ))
        res.type = cmd
        if self.container:
            assert 'at' in res, 'sequence element missing "at": {}'.format(
                values)
            at = res.at
            del res['at']
            assert label, 'unlabeled element: {}'.format(values)
            assert label.upper() not in self.elements_by_name, \
                'duplicate element in sequence: {}'.format(label)
            if cmd not in self.schema.model:
                parent = self.elements_by_name[cmd]
                assert parent
                assert len(res) >= 3
                if len(res) == 3:
                    self.container['items'].append([parent._id, at])
                    return
            self.container['items'].append([res._id, at])
        assert 'at' not in res
        # copy in superclass values
        while cmd not in self.schema.model:
            parent = self.elements_by_name[cmd]
            assert parent and parent.type
            res = PKDict(list(parent.items()) + list(res.items()))
            res.type = parent.type
            cmd = parent.type
        self.sim_data.update_model_defaults(res, res.type)
        if not label:
            label = values[0].upper()
            assert label in self.elements_by_name, 'no element for label: {}: {}'.format(
                label, values)
            self.elements_by_name[label].update(res)
        else:
            assert label.upper() not in self.elements_by_name, \
                'duplicate element labeled: {}'.format(label)
            self.elements_by_name[label.upper()] = res
        self.data.models.elements.append(res)

    def __parse_fields(self, cmd, values, res):
        model_schema = self.schema.model.get(cmd)
        prev_field = None
        for value in values[1:]:
            m = re.match(r'^\s*([\w.]+)\s*:?=\s*(.+?)\s*$', value)
            if m:
                f, v = m.group(1, 2)
                f = f.lower()
                # skip non-schema fields, with the exception of positional fields "at" and "elemedge"
                if model_schema and f not in model_schema and f not in (
                        'at', 'elemedge'):
                    continue
                if f != 'name':
                    # some commands may have a "name" field
                    assert f not in res, 'field already defined: {}, values: {}'.format(
                        f, values)
                res[f] = self.__remove_quotes(v)
                prev_field = f
                continue
            # no assignment, maybe a boolean value
            m = re.match(r'^\s*(!|-)?\s*([\w.]+)\s*$', value)
            assert m, 'failed to parse field assignment: {}'.format(value)
            v, f = m.group(1, 2)
            if model_schema and f not in model_schema:
                # special case for "column" field, may contain multiple comma separated values
                if prev_field == 'column':
                    res[prev_field] += f', {f}'
                continue
            res[f.lower()] = '0' if v else '1'
        return res

    def __parse_lines(self, lines):
        prev_line = ''
        in_comment = False
        for line in lines:
            self.parser.increment_line_number()
            line = re.sub(r'\&\s*$', '', line)
            # strip comments
            line = line.strip()
            line = re.sub(r'(.*?)(!|//).*$', r'\1', line)
            line = re.sub(r'\/\*.*?\*\/', '', line)
            # special case, some commands often missing a comma
            line = re.sub(r'^\s*(title|exec|call)\s+([^,])',
                          r'\1, \2',
                          line,
                          flags=re.IGNORECASE)
            if in_comment and re.search(r'^.*\*\/', line):
                line = re.sub(r'^.*\*\/', '', line)
                in_comment = False
            if re.search(r'\/\*.*$', line):
                line = re.sub(r'\/\*.*$', '', line)
                in_comment = True
            if not line or in_comment:
                continue
            assert not re.search(r'^\s*(if|while)\s*\(', line, re.IGNORECASE), \
                'Remove conditional if() or while() statements from input file before import'
            while ';' in line:
                m = re.match(r'^(.*?);(.*)$', line)
                assert m, 'parse ; failed: {}'.format(line)
                item = (prev_line + ' ' + m.group(1)).strip()
                self.__parse_values(self.__split_values(item))
                line = m.group(2)
                prev_line = ''
            prev_line += line
        self.data.models['rpnVariables'] = [
            PKDict(name=k, value=v)
            for k, v in self.data.models.rpnVariables.items()
        ]

    def __parse_statement(self, cmd, label, values):
        if cmd.upper() in self.schema.model or cmd.upper() in self.elements_by_name:
            self.__parse_element(cmd.upper(), label, values)
            return
        cmd = cmd.lower()
        if self.container and cmd == 'end{}'.format(self.container.type):
            assert len(values) == 1, 'invalid end{}: {}'.format(
                self.container, values)
            self.container = None
            return
        if cmd in ('sequence', 'track'):
            self.container = PKDict(
                name=label,
                type=cmd,
                _id=self.parser.next_id(),
            )
            self.__parse_fields(self._format_command(cmd), values,
                                self.container)
            self.container['items'] = []
            if cmd == 'sequence':
                self.data.models.sequences.append(self.container)
                return
        if self._format_command(cmd) in self.schema.model:
            res = PKDict(
                _type=cmd,
                _id=self.parser.next_id(),
                name=label,
            )
            self.__parse_fields(self._format_command(cmd), values, res)
            self.sim_data.update_model_defaults(
                res, LatticeUtil.model_name_for_data(res))
            self.data.models.commands.append(res)
        elif cmd == 'line':
            self.__parse_beamline(label, values)
        elif cmd == 'title':
            if len(values) > 1:
                self.data.models.simulation.name = self.__remove_quotes(
                    values[1])
        elif cmd not in self.ignore_commands:
            assert cmd != 'call', '"CALL" statement not supported, combine subfiles into one input file before import'
            if re.search(r'^ptc_', cmd):
                pass
            else:
                pkdlog('unknown cmd: {}', values)

    def __parse_values(self, values):
        if not values:
            return
        if len(values) == 1 and '=' in values[0] and not re.search(
                r'\Wline\s*=\s*\(', values[0].lower()):
            # a variable assignment
            m = re.match(r'.*?([\w.\']+)\s*:?=\s*(.*)$', values[0])
            assert m, 'invalid variable assignment: {}'.format(values)
            name = m.group(1)
            v = m.group(2)
            if name not in self.data.models.rpnVariables:
                self.data.models.rpnVariables[name] = v
            return
        if ':' in values[0]:
            m = re.match(r'([\w.#"]+)\s*:\s*([\w.]+)', values[0])
            assert m, 'label match failed: {}'.format(values[0])
            label, cmd = m.group(1, 2)
            label = self.__remove_quotes(label)
        else:
            label, cmd = None, values[0]
        self.__parse_statement(cmd, label, values)

    def __remove_quotes(self, value):
        return re.sub(r'[\'"](.*)[\'"]', r'\1', value)

    def __split_values(self, item):
        # split items into values by commas
        values = []
        while item:
            item = item.strip()
            m = re.match(
                r'^\s*((?:[\w.\']+\s*:?=\s*)?(?:(?:".*?")|(?:\'.*?\')|(?:\{.*?\})|(?:\w+\(.*?\))))(?:,(.*))?$',
                item)
            if m:
                values.append(m.group(1))
                assert item != m.group(2)
                item = m.group(2)
                continue
            m = re.match(r'^\s*(.+?)(?:,(.*))?$', item)
            if m:
                values.append(m.group(1).strip())
                assert item != m.group(2)
                item = m.group(2)
                continue
            assert False, 'line parse failed: {}'.format(item)
        return values
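As a usage note on Example #9, the drift naming scheme in _format_length and _get_drift can be tried in isolation; below is a minimal sketch reproducing only those two formatting steps (the surrounding model bookkeeping is omitted).

import re

def drift_name(length):
    # format the length, trim trailing zeros, drop a zero exponent (per _format_length)
    res = '{:.8E}'.format(length)
    res = re.sub(r'(\.\d+?)(0+)E', r'\1e', res)
    res = re.sub(r'e\+00$', '', res)
    # build the element name (per _get_drift): drop '+', turn 'e-'/'-' into 'R'
    name = 'D{}'.format(res)
    name = re.sub(r'\+', '', name)
    name = re.sub(r'e?-', 'R', name)
    return name

print(drift_name(1.0))   # D1.0
print(drift_name(0.5))   # D5.0R01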