Example #1
0
def parse_elegant_phasespace(particle_file):
    """Read an elegant SDDS particle file and build a Phasespace object.

    Parameters
    ----------
    particle_file : str
        Path to the SDDS particle-output file written by elegant.

    Returns
    -------
    Phasespace
        Particle coordinates (x, y, z, px, py, pz, t) and the bunch charge.
    """
    from sdds import SDDS

    sd = SDDS(0)
    sd.load(particle_file)

    # Bunch charge is stored as an SDDS parameter (first page).
    charge = sd.parameterData[sd.parameterName.index('Charge')][0]

    columns = ['x', 'y', 'xp', 'yp', 'p', 't']
    data = {col: sd.columnData[sd.columnName.index(col)][0] for col in columns}
    data = pd.DataFrame.from_dict(data)

    # Convert elegant's slopes (xp, yp) and total momentum p into
    # Cartesian momentum components.
    data['pz'] = data['p'] / np.sqrt(data['xp'] ** 2 + data['yp'] ** 2 + 1)
    data['px'] = data['pz'] * data['xp']
    data['py'] = data['pz'] * data['yp']
    # Longitudinal position relative to the bunch centroid:
    # z = beta_z * c * dt, with beta_z = pz / sqrt(p^2 + 1)
    # (p presumably in units of m*c — TODO confirm against elegant docs).
    data['dt'] = data['t'] - data['t'].mean()
    data['z'] = data['dt'] * V_LIGHT * data['pz'] / np.sqrt(data['p'] ** 2 + 1)
    # Drift transverse coordinates to each particle's own z plane.
    data['x'] += data['xp'] * data['z']
    data['y'] += data['yp'] * data['z']

    # Intermediate quantities are no longer needed after the conversion.
    data.drop(['xp', 'yp', 'p', 'dt'], inplace=True, axis=1)

    return Phasespace(data, charge)
Example #2
0
    def react(self, action):
        """simulate response to chosen action"""
        # apply the chosen changes to the current magnet strengths
        self.strengths = self.strengths + action.changes

        # rebuild the lattice file from the updated strengths
        self.__createLattice(self.strengths)

        # run the elegant tracking simulation, discarding its stdout
        with open(os.devnull, "w") as sink:
            sp.call(["elegant", "run.ele"], stdout=sink, cwd=self.dir)

        # load the beam distribution that elegant produced
        os.chdir(self.dir)
        output = SDDS(0)
        output.load(self.dir + "/run.out")

        # beam focus = mean transverse position (x, y) over all particles
        focus = torch.tensor((torch.tensor(output.columnData[0]).mean(),
                              torch.tensor(output.columnData[2]).mean()))

        # abort the episode once the step budget is exhausted
        if self.reactCount >= self.reactCountMax:
            print("forced abortion of episode, max steps exceeded")
            return State(self.strengths, focus, terminalState=True), -100
        self.reactCount += 1

        # distance to the goal focus and its change since the previous step
        currentDistance = torch.sqrt(torch.sum(
            (focus - self.focusGoal)**2)).item()
        delta = currentDistance - self.distanceToGoal
        self.distanceToGoal = currentDistance

        if currentDistance < self.acceptance:
            # goal reached: terminal state, positive reward
            return State(self.strengths, focus, terminalState=True), 10
        if torch.sqrt(torch.sum(focus**2)).item() >= self.targetRadius:
            # beam left the allowed target region: terminal state, penalty
            return State(self.strengths, focus, terminalState=True), -100
        # ordinary step: reward shaped by the change in distance
        return State(self.strengths,
                     focus), self.__reward(delta, 10**3)
Example #3
0
class SDDSIntermediate(object):
    """Wrapper around an SDDS data set loaded from a file.

    Builds name -> data lookup dicts for parameters and columns lazily,
    on first access, and caches them on the instance.
    """
    # NOTE(review): Python-2 style metaclass hook; has no effect under
    # Python 3 — confirm which interpreter this project targets.
    __metaclass__ = SDDSMeta

    def __init__(self, filename):
        """Load *filename* into an SDDS(0) object.

        Raises:
            IOError: if *filename* does not exist.
        """
        self._SDDS = SDDS(0)
        if os.path.exists(filename):
            self._SDDS.load(filename)
        else:
            raise IOError('File not found: {}'.format(filename))

    @property
    def _SDDS_param(self):
        """dict mapping parameter name -> parameter data (built lazily)."""
        try:
            return self._SDDS_param_dat
        except AttributeError:
            # First access: the cache attribute does not exist yet.
            # (Was a bare `except:`, which would also have swallowed
            # KeyboardInterrupt/SystemExit.)
            self._SDDS_param_dat = dict(
                zip(self._SDDS.parameterName, self._SDDS.parameterData))
            return self._SDDS_param_dat

    @property
    def _SDDS_col(self):
        """dict mapping column name -> column data (built lazily)."""
        try:
            return self._SDDS_column_dat
        except AttributeError:
            # First access: build and cache the lookup table.
            self._SDDS_column_dat = dict(
                zip(self._SDDS.columnName, self._SDDS.columnData))
            return self._SDDS_column_dat