integrator_file=File('file://../files/alanine/integrator.xml'),
        args='-r --report-interval 1 -p CPU --store-interval 1').named(
            'openmm')

    # --------------------------------------------------------------------------
    # CREATE AN ANALYZER
    #   the instance that knows how to compute an MSM from the trajectories
    # --------------------------------------------------------------------------

    modeller = PyEMMAAnalysis(pdb_file=pdb_file).named('pyemma')

    project.generators.add(engine)
    project.generators.add(modeller)

    # --------------------------------------------------------------------------
    # CREATE THE CLUSTER
    #   the instance that runs the simulations on the resource
    # --------------------------------------------------------------------------

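    # ask the project for a scheduler that uses a single core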
    scheduler = project.get_scheduler(cores=1)

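    # a fresh trajectory of 100 frames starting from the engine's initial pdb;
    # engine.run() wraps it in a task the scheduler can execute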
    trajectory = project.new_trajectory(engine['pdb_file'], 100)
    task = engine.run(trajectory)

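    # submit the task; calling the scheduler queues it for execution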
    scheduler(task)

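    # block until all submitted tasks are done, then shut the scheduler down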
    scheduler.wait()
    scheduler.exit()

    project.close()

    def test(self):
        # ----------------------------------------------------------------------
        # CREATE THE ENGINE
        #   the instance to create trajectories
        # ----------------------------------------------------------------------

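        # .load() reads the file contents so they can be stored with the
        # project; .named() registers the file under a fixed, findable name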
        pdb_file = File('file://{0}alanine.pdb'.format(
            self.f_base)).named('initial_pdb').load()

        engine = OpenMMEngine(
            pdb_file=pdb_file,
            system_file=File('file://{0}system.xml'.format(
                self.f_base)).load(),
            integrator_file=File('file://{0}integrator.xml'.format(
                self.f_base)).load(),
            args='-r --report-interval 1 -p CPU --store-interval 1').named(
                'openmm')

        # ----------------------------------------------------------------------
        # CREATE AN ANALYZER
        #   the instance that knows how to compute an MSM from the trajectories
        # ----------------------------------------------------------------------

        modeller = PyEMMAAnalysis(engine=engine).named('pyemma')

        self.project.generators.add(engine)
        self.project.generators.add(modeller)

        # ----------------------------------------------------------------------
        # CREATE THE CLUSTER
        #   the instance that runs the simulations on the resource
        # ----------------------------------------------------------------------
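        # keep the run short: a single new frame is enough for this test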
        traj_len = 1
        trajectory = self.project.new_trajectory(engine['pdb_file'], traj_len,
                                                 engine)
        task = engine.run(trajectory)

        # self.project.queue(task)

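        # reference topology used below to load the produced trajectories with mdtraj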
        pdb = md.load('{0}alanine.pdb'.format(self.f_base))

        # this part fakes a running worker without starting the worker process
        worker = WorkerScheduler(self.project.configuration, verbose=True)
        worker.enter(self.project)

        worker.submit(task)

        self.assertEqual(len(self.project.trajectories), 0)

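        # drive the fake worker manually until the task has finished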
        while not task.is_done():
            worker.advance()

        try:
            assert (len(self.project.trajectories) == 1)
        except AssertionError:
            print("stderr from worker task: \n%s" % task.stderr)
            print("stdout from worker task: \n%s" % task.stdout)
            raise
        print("stdout of worker:\n%s" % task.stdout)

        # FIXME: the worker space is cleared, so the trajectory paths are not valid anymore.
        # traj_path = os.path.join(
        #     worker.path,
        #     'workers',
        #     'worker.' + hex(task.__uuid__),
        #     worker.replace_prefix(self.project.trajectories.one.url)
        # )
        # this is a workaround, but assumes that sandbox:// lives on the same fs.
        traj_path = os.path.join(self.shared_path,
                                 self.project.trajectories.one.dirname[1:],
                                 'output.dcd')

        assert (os.path.exists(traj_path)), traj_path

        # load the produced trajectory with the initial pdb as topology
        traj = md.load(traj_path, top=pdb)

        assert (len(traj) == traj_len + 1), len(traj)

        # well, we have a trajectory of traj_len + 1 frames (the initial frame
        # plus traj_len new ones) that loads against the initial PDB topology,
        # that is a good sign

        # extend the trajectory by 10
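        # extend() creates a follow-up task that appends frames to the existing
        # trajectory rather than producing a new one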
        task2 = task.extend(10)

        worker.submit(task2)

        while not task2.is_done():
            worker.advance()

        # should still be one, since we have the same trajectory
        assert (len(self.project.trajectories) == 1)

        traj = md.load(traj_path, top=pdb)

        self.assertEqual(len(traj), traj_len + 10 + 1)

        # after extension it is traj_len + 10 + 1 frames. Excellent

        self.project.close()