Example #1
File: test_io.py Project: chrisfroe/readdy
    def test_write_trajectory(self):
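        # Writes a 20-step trajectory (one "A" particle added per step) and verifies the read-back frames and the stored particle-type config.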
        traj_fname = os.path.join(self.dir, "traj.h5")
        simulation = Simulation()
        simulation.set_kernel("SingleCPU")
        simulation.box_size = common.Vec(5, 5, 5)
        simulation.register_particle_type("A", 0.0)
        simulation.register_reaction_conversion("A->A", "A", "A", 1.)

        def callback(_):
            simulation.add_particle("A", common.Vec(0, 0, 0))

        simulation.register_observable_n_particles(1, ["A"], callback)
        traj_handle = simulation.register_observable_trajectory(0)
        with closing(io.File.create(traj_fname, io.FileFlag.OVERWRITE)) as f:
            traj_handle.enable_write_to_file(f, u"", 3)
            simulation.run_scheme_readdy(True).write_config_to_file(f).configure(1).run(20)

        r = TrajectoryReader(traj_fname)
        trajectory_items = r[:]
        for idx, items in enumerate(trajectory_items):
            np.testing.assert_equal(len(items), idx+1)
            for item in items:
                np.testing.assert_equal(item.t, idx)
                np.testing.assert_equal(item.position, np.array([.0, .0, .0]))

        with h5py.File(traj_fname, "r") as f:
            np.testing.assert_equal("A", f["readdy/config/particle_types"][0]["name"])
Example #2
    def test_particle_positions_observable(self):
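        # Records particle positions every step while a callback adds one "A" particle per step, then checks the written coordinates.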
        fname = os.path.join(self.dir, "test_observables_particle_positions.h5")
        sim = Simulation("SingleCPU")
        sim.context.box_size = [13., 13., 13.]
        sim.context.particle_types.add("A", .1)
        sim.add_particle("A", common.Vec(0, 0, 0))
        # every time step, add one particle
        sim.register_observable_n_particles(1, ["A"], lambda n: sim.add_particle("A", common.Vec(1.5, 2.5, 3.5)))
        handle = sim.register_observable_particle_positions(1, [])
        n_timesteps = 19
        with closing(io.File.create(fname)) as f:
            handle.enable_write_to_file(f, u"particle_positions", int(3))
            sim.run(n_timesteps, 0)
            handle.flush()

        with h5py.File(fname, "r") as f2:
            data = f2["readdy/observables/particle_positions/data"][:]
            np.testing.assert_equal(len(data), n_timesteps + 1)
            for t, positions in enumerate(data):
                # we begin with two particles
                np.testing.assert_equal(len(positions), t + 2)
                np.testing.assert_equal(positions[0]["x"], 0)
                np.testing.assert_equal(positions[0]["y"], 0)
                np.testing.assert_equal(positions[0]["z"], 0)
                for i in range(1, len(positions)):
                    np.testing.assert_equal(positions[i]["x"], 1.5)
                    np.testing.assert_equal(positions[i]["y"], 2.5)
                    np.testing.assert_equal(positions[i]["z"], 3.5)
Example #3
    def test_forces_observable(self):
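        # Records per-particle forces every step; since no potentials are registered, all written forces are expected to be zero.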
        fname = os.path.join(self.dir, "test_observables_particle_forces.h5")
        sim = Simulation()
        sim.set_kernel("CPU")
        sim.box_size = common.Vec(13, 13, 13)
        sim.register_particle_type("A", .1)
        sim.add_particle("A", common.Vec(0, 0, 0))
        # every time step, add one particle
        sim.register_observable_n_particles(1, ["A"], lambda n: sim.add_particle("A", common.Vec(1.5, 2.5, 3.5)))
        handle = sim.register_observable_forces(1, [])
        n_timesteps = 19
        with closing(io.File.create(fname)) as f:
            handle.enable_write_to_file(f, u"forces", int(3))
            sim.run_scheme_readdy(True).configure(1).run(n_timesteps)
            handle.flush()

        with h5py.File(fname, "r") as f2:
            data = f2["readdy/observables/forces/data"][:]
            time_series = f2["readdy/observables/forces/time"]
            np.testing.assert_equal(len(data), n_timesteps + 1)
            np.testing.assert_equal(time_series, np.array(range(0, n_timesteps+1)))
            for t, forces in enumerate(data):
                # we begin with two particles
                np.testing.assert_equal(len(forces), t + 2)
                np.testing.assert_equal(forces[0]["x"], 0)
                np.testing.assert_equal(forces[0]["y"], 0)
                np.testing.assert_equal(forces[0]["z"], 0)
                for i in range(1, len(forces)):
                    np.testing.assert_equal(forces[i]["x"], 0)
                    np.testing.assert_equal(forces[i]["y"], 0)
                    np.testing.assert_equal(forces[i]["z"], 0)
Example #4
File: test_io.py Project: marscher/readdy
    def test_write_flat_trajectory(self):
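        # Writes a flat trajectory observable while one "A" particle is added per step, then verifies the per-frame entries read back from the file.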
        common.set_logging_level("error")
        traj_fname = os.path.join(self.dir, "flat_traj.h5")
        simulation = Simulation()
        simulation.set_kernel("SingleCPU")
        simulation.box_size = common.Vec(5, 5, 5)
        simulation.register_particle_type("A", 0.0, 0.0)

        def callback(_):
            simulation.add_particle("A", common.Vec(0, 0, 0))

        simulation.register_observable_n_particles(1, ["A"], callback)
        traj_handle = simulation.register_observable_flat_trajectory(1)
        with closing(
                io.File(traj_fname, io.FileAction.CREATE,
                        io.FileFlag.OVERWRITE)) as f:
            traj_handle.enable_write_to_file(f, u"", int(3))
            simulation.run_scheme_readdy(True).configure(1).run(20)

        r = TrajectoryReader(traj_fname)
        trajectory_items = r[:]
        for idx, items in enumerate(trajectory_items):
            np.testing.assert_equal(len(items), idx + 1)
            for item in items:
                np.testing.assert_equal(item.t, idx)
                np.testing.assert_equal(item.position, np.array([.0, .0, .0]))
        common.set_logging_level("debug")
Example #5
    def test_write_trajectory(self):
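        # Trajectory test using the Context/create_loop API: writes 20 steps and verifies the read-back frames and the stored particle-type config.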
        traj_fname = os.path.join(self.dir, "traj.h5")
        context = Context()
        context.box_size = [5., 5., 5.]
        context.particle_types.add("A", 0.0)
        context.reactions.add_conversion("A->A", "A", "A", 1.)
        simulation = Simulation("SingleCPU", context)

        def callback(_):
            simulation.add_particle("A", common.Vec(0, 0, 0))

        simulation.register_observable_n_particles(1, ["A"], callback)
        traj_handle = simulation.register_observable_trajectory(0)
        with closing(io.File.create(traj_fname, io.FileFlag.OVERWRITE)) as f:
            traj_handle.enable_write_to_file(f, u"", 3)
            loop = simulation.create_loop(1.)
            loop.write_config_to_file(f)
            loop.run(20)

        r = TrajectoryReader(traj_fname)
        trajectory_items = r[:]
        for idx, items in enumerate(trajectory_items):
            np.testing.assert_equal(len(items), idx + 1)
            for item in items:
                np.testing.assert_equal(item.t, idx)
                np.testing.assert_equal(item.position, np.array([.0, .0, .0]))

        with h5py.File(traj_fname, 'r') as f:
            np.testing.assert_equal(
                "A", f["readdy/config/particle_types"][0]["name"])
Example #6
    def test_particle_positions_observable(self):
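        # Records particle positions every step (one "A" particle added per step) and checks the coordinates written to the HDF5 file.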
        fname = os.path.join(self.dir,
                             "test_observables_particle_positions.h5")
        sim = Simulation()
        sim.set_kernel("SingleCPU")
        sim.box_size = common.Vec(13, 13, 13)
        sim.register_particle_type("A", .1, .1)
        sim.add_particle("A", common.Vec(0, 0, 0))
        # every time step, add one particle
        sim.register_observable_n_particles(
            1, ["A"],
            lambda n: sim.add_particle("A", common.Vec(1.5, 2.5, 3.5)))
        handle = sim.register_observable_particle_positions(1, [])
        n_timesteps = 19
        with closing(
                io.File(fname, io.FileAction.CREATE,
                        io.FileFlag.OVERWRITE)) as f:
            handle.enable_write_to_file(f, u"particle_positions", int(3))
            sim.run_scheme_readdy(True).configure(0).run(n_timesteps)
            handle.flush()

        with h5py.File(fname, "r") as f2:
            data = f2["readdy/observables/particle_positions/data"][:]
            np.testing.assert_equal(len(data), n_timesteps + 1)
            for t, positions in enumerate(data):
                # we begin with two particles
                np.testing.assert_equal(len(positions), t + 2)
                np.testing.assert_equal(positions[0]["x"], 0)
                np.testing.assert_equal(positions[0]["y"], 0)
                np.testing.assert_equal(positions[0]["z"], 0)
                for i in range(1, len(positions)):
                    np.testing.assert_equal(positions[i]["x"], 1.5)
                    np.testing.assert_equal(positions[i]["y"], 2.5)
                    np.testing.assert_equal(positions[i]["z"], 3.5)
        common.set_logging_level("warn")
Example #7
    def test_n_particles_observable(self):
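        # Counts particles of types ["A", "B"] and of all types while callbacks add particles each step, then compares the written counts with the callback-collected values.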
        common.set_logging_level("warn")
        fname = os.path.join(self.dir, "test_observables_n_particles.h5")

        simulation = Simulation()
        simulation.set_kernel("SingleCPU")

        box_size = common.Vec(10, 10, 10)
        simulation.kbt = 2
        simulation.periodic_boundary = [True, True, True]
        simulation.box_size = box_size
        simulation.register_particle_type("A", .2, 1.)
        simulation.register_particle_type("B", .2, 1.)
        simulation.add_particle("A", common.Vec(-2.5, 0, 0))
        simulation.add_particle("B", common.Vec(0, 0, 0))
        n_time_steps = 50
        callback_n_particles_a_b = []
        callback_n_particles_all = []

        def callback_ab(value):
            callback_n_particles_a_b.append(value)
            simulation.add_particle("A", common.Vec(-1, -1, -1))

        def callback_all(hist):
            callback_n_particles_all.append(hist)
            simulation.add_particle("A", common.Vec(-1, -1, -1))
            simulation.add_particle("B", common.Vec(-1, -1, -1))

        handle_a_b_particles = simulation.register_observable_n_particles(
            1, ["A", "B"], callback_ab)
        handle_all = simulation.register_observable_n_particles(
            1, [], callback_all)
        with closing(
                io.File(fname, io.FileAction.CREATE,
                        io.FileFlag.OVERWRITE)) as f:
            handle_a_b_particles.enable_write_to_file(f, u"n_a_b_particles",
                                                      int(3))
            handle_all.enable_write_to_file(f, u"n_particles", int(5))
            simulation.run(n_time_steps, 0.02)
            handle_all.flush()
            handle_a_b_particles.flush()

        with h5py.File(fname, "r") as f2:
            n_a_b_particles = f2["readdy/observables/n_a_b_particles/data"][:]
            n_particles = f2["readdy/observables/n_particles/data"][:]
            time_series = f2["readdy/observables/n_a_b_particles/time"]
            np.testing.assert_equal(time_series,
                                    np.array(range(0, n_time_steps + 1)))
            for t in range(n_time_steps):
                np.testing.assert_equal(n_a_b_particles[t][0],
                                        callback_n_particles_a_b[t][0])
                np.testing.assert_equal(n_a_b_particles[t][1],
                                        callback_n_particles_a_b[t][1])
                np.testing.assert_equal(n_particles[t][0],
                                        callback_n_particles_all[t][0])
Example #8
    def test_n_particles_observable(self):
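        # Context-based variant of the particle-count test; compares the written counts with the values collected by the callbacks.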
        fname = os.path.join(self.dir, "test_observables_n_particles.h5")

        context = Context()
        box_size = [10., 10., 10.]
        context.kbt = 2
        context.pbc = [True, True, True]
        context.box_size = box_size
        context.particle_types.add("A", .2)
        context.particle_types.add("B", .2)
        simulation = Simulation("SingleCPU", context)

        simulation.add_particle("A", common.Vec(-2.5, 0, 0))
        simulation.add_particle("B", common.Vec(0, 0, 0))
        n_time_steps = 50
        callback_n_particles_a_b = []
        callback_n_particles_all = []

        def callback_ab(value):
            callback_n_particles_a_b.append(value)
            simulation.add_particle("A", common.Vec(-1, -1, -1))

        def callback_all(hist):
            callback_n_particles_all.append(hist)
            simulation.add_particle("A", common.Vec(-1, -1, -1))
            simulation.add_particle("B", common.Vec(-1, -1, -1))

        handle_a_b_particles = simulation.register_observable_n_particles(
            1, ["A", "B"], callback_ab)
        handle_all = simulation.register_observable_n_particles(
            1, [], callback_all)
        with closing(io.File.create(fname)) as f:
            handle_a_b_particles.enable_write_to_file(f, u"n_a_b_particles",
                                                      int(3))
            handle_all.enable_write_to_file(f, u"n_particles", int(5))
            simulation.run(n_time_steps, 0.02)
            handle_all.flush()
            handle_a_b_particles.flush()

        with h5py.File(fname, "r") as f2:
            n_a_b_particles = f2["readdy/observables/n_a_b_particles/data"][:]
            n_particles = f2["readdy/observables/n_particles/data"][:]
            time_series = f2["readdy/observables/n_a_b_particles/time"]
            np.testing.assert_equal(time_series,
                                    np.array(range(0, n_time_steps + 1)))
            for t in range(n_time_steps):
                np.testing.assert_equal(n_a_b_particles[t][0],
                                        callback_n_particles_a_b[t][0])
                np.testing.assert_equal(n_a_b_particles[t][1],
                                        callback_n_particles_a_b[t][1])
                np.testing.assert_equal(n_particles[t][0],
                                        callback_n_particles_all[t][0])
Example #9
    def test_interrupt_simple(self):
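        # Runs the loop with the criterion t < 5 and expects the n-particles callback to fire six times (steps 0 through 5).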
        sim = Simulation("SingleCPU")
        sim.context.particle_types.add("A", 0.1)
        # Define counter as list. This is a workaround because nosetest will complain otherwise.
        counter = [0]

        def increment(result):
            counter[0] += 1

        sim.register_observable_n_particles(1, ["A"], increment)
        do_continue = lambda t: t < 5
        sim.create_loop(.1).run_with_criterion(do_continue)
        np.testing.assert_equal(counter[0], 6)
Example #10
    def test_particles_observable(self):
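        # Records the particles observable (types, ids, positions) every step and checks the entries against type ids obtained via get_particle_types.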
        fname = os.path.join(self.dir, "test_observables_particles.h5")
        context = Context()
        context.box_size = [13., 13., 13.]
        context.particle_types.add("A", .1)
        context.particle_types.add("B", .1)
        sim = Simulation("SingleCPU", context)
        sim.add_particle("A", common.Vec(0, 0, 0))
        sim.add_particle("B", common.Vec(0, 0, 0))
        # every time step, add one particle
        sim.register_observable_n_particles(
            1, ["A"],
            lambda n: sim.add_particle("A", common.Vec(1.5, 2.5, 3.5)))
        handle = sim.register_observable_particles(1)
        n_timesteps = 19
        with closing(io.File.create(fname)) as f:
            handle.enable_write_to_file(f, u"particles", int(3))
            loop = sim.create_loop(0)
            loop.write_config_to_file(f)
            loop.run(n_timesteps)
            handle.flush()

        from readdy.util.io_utils import get_particle_types

        particle_types = get_particle_types(fname)

        with h5py.File(fname, "r") as f2:

            types = f2["readdy/observables/particles/types"][:]
            ids = f2["readdy/observables/particles/ids"][:]
            positions = f2["readdy/observables/particles/positions"][:]
            for t in range(n_timesteps):
                np.testing.assert_equal(len(types[t]), t + 3)
                np.testing.assert_equal(len(ids[t]), t + 3)
                np.testing.assert_equal(len(positions[t]), t + 3)
                np.testing.assert_equal(types[t][0],
                                        particle_types["A"]["type_id"])
                np.testing.assert_equal(positions[t][0][0], 0)
                np.testing.assert_equal(positions[t][0][1], 0)
                np.testing.assert_equal(positions[t][0][2], 0)
                np.testing.assert_equal(positions[t][1][0], 0)
                np.testing.assert_equal(positions[t][1][1], 0)
                np.testing.assert_equal(positions[t][1][2], 0)
                np.testing.assert_equal(types[t][1],
                                        particle_types["B"]["type_id"])
                for others in range(2, len(types[t])):
                    np.testing.assert_equal(types[t][others],
                                            particle_types["A"]["type_id"])
                    np.testing.assert_equal(positions[t][others][0], 1.5)
                    np.testing.assert_equal(positions[t][others][1], 2.5)
                    np.testing.assert_equal(positions[t][others][2], 3.5)
Example #11
    def test_interrupt_simple(self):
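        # Scheme-based variant of the interruption test: runs with the criterion t < 5 and expects the callback to fire six times.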
        sim = Simulation()
        sim.set_kernel("SingleCPU")
        sim.register_particle_type("A", 0.1)
        # Define counter as list. This is a workaround because nosetest will complain otherwise.
        counter = [0]

        def increment(result):
            counter[0] += 1

        sim.register_observable_n_particles(1, ["A"], increment)
        scheme = sim.run_scheme_readdy(True).configure(0.1)
        do_continue = lambda t: t < 5
        scheme.run_with_criterion(do_continue)
        np.testing.assert_equal(counter[0], 6)
Example #12
    def test_n_particles_observable(self):
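        # Particle-count test using the older Simulation API; compares the written counts with the callback-collected values.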
        fname = os.path.join(self.dir, "test_observables_n_particles.h5")

        simulation = Simulation()
        simulation.set_kernel("SingleCPU")

        box_size = common.Vec(10, 10, 10)
        simulation.kbt = 2
        simulation.periodic_boundary = [True, True, True]
        simulation.box_size = box_size
        simulation.register_particle_type("A", .2)
        simulation.register_particle_type("B", .2)
        simulation.add_particle("A", common.Vec(-2.5, 0, 0))
        simulation.add_particle("B", common.Vec(0, 0, 0))
        n_time_steps = 50
        callback_n_particles_a_b = []
        callback_n_particles_all = []

        def callback_ab(value):
            callback_n_particles_a_b.append(value)
            simulation.add_particle("A", common.Vec(-1, -1, -1))

        def callback_all(hist):
            callback_n_particles_all.append(hist)
            simulation.add_particle("A", common.Vec(-1, -1, -1))
            simulation.add_particle("B", common.Vec(-1, -1, -1))

        handle_a_b_particles = simulation.register_observable_n_particles(1, ["A", "B"], callback_ab)
        handle_all = simulation.register_observable_n_particles(1, [], callback_all)
        with closing(io.File.create(fname)) as f:
            handle_a_b_particles.enable_write_to_file(f, u"n_a_b_particles", int(3))
            handle_all.enable_write_to_file(f, u"n_particles", int(5))
            simulation.run(n_time_steps, 0.02)
            handle_all.flush()
            handle_a_b_particles.flush()

        with h5py.File(fname, "r") as f2:
            n_a_b_particles = f2["readdy/observables/n_a_b_particles/data"][:]
            n_particles = f2["readdy/observables/n_particles/data"][:]
            time_series = f2["readdy/observables/n_a_b_particles/time"]
            np.testing.assert_equal(time_series, np.array(range(0, n_time_steps+1)))
            for t in range(n_time_steps):
                np.testing.assert_equal(n_a_b_particles[t][0], callback_n_particles_a_b[t][0])
                np.testing.assert_equal(n_a_b_particles[t][1], callback_n_particles_a_b[t][1])
                np.testing.assert_equal(n_particles[t][0], callback_n_particles_all[t][0])
Example #13
    def test_particles_observable(self):
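        # Particles-observable check configured through sim.context; type ids are resolved via get_particle_types.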
        fname = os.path.join(self.dir, "test_observables_particles.h5")
        sim = Simulation("SingleCPU")
        sim.context.box_size = [13., 13., 13.]
        sim.context.particle_types.add("A", .1)
        sim.context.particle_types.add("B", .1)
        sim.add_particle("A", common.Vec(0, 0, 0))
        sim.add_particle("B", common.Vec(0, 0, 0))
        # every time step, add one particle
        sim.register_observable_n_particles(1, ["A"], lambda n: sim.add_particle("A", common.Vec(1.5, 2.5, 3.5)))
        handle = sim.register_observable_particles(1)
        n_timesteps = 19
        with closing(io.File.create(fname)) as f:
            handle.enable_write_to_file(f, u"particles", int(3))
            loop = sim.create_loop(0)
            loop.write_config_to_file(f)
            loop.run(n_timesteps)
            handle.flush()

        from readdy.util.io_utils import get_particle_types

        particle_types = get_particle_types(fname)

        with h5py.File(fname, "r") as f2:

            types = f2["readdy/observables/particles/types"][:]
            ids = f2["readdy/observables/particles/ids"][:]
            positions = f2["readdy/observables/particles/positions"][:]
            for t in range(n_timesteps):
                np.testing.assert_equal(len(types[t]), t + 3)
                np.testing.assert_equal(len(ids[t]), t + 3)
                np.testing.assert_equal(len(positions[t]), t + 3)
                np.testing.assert_equal(types[t][0], particle_types["A"]["type_id"])
                np.testing.assert_equal(positions[t][0][0], 0)
                np.testing.assert_equal(positions[t][0][1], 0)
                np.testing.assert_equal(positions[t][0][2], 0)
                np.testing.assert_equal(positions[t][1][0], 0)
                np.testing.assert_equal(positions[t][1][1], 0)
                np.testing.assert_equal(positions[t][1][2], 0)
                np.testing.assert_equal(types[t][1], particle_types["B"]["type_id"])
                for others in range(2, len(types[t])):
                    np.testing.assert_equal(types[t][others], particle_types["A"]["type_id"])
                    np.testing.assert_equal(positions[t][others][0], 1.5)
                    np.testing.assert_equal(positions[t][others][1], 2.5)
                    np.testing.assert_equal(positions[t][others][2], 3.5)
Example #14
    def test_interrupt_maxparticles(self):
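        # Stops the run once the "A" particle count reaches 8 (particles multiply via a fast fission reaction); the callback is expected to fire four times.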
        sim = Simulation()
        sim.set_kernel("SingleCPU")
        sim.register_particle_type("A", 0.1)
        sim.add_particle("A", Vec(0, 0, 0))
        sim.register_reaction_fission("bla", "A", "A", "A", 1000., 0., 0.5, 0.5)
        counter = [0]
        shall_stop = [False]

        def increment(result):
            counter[0] += 1
            if result[0] >= 8:
                shall_stop[0] = True

        sim.register_observable_n_particles(1, ["A"], increment)
        scheme = sim.run_scheme_readdy(True).configure(1.)
        do_continue = lambda t: not shall_stop[0]
        scheme.run_with_criterion(do_continue)
        np.testing.assert_equal(counter[0], 4)
Example #15
    def test_interrupt_maxparticles(self):
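        # Interruption-by-particle-count test using the context/create_loop API; the callback is expected to fire four times.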
        sim = Simulation("SingleCPU")
        sim.context.particle_types.add("A", 0.1)
        sim.add_particle("A", Vec(0, 0, 0))
        sim.context.reactions.add_fission("bla", "A", "A", "A", 1000., 0., 0.5,
                                          0.5)
        counter = [0]
        shall_stop = [False]

        def increment(result):
            counter[0] += 1
            if result[0] >= 8:
                shall_stop[0] = True

        sim.register_observable_n_particles(1, ["A"], increment)
        loop = sim.create_loop(1.)
        do_continue = lambda t: not shall_stop[0]
        loop.run_with_criterion(do_continue)
        np.testing.assert_equal(counter[0], 4)
Example #16
    def test_particles_observable(self):
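        # Records the particles observable and checks the written types against the ids returned by register_particle_type.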
        fname = os.path.join(self.dir, "test_observables_particles.h5")
        sim = Simulation()
        sim.set_kernel("SingleCPU")
        sim.box_size = common.Vec(13, 13, 13)
        typeid_A = sim.register_particle_type("A", .1, .1)
        typeid_B = sim.register_particle_type("B", .1, .1)
        sim.add_particle("A", common.Vec(0, 0, 0))
        sim.add_particle("B", common.Vec(0, 0, 0))
        # every time step, add one particle
        sim.register_observable_n_particles(
            1, ["A"],
            lambda n: sim.add_particle("A", common.Vec(1.5, 2.5, 3.5)))
        handle = sim.register_observable_particles(1)
        n_timesteps = 19
        with closing(
                io.File(fname, io.FileAction.CREATE,
                        io.FileFlag.OVERWRITE)) as f:
            handle.enable_write_to_file(f, u"particles", int(3))
            sim.run_scheme_readdy(True).configure(0).run(n_timesteps)
            handle.flush()

        with h5py.File(fname, "r") as f2:
            types = f2["readdy/observables/particles/types"][:]
            ids = f2["readdy/observables/particles/ids"][:]
            positions = f2["readdy/observables/particles/positions"][:]
            for t in range(n_timesteps):
                np.testing.assert_equal(len(types[t]), t + 3)
                np.testing.assert_equal(len(ids[t]), t + 3)
                np.testing.assert_equal(len(positions[t]), t + 3)
                np.testing.assert_equal(types[t][0], typeid_A)
                np.testing.assert_equal(positions[t][0][0], 0)
                np.testing.assert_equal(positions[t][0][1], 0)
                np.testing.assert_equal(positions[t][0][2], 0)
                np.testing.assert_equal(positions[t][1][0], 0)
                np.testing.assert_equal(positions[t][1][1], 0)
                np.testing.assert_equal(positions[t][1][2], 0)
                np.testing.assert_equal(types[t][1], typeid_B)
                for others in range(2, len(types[t])):
                    np.testing.assert_equal(types[t][others], typeid_A)
                    np.testing.assert_equal(positions[t][others][0], 1.5)
                    np.testing.assert_equal(positions[t][others][1], 2.5)
                    np.testing.assert_equal(positions[t][others][2], 3.5)
Example #17
    def test_particles_observable(self):
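        # Same particles-observable check as above, with particle types registered without an explicit radius argument.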
        fname = os.path.join(self.dir, "test_observables_particles.h5")
        sim = Simulation()
        sim.set_kernel("SingleCPU")
        sim.box_size = common.Vec(13, 13, 13)
        typeid_A = sim.register_particle_type("A", .1)
        typeid_B = sim.register_particle_type("B", .1)
        sim.add_particle("A", common.Vec(0, 0, 0))
        sim.add_particle("B", common.Vec(0, 0, 0))
        # every time step, add one particle
        sim.register_observable_n_particles(1, ["A"], lambda n: sim.add_particle("A", common.Vec(1.5, 2.5, 3.5)))
        handle = sim.register_observable_particles(1)
        n_timesteps = 19
        with closing(io.File.create(fname)) as f:
            handle.enable_write_to_file(f, u"particles", int(3))
            sim.run_scheme_readdy(True).configure(0).run(n_timesteps)
            handle.flush()

        with h5py.File(fname, "r") as f2:
            types = f2["readdy/observables/particles/types"][:]
            ids = f2["readdy/observables/particles/ids"][:]
            positions = f2["readdy/observables/particles/positions"][:]
            for t in range(n_timesteps):
                np.testing.assert_equal(len(types[t]), t + 3)
                np.testing.assert_equal(len(ids[t]), t + 3)
                np.testing.assert_equal(len(positions[t]), t + 3)
                np.testing.assert_equal(types[t][0], typeid_A)
                np.testing.assert_equal(positions[t][0][0], 0)
                np.testing.assert_equal(positions[t][0][1], 0)
                np.testing.assert_equal(positions[t][0][2], 0)
                np.testing.assert_equal(positions[t][1][0], 0)
                np.testing.assert_equal(positions[t][1][1], 0)
                np.testing.assert_equal(positions[t][1][2], 0)
                np.testing.assert_equal(types[t][1], typeid_B)
                for others in range(2, len(types[t])):
                    np.testing.assert_equal(types[t][others], typeid_A)
                    np.testing.assert_equal(positions[t][others][0], 1.5)
                    np.testing.assert_equal(positions[t][others][1], 2.5)
                    np.testing.assert_equal(positions[t][others][2], 3.5)
Example #18
    def test_write_trajectory_as_observable(self):
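        # Registers the trajectory as a regular observable with stride 1 and verifies the read-back frames after 20 steps.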
        traj_fname = os.path.join(self.dir, "traj_as_obs.h5")
        simulation = Simulation("SingleCPU")
        simulation.context.box_size = [5., 5., 5.]
        simulation.context.particle_types.add("A", 0.0)

        def callback(_):
            simulation.add_particle("A", common.Vec(0, 0, 0))

        simulation.register_observable_n_particles(1, ["A"], callback)
        traj_handle = simulation.register_observable_trajectory(1)

        with closing(io.File.create(traj_fname, io.FileFlag.OVERWRITE)) as f:
            traj_handle.enable_write_to_file(f, u"", int(3))
            simulation.run(20, 1)

        r = TrajectoryReader(traj_fname)
        trajectory_items = r[:]
        for idx, items in enumerate(trajectory_items):
            np.testing.assert_equal(len(items), idx+1)
            for item in items:
                np.testing.assert_equal(item.t, idx)
                np.testing.assert_equal(item.position, np.array([.0, .0, .0]))
Example #19
    def execute(self):
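        # MinD/MinE (Min system) example: registers particle types, reactions, box potentials and axis histograms, then runs the simulation on the CPU kernel.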
        ###################################
        #
        # Units:
        #   - [x] = µm
        #   - [t] = s
        #   - [E] = kJ/mol
        #
        ###################################

        kernel_provider = KernelProvider.get()
        kernel_provider.load_from_dir(platform_utils.get_readdy_plugin_dir())
        simulation = Simulation()
        simulation.set_kernel("CPU")

        ###################################
        #
        # set up simulation box
        #
        ###################################

        box_size = Vec(2, 7, 12)
        simulation.box_size = box_size
        simulation.kbt = 2.437  # room temperature
        simulation.periodic_boundary = [False, False, False]

        ###################################
        #
        # register particle types
        #
        ###################################

        # particle size, see: http://bmccellbiol.biomedcentral.com/articles/10.1186/1471-2121-5-29
        # "The size of the V-ATPase complex is about 15 nm (diameter) x 25 nm (length from lumen side to tip of head)"

        membrane_particle_size = .05

        diffusion_factor = .5
        simulation.register_particle_type("D", 2.5 * diffusion_factor,
                                          .01)  # MinD-ADP (without phosphate)
        simulation.register_particle_type("D_P", 2.5 * diffusion_factor,
                                          .01)  # MinD-ATP (with phosphate)
        simulation.register_particle_type("E", 2.5 * diffusion_factor,
                                          .01)  # MinE
        simulation.register_particle_type("D_PB", .01 * diffusion_factor,
                                          .01)  # MinD-ATP bound
        simulation.register_particle_type("DE", .01 * diffusion_factor,
                                          .01)  # MinDE

        ###################################
        #
        # register reaction types
        #
        ###################################

        reaction_radius = 4 * (
            0.01 + 0.01
        )  # = 4 * sum of the particle radii (magic factor such that k_fusion makes sense, sort of)
        # k_fusion = brentq(lambda x: self.erban_chapman(.093, 2.5 + .01, reaction_radius, x), 1, 5000000)
        k_fusion = 1.0
        print("k_fusion=%s" % k_fusion)
        simulation.register_reaction_conversion("Phosphorylation", "D", "D_P",
                                                .5)
        simulation.register_reaction_fusion("bound MinD+MinE->MinDE", "D_PB",
                                            "E", "DE", k_fusion,
                                            reaction_radius * 3.5, .5, .5)
        simulation.register_reaction_fission("MinDE to MinD and MinE, detach",
                                             "DE", "D", "E", .25,
                                             reaction_radius, .5, .5)

        ###################################
        #
        # register potentials
        #
        ###################################

        membrane_size = Vec(.5, 5, 10)
        layer = Vec(.08, .08, .08)
        extent = membrane_size + 2 * layer
        origin = -.5 * membrane_size - layer
        simulation.register_potential_box(
            "D", 10., origin, extent,
            False)  # (force constant, origin, extent, considerParticleRadius)
        simulation.register_potential_box(
            "D_P", 10., origin, extent,
            False)  # (force constant, origin, extent, considerParticleRadius)
        simulation.register_potential_box(
            "D_PB", 10., origin, extent,
            False)  # (force constant, origin, extent, considerParticleRadius)
        simulation.register_potential_box(
            "E", 10., origin, extent,
            False)  # (force constant, origin, extent, considerParticleRadius)
        simulation.register_potential_box(
            "DE", 10., origin, extent,
            False)  # (force constant, origin, extent, considerParticleRadius)

        # simulation.register_potential_piecewise_weak_interaction("D_P", "D_PB", 3, .02, 2, .05)  # (force constant, desired dist, depth, no interaction dist)

        ###################################
        #
        # membrane particles
        #
        ###################################
        using_membrane_particles = False
        if using_membrane_particles:
            simulation.register_particle_type(
                "M", 0, membrane_particle_size)  # membrane particle
            simulation.register_reaction_enzymatic(
                "Attach to membrane", "M", "D_P", "D_PB", .5,
                .01 + membrane_particle_size)  # .01 + .025  # todo: rate?
            dx = np.linspace(
                origin[0] + layer[0],
                -1 * origin[0] - layer[0],
                int(float(membrane_size[0]) / membrane_particle_size),
                endpoint=True)
            dy = np.linspace(
                origin[1] + layer[1],
                -1 * origin[1] - layer[1],
                int(float(membrane_size[1]) / membrane_particle_size),
                endpoint=True)
            dz = np.linspace(
                origin[2] + layer[2],
                -1 * origin[2] - layer[2],
                int(float(membrane_size[2]) / membrane_particle_size),
                endpoint=True)
            for y in dy:
                for z in dz:
                    simulation.add_particle(
                        "M", Vec(-1 * origin[0] - layer[0], y, z))
            print("done adding membrane particles")
        else:
            simulation.register_reaction_conversion("Phosphorylation", "D_P",
                                                    "D_PB", .5)
            simulation.register_reaction_enzymatic(
                "Enzymatic DP+DPB->DPB + DPB", "D_PB", "D_P", "D_PB", .5, .02)
        using_uniform_distribution = True
        n_minE_particles = 3120
        n_minD_particles = n_minE_particles * 4
        mine_x = np.random.uniform(origin[0] + layer[0],
                                   -1 * origin[0] - layer[0], n_minE_particles)
        mine_y = np.random.uniform(origin[1] + layer[1],
                                   -1 * origin[1] - layer[1], n_minE_particles)
        if using_uniform_distribution:
            mine_z = np.random.uniform(origin[2] + layer[2],
                                       -1 * origin[2] - layer[2],
                                       n_minE_particles)
        else:
            mine_z = np.random.uniform(origin[2] + layer[2],
                                       .5 * (-1 * origin[2] - layer[2]),
                                       n_minE_particles)

        mind_x = np.random.uniform(origin[0] + layer[0],
                                   -1 * origin[0] - layer[0], n_minD_particles)
        mind_y = np.random.uniform(origin[1] + layer[1],
                                   -1 * origin[1] - layer[1], n_minD_particles)
        if using_uniform_distribution:
            mind_z = np.random.uniform(origin[2] + layer[2],
                                       -1 * origin[2] - layer[2],
                                       n_minD_particles)
        else:
            mind_z = np.random.uniform(.5 * (-1 * origin[2] - layer[2]),
                                       -1 * origin[2] - layer[2],
                                       n_minD_particles)

        for i in range(n_minE_particles):
            simulation.add_particle("E", Vec(mine_x[i], mine_y[i], mine_z[i]))

        for i in range(int(.5 * n_minD_particles)):
            simulation.add_particle("D", Vec(mind_x[i], mind_y[i], mind_z[i]))
        for i in range(int(.5 * n_minD_particles), n_minD_particles):
            simulation.add_particle("D_P", Vec(mind_x[i], mind_y[i],
                                               mind_z[i]))

        self.timestep = simulation.get_recommended_time_step(2)

        ###################################
        #
        # register observables
        #
        ###################################

        # simulation.register_observable_center_of_mass(1, self.com_callback_mind, ["D", "D_P", "D_PB"])
        # simulation.register_observable_center_of_mass(1, self.com_callback_mine, ["E"])
        # simulation.register_observable_center_of_mass(1, self.com_callback_minde, ["DE", "D_PB"])
        print("histogram start")
        # simulation.register_observable_histogram_along_axis(100, self.histrogram_callback_minD, np.arange(-3, 3, .1), ["D", "D_P", "D_PB"], 2)
        # simulation.register_observable_histogram_along_axis(100, self.histrogram_callback_minE, np.arange(-3, 3, .1), ["D_PB", "DE"], 2)
        stride = int(.01 / self.timestep)
        self.stride = stride
        print("using stride=%s" % stride)
        bins = np.linspace(-7, 7, 80)
        simulation.register_observable_histogram_along_axis(
            stride, bins, 2, ["D"], self.histogram_callback_minD)
        simulation.register_observable_histogram_along_axis(
            stride, bins, 2, ["D_P"], self.histogram_callback_minDP)
        simulation.register_observable_histogram_along_axis(
            stride, bins, 2, ["D_PB"], self.histogram_callback_minDPB)
        simulation.register_observable_histogram_along_axis(
            stride, bins, 2, ["E"], self.histogram_callback_minE)
        simulation.register_observable_histogram_along_axis(
            stride, bins, 2, ["DE"], self.histogram_callback_minDE)
        simulation.register_observable_histogram_along_axis(
            stride, bins, 2, ["D", "D_P", "D_PB", "DE"],
            self.histogram_callback_M)
        simulation.register_observable_n_particles(
            stride, ["D", "D_P", "D_PB", "E", "DE"], self.n_particles_callback)
        print("histogram end")

        self.n_timesteps = int(1200. / self.timestep)

        print("starting simulation for effectively %s sec" %
              (self.timestep * self.n_timesteps))
        simulation.run_scheme_readdy(True).with_reaction_scheduler(
            "GillespieParallel").configure(self.timestep).run(self.n_timesteps)

        if self._result_fname is not None:
            with open(self._result_fname, 'wb') as f:
                np.save(f, np.array(self._hist_data))