Code Example #1
    def test_particle_positions_observable(self):
        fname = os.path.join(self.dir, "test_observables_particle_positions.h5")
        sim = Simulation()
        sim.set_kernel("SingleCPU")
        sim.box_size = common.Vec(13, 13, 13)
        sim.register_particle_type("A", .1)
        sim.add_particle("A", common.Vec(0, 0, 0))
        # every time step, add one particle
        sim.register_observable_n_particles(1, ["A"], lambda n: sim.add_particle("A", common.Vec(1.5, 2.5, 3.5)))
        handle = sim.register_observable_particle_positions(1, [])
        n_timesteps = 19
        with closing(io.File.create(fname)) as f:
            handle.enable_write_to_file(f, u"particle_positions", int(3))
            sim.run_scheme_readdy(True).configure(0).run(n_timesteps)
            handle.flush()

        with h5py.File(fname, "r") as f2:
            data = f2["readdy/observables/particle_positions/data"][:]
            np.testing.assert_equal(len(data), n_timesteps + 1)
            for t, positions in enumerate(data):
                # we begin with two particles
                np.testing.assert_equal(len(positions), t + 2)
                np.testing.assert_equal(positions[0]["x"], 0)
                np.testing.assert_equal(positions[0]["y"], 0)
                np.testing.assert_equal(positions[0]["z"], 0)
                for i in range(1, len(positions)):
                    np.testing.assert_equal(positions[i]["x"], 1.5)
                    np.testing.assert_equal(positions[i]["y"], 2.5)
                    np.testing.assert_equal(positions[i]["z"], 3.5)
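
The snippets on this page are methods taken from ReaDDy's Python test suite, so module-level imports are not shown. A rough sketch of the preamble they assume is given below; only the standard-library and third-party imports are spelled out, since Simulation, KernelProvider, ParticleTypeFlavor, Vec, Pot2, TrajectoryReader, platform_utils and the common / io helper modules come from ReaDDy's internal Python bindings, whose exact import paths depend on the installed ReaDDy version.

    import os
    from contextlib import closing

    import h5py
    import numpy as np

    # Simulation, KernelProvider, ParticleTypeFlavor, Vec, Pot2, TrajectoryReader,
    # platform_utils and the `common` / `io` helpers are provided by ReaDDy's
    # internal Python bindings; their import paths vary between ReaDDy versions
    # and are therefore not reproduced here.
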
Code Example #2
    def test_virial_observable_CPU(self):
        fname = os.path.join(self.dir, "test_observables_virial.h5")

        sim = Simulation()
        sim.set_kernel("CPU")
        sim.box_size = common.Vec(13, 13, 13)
        sim.register_particle_type("A", .1)
        sim.register_potential_harmonic_repulsion("A", "A", 10., .5)
        for _ in range(10000):
            pos = common.Vec(*(13*np.random.random(size=3)-.5*13))
            sim.add_particle("A", pos)

        virials = []
        def virial_callback(virial):
            virials.append(np.ndarray((3,3), buffer=virial))

        handle = sim.register_observable_virial(1, virial_callback)
        with closing(io.File.create(fname)) as f:
            handle.enable_write_to_file(f, u"virial", int(3))
            sim.run_scheme_readdy(True).configure(.1).run(10)
            handle.flush()

        with h5py.File(fname, "r") as f2:
            h5virials = f2["readdy/observables/virial/data"]
            for v, v2 in zip(virials, h5virials):
                np.testing.assert_almost_equal(v, v2)
Code Example #3
File: test_io.py Project: marscher/readdy
    def test_write_flat_trajectory(self):
        common.set_logging_level("error")
        traj_fname = os.path.join(self.dir, "flat_traj.h5")
        simulation = Simulation()
        simulation.set_kernel("SingleCPU")
        simulation.box_size = common.Vec(5, 5, 5)
        simulation.register_particle_type("A", 0.0, 0.0)

        def callback(_):
            simulation.add_particle("A", common.Vec(0, 0, 0))

        simulation.register_observable_n_particles(1, ["A"], callback)
        traj_handle = simulation.register_observable_flat_trajectory(1)
        with closing(
                io.File(traj_fname, io.FileAction.CREATE,
                        io.FileFlag.OVERWRITE)) as f:
            traj_handle.enable_write_to_file(f, u"", int(3))
            simulation.run_scheme_readdy(True).configure(1).run(20)

        r = TrajectoryReader(traj_fname)
        trajectory_items = r[:]
        for idx, items in enumerate(trajectory_items):
            np.testing.assert_equal(len(items), idx + 1)
            for item in items:
                np.testing.assert_equal(item.t, idx)
                np.testing.assert_equal(item.position, np.array([.0, .0, .0]))
        common.set_logging_level("debug")
Code Example #4
File: test_io.py Project: chrisfroe/readdy
    def test_write_trajectory(self):
        traj_fname = os.path.join(self.dir, "traj.h5")
        simulation = Simulation()
        simulation.set_kernel("SingleCPU")
        simulation.box_size = common.Vec(5, 5, 5)
        simulation.register_particle_type("A", 0.0)
        simulation.register_reaction_conversion("A->A", "A", "A", 1.)

        def callback(_):
            simulation.add_particle("A", common.Vec(0, 0, 0))

        simulation.register_observable_n_particles(1, ["A"], callback)
        traj_handle = simulation.register_observable_trajectory(0)
        with closing(io.File.create(traj_fname, io.FileFlag.OVERWRITE)) as f:
            traj_handle.enable_write_to_file(f, u"", 3)
            simulation.run_scheme_readdy(True).write_config_to_file(f).configure(1).run(20)

        r = TrajectoryReader(traj_fname)
        trajectory_items = r[:]
        for idx, items in enumerate(trajectory_items):
            np.testing.assert_equal(len(items), idx+1)
            for item in items:
                np.testing.assert_equal(item.t, idx)
                np.testing.assert_equal(item.position, np.array([.0, .0, .0]))

        with h5py.File(traj_fname, "r") as f:
            np.testing.assert_equal("A", f["readdy/config/particle_types"][0]["name"])
Code Example #5
    def test_particle_positions_observable(self):
        fname = os.path.join(self.dir,
                             "test_observables_particle_positions.h5")
        sim = Simulation()
        sim.set_kernel("SingleCPU")
        sim.box_size = common.Vec(13, 13, 13)
        sim.register_particle_type("A", .1)
        sim.add_particle("A", common.Vec(0, 0, 0))
        # every time step, add one particle
        sim.register_observable_n_particles(
            1, ["A"],
            lambda n: sim.add_particle("A", common.Vec(1.5, 2.5, 3.5)))
        handle = sim.register_observable_particle_positions(1, [])
        n_timesteps = 19
        with closing(io.File.create(fname)) as f:
            handle.enable_write_to_file(f, u"particle_positions", int(3))
            sim.run_scheme_readdy(True).configure(0).run(n_timesteps)
            handle.flush()

        with h5py.File(fname, "r") as f2:
            data = f2["readdy/observables/particle_positions/data"][:]
            np.testing.assert_equal(len(data), n_timesteps + 1)
            for t, positions in enumerate(data):
                # we begin with two particles
                np.testing.assert_equal(len(positions), t + 2)
                np.testing.assert_equal(positions[0]["x"], 0)
                np.testing.assert_equal(positions[0]["y"], 0)
                np.testing.assert_equal(positions[0]["z"], 0)
                for i in range(1, len(positions)):
                    np.testing.assert_equal(positions[i]["x"], 1.5)
                    np.testing.assert_equal(positions[i]["y"], 2.5)
                    np.testing.assert_equal(positions[i]["z"], 3.5)
Code Example #6
    def chain_decay(self, kernel):
        sim = Simulation()
        sim.set_kernel(kernel)
        sim.box_size = common.Vec(10, 10, 10)
        sim.register_topology_type("TA")
        np.testing.assert_equal(sim.kernel_supports_topologies(), True)

        sim.register_particle_type("B", 1.0, ParticleTypeFlavor.NORMAL)
        sim.register_particle_type("Topology A", 1.0, ParticleTypeFlavor.TOPOLOGY)
        sim.configure_topology_bond_potential("Topology A", "Topology A", 10, 10)

        n_elements = 50.
        particles = [sim.create_topology_particle("Topology A", common.Vec(-5. + i * 10. / n_elements, 0, 0))
                     for i in range(int(n_elements))]
        topology = sim.add_topology("TA", particles)

        for i in range(int(n_elements - 1)):
            topology.get_graph().add_edge(i, i + 1)

        sim.register_structural_topology_reaction("TA", self._get_decay_reaction())
        sim.register_structural_topology_reaction("TA", self._get_split_reaction())

        # h = sim.register_observable_n_particles(1, [], lambda x: print("n particles=%s" % x))

        np.testing.assert_equal(1, len(sim.current_topologies()))

        sim.run_scheme_readdy(True).evaluate_topology_reactions().configure_and_run(int(500), float(1.0))

        np.testing.assert_equal(0, len(sim.current_topologies()))
Code Example #7
    def test_write_trajectory(self):
        traj_fname = os.path.join(self.dir, "traj.h5")
        simulation = Simulation()
        simulation.set_kernel("SingleCPU")
        simulation.box_size = common.Vec(5, 5, 5)
        simulation.register_particle_type("A", 0.0)
        simulation.register_reaction_conversion("A->A", "A", "A", 1.)

        def callback(_):
            simulation.add_particle("A", common.Vec(0, 0, 0))

        simulation.register_observable_n_particles(1, ["A"], callback)
        traj_handle = simulation.register_observable_trajectory(0)
        with closing(io.File.create(traj_fname, io.FileFlag.OVERWRITE)) as f:
            traj_handle.enable_write_to_file(f, u"", 3)
            simulation.run_scheme_readdy(True).write_config_to_file(
                f).configure(1).run(20)

        r = TrajectoryReader(traj_fname)
        trajectory_items = r[:]
        for idx, items in enumerate(trajectory_items):
            np.testing.assert_equal(len(items), idx + 1)
            for item in items:
                np.testing.assert_equal(item.t, idx)
                np.testing.assert_equal(item.position, np.array([.0, .0, .0]))

        with h5py.File(traj_fname, "r") as f:
            np.testing.assert_equal(
                "A", f["readdy/config/particle_types"][0]["name"])
Code Example #8
 def test_unbonded_edge(self):
     sim = Simulation()
     sim.set_kernel("SingleCPU")
     sim.box_size = common.Vec(10, 10, 10)
     np.testing.assert_equal(sim.kernel_supports_topologies(), True)
     sim.register_particle_type("T",
                                1.0,
                                .5,
                                flavor=ParticleTypeFlavor.TOPOLOGY)
     sim.register_particle_type("D",
                                1.0,
                                .5,
                                flavor=ParticleTypeFlavor.TOPOLOGY)
     sim.configure_topology_bond_potential("T", "T", 10., 11.)
     particles = [
         sim.create_topology_particle("T", common.Vec(0, 0, 0))
         for _ in range(3)
     ]
     particles.append(sim.create_topology_particle("D", common.Vec(0, 0,
                                                                   0)))
     top = sim.add_topology(particles)
     graph = top.get_graph()
     graph.add_edge(0, 1)
     graph.add_edge(1, 2)
     graph.add_edge(2, 3)
     with np.testing.assert_raises(ValueError):
         top.configure()
Code Example #9
    def test_histogram_along_axis_observable(self):
        fname = os.path.join(self.dir, "test_observables_hist_along_axis.h5")

        simulation = Simulation()
        simulation.set_kernel("SingleCPU")

        box_size = common.Vec(10, 10, 10)
        simulation.kbt = 2
        simulation.periodic_boundary = [True, True, True]
        simulation.box_size = box_size
        simulation.register_particle_type("A", .2)
        simulation.register_particle_type("B", .2)
        simulation.register_potential_harmonic_repulsion("A", "B", 10, 2.)
        simulation.add_particle("A", common.Vec(-2.5, 0, 0))
        simulation.add_particle("B", common.Vec(0, 0, 0))
        bin_borders = np.arange(0, 5, .01)
        n_time_steps = 50
        callback_hist = []

        def hist_callback(hist):
            callback_hist.append(hist)

        handle = simulation.register_observable_histogram_along_axis(2, bin_borders, 0, ["A", "B"], hist_callback)
        with closing(io.File.create(fname)) as f:
            handle.enable_write_to_file(f, u"hist_along_x_axis", int(3))
            simulation.run(n_time_steps, 0.02)
            handle.flush()

        with h5py.File(fname, "r") as f2:
            histogram = f2["readdy/observables/hist_along_x_axis/data"][:]
            time_series = f2["readdy/observables/hist_along_x_axis/time"]
            np.testing.assert_equal(time_series, np.array(range(0, n_time_steps+1))[::2])
            for t in range(n_time_steps // 2):
                np.testing.assert_equal(histogram[t], np.array(callback_hist[t]))
Code Example #10
    def test_virial_observable_CPU(self):
        fname = os.path.join(self.dir, "test_observables_virial.h5")

        sim = Simulation()
        sim.set_kernel("CPU")
        sim.box_size = common.Vec(13, 13, 13)
        sim.register_particle_type("A", .1)
        sim.register_potential_harmonic_repulsion("A", "A", 10., .5)
        for _ in range(10000):
            pos = common.Vec(*(13 * np.random.random(size=3) - .5 * 13))
            sim.add_particle("A", pos)

        virials = []

        def virial_callback(virial):
            virials.append(np.ndarray((3, 3), buffer=virial))

        handle = sim.register_observable_virial(1, virial_callback)
        with closing(io.File.create(fname)) as f:
            handle.enable_write_to_file(f, u"virial", int(3))
            sim.run_scheme_readdy(True).configure(.1).run(10)
            handle.flush()

        with h5py.File(fname, "r") as f2:
            h5virials = f2["readdy/observables/virial/data"]
            for v, v2 in zip(virials, h5virials):
                np.testing.assert_almost_equal(v, v2)
Code Example #11
class TestInternalSimulationModule(ReaDDyTestCase):

    def setUp(self):
        super().setUp()
        self.kernel_provider = KernelProvider.get()
        self.kernel_provider.load_from_dir(platform_utils.get_readdy_plugin_dir())
        self.simulation = Simulation()

    def py_harmonic_repulsion_energy(self, x_ij):
        dist = x_ij * x_ij
        # dist is the squared distance; if the actual distance is below 5, return a harmonic energy with force constant 1
        if dist < 25:
            return (np.sqrt(dist) - 5) ** 2
        else:
            return 0

    def py_harmonic_repulsion_force(self, x_ij):
        dist = x_ij * x_ij
        if dist < 25:
            dist = np.sqrt(dist)
            return (2 * (dist - 5) / dist) * x_ij
        else:
            return Vec(0, 0, 0)

    def test_properties(self):
        if not self.simulation.is_kernel_selected():
            self.simulation.set_kernel('SingleCPU')
        np.testing.assert_equal(self.simulation.is_kernel_selected(), True)
        np.testing.assert_equal(self.simulation.get_selected_kernel_type(), "SingleCPU")
        self.simulation.kbt = 5.0
        np.testing.assert_equal(self.simulation.kbt, 5.0)
        self.simulation.periodic_boundary = [True, False, True]
        np.testing.assert_equal(self.simulation.periodic_boundary, (True, False, True))
        self.simulation.box_size = Vec(1, 3.6, 7)
        np.testing.assert_equal(self.simulation.box_size, Vec(1, 3.6, 7))

    def py_position_observable_callback(self, positions):
        _vec_sum = Vec(0, 0, 0)
        for v in positions:
            _vec_sum += v
        mean = _vec_sum / float(len(positions))
        print("mean=%s" % mean)

    def test_potentials(self):
        if not self.simulation.is_kernel_selected():
            self.simulation.set_kernel("SingleCPU")

        ida = self.simulation.register_particle_type("ParticleTypeA", 1.0)
        idb = self.simulation.register_particle_type("ParticleTypeB", 3.0)
        self.simulation.register_particle_type("ParticleTypeA_internal", 1.0)
        self.simulation.register_particle_type("ParticleTypeB_internal", 3.0)
        pot = Pot2(ida, idb, self.py_harmonic_repulsion_energy,
                   self.py_harmonic_repulsion_force)
        self.simulation.register_potential_order_2(pot)
        self.simulation.register_potential_harmonic_repulsion("ParticleTypeA_internal", "ParticleTypeB_internal", 1.0, 2.0)
        self.simulation.add_particle("ParticleTypeA", Vec(0, 0, 0))
        self.simulation.add_particle("ParticleTypeB", Vec(0.4, 0.4, 0.4))
        self.simulation.run(100, 1)
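
In py_harmonic_repulsion_energy and py_harmonic_repulsion_force above, x_ij * x_ij is the dot product of the connecting vector with itself, i.e. the squared distance. A plain-NumPy sketch of the same pair potential is shown below for reference; the function names and the defaults (interaction distance 5, force constant 1) mirror the test but are otherwise illustrative and not part of ReaDDy's API.

    import numpy as np

    def harmonic_repulsion_energy(x_ij, interaction_distance=5.0, force_constant=1.0):
        # E = k * (r - r0)^2 for r < r0, and 0 otherwise
        r = np.linalg.norm(x_ij)
        if r < interaction_distance:
            return force_constant * (r - interaction_distance) ** 2
        return 0.0

    def harmonic_repulsion_force(x_ij, interaction_distance=5.0, force_constant=1.0):
        # F = 2 * k * (r - r0) / r * x_ij for 0 < r < r0, and 0 otherwise
        r = np.linalg.norm(x_ij)
        if 0.0 < r < interaction_distance:
            return 2.0 * force_constant * (r - interaction_distance) / r * np.asarray(x_ij, dtype=float)
        return np.zeros(3)
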
Code Example #12
    def test_n_particles_observable(self):
        common.set_logging_level("warn")
        fname = os.path.join(self.dir, "test_observables_n_particles.h5")

        simulation = Simulation()
        simulation.set_kernel("SingleCPU")

        box_size = common.Vec(10, 10, 10)
        simulation.kbt = 2
        simulation.periodic_boundary = [True, True, True]
        simulation.box_size = box_size
        simulation.register_particle_type("A", .2, 1.)
        simulation.register_particle_type("B", .2, 1.)
        simulation.add_particle("A", common.Vec(-2.5, 0, 0))
        simulation.add_particle("B", common.Vec(0, 0, 0))
        n_time_steps = 50
        callback_n_particles_a_b = []
        callback_n_particles_all = []

        def callback_ab(value):
            callback_n_particles_a_b.append(value)
            simulation.add_particle("A", common.Vec(-1, -1, -1))

        def callback_all(hist):
            callback_n_particles_all.append(hist)
            simulation.add_particle("A", common.Vec(-1, -1, -1))
            simulation.add_particle("B", common.Vec(-1, -1, -1))

        handle_a_b_particles = simulation.register_observable_n_particles(
            1, ["A", "B"], callback_ab)
        handle_all = simulation.register_observable_n_particles(
            1, [], callback_all)
        with closing(
                io.File(fname, io.FileAction.CREATE,
                        io.FileFlag.OVERWRITE)) as f:
            handle_a_b_particles.enable_write_to_file(f, u"n_a_b_particles",
                                                      int(3))
            handle_all.enable_write_to_file(f, u"n_particles", int(5))
            simulation.run(n_time_steps, 0.02)
            handle_all.flush()
            handle_a_b_particles.flush()

        with h5py.File(fname, "r") as f2:
            n_a_b_particles = f2["readdy/observables/n_a_b_particles/data"][:]
            n_particles = f2["readdy/observables/n_particles/data"][:]
            time_series = f2["readdy/observables/n_a_b_particles/time"]
            np.testing.assert_equal(time_series,
                                    np.array(range(0, n_time_steps + 1)))
            for t in range(n_time_steps):
                np.testing.assert_equal(n_a_b_particles[t][0],
                                        callback_n_particles_a_b[t][0])
                np.testing.assert_equal(n_a_b_particles[t][1],
                                        callback_n_particles_a_b[t][1])
                np.testing.assert_equal(n_particles[t][0],
                                        callback_n_particles_all[t][0])
Code Example #13
    def test_reaction_counts_observable(self):
        fname = os.path.join(self.dir,
                             "test_observables_particle_reaction_counts.h5")
        sim = Simulation()
        sim.set_kernel("CPU")
        sim.box_size = common.Vec(10, 10, 10)
        sim.register_particle_type("A", .0)
        sim.register_particle_type("B", .0)
        sim.register_particle_type("C", .0)
        sim.register_reaction_conversion("mylabel", "A", "B", .00001)
        sim.register_reaction_conversion("A->B", "A", "B", 1e16)
        sim.register_reaction_fusion("B+C->A", "B", "C", "A", 1e16, 1.0, .5,
                                     .5)
        sim.add_particle("A", common.Vec(0, 0, 0))
        sim.add_particle("B", common.Vec(1.0, 1.0, 1.0))
        sim.add_particle("C", common.Vec(1.1, 1.0, 1.0))

        n_timesteps = 1
        handle = sim.register_observable_reaction_counts(1)
        with closing(io.File.create(fname)) as f:
            handle.enable_write_to_file(f, u"reactions", int(3))
            sim.run_scheme_readdy(True).write_config_to_file(
                f).with_reaction_scheduler("Gillespie").configure_and_run(
                    n_timesteps, 1)

        import readdy.util.io_utils as io_utils
        reactions = io_utils.get_reactions(fname)

        with h5py.File(fname, "r") as f2:
            data = f2["readdy/observables/reactions"]
            time_series = f2["readdy/observables/reactions/time"]
            np.testing.assert_equal(time_series,
                                    np.array(range(0, n_timesteps + 1)))

            def get_item(name, collection):
                return next(x for x in collection if x["name"] == name)

            mylabel_id = get_item("mylabel", reactions.values())["id"]
            atob_id = get_item("A->B", reactions.values())["id"]
            fusion_id = get_item("B+C->A", reactions.values())["id"]

            # counts of first time step, time is first index
            np.testing.assert_equal(data["counts/" + str(mylabel_id)][0],
                                    np.array([0]))
            np.testing.assert_equal(data["counts/" + str(atob_id)][0],
                                    np.array([0]))
            np.testing.assert_equal(data["counts/" + str(fusion_id)][0],
                                    np.array([0]))
            # counts of second time step
            np.testing.assert_equal(data["counts/" + str(mylabel_id)][1],
                                    np.array([0]))
            np.testing.assert_equal(data["counts/" + str(atob_id)][1],
                                    np.array([1]))
            np.testing.assert_equal(data["counts/" + str(fusion_id)][1],
                                    np.array([1]))
Code Example #14
 def test_sanity(self):
     simulation = Simulation()
     simulation.set_kernel("SingleCPU")
     configurator = simulation.run_scheme_readdy(False)
     scheme = configurator \
         .with_integrator("EulerBDIntegrator") \
         .include_forces(False) \
         .with_reaction_scheduler("UncontrolledApproximation") \
         .evaluate_observables(False) \
         .configure(1)
     scheme.run(10)
Code Example #15
 def test_sanity(self):
     sim = Simulation()
     sim.set_kernel("SingleCPU")
     sim.box_size = common.Vec(10, 10, 10)
     np.testing.assert_equal(sim.kernel_supports_topologies(), True)
     sim.register_topology_type("TA")
     sim.register_particle_type("T",
                                1.0,
                                flavor=ParticleTypeFlavor.TOPOLOGY)
     sim.configure_topology_bond_potential("T", "T", 10., 11.)
     particles = [
         sim.create_topology_particle("T", common.Vec(x, 0, 0))
         for x in range(4)
     ]
     top = sim.add_topology("TA", particles)
     graph = top.get_graph()
     graph.add_edge(0, 1)
     graph.add_edge(1, 2)
     graph.add_edge(2, 3)
     np.testing.assert_equal(len(graph.get_vertices()), 4)
     for v in graph.get_vertices():
         if v.particle_index == 0:
             np.testing.assert_equal(top.position_of_vertex(v),
                                     common.Vec(0, 0, 0))
             np.testing.assert_equal(len(v.neighbors()), 1)
             np.testing.assert_equal(
                 1 in [vv.get().particle_index for vv in v], True)
         if v.particle_index == 1:
             np.testing.assert_equal(top.position_of_vertex(v),
                                     common.Vec(1, 0, 0))
             np.testing.assert_equal(len(v.neighbors()), 2)
             np.testing.assert_equal(
                 0 in [vv.get().particle_index for vv in v], True)
             np.testing.assert_equal(
                 2 in [vv.get().particle_index for vv in v], True)
         if v.particle_index == 2:
             np.testing.assert_equal(top.position_of_vertex(v),
                                     common.Vec(2, 0, 0))
             np.testing.assert_equal(len(v.neighbors()), 2)
             np.testing.assert_equal(
                 1 in [vv.get().particle_index for vv in v], True)
             np.testing.assert_equal(
                 3 in [vv.get().particle_index for vv in v], True)
         if v.particle_index == 3:
             np.testing.assert_equal(top.position_of_vertex(v),
                                     common.Vec(3, 0, 0))
             np.testing.assert_equal(len(v.neighbors()), 1)
             np.testing.assert_equal(
                 2 in [vv.get().particle_index for vv in v], True)
     top.configure()
     sim.run_scheme_readdy(True).configure_and_run(0, 1)
Code Example #16
 def test_unconnected_graph(self):
     sim = Simulation()
     sim.set_kernel("SingleCPU")
     sim.register_topology_type("TA")
     sim.box_size = common.Vec(10, 10, 10)
     np.testing.assert_equal(sim.kernel_supports_topologies(), True)
     sim.register_particle_type("T", 1.0, flavor=ParticleTypeFlavor.TOPOLOGY)
     sim.configure_topology_bond_potential("T", "T", 10., 11.)
     particles = [sim.create_topology_particle("T", common.Vec(0, 0, 0)) for _ in range(4)]
     top = sim.add_topology("TA", particles)
     graph = top.get_graph()
     graph.add_edge(0, 1)
     graph.add_edge(1, 2)
     with np.testing.assert_raises(ValueError):
         top.configure()
Code Example #17
    def test_interrupt_simple(self):
        sim = Simulation()
        sim.set_kernel("SingleCPU")
        sim.register_particle_type("A", 0.1)
        # Define counter as a list. This is a workaround because nosetests will complain otherwise.
        counter = [0]

        def increment(result):
            counter[0] += 1

        sim.register_observable_n_particles(1, ["A"], increment)
        scheme = sim.run_scheme_readdy(True).configure(0.1)
        do_continue = lambda t: t < 5
        scheme.run_with_criterion(do_continue)
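        # with stride 1 the observable is evaluated at t = 0..5 before the
        # criterion t < 5 stops the loop, i.e. six times in total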
        np.testing.assert_equal(counter[0], 6)
Code Example #18
    def run(self, time_steps, out_file):
        sim = Simulation()
        sim.set_kernel(self.kernel)
        sim.box_size = common.Vec(60, 20, 20)
        sim.periodic_boundary = [True, True, True]

        typeid_b = sim.register_particle_type("B", 1.0, 1.0,
                                              ParticleTypeFlavor.NORMAL)
        sim.register_particle_type("Topology A", .5, .5,
                                   ParticleTypeFlavor.TOPOLOGY)

        sim.register_potential_harmonic_repulsion("Topology A", "Topology A",
                                                  10)
        sim.register_potential_harmonic_repulsion("Topology A", "B", 10)
        sim.register_potential_harmonic_repulsion("B", "B", 10)

        sim.configure_topology_bond_potential("Topology A", "Topology A", 10,
                                              1.)
        sim.configure_topology_angle_potential("Topology A", "Topology A",
                                               "Topology A", 10, np.pi)
        # sim.configure_topology_dihedral_potential("Topology A", "Topology A", "Topology A", "Topology A", 1, 1, -np.pi)

        n_elements = 50.
        particles = [
            sim.create_topology_particle("Topology A",
                                         common.Vec(-25. + i, 0, 0))
            for i in range(int(n_elements))
        ]
        topology = sim.add_topology(particles)

        for i in range(int(n_elements - 1)):
            topology.get_graph().add_edge(i, i + 1)

        topology.add_reaction(self._get_decay_reaction(typeid_b))
        topology.add_reaction(self._get_split_reaction())

        traj_handle = sim.register_observable_flat_trajectory(1)
        with closing(
                io.File(out_file, io.FileAction.CREATE,
                        io.FileFlag.OVERWRITE)) as f:
            traj_handle.enable_write_to_file(f, u"", 50)
            sim.run_scheme_readdy(True)\
                .evaluate_topology_reactions()\
                .write_config_to_file(f)\
                .configure_and_run(time_steps, self.time_step)
        print("currently %s topologies" % len(sim.current_topologies()))
Code Example #19
    def test_radial_distribution_observable(self):
        common.set_logging_level("warn")
        fname = os.path.join(self.dir,
                             "test_observables_radial_distribution.h5")

        simulation = Simulation()
        simulation.set_kernel("SingleCPU")

        box_size = common.Vec(10, 10, 10)
        simulation.kbt = 2
        simulation.periodic_boundary = [True, True, True]
        simulation.box_size = box_size
        simulation.register_particle_type("A", .2, 1.)
        simulation.register_particle_type("B", .2, 1.)
        simulation.register_potential_harmonic_repulsion("A", "B", 10)
        simulation.add_particle("A", common.Vec(-2.5, 0, 0))
        simulation.add_particle("B", common.Vec(0, 0, 0))
        bin_borders = np.arange(0, 5, .01)
        density = 1. / (box_size[0] * box_size[1] * box_size[2])
        n_time_steps = 50
        callback_centers = []
        callback_rdf = []

        def rdf_callback(pair):
            callback_centers.append(pair[0])
            callback_rdf.append(pair[1])

        handle = simulation.register_observable_radial_distribution(
            1, bin_borders, ["A"], ["B"], density, rdf_callback)
        with closing(
                io.File(fname, io.FileAction.CREATE,
                        io.FileFlag.OVERWRITE)) as f:
            handle.enable_write_to_file(f, u"radial_distribution", int(3))
            simulation.run(n_time_steps, 0.02)
            handle.flush()

        with h5py.File(fname, "r") as f2:
            bin_centers = f2[
                "readdy/observables/radial_distribution/bin_centers"][:]
            distribution = f2[
                "readdy/observables/radial_distribution/distribution"][:]
            for t in range(n_time_steps):
                np.testing.assert_equal(bin_centers,
                                        np.array(callback_centers[t]))
                np.testing.assert_equal(distribution[t],
                                        np.array(callback_rdf[t]))
Code Example #20
    def test_n_particles_observable(self):
        fname = os.path.join(self.dir, "test_observables_n_particles.h5")

        simulation = Simulation()
        simulation.set_kernel("SingleCPU")

        box_size = common.Vec(10, 10, 10)
        simulation.kbt = 2
        simulation.periodic_boundary = [True, True, True]
        simulation.box_size = box_size
        simulation.register_particle_type("A", .2)
        simulation.register_particle_type("B", .2)
        simulation.add_particle("A", common.Vec(-2.5, 0, 0))
        simulation.add_particle("B", common.Vec(0, 0, 0))
        n_time_steps = 50
        callback_n_particles_a_b = []
        callback_n_particles_all = []

        def callback_ab(value):
            callback_n_particles_a_b.append(value)
            simulation.add_particle("A", common.Vec(-1, -1, -1))

        def callback_all(hist):
            callback_n_particles_all.append(hist)
            simulation.add_particle("A", common.Vec(-1, -1, -1))
            simulation.add_particle("B", common.Vec(-1, -1, -1))

        handle_a_b_particles = simulation.register_observable_n_particles(1, ["A", "B"], callback_ab)
        handle_all = simulation.register_observable_n_particles(1, [], callback_all)
        with closing(io.File.create(fname)) as f:
            handle_a_b_particles.enable_write_to_file(f, u"n_a_b_particles", int(3))
            handle_all.enable_write_to_file(f, u"n_particles", int(5))
            simulation.run(n_time_steps, 0.02)
            handle_all.flush()
            handle_a_b_particles.flush()

        with h5py.File(fname, "r") as f2:
            n_a_b_particles = f2["readdy/observables/n_a_b_particles/data"][:]
            n_particles = f2["readdy/observables/n_particles/data"][:]
            time_series = f2["readdy/observables/n_a_b_particles/time"]
            np.testing.assert_equal(time_series, np.array(range(0, n_time_steps+1)))
            for t in range(n_time_steps):
                np.testing.assert_equal(n_a_b_particles[t][0], callback_n_particles_a_b[t][0])
                np.testing.assert_equal(n_a_b_particles[t][1], callback_n_particles_a_b[t][1])
                np.testing.assert_equal(n_particles[t][0], callback_n_particles_all[t][0])
Code Example #21
File: test_timer.py Project: clonker/readdy
 def test_timer_sanity(self):
     simulation = Simulation()
     simulation.set_kernel("CPU")
     scheme = simulation.run_scheme_readdy(True)
     scheme.configure_and_run(10, 0.1)
     root = simulation.performance_root()
     np.testing.assert_equal(root.count(), 1)
     np.testing.assert_equal(root.time() > 0., True)
     if False:
         print(root)
         print(root[""])
         print(root["integrator"])
         print(root["integrator"].time())
         print(root["integrator"].count())
         integrator = root["integrator"]
         print(integrator["/"])
         print(integrator["/integrator"])
         print(root.keys())
Code Example #22
File: test_timer.py Project: chrisfroe/readdy
 def test_timer_sanity(self):
     simulation = Simulation()
     simulation.set_kernel("CPU")
     scheme = simulation.run_scheme_readdy(True)
     scheme.configure_and_run(10, 0.1)
     root = simulation.performance_root()
     np.testing.assert_equal(root.count(), 1)
     np.testing.assert_equal(root.time() > 0., True)
     if False:
         print(root)
         print(root[""])
         print(root["integrator"])
         print(root["integrator"].time())
         print(root["integrator"].count())
         integrator = root["integrator"]
         print(integrator["/"])
         print(integrator["/integrator"])
         print(root.keys())
Code Example #23
 def test_sanity(self):
     sim = Simulation()
     sim.set_kernel("SingleCPU")
     sim.box_size = common.Vec(10, 10, 10)
     np.testing.assert_equal(sim.kernel_supports_topologies(), True)
     sim.register_particle_type("T",
                                1.0,
                                .5,
                                flavor=ParticleTypeFlavor.TOPOLOGY)
     sim.configure_topology_bond_potential("T", "T", 10., 11.)
     particles = [
         sim.create_topology_particle("T", common.Vec(0, 0, 0))
         for _ in range(4)
     ]
     labels = ["%s" % i for i in range(4)]
     top = sim.add_topology(particles, labels)
     graph = top.get_graph()
     graph.add_edge("0", "1")
     graph.add_edge(1, 2)
     graph.add_edge("2", "3")
     np.testing.assert_equal(len(graph.get_vertices()), 4)
     for v in graph.get_vertices():
         if v.label == "0":
             np.testing.assert_equal(len(v.neighbors()), 1)
             np.testing.assert_equal(
                 1 in [vv.get().particle_index for vv in v], True)
         if v.label == "1":
             np.testing.assert_equal(len(v.neighbors()), 2)
             np.testing.assert_equal(
                 0 in [vv.get().particle_index for vv in v], True)
             np.testing.assert_equal(
                 2 in [vv.get().particle_index for vv in v], True)
         if v.label == "2":
             np.testing.assert_equal(len(v.neighbors()), 2)
             np.testing.assert_equal(
                 1 in [vv.get().particle_index for vv in v], True)
             np.testing.assert_equal(
                 3 in [vv.get().particle_index for vv in v], True)
         if v.label == "3":
             np.testing.assert_equal(len(v.neighbors()), 1)
             np.testing.assert_equal(
                 2 in [vv.get().particle_index for vv in v], True)
     top.configure()
     sim.run_scheme_readdy(True).configure_and_run(0, 1)
Code Example #24
    def test_reaction_counts_observable(self):
        fname = os.path.join(self.dir, "test_observables_particle_reaction_counts.h5")
        sim = Simulation()
        sim.set_kernel("CPU")
        sim.box_size = common.Vec(10, 10, 10)
        sim.register_particle_type("A", .0)
        sim.register_particle_type("B", .0)
        sim.register_particle_type("C", .0)
        sim.register_reaction_conversion("mylabel", "A", "B", .00001)
        sim.register_reaction_conversion("A->B", "A", "B", 1e16)
        sim.register_reaction_fusion("B+C->A", "B", "C", "A", 1e16, 1.0, .5, .5)
        sim.add_particle("A", common.Vec(0, 0, 0))
        sim.add_particle("B", common.Vec(1.0, 1.0, 1.0))
        sim.add_particle("C", common.Vec(1.1, 1.0, 1.0))

        n_timesteps = 1
        handle = sim.register_observable_reaction_counts(1)
        with closing(io.File.create(fname)) as f:
            handle.enable_write_to_file(f, u"reactions", int(3))
            sim.run_scheme_readdy(True).write_config_to_file(f).with_reaction_scheduler("Gillespie").configure_and_run(n_timesteps, 1)

        import readdy.util.io_utils as io_utils
        reactions = io_utils.get_reactions(fname)

        with h5py.File(fname, "r") as f2:
            data = f2["readdy/observables/reactions"]
            time_series = f2["readdy/observables/reactions/time"]
            np.testing.assert_equal(time_series, np.array(range(0, n_timesteps+1)))

            def get_item(name, collection):
                return next(x for x in collection if x["name"] == name)

            mylabel_id = get_item("mylabel", reactions.values())["id"]
            atob_id = get_item("A->B", reactions.values())["id"]
            fusion_id = get_item("B+C->A", reactions.values())["id"]

            # counts of first time step, time is first index
            np.testing.assert_equal(data["counts/"+str(mylabel_id)][0], np.array([0]))
            np.testing.assert_equal(data["counts/"+str(atob_id)][0], np.array([0]))
            np.testing.assert_equal(data["counts/"+str(fusion_id)][0], np.array([0]))
            # counts of second time step
            np.testing.assert_equal(data["counts/"+str(mylabel_id)][1], np.array([0]))
            np.testing.assert_equal(data["counts/"+str(atob_id)][1], np.array([1]))
            np.testing.assert_equal(data["counts/"+str(fusion_id)][1], np.array([1]))
Code Example #25
    def test_particles_observable(self):
        fname = os.path.join(self.dir, "test_observables_particles.h5")
        sim = Simulation()
        sim.set_kernel("SingleCPU")
        sim.box_size = common.Vec(13, 13, 13)
        typeid_A = sim.register_particle_type("A", .1, .1)
        typeid_B = sim.register_particle_type("B", .1, .1)
        sim.add_particle("A", common.Vec(0, 0, 0))
        sim.add_particle("B", common.Vec(0, 0, 0))
        # every time step, add one particle
        sim.register_observable_n_particles(
            1, ["A"],
            lambda n: sim.add_particle("A", common.Vec(1.5, 2.5, 3.5)))
        handle = sim.register_observable_particles(1)
        n_timesteps = 19
        with closing(
                io.File(fname, io.FileAction.CREATE,
                        io.FileFlag.OVERWRITE)) as f:
            handle.enable_write_to_file(f, u"particles", int(3))
            sim.run_scheme_readdy(True).configure(0).run(n_timesteps)
            handle.flush()

        with h5py.File(fname, "r") as f2:
            types = f2["readdy/observables/particles/types"][:]
            ids = f2["readdy/observables/particles/ids"][:]
            positions = f2["readdy/observables/particles/positions"][:]
            for t in range(n_timesteps):
                np.testing.assert_equal(len(types[t]), t + 3)
                np.testing.assert_equal(len(ids[t]), t + 3)
                np.testing.assert_equal(len(positions[t]), t + 3)
                np.testing.assert_equal(types[t][0], typeid_A)
                np.testing.assert_equal(positions[t][0][0], 0)
                np.testing.assert_equal(positions[t][0][1], 0)
                np.testing.assert_equal(positions[t][0][2], 0)
                np.testing.assert_equal(positions[t][1][0], 0)
                np.testing.assert_equal(positions[t][1][1], 0)
                np.testing.assert_equal(positions[t][1][2], 0)
                np.testing.assert_equal(types[t][1], typeid_B)
                for others in range(2, len(types[t])):
                    np.testing.assert_equal(types[t][others], typeid_A)
                    np.testing.assert_equal(positions[t][others][0], 1.5)
                    np.testing.assert_equal(positions[t][others][1], 2.5)
                    np.testing.assert_equal(positions[t][others][2], 3.5)
Code Example #26
    def test_interrupt_maxparticles(self):
        sim = Simulation()
        sim.set_kernel("SingleCPU")
        sim.register_particle_type("A", 0.1)
        sim.add_particle("A", Vec(0, 0, 0))
        sim.register_reaction_fission("bla", "A", "A", "A", 1000., 0., 0.5, 0.5)
        counter = [0]
        shall_stop = [False]

        def increment(result):
            counter[0] += 1
            if result[0] >= 8:
                shall_stop[0] = True

        sim.register_observable_n_particles(1, ["A"], increment)
        scheme = sim.run_scheme_readdy(True).configure(1.)
        do_continue = lambda t: not shall_stop[0]
        scheme.run_with_criterion(do_continue)
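        # the fission rate is so high that the number of A particles effectively
        # doubles every step (1, 2, 4, 8), so shall_stop is set after the fourth
        # evaluation of the observable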
        np.testing.assert_equal(counter[0], 4)
Code Example #27
    def test_histogram_along_axis_observable(self):
        common.set_logging_level("warn")
        fname = os.path.join(self.dir, "test_observables_hist_along_axis.h5")

        simulation = Simulation()
        simulation.set_kernel("SingleCPU")

        box_size = common.Vec(10, 10, 10)
        simulation.kbt = 2
        simulation.periodic_boundary = [True, True, True]
        simulation.box_size = box_size
        simulation.register_particle_type("A", .2, 1.)
        simulation.register_particle_type("B", .2, 1.)
        simulation.register_potential_harmonic_repulsion("A", "B", 10)
        simulation.add_particle("A", common.Vec(-2.5, 0, 0))
        simulation.add_particle("B", common.Vec(0, 0, 0))
        bin_borders = np.arange(0, 5, .01)
        n_time_steps = 50
        callback_hist = []

        def hist_callback(hist):
            callback_hist.append(hist)

        handle = simulation.register_observable_histogram_along_axis(
            2, bin_borders, 0, ["A", "B"], hist_callback)
        with closing(
                io.File(fname, io.FileAction.CREATE,
                        io.FileFlag.OVERWRITE)) as f:
            handle.enable_write_to_file(f, u"hist_along_x_axis", int(3))
            simulation.run(n_time_steps, 0.02)
            handle.flush()

        with h5py.File(fname, "r") as f2:
            histogram = f2["readdy/observables/hist_along_x_axis/data"][:]
            time_series = f2["readdy/observables/hist_along_x_axis/time"]
            np.testing.assert_equal(time_series,
                                    np.array(range(0, n_time_steps + 1))[::2])
            for t in range(n_time_steps // 2):
                np.testing.assert_equal(histogram[t],
                                        np.array(callback_hist[t]))
Code Example #28
    def test_particles_observable(self):
        fname = os.path.join(self.dir, "test_observables_particles.h5")
        sim = Simulation()
        sim.set_kernel("SingleCPU")
        sim.box_size = common.Vec(13, 13, 13)
        typeid_A = sim.register_particle_type("A", .1)
        typeid_B = sim.register_particle_type("B", .1)
        sim.add_particle("A", common.Vec(0, 0, 0))
        sim.add_particle("B", common.Vec(0, 0, 0))
        # every time step, add one particle
        sim.register_observable_n_particles(1, ["A"], lambda n: sim.add_particle("A", common.Vec(1.5, 2.5, 3.5)))
        handle = sim.register_observable_particles(1)
        n_timesteps = 19
        with closing(io.File.create(fname)) as f:
            handle.enable_write_to_file(f, u"particles", int(3))
            sim.run_scheme_readdy(True).configure(0).run(n_timesteps)
            handle.flush()

        with h5py.File(fname, "r") as f2:
            types = f2["readdy/observables/particles/types"][:]
            ids = f2["readdy/observables/particles/ids"][:]
            positions = f2["readdy/observables/particles/positions"][:]
            for t in range(n_timesteps):
                np.testing.assert_equal(len(types[t]), t + 3)
                np.testing.assert_equal(len(ids[t]), t + 3)
                np.testing.assert_equal(len(positions[t]), t + 3)
                np.testing.assert_equal(types[t][0], typeid_A)
                np.testing.assert_equal(positions[t][0][0], 0)
                np.testing.assert_equal(positions[t][0][1], 0)
                np.testing.assert_equal(positions[t][0][2], 0)
                np.testing.assert_equal(positions[t][1][0], 0)
                np.testing.assert_equal(positions[t][1][1], 0)
                np.testing.assert_equal(positions[t][1][2], 0)
                np.testing.assert_equal(types[t][1], typeid_B)
                for others in range(2, len(types[t])):
                    np.testing.assert_equal(types[t][others], typeid_A)
                    np.testing.assert_equal(positions[t][others][0], 1.5)
                    np.testing.assert_equal(positions[t][others][1], 2.5)
                    np.testing.assert_equal(positions[t][others][2], 3.5)
Code Example #29
    def chain_decay(self, kernel):
        sim = Simulation()
        sim.set_kernel(kernel)
        sim.box_size = common.Vec(10, 10, 10)
        sim.register_topology_type("TA")
        np.testing.assert_equal(sim.kernel_supports_topologies(), True)

        sim.register_particle_type("B", 1.0, ParticleTypeFlavor.NORMAL)
        sim.register_particle_type("Topology A", 1.0,
                                   ParticleTypeFlavor.TOPOLOGY)
        sim.configure_topology_bond_potential("Topology A", "Topology A", 10,
                                              10)

        n_elements = 50.
        particles = [
            sim.create_topology_particle(
                "Topology A", common.Vec(-5. + i * 10. / n_elements, 0, 0))
            for i in range(int(n_elements))
        ]
        topology = sim.add_topology("TA", particles)

        for i in range(int(n_elements - 1)):
            topology.get_graph().add_edge(i, i + 1)

        sim.register_structural_topology_reaction("TA",
                                                  self._get_decay_reaction())
        sim.register_structural_topology_reaction("TA",
                                                  self._get_split_reaction())

        # h = sim.register_observable_n_particles(1, [], lambda x: print("n particles=%s" % x))

        np.testing.assert_equal(1, len(sim.current_topologies()))

        sim.run_scheme_readdy(
            True).evaluate_topology_reactions().configure_and_run(
                int(500), float(1.0))

        np.testing.assert_equal(0, len(sim.current_topologies()))
Code Example #30
    def test_radial_distribution_observable(self):
        fname = os.path.join(self.dir, "test_observables_radial_distribution.h5")

        simulation = Simulation()
        simulation.set_kernel("SingleCPU")

        box_size = common.Vec(10, 10, 10)
        simulation.kbt = 2
        simulation.periodic_boundary = [True, True, True]
        simulation.box_size = box_size
        simulation.register_particle_type("A", .2)
        simulation.register_particle_type("B", .2)
        simulation.register_potential_harmonic_repulsion("A", "B", 10, 2.)
        simulation.add_particle("A", common.Vec(-2.5, 0, 0))
        simulation.add_particle("B", common.Vec(0, 0, 0))
        bin_borders = np.arange(0, 5, .01)
        density = 1. / (box_size[0] * box_size[1] * box_size[2])
        n_time_steps = 50
        callback_centers = []
        callback_rdf = []

        def rdf_callback(pair):
            callback_centers.append(pair[0])
            callback_rdf.append(pair[1])

        handle = simulation.register_observable_radial_distribution(1, bin_borders, ["A"], ["B"], density, rdf_callback)
        with closing(io.File.create(fname)) as f:
            handle.enable_write_to_file(f, u"radial_distribution", int(3))
            simulation.run(n_time_steps, 0.02)
            handle.flush()

        with h5py.File(fname, "r") as f2:
            bin_centers = f2["readdy/observables/radial_distribution/bin_centers"][:]
            distribution = f2["readdy/observables/radial_distribution/distribution"][:]
            for t in range(n_time_steps):
                np.testing.assert_equal(bin_centers, np.array(callback_centers[t]))
                np.testing.assert_equal(distribution[t], np.array(callback_rdf[t]))
Code Example #31
 def test_sanity(self):
     sim = Simulation()
     sim.set_kernel("SingleCPU")
     sim.box_size = common.Vec(10, 10, 10)
     np.testing.assert_equal(sim.kernel_supports_topologies(), True)
     sim.register_topology_type("TA")
     sim.register_particle_type("T", 1.0, flavor=ParticleTypeFlavor.TOPOLOGY)
     sim.configure_topology_bond_potential("T", "T", 10., 11.)
     particles = [sim.create_topology_particle("T", common.Vec(x, 0, 0)) for x in range(4)]
     top = sim.add_topology("TA", particles)
     graph = top.get_graph()
     graph.add_edge(0, 1)
     graph.add_edge(1, 2)
     graph.add_edge(2, 3)
     np.testing.assert_equal(len(graph.get_vertices()), 4)
     for v in graph.get_vertices():
         if v.particle_index == 0:
             np.testing.assert_equal(top.position_of_vertex(v), common.Vec(0, 0, 0))
             np.testing.assert_equal(len(v.neighbors()), 1)
             np.testing.assert_equal(1 in [vv.get().particle_index for vv in v], True)
         if v.particle_index == 1:
             np.testing.assert_equal(top.position_of_vertex(v), common.Vec(1, 0, 0))
             np.testing.assert_equal(len(v.neighbors()), 2)
             np.testing.assert_equal(0 in [vv.get().particle_index for vv in v], True)
             np.testing.assert_equal(2 in [vv.get().particle_index for vv in v], True)
         if v.particle_index == 2:
             np.testing.assert_equal(top.position_of_vertex(v), common.Vec(2, 0, 0))
             np.testing.assert_equal(len(v.neighbors()), 2)
             np.testing.assert_equal(1 in [vv.get().particle_index for vv in v], True)
             np.testing.assert_equal(3 in [vv.get().particle_index for vv in v], True)
         if v.particle_index == 3:
             np.testing.assert_equal(top.position_of_vertex(v), common.Vec(3, 0, 0))
             np.testing.assert_equal(len(v.neighbors()), 1)
             np.testing.assert_equal(2 in [vv.get().particle_index for vv in v], True)
     top.configure()
     sim.run_scheme_readdy(True).configure_and_run(0, 1)
Code Example #32
    def test_forces_observable(self):
        fname = os.path.join(self.dir, "test_observables_particle_forces.h5")
        sim = Simulation()
        sim.set_kernel("CPU")
        sim.box_size = common.Vec(13, 13, 13)
        sim.register_particle_type("A", .1, .1)
        sim.add_particle("A", common.Vec(0, 0, 0))
        # every time step, add one particle
        sim.register_observable_n_particles(
            1, ["A"],
            lambda n: sim.add_particle("A", common.Vec(1.5, 2.5, 3.5)))
        handle = sim.register_observable_forces(1, [])
        n_timesteps = 19
        with closing(
                io.File(fname, io.FileAction.CREATE,
                        io.FileFlag.OVERWRITE)) as f:
            handle.enable_write_to_file(f, u"forces", int(3))
            sim.run_scheme_readdy(True).configure(1).run(n_timesteps)
            handle.flush()

        with h5py.File(fname, "r") as f2:
            data = f2["readdy/observables/forces/data"][:]
            time_series = f2["readdy/observables/forces/time"]
            np.testing.assert_equal(len(data), n_timesteps + 1)
            np.testing.assert_equal(time_series,
                                    np.array(range(0, n_timesteps + 1)))
            for t, forces in enumerate(data):
                # we begin with two particles
                np.testing.assert_equal(len(forces), t + 2)
                np.testing.assert_equal(forces[0]["x"], 0)
                np.testing.assert_equal(forces[0]["y"], 0)
                np.testing.assert_equal(forces[0]["z"], 0)
                for i in range(1, len(forces)):
                    np.testing.assert_equal(forces[i]["x"], 0)
                    np.testing.assert_equal(forces[i]["y"], 0)
                    np.testing.assert_equal(forces[i]["z"], 0)
Code Example #33
File: chain_decay.py Project: chrisfroe/readdy
    def run(self, time_steps, out_file):
        sim = Simulation()
        sim.set_kernel(self.kernel)
        sim.box_size = common.Vec(60, 20, 20)
        sim.periodic_boundary = [True, True, True]

        typeid_b = sim.register_particle_type("B", 1.0, 1.0, ParticleTypeFlavor.NORMAL)
        sim.register_particle_type("Topology A", .5, .5, ParticleTypeFlavor.TOPOLOGY)

        sim.register_potential_harmonic_repulsion("Topology A", "Topology A", 10)
        sim.register_potential_harmonic_repulsion("Topology A", "B", 10)
        sim.register_potential_harmonic_repulsion("B", "B", 10)

        sim.configure_topology_bond_potential("Topology A", "Topology A", 10, 1.)
        sim.configure_topology_angle_potential("Topology A", "Topology A", "Topology A", 10, np.pi)
        # sim.configure_topology_dihedral_potential("Topology A", "Topology A", "Topology A", "Topology A", 1, 1, -np.pi)

        n_elements = 50.
        particles = [sim.create_topology_particle("Topology A", common.Vec(-25. + i, 0, 0))
                     for i in range(int(n_elements))]
        topology = sim.add_topology(particles)

        for i in range(int(n_elements - 1)):
            topology.get_graph().add_edge(i, i + 1)

        topology.add_reaction(self._get_decay_reaction(typeid_b))
        topology.add_reaction(self._get_split_reaction())

        traj_handle = sim.register_observable_flat_trajectory(1)
        with closing(io.File(out_file, io.FileAction.CREATE, io.FileFlag.OVERWRITE)) as f:
            traj_handle.enable_write_to_file(f, u"", 50)
            sim.run_scheme_readdy(True)\
                .evaluate_topology_reactions()\
                .write_config_to_file(f)\
                .configure_and_run(time_steps, self.time_step)
        print("currently %s topologies" % len(sim.current_topologies()))
Code Example #34
    def test_center_of_mass_observable(self):
        common.set_logging_level("warn")
        fname = os.path.join(self.dir, "test_observables_com.h5")

        simulation = Simulation()
        simulation.set_kernel("SingleCPU")

        box_size = common.Vec(10, 10, 10)
        simulation.kbt = 2
        simulation.periodic_boundary = [True, True, True]
        simulation.box_size = box_size
        simulation.register_particle_type("A", .2, 1.)
        simulation.register_particle_type("B", .2, 1.)
        simulation.add_particle("A", common.Vec(-2.5, 0, 0))
        simulation.add_particle("B", common.Vec(0, 0, 0))
        n_time_steps = 50
        callback_com = []

        def com_callback(vec):
            callback_com.append(vec)

        handle = simulation.register_observable_center_of_mass(
            1, ["A", "B"], com_callback)
        with closing(
                io.File(fname, io.FileAction.CREATE,
                        io.FileFlag.OVERWRITE)) as f:
            handle.enable_write_to_file(f, u"com", 3)
            simulation.run(n_time_steps, 0.02)
            handle.flush()

        with h5py.File(fname, "r") as f2:
            com = f2["readdy/observables/com/data"][:]
            for t in range(n_time_steps):
                np.testing.assert_equal(com[t]["x"], callback_com[t][0])
                np.testing.assert_equal(com[t]["y"], callback_com[t][1])
                np.testing.assert_equal(com[t]["z"], callback_com[t][2])
Code Example #35
File: min-e_min-d.py Project: yangxi1209/readdy
    def execute(self):
        ###################################
        #
        # Units:
        #   - [x] = µm
        #   - [t] = s
        #   - [E] = kJ/mol
        #
        ###################################

        kernel_provider = KernelProvider.get()
        kernel_provider.load_from_dir(platform_utils.get_readdy_plugin_dir())
        simulation = Simulation()
        simulation.set_kernel("CPU")

        ###################################
        #
        # set up simulation box
        #
        ###################################

        box_size = Vec(2, 7, 12)
        simulation.box_size = box_size
        simulation.kbt = 2.437  # room temperature
        simulation.periodic_boundary = [False, False, False]

        ###################################
        #
        # register particle types
        #
        ###################################

        # particle size, see: http://bmccellbiol.biomedcentral.com/articles/10.1186/1471-2121-5-29
        # "The size of the V-ATPase complex is about 15 nm (diameter) x 25 nm (length from lumen side to tip of head)"

        membrane_particle_size = .05

        diffusion_factor = .5
        simulation.register_particle_type("D", 2.5 * diffusion_factor,
                                          .01)  # MinD-ADP (without phosphor)
        simulation.register_particle_type("D_P", 2.5 * diffusion_factor,
                                          .01)  # MinD-ATP (with phosphor)
        simulation.register_particle_type("E", 2.5 * diffusion_factor,
                                          .01)  # MinE
        simulation.register_particle_type("D_PB", .01 * diffusion_factor,
                                          .01)  # MinD-ATP bound
        simulation.register_particle_type("DE", .01 * diffusion_factor,
                                          .01)  # MinDE

        ###################################
        #
        # register reaction types
        #
        ###################################

        reaction_radius = 4 * (
            0.01 + 0.01
        )  # = 4 * sum of the particle radii (magic factor so that k_fusion makes sense, sort of)
        # k_fusion = brentq(lambda x: self.erban_chapman(.093, 2.5 + .01, reaction_radius, x), 1, 5000000)
        k_fusion = 1.0
        print("k_fusion=%s" % k_fusion)
        simulation.register_reaction_conversion("Phosphorylation", "D", "D_P",
                                                .5)
        simulation.register_reaction_fusion("bound MinD+MinE->MinDE", "D_PB",
                                            "E", "DE", k_fusion,
                                            reaction_radius * 3.5, .5, .5)
        simulation.register_reaction_fission("MinDE to MinD and MinE, detach",
                                             "DE", "D", "E", .25,
                                             reaction_radius, .5, .5)

        ###################################
        #
        # register potentials
        #
        ###################################

        membrane_size = Vec(.5, 5, 10)
        layer = Vec(.08, .08, .08)
        extent = membrane_size + 2 * layer
        origin = -.5 * membrane_size - layer
        simulation.register_potential_box(
            "D", 10., origin, extent,
            False)  # (force constant, origin, extent, considerParticleRadius)
        simulation.register_potential_box(
            "D_P", 10., origin, extent,
            False)  # (force constant, origin, extent, considerParticleRadius)
        simulation.register_potential_box(
            "D_PB", 10., origin, extent,
            False)  # (force constant, origin, extent, considerParticleRadius)
        simulation.register_potential_box(
            "E", 10., origin, extent,
            False)  # (force constant, origin, extent, considerParticleRadius)
        simulation.register_potential_box(
            "DE", 10., origin, extent,
            False)  # (force constant, origin, extent, considerParticleRadius)

        # simulation.register_potential_piecewise_weak_interaction("D_P", "D_PB", 3, .02, 2, .05)  # (force constant, desired dist, depth, no interaction dist)

        ###################################
        #
        # membrane particles
        #
        ###################################
        using_membrane_particles = False
        if using_membrane_particles:
            simulation.register_particle_type(
                "M", 0, membrane_particle_size)  # membrane particle
            simulation.register_reaction_enzymatic(
                "Attach to membrane", "M", "D_P", "D_PB", .5,
                .01 + membrane_particle_size)  # .01 + .025  # todo: rate?
            dx = np.linspace(
                origin[0] + layer[0],
                -1 * origin[0] - layer[0],
                int(float(membrane_size[0]) / membrane_particle_size),
                endpoint=True)
            dy = np.linspace(
                origin[1] + layer[1],
                -1 * origin[1] - layer[1],
                int(float(membrane_size[1]) / membrane_particle_size),
                endpoint=True)
            dz = np.linspace(
                origin[2] + layer[2],
                -1 * origin[2] - layer[2],
                int(float(membrane_size[2]) / membrane_particle_size),
                endpoint=True)
            for y in dy:
                for z in dz:
                    simulation.add_particle(
                        "M", Vec(-1 * origin[0] - layer[0], y, z))
            print("done adding membrane particles")
        else:
            simulation.register_reaction_conversion("Phosphorylation", "D_P",
                                                    "D_PB", .5)
            simulation.register_reaction_enzymatic(
                "Enzymatic DP+DPB->DPB + DPB", "D_PB", "D_P", "D_PB", .5, .02)
        using_uniform_distribution = True
        n_minE_particles = 3120
        n_minD_particles = n_minE_particles * 4
        mine_x = np.random.uniform(origin[0] + layer[0],
                                   -1 * origin[0] - layer[0], n_minE_particles)
        mine_y = np.random.uniform(origin[1] + layer[1],
                                   -1 * origin[1] - layer[1], n_minE_particles)
        if using_uniform_distribution:
            mine_z = np.random.uniform(origin[2] + layer[2],
                                       -1 * origin[2] - layer[2],
                                       n_minE_particles)
        else:
            mine_z = np.random.uniform(origin[2] + layer[2],
                                       .5 * (-1 * origin[2] - layer[2]),
                                       n_minE_particles)

        mind_x = np.random.uniform(origin[0] + layer[0],
                                   -1 * origin[0] - layer[0], n_minD_particles)
        mind_y = np.random.uniform(origin[1] + layer[1],
                                   -1 * origin[1] - layer[1], n_minD_particles)
        if using_uniform_distribution:
            mind_z = np.random.uniform(origin[2] + layer[2],
                                       -1 * origin[2] - layer[2],
                                       n_minD_particles)
        else:
            mind_z = np.random.uniform(.5 * (-1 * origin[2] - layer[2]),
                                       -1 * origin[2] - layer[2],
                                       n_minD_particles)

        for i in range(n_minE_particles):
            simulation.add_particle("E", Vec(mine_x[i], mine_y[i], mine_z[i]))

        for i in range(int(.5 * n_minD_particles)):
            simulation.add_particle("D", Vec(mind_x[i], mind_y[i], mind_z[i]))
        for i in range(int(.5 * n_minD_particles), n_minD_particles):
            simulation.add_particle("D_P", Vec(mind_x[i], mind_y[i],
                                               mind_z[i]))

        self.timestep = simulation.get_recommended_time_step(2)

        ###################################
        #
        # register observables
        #
        ###################################

        # simulation.register_observable_center_of_mass(1, self.com_callback_mind, ["D", "D_P", "D_PB"])
        # simulation.register_observable_center_of_mass(1, self.com_callback_mine, ["E"])
        # simulation.register_observable_center_of_mass(1, self.com_callback_minde, ["DE", "D_PB"])
        print("histogram start")
        # simulation.register_observable_histogram_along_axis(100, self.histrogram_callback_minD, np.arange(-3, 3, .1), ["D", "D_P", "D_PB"], 2)
        # simulation.register_observable_histogram_along_axis(100, self.histrogram_callback_minE, np.arange(-3, 3, .1), ["D_PB", "DE"], 2)
        stride = int(.01 / self.timestep)
        self.stride = stride
        print("using stride=%s" % stride)
        bins = np.linspace(-7, 7, 80)
        simulation.register_observable_histogram_along_axis(
            stride, bins, 2, ["D"], self.histogram_callback_minD)
        simulation.register_observable_histogram_along_axis(
            stride, bins, 2, ["D_P"], self.histogram_callback_minDP)
        simulation.register_observable_histogram_along_axis(
            stride, bins, 2, ["D_PB"], self.histogram_callback_minDPB)
        simulation.register_observable_histogram_along_axis(
            stride, bins, 2, ["E"], self.histogram_callback_minE)
        simulation.register_observable_histogram_along_axis(
            stride, bins, 2, ["DE"], self.histogram_callback_minDE)
        simulation.register_observable_histogram_along_axis(
            stride, bins, 2, ["D", "D_P", "D_PB", "DE"],
            self.histogram_callback_M)
        simulation.register_observable_n_particles(
            stride, ["D", "D_P", "D_PB", "E", "DE"], self.n_particles_callback)
        print("histogram end")

        self.n_timesteps = int(1200. / self.timestep)

        print("starting simulation for effectively %s sec" %
              (self.timestep * self.n_timesteps))
        simulation.run_scheme_readdy(True).with_reaction_scheduler(
            "GillespieParallel").configure(self.timestep).run(self.n_timesteps)

        if self._result_fname is not None:
            with open(self._result_fname, 'wb') as f:  # np.save writes binary data
                np.save(f, np.array(self._hist_data))
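
Note on the commented-out brentq call in the script above: it alludes to calibrating the microscopic fusion rate via the Erban-Chapman relation between a macroscopic rate constant and the microscopic rate for a given reaction radius and relative diffusion constant. The helper below is only a sketch of how such a calibration could look; the function name, argument order and bracketing interval mirror the commented-out line, but they are assumptions and not part of the original script.

import numpy as np
from scipy.optimize import brentq


def erban_chapman(k, D, R, kappa):
    # Residual of the Erban-Chapman relation
    #   k = 4*pi*D*R * (1 - tanh(R*sqrt(kappa/D)) / (R*sqrt(kappa/D))),
    # where k is the macroscopic rate constant, D the relative diffusion
    # constant, R the reaction radius and kappa the microscopic rate.
    s = np.sqrt(kappa / D) * R
    return k - 4. * np.pi * D * R * (1. - np.tanh(s) / s)


# Assumed usage, mirroring the commented-out line in the script above:
reaction_radius = 4 * (0.01 + 0.01)
k_fusion = brentq(lambda x: erban_chapman(.093, 2.5 + .01, reaction_radius, x),
                  1, 5000000)
print("calibrated k_fusion = %s" % k_fusion)
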
Code example #36
rdf, centers, n_calls = None, None, 0
T = 10000000  # total number of time steps; value assumed here only to make this fragment runnable


def rdf_callback(pair):
    global rdf, centers, n_calls, T
    if centers is None:
        centers = pair[0]
    if rdf is None:
        rdf = pair[1]
    else:
        rdf += pair[1]
    n_calls += 1
    if n_calls % 10000 == 0:
        print("%s" % (10. * float(n_calls) / float(T)))


if __name__ == '__main__':
    KernelProvider.get().load_from_dir(platform_utils.get_readdy_plugin_dir())
    simulation = Simulation()
    simulation.set_kernel("CPU")

    box_size = Vec(10, 10, 10)
    simulation.kbt = 2
    simulation.periodic_boundary = [True, True, True]
    simulation.box_size = box_size
    simulation.register_particle_type("A", .2, 1.)
    simulation.register_particle_type("B", .2, 1.)
    simulation.register_potential_harmonic_repulsion("A", "B", 10)
    simulation.add_particle("A", Vec(-2.5, 0, 0))
    simulation.add_particle("B", Vec(0, 0, 0))

    simulation.register_observable_radial_distribution(
        10, np.arange(0, 5, .01), ["A"], ["B"],
        1. / (box_size[0] * box_size[1] * box_size[2]), rdf_callback)
    simulation.run(T, 0.02)
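
The fragment above accumulates radial distribution data in module-level globals. For reference, the same accumulate-and-average pattern can be written as a small self-contained callable; the class below is purely illustrative and not part of the original script. An instance of it could be passed to register_observable_radial_distribution in place of rdf_callback.

import numpy as np


class RdfAccumulator(object):
    # Collects the (bin_centers, g_of_r) pairs delivered by the radial
    # distribution observable and averages them over all evaluations.
    def __init__(self):
        self.centers = None
        self.rdf = None
        self.n_calls = 0

    def __call__(self, pair):
        centers, g = pair
        if self.centers is None:
            self.centers = np.asarray(centers)
        if self.rdf is None:
            self.rdf = np.asarray(g, dtype=float)
        else:
            self.rdf += np.asarray(g, dtype=float)
        self.n_calls += 1

    def mean(self):
        return self.rdf / self.n_calls
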
Code example #37
    def test_reactions_observable(self):
        fname = os.path.join(self.dir,
                             "test_observables_particle_reactions.h5")
        sim = Simulation()
        sim.set_kernel("CPU")
        sim.box_size = common.Vec(10, 10, 10)
        sim.register_particle_type("A", .0)
        sim.register_particle_type("B", .0)
        sim.register_particle_type("C", .0)
        sim.register_reaction_conversion("mylabel", "A", "B", .00001)
        sim.register_reaction_conversion("A->B", "A", "B", 1.)
        sim.register_reaction_fusion("B+C->A", "B", "C", "A", 1.0, 1.0, .5, .5)
        sim.add_particle("A", common.Vec(0, 0, 0))
        sim.add_particle("B", common.Vec(1.0, 1.0, 1.0))
        sim.add_particle("C", common.Vec(1.1, 1.0, 1.0))

        n_timesteps = 1

        handle = sim.register_observable_reactions(1)
        with closing(io.File.create(fname)) as f:
            handle.enable_write_to_file(f, u"reactions", int(3))
            sim.run_scheme_readdy(True).write_config_to_file(
                f).configure_and_run(n_timesteps, 1)

        type_str_to_id = ioutils.get_particle_types(fname)

        with h5py.File(fname, "r") as f2:
            data = f2["readdy/observables/reactions"]
            time_series = f2["readdy/observables/reactions/time"]
            np.testing.assert_equal(time_series,
                                    np.array(range(0, n_timesteps + 1)))

            def get_item(name, collection):
                return next(x for x in collection if x["name"] == name)

            import readdy.util.io_utils as io_utils
            reactions = io_utils.get_reactions(fname)

            mylabel_reaction = get_item("mylabel", reactions.values())
            np.testing.assert_allclose(mylabel_reaction["rate"], .00001)
            np.testing.assert_equal(mylabel_reaction["n_educts"], 1)
            np.testing.assert_equal(mylabel_reaction["n_products"], 1)
            np.testing.assert_equal(mylabel_reaction["educt_types"],
                                    [type_str_to_id["A"], 0])
            np.testing.assert_equal(mylabel_reaction["product_types"],
                                    [type_str_to_id["B"], 0])
            atob_reaction = get_item("A->B", reactions.values())
            np.testing.assert_equal(atob_reaction["rate"], 1.)
            np.testing.assert_equal(atob_reaction["n_educts"], 1)
            np.testing.assert_equal(atob_reaction["n_products"], 1)
            np.testing.assert_equal(mylabel_reaction["educt_types"],
                                    [type_str_to_id["A"], 0])
            np.testing.assert_equal(mylabel_reaction["product_types"],
                                    [type_str_to_id["B"], 0])

            fusion_reaction = get_item("B+C->A", reactions.values())
            np.testing.assert_equal(fusion_reaction["rate"], 1.)
            np.testing.assert_equal(fusion_reaction["educt_distance"], 1.)
            np.testing.assert_equal(fusion_reaction["n_educts"], 2)
            np.testing.assert_equal(fusion_reaction["n_products"], 1)
            np.testing.assert_equal(fusion_reaction["educt_types"],
                                    [type_str_to_id["B"], type_str_to_id["C"]])
            np.testing.assert_equal(fusion_reaction["product_types"],
                                    [type_str_to_id["A"], 0])

            records = data["records"][:]
            np.testing.assert_equal(len(records), 2)
            # records of 1st time step
            for record in records[1]:
                np.testing.assert_equal(
                    record["reaction_type"] == 0
                    or record["reaction_type"] == 1, True)
                if record["reaction_type"] == 0:
                    np.testing.assert_equal(record["position"],
                                            np.array([.0, .0, .0]))
                    np.testing.assert_equal(record["reaction_id"],
                                            atob_reaction["id"])
                elif record["reaction_type"] == 1:
                    # fusion
                    np.testing.assert_allclose(record["position"],
                                               np.array([1.05, 1.0, 1.0]))
                    np.testing.assert_equal(record["reaction_id"],
                                            fusion_reaction["id"])
Code example #38
File: schnakenberg.py  Project: marscher/readdy
    def run(self):

        ###################################
        #
        # Units:
        #   - [t] = sec
        #
        ###################################

        tau = 1e-3

        kernel_provider = KernelProvider.get()
        kernel_provider.load_from_dir(platform_utils.get_readdy_plugin_dir())
        simulation = Simulation()
        simulation.set_kernel("CPU")
        simulation.kbt = 1.0
        simulation.box_size = Vec(3, 3, 3)
        simulation.periodic_boundary = [True, True, True]

        D = 1.0
        R = .01
        simulation.register_particle_type("A", D, R)
        simulation.register_particle_type("2A", D, R)
        simulation.register_particle_type("3A", D, R)
        simulation.register_particle_type("B", D, R)
        simulation.register_particle_type("GA", D, R)
        simulation.register_particle_type("GB", D, R)

        reaction_radius = .2
        k1 = 4 * 1e-5
        k2 = 50
        k3 = 10
        k4 = 250

        V = simulation.box_size[0] * simulation.box_size[1] * simulation.box_size[2]  # box volume

        N_GA = 100.0
        N_GB = 1000.0

        # k_enzymatic = brentq(lambda x: erban_chapman(k1, 2, reaction_radius, x), 1e-10, 5000000000000)
        k_enzymatic = 10.  # k_enzymatic for reaction_radius = .1
        print("k_enzymatic=%s" % k_enzymatic)
        print("2 * k3 - k3 * k3 * tau = %s" % (2.0 * k3 - k3 * k3 * tau))
        print("k3 * k3 * tau = %s" % (k3 * k3 * tau))
        print("k_birthA = %s" % (k2 * V / N_GA))
        print("k_birthB = %s" % (k4 * V / N_GB))
        print("sqrt(R*R/D) = %s" % (np.sqrt(reaction_radius * reaction_radius / D)))

        simulation.register_reaction_conversion("2A -> A", "2A", "A", 2.0 * k3 - k3 * k3 * tau)
        simulation.register_reaction_decay("2A -> 0", "2A", k3 * k3 * tau)
        simulation.register_reaction_fusion("A + A -> 2A", "A", "A", "2A", 1.0 / tau, reaction_radius, .5, .5)
        simulation.register_reaction_enzymatic("2A + B -> 2A + A", "2A", "B", "A", k_enzymatic, reaction_radius)
        simulation.register_reaction_fission("GA -> GA + A", "GA", "GA", "A", k2 * V / N_GA, reaction_radius, .5, .5)
        simulation.register_reaction_decay("A -> 0", "A", k3)
        simulation.register_reaction_fission("GB -> GB + B", "GB", "GB", "B", k4 * V / N_GB, reaction_radius, .5, .5)

        simulation.add_particle("A", Vec(0, 0, 0))

        ga_x = np.random.uniform(-0.5 * simulation.box_size[0], 0.5 * simulation.box_size[0], int(N_GA))
        ga_y = np.random.uniform(-0.5 * simulation.box_size[1], 0.5 * simulation.box_size[1], int(N_GA))
        ga_z = np.random.uniform(-0.5 * simulation.box_size[2], 0.5 * simulation.box_size[2], int(N_GA))

        gb_x = np.random.uniform(-0.5 * simulation.box_size[0], 0.5 * simulation.box_size[0], int(N_GB))
        gb_y = np.random.uniform(-0.5 * simulation.box_size[1], 0.5 * simulation.box_size[1], int(N_GB))
        gb_z = np.random.uniform(-0.5 * simulation.box_size[2], 0.5 * simulation.box_size[2], int(N_GB))

        for i in range(int(N_GA)):
            simulation.add_particle("GA", Vec(ga_x[i], ga_y[i], ga_z[i]))
        for i in range(int(N_GB)):
            simulation.add_particle("GB", Vec(gb_x[i], gb_y[i], gb_z[i]))

        N_B = 30000
        boxsize = np.array([simulation.box_size[0], simulation.box_size[1], simulation.box_size[2]])
        for i in range(N_B):
            pos = np.random.random(3) * boxsize - 0.5 * boxsize
            simulation.add_particle("B", Vec(pos[0], pos[1], pos[2]))

        def callback(result):
            n_a, n_b = result[0] + 2 * result[1] + 3 * result[2], result[3]
            self._data[self._t, 0] = n_a
            self._data[self._t, 1] = n_b
            print("(%s,%s,a=%s,a2=%s,a3=%s)"%(n_a, n_b,result[0], result[1], result[2]))

            self.lines.set_xdata(self._data[:self._t, 0])
            self.lines.set_ydata(self._data[:self._t, 1])
            self.ax.relim()
            self.ax.autoscale_view()
            #We need to draw *and* flush
            self.fig.canvas.draw()
            self.fig.canvas.flush_events()

            plt.pause(.1)
            self._t += 1

        simulation.register_observable_n_particles_types(1, ["A", "2A", "3A", "B"], callback)
        print("start run")
        simulation.run(self._timesteps, tau)
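
For reference, the rate constants printed by the script above follow directly from its parameters; the short check below (not part of the original script) reproduces them for the cubic 3 x 3 x 3 box.

tau, k2, k3, k4 = 1e-3, 50, 10, 250
V = 3 * 3 * 3                      # volume of the cubic simulation box
N_GA, N_GB = 100.0, 1000.0

print(2.0 * k3 - k3 * k3 * tau)    # conversion rate 2A -> A : 19.9
print(k3 * k3 * tau)               # decay rate 2A -> 0      : 0.1
print(k2 * V / N_GA)               # birth rate per GA       : 13.5
print(k4 * V / N_GB)               # birth rate per GB       : 6.75
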
Code example #39
    def test_reactions_observable(self):
        fname = os.path.join(self.dir, "test_observables_particle_reactions.h5")
        sim = Simulation()
        sim.set_kernel("CPU")
        sim.box_size = common.Vec(10, 10, 10)
        sim.register_particle_type("A", .0)
        sim.register_particle_type("B", .0)
        sim.register_particle_type("C", .0)
        sim.register_reaction_conversion("mylabel", "A", "B", .00001)
        sim.register_reaction_conversion("A->B", "A", "B", 1.)
        sim.register_reaction_fusion("B+C->A", "B", "C", "A", 1.0, 1.0, .5, .5)
        sim.add_particle("A", common.Vec(0, 0, 0))
        sim.add_particle("B", common.Vec(1.0, 1.0, 1.0))
        sim.add_particle("C", common.Vec(1.1, 1.0, 1.0))

        n_timesteps = 1

        handle = sim.register_observable_reactions(1)
        with closing(io.File.create(fname)) as f:
            handle.enable_write_to_file(f, u"reactions", int(3))
            sim.run_scheme_readdy(True).write_config_to_file(f).configure_and_run(n_timesteps, 1)

        type_str_to_id = ioutils.get_particle_types(fname)

        with h5py.File(fname, "r") as f2:
            data = f2["readdy/observables/reactions"]
            time_series = f2["readdy/observables/reactions/time"]
            np.testing.assert_equal(time_series, np.array(range(0, n_timesteps+1)))

            def get_item(name, collection):
                return next(x for x in collection if x["name"] == name)

            import readdy.util.io_utils as io_utils
            reactions = io_utils.get_reactions(fname)

            mylabel_reaction = get_item("mylabel", reactions.values())
            np.testing.assert_allclose(mylabel_reaction["rate"], .00001)
            np.testing.assert_equal(mylabel_reaction["n_educts"], 1)
            np.testing.assert_equal(mylabel_reaction["n_products"], 1)
            np.testing.assert_equal(mylabel_reaction["educt_types"], [type_str_to_id["A"], 0])
            np.testing.assert_equal(mylabel_reaction["product_types"], [type_str_to_id["B"], 0])
            atob_reaction = get_item("A->B", reactions.values())
            np.testing.assert_equal(atob_reaction["rate"], 1.)
            np.testing.assert_equal(atob_reaction["n_educts"], 1)
            np.testing.assert_equal(atob_reaction["n_products"], 1)
            np.testing.assert_equal(mylabel_reaction["educt_types"], [type_str_to_id["A"], 0])
            np.testing.assert_equal(mylabel_reaction["product_types"], [type_str_to_id["B"], 0])

            fusion_reaction = get_item("B+C->A", reactions.values())
            np.testing.assert_equal(fusion_reaction["rate"], 1.)
            np.testing.assert_equal(fusion_reaction["educt_distance"], 1.)
            np.testing.assert_equal(fusion_reaction["n_educts"], 2)
            np.testing.assert_equal(fusion_reaction["n_products"], 1)
            np.testing.assert_equal(fusion_reaction["educt_types"], [type_str_to_id["B"], type_str_to_id["C"]])
            np.testing.assert_equal(fusion_reaction["product_types"], [type_str_to_id["A"], 0])

            records = data["records"][:]
            np.testing.assert_equal(len(records), 2)
            # records of 1st time step
            for record in records[1]:
                np.testing.assert_equal(record["reaction_type"] == 0 or record["reaction_type"] == 1, True)
                if record["reaction_type"] == 0:
                    np.testing.assert_equal(record["position"], np.array([.0, .0, .0]))
                    np.testing.assert_equal(record["reaction_id"], atob_reaction["id"])
                elif record["reaction_type"] == 1:
                    # fusion
                    np.testing.assert_allclose(record["position"], np.array([1.05, 1.0, 1.0]))
                    np.testing.assert_equal(record["reaction_id"], fusion_reaction["id"])
Code example #40
class TwoParticlesMiniExample(object):
    def __init__(self):
        KernelProvider.get().load_from_dir(
            platform_utils.get_readdy_plugin_dir())
        self.simulation = Simulation()
        self.simulation.set_kernel("CPU")

        self.fig = plt.figure()
        self.ax = self.fig.add_subplot(111, projection='3d')
        plt.ioff()
        self.fig.show()
        self.prev_pos = {}
        self.current_plot = None

        self.T = 4000000

    def ppos_callback(self, pos):
        plt.cla()
        self.ax.set_xlim([-1, 1])
        self.ax.set_ylim([-1, 1])
        self.ax.set_zlim([-1, 1])
        r = [-.75, .75]
        for s, e in combinations(np.array(list(product(r, r, r))), 2):
            if np.sum(np.abs(s - e)) == r[1] - r[0]:
                self.ax.plot3D(*zip(s, e), color="b")
        pA = self.simulation.get_particle_positions("A")
        pB = self.simulation.get_particle_positions("B")
        if len(pA) == 1 and len(pB) == 1:
            A = pA[0]
            B = pB[0]
            self.ax.scatter([A[0]], [A[1]], [A[2]], color="g", s=100)
            self.ax.scatter([B[0]], [B[1]], [B[2]], color="r", s=100)
            self.ax.plot3D([A[0], B[0]], [A[1], B[1]], [A[2], B[2]], color="r")
        pC = self.simulation.get_particle_positions("C")
        if len(pC) == 1:
            C = pC[0]
            self.ax.scatter([C[0]], [C[1]], [C[2]], color="b", s=100)
        plt.pause(.001)

    def start(self):
        box_size = Vec(2.0, 2.0, 2.0)
        depth = 2.
        desired_dist = .25
        force_constant = 4 * depth / (desired_dist * desired_dist)
        no_interaction_dist = 1.5
        self.simulation.kbt = 0.01
        self.simulation.periodic_boundary = [False, False, False]
        self.simulation.box_size = box_size
        self.simulation.register_particle_type("A", .1, .1)
        self.simulation.register_particle_type("B", .01, .1)
        self.simulation.register_particle_type("C", .5, .1)
        self.simulation.register_potential_piecewise_weak_interaction(
            "A", "B", force_constant, desired_dist, depth, no_interaction_dist
        )  # (force constant, desired dist, depth, no interaction dist)
        self.simulation.register_reaction_fusion("fusion", "A", "B", "C",
                                                 1000., .3, .5, .5)
        self.simulation.register_reaction_fission("fission", "C", "A", "B",
                                                  1000., .25, .5, .5)
        self.simulation.register_potential_box("A", 100.,
                                               Vec(-.75, -.75, -.75),
                                               Vec(1.5, 1.5, 1.5), False)
        self.simulation.register_potential_box("B", 100.,
                                               Vec(-.75, -.75, -.75),
                                               Vec(1.5, 1.5, 1.5), False)
        self.simulation.register_potential_box("C", 100.,
                                               Vec(-.75, -.75, -.75),
                                               Vec(1.5, 1.5, 1.5), False)
        self.simulation.add_particle("A", Vec(-.0, -.0, -.0))
        self.simulation.add_particle("B", Vec(0.1, 0.1, 0.1))
        self.simulation.register_observable_particle_positions(
            1, [], self.ppos_callback)

        self.simulation.run(self.T, .0001)
Code example #41
    def test_reaction_counts_observable(self):
        common.set_logging_level("warn")
        fname = os.path.join(self.dir,
                             "test_observables_particle_reaction_counts.h5")
        sim = Simulation()
        sim.set_kernel("CPU")
        sim.box_size = common.Vec(10, 10, 10)
        sim.register_particle_type("A", .0, 5.0)
        sim.register_particle_type("B", .0, 6.0)
        sim.register_particle_type("C", .0, 6.0)
        sim.register_reaction_conversion("mylabel", "A", "B", .00001)
        sim.register_reaction_conversion("A->B", "A", "B", 1.)
        sim.register_reaction_fusion("B+C->A", "B", "C", "A", 1.0, 1.0, .5, .5)
        sim.add_particle("A", common.Vec(0, 0, 0))
        sim.add_particle("B", common.Vec(1.0, 1.0, 1.0))
        sim.add_particle("C", common.Vec(1.1, 1.0, 1.0))

        n_timesteps = 1
        handle = sim.register_observable_reaction_counts(1)
        with closing(
                io.File(fname, io.FileAction.CREATE,
                        io.FileFlag.OVERWRITE)) as f:
            handle.enable_write_to_file(f, u"reactions", int(3))
            sim.run_scheme_readdy(True).write_config_to_file(
                f).with_reaction_scheduler(
                    "GillespieParallel").configure_and_run(n_timesteps, 1)

        with h5py.File(fname, "r") as f2:
            data = f2["readdy/observables/reactions"]
            time_series = f2["readdy/observables/reactions/time"]
            np.testing.assert_equal(time_series,
                                    np.array(range(0, n_timesteps + 1)))

            def get_item(name, collection):
                return next(x for x in collection if x["name"] == name)

            order_1_reactions = data["registered_reactions/order1_reactions"]
            order_2_reactions = data["registered_reactions/order2_reactions"]

            mylabel_reaction = get_item("mylabel", order_1_reactions)
            reaction_idx_mylabel = mylabel_reaction["index"]
            atob_reaction = get_item("A->B", order_1_reactions)
            reaction_idx_atob = atob_reaction["index"]

            # counts of first time step, time is first index
            np.testing.assert_equal(
                data["counts/order1/A[id=0]"][0, reaction_idx_mylabel],
                np.array([0]))
            np.testing.assert_equal(
                data["counts/order1/A[id=0]"][0, reaction_idx_atob],
                np.array([0]))
            np.testing.assert_equal(
                data["counts/order2/B[id=1] + C[id=2]"][0, 0], np.array([0]))
            # counts of second time step
            np.testing.assert_equal(
                data["counts/order1/A[id=0]"][1, reaction_idx_mylabel],
                np.array([0]))
            np.testing.assert_equal(
                data["counts/order1/A[id=0]"][1, reaction_idx_atob],
                np.array([1]))
            np.testing.assert_equal(
                data["counts/order2/B[id=1] + C[id=2]"][1, 0], np.array([1]))

        common.set_logging_level("warn")
Code example #42
class TwoParticlesMiniExample(object):
    def __init__(self):
        KernelProvider.get().load_from_dir(platform_utils.get_readdy_plugin_dir())
        self.simulation = Simulation()
        self.simulation.set_kernel("CPU")

        self.fig = plt.figure()
        self.ax = self.fig.add_subplot(111, projection='3d')
        plt.ioff()
        self.fig.show()
        self.prev_pos = {}
        self.current_plot = None

        self.T = 4000000

    def ppos_callback(self, pos):
        plt.cla()
        self.ax.set_xlim([-1, 1])
        self.ax.set_ylim([-1, 1])
        self.ax.set_zlim([-1, 1])
        r = [-.75, .75]
        for s, e in combinations(np.array(list(product(r, r, r))), 2):
            if np.sum(np.abs(s - e)) == r[1] - r[0]:
                self.ax.plot3D(*zip(s, e), color="b")
        pA = self.simulation.get_particle_positions("A")
        pB = self.simulation.get_particle_positions("B")
        if len(pA) == 1 and len(pB) == 1:
            A = pA[0]; B = pB[0]
            self.ax.scatter([A[0]], [A[1]], [A[2]], color="g", s=100)
            self.ax.scatter([B[0]], [B[1]], [B[2]], color="r", s=100)
            self.ax.plot3D([A[0], B[0]], [A[1], B[1]], [A[2], B[2]], color="r")
        pC = self.simulation.get_particle_positions("C")
        if len(pC) == 1:
            C = pC[0]
            self.ax.scatter([C[0]], [C[1]], [C[2]], color="b", s=100)
        plt.pause(.001)

    def start(self):
        box_size = Vec(2.0, 2.0, 2.0)
        depth = 2.
        desired_dist = .25
        force_constant = 4 * depth / (desired_dist * desired_dist)
        no_interaction_dist = 1.5
        self.simulation.kbt = 0.01
        self.simulation.periodic_boundary = [False, False, False]
        self.simulation.box_size = box_size
        self.simulation.register_particle_type("A", .1, .1)
        self.simulation.register_particle_type("B", .01, .1)
        self.simulation.register_particle_type("C", .5, .1)
        self.simulation.register_potential_piecewise_weak_interaction("A", "B", force_constant, desired_dist, depth,
                                                                          no_interaction_dist)  # (force constant, desired dist, depth, no interaction dist)
        self.simulation.register_reaction_fusion("fusion", "A", "B", "C", 1000., .3, .5, .5)
        self.simulation.register_reaction_fission("fission", "C", "A", "B", 1000., .25, .5, .5)
        self.simulation.register_potential_box("A", 100., Vec(-.75, -.75, -.75), Vec(1.5, 1.5, 1.5), False)
        self.simulation.register_potential_box("B", 100., Vec(-.75, -.75, -.75), Vec(1.5, 1.5, 1.5), False)
        self.simulation.register_potential_box("C", 100., Vec(-.75, -.75, -.75), Vec(1.5, 1.5, 1.5), False)
        self.simulation.add_particle("A", Vec(-.0, -.0, -.0))
        self.simulation.add_particle("B", Vec(0.1, 0.1, 0.1))
        self.simulation.register_observable_particle_positions(1, [], self.ppos_callback)

        self.simulation.run(self.T, .0001)
Code example #43
File: min-e_min-d.py  Project: readdy/readdy
    def execute(self):
        ###################################
        #
        # Units:
        #   - [x] = µm
        #   - [t] = s
        #   - [E] = kJ/mol
        #
        ###################################

        kernel_provider = KernelProvider.get()
        kernel_provider.load_from_dir(platform_utils.get_readdy_plugin_dir())
        simulation = Simulation()
        simulation.set_kernel("CPU")

        ###################################
        #
        # set up simulation box
        #
        ###################################

        box_size = Vec(2, 7, 12)
        simulation.box_size = box_size
        simulation.kbt = 2.437  # room temperature
        simulation.periodic_boundary = [False, False, False]

        ###################################
        #
        # register particle types
        #
        ###################################

        # particle size, see: http://bmccellbiol.biomedcentral.com/articles/10.1186/1471-2121-5-29
        # "The size of the V-ATPase complex is about 15 nm (diameter) x 25 nm (length from lumen side to tip of head)"

        membrane_particle_size = .05

        diffusion_factor = .5
        simulation.register_particle_type("D", 2.5 * diffusion_factor, .01)  # MinD-ADP (without phosphor)
        simulation.register_particle_type("D_P", 2.5 * diffusion_factor, .01)  # MinD-ATP (with phosphor)
        simulation.register_particle_type("E", 2.5 * diffusion_factor, .01)  # MinE
        simulation.register_particle_type("D_PB", .01 * diffusion_factor, .01)  # MinD-ATP bound
        simulation.register_particle_type("DE", .01 * diffusion_factor, .01)  # MinDE

        ###################################
        #
        # register reaction types
        #
        ###################################

        reaction_radius = 4 * (0.01 + 0.01)  # = 4 * sum of the particle radii (magic factor so that k_fusion makes sense, sort of)
        # k_fusion = brentq(lambda x: self.erban_chapman(.093, 2.5 + .01, reaction_radius, x), 1, 5000000)
        k_fusion = 1.0
        print("k_fusion=%s" % k_fusion)
        simulation.register_reaction_conversion("Phosphorylation", "D", "D_P", .5)
        simulation.register_reaction_fusion("bound MinD+MinE->MinDE", "D_PB", "E", "DE", k_fusion, reaction_radius*3.5, .5, .5)
        simulation.register_reaction_fission("MinDE to MinD and MinE, detach", "DE", "D", "E", .25, reaction_radius, .5, .5)

        ###################################
        #
        # register potentials
        #
        ###################################

        membrane_size = Vec(.5, 5, 10)
        layer = Vec(.08, .08, .08)
        extent = membrane_size + 2 * layer
        origin = -.5 * membrane_size - layer
        simulation.register_potential_box("D", 10., origin, extent, False)  # (force constant, origin, extent, considerParticleRadius)
        simulation.register_potential_box("D_P", 10., origin, extent, False)  # (force constant, origin, extent, considerParticleRadius)
        simulation.register_potential_box("D_PB", 10., origin, extent, False)  # (force constant, origin, extent, considerParticleRadius)
        simulation.register_potential_box("E", 10., origin, extent, False)  # (force constant, origin, extent, considerParticleRadius)
        simulation.register_potential_box("DE", 10., origin, extent, False)  # (force constant, origin, extent, considerParticleRadius)

        # simulation.register_potential_piecewise_weak_interaction("D_P", "D_PB", 3, .02, 2, .05)  # (force constant, desired dist, depth, no interaction dist)

        ###################################
        #
        # membrane particles
        #
        ###################################
        using_membrane_particles = False
        if using_membrane_particles:
            simulation.register_particle_type("M", 0, membrane_particle_size)  # membrane particle
            simulation.register_reaction_enzymatic("Attach to membrane", "M", "D_P", "D_PB", .5, .01 + membrane_particle_size)  # .01 + .025  # todo: rate?
            dx = np.linspace(origin[0] + layer[0], -1 * origin[0] - layer[0], int(float(membrane_size[0]) / membrane_particle_size), endpoint=True)
            dy = np.linspace(origin[1] + layer[1], -1 * origin[1] - layer[1], int(float(membrane_size[1]) / membrane_particle_size), endpoint=True)
            dz = np.linspace(origin[2] + layer[2], -1 * origin[2] - layer[2], int(float(membrane_size[2]) / membrane_particle_size), endpoint=True)
            for y in dy:
                for z in dz:
                    simulation.add_particle("M", Vec(-1 * origin[0] - layer[0], y, z))
            print("done adding membrane particles")
        else:
            simulation.register_reaction_conversion("Phosphorylation", "D_P", "D_PB", .5)
            simulation.register_reaction_enzymatic("Enzymatic DP+DPB->DPB + DPB", "D_PB", "D_P", "D_PB", .5, .02)
        using_uniform_distribution = True
        n_minE_particles = 3120
        n_minD_particles = n_minE_particles * 4
        mine_x = np.random.uniform(origin[0] + layer[0], -1 * origin[0] - layer[0], n_minE_particles)
        mine_y = np.random.uniform(origin[1] + layer[1], -1 * origin[1] - layer[1], n_minE_particles)
        if using_uniform_distribution:
            mine_z = np.random.uniform(origin[2] + layer[2], -1 * origin[2] - layer[2], n_minE_particles)
        else:
            mine_z = np.random.uniform(origin[2] + layer[2], .5 * (-1 * origin[2] - layer[2]), n_minE_particles)

        mind_x = np.random.uniform(origin[0] + layer[0], -1 * origin[0] - layer[0], n_minD_particles)
        mind_y = np.random.uniform(origin[1] + layer[1], -1 * origin[1] - layer[1], n_minD_particles)
        if using_uniform_distribution:
            mind_z = np.random.uniform(origin[2] + layer[2], -1 * origin[2] - layer[2], n_minD_particles)
        else:
            mind_z = np.random.uniform(.5 * (-1 * origin[2] - layer[2]), -1 * origin[2] - layer[2], n_minD_particles)

        for i in range(n_minE_particles):
            simulation.add_particle("E", Vec(mine_x[i], mine_y[i], mine_z[i]))

        for i in range(int(.5 * n_minD_particles)):
            simulation.add_particle("D", Vec(mind_x[i], mind_y[i], mind_z[i]))
        for i in range(int(.5 * n_minD_particles), n_minD_particles):
            simulation.add_particle("D_P", Vec(mind_x[i], mind_y[i], mind_z[i]))

        self.timestep = simulation.get_recommended_time_step(2)

        ###################################
        #
        # register observables
        #
        ###################################

        # simulation.register_observable_center_of_mass(1, self.com_callback_mind, ["D", "D_P", "D_PB"])
        # simulation.register_observable_center_of_mass(1, self.com_callback_mine, ["E"])
        # simulation.register_observable_center_of_mass(1, self.com_callback_minde, ["DE", "D_PB"])
        print("histogram start")
        # simulation.register_observable_histogram_along_axis(100, self.histrogram_callback_minD, np.arange(-3, 3, .1), ["D", "D_P", "D_PB"], 2)
        # simulation.register_observable_histogram_along_axis(100, self.histrogram_callback_minE, np.arange(-3, 3, .1), ["D_PB", "DE"], 2)
        stride = int(.01/self.timestep)
        self.stride = stride
        print("using stride=%s" % stride)
        bins = np.linspace(-7, 7, 80)
        simulation.register_observable_histogram_along_axis(stride, bins, 2, ["D"], self.histogram_callback_minD)
        simulation.register_observable_histogram_along_axis(stride, bins, 2, ["D_P"], self.histogram_callback_minDP)
        simulation.register_observable_histogram_along_axis(stride, bins, 2, ["D_PB"], self.histogram_callback_minDPB)
        simulation.register_observable_histogram_along_axis(stride, bins, 2, ["E"], self.histogram_callback_minE)
        simulation.register_observable_histogram_along_axis(stride, bins, 2, ["DE"], self.histogram_callback_minDE)
        simulation.register_observable_histogram_along_axis(stride, bins, 2, ["D", "D_P", "D_PB", "DE"], self.histogram_callback_M)
        simulation.register_observable_n_particles(stride, ["D", "D_P", "D_PB", "E", "DE"], self.n_particles_callback)
        print("histogram end")

        self.n_timesteps = int(1200./self.timestep)

        print("starting simulation for effectively %s sec" % (self.timestep * self.n_timesteps))
        simulation.run_scheme_readdy(True).with_reaction_scheduler("GillespieParallel").configure(self.timestep).run(self.n_timesteps)

        if self._result_fname is not None:
            with open(self._result_fname, 'wb') as f:  # np.save writes binary data
                np.save(f, np.array(self._hist_data))