Code Example #1
def xyz2dalton_from_ccdata(atomnos, atomcoords, totalcharge=0):
    """Given NumPy arrays of atomic numbers (shape [natom,]) and atomic
    coordinates (shape [natom, 3]), format the file contents into a section
    suitable for DALTON's MOLECULE input section.
    """

    from itertools import count as counter
    from periodic_table import Element

    outfilelines = []
    atomtypes = 0
    atomsymbols = [Element[atomnum] for atomnum in atomnos]
    oldcharge = ''
    count = 0
    for i, s, n, c in zip(counter(start=1), atomsymbols, atomnos, atomcoords):
        newcharge = n
        if newcharge != oldcharge and i > 1:
            atom_section_header = f'Charge={oldcharge:.1f} Atoms={count}'
            outfilelines.insert(len(outfilelines) - count, atom_section_header)
            count = 0
            atomtypes += 1
        line = f'{s} {c[0]:20.12f} {c[1]:20.12f} {c[2]:20.12f}'
        outfilelines.append(line)
        count += 1
        oldcharge = newcharge
    atom_section_header = f'Charge={oldcharge:.1f} Atoms={count}'
    outfilelines.insert(len(outfilelines) - count, atom_section_header)
    atomtypes += 1
    mol_section_header = f'Atomtypes={atomtypes} Angstrom Charge={totalcharge} Nosymmetry'
    outfilelines.insert(0, mol_section_header)

    return '\n'.join(outfilelines)
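
A minimal usage sketch, assuming the project's periodic_table module is importable:

import numpy as np

# Water: one oxygen followed by two hydrogens.
atomnos = np.array([8, 1, 1])
atomcoords = np.array([[0.000,  0.000,  0.117],
                       [0.000,  0.757, -0.467],
                       [0.000, -0.757, -0.467]])
print(xyz2dalton_from_ccdata(atomnos, atomcoords))
# Atomtypes=2 Angstrom Charge=0 Nosymmetry
# Charge=8.0 Atoms=1
# O  ...
# Charge=1.0 Atoms=2
# H  ...
# H  ...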
Code Example #2
File: xyz2dalton.py Project: berquist/scripts
def xyz2dalton_from_splitlines(xyzfile_splitlines, totalcharge=0):
    """Given a list of lines from an XYZ file (not the # of atoms or
    comment lines!), format the file contents into a section suitable for
    DALTON's MOLECULE input section.
    """

    from itertools import count as counter
    from periodic_table import AtomicNum

    outfilelines = []
    atomtypes = 0
    lines = [line for line in xyzfile_splitlines if line.strip() != '']
    atomsymbols = [line.split()[0] for line in lines]
    atomnums = [float(AtomicNum[symbol]) for symbol in atomsymbols]
    oldcharge = ''
    count = 0
    for i, atomnum, line in zip(counter(start=1), atomnums, lines):
        newcharge = atomnum
        if newcharge != oldcharge and i > 1:
            atom_section_header = 'Charge={charge} Atoms={count}'.format(charge=oldcharge, count=count)
            outfilelines.insert(len(outfilelines) - count,
                                atom_section_header)
            count = 0
            atomtypes += 1
        outfilelines.append(line)
        count += 1
        oldcharge = newcharge
    atom_section_header = 'Charge={charge} Atoms={count}'.format(charge=oldcharge, count=count)
    outfilelines.insert(len(outfilelines) - count,
                        atom_section_header)
    atomtypes += 1
    mol_section_header = 'Atomtypes={atomtypes} Angstrom Charge={totalcharge} Nosymmetry'.format(atomtypes=atomtypes, totalcharge=totalcharge)
    outfilelines.insert(0, mol_section_header)

    return '\n'.join(outfilelines)
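
A minimal usage sketch, assuming a standard XYZ file (a hypothetical water.xyz) whose first two lines are the atom count and a comment:

with open('water.xyz') as xyzfile:
    # Drop the atom-count and comment lines; pass only the coordinate lines.
    coordinate_lines = xyzfile.read().splitlines()[2:]
print(xyz2dalton_from_splitlines(coordinate_lines, totalcharge=0))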
Code Example #3
File: modcollection.py Project: Kf4btg/SkyModMan
    def exec_move(self, first, last, split):
        """Can it really be this simple? A move is just swapping the
        position of 2 sub-lists. Call with the values returned by
        ``prepare_move()``

        Doing it this way requires a bit more work on each operation
        (the sub-list is pulled from the collection on each run: O(k)),
        but greatly reduces the amount of information we have to store.

        :return: value of 'split' parameter for reversing the move
        """

        # [8 9 | 10 11 12] => f=8, l=13, s=2
        #   |
        #   V
        # [10 11 12 | 8 9] => new s=3 (13-8-2)
        #
        #
        # [10 11 12 13 14 15 | 16 17 18] f=10 l=19 s=6
        #
        # [16 17 18 | 10 11 12 13 14 15] new s = (19-10)-6 = 9-6 = 3

        try:
            keys = [self._order[i] for i in range(first, last)]
        except KeyError as e:
            raise IndexError(
                e.args[0],
                "Tried to move item(s) beyond end of collection") from None

        # use itertools.chain to avoid list concatenation
        for j, k in zip(counter(first), chain(keys[split:], keys[:split])):
            self._index[k] = j
            self._order[j] = k

        return last - first - split
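
The arithmetic in the comments can be checked with plain lists; a minimal sketch of the same two-sublist swap:

from itertools import chain

def rotate(keys, split):
    # Swap the sub-lists keys[:split] and keys[split:], as exec_move does.
    return list(chain(keys[split:], keys[:split]))

keys = [8, 9, 10, 11, 12]                 # f=8, l=13, s=2
moved = rotate(keys, 2)                   # [10, 11, 12, 8, 9]
new_split = 13 - 8 - 2                    # 3, the value exec_move returns
assert rotate(moved, new_split) == keys   # the move is its own inverse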
Code Example #4
File: xyz2dalton.py Project: berquist/scripts
def xyz2dalton_from_ccdata(atomnos, atomcoords, totalcharge=0):
    """Given NumPy arrays of atomic numbers (shape [natom,]) and atomic
    coordinates (shape [natom, 3]), format the file contents into a section
    suitable for DALTON's MOLECULE input section.
    """

    from itertools import count as counter
    from periodic_table import Element

    outfilelines = []
    atomtypes = 0
    atomsymbols = [Element[atomnum] for atomnum in atomnos]
    oldcharge = ''
    count = 0
    for i, s, n, c in zip(counter(start=1), atomsymbols, atomnos, atomcoords):
        newcharge = n
        if newcharge != oldcharge and i > 1:
            atom_section_header = f'Charge={oldcharge:.1f} Atoms={count}'
            outfilelines.insert(len(outfilelines) - count,
                                atom_section_header)
            count = 0
            atomtypes += 1
        line = f'{s} {c[0]:20.12f} {c[1]:20.12f} {c[2]:20.12f}'
        outfilelines.append(line)
        count += 1
        oldcharge = newcharge
    atom_section_header = f'Charge={oldcharge:.1f} Atoms={count}'
    outfilelines.insert(len(outfilelines) - count,
                        atom_section_header)
    atomtypes += 1
    mol_section_header = f'Atomtypes={atomtypes} Angstrom Charge={totalcharge} Nosymmetry'
    outfilelines.insert(0, mol_section_header)

    return '\n'.join(outfilelines)
Code Example #5
 def __init__(self, net, qvar, max_duplicates):
     self.net    = net 
     self.leaves = set()
     # construct leaves
     for var in net.vars:
         if var.func:
             duplicates = np.random.randint(2,1+max_duplicates)
             for _ in range(duplicates):
                 self.leaves.add(Node(var=var))
         else:
             self.leaves.add(Node(var=var))
     # construct random dtree
     trees  = list(self.leaves)
     rindex = lambda: np.random.randint(len(trees))
     while len(trees) > 1:
         index = rindex()
         tree1 = trees[index]
         trees[index] = trees[-1]
         trees.pop()
         index = rindex()
         tree2 = trees[index]
         tree  = Node(left=tree1,right=tree2)
         trees[index] = tree
     # node trees[0] is root of dtree and will be discarded during orientation
     # find host and root
     host = None
     for leaf in self.leaves:
         if leaf.var == qvar:
             host = leaf
             break
     assert host
     root = host.parent
     self.host, self.root = host, root # before calling orient
     
     # construct jointree view from dtree (will bypass node trees[0])
     self.nodes = [] # filled by orient, bottom-up
     self.__orient(root,host) # may change self.root
     # host is not connected to view: its left, right, parent are undefined
     # only connection to view is being parent of root
     host.left = host.right = host.parent = None
     
     # number view nodes
     id = counter(0)
     for i in self.nodes: i.id = next(id)
     host.id = next(id)
     
     # set separators and clusters
     self.__initialize()
     self.__shrink()
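
The while-loop above pairs random trees until one remains, using swap-and-pop for O(1) removal; a minimal sketch of the same pattern on plain values:

import random

items = list(range(6))
while len(items) > 1:
    i = random.randrange(len(items))
    a = items[i]
    items[i] = items[-1]          # swap-and-pop: O(1) removal of a random element
    items.pop()
    j = random.randrange(len(items))
    items[j] = (a, items[j])      # the new pair replaces one of its children
print(items[0])                   # a randomly shaped full binary tree over 0..5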
Code Example #6
def xyz2dalton_from_splitlines(xyzfile_splitlines, totalcharge=0):
    """Given a list of lines from an XYZ file (not the # of atoms or
    comment lines!), format the file contents into a section suitable for
    DALTON's MOLECULE input section.
    """

    from itertools import count as counter
    from periodic_table import AtomicNum

    outfilelines = []
    atomtypes = 0
    lines = [line for line in xyzfile_splitlines if line.strip() != '']
    atomsymbols = [line.split()[0] for line in lines]
    atomnums = [float(AtomicNum[symbol]) for symbol in atomsymbols]
    oldcharge = ''
    count = 0
    for i, atomnum, line in zip(counter(start=1), atomnums, lines):
        newcharge = atomnum
        if newcharge != oldcharge and i > 1:
            atom_section_header = 'Charge={charge} Atoms={count}'.format(
                charge=oldcharge, count=count)
            outfilelines.insert(len(outfilelines) - count, atom_section_header)
            count = 0
            atomtypes += 1
        outfilelines.append(line)
        count += 1
        oldcharge = newcharge
    atom_section_header = 'Charge={charge} Atoms={count}'.format(
        charge=oldcharge, count=count)
    outfilelines.insert(len(outfilelines) - count, atom_section_header)
    atomtypes += 1
    mol_section_header = 'Atomtypes={atomtypes} Angstrom Charge={totalcharge} Nosymmetry'.format(
        atomtypes=atomtypes, totalcharge=totalcharge)
    outfilelines.insert(0, mol_section_header)

    return '\n'.join(outfilelines)
Code Example #7
import textwrap  # used by lwrap below
from html import escape  # cgi.escape is gone in Python 3; html.escape replaces it
from itertools import count as counter

SIMPLE = False


def comment(s):
    print("/*\n%s\n*/" % s)


def simple():
    global SIMPLE
    return SIMPLE


nextInt = counter().__next__


def ifseteq(h, k, v):
    return k in h and h[k] == v


def lwrap(t, n=32):
    return "\n".join(textwrap.wrap(t, n))


class TezEdge(object):
    def __init__(self, src, dst, kind):
        self.src = src
        self.dst = dst
        self.kind = kind
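
A quick check of the counter-backed helper above:

assert nextInt() == 0
assert nextInt() == 1   # every call advances the same shared itertools.count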
Code Example #8
File: counters.py Project: fantastdd/PyRCC8
from itertools import count as counter

arcCount = counter()
conCount = counter()
nodeCount = counter()
# count the number of generated visual-graphs
visualCount = counter()
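
These module-level counters hand out unique, monotonically increasing ids; a minimal sketch:

arc_id = next(arcCount)        # 0
next_arc_id = next(arcCount)   # 1
node_id = next(nodeCount)      # 0 -- each counter advances independently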

Code Example #9
File: modcollection.py Project: Kf4btg/SkyModMan
    def _change_order(self, old_position, new_position, num_to_move=1):
        """
        Move the item currently located at `old_position` to
        `new_position`, adjusting the indices of any affected items
        as needed.

        :param int old_position:
        :param int new_position:
        :param int num_to_move: Number of contiguous items to move
            (i.e. length of the slice starting at index `old_position`)
        """

        # Note -- The conventional way to visualize this is that
        # the item is being slotted in _before_ the destination index;
        # so, to move an item from position 7 to position 2, we pull
        # it out of slot 7 and slide it back in directly above the
        # item currently in slot 2, pushing that item down to slot 3,
        # the one below it to slot 4, etc, until the item that was
        # previously in slot 6 moves into the empty slot at slot 7.

        # So, moving an item UP means shifting all the items BEFORE it--
        # up to and including the item currently in the destination--
        # down by 1. Moving a contiguous section of items up just means
        # shifting the preceding items down by the number of items in
        # the section.

        # I don't know why this always makes my brain cry.

        ## don't do dumb things
        assert new_position != old_position
        assert num_to_move > 0
        assert new_position in range(self._length)
        assert old_position + num_to_move <= self._length

        new, old, count = new_position, old_position, num_to_move

        ## note:: there are some rambling talking-to-myself musings on why
        ## we move things around like this back in the git history of
        # this file. The gist of it is,
        # basically, because we're not really using lists OR modifying
        # the real data (kept safe and sound in self._map), we can
        # just get a list of the keys of the items we're moving around
        # (in their new order) and directly assign their new indexes
        # to _index and _order

        try:
            # save the keys we're moving around
            chunk = [self._order[i] for i in range(old, old + count)]

            if new < old:
                # moving up (to LOWER indices)

                # append the stuff from <the target index> to <right before
                # our original index> to the chunk (since, after "shifting"
                # these values out of the way, they will come after our
                # "moved" values in the New Order)

                first = new
                chunk += [self._order[i] for i in range(new, old)]

            elif old < new:
                # moving down (to HIGHER indices)

                # have to shift items <immediately after the main block> to
                # <index where the END of our block will end up>.
                # These can be stuck on the front of the original chunk to get
                # the New Order
                first = old
                chunk = [
                    self._order[i] for i in range(old + count, new + count)
                ] + chunk

            else:
                # old==new WHAT ARE YOU DOING GET OUT OF HERE
                return
        except KeyError as e:
            # if a key (an int in this case) was not found, that effectively
            # means the given index was "out of bounds"

            raise IndexError(
                e.args[0],
                "Tried to move item(s) beyond end of collection") from None

        # now plug the whole chunk back in;
        # we don't need to worry about the 'last' index because the
        # length of the chunk implicitly handles that; so we just
        # start counting at first.
        for i, k in zip(counter(first), chunk):
            self._index[k] = i
            self._order[i] = k
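
The chunk arithmetic can be sanity-checked against plain list slicing; a minimal sketch that ignores the key/index bookkeeping:

order = list('abcdefg')

def change_order(order, old, new, count=1):
    # Same New Order computation as above, on a plain list.
    chunk = order[old:old + count]
    if new < old:
        first = new
        chunk += order[new:old]
    else:
        first = old
        chunk = order[old + count:new + count] + chunk
    return order[:first] + chunk + order[first + len(chunk):]

assert change_order(order, 4, 1) == list('aebcdfg')   # move 'e' up to slot 1
assert change_order(order, 1, 4) == list('acdebfg')   # move 'b' down to slot 4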
Code Example #10
File: rpc_v1.py Project: tomerfiliba/layer5
 def __init__(self, channel, service):
     RPCMediumBase.__init__(self, channel, service)
     self._seq = itertools.count()  # note: itertools has no 'counter'
     self._replies = {}
Code Example #11
    def close_voting(self):
        self.voting_open = False

        movements = ''

        for i in counter():
            action_counts = defaultdict(int)

            popular_action = None
            popular_count = 0
            owners_by_action = defaultdict(list)

            for owner, actions in self.past_commands.items():
                action = None
                try:
                    action = actions[i]
                except IndexError:
                    pass
                count = action_counts[action] + 1
                action_counts[action] = count

                owners_by_action[action].append(owner)

                if count > popular_count:
                    popular_action = action
                    popular_count = count

            # Delete any votes that were in the minority so we are ready for the next action.
            for action, owners in owners_by_action.items():
                if action != popular_action:
                    for owner in owners:
                        del self.past_commands[owner]

            if popular_action is None:
                if i == 0:
                    self.connection.privmsg(
                        self.channel,
                        "No commands detected. Waiting for next vote.")
                break

            if popular_action == Action.BACK:
                self.connection.privmsg(self.channel,
                                        "Going back to previous map.")
                controller.back_to_map()
                break

            if popular_action == Action.RESTART:
                self.connection.privmsg(self.channel, "Restarting level.")
                controller.restart()
                break

            if popular_action == Action.UP:
                movements += 'u'
                controller.movement('u')
            elif popular_action == Action.DOWN:
                movements += 'd'
                controller.movement('d')
            elif popular_action == Action.LEFT:
                movements += 'l'
                controller.movement('l')
            elif popular_action == Action.RIGHT:
                movements += 'r'
                controller.movement('r')
            elif popular_action == Action.ENTER:
                movements += 's'
                controller.movement('s')

        if len(movements):
            movements = ' '.join(movements)
            self.connection.privmsg(self.channel,
                                    f"Executing movements: {movements}")

        self.past_commands = {}
        self.reactor.scheduler.execute_after(EXECUTE_TIME,
                                             self.open_command_voting)
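
The per-round tally above is a plurality vote; a minimal sketch of the same counting step, using only the standard library:

from collections import defaultdict

votes = {'alice': 'u', 'bob': 'u', 'carol': 'l'}
action_counts = defaultdict(int)
popular_action, popular_count = None, 0
for action in votes.values():
    action_counts[action] += 1
    if action_counts[action] > popular_count:
        popular_action = action
        popular_count = action_counts[action]
print(popular_action)   # 'u' -- ties go to whichever action reached the count first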
Code Example #12
import numpy as np
from itertools import product as counter
Avg_reward = 0
n = 0

for COIN in counter(
    [0, 1], repeat=5
):  # Generates all possible combinations of a vector of 0,1 of length 'repeat'
    r = 1  #the reward to be obtained after a toss
    Tot_Reward = 0  #the sum of all rewards obtained in 'repeat' number of tosses
    reward_sequence = [
    ]  #array to store the rewards we got as the coin was being tossed
    first_head = True
    after_first_head = False
    Yn = 0
    toss_index = 0
    for toss in COIN:
        if (toss == 1 and first_head):  #if we get heads
            after_first_head = True
            first_head = False
            r = 1
        elif after_first_head:
            if toss == 0:
                r = 2 * r
            else:
                r = 2 * r
            after_first_head = False
        elif toss == 1:
            r = 2 * r  #if r!=0 else 1  #reward is 2 times the earlier reward. If r is 0, make it 1(initial reward)
        else:
            r = r
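        # Hedged sketch of the likely continuation (the source snippet ends above):
        # accumulate this toss's reward, then fold the total into the running mean.
        reward_sequence.append(r)
        Tot_Reward += r
        toss_index += 1
    n += 1
    Avg_reward += (Tot_Reward - Avg_reward) / n  # running mean over all 32 sequences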
Code Example #13
File: network.py Project: jvrana/Terrarium
class NetworkOptimizer:
    """Class that finds optimal Steiner Tree ("""

    counter = counter()

    def __init__(self, browser, sample_composition_graph, template_graph):
        self.browser = browser
        self.sample_composition = sample_composition_graph
        self.template_graph = template_graph.copy()
        self.log = Loggable(self)
        self.gid = next(self.counter)
        self.solution = None

    def _cinfo(self, msg, foreground="white", background="black"):
        self.log.info(cstring(msg, foreground, background))

    ############################
    # RUN
    ############################

    def run_stage0(self):
        self._cinfo("STAGE 0: Sample Composition")
        self.update_sample_composition()
        graph = self.create_sample_composition_graphs(self.template_graph,
                                                      self.browser,
                                                      self.sample_composition)
        return graph

    def run_stage1(self, graph):
        self._cinfo("STAGE 1 Cleaning Graph")
        self.clean_graph(graph)

    def run_stage2(self, graph, goal_sample, goal_object_type, ignore):
        if ignore is None:
            ignore = []
        self._cinfo("STAGE 2: Terminal Nodes")
        start_nodes = self.extract_items(graph)
        start_nodes += self.extract_leaf_operations(graph)
        start_nodes = [n for n in start_nodes if n not in ignore]
        end_nodes = self.extract_end_nodes(graph, goal_sample,
                                           goal_object_type)
        return start_nodes, end_nodes

    def run_stage3(self, graph, start_nodes, end_nodes):
        self._cinfo("STAGE 3: Optimizing")
        return self.optimize_steiner_tree(start_nodes, end_nodes, graph, [])

    def run(self, goal_object_type, goal_sample=None, ignore=None):
        if goal_sample is None:
            goal_sample = self.root_samples()[0]

        existing_item = self.browser.session.Item.one(
            query={
                "sample_id": goal_sample.id,
                "object_type_id": goal_object_type.id
            })
        if existing_item:
            print("Existing item found: {}".format(existing_item))
            self.solution = NetworkSolution(cost=0,
                                            paths=[],
                                            graph=None,
                                            item=existing_item)
            return self.solution

        if goal_sample.sample_type_id != goal_object_type.sample_type_id:
            raise Exception(
                "ObjectType {ot} does not match Sample {s}. '{s}' is a {st} but "
                "ObjectType '{ot}' refers to "
                "a '{otst}'".format(
                    ot=goal_object_type.name,
                    s=goal_sample.name,
                    st=goal_sample.sample_type.name,
                    otst=goal_object_type.sample_type.name,
                ))

        ############################
        # Stage 0
        ############################
        graph = self.run_stage0()

        if goal_sample.id not in self.sample_composition:
            raise Exception(
                "Sample id={} not found in sample composition".format(
                    goal_sample.id))

        ############################
        # Stage 1
        ############################
        self.run_stage1(graph)

        ############################
        # Stage 2
        ############################
        start_nodes, end_nodes = self.run_stage2(graph, goal_sample,
                                                 goal_object_type, ignore)

        ############################
        # Stage 3
        ############################
        cost, paths, visited_samples = self.run_stage3(graph, start_nodes,
                                                       end_nodes)

        self.solution = NetworkSolution(cost=cost, paths=paths, graph=graph)
        return self.solution

    ############################
    # PLAN
    ############################

    def plan(self, canvas=None, solution=None) -> Planner:
        """Converts a path through a :class:`BrowserGraph` into an Aquarium
        Plan.

        :param canvas: Planner instance
        :param solution:
        :return:
        """
        if canvas is None:
            canvas = Planner(self.browser.session)
            canvas.plan.operations = []
        if solution is None:
            solution = self.solution
        if solution.paths:
            graph = solution.graph.copy()
            for path_num, path in enumerate(solution.paths):
                print("Path: {}".format(path_num))
                self._plan_assign_field_values(path, graph, canvas)
                self._plan_assign_items(path, graph, canvas)
                print()
        return canvas

    @classmethod
    def _plan_assign_field_values(cls, path, graph, canvas):
        """Assign :class:`FieldValue` to a path.

        :param path: list of node_ids
        :param graph: BrowserGraph instance
        :param canvas: Planner instance
        :return:
        """
        prev_node = None

        for n, ndata in graph.iter_model_data(model_class="AllowableFieldType",
                                              nbunch=path):
            aft = ndata["model"]
            sample = ndata["sample"]
            if aft.field_type.role == "output":
                # create and set output operation
                if "operation" not in ndata:
                    # print("Creating field value")
                    op = canvas.create_operation_by_type_id(
                        aft.field_type.parent_id)
                    fv = op.output(aft.field_type.name)
                    canvas.set_field_value(fv, sample=sample)
                    ndata["field_value"] = fv
                    ndata["operation"] = op
                else:
                    op = ndata["operation"]

                if prev_node:
                    input_aft = prev_node[1]["model"]
                    input_sample = prev_node[1]["sample"]
                    input_name = input_aft.field_type.name

                    if input_aft.field_type.array:
                        # print(
                        #     "Setting input array {} to sample='{}'".format(
                        #         input_name, input_sample
                        #     )
                        # )
                        input_fv = canvas.set_input_field_value_array(
                            op, input_name, sample=input_sample)
                    else:
                        # print(
                        #     "Setting input {} to sample='{}'".format(
                        #         input_name, input_sample
                        #     )
                        # )
                        input_fv = canvas.set_field_value(op.input(input_name),
                                                          sample=input_sample)
                    # print("Setting input field_value for '{}'".format(prev_node[0]))
                    prev_node[1]["field_value"] = input_fv
                    prev_node[1]["operation"] = op
            prev_node = (n, ndata)

    @classmethod
    def _plan_assign_items(cls, path, graph, canvas):
        """Assign :class:`Item` in a path.

        :param path: list of node_ids
        :param graph: BrowserGraph instance
        :param canvas: Planner instance
        :return:
        """
        prev_node = None

        for n, ndata in graph.iter_model_data(model_class="AllowableFieldType",
                                              nbunch=path):
            if (ndata["node_class"] == "AllowableFieldType"
                    and ndata["model"].field_type.role == "input"):
                if "operation" not in ndata:
                    cls.print_aft(graph, n)
                    raise Exception(
                        "Key 'operation' not found in node data '{}'".format(
                            n))
                input_fv = ndata["field_value"]
                input_sample = ndata["sample"]
                if prev_node:
                    node_class = prev_node[1]["node_class"]
                    if node_class == "AllowableFieldType":
                        output_fv = prev_node[1]["field_value"]
                        canvas.add_wire(output_fv, input_fv)
                    elif node_class == "Item":
                        item = prev_node[1]["model"]
                        if input_fv.field_type.part:
                            canvas.set_part(input_fv, item)
                        else:
                            canvas.set_field_value(input_fv,
                                                   sample=input_sample,
                                                   item=item)
                    else:
                        raise Exception(
                            "Node class '{}' not recognized".format(
                                node_class))
            prev_node = (n, ndata)

    ############################
    # UTILS
    ############################

    def clean_graph(self, graph):
        """Remove internal wires with different routing id but same sample.

        :param graph:
        :type graph:
        :return:
        :rtype:
        """
        removal = []

        afts = graph.models("AllowableFieldType")
        self.browser.retrieve(afts, "field_type")

        for n1, n2 in tqdm(list(graph.edges)):
            node1 = graph.get_node(n1)
            node2 = graph.get_node(n2)
            if (node1["node_class"] == "AllowableFieldType"
                    and node2["node_class"] == "AllowableFieldType"):
                aft1 = node1["model"]
                aft2 = node2["model"]
                ft1 = aft1.field_type
                ft2 = aft2.field_type
                if ft1.role == "input" and ft2.role == "output":
                    if (ft1.routing != ft2.routing
                            and node1["sample"].id == node2["sample"].id):
                        removal.append((n1, n2))
                    if (ft1.routing == ft2.routing
                            and node1["sample"].id != node2["sample"].id):
                        removal.append((n1, n2))

        print("Removing edges with same sample but different routing ids")
        print(len(graph.edges))
        graph.graph.remove_edges_from(removal)
        return graph

    def update_sample_composition(self):
        updated_sample_composition = self.expand_sample_composition(
            browser=self.browser, graph=self.sample_composition)
        self.sample_composition = updated_sample_composition
        return self.sample_composition

    def print_sample_composition(self):
        for s1, s2 in self.sample_composition.edges:
            s1 = self.sample_composition.nodes[s1]
            s2 = self.sample_composition.nodes[s2]
            print(s1["sample"].name + " => " + s2["sample"].name)

    def root_samples(self):
        nodes = graph_utils.find_leaves(self.sample_composition)
        return [self.sample_composition.nodes[n]["sample"] for n in nodes]

    @classmethod
    def expand_sample_composition(cls, browser, samples=None, graph=None):
        if graph is None:
            graph = nx.DiGraph()
        if samples is None:
            graph_copy = nx.DiGraph()
            graph_copy.add_nodes_from(graph.nodes(data=True))
            graph_copy.add_edges_from(graph.edges(data=True))
            graph = graph_copy
            samples = [graph.nodes[n]["sample"] for n in graph]
        if not samples:
            return graph
        browser.recursive_retrieve(samples, {"field_values": "sample"})
        new_samples = []
        for s1 in samples:
            fvdict = s1._field_value_dictionary()
            for ft in s1.sample_type.field_types:
                if ft.ftype == "sample":
                    fv = fvdict[ft.name]
                    if not isinstance(fv, list):
                        fv = [fv]
                    for _fv in fv:
                        if _fv:
                            s2 = _fv.sample
                            if s2:
                                new_samples.append(s2)
                                graph.add_node(s1.id, sample=s1)
                                graph.add_node(s2.id, sample=s2)
                                graph.add_edge(s2.id, s1.id)
        return cls.expand_sample_composition(browser, new_samples, graph)

    @staticmethod
    def decompose_template_graph_into_samples(template_graph,
                                              samples,
                                              include_none=True):
        """From a template graph and list of samples, extract sample specific
        nodes from the template graph (using sample type id)

        :param template_graph:
        :type template_graph:
        :param samples:
        :type samples:
        :return:
        :rtype:
        """
        sample_type_ids = {s.sample_type_id for s in samples}
        sample_type_graphs = defaultdict(list)

        if include_none:
            sample_type_ids.add(None)
            samples.append(none_sample)

        samples = list(set(samples))

        for n, ndata in template_graph.iter_model_data():
            if ndata["node_class"] == "AllowableFieldType":
                if ndata["model"].sample_type_id in sample_type_ids:
                    sample_type_graphs[ndata["model"].sample_type_id].append(n)

        sample_graphs = {}

        for sample in samples:
            nodes = sample_type_graphs[sample.sample_type_id]
            sample_graph = template_graph.subgraph(nodes)
            sample_graph.set_prefix("Sample{}_".format(sample.id))
            for n, ndata in sample_graph.iter_model_data():
                ndata["sample"] = sample
            sample_graphs[sample.id] = sample_graph
        return sample_graphs

    @staticmethod
    def _find_parts_for_samples(browser, sample_ids, lim=50):
        all_parts = []
        part_type = browser.find_by_name("__Part", model_class="ObjectType")
        for sample_id in sample_ids:
            sample_parts = browser.last(
                lim,
                model_class="Item",
                query=dict(object_type_id=part_type.id, sample_id=sample_id),
            )
            all_parts += sample_parts
        browser.retrieve(all_parts, "collections")

        # filter out parts that do not exist
        all_parts = [
            part for part in all_parts
            if part.collections and part.collections[0].location != "deleted"
        ]

        # create a Part-by-Sample-by-ObjectType dictionary
        data = {}
        for part in all_parts:
            if part.collections:
                data.setdefault(part.collections[0].object_type_id,
                                {}).setdefault(part.sample_id, []).append(part)
        return data

    def create_sample_composition_graphs(self, template_graph, browser,
                                         sample_composition):
        """Break a template_graph into subgraphs comprising of individual
        samples obtained from the sample composition graph.

        The `sample_composition` graph is a directed graph that defines how samples may
        be built from other samples. Meanwhile, the `template_graph` defines all
        possible connections between :class:`AllowableFieldType` and the associated
        weights of each edge determined from the :class:`AutoPlannerModel`. Using the
        `sample_composition` graph, we grab individual subgraphs from the template graph
        for each node in the sample composition graph (using SampleType). The edges
        of the `sample_composition` graph determine which edges of these new subgraphs
        can be connected to each other, forming the final graph used in the Steiner
        tree optimization algorithm.

        :param template_graph:
        :param browser:
        :param sample_composition:
        :return:
        """
        sample_edges = []
        graphs = []

        samples = []
        for item in sample_composition.nodes:
            samples.append(sample_composition.nodes[item]["sample"])
        sample_graph_dict = self.decompose_template_graph_into_samples(
            template_graph, samples)

        for s1, s2 in sample_composition.edges:
            s1 = sample_composition.nodes[s1]["sample"]
            s2 = sample_composition.nodes[s2]["sample"]

            sample_graph1 = sample_graph_dict[s1.id]
            sample_graph2 = sample_graph_dict[s2.id]

            graphs += [sample_graph1.graph, sample_graph2.graph]

            input_afts = [
                aft for aft in sample_graph1.iter_models("AllowableFieldType")
                if aft.field_type.role == "input"
            ]
            output_afts = [
                aft for aft in sample_graph2.iter_models("AllowableFieldType")
                if aft.field_type.role == "output"
            ]

            pairs = AutoPlannerModel._match_internal_afts(
                input_afts, output_afts)
            pairs = [(sample_graph1.node_id(aft1), sample_graph2.node_id(aft2))
                     for aft1, aft2 in pairs]
            sample_edges += pairs

        # include the afts from operations that have no sample_type_id (e.g. Order Primer)
        if None in sample_graph_dict:
            graphs.append(sample_graph_dict[None].graph)
            # TODO: add None edges for internal graphs...

        graph = BrowserGraph(browser)
        graph.graph = nx.compose_all(graphs)

        graph.cache_models()

        self.log.info("Adding {} sample-to-sample edges".format(
            len(sample_edges)))

        for n1, n2 in tqdm(sample_edges):
            assert n1 in graph
            assert n2 in graph

            node1 = graph.get_node(n1)
            node2 = graph.get_node(n2)
            aft1 = node1["model"]
            aft2 = node2["model"]

            x = (template_graph.node_id(aft1), template_graph.node_id(aft2))
            edge = template_graph.get_edge(*x)
            graph.add_edge(n1,
                           n2,
                           edge_type="sample_to_sample",
                           weight=edge["weight"])

        afts = list(graph.iter_models(model_class="AllowableFieldType"))
        browser.retrieve(afts, "field_type")
        sample_ids = list({
            ndata["sample"].id
            for _, ndata in graph.iter_model_data()
            if ndata["sample"] is not None
        })

        ##############################
        # Get items
        ##############################

        non_part_afts = [aft for aft in afts if not aft.field_type.part]
        object_type_ids = list({aft.object_type_id for aft in non_part_afts})

        self._cinfo(
            "finding all relevant items for {} samples and {} object_types".
            format(len(sample_ids), len(object_type_ids)))
        items = browser.where(
            model_class="Item",
            query={
                "sample_id": sample_ids,
                "object_type_id": object_type_ids
            },
        )
        items = [item for item in items if item.location != "deleted"]
        self.log.info("{} total items found".format(len(items)))
        items_by_object_type_id = defaultdict(list)
        for item in items:
            items_by_object_type_id[item.object_type_id].append(item)

        ##############################
        # Get parts
        ##############################

        self._cinfo("finding relevant parts/collections")
        part_by_sample_by_type = self._find_parts_for_samples(browser,
                                                              sample_ids,
                                                              lim=50)
        self._cinfo("found {} collection types".format(
            len(part_by_sample_by_type)))

        ##############################
        # Assign Items/Parts/Collections
        ##############################

        new_items = []
        new_edges = []
        for node, ndata in graph.iter_model_data(
                model_class="AllowableFieldType"):
            aft = ndata["model"]
            sample = ndata["sample"]
            if sample:
                sample_id = sample.id
                sample_type_id = sample.sample_type_id
            else:
                sample_id = None
                sample_type_id = None
            if aft.sample_type_id == sample_type_id:
                if aft.field_type.part:
                    parts = part_by_sample_by_type.get(aft.object_type_id,
                                                       {}).get(sample_id, [])
                    for part in parts[-1:]:
                        if part.sample_id == sample_id:
                            new_items.append(part)
                            new_edges.append((part, sample, node))
                else:
                    items = items_by_object_type_id[aft.object_type_id]
                    for item in items:
                        if item.sample_id == sample_id:
                            new_items.append(item)
                            new_edges.append((item, sample, node))

        for item in new_items:
            graph.add_model(item)

        for item, sample, node in new_edges:
            graph.add_edge(graph.node_id(item), node, weight=0)

        self.log.info("{} items added to various allowable_field_types".format(
            len(new_edges)))
        return graph

    @staticmethod
    def print_aft(graph, node_id):
        if node_id == "END":
            return
        try:
            node = graph.get_node(node_id)
            if node["node_class"] == "AllowableFieldType":
                aft = node["model"]
                print(
                    "<AFT id={:<10} sample={:<10} {:^10} {:<10} '{:<10}:{}'>".
                    format(
                        aft.id,
                        node["sample"].name,
                        aft.field_type.role,
                        aft.field_type.name,
                        aft.field_type.operation_type.category,
                        aft.field_type.operation_type.name,
                    ))
            elif node["node_class"] == "Item":
                item = node["model"]
                sample_name = "None"
                if item.sample:
                    sample_name = item.sample.name
                print("<Item id={:<10} {:<20} {:<20}>".format(
                    item.id, sample_name, item.object_type.name))
        except Exception as e:
            print(node_id)
            print(e)
            pass

    def extract_leaf_operations(self, graph):
        """Extracts operations that have no inputs (such as "Order Primer")

        :param graph:
        :type graph:
        :return:
        :rtype:
        """

        leaf_afts = []
        for n, ndata in graph.iter_model_data(
                model_class="AllowableFieldType"):
            aft = ndata["model"]
            if aft.field_type.role == "output":
                node_id = self.template_graph.node_id(aft)
                preds = self.template_graph.predecessors(node_id)
                if not list(preds):
                    leaf_afts.append(n)
        return leaf_afts

    def extract_items(self, graph):
        item_groups = []

        for n, ndata in graph.iter_model_data(model_class="Item"):
            for succ in graph.graph.successors(n):

                grouped = []
                for n2 in graph.graph.predecessors(succ):
                    node = graph.get_node(n2)
                    if node["node_class"] == "Item":
                        grouped.append(n2)
                item_groups.append(tuple(grouped))
        items = list(set(reduce(lambda x, y: list(x) + list(y), item_groups)))

        return items

    def extract_end_nodes(self, graph, goal_sample, goal_object_type):
        end_nodes = []
        for n, ndata in graph.iter_model_data(
                model_class="AllowableFieldType"):
            aft = ndata["model"]
            if (ndata["sample"].id == goal_sample.id
                    and aft.object_type_id == goal_object_type.id
                    and aft.field_type.role == "output"):
                end_nodes.append(n)
        return end_nodes

    @staticmethod
    def get_sister_inputs(node, node_data, output_node, graph, ignore=None):
        """Returns a field_type_id to nodes."""
        sister_inputs = defaultdict(list)
        if (node_data["node_class"] == "AllowableFieldType"
                and node_data["model"].field_type.role == "input"):
            aft = node_data["model"]
            successor = output_node
            predecessors = list(graph.predecessors(successor))
            print(len(predecessors))
            for p in predecessors:
                if p == node or (ignore and p in ignore):
                    continue
                pnode = graph.get_node(p)
                if pnode["node_class"] == "AllowableFieldType":
                    is_array = pnode["model"].field_type.array is True
                    if (not is_array and pnode["model"].field_type_id
                            == aft.field_type_id):
                        continue
                    if is_array:
                        key = "{}_{}".format(pnode["model"].field_type_id,
                                             pnode["sample"].id)
                    else:
                        key = str(pnode["model"].field_type_id)
                    sister_inputs[key].append((p, pnode))
        return sister_inputs

    def _print_nodes(self, node_ids, graph):
        print(node_ids)
        items = list(graph.iter_models(nbunch=node_ids, model_class="Item"))
        self.browser.retrieve(items, "sample")
        self.browser.retrieve(items, "object_type")

        grouped_by_object_type = {}
        for item in items:
            grouped_by_object_type.setdefault(item.object_type.name,
                                              []).append(item)

        for otname, items in grouped_by_object_type.items():
            cprint(otname, "white", "black")
            for item in items:
                sample_name = "None"
                if item.sample:
                    sample_name = item.sample.name
                print("    <Item id={} {} {}".format(item.id,
                                                     item.object_type.name,
                                                     sample_name))

        for n, ndata in graph.iter_model_data(model_class="AllowableFieldType",
                                              nbunch=node_ids):
            self.print_aft(graph, n)

    def _optimize_get_seed_paths(
        self,
        start_nodes,
        end_nodes,
        bgraph,
        visited_end_nodes,
        output_node=None,
        verbose=False,
    ):
        paths = []
        end_nodes = [e for e in end_nodes if e not in visited_end_nodes]
        if verbose:
            print("START")
            self._print_nodes(start_nodes, bgraph)

            print("END")
            print(end_nodes)
            self._print_nodes(end_nodes, bgraph)

            print("VISITED: {}".format(visited_end_nodes))

        for start in start_nodes:
            for end in end_nodes:
                through_nodes = [start, end]
                if output_node:
                    through_nodes.append(output_node)
                try:
                    cost, path = graph_utils.top_paths(through_nodes, bgraph)
                except nx.exception.NetworkXNoPath:
                    continue
                paths.append((cost, path))
        return paths

    def _gather_assignments(self, path, bgraph, visited_end_nodes,
                            visited_samples, depth):

        input_to_output = OrderedDict()
        for n1, n2 in zip(path[:-1], path[1:]):
            node2 = bgraph.get_node(n2)
            node1 = bgraph.get_node(n1)
            if "sample" in node1:
                visited_samples.add(node1["sample"].id)
            if "sample" in node2:
                visited_samples.add(node2["sample"].id)
            if node2["node_class"] == "AllowableFieldType":
                aft2 = node2["model"]
                if aft2.field_type.role == "output":
                    input_to_output[n1] = n2

        print("PATH:")
        for p in path:
            print(p)
            self.print_aft(bgraph, p)

        # iterate through each input to find unfulfilled inputs
        inputs = list(input_to_output.keys())[:]
        print(input_to_output.keys())
        if depth > 0:
            inputs = inputs[:-1]
        #     print()
        #     print("INPUTS: {}".format(inputs))

        #     all_sister
        empty_assignments = defaultdict(list)

        for i, n in enumerate(inputs):
            print()
            print("Finding sisters for:")
            self.print_aft(bgraph, n)
            output_n = input_to_output[n]
            ndata = bgraph.get_node(n)
            sisters = self.get_sister_inputs(n,
                                             ndata,
                                             output_n,
                                             bgraph,
                                             ignore=visited_end_nodes)
            if not sisters:
                print("no sisters found")
            for ftid, nodes in sisters.items():
                print("**Sister FieldType {}**".format(ftid))
                for s, values in nodes:
                    self.print_aft(bgraph, s)
                    empty_assignments["{}_{}".format(output_n, ftid)].append(
                        (s, output_n, values))
                print()

        ############################################
        # 4.3 recursively find cost & shortest paths
        #     for unassigned inputs for every possible
        #     assignment
        ############################################
        all_assignments = list(product(*empty_assignments.values()))
        print(all_assignments)
        for k, v in empty_assignments.items():
            print(k)
            print(v)

        return all_assignments

    # TODO: fix issue with seed path
    # TODO: During the seed stage, this algorithm can get 'stuck' in a non-optimal solution,
    #     making it difficult to plan 'short' experimental plans. As an example, planning
    #     PCRs can get stuck on 'Anneal Oligos' since this is the shortest seed path.
    #     But this results in a sample penalty since the Template from the sample
    #     composition is unfulfilled.
    #     There is no procedure currently in place to solve this issue.
    #     Solution 1: Instead of using the top seed path, evaluate the top 'N' seed
    #     paths, picking the best one
    #     Solution 2: Evaluate the top 'N' most 'different' seed paths
    #     Solution 3: Rank seed paths not only on path length/cost, but also on their
    #     visited samples.
    #     The most visited samples, the better the path. However, longer paths have more
    #     visited samples,
    #     hence usually a higher path length/cost. It would be difficult to weight
    #     these two aspects of the seed path.

    def optimize_steiner_tree(
        self,
        start_nodes,
        end_nodes,
        bgraph,
        visited_end_nodes,
        visited_samples=None,
        output_node=None,
        verbose=True,
        depth=0,
    ):

        # TODO: Algorithm gets stuck on shortest top path...
        # e.g. Yeast Glycerol Stock to Yeast Mating instead of yeast transformation

        if visited_samples is None:
            visited_samples = set()

        ############################################
        # 1. find all shortest paths
        ############################################
        seed_paths = self._optimize_get_seed_paths(start_nodes, end_nodes,
                                                   bgraph, visited_end_nodes,
                                                   output_node, verbose)
        visited_end_nodes += end_nodes

        ############################################
        # 2. find overall shortest path(s)
        ############################################
        NUM_PATHS = 1
        THRESHOLD = 10**8

        if not seed_paths:
            if verbose:
                print("No paths found")
            return math.inf, [], visited_samples
        seed_paths = sorted(seed_paths, key=lambda x: x[0])

        best = []

        for cost, path in seed_paths[:NUM_PATHS]:
            visited_samples_copy = set(visited_samples)
            final_paths = [path]
            if cost > THRESHOLD:
                cprint("Path beyond threshold, returning early", "red")
                print(graph_utils.get_path_length(bgraph, path))
                return cost, final_paths, visited_samples_copy

            if verbose:
                cprint("Single path found with cost {}".format(cost), None,
                       "blue")
                cprint(graph_utils.get_path_weights(bgraph, path), None,
                       "blue")

            ############################################
            # 3. mark edges as 'visited'
            ############################################
            bgraph_copy = bgraph.copy()
            edges = list(zip(path[:-1], path[1:]))
            for e1, e2 in edges:
                edge = bgraph_copy.get_edge(e1, e2)
                edge["weight"] = 0

            ############################################
            # 4.1 input-to-output graph
            ############################################
            input_to_output = self._input_to_output_graph(
                bgraph_copy, path, visited_samples_copy)

            ############################################
            # 4.2  search for all unassigned inputs
            ############################################
            print("PATH:")
            for p in path:
                print(p)
                self.print_aft(bgraph, p)

            # iterate through each input to find unfulfilled inputs

            empty_assignments = self._gather_empty_assignments(
                bgraph, bgraph_copy, depth, input_to_output, visited_end_nodes)

            ############################################
            # 4.3 recursively find cost & shortest paths
            #     for unassigned inputs for every possible
            #     assignment
            ############################################
            cost, final_paths, visited_samples_copy = self._best_assignment(
                bgraph_copy,
                cost,
                depth,
                empty_assignments,
                final_paths,
                start_nodes,
                visited_end_nodes,
                visited_samples_copy,
            )

            ############################################
            # 5 Make a sample penalty for missing input samples
            ############################################

            output_samples = set()
            for path in final_paths:
                for node in path:
                    ndata = bgraph_copy.get_node(node)
                    if "sample" in ndata:
                        output_samples.add(ndata["sample"])

            expected_samples = set()
            for sample in output_samples:
                for pred in self.sample_composition.predecessors(sample.id):
                    expected_samples.add(pred)

            sample_penalty = max([
                (len(expected_samples) - len(visited_samples_copy)) * 10000, 0
            ])

            best.append({
                "cost": cost,
                "final_paths": final_paths,
                "visited_samples": visited_samples_copy,
                "expected_samples": expected_samples,
                "sample_penalty": sample_penalty,
            })

        best = sorted(best, key=lambda x: x["cost"] + x["sample_penalty"])

        ############################################
        # return cost and paths
        ############################################
        selected = best[0]

        cprint("SAMPLES {}/{}".format(len(selected["visited_samples"]),
                                      len(selected["expected_samples"])))
        cprint("COST AT DEPTH {}: {}".format(depth, selected["cost"]), None,
               "red")
        cprint("SAMPLE PENALTY: {}".format(selected["sample_penalty"]), None,
               "red")
        cprint("VISITED SAMPLES: {}".format(selected["visited_samples"]), None,
               "red")
        return (
            selected["cost"] + selected["sample_penalty"],
            selected["final_paths"],
            selected["visited_samples"],
        )

    def _best_assignment(
        self,
        bgraph_copy,
        cost,
        depth,
        empty_assignments,
        final_paths,
        start_nodes,
        visited_end_nodes,
        visited_samples,
    ):
        all_assignments = list(product(*empty_assignments.values()))
        print(all_assignments)
        for k, v in empty_assignments.items():
            print(k)
            print(v)
        if all_assignments[0]:

            # TODO: enforce unique sample_ids if in operation_type
            cprint("Found {} assignments".format(len(all_assignments)), None,
                   "blue")
            best_assignment_costs = []

            for assign_num, assignment in enumerate(all_assignments):
                cprint(
                    "Evaluating assignment {}/{}".format(
                        assign_num + 1, len(all_assignments)),
                    None,
                    "red",
                )
                cprint("Assignment length: {}".format(len(assignment)), None,
                       "yellow")

                assignment_cost = 0
                assignment_paths = []
                assignment_samples = set(visited_samples)
                for end_node, output_node, _ in assignment:
                    _cost, _paths, _visited_samples = self.optimize_steiner_tree(
                        start_nodes,
                        [end_node],
                        bgraph_copy,
                        visited_end_nodes[:],
                        assignment_samples,
                        output_node,
                        verbose=True,
                        depth=depth + 1,
                    )
                    assignment_cost += _cost
                    assignment_paths += _paths
                    assignment_samples = assignment_samples.union(
                        _visited_samples)
                best_assignment_costs.append(
                    (assignment_cost, assignment_paths, assignment_samples))
            cprint([(len(x[2]), x[0]) for x in best_assignment_costs], "green")
            best_assignment_costs = sorted(best_assignment_costs,
                                           key=lambda x: (-len(x[2]), x[0]))

            cprint(
                "Best assignment cost returned: {}".format(
                    best_assignment_costs[0][0]),
                "red",
            )

            cost += best_assignment_costs[0][0]
            final_paths += best_assignment_costs[0][1]
            visited_samples = visited_samples.union(
                best_assignment_costs[0][2])
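    # Class-level counter (itertools.count): each instance draws a unique gid.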
        return cost, final_paths, visited_samples

    def _gather_empty_assignments(self, bgraph, bgraph_copy, depth,
                                  input_to_output, visited_end_nodes):
        empty_assignments = defaultdict(list)
        inputs = list(input_to_output.keys())[:]
        print(input_to_output.keys())
        if depth > 0:
            inputs = inputs[:-1]
        for i, n in enumerate(inputs):
            print()
            print("Finding sisters for:")
            self.print_aft(bgraph, n)
            output_n = input_to_output[n]
            ndata = bgraph_copy.get_node(n)
            sisters = self.get_sister_inputs(n,
                                             ndata,
                                             output_n,
                                             bgraph_copy,
                                             ignore=visited_end_nodes)
            if not sisters:
                print("no sisters found")
            for ftid, nodes in sisters.items():
                print("**Sister FieldType {}**".format(ftid))
                for s, values in nodes:
                    self.print_aft(bgraph, s)
                    empty_assignments["{}_{}".format(output_n, ftid)].append(
                        (s, output_n, values))
                print()
        return empty_assignments

    def _input_to_output_graph(self, bgraph_copy, path, visited_samples):
        input_to_output = OrderedDict()
        for n1, n2 in zip(path[:-1], path[1:]):
            node2 = bgraph_copy.get_node(n2)
            node1 = bgraph_copy.get_node(n1)
            if "sample" in node1:
                visited_samples.add(node1["sample"].id)
            if "sample" in node2:
                visited_samples.add(node2["sample"].id)
            if node2["node_class"] == "AllowableFieldType":
                aft2 = node2["model"]
                if aft2.field_type.role == "output":
                    input_to_output[n1] = n2
        return input_to_output
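
The gid bookkeeping at the top of the class is a common pattern: a class-level itertools.count shared by all instances. A minimal sketch:

from itertools import count as counter

class Tagged:
    _ids = counter()   # shared by every instance of the class

    def __init__(self):
        self.gid = next(self._ids)   # 0, 1, 2, ... across instances

a, b = Tagged(), Tagged()
assert (a.gid, b.gid) == (0, 1)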
Code Example #14
File: rpc_v1.py Project: pombreda/layer5
 def __init__(self, channel, service):
     RPCMediumBase.__init__(self, channel, service)
     self._seq = itertools.count()  # note: itertools has no 'counter'
     self._replies = {}
Code Example #15
 def __init__(self, count, dst):
     self._max = count
     self._counter = counter()
     self._ip_header = ip_header(dst=dst)
     self._icmp = echo_request_tpl()
     self._transport = l3_transport()
Code Example #16
File: counters.py Project: yiyunyc2/EulerX
from itertools import count as counter

arcCount = counter()
conCount = counter()
nodeCount = counter()