Code Example #1
 def test_cycle(self):
     """
     Creating a cycle: A data-instance is both input to and returned by a WorkFlowNode
     """
     d = Data().store()
     c = WorkCalculation().store()
     # New provenance design branch
     # ~ c.add_incoming(d, link_type=LinkType.INPUT_WORK, link_label='lala')
     # ~ d.add_incoming(c, link_type=LinkType.RETURN, link_label='lala')
     c.add_link_from(d, link_type=LinkType.INPUT, label='lala')
     d.add_link_from(c, link_type=LinkType.RETURN, label='lala')
     qb = QueryBuilder().append(Node).append(Node)
     rule = UpdateRule(qb, max_iterations=np.inf)
     es = get_basket(node_ids=(d.id,))
     res = rule.run(es)
     self.assertEqual(res['nodes']._set, set([d.id, c.id]))
Code Example #2
    def setUpClass(cls):
        """
        Create some Data nodes to test the DataParamType parameter type for the command line infrastructure.
        We create an initial Data node and then on purpose create two more whose labels match exactly
        the ID and UUID, respectively, of the first one. This allows us to test the rules implemented
        to resolve ambiguities that arise when determining the identifier type.
        """
        super(TestDataParamType, cls).setUpClass()

        cls.param = DataParamType()
        cls.entity_01 = Data().store()
        cls.entity_02 = Data().store()
        cls.entity_03 = Data().store()

        cls.entity_01.label = 'data_01'
        cls.entity_02.label = str(cls.entity_01.pk)
        cls.entity_03.label = str(cls.entity_01.uuid)
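The snippet above only creates the fixtures. A minimal sketch of how they might be exercised follows, assuming DataParamType follows click's ParamType.convert(value, param, ctx) interface and that a purely numeric identifier is tried as a pk before being tried as a label; the method name is illustrative, not from the original test:
    def test_entity_by_pk(self):
        """Illustrative sketch: a plain pk should resolve to entity_01, even though
        entity_02 carries the same string as its label (assumed pk-before-label precedence)."""
        result = self.param.convert(str(self.entity_01.pk), None, None)
        self.assertEqual(result.uuid, self.entity_01.uuid)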
Code Example #3
    def runTest(self):
        """
        Testing whether groups and nodes can be traversed with the Graph explorer:
        """
        # I create a certain number of groups and save them in this list:
        groups = []
        for igroup in range(self.N_GROUPS):
            name='g-{}'.format(igroup) # Name has to be unique
            groups.append(Group(name=name).store())
        # Same with nodes: create one node fewer than there are groups
        nodes = []
        for inode in range(1, self.N_GROUPS):
            d = Data().store()
            # I add the node I create to both the group of the
            # same index and the group of index - 1
            groups[inode].add_nodes(d)
            groups[inode-1].add_nodes(d)
            nodes.append(d)

        # Creating sets for the test:
        nodes_set = set([n.id for n in nodes])
        groups_set = set([g.id for g in groups])

        # Now I want a rule that gives me all the data, starting
        # from the last node, where the links traversed are
        # membership in the same group:
        qb = QueryBuilder()
        qb.append(Data, tag='d')
        # ~ qb.append(Group, with_node='d', tag='g', filters={'type':''})
        qb.append(Group, group_of='d', tag='g', filters={'type':''})
        # The filter above avoids problems with autogrouping: depending on how
        # exactly the test is run, nodes can be put into autogroups.
        qb.append(Data, member_of='g')
        # ~ qb.append(Data, with_group='g')

        es = get_basket(node_ids=(d.id,))
        rule = UpdateRule(qb, max_iterations=np.inf)
        res = rule.run(es.copy())['nodes']._set
        # Check whether the UpdateRule above really visits all the nodes I created:
        self.assertEqual(res, nodes_set)
        # The recorded visits should match the result:
        self.assertEqual(rule.get_visits()['nodes']._set, res)

        # I can do the same with 2 rules chained into a RuleSequence:
        qb1 = QueryBuilder().append(Node, tag='n').append(
                Group, group_of='n', filters={'type':''})
                # ~ Group, with_node='n', filters={'type':''})
        qb2 = QueryBuilder().append(Group, tag='n').append(
                Node, member_of='n')
                # ~ Node, with_group='n')
        rule1 = UpdateRule(qb1)
        rule2 = UpdateRule(qb2)
        seq = RuleSequence((rule1, rule2), max_iterations=np.inf)
        res = seq.run(es.copy())
        for should_set, is_set in (
                (nodes_set.copy(), res['nodes']._set),
                (groups_set,res['groups']._set)):
            self.assertEqual(is_set, should_set)
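The commented-out lines above hint at the newer QueryBuilder relationship keywords. As a sketch, the same membership traversal would read as follows under that newer API (assuming with_node and with_group are available, as the comments suggest):
        qb = QueryBuilder()
        qb.append(Data, tag='d')
        # the group containing the data node (newer keyword replacing group_of)
        qb.append(Group, with_node='d', tag='g', filters={'type':''})
        # the data nodes that are members of that group (newer keyword replacing member_of)
        qb.append(Data, with_group='g')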
Code Example #4
    def test_dynamic_output(self):
        from aiida.orm import Node
        from aiida.orm.data import Data

        n = Node()
        d = Data()
        self.assertFalse(self.spec.validate_outputs({'key': 'foo'})[0])
        self.assertFalse(self.spec.validate_outputs({'key': 5})[0])
        self.assertFalse(self.spec.validate_outputs({'key': n})[0])
        self.assertTrue(self.spec.validate_outputs({'key': d})[0])
Code Example #5
File: processSpec.py  Project: zooks97/aiida_core
    def test_dynamic_output(self):
        from aiida.orm import Node
        from aiida.orm.data import Data

        n = Node()
        d = Data()
        port = self.spec.get_dynamic_output()
        self.assertFalse(port.validate("foo")[0])
        self.assertFalse(port.validate(5)[0])
        self.assertFalse(port.validate(n)[0])
        self.assertTrue(port.validate(d)[0])
Code Example #6
File: generic.py  Project: zooks97/aiida_core
    def test_group_general(self):
        """
        General tests to verify that the group addition with the skip_orm=True flag
        work properly
        """
        from aiida.orm.group import Group
        from aiida.orm.data import Data

        node_01 = Data().store()
        node_02 = Data().store()
        node_03 = Data().store()
        node_04 = Data().store()
        node_05 = Data().store()
        node_06 = Data().store()
        node_07 = Data().store()
        node_08 = Data().store()
        nodes = [
            node_01, node_02, node_03, node_04, node_05, node_06, node_07,
            node_08
        ]

        group = Group(name='test_adding_nodes').store()
        # Single node
        group.add_nodes(node_01, skip_orm=True)
        # List of nodes
        group.add_nodes([node_02, node_03], skip_orm=True)
        # Single DbNode
        group.add_nodes(node_04.dbnode, skip_orm=True)
        # List of DbNodes
        group.add_nodes([node_05.dbnode, node_06.dbnode], skip_orm=True)
        # List of orm.Nodes and DbNodes
        group.add_nodes([node_07, node_08.dbnode], skip_orm=True)

        # Check
        self.assertEqual(set(_.pk for _ in nodes),
                         set(_.pk for _ in group.nodes))

        # Try to add a node that is already present: there should be no problem
        group.add_nodes(node_01, skip_orm=True)
        self.assertEqual(set(_.pk for _ in nodes),
                         set(_.pk for _ in group.nodes))
Code Example #7
File: generic.py  Project: zooks97/aiida_core
    def test_group_batch_size(self):
        """
        Test that the group addition in batches works as expected.
        """
        from aiida.orm.group import Group
        from aiida.orm.data import Data

        # Create 100 nodes
        nodes = []
        for _ in range(100):
            nodes.append(Data().store())

        # Add nodes to groups using different batch size. Check in the end the
        # correct addition.
        batch_sizes = (1, 3, 10, 1000)
        for batch_size in batch_sizes:
            group = Group(name='test_batches_' + str(batch_size)).store()
            group.add_nodes(nodes, skip_orm=True, batch_size=batch_size)
            self.assertEqual(set(_.pk for _ in nodes),
                             set(_.pk for _ in group.nodes))
Code Example #8
    def test_stash(self):
        """
        Here I'm testing the 'stash'
        """
        # creating a first calculation with 3 input data nodes:
        c = Calculation().store()
        dins = set() # To compare later, dins is a set of the input data pks.
        for i in range(3):
            data_in = Data().store()
            dins.add(data_in.id)
            # ~ c.add_incoming(data_in, 
                    # ~ link_type=LinkType.INPUT_CALC,
                    # ~ link_label='lala-{}'.format(i))
            c.add_link_from(data_in, 
                    link_type=LinkType.INPUT,
                    label='lala-{}'.format(i))

        # Creating output data for that calculation:
        douts = set() # Similar to dins, this is the set of data output pks
        for i in range(4):
            data_out = Data().store()
            douts.add(data_out.id)
            # ~ data_out.add_incoming(c,
                    # ~ link_type=LinkType.CREATE,
                    # ~ link_label='lala-{}'.format(i))
            data_out.add_link_from(c,
                    link_type=LinkType.CREATE,
                    label='lala-{}'.format(i))

        # adding another calculation, with one input from c's outputs,
        # and one input from c's inputs
        c2 = Calculation().store()
        # ~ c2.add_incoming(data_in, link_type=LinkType.INPUT_CALC, link_label='b')
        # ~ c2.add_incoming(data_out, link_type=LinkType.INPUT_CALC, link_label='c')
        c2.add_link_from(data_in, link_type=LinkType.INPUT, label='b')
        c2.add_link_from(data_out, link_type=LinkType.INPUT, label='c')


        # Also here, start with a set that only contains the starting calculation:
        es = get_basket(node_ids=(c.id,))
        # Creating the rule for getting input nodes:
        rule_in = UpdateRule(QueryBuilder().append(
                Node, tag='n').append(
                Node, input_of='n'))
        # ~ rule_in = UpdateRule(QueryBuilder().append(
                # ~ Node, tag='n').append(
                # ~ Node, with_outgoing='n'))
        # Creating the rule for getting output nodes
        rule_out = UpdateRule(QueryBuilder().append(
                Node, tag='n').append(
                Node, output_of='n'))
                # ~ Node, with_incoming='n'))
        #, edge_filters={'type':LinkType.CREATE.value}))


        # I'm testing the input rule. Since I'm updating, I should
        # have the inputs and the calculation itself:
        is_set = rule_in.run(es.copy())['nodes']._set
        self.assertEqual(is_set, dins.union({c.id}))

        # Testing the output rule; here too, the outputs plus calculation c are expected:
        is_set = rule_out.run(es.copy())['nodes']._set
        self.assertEqual(is_set, douts.union({c.id}))

        # Now I'm testing the rule sequence.
        # I first apply the rule to get outputs, then the rule to get inputs
        rs1 = RuleSequence((rule_out, rule_in))
        is_set = rs1.run(es.copy())['nodes']._set
        # I expect the union of inputs, outputs, and the calculation:
        self.assertEqual(is_set, douts.union(dins).union({c.id}))

        # If the order of the rules is exchanged, I end up also attaching c2 to the results.
        # This is because c and c2 share one data input:
        rs2 = RuleSequence((rule_in, rule_out))
        is_set = rs2.run(es.copy())['nodes']._set
        self.assertEqual(is_set, douts.union(dins).union({c.id, c2.id}))

        # Testing similar rule, but with the possibility to stash the results:
        stash = es.copy(with_data=False)
        rsave = RuleSaveWalkers(stash)
        # Checking whether the rule does the right thing, i.e. if I stash the result,
        # the active walkers should be an empty set:
        self.assertEqual(rsave.run(es.copy()), es.copy(with_data=False))
        # Whereas the stash contains the same data as the starting point:
        self.assertEqual(stash, es)
        rs2 = RuleSequence((
                RuleSaveWalkers(stash), rule_in,
                RuleSetWalkers(stash), rule_out))
        is_set = rs2.run(es.copy())['nodes']._set
        # Now I test whether the stash does the right thing,
        # namely not including c2 in the results:
        self.assertEqual(is_set, douts.union(dins).union({c.id}))
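For readability, the same stashing sequence once more with each step annotated; this is a restatement of the code above, not a different API:
        rs2 = RuleSequence((
                RuleSaveWalkers(stash),   # save the current walkers into the stash
                rule_in,                  # the input rule defined above
                RuleSetWalkers(stash),    # set the walkers back from the stash
                rule_out))                # the output rule defined above
        is_set = rs2.run(es.copy())['nodes']._set
        # As asserted above, stashing keeps c2 out of the result:
        self.assertEqual(is_set, douts.union(dins).union({c.id}))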