Example #1
    def test_iteration(self):
        study_config = self.study_config
        pipeline = Pipeline()
        pipeline.set_study_config(study_config)
        # iterate DummyProcess over its 'truc' (input) and 'bidule' (output)
        # file parameters
        pipeline.add_iterative_process(
            'dummy',
            'capsul.attributes.test.test_attributed_process.DummyProcess',
            ['truc', 'bidule'])
        pipeline.autoexport_nodes_parameters()
        cm = ProcessCompletionEngine.get_completion_engine(pipeline)
        atts = cm.get_attribute_values()
        atts.center = ['muppets']
        atts.subject = ['kermit', 'piggy', 'stalter', 'waldorf']
        cm.complete_parameters()
        # completion should generate one path per subject; normpath keeps the
        # comparison portable across platforms
        self.assertEqual([os.path.normpath(p) for p in pipeline.truc], [
            os.path.normpath(p) for p in [
                '/tmp/in/DummyProcess_truc_muppets_kermit',
                '/tmp/in/DummyProcess_truc_muppets_piggy',
                '/tmp/in/DummyProcess_truc_muppets_stalter',
                '/tmp/in/DummyProcess_truc_muppets_waldorf'
            ]
        ])
        self.assertEqual([os.path.normpath(p) for p in pipeline.bidule], [
            os.path.normpath(p) for p in [
                '/tmp/out/DummyProcess_bidule_muppets_kermit',
                '/tmp/out/DummyProcess_bidule_muppets_piggy',
                '/tmp/out/DummyProcess_bidule_muppets_stalter',
                '/tmp/out/DummyProcess_bidule_muppets_waldorf'
            ]
        ])
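
The tests in these examples reference DummyProcess from Capsul's test modules. For orientation, here is a minimal sketch of what such a process might look like, assuming Capsul's trait-based Process API; this is an illustration only: the attribute completion setup that maps center and subject to file paths is omitted, and the real class lives in capsul.attributes.test.test_attributed_process.

from traits.api import File
from capsul.api import Process


class DummyProcess(Process):
    """Hypothetical stand-in: one input file, one output file."""

    def __init__(self):
        super(DummyProcess, self).__init__()
        # declare one input and one output file parameter
        self.add_trait('truc', File(output=False))
        self.add_trait('bidule', File(output=True))

    def _run_process(self):
        # copy the input file to the output location; this matches the
        # content check performed in Example #2
        with open(self.truc) as fin, open(self.bidule, 'w') as fout:
            fout.write(fin.read())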
Example #2
    def test_run_iteration_swf(self):
        study_config = self.study_config
        tmp_dir = tempfile.mkdtemp(prefix='capsul_')
        self.temps.append(tmp_dir)  # remembered so tear-down can remove it

        study_config.input_directory = os.path.join(tmp_dir, 'in')
        study_config.output_directory = os.path.join(tmp_dir, 'out')
        os.mkdir(study_config.input_directory)
        os.mkdir(study_config.output_directory)

        pipeline = Pipeline()
        pipeline.set_study_config(study_config)
        pipeline.add_iterative_process(
            'dummy',
            'capsul.attributes.test.test_attributed_process.DummyProcess',
            ['truc', 'bidule'])
        pipeline.autoexport_nodes_parameters()
        cm = ProcessCompletionEngine.get_completion_engine(pipeline)
        atts = cm.get_attribute_values()
        atts.center = ['muppets']
        atts.subject = ['kermit', 'piggy', 'stalter', 'waldorf']
        cm.complete_parameters()

        # create one input file per subject
        for s in atts.subject:
            in_file = os.path.join(study_config.input_directory,
                                   'DummyProcess_truc_muppets_%s' % s)
            with open(in_file, 'w') as f:
                f.write('%s\n' % s)

        # run through soma-workflow
        study_config.use_soma_workflow = True
        study_config.run(pipeline)

        # check that each iteration produced its output file
        out_files = [
            os.path.join(study_config.output_directory,
                         'DummyProcess_bidule_muppets_%s' % s)
            for s in atts.subject
        ]
        for s, out_file in zip(atts.subject, out_files):
            self.assertTrue(os.path.isfile(out_file))
            with open(out_file) as f:
                self.assertEqual(f.read(), '%s\n' % s)
Example #3
    def test_iteration(self):
        study_config = self.study_config
        pipeline = Pipeline()
        pipeline.set_study_config(study_config)
        pipeline.add_iterative_process(
            'dummy',
            'capsul.attributes.test.test_attributed_process.DummyProcess',
            ['truc', 'bidule'])
        pipeline.autoexport_nodes_parameters()
        cm = ProcessCompletionEngine.get_completion_engine(pipeline)
        atts = cm.get_attribute_values()
        atts.center = ['muppets']
        atts.subject = ['kermit', 'piggy', 'stalter', 'waldorf']
        cm.complete_parameters()
        # same check as Example #1, but comparing raw (POSIX-style) paths
        self.assertEqual(pipeline.truc,
                         ['/tmp/in/DummyProcess_truc_muppets_kermit',
                          '/tmp/in/DummyProcess_truc_muppets_piggy',
                          '/tmp/in/DummyProcess_truc_muppets_stalter',
                          '/tmp/in/DummyProcess_truc_muppets_waldorf'])
        self.assertEqual(pipeline.bidule,
                         ['/tmp/out/DummyProcess_bidule_muppets_kermit',
                          '/tmp/out/DummyProcess_bidule_muppets_piggy',
                          '/tmp/out/DummyProcess_bidule_muppets_stalter',
                          '/tmp/out/DummyProcess_bidule_muppets_waldorf'])
Example #4
def data_history_pipeline(filename, project):
    """
    Get the complete "useful" history of a file in the database, as a "fake
    pipeline".

    The pipeline contains fake processes (unspecialized, direct Process
    instances), with all parameters (all being of type Any). The pipeline has
    connections, and gets all upstream ancestors of the file, so it contains
    all processing used to produce the latest version of the file (it may have
    been modified several time during the processing), and gets as inputs all
    input files which were used to produce the final data.

    Processing bricks which are not used, probably part of earlier runs which
    have been orphaned because the data file has been overwritten, are not
    listed in this history.
    """

    procs, links = get_data_history_processes(filename, project)

    if procs:
        pipeline = Pipeline()
        # create one fake pipeline node per brick actually used
        for proc in procs.values():
            if proc.used:
                pproc = brick_to_process(proc.brick, project)
                proc.process = pproc
                name = pproc.name
                if name in pipeline.nodes:
                    name = '%s_%s' % (name, pproc.uuid.replace('-', '_'))
                pproc.node_name = name
                pipeline.add_process(name, pproc)

        for link in links:
            # each link is (src_proc, src_param, dst_proc, dst_param); a None
            # process means that end must become a pipeline-level parameter
            if link[0] is None:
                src = link[1]
                if src not in pipeline.traits():
                    pipeline.export_parameter(link[2].process.node_name,
                                              link[3], src)
                    src = None
                elif pipeline.trait(src).output:
                    # already taken as an output: export under another name
                    done = False
                    n = 0
                    while not done:
                        src2 = '%s_%d' % (src, n)
                        if src2 not in pipeline.traits():
                            pipeline.export_parameter(
                                link[2].process.node_name, link[3], src2)
                            src = None
                            done = True
                        elif not pipeline.trait(src2).output:
                            src = src2
                            done = True
                        n += 1
            else:
                src = '%s.%s' % (link[0].process.node_name, link[1])
            if link[2] is None:
                dst = link[3]
                if dst not in pipeline.traits():
                    pipeline.export_parameter(link[0].process.node_name,
                                              link[1], dst)
                    dst = None
                elif not pipeline.trait(dst).output:
                    # already taken as an input: export under another name
                    done = False
                    n = 0
                    while not done:
                        dst2 = '%s_%d' % (dst, n)
                        if dst2 not in pipeline.traits():
                            pipeline.export_parameter(
                                link[0].process.node_name, link[1], dst2)
                            dst = None
                            done = True
                        elif pipeline.trait(dst2).output:
                            dst = dst2
                            done = True
                        n += 1
            else:
                dst = '%s.%s' % (link[2].process.node_name, link[3])
            if src is not None and dst is not None:
                try:
                    pipeline.add_link('%s->%s' % (src, dst))
                except ValueError as e:
                    # incompatible or duplicate link: report it and keep going
                    print(e)

        return pipeline

    else:
        return None
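
For completeness, a minimal usage sketch of data_history_pipeline; 'project' is assumed to be an open project object wrapping the database, and the file name is illustrative only.

# hypothetical caller code
pipeline = data_history_pipeline('data/raw/subject01.nii', project)
if pipeline is None:
    print('no recorded history for this file')
else:
    # in a Capsul Pipeline, the empty-string key of pipeline.nodes is the
    # pipeline's own node; the others are the reconstructed bricks
    for node_name in pipeline.nodes:
        if node_name:
            print(node_name)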