    def test_add_task_1(self):
        '''Adding tasks grows the task list; re-adding an existing task is a no-op.'''
        to = TaskOrganizer()
        for i, task in enumerate(self.tasks):
            self.assertEquals(len(to.tasks), i)

            to.add_task(task)
            self.assertTrue(task in to.tasks)
            self.assertEquals(len(to.tasks), i+1)

        # adding the same task again doesn't do anything.
        self.assertEquals(len(to.tasks), len(self.tasks))
        to.add_task(self.tasks[0])
        self.assertEquals(len(to.tasks), len(self.tasks))

    def test_full_run(self):
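        '''Run the full pipeline: repeatedly pull runnable tasks and return fake results until nothing is left.'''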
        to = TaskOrganizer()
        for task in self.tasks:
            to.add_task(task)

        # batch 1 should be filtering and nothing else.
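        # pull_runnable_tasks() hands back a (results, skipped, impossible)
        # tuple; results is a list of (task, info) pairs for the tasks that
        # can run now.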
        results, skipped, impossible = to.pull_runnable_tasks()
        pp = [task.plugin for task, info in results]
        print '1', [p.name for p in pp]
        self.assertTrue(self.dfp in pp)
        self.assertTrue(self.efp in pp)
        self.assertEquals(len(pp), 4) # 2 for each trial.

        # nothing else should be runnable until these results come back.
        no_results, skipped, impossible = to.pull_runnable_tasks()
        self.assertEquals(no_results, [])

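        # simulate batch 1 finishing by handing fake results back to the
        # organizer; that should make the next batch of tasks runnable.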
        for task, info in results:
            return_fake_results(to, task)

        # batch 2 should be detection and upsampling.
        results, skipped, impossible = to.pull_runnable_tasks()

        pp = [task.plugin for task, info in results]
        print '2', [p.name for p in pp]
        self.assertTrue(self.esrp in pp or self.esrp2 in pp)
        self.assertTrue(self.sdp in pp)
        self.assertEquals(len(pp), 4) # 2 for each trial.

        no_results, skipped, impossible = to.pull_runnable_tasks()
        self.assertEquals(no_results, [])

        for task, info in results:
            return_fake_results(to, task)

        # batch 3 should be more upsampling.
        results, skipped, impossible = to.pull_runnable_tasks()

        pp = [task.plugin for task, info in results]
        print '3', [p.name for p in pp]
        self.assertTrue(self.esrp in pp or self.esrp2 in pp)
        self.assertEquals(len(pp), 2) # 1 for each trial.

        no_results, skipped, impossible = to.pull_runnable_tasks()
        self.assertEquals(no_results, [])

        for task, info in results:
            return_fake_results(to, task)

        # batch 4 should be feature extraction.
        results, skipped, impossible = to.pull_runnable_tasks()

        pp = [task.plugin for task, info in results]
        print '4', [p.name for p in pp]
        self.assertTrue(self.fep in pp)
        self.assertEquals(len(pp), 2) # 1 for each trial.

        no_results, skipped, impossible = to.pull_runnable_tasks()
        self.assertEquals(no_results, [])
        
        for task, info in results:
            return_fake_results(to, task)

        # batch 5 should be clustering.
        results, skipped, impossible = to.pull_runnable_tasks()

        pp = [task.plugin for task, info in results]
        print '5', [p.name for p in pp]
        self.assertTrue(self.cp in pp)
        self.assertEquals(len(pp), 1) # 1 in total (trials are pooled).

        no_results, skipped, impossible = to.pull_runnable_tasks()
        self.assertEquals(no_results, [])

        for task, info in results:
            return_fake_results(to, task)

        # batch 6 should be clustering revision.
        results, skipped, impossible = to.pull_runnable_tasks()

        pp = [task.plugin for task, info in results]
        print '6', [p.name for p in pp]
        self.assertTrue(self.crp in pp)
        self.assertEquals(len(pp), 1) # 1 in total (trials are pooled).

        no_results, skipped, impossible = to.pull_runnable_tasks()
        self.assertEquals(no_results, [])

        for task, info in results:
            return_fake_results(to, task)

        # batch 7 should be summary plotting.
        results, skipped, impossible = to.pull_runnable_tasks()

        pp = [task.plugin for task, info in results]
        print '7', [p.name for p in pp]
        self.assertTrue(self.spp in pp)
        self.assertEquals(len(pp), 2) # 1 for each trial.

        no_results, skipped, impossible = to.pull_runnable_tasks()
        self.assertEquals(no_results, [])

        for task, info in results:
            return_fake_results(to, task)

        # all done
        no_results, skipped, impossible = to.pull_runnable_tasks()
        self.assertEquals(no_results, [])