Example #1
    def test_execution_stats2(self):
        # schedule a dynamic job (hh can generate more jobs when executed)
        res = self.cc.comp_dynamic(hh)

        # schedule a job, with id 'myjobid', that collects execution
        # statistics for res and the jobs it generates
        myjobid = 'myjobid'
        compmake_execution_stats(self.cc, res, use_job_id=myjobid)

        # run everything, including the dynamically generated jobs
        self.assert_cmd_success('make recurse=1')
        self.assert_cmd_success('ls')

        # retrieve and check the result of the stats job
        res = get_job_userobject_resolved(myjobid, db=self.db)
        check_result(res)

        print(res)

        # the stats should cover hh and everything it spawned
        self.assertEqual(res['jobs'], set(['hh-gg-ff', 'hh-gg', 'hh']))
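
The functions hh, gg, and ff are not shown in this snippet. A minimal sketch of definitions that would produce the asserted job set, assuming compmake's convention of prefixing dynamically generated jobs with the parent job id, could look like this:

# Hypothetical definitions (not part of the original snippet), chosen so that
# the dynamic job 'hh' generates 'hh-gg', which in turn generates 'hh-gg-ff'.
def ff():
    # plain job: just returns a value
    return 42

def gg(context):
    # dynamic job: schedules a nested plain job
    return context.comp(ff)

def hh(context):
    # dynamic job: schedules a nested dynamic job
    return context.comp_dynamic(gg)

With definitions like these, 'make recurse=1' executes hh, the jobs it generates, and the stats job 'myjobid', whose resolved result lists all three job ids.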
Example #2
    def define_jobs_context(self, context):
        sizes = np.linspace(16, 256, 32).astype("int")
        # sizes = [16, 32, 64, 128, 256, 512]
        nobs = 500
        streams = ["test_gauss_drx1"]
        estimators = [
            "test_ddsest_unc_refine0",
            "test_ddsest_unc_refine0ns",  # don't stop
            "test_ddsest_unc_fast_order",
        ]
        max_displs = [0.1, 0.15, 0.2, 0.25, 0.3]

        def should_do(estimator, shape):
            # which (estimator, size) combinations to run:
            # the fast-order estimator is only run up to size 128
            if estimator in ["test_ddsest_unc_refine0", "test_ddsest_unc_refine0ns"]:
                return True
            if estimator == "test_ddsest_unc_fast_order":
                return shape <= 128
            assert False, estimator

        results = StoreResults()
        comp_stats = StoreResults()

        combs = iterate_context_names_quartet(context, sizes, max_displs, streams, estimators)
        for c, shape, max_displ, stream, estimator in combs:
            if not should_do(estimator, shape):
                continue

            id_stream = stream + "_%s_%s" % (shape, nobs)
            key = dict(
                length=nobs, shape=shape, stream=stream, estimator=estimator, id_stream=id_stream, max_displ=max_displ
            )
            learned = c.subtask(DDSLLearn, stream=id_stream, estimator=estimator, max_displ=max_displ)
            results[key] = learned
            # schedule a job that collects execution statistics for the learning job
            comp_stats[key] = compmake_execution_stats(learned)

        source_descs = {}
        # For each sample, show the cpu for each algorithm
        jobs_tables_by_sample(
            context,
            id_table="cpu",
            allstats=comp_stats,
            one_table_for_each="id_stream",
            cols_fields=["cpu_time", "wall_time"],
            rows_field="estimator",
            source_descs=source_descs,
        )

        estimators_subsets = get_estimators_subsets(estimators)
        # one plot for each group
        for id_group, estimators in estimators_subsets.items():
            c = context.child(id_group)
            group_runs = comp_stats.select(lambda k: k["estimator"] in estimators)
            report = c.comp(report_cputime, group_runs)
            c.add_report(report, "cputime", group=id_group)
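
In this example, StoreResults acts as a store keyed by dicts, and select() filters entries by a predicate on the key. The following simplified stand-in (an illustration of the usage pattern only, not the real StoreResults implementation) shows just those two operations:

# Simplified, hypothetical stand-in for the dict-keyed store pattern used above.
class DictKeyedStore:
    def __init__(self):
        self._entries = []  # list of (key_dict, value) pairs

    def __setitem__(self, key, value):
        self._entries.append((dict(key), value))

    def select(self, predicate):
        # return a new store with only the entries whose key satisfies the predicate
        out = DictKeyedStore()
        out._entries = [(k, v) for (k, v) in self._entries if predicate(k)]
        return out

# usage mirroring the grouping step above:
store = DictKeyedStore()
store[dict(estimator="test_ddsest_unc_refine0", shape=64)] = "learned-job"
store[dict(estimator="test_ddsest_unc_fast_order", shape=64)] = "other-job"
subset = store.select(lambda k: k["estimator"] in ["test_ddsest_unc_refine0"])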