Example #1
import numpy as np

# NOTE: 'btstrp' refers to the bootstrap helper module this snippet relies
# on; its import is not shown in the original code.
def Bootstrapping(n_boot, ci_alpha, analysisInfo):
    # Draw n_boot bootstrap replicates of the mean along axis 0.
    booting = btstrp.bootstrap(n_boot, np.mean, analysisInfo, axis=0)
    # Average of the replicates gives the bootstrap estimate of the mean.
    btmean = booting.mean(axis=0)
    # Bias-corrected and accelerated (BCa) confidence interval at level ci_alpha.
    btci = btstrp.bca_ci(booting,
                         np.mean,
                         analysisInfo,
                         alpha=ci_alpha,
                         axis=0)

    return btmean, btci
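
For context, here is a minimal, self-contained sketch of the same computation using only NumPy: resample the rows of a 2-D array with replacement, take the mean of each replicate, and report a plain percentile interval. Every name below is hypothetical, and the percentile interval omits the bias and acceleration corrections that the bca_ci call above presumably applies.

import numpy as np

def bootstrap_mean_ci(data, n_boot=5000, ci_alpha=0.05, seed=None):
    # data: 2-D array (samples x measures); resample rows with replacement.
    rng = np.random.default_rng(seed)
    n = data.shape[0]
    idx = rng.integers(0, n, size=(n_boot, n))
    replicates = data[idx].mean(axis=1)            # (n_boot x measures)
    mean_est = replicates.mean(axis=0)
    # Plain percentile interval; a BCa interval would adjust these cutoffs.
    ci_lower = np.percentile(replicates, 100 * (ci_alpha / 2), axis=0)
    ci_upper = np.percentile(replicates, 100 * (1 - ci_alpha / 2), axis=0)
    return mean_est, (ci_lower, ci_upper)
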
Example #2
def Bootstrapping(n_boot, ci_alpha, analysisInfo):
    booting = btstrp.bootstrap(n_boot, np.mean, analysisInfo, axis=0)
    btmean = booting.mean(axis=0)
    btci = btstrp.bca_ci(booting, np.mean, analysisInfo, alpha=ci_alpha, axis=0)

    return btmean, btci
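
Both versions are called the same way. A hypothetical invocation, assuming the btstrp module used above is importable and the scores are arranged as (simulations x track runs):

import numpy as np

# Fabricated skill scores: 100 simulations x 3 track runs.
scores = np.random.default_rng(2).normal(size=(100, 3))
btmean, btci = Bootstrapping(5000, 0.05, scores)
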
Example #3
def main(args):
    from ZigZag.ListRuns import ExpandTrackRuns, CommonTrackRuns, \
                                MultiSims2Sims

    n_boot = 5000
    ci_alpha = 0.05

    #if len(args.multiSims) < 2 :
    #    raise ValueError("Need at least 2 scenarios to analyze")

    if args.groupby is not None:
        # Perform substitution of the 'group' option with the 'into' value
        # from --group
        if args.fig_disp == 'group':
            args.fig_disp = args.groupby[1]
        if args.plot_disp == 'group':
            args.plot_disp = args.groupby[1]
        if args.tick_disp == 'group':
            args.tick_disp = args.groupby[1]

    if args.bw_mode:
        colors = ['0.25', '0.75', '0.5']
        if args.dispMode == 'bar':
            # Make the last one white
            colors.append('1.0')
        else:
            # Make the last one black
            colors.append('0.0')

        # NOTE: 'axes.color_cycle' was deprecated in matplotlib 1.5 in
        # favor of 'axes.prop_cycle'; this snippet targets the older API.
        plt.rcParams['axes.color_cycle'] = colors

    # These are experimental features in matplotlib and may not
    # exist in the user's installation.
    if 'style.cycle' in plt.rcParams:
        plt.rcParams['style.cycle'] = True
    if 'cycle.hatch' in plt.rcParams:
        plt.rcParams['cycle.hatch'] = True

    # Validate the command-line arguments for display options
    if (set(['skills', 'trackruns', 'scenarios']) != set(
        [args.fig_disp, args.plot_disp, args.tick_disp])):
        raise ValueError("The '--fig', '--plot', and '--tick' options"
                         " must each be unique:"
                         " [fig: %s, plot: %s, tick: %s)" %
                         (args.fig_disp, args.plot_disp, args.tick_disp))

    simNames = MultiSims2Sims(args.multiSims, args.directory)
    commonTrackRuns = CommonTrackRuns(simNames, args.directory)
    trackRuns = ExpandTrackRuns(commonTrackRuns, args.trackRuns)

    if args.signif_from is not None:
        if args.signif_from in trackRuns:
            trackRuns.remove(args.signif_from)

        if len(trackRuns) == 0:
            raise ValueError("Need at least one other track run to measure"
                             " the significance of the score difference")

        # Make sure the signif_from trackrun is at the end.
        trackRuns.append(args.signif_from)

    # (Scenarios x Skills x Sims x TrackRuns)
    analysis = MultiScenarioAnalyze(args.multiSims,
                                    args.skillNames,
                                    trackRuns,
                                    path=args.directory,
                                    tag_filters=args.filters)

    if args.signif_from is not None:
        from scipy.stats import percentileofscore
        analysis = analysis[:, :, :, -1].x[..., None] - analysis[:, :, :, :-1]
        trackRuns.pop()

        for skillIndex, skillName in enumerate(args.skillNames):
            print("Skill: ", skillName)
            sk_index = analysis.labelindex(skillName, axis=1)
            for sceneIndex, aScenario in enumerate(args.multiSims):
                print("  Scenario:", aScenario)
                sc_index = analysis.labelindex(aScenario, axis=0)

                # Cheating a bit because we aren't using the same replicates
                # that will be used later for reporting the estimated mean
                # and their confidence intervals.
                replicates = btstrp.bootstrap(n_boot,
                                              np.mean,
                                              analysis[sc_index,
                                                       sk_index, :, :].x,
                                              axis=0)
                print("  ", [
                    percentileofscore(replicates[:, i], 0.0)
                    for i in xrange(len(trackRuns))
                ])
                print(
                    "  ",
                    np.sum(analysis[sc_index, sk_index, :, :].x <= 0.0,
                           axis=0))

        #means = analysis.mean(axis=2)
        #stddevs = analysis.std(axis=2)
        #print(skewtest(analysis.x, axis=2))
        #print(kurtosistest(analysis.x, axis=2))
        #print("P-values", ttest_1samp(analysis.x, 0.0, axis=2)[1])

    if analysis.label[-1] != trackRuns:
        print("WARNING! The track runs labels aren't matching!")

    # meanSkills and the CIs are 3D (scenarios x skills x trackruns)
    (meanSkills, skills_ci_upper,
     skills_ci_lower) = ProcessAnalysis(analysis, n_boot, ci_alpha,
                                        args.multiSims, args.skillNames)

    display = {
        disp: data
        for disp, data in zip(['fig', 'plot', 'ticks'],
                              [args.fig_disp, args.plot_disp, args.tick_disp])
    }

    defaultLabels = {
        'skills': args.skillNames,
        'trackruns': trackRuns,
        'scenarios': args.multiSims
    }

    groupinfo = (None if args.groupby is None else dict(
        group=args.groupby[0], into=args.groupby[1], by=args.groupby[2]))

    #groupinfo = {'group' : 'trackruns',
    #             'into' : 'scenarios',
    #             'by' : 'framesBack'}

    # Default labels may be modified by Regroup()
    (meanSkills, skills_ci_upper,
     skills_ci_lower) = Regroup(groupinfo, defaultLabels, meanSkills,
                                skills_ci_upper, skills_ci_lower)

    #print(meanSkills.shape)
    #print(len(defaultLabels['scenarios']), len(defaultLabels['skills']),\
    #      len(defaultLabels['trackruns']))

    (meanSkills, skills_ci_upper,
     skills_ci_lower) = Rearrange(display, meanSkills, skills_ci_upper,
                                  skills_ci_lower)

    #print(meanSkills.shape)

    tickLabels = args.ticklabels if args.ticklabels is not None else \
                 defaultLabels[display['ticks']]
    figTitles = args.titles if args.titles is not None else \
                defaultLabels[display['fig']]
    plotLabels = args.plotlabels if args.plotlabels is not None else \
                 defaultLabels[display['plot']]

    #print(len(tickLabels), len(figTitles), len(plotLabels))

    figs = [plt.figure(figsize=args.figsize) for title in figTitles]
    legs = DisplayMultiSceneAnalysis(figTitles,
                                     plotLabels,
                                     tickLabels,
                                     meanSkills,
                                     skills_ci_upper,
                                     skills_ci_lower,
                                     figs,
                                     dispMode=args.dispMode)

    if args.showTables:
        DisplayTableAnalysis(figTitles, plotLabels, tickLabels, meanSkills,
                             skills_ci_upper, skills_ci_lower)

    for aFig, legend, title in zip(figs, legs, defaultLabels[display['fig']]):
        aFig.gca().set_xlabel(args.xlabel)
        if args.saveImgFile is not None:
            aFig.savefig("%s_%s.%s" %
                         (args.saveImgFile, title, args.imageType),
                         bbox_inches='tight',
                         pad_inches=0.25,
                         bbox_extra_artists=[legend])

    if args.doShow:
        plt.show()
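
The signif_from branch above subtracts every remaining track run's scores from the reference run's and then checks where zero falls in the bootstrap distribution of each mean difference: a percentile rank near 0 or 100 suggests the difference is unlikely to be resampling noise. Here is a standalone sketch of that check; the data are fabricated purely for illustration.

import numpy as np
from scipy.stats import percentileofscore

rng = np.random.default_rng(1)
# Fabricated score differences: 100 simulations x 2 track runs
# (reference run minus each remaining run).
diffs = rng.normal(loc=0.1, scale=0.5, size=(100, 2))

# Bootstrap the mean difference for each track run.
n_boot = 5000
idx = rng.integers(0, diffs.shape[0], size=(n_boot, diffs.shape[0]))
replicates = diffs[idx].mean(axis=1)               # (n_boot x runs)

# Percentile rank of zero within each bootstrap distribution.
print([percentileofscore(replicates[:, i], 0.0)
       for i in range(replicates.shape[1])])
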
Example #4
def main(args) :
    from ZigZag.ListRuns import ExpandTrackRuns, CommonTrackRuns, \
                                MultiSims2Sims

    n_boot = 5000
    ci_alpha = 0.05

    #if len(args.multiSims) < 2 :
    #    raise ValueError("Need at least 2 scenarios to analyze")

    if args.groupby is not None :
        # Perform substitution of the 'group' option with the 'into' value
        # from --group
        if args.fig_disp == 'group' :
            args.fig_disp = args.groupby[1]
        if args.plot_disp == 'group' :
            args.plot_disp = args.groupby[1]
        if args.tick_disp == 'group' :
            args.tick_disp = args.groupby[1]

    if args.bw_mode :
        colors = ['0.25', '0.75', '0.5']
        if args.dispMode == 'bar' :
            # Make the last one white
            colors.append('1.0')
        else :
            # Make the last one black
            colors.append('0.0')

        plt.rcParams['axes.color_cycle'] = colors

    # These are experimental features in matplotlib and may not
    # exist in the user's installation.
    if 'style.cycle' in plt.rcParams :
        plt.rcParams['style.cycle'] = True
    if 'cycle.hatch' in plt.rcParams :
        plt.rcParams['cycle.hatch'] = True

    # Validate the command-line arguments for display options
    if (set(['skills', 'trackruns', 'scenarios']) !=
        set([args.fig_disp, args.plot_disp, args.tick_disp])) :
        raise ValueError("The '--fig', '--plot', and '--tick' options"
                         " must each be unique:"
                         " [fig: %s, plot: %s, tick: %s)" %
                         (args.fig_disp, args.plot_disp, args.tick_disp))


    simNames = MultiSims2Sims(args.multiSims, args.directory)
    commonTrackRuns = CommonTrackRuns(simNames, args.directory)
    trackRuns = ExpandTrackRuns(commonTrackRuns, args.trackRuns)

    if args.signif_from is not None :
        if args.signif_from in trackRuns :
            trackRuns.remove(args.signif_from)

        if len(trackRuns) == 0 :
            raise ValueError("Need at least one other track run to measure"
                             " the significance of the score difference")

        # Make sure the signif_from trackrun is at the end.
        trackRuns.append(args.signif_from)

    # (Scenarios x Skills x Sims x TrackRuns)
    analysis = MultiScenarioAnalyze(args.multiSims, args.skillNames,
                                    trackRuns,
                                    path=args.directory,
                                    tag_filters=args.filters)

    if args.signif_from is not None :
        from scipy.stats import percentileofscore
        analysis = analysis[:, :, :, -1].x[..., None] - analysis[:, :, :, :-1]
        trackRuns.pop()

        for skillIndex, skillName in enumerate(args.skillNames) :
            print "Skill: ", skillName
            sk_index = analysis.labelindex(skillName, axis=1)
            for sceneIndex, aScenario in enumerate(args.multiSims) :
                print "  Scenario:", aScenario
                sc_index = analysis.labelindex(aScenario, axis=0)

                # Cheating a bit because we aren't using the same replicates
                # that will be used later for reporting the estimated mean
                # and their confidence intervals.
                replicates = btstrp.bootstrap(n_boot, np.mean,
                                       analysis[sc_index, sk_index, :, :].x,
                                       axis=0)
                print "  ", [percentileofscore(replicates[:, i], 0.0) for
                             i in xrange(len(trackRuns))]
                print "  ", np.sum(analysis[sc_index, sk_index, :, :].x <= 0.0,
                                   axis=0)

        #means = analysis.mean(axis=2)
        #stddevs = analysis.std(axis=2)
        #print skewtest(analysis.x, axis=2)
        #print kurtosistest(analysis.x, axis=2)
        #print "P-values", ttest_1samp(analysis.x, 0.0, axis=2)[1]

    if analysis.label[-1] != trackRuns :
        print "WARNING! The track runs labels aren't matching!"

    # meanSkills and the CIs are 3D (scenarios x skills x trackruns)
    (meanSkills,
     skills_ci_upper,
     skills_ci_lower) = ProcessAnalysis(analysis, n_boot, ci_alpha,
                                        args.multiSims, args.skillNames)

    display = {disp:data for disp, data in
               zip(['fig', 'plot', 'ticks'],
                   [args.fig_disp, args.plot_disp, args.tick_disp])}

    defaultLabels = {'skills' : args.skillNames,
                     'trackruns' : trackRuns,
                     'scenarios' : args.multiSims}

    groupinfo = (None if args.groupby is None else
                 dict(group=args.groupby[0],
                      into=args.groupby[1],
                      by=args.groupby[2]))

    #groupinfo = {'group' : 'trackruns',
    #             'into' : 'scenarios',
    #             'by' : 'framesBack'}

    # Default labels may be modified by Regroup()
    (meanSkills,
     skills_ci_upper,
     skills_ci_lower) = Regroup(groupinfo, defaultLabels, meanSkills,
                                                          skills_ci_upper,
                                                          skills_ci_lower)

    #print meanSkills.shape
    #print len(defaultLabels['scenarios']), len(defaultLabels['skills']),\
    #      len(defaultLabels['trackruns'])

    (meanSkills,
     skills_ci_upper,
     skills_ci_lower) = Rearrange(display, meanSkills,
                                           skills_ci_upper,
                                           skills_ci_lower)

    #print meanSkills.shape

    tickLabels = args.ticklabels if args.ticklabels is not None else \
                 defaultLabels[display['ticks']]
    figTitles = args.titles if args.titles is not None else \
                defaultLabels[display['fig']]
    plotLabels = args.plotlabels if args.plotlabels is not None else \
                 defaultLabels[display['plot']]

    #print len(tickLabels), len(figTitles), len(plotLabels)

    figs = [plt.figure(figsize=args.figsize) for title in figTitles]
    legs = DisplayMultiSceneAnalysis(figTitles, plotLabels, tickLabels,
                              meanSkills, skills_ci_upper, skills_ci_lower,
                              figs, dispMode=args.dispMode)

    if args.showTables :
        DisplayTableAnalysis(figTitles, plotLabels, tickLabels,
                             meanSkills, skills_ci_upper, skills_ci_lower)


    for aFig, legend, title in zip(figs, legs,
                                   defaultLabels[display['fig']]) :
        aFig.gca().set_xlabel(args.xlabel)
        if args.saveImgFile is not None :
            aFig.savefig("%s_%s.%s" % (args.saveImgFile,
                                       title, args.imageType),
                         bbox_inches='tight', pad_inches=0.25,
                         bbox_extra_artists=[legend])

    if args.doShow :
        plt.show()
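
One detail worth noting in the savefig call of both main variants: a legend placed outside the axes would be clipped when bbox_inches='tight' recomputes the figure's bounding box, so the legend is passed through bbox_extra_artists to keep it in frame. A minimal illustration with made-up data and filename:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4], label='example')
# Place the legend outside the axes...
leg = ax.legend(loc='upper left', bbox_to_anchor=(1.02, 1.0))
# ...and list it in bbox_extra_artists so the tight bounding box includes it.
fig.savefig('example.png', bbox_inches='tight', pad_inches=0.25,
            bbox_extra_artists=[leg])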