def test_parameter_sweep_plot_level_warnings(self, mgmt_data):
    """
    Execute a parameter sweep and assert that constructing the bokeh
    figure object emits no warnings.

    :param mgmt_data: management department data used to seed the model
    :type mgmt_data: dictionary
    :return: Pass/Fail
    :rtype: Boolean
    """
    with warnings.catch_warnings(record=True) as w:
        # Record every warning, including repeats, so none slip through.
        warnings.simplefilter("always")
        modlist = [Mod_Validate_Sweep(**mgmt_data)]
        c = Comparison(modlist)

        plot_settings = {
            'plottype': 'parameter sweep percentage',
            'intervals': 'empirical',
            'number_of_runs': 500,  # number of simulations to average over
            'target': 0.25,
            'xlabel': 'Promotion Rate for Women',
            'ylabel': 'Proportion Women',
            'title': 'Sweep, Hire-Promote, Female Promotion Rate 1->2',
            'model_legend_label': ['Model 1, Hire-Promote',
                                   'Model 2, Promote-Hire'],
            'parameter_sweep_param': 'female_promotion_probability_1',
            'parameter_ubound': 0.5,
            'parameter_lbound': 0.05,
            'number_of_steps': 15,
        }
        c.plot_comparison_overall_chart(**plot_settings)
    # Any warning captured while building the figure fails the test.
    assert len(w) == 0
def test_parameter_sweep_function_overall_catch_warnings(self, mgmt_data):
    """
    Execute a parameter sweep and check for any warnings generated while
    computing the plot figure. In the past there were issues with column
    sizes not matching in the plot functions.

    :param mgmt_data: management department data used to seed the model
    :type mgmt_data: dictionary
    :return: Pass/Fail
    :rtype: Boolean
    """
    with warnings.catch_warnings(record=True) as w:
        # Record every warning, including repeats, so none slip through.
        warnings.simplefilter("always")
        modlist = [Mod_Validate_Sweep(**mgmt_data)]

        c = Comparison(modlist)

        plot_settings = {
            'plottype': 'parameter sweep percentage',
            'intervals': 'empirical',
            'number_of_runs': 3,  # number of simulations to average over
            'target': 0.25,
            'xlabel': 'Hiring Rate for Women',
            'ylabel': 'Proportion Women',
            'title': 'Parameter Sweep Validation, uniform noise(0,'
                     '0.1)',
            'model_legend_label': ['Model 1, Hire-Promote',
                                   'Model 2, Promote-Hire'],
            'parameter_sweep_param': 'bf1',
            'parameter_ubound': 0.6,
            'parameter_lbound': 0.05,
            'number_of_steps': 3,
        }
        c.plot_comparison_overall_chart(**plot_settings)
    # Any warning captured while building the figure fails the test.
    assert len(w) == 0
def test_plot_overall_mf_numbers(mgmt_data):
    """
    Smoke test: render the male/female numbers comparison chart for a
    single Model 3 run and display it.

    :param mgmt_data: management department data used to seed the model
    :type mgmt_data: dictionary
    """
    modlist = [Model3GenderDiversity(**mgmt_data)]
    modlist[0].init_default_hiring_rate()
    c = Comparison(modlist)

    plot_settings = {
        'plottype': 'male female numbers',
        'intervals': 'empirical',
        'number_of_runs': 100,  # number of simulations to average over
        'target': 0.25,  # target percentage of women in the department
        # Main plot settings
        'xlabel': 'Years',
        'ylabel': 'Number of Professors',
        'title': 'Male Female numbers plot',
        'line_width': 2,
        'transparency': 0.25,
        # fixed typo: 'Mode 2' -> 'Model 2'
        'model_legend_label': ['Model 3', 'Model 2, Promote-Hire'],
        'legend_location': 'top_right',
        'height_': height,
        'width_': width,
        'male_female_numbers_plot': True,
        'mf_male_label': ['Male Model 3', 'Female Model 3'],
        'mf_target_label': ['Target 3']
    }
    show(c.plot_comparison_overall_chart(**plot_settings))
def test_plot_overall_unfilled_vacancies(mgmt_data):
    """
    Smoke test: render the unfilled-vacancies comparison chart for a
    single Model 3 run and display it.

    :param mgmt_data: management department data used to seed the model
    :type mgmt_data: dictionary
    """
    modlist = [Model3GenderDiversity(**mgmt_data)]
    modlist[0].init_default_hiring_rate()
    c = Comparison(modlist)

    plot_settings = {
        'plottype': 'unfilled vacancies',
        'intervals': 'empirical',
        'number_of_runs': 100,  # number of simulations to average over
        'target': 0.25,  # target percentage of women in the department
        # Main plot settings
        'xlabel': 'Years',
        'ylabel': 'Unfilled Vacancies',
        'title': 'Unfilled Vacancies',
        'line_width': 2,
        'transparency': 0.25,
        # fixed typo: 'Mode 2' -> 'Model 2'
        'model_legend_label': ['New Model', 'Model 2, Promote-Hire'],
        'legend_location': 'top_right',
        'height_': height,
        'width_': width,
    }
    show(c.plot_comparison_overall_chart(**plot_settings))
def test_bokeh_sweep_plot_overall(mgmt_data):
    """Render a parameter-sweep percentage chart over the bf1 parameter."""
    models = [Model3GenderDiversity(**mgmt_data)]
    models[0].init_default_hiring_rate()

    comparison = Comparison(models)

    sweep_settings = dict(
        plottype='parameter sweep percentage',
        intervals='empirical',
        number_of_runs=100,  # number of simulations to average over
        target=0.25,
        xlabel='Years',
        ylabel='Proportion Women',
        title='Parameter Sweep Gender Percentage',
        model_legend_label=['New Model', 'Model 2, Promote-Hire'],
        parameter_sweep_param='bf1',
        parameter_ubound=0.6,
        parameter_lbound=0.05,
        number_of_steps=5,
    )
    show(comparison.plot_comparison_overall_chart(**sweep_settings))
def test_bokeh_comparison_plot_overall_one_model(mgmt_data):
    """
    Smoke test: render the gender-proportion chart for a single
    incremental growth-forecast model and display it.

    :param mgmt_data: management department data used to seed the model
    :type mgmt_data: dictionary
    """
    modlist = [ModelGenderDiversityGrowthForecastIncremental(**mgmt_data)]
    modlist[0].init_default_hiring_rate()
    modlist[0].init_growth_rate([0.02, 0.01, 0.10, 0.05])

    c = Comparison(modlist)

    plot_settings = {
        'plottype': 'gender proportion',
        'intervals': 'empirical',
        'number_of_runs': 100,  # number of simulations to average over
        'target': 0.25,  # target percentage of women in the department
        # Main plot settings
        'xlabel': 'Years',
        'ylabel': 'Proportion Women',
        'title': 'Change in Proportion Women',
        'line_width': 2,
        'transparency': 0.25,
        # fixed typo: 'Mode 2' -> 'Model 2'
        'model_legend_label': ['New Model', 'Model 2, Promote-Hire'],
        'legend_location': 'top_right',
        'height_': height,
        'width_': width,
        'year_offset': 0
    }
    show(c.plot_comparison_overall_chart(**plot_settings))
def test_parameter_sweep_plot_level_warnings(self, mgmt_data):
    """
    Execute a parameter sweep and assert that constructing the bokeh
    figure object emits no warnings.

    :param mgmt_data: management department data used to seed the model
    :type mgmt_data: dictionary
    :return: Pass/Fail
    :rtype: Boolean
    """
    # NOTE(review): this redefines an earlier function with the same name
    # in this file, shadowing it so the earlier copy never runs under
    # pytest; one of the two definitions should be removed or renamed.
    with warnings.catch_warnings(record=True) as w:
        # Record every warning, including repeats, so none slip through.
        warnings.simplefilter("always")
        modlist = [Mod_Validate_Sweep(**mgmt_data)]
        c = Comparison(modlist)

        plot_settings = {
            'plottype': 'parameter sweep percentage',
            'intervals': 'empirical',
            'number_of_runs': 500,  # number of simulations to average over
            'target': 0.25,
            'xlabel': 'Promotion Rate for Women',
            'ylabel': 'Proportion Women',
            'title': 'Sweep, Hire-Promote, Female Promotion Rate 1->2',
            'model_legend_label': ['Model 1, Hire-Promote',
                                   'Model 2, Promote-Hire'],
            'parameter_sweep_param': 'female_promotion_probability_1',
            'parameter_ubound': 0.5,
            'parameter_lbound': 0.05,
            'number_of_steps': 15,
        }
        c.plot_comparison_overall_chart(**plot_settings)
    # Any warning captured while building the figure fails the test.
    assert len(w) == 0
def test_parameter_sweep_function_overall_catch_warnings(self, mgmt_data):
    """
    Execute a parameter sweep and check for any warnings generated while
    computing the plot figure. In the past there were issues with column
    sizes not matching in the plot functions.

    :param mgmt_data: management department data used to seed the model
    :type mgmt_data: dictionary
    :return: Pass/Fail
    :rtype: Boolean
    """
    # NOTE(review): this redefines an earlier function with the same name
    # in this file, shadowing it so the earlier copy never runs under
    # pytest; one of the two definitions should be removed or renamed.
    with warnings.catch_warnings(record=True) as w:
        # Record every warning, including repeats, so none slip through.
        warnings.simplefilter("always")
        modlist = [Mod_Validate_Sweep(**mgmt_data)]

        c = Comparison(modlist)

        plot_settings = {
            'plottype': 'parameter sweep percentage',
            'intervals': 'empirical',
            'number_of_runs': 3,  # number of simulations to average over
            'target': 0.25,
            'xlabel': 'Hiring Rate for Women',
            'ylabel': 'Proportion Women',
            'title': 'Parameter Sweep Validation, uniform noise(0,'
                     '0.1)',
            'model_legend_label': ['Model 1, Hire-Promote',
                                   'Model 2, Promote-Hire'],
            'parameter_sweep_param': 'bf1',
            'parameter_ubound': 0.6,
            'parameter_lbound': 0.05,
            'number_of_steps': 3,
        }
        c.plot_comparison_overall_chart(**plot_settings)
    # Any warning captured while building the figure fails the test.
    assert len(w) == 0
def test_bokeh_comparison_plot_dept_size_overall(mgmt_data):
    """Render a department-size comparison chart for three growth variants."""
    no_growth = Model3GenderDiversity(**mgmt_data)
    linear_growth = ModelGenderDiversityLinearGrowth(**mgmt_data)
    forecast_growth = ModelGenderDiversityGrowthForecast(**mgmt_data)

    no_growth.init_default_hiring_rate()
    linear_growth.init_default_hiring_rate()
    linear_growth.init_growth_rate(0.01)
    forecast_growth.init_default_hiring_rate()
    forecast_growth.init_growth_rate([73, 78, 83, 88])

    comparison = Comparison([no_growth, linear_growth, forecast_growth])

    settings = {
        'plottype': 'department size',
        'intervals': 'empirical',
        'number_of_runs': 100,  # number of simulations to average over
        'target': 0.25,  # target percentage of women in the department
        # Main plot settings
        'xlabel': 'Years',
        'ylabel': 'Department Size',
        'title': 'Department Size',
        'line_width': 2,
        'transparency': 0.25,
        'model_legend_label': [
            'Model 3 No Growth', 'Model 3 Lin Growth(1%/year)',
            'Model 3 Forecast(+5/5 year)'
        ],
        'legend_location': 'top_right',
        'height_': height,
        'width_': width,
    }
    show(comparison.plot_comparison_overall_chart(**settings))
def test_bokeh_comparison_plot_overall_multiple_models(mgmt_data):
    """Render the gender-proportion chart for three growth variants."""
    no_growth = Model3GenderDiversity(**mgmt_data)
    linear_growth = ModelGenderDiversityLinearGrowth(**mgmt_data)
    forecast_growth = ModelGenderDiversityGrowthForecast(**mgmt_data)

    no_growth.init_default_hiring_rate()
    linear_growth.init_default_hiring_rate()
    linear_growth.init_growth_rate(0.05)
    forecast_growth.init_default_hiring_rate()
    forecast_growth.init_growth_rate([73, 78, 83, 88])

    comparison = Comparison([no_growth, linear_growth, forecast_growth])

    settings = {
        'plottype': 'gender proportion',
        'intervals': 'empirical',
        'number_of_runs': 100,  # number of simulations to average over
        'target': 0.25,  # target percentage of women in the department
        # Main plot settings
        'xlabel': 'Years',
        'ylabel': 'Proportion Women',
        'title': 'Change in Proportion Women Overall',
        'transparency': 0.25,
        'model_legend_label': [
            'Model 3 No Growth', 'Model 3 Lin Growth', 'Model 3 Forecast'
        ],
    }
    show(comparison.plot_comparison_overall_chart(**settings))