def generate_narratives(self):
    regression_narrative_obj = LinearRegressionNarrative(
        self._df_regression_result,
        self._correlations,
        self._dataframe_helper,
        self._dataframe_context,
        self._metaParser,
        self._spark
    )
    main_card_data = regression_narrative_obj.generate_main_card_data()
    main_card_narrative = NarrativesUtils.get_template_output(self._base_dir,
                                                              'regression_main_card.html', main_card_data)
    self.narratives['main_card'] = {}
    self.narratives["main_card"]['paragraphs'] = NarrativesUtils.paragraph_splitter(main_card_narrative)
    self.narratives["main_card"]['header'] = 'Key Measures that affect ' + self.result_column
    self.narratives["main_card"]['chart'] = {}
    self.narratives["main_card"]['chart']['heading'] = ''
    self.narratives["main_card"]['chart']['data'] = [
        [i for i, j in self._all_coeffs],
        [j['coefficient'] for i, j in self._all_coeffs]
    ]
    self.narratives["main_card"]['chart']['label'] = {
        'x': 'Measure Name',
        'y': 'Change in ' + self.result_column + ' per unit increase'
    }

    main_card = NormalCard()
    main_card_header = HtmlData(data='<h3>Key Measures that affect ' + self.result_column + "</h3>")
    main_card_paragraphs = NarrativesUtils.block_splitter(main_card_narrative, self._blockSplitter)
    main_card_chart_data = [{"key": val[0], "value": val[1]} for val in
                            zip([i for i, j in self._all_coeffs],
                                [j['coefficient'] for i, j in self._all_coeffs])]
    main_card_chart = NormalChartData(data=main_card_chart_data)
    mainCardChartJson = ChartJson()
    mainCardChartJson.set_data(main_card_chart.get_data())
    mainCardChartJson.set_label_text({'x': 'Influencing Factors',
                                      'y': 'Change in ' + self.result_column + ' per unit increase'})
    mainCardChartJson.set_chart_type("bar")
    mainCardChartJson.set_axes({"x": "key", "y": "value"})
    mainCardChartJson.set_yaxis_number_format(".2f")
    # st_info = ["Test : Regression", "Threshold for p-value: 0.05", "Effect Size: Regression Coefficient"]
    chart_data = sorted(main_card_chart_data, key=lambda x: x["value"], reverse=True)
    statistical_info_array = [
        ("Test Type", "Regression"),
        ("Effect Size", "Coefficients"),
        ("Max Effect Size", chart_data[0]["key"]),
        ("Min Effect Size", chart_data[-1]["key"]),
    ]
    statistical_inference = ""
    if len(chart_data) == 1:
        statistical_inference = ("{} is the only variable that has a significant influence over {} (Target), "
                                 "with an Effect size of {}").format(
            chart_data[0]["key"], self._dataframe_context.get_result_column(),
            round(chart_data[0]["value"], 4))
    elif len(chart_data) == 2:
        statistical_inference = ("There are two variables ({} and {}) that have a significant influence over "
                                 "{} (Target), with Effect sizes of {} and {} respectively").format(
            chart_data[0]["key"], chart_data[1]["key"], self._dataframe_context.get_result_column(),
            round(chart_data[0]["value"], 4), round(chart_data[1]["value"], 4))
    else:
        statistical_inference = ("There are {} variables that have a significant influence over {} (Target), "
                                 "with Effect sizes ranging from {} to {}").format(
            len(chart_data), self._dataframe_context.get_result_column(),
            round(chart_data[0]["value"], 4), round(chart_data[-1]["value"], 4))
    if statistical_inference != "":
        statistical_info_array.append(("Inference", statistical_inference))
    statistical_info_array = NarrativesUtils.statistical_info_array_formatter(statistical_info_array)

    main_card.set_card_data(data=[main_card_header] + main_card_paragraphs +
                                 [C3ChartData(data=mainCardChartJson, info=statistical_info_array)])
    main_card.set_card_name("Key Influencers")
    self._regressionNode.add_a_card(main_card)

    count = 0
    for measure_column in self.significant_measures:
        sigMeasureNode = NarrativesTree()
        sigMeasureNode.set_name(measure_column)
        measureCard1 = NormalCard()
        measureCard1.set_card_name("{}: Impact on {}".format(measure_column, self.result_column))
        measureCard1Data = []
        if self._run_dimension_level_regression:
            measureCard2 = NormalCard()
            measureCard2.set_card_name("Key Areas where it Matters")
            measureCard2Data = []

        measure_column_cards = {}
        card1data = regression_narrative_obj.generate_card1_data(measure_column)
        card1heading = "<h3>Impact of " + measure_column + " on " + self.result_column + "</h3>"
        card1data.update({"blockSplitter": self._blockSplitter})
        card1narrative = NarrativesUtils.get_template_output(self._base_dir,
                                                             'regression_card1.html', card1data)
        card1paragraphs = NarrativesUtils.block_splitter(card1narrative, self._blockSplitter)
        card0 = {"paragraphs": card1paragraphs}
        card0["charts"] = {}
        card0['charts']['chart2'] = {}
        # card0['charts']['chart2']['data'] = card1data["chart_data"]
        # card0['charts']['chart2']['heading'] = ''
        # card0['charts']['chart2']['labels'] = {}
        card0['charts']['chart1'] = {}
        card0["heading"] = card1heading
        measure_column_cards['card0'] = card0

        measureCard1Header = HtmlData(data=card1heading)
        measureCard1Data += [measureCard1Header]
        measureCard1para = card1paragraphs
        measureCard1Data += measureCard1para

        if self._run_dimension_level_regression:
            print("running narratives for key area dict")
            self._dim_regression = self.run_regression_for_dimension_levels()
            card2table, card2data = regression_narrative_obj.generate_card2_data(measure_column,
                                                                                 self._dim_regression)
            card2data.update({"blockSplitter": self._blockSplitter})
            card2narrative = NarrativesUtils.get_template_output(self._base_dir,
                                                                 'regression_card2.html', card2data)
            card2paragraphs = NarrativesUtils.block_splitter(card2narrative, self._blockSplitter)

            card1 = {'tables': card2table,
                     'paragraphs': card2paragraphs,
                     'heading': 'Key Areas where ' + measure_column + ' matters'}
            measure_column_cards['card1'] = card1
            measureCard2Data += card2paragraphs
            if "table1" in card2table:
                table1data = regression_narrative_obj.convert_table_data(card2table["table1"])
                card2Table1 = TableData()
                card2Table1.set_table_data(table1data)
                card2Table1.set_table_type("heatMap")
                card2Table1.set_table_top_header(card2table["table1"]["heading"])
                card2Table1Json = json.loads(CommonUtils.convert_python_object_to_json(card2Table1))
                # measureCard2Data.insert(3, card2Table1)
                measureCard2Data.insert(3, card2Table1Json)
            if "table2" in card2table:
                table2data = regression_narrative_obj.convert_table_data(card2table["table2"])
                card2Table2 = TableData()
                card2Table2.set_table_data(table2data)
                card2Table2.set_table_type("heatMap")
                card2Table2.set_table_top_header(card2table["table2"]["heading"])
                # measureCard2Data.insert(5, card2Table2)
                card2Table2Json = json.loads(CommonUtils.convert_python_object_to_json(card2Table2))
                # measureCard2Data.append(card2Table2)
                measureCard2Data.append(card2Table2Json)

        progressMessage = CommonUtils.create_progress_message_object(self._analysisName, "custom", "info",
                                                                     "Analyzing Key Influencers",
                                                                     self._completionStatus,
                                                                     self._completionStatus, display=True)
        CommonUtils.save_progress_message(self._messageURL, progressMessage, ignore=False)
        card4data = regression_narrative_obj.generate_card4_data(self.result_column, measure_column)
        card4data.update({"blockSplitter": self._blockSplitter})
        # card4heading = "Sensitivity Analysis: Effect of " + self.result_column + " on Segments of " + measure_column
        card4narrative = NarrativesUtils.get_template_output(self._base_dir,
                                                             'regression_card4.html', card4data)
        card4paragraphs = NarrativesUtils.block_splitter(card4narrative, self._blockSplitter)
        card0['paragraphs'] = card1paragraphs + card4paragraphs
        card4Chart = card4data["charts"]
        statistical_info_array = [
            ("Test Type", "Regression"),
            ("Coefficient", str(round(self._df_regression_result.get_coeff(measure_column), 2))),
            ("P-Value", "<= 0.05"),
            ("Intercept", str(round(self._df_regression_result.get_intercept(), 2))),
            ("R Square ", str(round(self._df_regression_result.get_rsquare(), 2))),
        ]
        coeff = self._df_regression_result.get_coeff(measure_column)
        if coeff > 0:
            inferenceTuple = ("Inference",
                              "For every additional unit of increase in {} there will be an increase of {} units in {} (target).".format(
                                  measure_column, str(round(coeff, 2)),
                                  self._dataframe_context.get_result_column()))
        else:
            inferenceTuple = ("Inference",
                              "For every additional unit of increase in {} there will be a decrease of {} units in {} (target).".format(
                                  measure_column, str(round(coeff, 2)),
                                  self._dataframe_context.get_result_column()))
        if len(inferenceTuple) > 0:
            statistical_info_array.append(inferenceTuple)
        statistical_info_array = NarrativesUtils.statistical_info_array_formatter(statistical_info_array)
        card4paragraphs.insert(2, C3ChartData(data=card4Chart, info=statistical_info_array))
        measureCard1Data += card4paragraphs

        self.narratives['cards'].append(measure_column_cards)

        if count == 0:
            card4data.pop("charts")
            self._result_setter.update_executive_summary_data(card4data)
        count += 1

        measureCard1.set_card_data(measureCard1Data)
        if self._run_dimension_level_regression:
            measureCard2.set_card_data(measureCard2Data)
            sigMeasureNode.add_cards([measureCard1, measureCard2])
        else:
            sigMeasureNode.add_cards([measureCard1])
        self._regressionNode.add_a_node(sigMeasureNode)
    # self._result_setter.set_trend_section_completion_status(True)
    self._story_narrative.add_a_node(self._regressionNode)
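
# Illustrative sketch (not part of the original class): the "Inference" tuple built in
# generate_narratives() depends only on the sign of the regression coefficient. A minimal
# standalone version of that wording, assuming `coeff` is a plain float (the real method
# reads it from self._df_regression_result.get_coeff()):
def _sketch_regression_inference(measure_column, coeff, target_column):
    # A positive coefficient means the target moves up with the measure; a negative one
    # means it moves down. abs() keeps the magnitude readable in the decrease case.
    direction = "an increase" if coeff > 0 else "a decrease"
    return "For every additional unit of increase in {} there will be {} of {} units in {} (target).".format(
        measure_column, direction, round(abs(coeff), 2), target_column)

# Example (hypothetical column names):
#   _sketch_regression_inference("Marketing_Spend", -1.37, "Sales")
#   -> "For every additional unit of increase in Marketing_Spend there will be a decrease
#       of 1.37 units in Sales (target)."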
def _generate_narratives(self):
    """
    Generate the main card narrative; the remaining cards are generated by calling
    the ChiSquareAnalysis class for each analyzed dimension.
    """
    for target_dimension in self._df_chisquare_result.keys():
        target_chisquare_result = self._df_chisquare_result[target_dimension]
        analysed_variables = target_chisquare_result.keys()  # list of all analyzed variables
        # list of significant variables out of the analyzed variables
        significant_variables = [dim for dim in target_chisquare_result.keys()
                                 if target_chisquare_result[dim].get_pvalue() <= 0.05]
        effect_sizes = [target_chisquare_result[dim].get_effect_size() for dim in significant_variables]

        effect_size_dict = dict(zip(significant_variables, effect_sizes))
        significant_variables = [y for (x, y) in sorted(zip(effect_sizes, significant_variables),
                                                        reverse=True)]
        # insignificant_variables = [i for i in self._df_chisquare_result[target_dimension] if i['pv'] > 0.05]

        num_analysed_variables = len(analysed_variables)
        num_significant_variables = len(significant_variables)
        self.narratives['main_card'] = {}
        self.narratives['main_card']['heading'] = ('Relationship between ' + target_dimension +
                                                   ' and other factors')
        self.narratives['main_card']['paragraphs'] = {}
        data_dict = {
            'num_variables': num_analysed_variables,
            'num_significant_variables': num_significant_variables,
            'significant_variables': significant_variables,
            'target': target_dimension,
            'analysed_dimensions': analysed_variables,
            'blockSplitter': self._blockSplitter
        }  # for both para 1 and para 2

        paragraph = {}
        paragraph['header'] = ''
        paragraph['content'] = NarrativesUtils.get_template_output(self._base_dir, 'main_card.html',
                                                                   data_dict)
        self.narratives['main_card']['paragraphs'] = [paragraph]
        self.narratives['cards'] = []
        chart = {'header': 'Strength of association between ' + target_dimension + ' and other dimensions'}
        chart['data'] = effect_size_dict
        chart['label_text'] = {'x': 'Dimensions', 'y': 'Effect Size (Cramers-V)'}

        chart_data = []
        chartDataValues = []
        for k, v in effect_size_dict.items():
            chart_data.append({"key": k, "value": float(v)})
            chartDataValues.append(float(v))
        chart_data = sorted(chart_data, key=lambda x: x["value"], reverse=True)
        chart_json = ChartJson()
        chart_json.set_data(chart_data)
        chart_json.set_chart_type("bar")
        # chart_json.set_label_text({'x': 'Dimensions', 'y': 'Effect Size (Cramers-V)'})
        chart_json.set_label_text({'x': ' ', 'y': 'Effect Size (Cramers-V)'})
        chart_json.set_axis_rotation(True)
        chart_json.set_axes({"x": "key", "y": "value"})
        # chart_json.set_yaxis_number_format(".4f")
        chart_json.set_yaxis_number_format(NarrativesUtils.select_y_axis_format(chartDataValues))
        self.narratives['main_card']['chart'] = chart

        main_card = NormalCard()
        header = "<h3>Strength of association between " + target_dimension + " and other dimensions</h3>"
        main_card_data = [HtmlData(data=header)]
        main_card_narrative = NarrativesUtils.get_template_output(self._base_dir, 'main_card.html',
                                                                  data_dict)
        main_card_narrative = NarrativesUtils.block_splitter(main_card_narrative, self._blockSplitter)
        main_card_data += main_card_narrative

        # st_info = ["Test : Chi Square", "Threshold for p-value : 0.05", "Effect Size : Cramer's V"]
        if len(chart_data) > 0:
            statistical_info_array = [
                ("Test Type", "Chi-Square"),
                ("Effect Size", "Cramer's V"),
                ("Max Effect Size", chart_data[0]["key"]),
                ("Min Effect Size", chart_data[-1]["key"]),
            ]
            statistical_inference = ""
            if len(chart_data) == 1:
                statistical_inference = ("{} is the only variable that has a significant association with "
                                         "{} (Target), with an Effect size of {}").format(
                    chart_data[0]["key"], self._dataframe_context.get_result_column(),
                    round(chart_data[0]["value"], 4))
            elif len(chart_data) == 2:
                statistical_inference = ("There are two variables ({} and {}) that have a significant "
                                         "association with {} (Target), with Effect sizes of {} and {} "
                                         "respectively").format(
                    chart_data[0]["key"], chart_data[1]["key"],
                    self._dataframe_context.get_result_column(),
                    round(chart_data[0]["value"], 4), round(chart_data[1]["value"], 4))
            else:
                statistical_inference = ("There are {} variables that have a significant association with "
                                         "{} (Target), with Effect sizes ranging from {} to {}").format(
                    len(chart_data), self._dataframe_context.get_result_column(),
                    round(chart_data[0]["value"], 4), round(chart_data[-1]["value"], 4))
            if statistical_inference != "":
                statistical_info_array.append(("Inference", statistical_inference))
            statistical_info_array = NarrativesUtils.statistical_info_array_formatter(statistical_info_array)
        else:
            statistical_info_array = []

        main_card_data.append(C3ChartData(data=chart_json, info=statistical_info_array))
        main_card.set_card_data(main_card_data)
        main_card.set_card_name("Key Influencers")

        if self._storyOnScoredData != True:
            self._chiSquareNode.add_a_card(main_card)
        else:
            self._result_setter.add_a_score_chi_card(main_card)

        print("target_dimension", target_dimension)
        if self._appid == '2' and num_significant_variables > 5:
            significant_variables = significant_variables[:5]
        else:
            if self._nColsToUse != None:
                significant_variables = significant_variables[:self._nColsToUse]

        CommonUtils.create_update_and_save_progress_message(self._dataframe_context,
                                                            self._scriptWeightDict,
                                                            self._scriptStages,
                                                            self._analysisName,
                                                            "custom", "info", display=True,
                                                            customMsg="Analyzing key drivers",
                                                            weightKey="narratives")

        for analysed_dimension in significant_variables[:self._noOfSigDimsToShow]:
            chisquare_result = self._df_chisquare.get_chisquare_result(target_dimension, analysed_dimension)
            if self._appid == '2':
                print("APPID 2 is used")
                card = ChiSquareAnalysis(self._dataframe_context, self._dataframe_helper, chisquare_result,
                                         target_dimension, analysed_dimension, significant_variables,
                                         num_analysed_variables, self._data_frame, self._measure_columns,
                                         self._base_dir, None, target_chisquare_result)
                # self.narratives['cards'].append(card)
                self._result_setter.add_a_score_chi_card(
                    json.loads(CommonUtils.convert_python_object_to_json(card.get_dimension_card1())))
            elif self._appid == '1':
                print("APPID 1 is used")
                card = ChiSquareAnalysis(self._dataframe_context, self._dataframe_helper, chisquare_result,
                                         target_dimension, analysed_dimension, significant_variables,
                                         num_analysed_variables, self._data_frame, self._measure_columns,
                                         self._base_dir, None, target_chisquare_result)
                # self.narratives['cards'].append(card)
                self._result_setter.add_a_score_chi_card(
                    json.loads(CommonUtils.convert_python_object_to_json(card.get_dimension_card1())))
            else:
                target_dimension_card = ChiSquareAnalysis(self._dataframe_context, self._dataframe_helper,
                                                          chisquare_result, target_dimension,
                                                          analysed_dimension, significant_variables,
                                                          num_analysed_variables, self._data_frame,
                                                          self._measure_columns, self._base_dir,
                                                          None, target_chisquare_result)
                self.narratives['cards'].append(target_dimension_card)
                self._chiSquareNode.add_a_node(target_dimension_card.get_dimension_node())
    self._story_narrative.add_a_node(self._chiSquareNode)
    self._result_setter.set_chisquare_node(self._chiSquareNode)
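
# Illustrative sketch (not part of the original class): _generate_narratives() ranks the
# significant variables by pairing each one with its effect size and sorting the pairs in
# descending order -- the same zip/sort idiom used above.
def _sketch_rank_by_effect_size(effect_sizes, variables):
    # sorted() on (effect_size, variable) tuples orders by effect size first;
    # reverse=True puts the strongest association at the front.
    return [var for (size, var) in sorted(zip(effect_sizes, variables), reverse=True)]

# Example (hypothetical values):
#   _sketch_rank_by_effect_size([0.12, 0.40, 0.25], ["age", "region", "plan"])
#   -> ["region", "plan", "age"]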
def _generate_card1(self):
    self._anovaCard1 = NormalCard(name='Impact on ' + self._measure_column_capitalized)
    lines = []
    lines += NarrativesUtils.block_splitter(
        '<h3>' + self._measure_column_capitalized + ': Impact of ' + self._dimension_column_capitalized +
        ' on ' + self._measure_column_capitalized + '</h3>', self._blockSplitter)
    self.card1 = Card('Impact of ' + self._dimension_column_capitalized + ' on ' +
                      self._measure_column_capitalized)
    dim_table = self._dimension_anova_result.get_level_dataframe()
    keys = dim_table['levels']
    totals = dim_table['total']
    means = dim_table['average']
    counts = dim_table['count']
    if len(keys) >= 5:
        self._card3_required = True
    group_by_total = {}
    group_by_mean = {}
    for k, t, m in zip(keys, totals, means):
        group_by_total[k] = t
        group_by_mean[k] = m
    chart1 = chart(data=group_by_total,
                   labels={self._dimension_column_capitalized: self._measure_column_capitalized})
    chart2 = chart(data=group_by_mean,
                   labels={self._dimension_column_capitalized: self._measure_column_capitalized})
    self.card1.add_chart('group_by_total', chart1)
    self.card1.add_chart('group_by_mean', chart2)
    # st_info = ["Test : ANOVA", "p-value: 0.05", "F-stat: " + str(round(self._dimension_anova_result.get_f_value(), 2))]
    statistical_info_array = [
        ("Test Type", "ANOVA"),
        ("P-Value", "0.05"),
        ("F Value", str(round(self._dimension_anova_result.get_f_value(), 2))),
        ("Inference", "There is a significant effect of {} on {} (target).".format(
            self._dimension_column_capitalized, self._measure_column_capitalized))
    ]
    statistical_info_array = NarrativesUtils.statistical_info_array_formatter(statistical_info_array)
    card1_chart1 = C3ChartData(data=self._get_c3chart_card1_chart1(group_by_total, group_by_mean),
                               info=statistical_info_array)
    self._result_setter.set_anova_chart_on_scored_data({self._dimension_column: card1_chart1})
    lines += [card1_chart1]

    # top_group_by_total = keys[totals.index(max(totals))]
    top_group_by_total = keys[totals.argmax()]
    sum_top_group_by_total = max(totals)
    avg_top_group_by_total = means[totals.argmax()]
    bubble1 = BubbleData(NarrativesUtils.round_number(sum_top_group_by_total, 1),
                         top_group_by_total + ' is the largest contributor to ' + self._measure_column)
    # self.card1.add_bubble_data(bubble1)
    top_group_by_mean = keys[means.argmax()]
    sum_top_group_by_mean = totals[means.argmax()]
    avg_top_group_by_mean = max(means)
    bubble2 = BubbleData(NarrativesUtils.round_number(avg_top_group_by_mean, 1),
                         top_group_by_mean + ' has the highest average ' + self._measure_column)
    # self.card1.add_bubble_data(bubble2)

    groups_by_total = sorted(zip(totals, keys), reverse=True)
    sum_total = sum(totals)
    uniformly_distributed = True
    five_percent_total = 0.05 * sum_total
    fifteen_percent_total = 0.15 * sum_total
    sorted_total = sorted(totals, reverse=True)
    if len(groups_by_total) % 2 == 0:
        fifty_percent_index = int(len(groups_by_total) / 2)
        top_fifty_total = sum(sorted_total[:fifty_percent_index])
        bottom_fifty_total = sum(sorted_total[fifty_percent_index:])
        if top_fifty_total - bottom_fifty_total >= fifteen_percent_total:
            uniformly_distributed = False
    else:
        fifty_percent_index = int(len(groups_by_total) / 2) + 1
        top_fifty_total = sum(sorted_total[:fifty_percent_index])
        bottom_fifty_total = sum(sorted_total[fifty_percent_index - 1:])
        if top_fifty_total - bottom_fifty_total >= fifteen_percent_total:
            uniformly_distributed = False

    top_groups = None
    top_groups_contribution = None
    if (not uniformly_distributed) and len(groups_by_total) > 2:
        diffs = [sorted_total[i] - sorted_total[i + 1] for i in range(fifty_percent_index)]
        max_diff_index = diffs.index(max(diffs[1:]))
        top_groups = [k for t, k in groups_by_total[:max_diff_index + 1]]
        top_groups_contribution = sum(sorted_total[:max_diff_index + 1]) * 100 / sum_total
        bottom_groups = []
        bottom_groups_contribution = 0
        for t, k in groups_by_total[:0:-1]:
            bottom_groups.append(k)
            bottom_groups_contribution = bottom_groups_contribution + t
            if bottom_groups_contribution >= five_percent_total:
                break
        bottom_groups_contribution = bottom_groups_contribution * 100 / sum_total
    elif not uniformly_distributed:
        top_groups = [groups_by_total[0][1]]
        top_groups_contribution = groups_by_total[0][0] * 100 / sum_total
        bottom_groups = [groups_by_total[1][1]]
        bottom_groups_contribution = groups_by_total[1][0] * 100 / sum_total
    elif uniformly_distributed:
        top_groups = []
        top_groups_contribution = 0
        bottom_groups = []
        bottom_groups_contribution = 0

    num_groups = len(keys)
    data_dict = {
        'uniformly_distributed': uniformly_distributed,
        'top_groups': top_groups,
        'num_top_groups': len(top_groups),
        'top_groups_percent': NarrativesUtils.round_number(top_groups_contribution, 2),
        'dimension_name': self._dimension_column,
        'plural_dimension_name': NarrativesUtils.pluralize(self._dimension_column),
        'measure_name': self._measure_column,
        'best_category_by_mean': top_group_by_mean,
        'best_category_by_mean_cont': round(100.0 * sum_top_group_by_mean / sum(totals), 2),
        'best_category_by_mean_avg': NarrativesUtils.round_number(avg_top_group_by_mean, 2),
        'best_category_by_total': top_group_by_total,
        'best_category_by_total_cont': round(100.0 * sum_top_group_by_total / sum(totals), 2),
        'best_category_by_total_avg': NarrativesUtils.round_number(avg_top_group_by_total, 2),
        'best_category_by_total_sum': NarrativesUtils.round_number(sum_top_group_by_total, 2),
        'bottom_groups': bottom_groups,
        'num_bottom_groups': len(bottom_groups),
        'bottom_groups_percent': NarrativesUtils.round_number(bottom_groups_contribution, 2),
        'num_groups': num_groups
    }
    output = {'header': 'Overview', 'content': []}
    if self._binAnalyzedCol == True:
        narrativeText = NarrativesUtils.get_template_output(self._base_dir,
                                                            'anova_template_3_binned_IV.html', data_dict)
    else:
        narrativeText = NarrativesUtils.get_template_output(self._base_dir,
                                                            'anova_template_3.html', data_dict)
    output['content'].append(narrativeText)
    self._result_setter.set_anova_narrative_on_scored_data({self._dimension_column: narrativeText})
    for cnt in output['content']:
        lines += NarrativesUtils.block_splitter(cnt, self._blockSplitter)
    self._anovaCard1.set_card_data(lines)
    self.card1.add_paragraph(dict(output))
    self._result_setter.set_anova_cards_regression_score(self.card1)
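
# Illustrative sketch (not part of the original class): the uniformity test in
# _generate_card1() splits the descending-sorted group totals in half and flags the
# distribution as skewed when the top half outweighs the bottom half by at least 15%
# of the grand total. For an odd number of groups the middle element is counted in
# both halves, as in the original.
def _sketch_is_uniformly_distributed(totals):
    sorted_total = sorted(totals, reverse=True)
    if len(sorted_total) % 2 == 0:
        half = len(sorted_total) // 2
        top_half, bottom_half = sum(sorted_total[:half]), sum(sorted_total[half:])
    else:
        half = len(sorted_total) // 2 + 1
        top_half, bottom_half = sum(sorted_total[:half]), sum(sorted_total[half - 1:])
    return (top_half - bottom_half) < 0.15 * sum(totals)

# Example (hypothetical totals):
#   _sketch_is_uniformly_distributed([100, 95, 90, 85])  -> True  (near-even split)
#   _sketch_is_uniformly_distributed([500, 50, 40, 10])  -> False (top-heavy)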
def _generate_narratives(self):
    chisquare_result = self._chisquare_result
    target_dimension = self._target_dimension
    analysed_dimension = self._analysed_dimension
    significant_variables = self._significant_variables
    num_analysed_variables = self._num_analysed_variables
    table = self._chiSquareTable
    total = self._chiSquareTable.get_total()

    levels = self._chiSquareTable.get_column_two_levels()
    level_counts = self._chiSquareTable.get_column_total()
    levels_count_sum = sum(level_counts)
    levels_percentages = [i * 100.0 / levels_count_sum for i in level_counts]
    sorted_levels = sorted(zip(level_counts, levels), reverse=True)
    level_differences = [0.0] + [sorted_levels[i][0] - sorted_levels[i + 1][0]
                                 for i in range(len(sorted_levels) - 1)]
    top_dims = [j for i, j in sorted_levels[:level_differences.index(max(level_differences))]]
    top_dims_contribution = sum([i for i, j in
                                 sorted_levels[:level_differences.index(max(level_differences))]])
    bottom_dim = sorted_levels[-1][1]
    bottom_dim_contribution = sorted_levels[-1][0]
    bottom_dims = [y for x, y in sorted_levels if x == bottom_dim_contribution]

    target_levels = self._chiSquareTable.get_column_one_levels()
    target_counts = self._chiSquareTable.get_row_total()
    sorted_target_levels = sorted(zip(target_counts, target_levels), reverse=True)
    top_target_count, top_target = sorted_target_levels[0]
    second_target_count, second_target = sorted_target_levels[1]

    top_target_contributions = [table.get_value(top_target, i) for i in levels]
    sum_top_target = sum(top_target_contributions)

    sorted_levels = sorted(zip(top_target_contributions, levels), reverse=True)
    level_differences = [0.0] + [sorted_levels[i][0] - sorted_levels[i + 1][0]
                                 for i in range(len(sorted_levels) - 1)]
    top_target_top_dims = [j for i, j in
                           sorted_levels[:level_differences.index(max(level_differences))]]
    top_target_top_dims_contribution = sum([i for i, j in
                                            sorted_levels[:level_differences.index(max(level_differences))]])
    top_target_bottom_dim = sorted_levels[-1][1]
    top_target_bottom_dim_contribution = sorted_levels[-1][0]

    top_target_percentages = [i * 100.0 / sum_top_target for i in top_target_contributions]
    best_top_target_index = top_target_contributions.index(max(top_target_contributions))
    worst_top_target_index = top_target_contributions.index(min(top_target_contributions))
    top_target_differences = [x - y for x, y in zip(levels_percentages, top_target_percentages)]
    if len(top_target_differences) > 6:
        tops = 2
        bottoms = -2
    elif len(top_target_differences) > 4:
        tops = 2
        bottoms = -1
    else:
        tops = 1
        bottoms = -1
    sorted_ = sorted(enumerate(top_target_differences), key=lambda x: x[1], reverse=True)
    best_top_difference_indices = [x for x, y in sorted_[:tops]]
    worst_top_difference_indices = [x for x, y in sorted_[bottoms:]]

    top_target_shares = [x * 100.0 / y for x, y in zip(top_target_contributions, level_counts)]
    max_top_target_shares = max(top_target_shares)
    best_top_target_share_index = [idx for idx, val in enumerate(top_target_shares)
                                   if val == max_top_target_shares]
    level_counts_threshold = sum(level_counts) * 0.05 / len(level_counts)
    min_top_target_shares = min([x for x, y in zip(top_target_shares, level_counts)
                                 if y >= level_counts_threshold])
    worst_top_target_share_index = [idx for idx, val in enumerate(top_target_shares)
                                    if val == min_top_target_shares]
    overall_top_percentage = sum_top_target * 100.0 / total

    second_target_contributions = [table.get_value(second_target, i) for i in levels]
    sum_second_target = sum(second_target_contributions)

    sorted_levels = sorted(zip(second_target_contributions, levels), reverse=True)
    level_differences = [0.0] + [sorted_levels[i][0] - sorted_levels[i + 1][0]
                                 for i in range(len(sorted_levels) - 1)]
    second_target_top_dims = [j for i, j in
                              sorted_levels[:level_differences.index(max(level_differences))]]
    second_target_top_dims_contribution = sum([i for i, j in
                                               sorted_levels[:level_differences.index(max(level_differences))]])
    second_target_bottom_dim = sorted_levels[-1][1]
    second_target_bottom_dim_contribution = sorted_levels[-1][0]

    second_target_percentages = [i * 100.0 / sum_second_target for i in second_target_contributions]
    best_second_target_index = second_target_contributions.index(max(second_target_contributions))
    worst_second_target_index = second_target_contributions.index(min(second_target_contributions))
    second_target_differences = [x - y for x, y in zip(levels_percentages, second_target_percentages)]
    if len(second_target_differences) > 6:
        tops = 2
        bottoms = -2
    elif len(second_target_differences) > 4:
        tops = 2
        bottoms = -1
    else:
        tops = 1
        bottoms = -1
    sorted_ = sorted(enumerate(second_target_differences), key=lambda x: x[1], reverse=True)
    best_second_difference_indices = [x for x, y in sorted_[:tops]]
    worst_second_difference_indices = [x for x, y in sorted_[bottoms:]]

    second_target_shares = [x * 100.0 / y for x, y in zip(second_target_contributions, level_counts)]
    max_second_target_shares = max(second_target_shares)
    best_second_target_share_index = [idx for idx, val in enumerate(second_target_shares)
                                      if val == max_second_target_shares]
    level_counts_threshold = sum(level_counts) * 0.05 / len(level_counts)
    min_second_target_shares = min([x for x, y in zip(second_target_shares, level_counts)
                                    if y >= level_counts_threshold])
    # worst_second_target_share_index = second_target_shares.index(min_second_target_shares)
    worst_second_target_share_index = [idx for idx, val in enumerate(second_target_shares)
                                       if val == min_second_target_shares]
    overall_second_percentage = sum_second_target * 100.0 / total

    targetCardDataDict = {}
    targetCardDataDict['target'] = target_dimension
    targetCardDataDict['colname'] = analysed_dimension
    targetCardDataDict['num_significant'] = len(significant_variables)
    targetCardDataDict['plural_colname'] = NarrativesUtils.pluralize(analysed_dimension)
    targetCardDataDict["blockSplitter"] = self._blockSplitter
    targetCardDataDict["binTargetCol"] = self._binTargetCol
    targetCardDataDict["binAnalyzedCol"] = self._binAnalyzedCol
    targetCardDataDict['highlightFlag'] = self._highlightFlag
    targetCardDataDict['levels'] = levels

    data_dict = {}
    data_dict['best_second_difference'] = best_second_difference_indices  # these changed
    data_dict['worst_second_difference'] = worst_second_difference_indices
    data_dict['best_top_difference'] = best_top_difference_indices
    data_dict['worst_top_difference'] = worst_top_difference_indices
    data_dict['levels_percentages'] = levels_percentages
    data_dict['top_target_percentages'] = top_target_percentages
    data_dict['second_target_percentages'] = second_target_percentages
    data_dict['levels'] = levels
    data_dict['best_top_share'] = best_top_target_share_index
    data_dict['worst_top_share'] = worst_top_target_share_index
    data_dict['best_second_share'] = best_second_target_share_index
    data_dict['worst_second_share'] = worst_second_target_share_index
    data_dict['top_target_shares'] = top_target_shares
    data_dict['second_target_shares'] = second_target_shares
    data_dict['overall_second'] = overall_second_percentage
    data_dict['overall_top'] = overall_top_percentage
    data_dict['num_significant'] = len(significant_variables)
    data_dict['colname'] = analysed_dimension
    data_dict['plural_colname'] = NarrativesUtils.pluralize(analysed_dimension)
    data_dict['target'] = target_dimension
    data_dict['top_levels'] = top_dims
    data_dict['top_levels_percent'] = round(top_dims_contribution * 100.0 / total, 1)
    data_dict['bottom_level'] = bottom_dim
    data_dict['bottom_levels'] = bottom_dims
    data_dict['bottom_level_percent'] = round(bottom_dim_contribution * 100 / sum(level_counts), 2)
    data_dict['second_target'] = second_target
    data_dict['second_target_top_dims'] = second_target_top_dims
    data_dict['second_target_top_dims_contribution'] = \
        second_target_top_dims_contribution * 100.0 / sum(second_target_contributions)
    data_dict['second_target_bottom_dim'] = second_target_bottom_dim
    data_dict['second_target_bottom_dim_contribution'] = second_target_bottom_dim_contribution
    data_dict['best_second_target'] = levels[best_second_target_index]
    data_dict['best_second_target_count'] = second_target_contributions[best_second_target_index]
    data_dict['best_second_target_percent'] = round(
        second_target_contributions[best_second_target_index] * 100.0 / sum(second_target_contributions), 2)
    data_dict['worst_second_target'] = levels[worst_second_target_index]
    data_dict['worst_second_target_percent'] = round(
        second_target_contributions[worst_second_target_index] * 100.0 / sum(second_target_contributions), 2)
    data_dict['top_target'] = top_target
    data_dict['top_target_top_dims'] = top_target_top_dims
    data_dict['top_target_top_dims_contribution'] = \
        top_target_top_dims_contribution * 100.0 / sum(top_target_contributions)
    data_dict['top_target_bottom_dim'] = top_target_bottom_dim
    data_dict['top_target_bottom_dim_contribution'] = top_target_bottom_dim_contribution
    data_dict['best_top_target'] = levels[best_top_target_index]
    data_dict['best_top_target_count'] = top_target_contributions[best_top_target_index]
    data_dict['best_top_target_percent'] = round(
        top_target_contributions[best_top_target_index] * 100.0 / sum(top_target_contributions), 2)
    data_dict['worst_top_target'] = levels[worst_top_target_index]
    data_dict['worst_top_target_percent'] = round(
        top_target_contributions[worst_top_target_index] * 100.0 / sum(top_target_contributions), 2)
    data_dict["blockSplitter"] = self._blockSplitter
    data_dict["binTargetCol"] = self._binTargetCol
    data_dict["binAnalyzedCol"] = self._binAnalyzedCol
    data_dict['highlightFlag'] = self._highlightFlag

    ###############
    #    CARD1    #
    ###############

    print("self._binTargetCol & self._binAnalyzedCol : ", self._binTargetCol, self._binAnalyzedCol)
    if self._binTargetCol == True and self._binAnalyzedCol == False:
        print("Only Target Column is Binned, : ", self._binTargetCol)
        output = NarrativesUtils.block_splitter(
            NarrativesUtils.get_template_output(self._base_dir, 'card1_binned_target.html', data_dict),
            self._blockSplitter, highlightFlag=self._highlightFlag)
    elif self._binTargetCol == True and self._binAnalyzedCol == True:
        print("Target Column and IV is Binned : ", self._binTargetCol, self._binAnalyzedCol)
        output = NarrativesUtils.block_splitter(
            NarrativesUtils.get_template_output(self._base_dir, 'card1_binned_target_and_IV.html',
                                                data_dict),
            self._blockSplitter, highlightFlag=self._highlightFlag)
    else:
        output = NarrativesUtils.block_splitter(
            NarrativesUtils.get_template_output(self._base_dir, 'card1.html', data_dict),
            self._blockSplitter, highlightFlag=self._highlightFlag)

    targetDimCard1Data = []
    targetDimcard1Heading = ('<h3>Relationship between ' + self._target_dimension +
                             ' and ' + self._analysed_dimension + "</h3>")

    toggledata = ToggleData()

    targetDimTable1Data = self.generate_card1_table1()
    targetDimCard1Table1 = TableData()
    targetDimCard1Table1.set_table_type("heatMap")
    targetDimCard1Table1.set_table_data(targetDimTable1Data)
    toggledata.set_toggleon_data({"data": {"tableData": targetDimTable1Data, "tableType": "heatMap"},
                                  "dataType": "table"})

    targetDimTable2Data = self.generate_card1_table2()
    targetDimCard1Table2 = TableData()
    targetDimCard1Table2.set_table_type("normal")
    table2Data = targetDimTable2Data["data1"]
    table2Data = [innerList[1:] for innerList in table2Data if innerList[0].strip() != ""]
    targetDimCard1Table2.set_table_data(table2Data)
    toggledata.set_toggleoff_data({"data": {"tableData": table2Data, "tableType": "heatMap"},
                                   "dataType": "table"})

    targetDimCard1Data.append(HtmlData(data=targetDimcard1Heading))
    targetDimCard1Data.append(toggledata)
    targetDimCard1Data += output

    self._card1.set_card_data(targetDimCard1Data)
    self._card1.set_card_name("{}: Relationship with {}".format(self._analysed_dimension,
                                                                self._target_dimension))

    ###############
    #    CARD2    #
    ###############

    dict_for_test = {}
    if self._appid == None:
        key_factors = ''
        num_key_factors = len(self._second_level_dimensions)
        if len(self._second_level_dimensions) == 5:
            key_factors = ', '.join(self._second_level_dimensions[:4]) + ' and ' + \
                          self._second_level_dimensions[4]
        elif len(self._second_level_dimensions) == 4:
            key_factors = ', '.join(self._second_level_dimensions[:3]) + ' and ' + \
                          self._second_level_dimensions[3]
        elif len(self._second_level_dimensions) == 3:
            key_factors = ', '.join(self._second_level_dimensions[:2]) + ' and ' + \
                          self._second_level_dimensions[2]
        elif len(self._second_level_dimensions) == 2:
            key_factors = ' and '.join(self._second_level_dimensions)
        elif len(self._second_level_dimensions) == 1:
            key_factors = self._second_level_dimensions[0]
        targetCardDataDict['num_key_factors'] = num_key_factors
        targetCardDataDict['key_factors'] = key_factors

        for tupleObj in sorted_target_levels[:self._chiSquareLevelLimit]:
            targetLevel = tupleObj[1]

            targetCardDataDict['random_card2'] = random.randint(1, 100)
            targetCardDataDict['random_card4'] = random.randint(1, 100)

            second_target_contributions = [table.get_value(targetLevel, i) for i in levels]
            sum_second_target = sum(second_target_contributions)

            sorted_levels = sorted(zip(second_target_contributions, levels), reverse=True)
            level_differences = [0.0] + [sorted_levels[i][0] - sorted_levels[i + 1][0]
                                         for i in range(len(sorted_levels) - 1)]
            second_target_top_dims = [j for i, j in
                                      sorted_levels[:level_differences.index(max(level_differences))]]
            second_target_top_dims_contribution = sum(
                [i for i, j in sorted_levels[:level_differences.index(max(level_differences))]])
            second_target_bottom_dim = sorted_levels[-1][1]
            second_target_bottom_dim_contribution = sorted_levels[-1][0]

            second_target_percentages = [i * 100.0 / sum_second_target
                                         for i in second_target_contributions]
            best_second_target_index = second_target_contributions.index(max(second_target_contributions))
            worst_second_target_index = second_target_contributions.index(min(second_target_contributions))
            second_target_differences = [x - y for x, y in
                                         zip(levels_percentages, second_target_percentages)]
            if len(second_target_differences) > 6:
                tops = 2
                bottoms = -2
            elif len(second_target_differences) > 4:
                tops = 2
                bottoms = -1
            else:
                tops = 1
                bottoms = -1
            sorted_ = sorted(enumerate(second_target_differences), key=lambda x: x[1], reverse=True)
            best_second_difference_indices = [x for x, y in sorted_[:tops]]
            worst_second_difference_indices = [x for x, y in sorted_[bottoms:]]

            second_target_shares = [x * 100.0 / y for x, y in
                                    zip(second_target_contributions, level_counts)]
            max_second_target_shares = max(second_target_shares)
            best_second_target_share_index = [idx for idx, val in enumerate(second_target_shares)
                                              if val == max_second_target_shares]
            level_counts_threshold = sum(level_counts) * 0.05 / len(level_counts)
            min_second_target_shares = min([x for x, y in zip(second_target_shares, level_counts)
                                            if y >= level_counts_threshold])
            worst_second_target_share_index = [idx for idx, val in enumerate(second_target_shares)
                                               if val == min_second_target_shares]
            overall_second_percentage = sum_second_target * 100.0 / total

            # DataFrames for contribution calculation
            df_second_target = self._data_frame.filter(col(self._target_dimension) == targetLevel).\
                filter(col(self._analysed_dimension) == second_target_top_dims[0]).\
                select(self._second_level_dimensions).toPandas()
            df_second_dim = self._data_frame.filter(col(self._analysed_dimension) == second_target_top_dims[0]).\
                select(self._second_level_dimensions).toPandas()

            distribution_second = []
            for d in self._second_level_dimensions:
                grouped = df_second_target.groupby(d).agg({d: 'count'}).sort_values(d, ascending=False)
                contributions = df_second_dim.groupby(d).agg({d: 'count'})
                contribution_index = list(contributions.index)
                contributions_val = contributions[d].tolist()
                contributions_list = dict(zip(contribution_index, contributions_val))
                index_list = list(grouped.index)
                grouped_list = grouped[d].tolist()
                contributions_percent_list = [round(y * 100.0 / contributions_list[x], 2)
                                              for x, y in zip(index_list, grouped_list)]
                sum_ = grouped[d].sum()
                diffs = [0] + [grouped_list[i] - grouped_list[i + 1]
                               for i in range(len(grouped_list) - 1)]
                max_diff = diffs.index(max(diffs))

                index_txt = ''
                if max_diff == 1:
                    index_txt = index_list[0]
                elif max_diff == 2:
                    index_txt = index_list[0] + '(' + str(round(grouped_list[0] * 100.0 / sum_, 1)) + '%)' + \
                                ' and ' + index_list[1] + '(' + \
                                str(round(grouped_list[1] * 100.0 / sum_, 1)) + '%)'
                elif max_diff > 2:
                    index_txt = 'including ' + index_list[0] + '(' + \
                                str(round(grouped_list[0] * 100.0 / sum_, 1)) + '%)' + \
                                ' and ' + index_list[1] + '(' + \
                                str(round(grouped_list[1] * 100.0 / sum_, 1)) + '%)'
                distribution_second.append({
                    'contributions': [round(i * 100.0 / sum_, 2) for i in grouped_list[:max_diff]],
                    'levels': index_list[:max_diff],
                    'variation': random.randint(1, 100),
                    'index_txt': index_txt,
                    'd': d,
                    'contributions_percent': contributions_percent_list
                })

            targetCardDataDict['distribution_second'] = distribution_second
            targetCardDataDict['second_target'] = targetLevel
            targetCardDataDict['second_target_top_dims'] = second_target_top_dims
            targetCardDataDict['second_target_top_dims_contribution'] = \
                second_target_top_dims_contribution * 100.0 / sum(second_target_contributions)
            targetCardDataDict['second_target_bottom_dim'] = second_target_bottom_dim
            targetCardDataDict['second_target_bottom_dim_contribution'] = \
                second_target_bottom_dim_contribution
            targetCardDataDict['best_second_target'] = levels[best_second_target_index]
            targetCardDataDict['best_second_target_count'] = \
                second_target_contributions[best_second_target_index]
            targetCardDataDict['best_second_target_percent'] = round(
                second_target_contributions[best_second_target_index] * 100.0 /
                sum(second_target_contributions), 2)
            targetCardDataDict['worst_second_target'] = levels[worst_second_target_index]
            targetCardDataDict['worst_second_target_percent'] = round(
                second_target_contributions[worst_second_target_index] * 100.0 /
                sum(second_target_contributions), 2)

            card2Data = []
            targetLevelContributions = [table.get_value(targetLevel, i) for i in levels]
            card2Heading = ('<h3>Distribution of ' + self._target_dimension + ' (' + targetLevel +
                            ') across ' + self._analysed_dimension + "</h3>")
            chart, bubble = self.generate_distribution_card_chart(targetLevel, targetLevelContributions,
                                                                  levels, level_counts, total)
            card2ChartData = NormalChartData(data=chart["data"])
            card2ChartJson = ChartJson()
            card2ChartJson.set_data(card2ChartData.get_data())
            card2ChartJson.set_chart_type("combination")
            card2ChartJson.set_types({"total": "bar", "percentage": "line"})
            card2ChartJson.set_legend({"total": "# of " + targetLevel,
                                       "percentage": "% of " + targetLevel})
            card2ChartJson.set_axes({"x": "key", "y": "total", "y2": "percentage"})
            card2ChartJson.set_label_text({"x": " ", "y": "Count", "y2": "Percentage"})

            print("self._binTargetCol & self._binAnalyzedCol : ", self._binTargetCol,
                  self._binAnalyzedCol)
            if self._binTargetCol == True and self._binAnalyzedCol == False:
                print("Only Target Column is Binned")
                output2 = NarrativesUtils.block_splitter(
                    NarrativesUtils.get_template_output(self._base_dir, 'card2_binned_target.html',
                                                        targetCardDataDict),
                    self._blockSplitter)
            elif self._binTargetCol == True and self._binAnalyzedCol == True:
                print("Target Column and IV is Binned")
                output2 = NarrativesUtils.block_splitter(
                    NarrativesUtils.get_template_output(self._base_dir,
                                                        'card2_binned_target_and_IV.html',
                                                        targetCardDataDict),
                    self._blockSplitter)
            else:
                print("In Else, self._binTargetCol should be False : ", self._binTargetCol)
                output2 = NarrativesUtils.block_splitter(
                    NarrativesUtils.get_template_output(self._base_dir, 'card2.html',
                                                        targetCardDataDict),
                    self._blockSplitter)

            card2Data.append(HtmlData(data=card2Heading))
            statistical_info_array = [
                ("Test Type", "Chi-Square"),
                ("Chi-Square statistic", str(round(self._chisquare_result.get_stat(), 3))),
                ("P-Value", str(round(self._chisquare_result.get_pvalue(), 3))),
                ("Inference", "Chi-squared analysis shows a significant association between {} (target) and {}.".format(
                    self._target_dimension, self._analysed_dimension))
            ]
            statistical_info_array = NarrativesUtils.statistical_info_array_formatter(statistical_info_array)
            card2Data.append(C3ChartData(data=card2ChartJson, info=statistical_info_array))
            card2Data += output2
            card2BubbleData = ("<div class='col-md-6 col-xs-12'><h2 class='text-center'><span>{}</span>"
                               "<br /><small>{}</small></h2></div>"
                               "<div class='col-md-6 col-xs-12'><h2 class='text-center'><span>{}</span>"
                               "<br /><small>{}</small></h2></div>").format(
                bubble[0]["value"], bubble[0]["text"], bubble[1]["value"], bubble[1]["text"])
            card2Data.append(HtmlData(data=card2BubbleData))

            targetCard = NormalCard()
            targetCard.set_card_data(card2Data)
            targetCard.set_card_name("{} : Distribution of {}".format(self._analysed_dimension,
                                                                      targetLevel))
            self._targetCards.append(targetCard)
            dict_for_test[targetLevel] = targetCardDataDict
    out = {'data_dict': data_dict, 'target_dict': dict_for_test}
    return out
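
# Illustrative sketch (not part of the original class): the repeated
# `level_differences.index(max(level_differences))` idiom above selects the "top" levels
# by sorting counts in descending order, finding the largest drop between consecutive
# counts, and keeping everything before that drop.
def _sketch_top_levels_by_largest_gap(counts, levels):
    sorted_levels = sorted(zip(counts, levels), reverse=True)
    # gaps[i] is the drop from level i-1 to level i; gaps[0] is padded with 0.0, so when
    # all counts tie the cut lands at 0 and the result is empty (the `level_diff_index`
    # patch in the variant below guards that case).
    gaps = [0.0] + [sorted_levels[i][0] - sorted_levels[i + 1][0]
                    for i in range(len(sorted_levels) - 1)]
    cut = gaps.index(max(gaps))
    return [level for count, level in sorted_levels[:cut]]

# Example (hypothetical counts):
#   _sketch_top_levels_by_largest_gap([50, 45, 10, 5], ["A", "B", "C", "D"])
#   -> ["A", "B"]  (the largest drop, 35, comes right after "B")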
def _generate_narratives(self): chisquare_result = self._chisquare_result target_dimension = self._target_dimension analysed_dimension = self._analysed_dimension significant_variables = self._significant_variables num_analysed_variables = self._num_analysed_variables table = self._chiSquareTable total = self._chiSquareTable.get_total() levels = self._chiSquareTable.get_column_two_levels() level_counts = self._chiSquareTable.get_column_total() levels_count_sum = sum(level_counts) levels_percentages = [ old_div(i * 100.0, levels_count_sum) for i in level_counts ] sorted_levels = sorted(zip(level_counts, levels), reverse=True) level_differences = [0.0] + [ sorted_levels[i][0] - sorted_levels[i + 1][0] for i in range(len(sorted_levels) - 1) ] top_dims = [ j for i, j in sorted_levels[:level_differences.index(max(level_differences))] ] top_dims_contribution = sum([ i for i, j in sorted_levels[:level_differences.index(max(level_differences))] ]) bottom_dim = sorted_levels[-1][1] bottom_dim_contribution = sorted_levels[-1][0] bottom_dims = [ y for x, y in sorted_levels if x == bottom_dim_contribution ] target_levels = self._chiSquareTable.get_column_one_levels() target_counts = self._chiSquareTable.get_row_total() sorted_target_levels = sorted(zip(target_counts, target_levels), reverse=True) top_target_count, top_target = sorted_target_levels[0] second_target_count, second_target = sorted_target_levels[1] top_target_contributions = [ table.get_value(top_target, i) for i in levels ] sum_top_target = sum(top_target_contributions) sorted_levels = sorted(zip(top_target_contributions, levels), reverse=True) level_differences = [0.0] + [ sorted_levels[i][0] - sorted_levels[i + 1][0] for i in range(len(sorted_levels) - 1) ] top_target_top_dims = [ j for i, j in sorted_levels[:level_differences.index(max(level_differences))] ] top_target_top_dims_contribution = sum([ i for i, j in sorted_levels[:level_differences.index(max(level_differences))] ]) top_target_bottom_dim = sorted_levels[-1][1] top_target_bottom_dim_contribution = sorted_levels[-1][0] top_target_percentages = [ old_div(i * 100.0, sum_top_target) for i in top_target_contributions ] best_top_target_index = top_target_contributions.index( max(top_target_contributions)) worst_top_target_index = top_target_contributions.index( min(top_target_contributions)) top_target_differences = [ x - y for x, y in zip(levels_percentages, top_target_percentages) ] if len(top_target_differences) > 6: tops = 2 bottoms = -2 elif len(top_target_differences) > 4: tops = 2 bottoms = -1 else: tops = 1 bottoms = -1 sorted_ = sorted(enumerate(top_target_differences), key=lambda x: x[1], reverse=True) best_top_difference_indices = [x for x, y in sorted_[:tops]] worst_top_difference_indices = [x for x, y in sorted_[bottoms:]] top_target_shares = [ old_div(x * 100.0, y) for x, y in zip(top_target_contributions, level_counts) ] max_top_target_shares = max(top_target_shares) best_top_target_share_index = [ idx for idx, val in enumerate(top_target_shares) if val == max_top_target_shares ] level_counts_threshold = old_div( sum(level_counts) * 0.05, len(level_counts)) min_top_target_shares = min([ x for x, y in zip(top_target_shares, level_counts) if y >= level_counts_threshold ]) if max_top_target_shares == min_top_target_shares: worst_top_target_share_index = [] else: worst_top_target_share_index = [ idx for idx, val in enumerate(top_target_shares) if val == min_top_target_shares ] overall_top_percentage = old_div(sum_top_target * 100.0, total) second_target_contributions = [ 
table.get_value(second_target, i) for i in levels ] sum_second_target = sum(second_target_contributions) sorted_levels = sorted(zip(second_target_contributions, levels), reverse=True) level_differences = [0.0] + [ sorted_levels[i][0] - sorted_levels[i + 1][0] for i in range(len(sorted_levels) - 1) ] second_target_top_dims = [ j for i, j in sorted_levels[:level_differences.index(max(level_differences))] ] second_target_top_dims_contribution = sum([ i for i, j in sorted_levels[:level_differences.index(max(level_differences))] ]) second_target_bottom_dim = sorted_levels[-1][1] second_target_bottom_dim_contribution = sorted_levels[-1][0] second_target_percentages = [ old_div(i * 100.0, sum_second_target) for i in second_target_contributions ] best_second_target_index = second_target_contributions.index( max(second_target_contributions)) worst_second_target_index = second_target_contributions.index( min(second_target_contributions)) second_target_differences = [ x - y for x, y in zip(levels_percentages, second_target_percentages) ] if len(second_target_differences) > 6: tops = 2 bottoms = -2 elif len(second_target_differences) > 4: tops = 2 bottoms = -1 else: tops = 1 bottoms = -1 sorted_ = sorted(enumerate(second_target_differences), key=lambda x: x[1], reverse=True) best_second_difference_indices = [x for x, y in sorted_[:tops]] worst_second_difference_indices = [x for x, y in sorted_[bottoms:]] second_target_shares = [ old_div(x * 100.0, y) for x, y in zip(second_target_contributions, level_counts) ] max_second_target_shares = max(second_target_shares) best_second_target_share_index = [ idx for idx, val in enumerate(second_target_shares) if val == max_second_target_shares ] level_counts_threshold = old_div( sum(level_counts) * 0.05, len(level_counts)) if min(second_target_shares) == 0: min_second_target_shares = min([ x for x, y in zip(second_target_shares, level_counts) if x != 0 ]) else: min_second_target_shares = min([ x for x, y in zip(second_target_shares, level_counts) if y >= level_counts_threshold ]) # worst_second_target_share_index = second_target_shares.index(min_second_target_shares) if max_second_target_shares == min_second_target_shares: worst_second_target_share_index = [] else: worst_second_target_share_index = [ idx for idx, val in enumerate(second_target_shares) if val == min_second_target_shares ] overall_second_percentage = old_div(sum_second_target * 100.0, total) targetCardDataDict = {} targetCardDataDict['target'] = target_dimension targetCardDataDict['colname'] = analysed_dimension targetCardDataDict['num_significant'] = len(significant_variables) targetCardDataDict['plural_colname'] = NarrativesUtils.pluralize( analysed_dimension) targetCardDataDict["blockSplitter"] = self._blockSplitter targetCardDataDict["binTargetCol"] = self._binTargetCol targetCardDataDict["binAnalyzedCol"] = self._binAnalyzedCol targetCardDataDict['highlightFlag'] = self._highlightFlag targetCardDataDict['levels'] = levels data_dict = {} data_dict[ 'best_second_difference'] = best_second_difference_indices ##these changed data_dict['worst_second_difference'] = worst_second_difference_indices data_dict['best_top_difference'] = best_top_difference_indices data_dict['worst_top_difference'] = worst_top_difference_indices data_dict['levels_percentages'] = levels_percentages data_dict['top_target_percentages'] = top_target_percentages data_dict['second_target_percentages'] = second_target_percentages data_dict['levels'] = levels data_dict['best_top_share'] = best_top_target_share_index 
data_dict['worst_top_share'] = worst_top_target_share_index data_dict['best_second_share'] = best_second_target_share_index data_dict['worst_second_share'] = worst_second_target_share_index data_dict['top_target_shares'] = top_target_shares data_dict['second_target_shares'] = second_target_shares data_dict['overall_second'] = overall_second_percentage data_dict['overall_top'] = overall_top_percentage data_dict['num_significant'] = len(significant_variables) data_dict['colname'] = analysed_dimension data_dict['plural_colname'] = NarrativesUtils.pluralize( analysed_dimension) data_dict['target'] = target_dimension data_dict['top_levels'] = top_dims data_dict['top_levels_percent'] = round( old_div(top_dims_contribution * 100.0, total), 1) data_dict['bottom_level'] = bottom_dim data_dict['bottom_levels'] = bottom_dims data_dict['bottom_level_percent'] = round( old_div(bottom_dim_contribution * 100, sum(level_counts)), 2) data_dict['second_target'] = second_target data_dict['second_target_top_dims'] = second_target_top_dims data_dict['second_target_top_dims_contribution'] = old_div( second_target_top_dims_contribution * 100.0, sum(second_target_contributions)) data_dict['second_target_bottom_dim'] = second_target_bottom_dim data_dict[ 'second_target_bottom_dim_contribution'] = second_target_bottom_dim_contribution data_dict['best_second_target'] = levels[best_second_target_index] data_dict['best_second_target_count'] = second_target_contributions[ best_second_target_index] data_dict['best_second_target_percent'] = round( old_div( second_target_contributions[best_second_target_index] * 100.0, sum(second_target_contributions)), 2) data_dict['worst_second_target'] = levels[worst_second_target_index] data_dict['worst_second_target_percent'] = round( old_div( second_target_contributions[worst_second_target_index] * 100.0, sum(second_target_contributions)), 2) data_dict['top_target'] = top_target data_dict['top_target_top_dims'] = top_target_top_dims data_dict['top_target_top_dims_contribution'] = old_div( top_target_top_dims_contribution * 100.0, sum(top_target_contributions)) data_dict['top_target_bottom_dim'] = top_target_bottom_dim data_dict[ 'top_target_bottom_dim_contribution'] = top_target_bottom_dim_contribution data_dict['best_top_target'] = levels[best_top_target_index] data_dict['best_top_target_count'] = top_target_contributions[ best_top_target_index] data_dict['best_top_target_percent'] = round( old_div(top_target_contributions[best_top_target_index] * 100.0, sum(top_target_contributions)), 2) data_dict['worst_top_target'] = levels[worst_top_target_index] data_dict['worst_top_target_percent'] = round( old_div(top_target_contributions[worst_top_target_index] * 100.0, sum(top_target_contributions)), 2) data_dict["blockSplitter"] = self._blockSplitter data_dict["binTargetCol"] = self._binTargetCol data_dict["binAnalyzedCol"] = self._binAnalyzedCol data_dict['highlightFlag'] = self._highlightFlag # print "_"*60 # print "DATA DICT - ", data_dict # print "_"*60 ############### # CARD1 # ############### print("self._binTargetCol & self._binAnalyzedCol : ", self._binTargetCol, self._binAnalyzedCol) if len(data_dict['worst_second_share']) == 0: output = NarrativesUtils.block_splitter( NarrativesUtils.get_template_output( self._base_dir, 'card1_binned_target_worst_second.html', data_dict), self._blockSplitter, highlightFlag=self._highlightFlag) else: if (self._binTargetCol == True & self._binAnalyzedCol == False): print("Only Target Column is Binned, : ", self._binTargetCol) output = 
NarrativesUtils.block_splitter( NarrativesUtils.get_template_output( self._base_dir, 'card1_binned_target.html', data_dict), self._blockSplitter, highlightFlag=self._highlightFlag) elif (self._binTargetCol == True & self._binAnalyzedCol == True): print("Target Column and IV is Binned : ", self._binTargetCol, self._binAnalyzedCol) output = NarrativesUtils.block_splitter( NarrativesUtils.get_template_output( self._base_dir, 'card1_binned_target_and_IV.html', data_dict), self._blockSplitter, highlightFlag=self._highlightFlag) else: output = NarrativesUtils.block_splitter( NarrativesUtils.get_template_output( self._base_dir, 'card1.html', data_dict), self._blockSplitter, highlightFlag=self._highlightFlag) targetDimCard1Data = [] targetDimcard1Heading = '<h3>Impact of ' + self._analysed_dimension + ' on ' + self._target_dimension + "</h3>" toggledata = ToggleData() targetDimTable1Data = self.generate_card1_table1() targetDimCard1Table1 = TableData() targetDimCard1Table1.set_table_type("heatMap") targetDimCard1Table1.set_table_data(targetDimTable1Data) toggledata.set_toggleon_data({ "data": { "tableData": targetDimTable1Data, "tableType": "heatMap" }, "dataType": "table" }) targetDimTable2Data = self.generate_card1_table2() targetDimCard1Table2 = TableData() targetDimCard1Table2.set_table_type("normal") table2Data = targetDimTable2Data["data1"] table2Data = [ innerList[1:] for innerList in table2Data if innerList[0].strip() != "" ] targetDimCard1Table2.set_table_data(table2Data) toggledata.set_toggleoff_data({ "data": { "tableData": table2Data, "tableType": "heatMap" }, "dataType": "table" }) targetDimCard1Data.append(HtmlData(data=targetDimcard1Heading)) targetDimCard1Data.append(toggledata) targetDimCard1Data += output self._card1.set_card_data(targetDimCard1Data) self._card1.set_card_name("{}: Relationship with {}".format( self._analysed_dimension, self._target_dimension)) ############### # CARD2 # ############### if self._appid == None: key_factors = '' num_key_factors = len(self._second_level_dimensions) if len(self._second_level_dimensions) == 5: key_factors = ', '.join( self._second_level_dimensions[:4] ) + ' and ' + self._second_level_dimensions[4] elif len(self._second_level_dimensions) == 4: key_factors = ', '.join( self._second_level_dimensions[:3] ) + ' and ' + self._second_level_dimensions[3] elif len(self._second_level_dimensions) == 3: key_factors = ', '.join( self._second_level_dimensions[:2] ) + ' and ' + self._second_level_dimensions[2] elif len(self._second_level_dimensions) == 2: key_factors = ' and '.join(self._second_level_dimensions) elif len(self._second_level_dimensions) == 1: key_factors = self._second_level_dimensions[0] targetCardDataDict['num_key_factors'] = num_key_factors targetCardDataDict['key_factors'] = key_factors dict_for_test = {} for tupleObj in sorted_target_levels[:self._chiSquareLevelLimit]: targetLevel = tupleObj[1] targetCardDataDict['random_card2'] = random.randint(1, 100) targetCardDataDict['random_card4'] = random.randint(1, 100) second_target_contributions = [ table.get_value(targetLevel, i) for i in levels ] sum_second_target = sum(second_target_contributions) sorted_levels = sorted(zip(second_target_contributions, levels), reverse=True) level_differences = [0.0] + [ sorted_levels[i][0] - sorted_levels[i + 1][0] for i in range(len(sorted_levels) - 1) ] level_diff_index = level_differences.index( max(level_differences)) if level_differences.index( max(level_differences)) > 0 else len( level_differences ) ##added for pipeline keyerror issue 
                second_target_top_dims = [j for i, j in sorted_levels[:level_diff_index]]
                second_target_top_dims_contribution = sum([i for i, j in sorted_levels[:level_diff_index]])
                second_target_bottom_dim = sorted_levels[-1][1]
                second_target_bottom_dim_contribution = sorted_levels[-1][0]

                second_target_percentages = [old_div(i * 100.0, sum_second_target) for i in second_target_contributions]
                best_second_target_index = second_target_contributions.index(max(second_target_contributions))
                worst_second_target_index = second_target_contributions.index(min(second_target_contributions))
                second_target_differences = [x - y for x, y in zip(levels_percentages, second_target_percentages)]
                if len(second_target_differences) > 6:
                    tops = 2
                    bottoms = -2
                elif len(second_target_differences) > 4:
                    tops = 2
                    bottoms = -1
                else:
                    tops = 1
                    bottoms = -1
                sorted_ = sorted(enumerate(second_target_differences), key=lambda x: x[1], reverse=True)
                best_second_difference_indices = [x for x, y in sorted_[:tops]]
                worst_second_difference_indices = [x for x, y in sorted_[bottoms:]]

                second_target_shares = [old_div(x * 100.0, y) for x, y in zip(second_target_contributions, level_counts)]
                max_second_target_shares = max(second_target_shares)
                best_second_target_share_index = [idx for idx, val in enumerate(second_target_shares) if val == max_second_target_shares]
                level_counts_threshold = old_div(sum(level_counts) * 0.05, len(level_counts))
                min_second_target_shares = min([x for x, y in zip(second_target_shares, level_counts) if y >= level_counts_threshold])
                worst_second_target_share_index = [idx for idx, val in enumerate(second_target_shares) if val == min_second_target_shares]
                overall_second_percentage = old_div(sum_second_target * 100.0, total)

                # DataFrame for contribution calculation
                if self._pandas_flag:
                    df_second_target = self._data_frame[
                        (self._data_frame[self._target_dimension] == targetLevel)
                        & (self._data_frame[self._analysed_dimension] == second_target_top_dims[0])
                    ][self._second_level_dimensions]
                    df_second_dim = self._data_frame[
                        self._data_frame[self._analysed_dimension] == second_target_top_dims[0]
                    ][self._second_level_dimensions]
                else:
                    df_second_target = self._data_frame.filter(col(self._target_dimension) == targetLevel).\
                        filter(col(self._analysed_dimension) == second_target_top_dims[0]).\
                        select(self._second_level_dimensions).toPandas()
                    df_second_dim = self._data_frame.filter(col(self._analysed_dimension) == second_target_top_dims[0]).\
                        select(self._second_level_dimensions).toPandas()

                # if self._chisquare_result.get_splits():
                #     splits = self._chisquare_result.get_splits()
                #     idx = self._chiSquareTable.get_bin_names(splits).index(second_target_top_dims[0])
                #     idx1 = self._chiSquareTable.get_bin_names(splits).index(top_target_top_dims[0])
                #     splits[len(splits)-1] = splits[len(splits)-1]+1
                #     df_second_target = self._data_frame.filter(col(self._target_dimension)==targetLevel).\
                #         filter(col(self._analysed_dimension)>=splits[idx]).filter(col(self._analysed_dimension)<splits[idx+1]).\
                #         select(self._second_level_dimensions).toPandas()
                #     df_second_dim = self._data_frame.filter(col(self._analysed_dimension)>=splits[idx]).\
                #         filter(col(self._analysed_dimension)<splits[idx+1]).\
                #         select(self._second_level_dimensions).toPandas()
                # else:
                #     df_second_target = self._data_frame.filter(col(self._target_dimension)==targetLevel).\
                #         filter(col(self._analysed_dimension)==second_target_top_dims[0]).\
                #         select(self._second_level_dimensions).toPandas()
                #     df_second_dim = self._data_frame.filter(col(self._analysed_dimension)==second_target_top_dims[0]).\
                #         select(self._second_level_dimensions).toPandas()
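                # Both branches above build the same two pandas frames: the
                # second-level dimension rows where (target == targetLevel AND
                # analysed == top dim), and the rows for the top dim alone.
                # The Spark branch just pushes the filters to the cluster and
                # collects with toPandas(); the commented block above is an
                # older variant that resolved binned splits first.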
                # print self._data_frame.select('Sales').show()

                distribution_second = []
                d_l = []
                for d in self._second_level_dimensions:
                    grouped = df_second_target.groupby(d).agg({d: 'count'})
                    contributions = df_second_dim.groupby(d).agg({d: 'count'})
                    contribution_index = list(contributions.index)
                    contributions_val = contributions[d].tolist()
                    contributions_list = dict(list(zip(contribution_index, contributions_val)))
                    index_list = list(grouped.index)
                    grouped_list = grouped[d].tolist()
                    contributions_percent_list = [round(old_div(y * 100.0, contributions_list[x]), 2) for x, y in zip(index_list, grouped_list)]
                    sum_ = grouped[d].sum()
                    diffs = [0] + [grouped_list[i] - grouped_list[i + 1] for i in range(len(grouped_list) - 1)]
                    max_diff = diffs.index(max(diffs))
                    grouped_dict = dict(list(zip(index_list, grouped_list)))
                    # Ensure every level seen in the contributions index is
                    # present in grouped_dict, defaulting missing levels to 0.
                    for val in contribution_index:
                        if val not in list(grouped_dict.keys()):
                            grouped_dict[val] = 0
                    index_list = []
                    grouped_list = []
                    contributions_val = []
                    for key in list(grouped_dict.keys()):
                        index_list.append(str(key))
                        grouped_list.append(grouped_dict[key])
                        contributions_val.append(contributions_list[key])
                    '''
                    print "="*70
                    print "GROUPED - ", grouped
                    print "INDEX LIST - ", index_list
                    print "GROUPED LIST - ", grouped_list
                    print "GROUPED DICT - ", grouped_dict
                    print "CONTRIBUTIONS - ", contributions
                    print "CONTRIBUTION INDEX - ", contribution_index
                    print "CONTRIBUTIONS VAL - ", contributions_val
                    print "CONTRIBUTIONS LIST - ", contributions_list
                    print "CONTRIBUTIONS PERCENT LIST - ", contributions_percent_list
                    print "SUM - ", sum_
                    print "DIFFS - ", diffs
                    print "MAX DIFF - ", max_diff
                    print "="*70
                    '''
                    informative_dict = {
                        "levels": index_list,
                        "positive_class_contribution": grouped_list,
                        "positive_plus_others": contributions_val
                    }
                    informative_df = pd.DataFrame(informative_dict)
                    informative_df["percentage_horizontal"] = old_div(informative_df["positive_class_contribution"] * 100, informative_df["positive_plus_others"])
                    informative_df["percentage_vertical"] = old_div(informative_df["positive_class_contribution"] * 100, sum_)
                    informative_df.sort_values(["percentage_vertical"], inplace=True, ascending=False)
                    informative_df = informative_df.reset_index(drop=True)
                    percentage_vertical_sorted = list(informative_df["percentage_vertical"])
                    percentage_horizontal_sorted = list(informative_df["percentage_horizontal"])
                    levels_sorted = list(informative_df["levels"])
                    differences_list = []
                    for i in range(1, len(percentage_vertical_sorted)):
                        difference = percentage_vertical_sorted[i - 1] - percentage_vertical_sorted[i]
                        differences_list.append(round(difference, 2))
                    '''
                    print "-"*70
                    print "DIFFERENCES LIST - ", differences_list
                    print "-"*70
                    '''
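                    # Two complementary ratios per level, illustrated with
                    # hypothetical counts: positive-class counts [20, 5] and
                    # level totals [40, 50] (so sum_ = 25) give
                    #     percentage_horizontal -> [50.0, 10.0]  (share within each level)
                    #     percentage_vertical   -> [80.0, 20.0]  (share of the positive class)
                    # differences_list (above) holds the drops between
                    # consecutive percentage_vertical values after sorting.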
                    index_txt = ''
                    if differences_list:
                        if differences_list[0] >= 30:
                            print("showing 1st case")
                            index_txt = levels_sorted[0]
                            max_diff_equivalent = 1
                        else:
                            if len(differences_list) >= 2:
                                if differences_list[1] >= 10:
                                    print("showing 1st and 2nd case")
                                    index_txt = levels_sorted[0] + '(' + str(round(percentage_vertical_sorted[0], 1)) + '%)' + ' and ' + levels_sorted[1] + '(' + str(round(percentage_vertical_sorted[1], 1)) + '%)'
                                    max_diff_equivalent = 2
                                else:
                                    print("showing 3rd case")
                                    index_txt = 'including ' + levels_sorted[0] + '(' + str(round(percentage_vertical_sorted[0], 1)) + '%)' + ' and ' + levels_sorted[1] + '(' + str(round(percentage_vertical_sorted[1], 1)) + '%)'
                                    max_diff_equivalent = 3
                            else:
                                print("showing 3rd case")
                                index_txt = 'including ' + levels_sorted[0] + '(' + str(round(percentage_vertical_sorted[0], 1)) + '%)' + ' and ' + levels_sorted[1] + '(' + str(round(percentage_vertical_sorted[1], 1)) + '%)'
                                max_diff_equivalent = 3
                    else:
                        max_diff_equivalent = 0
                    '''
                    print "-"*70
                    print informative_df.head(25)
                    print "-"*70
                    '''
                    distribution_second.append({
                        'contributions': [round(i, 2) for i in percentage_vertical_sorted[:max_diff_equivalent]],
                        'levels': levels_sorted[:max_diff_equivalent],
                        'variation': random.randint(1, 100),
                        'index_txt': index_txt,
                        'd': d,
                        'contributions_percent': percentage_horizontal_sorted
                    })
                '''
                print "DISTRIBUTION SECOND - ", distribution_second
                print "<>"*50
                '''
                targetCardDataDict['distribution_second'] = distribution_second
                targetCardDataDict['second_target'] = targetLevel
                targetCardDataDict['second_target_top_dims'] = second_target_top_dims
                targetCardDataDict['second_target_top_dims_contribution'] = old_div(second_target_top_dims_contribution * 100.0, sum(second_target_contributions))
                targetCardDataDict['second_target_bottom_dim'] = second_target_bottom_dim
                targetCardDataDict['second_target_bottom_dim_contribution'] = second_target_bottom_dim_contribution
                targetCardDataDict['best_second_target'] = levels[best_second_target_index]
                targetCardDataDict['best_second_target_count'] = second_target_contributions[best_second_target_index]
                targetCardDataDict['best_second_target_percent'] = round(old_div(second_target_contributions[best_second_target_index] * 100.0, sum(second_target_contributions)), 2)
                targetCardDataDict['worst_second_target'] = levels[worst_second_target_index]
                targetCardDataDict['worst_second_target_percent'] = round(old_div(second_target_contributions[worst_second_target_index] * 100.0, sum(second_target_contributions)), 2)

                card2Data = []
                targetLevelContributions = [table.get_value(targetLevel, i) for i in levels]
                impact_target_threshold = old_div(sum(targetLevelContributions) * 0.02, len(targetLevelContributions))
                card2Heading = '<h3>Key Drivers of ' + self._target_dimension + ' (' + targetLevel + ')' + "</h3>"
                chart, bubble = self.generate_distribution_card_chart(targetLevel, targetLevelContributions, levels, level_counts, total, impact_target_threshold)
                card2ChartData = NormalChartData(data=chart["data"])
                # Round the chart-data percentage values shown in the Key
                # Drivers tab (table data).
                for d in card2ChartData.get_data():
                    d['percentage'] = round(d['percentage'], 2)
                    d_l.append(d)
                card2ChartJson = ChartJson()
                card2ChartJson.set_data(d_l)
                card2ChartJson.set_chart_type("combination")
                card2ChartJson.set_types({"total": "bar", "percentage": "line"})
                card2ChartJson.set_legend({"total": "# of " + targetLevel, "percentage": "% of " + targetLevel})
                card2ChartJson.set_axes({"x": "key", "y": "total", "y2": "percentage"})
                card2ChartJson.set_label_text({"x": " ", "y": "Count", "y2": "Percentage"})
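                # Each row fed to the combination chart presumably ends up
                # shaped like (illustrative values):
                #     {"key": "<level>", "total": 120, "percentage": 12.5}
                # with "total" drawn as bars against the left axis and
                # "percentage" as a line against y2, per the axes set above.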
                print("self._binTargetCol & self._binAnalyzedCol : ", self._binTargetCol, self._binAnalyzedCol)
                if self._binTargetCol and not self._binAnalyzedCol:
                    print("Only Target Column is Binned")
                    output2 = NarrativesUtils.block_splitter(
                        NarrativesUtils.get_template_output(self._base_dir, 'card2_binned_target.html', targetCardDataDict),
                        self._blockSplitter)
                elif self._binTargetCol and self._binAnalyzedCol:
                    print("Target Column and IV is Binned")
                    output2 = NarrativesUtils.block_splitter(
                        NarrativesUtils.get_template_output(self._base_dir, 'card2_binned_target_and_IV.html', targetCardDataDict),
                        self._blockSplitter)
                else:
                    print("In Else, self._binTargetCol should be False : ", self._binTargetCol)
                    output2 = NarrativesUtils.block_splitter(
                        NarrativesUtils.get_template_output(self._base_dir, 'card2.html', targetCardDataDict),
                        self._blockSplitter)

                card2Data.append(HtmlData(data=card2Heading))
                statistical_info_array = [
                    ("Test Type", "Chi-Square"),
                    ("Chi-Square statistic", str(round(self._chisquare_result.get_stat(), 3))),
                    ("P-Value", str(round(self._chisquare_result.get_pvalue(), 3))),
                    ("Inference", "Chi-squared analysis shows a significant association between {} (target) and {}.".format(self._target_dimension, self._analysed_dimension))
                ]
                statistical_info_array = NarrativesUtils.statistical_info_array_formatter(statistical_info_array)
                card2Data.append(C3ChartData(data=card2ChartJson, info=statistical_info_array))
                card2Data += output2
                card2BubbleData = "<div class='col-md-6 col-xs-12'><h2 class='text-center'><span>{}</span><br /><small>{}</small></h2></div><div class='col-md-6 col-xs-12'><h2 class='text-center'><span>{}</span><br /><small>{}</small></h2></div>".format(bubble[0]["value"], bubble[0]["text"], bubble[1]["value"], bubble[1]["text"])
                card2Data.append(HtmlData(data=card2BubbleData))

                targetCard = NormalCard()
                targetCard.set_card_data(card2Data)
                targetCard.set_card_name("{} : Distribution of {}".format(self._analysed_dimension, targetLevel))
                self._targetCards.append(targetCard)
                dict_for_test[targetLevel] = targetCardDataDict

        out = {'data_dict': data_dict, 'target_dict': dict_for_test}
        return out
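    # The ANOVA overview below labels its effect size "ETA squared"; as a
    # reference, eta squared for a one-way ANOVA is the share of total
    # variance explained by the grouping dimension:
    #     eta^2 = SS_between / SS_total
    # so values near 1 mean the dimension's levels account for most of the
    # variation in the measure.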
    def _generate_narratives(self):
        try:
            nColsToUse = self._analysisDict[self._analysisName]["noOfColumnsToUse"]
        except (KeyError, TypeError):  # analysis config may lack this key or be unset
            nColsToUse = None
        self._anovaNodes = NarrativesTree()
        self._anovaNodes.set_name("Performance")
        for measure_column in self._df_anova_result.get_measure_columns():
            measure_anova_result = self._df_anova_result.get_measure_result(measure_column)
            significant_dimensions_dict, insignificant_dimensions = measure_anova_result.get_OneWayAnovaSignificantDimensions()
            num_dimensions = len(list(significant_dimensions_dict.items())) + len(insignificant_dimensions)
            # Rank the significant dimensions by effect size, descending.
            significant_dimensions = [k for k, v in sorted(list(significant_dimensions_dict.items()), key=lambda x: -x[1])]
            if nColsToUse is not None:
                significant_dimensions = significant_dimensions[:nColsToUse]
            num_significant_dimensions = len(significant_dimensions)
            num_insignificant_dimensions = len(insignificant_dimensions)
            print("num_significant_dimensions", num_significant_dimensions)
            if num_significant_dimensions > 0:
                mainCard = NormalCard(name="Overview of Key Factors")
                data_c3 = []
                for sig_dim in significant_dimensions:
                    data_c3.append({'dimension': sig_dim, 'effect_size': float(significant_dimensions_dict[sig_dim])})
                self.narratives = {}
                self.narratives[AnovaNarratives.KEY_HEADING] = "%s Performance Analysis" % (measure_column,)
                self.narratives['main_card'] = {}
                self.narratives['cards'] = []
                self.narratives['main_card'][AnovaNarratives.KEY_SUBHEADING] = "Relationship between %s and other Dimensions" % (measure_column)
                self.narratives['main_card'][AnovaNarratives.KEY_PARAGRAPH] = []
                data_dict = {
                    'significant_dimensions': significant_dimensions,
                    'insignificant_dimensions': insignificant_dimensions,
                    'num_significant_dimensions': num_significant_dimensions,
                    'num_insignificant_dimensions': num_insignificant_dimensions,
                    'num_dimensions': num_significant_dimensions + num_insignificant_dimensions,
                    'target': measure_column
                }
                output = {'header': ''}
                output['content'] = NarrativesUtils.get_template_output(self._base_dir, 'anova_template_1.html', data_dict)
                self.narratives['main_card'][AnovaNarratives.KEY_PARAGRAPH].append(output)
                output1 = {'header': ''}
                output1['content'] = NarrativesUtils.get_template_output(self._base_dir, 'anova_template_2.html', data_dict)
                lines = []
                lines += NarrativesUtils.block_splitter(output['content'], self._blockSplitter)
                data_c3 = NormalChartData(data_c3)
                chart_data = data_c3.get_data()
                chartDataValues = []
                effect_size_values = []
                for obj in chart_data:
                    effect_size_values.append(obj["effect_size"])
                chart_data_min = min(effect_size_values)
                # When the smallest effect size is tiny, pass the values as
                # strings so select_y_axis_format can presumably pick a more
                # precise number format.
                if chart_data_min < 0.00001:
                    for obj in chart_data:
                        chartDataValues.append(str(obj["effect_size"]))
                else:
                    for obj in chart_data:
                        chartDataValues.append(obj["effect_size"])
                chart_json = ChartJson(data=chart_data,
                                       axes={'x': 'dimension', 'y': 'effect_size'},
                                       label_text={'x': '', 'y': 'Effect Size (scaled exp values)'},
                                       chart_type='bar')
                chart_json.set_axis_rotation(True)
                # chart_json.set_yaxis_number_format(".4f")
                chart_json.set_yaxis_number_format(NarrativesUtils.select_y_axis_format(chartDataValues))
                # st_info = ["Test : ANOVA", "Threshold for p-value : 0.05", "Effect Size : Tukey's HSD"]
                statistical_info_array = [
                    ("Test Type", "ANOVA"),
                    ("Effect Size", "ETA squared"),
                    ("Max Effect Size", chart_data[0]["dimension"]),
                    ("Min Effect Size", chart_data[-1]["dimension"]),
                ]
                statistical_inference = ""
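                # chart_data keeps the descending effect-size order established
                # when significant_dimensions was sorted above, so chart_data[0]
                # and chart_data[-1] are the max and min effect sizes named in
                # statistical_info_array.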
                if len(chart_data) == 1:
                    statistical_inference = "{} is the only variable that has a significant association with {} (Target), with an Effect size of {}".format(
                        chart_data[0]["dimension"],
                        self._dataframe_context.get_result_column(),
                        round(chart_data[0]["effect_size"], 4))
                elif len(chart_data) == 2:
                    statistical_inference = "There are two variables ({} and {}) that have a significant association with {} (Target), with Effect sizes of {} and {} respectively".format(
                        chart_data[0]["dimension"],
                        chart_data[1]["dimension"],
                        self._dataframe_context.get_result_column(),
                        round(chart_data[0]["effect_size"], 4),
                        round(chart_data[1]["effect_size"], 4))
                else:
                    statistical_inference = "There are {} variables that have a significant association with {} (Target), with Effect sizes ranging from {} to {}".format(
                        len(chart_data),
                        self._dataframe_context.get_result_column(),
                        round(chart_data[0]["effect_size"], 4),
                        round(chart_data[-1]["effect_size"], 4))
                if statistical_inference != "":
                    statistical_info_array.append(("Inference", statistical_inference))
                statistical_info_array = NarrativesUtils.statistical_info_array_formatter(statistical_info_array)
                lines += [C3ChartData(data=chart_json, info=statistical_info_array)]
                lines += NarrativesUtils.block_splitter(output1['content'], self._blockSplitter)
                mainCard.set_card_data(lines)
                self._anovaNodes.add_a_card(mainCard)
                self.narratives['main_card'][AnovaNarratives.KEY_PARAGRAPH].append(output1)
                self.narratives['main_card'][AnovaNarratives.KEY_CHART] = {}
                effect_size_chart = {
                    'heading': '',
                    'labels': {'Dimension': 'Effect Size'},
                    'data': significant_dimensions_dict
                }
                print(significant_dimensions_dict)
                self.narratives['main_card'][AnovaNarratives.KEY_CHART]['effect_size'] = effect_size_chart
                progressMessage = CommonUtils.create_progress_message_object(
                    self._analysisName, "custom", "info", "Analyzing Key Drivers",
                    self._completionStatus, self._completionStatus, display=True)
                CommonUtils.save_progress_message(self._messageURL, progressMessage, ignore=False)
                self._generate_dimension_narratives(significant_dimensions, measure_anova_result, measure_column)
            else:
                mainCard = NormalCard(name="Overview of Key Factors")
                cardText = HtmlData("There are no dimensions in the dataset that have significant influence on {}".format(measure_column))
                mainCard.set_card_data([cardText])
                self._anovaNodes.add_a_card(mainCard)