def _post_process_result(cls, result):
    # override Executor._post_process_result
    """Derive per-frame 'strred' scores from the 'srred' and 'trred'
    atom-feature scores already stored in result.result_dict.

    Uses the "authentic" STRRED formulation (Way Two below): average the
    spatial (S) and temporal (T) scores over all frames first, then
    multiply, broadcasting the single product to one identical value per
    frame via np.ones.

    Returns the (mutated) result, with the 'strred' scores key added.
    """
    result = super(StrredFeatureExtractor, cls)._post_process_result(result)

    # calculate strred from srred and trred
    srred_scores_key = cls.get_scores_key('srred')
    trred_scores_key = cls.get_scores_key('trred')
    strred_scores_key = cls.get_scores_key('strred')
    srred_scores = result.result_dict[srred_scores_key]
    trred_scores = result.result_dict[trred_scores_key]

    # compute strred scores
    # === Way One (not used): consistent with VMAF framework, which is to
    # multiply S and T scores per frame, then average ===
    # (the former inner `_strred` helper implementing Way One was dead
    # code — only referenced from a commented-out line — and was removed)
    # === Way Two: authentic way of calculating STRRED score: average
    # first, then multiply ===
    assert len(srred_scores) == len(trred_scores)
    # nonemean presumably averages while skipping None entries — verify
    # against ListStats if frames can carry missing S/T scores
    strred_scores = ListStats.nonemean(srred_scores) \
        * ListStats.nonemean(trred_scores) \
        * np.ones(len(srred_scores))
    result.result_dict[strred_scores_key] = strred_scores

    # validate: every derived feature must now have a scores entry
    for feature in cls.DERIVED_ATOM_FEATURES:
        assert cls.get_scores_key(feature) in result.result_dict

    return result
def _post_process_result(cls, result):
    # override Executor._post_process_result
    """Derive two STRRED variants from the 'srred' and 'trred'
    atom-feature scores in result.result_dict:

    - 'strred' (Way One): per-frame S*T product, then averaged downstream
      — consistent with the VMAF framework.
    - 'strred_all_same' (Way Two): mean(S) * mean(T), broadcast to one
      identical value per frame — the authentic STRRED formulation.

    Returns the (mutated) result with both scores keys added.
    """

    def _strred(srred_trred):
        # Per-frame S*T product; if exactly one side is missing (None),
        # fall back to the side that is present. Guard-clause form: the
        # original trailing `else: return None` was unreachable, since
        # when both are None the srred-is-None branch already returns
        # None (trred).
        srred, trred = srred_trred
        if srred is None:
            return trred
        if trred is None:
            return srred
        return srred * trred

    result = super(StrredOptFeatureExtractor, cls)._post_process_result(result)

    srred_scores_key = cls.get_scores_key('srred')
    trred_scores_key = cls.get_scores_key('trred')
    strred_scores_key = cls.get_scores_key('strred')
    strred_all_same_scores_key = cls.get_scores_key('strred_all_same')
    srred_scores = result.result_dict[srred_scores_key]
    trred_scores = result.result_dict[trred_scores_key]
    assert len(srred_scores) == len(trred_scores)

    # === Way One: consistent with VMAF framework, which is to multiply
    # S and T scores per frame, then average ===
    strred_scores = list(map(_strred, zip(srred_scores, trred_scores)))

    # === Way Two: authentic way of calculating STRRED score: average
    # first, then multiply ===
    # nonemean presumably averages while skipping None entries — verify
    # against ListStats if frames can carry missing S/T scores
    strred_all_same_scores = ListStats.nonemean(srred_scores) \
        * ListStats.nonemean(trred_scores) \
        * np.ones(len(srred_scores))

    result.result_dict[strred_all_same_scores_key] = strred_all_same_scores
    result.result_dict[strred_scores_key] = strred_scores

    # validate: every derived feature must now have a scores entry
    for feature in cls.DERIVED_ATOM_FEATURES:
        assert cls.get_scores_key(feature) in result.result_dict

    return result