Example #1
    def duplicate_checking_movies(self, movie_data, writer, writer_credit_match_false, thread_name):
        # Check whether an HBO Max movie resolves to more than one ProjectX id and
        # write the duplicate-analysis result to the CSV writers.
        duplicate = ""
        credit_array = []
        print("\nFunction duplicate_checking_movies called.........................")
        self.id = movie_data.get("id")
        self.movie_title = unidecode.unidecode(pinyin.get(movie_data.get("title")))
        self.release_year = movie_data.get("release_year").encode("ascii", "ignore")
        data_expired_api_resp = lib_common_modules().link_expiry_check_(self.expire_api,
                                              self.preprod_domain_name, self.id, self.service, self.Token)
        if data_expired_api_resp:
            self.link_expired = "False" if data_expired_api_resp == 'False' else "True"
        self.reverse_api_extract(self.id, self.source, 'MO')
        print("projectx_ids : {}, headrun_hbomax_mo_id: {}".format(self.px_id, self.id), "threads:", thread_name)
        if len(self.px_id) > 1:
            data_resp_search = duplicate_script_modules().search_api_call_response(self.movie_title,
                                          self.projectx_preprod_search_api,
                                          self.projectx_domain, self.token)
            if data_resp_search is not None:
                result = duplicate_script_modules().search_api_response_validation(data_resp_search, self.source, self.px_id,
                        duplicate, 'MO', self.token, self.projectx_preprod_api, self.projectx_mapping_api,
                        self.projectx_preprod_api, self.duplicate_api, self.credit_db_api)

                if (self.credit_match == 'False' or self.credit_match == '') and len(result["search_px_id"]) == 2:
                    # Exactly two candidates and no credit match yet: pull both programs and compare credits.
                    px_link = self.projectx_preprod_api % '{}'.format(",".join([str(i) for i in result["search_px_id"]]))
                    data_resp_credits = lib_common_modules().fetch_response_for_api_(px_link, self.token)
                    # Collect every credited name from both candidate programs.
                    for uu in data_resp_credits:
                        if uu.get("credits"):
                            for tt in uu.get("credits"):
                                credit_array.append(unidecode.unidecode(tt.get("full_credit_name")))
                    # Any name that appears more than once is shared by both programs.
                    if credit_array:
                        for cc in credit_array:
                            if credit_array.count(cc) > 1:
                                self.credit_match = 'True'
                                break
                            else:
                                self.credit_match = 'False'
                    result_credit_match_false = checking_any_two_px_programs().checking_same_program(result["search_px_id"],
                            self.projectx_preprod_api, self.credit_db_api, self.source, self.token)
                    writer_credit_match_false.writerow(result_credit_match_false)
                writer.writerow([self.source, '', self.id, 'MO', self.movie_title, '', '',
                    self.release_year, self.link_expired, self.px_id, '', '', '', result["comment"], result["comment"],
                    result["duplicate"], result["search_px_id"], self.credit_match, result["count_rovi"], result["count_guidebox"],
                    result["count_source"], result["count_hulu"], result["count_vudu"], result["rovi_mapping"], result["guidebox_mapping"],
                    result["source_mapping"], result["hulu_mapping"], result["vudu_mapping"],
                    result["comment_variant_parent_id_present"], result["comment_variant_parent_id"]])
            else:
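                # The search API gave no response; fall back to the duplicate API for a plain True/False answer.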
                duplicate_api = self.duplicate_api % (self.id, self.source, 'MO')
                data_resp_duplicate = lib_common_modules().fetch_response_for_api_(duplicate_api, self.token)
                if data_resp_duplicate:
                    duplicate = 'True'
                else:
                    duplicate = 'False'
                self.comment = "search_api_has_no_response"
                self.result = "search_api_has_no_response"
                writer.writerow([self.source, '', self.id, 'MO', self.movie_title,
                                 '', '', self.release_year, self.link_expired, self.px_id,
                                 '', '', '', self.comment, self.result, duplicate])
        else:
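            # Fewer than two ProjectX ids under HBO Max: repeat the same checks against the HBOGO catalogue.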
            self.source="HBOGO"
            self.service='hbogo'
            data_expired_api_resp = lib_common_modules().link_expiry_check_(self.expire_api,
                                                  self.preprod_domain_name,self.id,self.service,self.Token)
            if data_expired_api_resp:
                self.link_expired = "False" if data_expired_api_resp=='False' else "True"
            self.reverse_api_extract(self.id, self.source,'MO')
            print ("projectx_ids : {}, headrun_hbogo_mo_id: {}".format(self.px_id,self.id),"threads:",thread_name)
            if len(self.px_id) > 1:
                data_resp_search = duplicate_script_modules().search_api_call_response(self.movie_title, self.projectx_preprod_search_api,
                                                                                       self.projectx_domain, self.token)
                if data_resp_search is not None:
                    result = duplicate_script_modules().search_api_response_validation(data_resp_search, self.source, self.px_id,
                            duplicate, 'MO', self.token, self.projectx_preprod_api, self.projectx_mapping_api,
                            self.projectx_preprod_api, self.duplicate_api, self.credit_db_api)

                    if (self.credit_match == 'False' or self.credit_match == '') and len(result["search_px_id"]) == 2:
                        # Same credit comparison as the HBO Max branch above.
                        px_link = self.projectx_preprod_api % '{}'.format(",".join([str(i) for i in result["search_px_id"]]))
                        data_resp_credits = lib_common_modules().fetch_response_for_api_(px_link, self.token)
                        for uu in data_resp_credits:
                            if uu.get("credits"):
                                for tt in uu.get("credits"):
                                    credit_array.append(unidecode.unidecode(tt.get("full_credit_name")))
                        if credit_array:
                            for cc in credit_array:
                                if credit_array.count(cc) > 1:
                                    self.credit_match = 'True'
                                    break
                                else:
                                    self.credit_match = 'False'
                        result_credit_match_false = checking_any_two_px_programs().checking_same_program(result["search_px_id"],
                                self.projectx_preprod_api, self.credit_db_api, self.source, self.token)
                        writer_credit_match_false.writerow(result_credit_match_false)
                    writer.writerow([self.source, '', self.id, 'MO', self.movie_title, '', '',
                        self.release_year, self.link_expired, self.px_id, '', '', '', result["comment"], result["comment"],
                        result["duplicate"], result["search_px_id"], self.credit_match, result["count_rovi"], result["count_guidebox"],
                        result["count_source"], result["count_hulu"], result["count_vudu"], result["rovi_mapping"], result["guidebox_mapping"],
                        result["source_mapping"], result["hulu_mapping"], result["vudu_mapping"],
                        result["comment_variant_parent_id_present"], result["comment_variant_parent_id"]])
                else:
                    duplicate_api = self.duplicate_api % (self.id, self.source, 'MO')
                    data_resp_duplicate = lib_common_modules().fetch_response_for_api_(duplicate_api, self.token)
                    if data_resp_duplicate:
                        duplicate = 'True'
                    else:
                        duplicate = 'False'
                    self.comment = "search_api_has_no_response"
                    self.result = "search_api_has_no_response"
                    writer.writerow([self.source, '', self.id, 'MO', self.movie_title,
                                     '', '', self.release_year, self.link_expired, self.px_id,
                                     '', '', '', self.comment, self.result, duplicate])
            else:           
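                # Still only a single ProjectX id even under HBOGO; record that no duplicate candidates exist for this link.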
                self.comment=('No multiple ids for this link',self.id)
                self.result="No multiple ids for this link"
                writer.writerow([self.source,'', self.id,'MO', self.movie_title, 
                                 '', '', self.release_year, self.link_expired, self.px_id, 
                                 '', '', '' ,self.comment,self.result])    
    def duplicate_checking_series(self, series_data, source, writer, writer_credit_match_false, thread_name):
        # Check whether a series episode resolves to more than one ProjectX series id and
        # write the duplicate-analysis result to the CSV writers.
        duplicate = ""
        credit_array = []
        print("\nFunction duplicate_checking_series called.................")
        self.id = series_data.get("id").encode("ascii", "ignore")
        self.show_id = series_data.get("series_id").encode("ascii", "ignore")
        self.release_year = series_data.get("release_year").encode("ascii", "ignore")
        self.series_title = unidecode.unidecode(pinyin.get(series_data.get("title")))
        self.episode_title = unidecode.unidecode(pinyin.get(series_data.get("episode_title")))

        if self.series_title is not None:
            data_expired_api_resp = lib_common_modules().link_expiry_check_(self.expire_api,
                                                  self.preprod_domain_name, self.id, self.service, self.Token)
            if data_expired_api_resp:
                self.link_expired = "False" if data_expired_api_resp == 'False' else "True"
            self.reverse_api_extract(self.id, source, 'SE')
            print("projectx_ids : {}, fox_se_id: {}, fox_SM_id: {}".format(self.px_id, self.id, self.show_id), "thread_name:", thread_name)
            if len(self.px_id) > 1:
                px_link = self.projectx_preprod_api % '{}'.format(",".join([str(data) for data in self.px_id]))
                data_resp_link = lib_common_modules().fetch_response_for_api_(px_link, self.token)
                for id_ in data_resp_link:
                    if id_.get("series_id") not in self.series_id_px:
                        self.series_id_px.append(id_.get("series_id"))
                print("projectx_ids_series : {0}".format(self.series_id_px), "thread:", thread_name)
                if len(self.series_id_px) > 1:
                    data_resp_search = duplicate_script_modules().search_api_call_response(self.series_title, self.projectx_preprod_search_api,
                                          self.projectx_domain, self.token)
                    if data_resp_search is not None:
                        result = duplicate_script_modules().search_api_response_validation(data_resp_search, source, self.series_id_px,
                                duplicate, 'SM', self.token, self.projectx_preprod_api, self.projectx_mapping_api,
                                self.beta_programs_api, self.duplicate_api, self.credit_db_api)
                        if (self.credit_match == 'False' or self.credit_match == '') and len(result["search_px_id"]) == 2:
                            px_link = self.projectx_preprod_api % '{}'.format(",".join([str(i) for i in result["search_px_id"]]))
                            data_resp_credits = lib_common_modules().fetch_response_for_api_(px_link, self.token)
                            for uu in data_resp_credits:
                                if uu.get("credits"):
                                    for tt in uu.get("credits"):
                                        credit_array.append(unidecode.unidecode(tt.get("full_credit_name")))
                            # A repeated name means the two candidate programs share a credit.
                            if credit_array:
                                for cc in credit_array:
                                    if credit_array.count(cc) > 1:
                                        self.credit_match = 'True'
                                        break
                                    else:
                                        self.credit_match = 'False'
                            result_credit_match_false = duplicate_script_modules().validation().meta_data_validation(result["search_px_id"],
                                                self.projectx_preprod_api, self.credit_db_api, source, self.token)
                            writer_credit_match_false.writerow(result_credit_match_false)
                        writer.writerow([source, self.show_id, self.id, 'SE', '', self.series_title, self.episode_title,
                            self.release_year, self.link_expired, self.series_id_px, '', '', '', result["comment"], result["comment"],
                            result["duplicate"], result["search_px_id"], self.credit_match, result["count_rovi"], result["count_guidebox"],
                            result["count_source"], result["count_hulu"], result["count_vudu"], result["rovi_mapping"], result["guidebox_mapping"],
                            result["source_mapping"], result["hulu_mapping"], result["vudu_mapping"],
                            result["comment_variant_parent_id_present"], result["comment_variant_parent_id"]])
                    else:
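                        # The search API gave no response; fall back to the duplicate API keyed on the show id.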
                        duplicate_api = self.duplicate_api % (self.show_id, source, 'SM')
                        data_resp_duplicate = lib_common_modules().fetch_response_for_api_(duplicate_api, self.token)
                        if data_resp_duplicate:
                            duplicate = 'True'
                        else:
                            duplicate = 'False'
                        self.comment = "search_api_has_no_response"
                        self.result = "search_api_has_no_response"
                        writer.writerow([source, self.show_id, self.id, 'SE', '', self.series_title,
                                     self.episode_title, self.release_year, self.link_expired, self.series_id_px,
                                     '', '', '', self.comment, self.result, duplicate])
                else:
                    self.comment=('No multiple ids for this series',self.id,self.show_id)
                    self.result="No multiple ids for this series"
                    writer.writerow([source, self.show_id, self.id,'SE' ,'',self.series_title, 
                                     self.episode_title, self.release_year, self.link_expired, self.series_id_px, 
                                     '', '', '' ,self.comment,self.result])        
            else:
                self.comment=('No multiple ids for this episode',self.id,self.show_id)
                self.result="No multiple ids for this episode"
                writer.writerow([source, self.show_id, self.id,'SE', '',self.series_title, 
                                 self.episode_title, self.release_year, self.link_expired, self.px_id, 
                                 '', '', '' ,self.comment,self.result])    
        else:
            self.comment=('No series_title for this episode',self.id,self.show_id)
            self.result="No series_title for this episode"
            writer.writerow([source, self.show_id, self.id,'SE', '',self.series_title, 
                             self.episode_title, self.release_year, '', '', 
                             '', '', '' ,self.comment,self.result])    
Example #3
 def duplicate_checking_movies(self, movie_data, writer,
                               writer_credit_match_false, thread_name):
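     # Gracenote variant of the movie duplicate check: the source id comes from sequence_id
     # and the ProjectX mapping is resolved through the record's OTT link details.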
     duplicate = ""
     credit_array = []
     print(
         "\nFunction duplicate_checking_movies_called........................."
     )
     self.id = movie_data.get("sequence_id").encode("ascii", "ignore")
     self.movie_title = unidecode.unidecode(
         pinyin.get(movie_data.get("title"))).replace("#", "")
     self.release_year = movie_data.get("release_year")
     try:
         self.language = movie_data.get("original_language")
     except Exception:
         self.language = None
     extracting_link_details = self.getting_source_ott_details(movie_data)
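     # Only records that expose at least one OTT link id can be reverse-mapped to ProjectX ids.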
     if extracting_link_details["link_id"]:
         self.reverse_api_extract(extracting_link_details["link_id"][0],
                                  extracting_link_details["service"], 'MO')
         self.logger.debug({
             "projectx_ids": self.px_id,
             "gracenote_mo_id": self.id,
             "threads": thread_name
         })
         if len(self.px_id) > 1:
             data_resp_search = duplicate_script_modules(
             ).search_api_call_response(self.movie_title,
                                        self.projectx_preprod_search_api,
                                        self.projectx_domain, self.token)
             if data_resp_search is not None:
                 result = duplicate_script_modules(
                 ).search_api_response_validation(
                     data_resp_search, self.source, self.px_id, duplicate,
                     'MO', self.token, self.projectx_preprod_api,
                     self.projectx_mapping_api, self.beta_programs_api,
                     self.duplicate_api, self.credit_db_api)
                 if (self.credit_match == 'False' or self.credit_match
                         == '') and len(result["search_px_id"]) == 2:
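                     # Exactly two candidates and no credit match yet: fetch both programs and compare their credit lists.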
                     px_link = self.projectx_preprod_api % '{}'.format(
                         ",".join([str(i) for i in result["search_px_id"]]))
                     data_resp_credits = lib_common_modules(
                     ).fetch_response_for_api(px_link, self.token,
                                              self.logger)
                     for uu in data_resp_credits:
                         if uu.get("credits"):
                             for tt in uu.get("credits"):
                                 credit_array.append(
                                     unidecode.unidecode(
                                         tt.get("full_credit_name")))
                     if credit_array:
                         for cc in credit_array:
                             if credit_array.count(cc) > 1:
                                 self.credit_match = 'True'
                                 break
                             else:
                                 self.credit_match = 'False'
                     result_credit_match_false = duplicate_script_modules(
                     ).validation().meta_data_validation(
                         result["search_px_id"], self.projectx_preprod_api,
                         self.credit_db_api, self.source, self.token)
                     writer_credit_match_false.writerow(
                         result_credit_match_false)
                 writer.writerow([
                     self.source, '', self.id,
                     str({
                         extracting_link_details["service"]:
                         extracting_link_details["link_id"]
                     }), 'MO', self.movie_title, '', '', self.release_year,
                     self.link_expired, self.px_id, '', '', '',
                     result["comment"], result["comment"],
                     result["duplicate"], result["search_px_id"],
                     self.credit_match, result["count_rovi"],
                     result["count_guidebox"], result["count_source"],
                     result["count_hulu"], result["count_vudu"],
                     result["rovi_mapping"], result["guidebox_mapping"],
                     result["source_mapping"], result["hulu_mapping"],
                     result["vudu_mapping"],
                     result["comment_variant_parent_id_present"],
                     result["comment_variant_parent_id"], self.language
                 ])
             else:
                 duplicate_api = self.duplicate_api % (self.id, self.source,
                                                       'MO')
                 data_resp_duplicate = lib_common_modules(
                 ).fetch_response_for_api(duplicate_api, self.token,
                                          self.logger)
                 if data_resp_duplicate:
                     duplicate = 'True'
                 else:
                     duplicate = 'False'
                 self.comment = "search_api_has_no_response"
                 self.result = "search_api_has_no_response"
                 writer.writerow([
                     self.source, '', self.id, '', 'MO', self.movie_title,
                     '', '', self.release_year, self.link_expired,
                     self.px_id, '', '', '', self.comment, self.result,
                      duplicate, '', '', '', '', '', '', '', '',
                      '', '', '', '', '', '', self.language
                 ])
         else:
             self.comment = ('No multiple ids for this link', self.id)
             self.result = "No multiple ids for this link"
             writer.writerow([
                 self.source, '', self.id, '', 'MO', self.movie_title, '',
                 '', self.release_year, self.link_expired, self.px_id, '',
                 '', '', self.comment, self.result, '', '', '', '', '', '',
                  '', '', '', '', '', '',
                  '', '', '', self.language
             ])