def ott_checking(self,only_mapped_ids,appletv_id,show_type,thread_name,data):
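     """Check OTT link availability for an AppleTV-mapped id and append one
     result row to the CSV report. Mapped ids ('True') get source details,
     link-expiry and OTT-link validation; 'True(Rovi+others)' ids skip the
     source lookup; unmapped ids are written with 'Px_id_null'. Retries up
     to 5 times on network errors."""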
     try:     
         if only_mapped_ids["source_flag"]=='True':
             self.total+=1
             projectx_id=only_mapped_ids["px_id"]
             source_id=only_mapped_ids["source_id"]
             self.logger.debug("\n")
             self.logger.debug ({"total_only_appletv_mapped":self.total,"%s_id"%self.source:appletv_id,
                 "Px_id":projectx_id,"%s_id"%self.source:source_id,"thread_name":thread_name})
             source_details=self.getting_source_details(source_id,show_type,data)
             projectx_details=self.getting_projectx_ott_details(projectx_id,show_type)
             if projectx_details!='Null':
                 if data.get("purchase_info")!="":
                     self.link_expired=lib_common_modules().link_expiry_check_(self.expired_api,self.projectx_domain,source_id,self.service,self.expired_token)
                     ott_validation_result=ott_meta_data_validation_modules().ott_validation(projectx_details,source_id)
                     self.writer.writerow([source_id,projectx_id,show_type,projectx_details["px_video_link_present"],
                               source_details["source_link_present"],ott_validation_result,only_mapped_ids["source_flag"],'',self.link_expired])
                 else:
                     self.writer.writerow([source_id,projectx_id,show_type,'','','',
                         only_mapped_ids["source_flag"],'purchase_info_null'])     
             else:
                 self.writer.writerow([source_id,projectx_id,show_type,projectx_details,
                          source_details["source_link_present"],'',only_mapped_ids["source_flag"],'Px_response_null'])
         elif only_mapped_ids["source_flag"]=='True(Rovi+others)':
             if show_type!='SM':
                 projectx_id=only_mapped_ids["px_id"]
                 source_id=only_mapped_ids["source_id"]
                 self.logger.debug("\n")
                 self.logger.debug ({"Px_id":projectx_id,"%s_id"%self.source:source_id,"thread_name":thread_name})
                 
                 projectx_details=self.getting_projectx_ott_details(projectx_id,show_type)
                 if projectx_details!='Null':
                     if data.get("purchase_info")!="":
                         self.link_expired=lib_common_modules().link_expiry_check_(self.expired_api,self.projectx_domain,source_id,self.service,self.expired_token)
                         ott_validation_result=ott_meta_data_validation_modules().ott_validation(projectx_details,source_id)
                         self.writer.writerow([source_id,projectx_id,show_type,'','',ott_validation_result,
                              only_mapped_ids["source_flag"],'',self.link_expired])
                 else:
                     self.writer.writerow([source_id,projectx_id,show_type,'','','',
                         only_mapped_ids["source_flag"],'Px_response_null'])
         else:            
             self.writer.writerow([appletv_id,'',show_type,'','',only_mapped_ids,'Px_id_null'])          
             
     except (Exception,httplib.BadStatusLine,urllib2.HTTPError,socket.error,urllib2.URLError,RuntimeError) as e:
         self.retry_count+=1
         self.logger.debug("Retrying...................................",self.retry_count)
         self.logger.debug("\n")
         self.logger.debug ("exception/error caught in ott_checking func.........................",type(e),appletv_id,show_type,thread_name)
         if self.retry_count<=5:
             self.ott_checking(only_mapped_ids,appletv_id,show_type,thread_name,data)  
         else:
             self.retry_count=0
 def meta_data_validation_(self, data, projectx_id, thread_name,
                           only_mapped_ids):
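     """Compare the Starz source record against its mapped Projectx record
     field by field (title, description, genres, credits, images, ...) and
     write one CSV row with the per-field match results."""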
     source_details = source_meta_data().getting_source_details(
         self.starz_id, self.show_type, self.source, thread_name, data,
         self.cur)
     projectx_details = ott_meta_data_validation_modules(
     ).getting_projectx_details(projectx_id, self.show_type, self.source,
                                thread_name, self.projectx_programs_api,
                                self.token)
     try:
         if projectx_details != 'Null':
             # Run the field-level validations only when Projectx returned a
             # record; validating against 'Null' may raise inside the
             # validation modules.
             meta_data_validation_result = ott_meta_data_validation_modules(
             ).meta_data_validate_headrun().meta_data_validation(
                 self.starz_id, source_details, projectx_details,
                 self.show_type)
             credits_validation_result = ott_meta_data_validation_modules(
             ).credits_validation(source_details, projectx_details)
             images_validation_result = ott_meta_data_validation_modules(
             ).images_validation(source_details, projectx_details)
             print({"projectx_details": projectx_details})
             self.writer.writerow([
                 self.starz_id, projectx_id, self.show_type,
                 projectx_details["is_group_language_primary"],
                 projectx_details["record_language"],
                 projectx_details["iso_3_char_language"],
                 source_details["source_title"],
                 projectx_details["px_long_title"],
                 projectx_details["px_episode_title"],
                 meta_data_validation_result["title_match"],
                 meta_data_validation_result["description_match"],
                 meta_data_validation_result["genres_match"],
                 meta_data_validation_result["aliases_match"],
                 meta_data_validation_result["release_year_match"],
                 meta_data_validation_result["duration_match"],
                 meta_data_validation_result["season_number_match"],
                 meta_data_validation_result["episode_number_match"],
                 meta_data_validation_result["px_video_link_present"],
                 meta_data_validation_result["source_link_present"],
                 images_validation_result[0], images_validation_result[1],
                 credits_validation_result[0], credits_validation_result[1],
                 only_mapped_ids["source_flag"]
             ])
         else:
             self.writer.writerow([
                 self.starz_id, projectx_id, self.show_type, '', '', '', '',
                 '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
                 'Px_response_null'
             ])
     except Exception as e:
         print("exception caught in meta_data_validation_ func........",
               type(e), self.starz_id, self.show_type)
# Example #3
 def meta_data_validation_(self, projectx_id, source_id, show_type,
                           thread_name, only_mapped_ids, data):
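     """Gracenote variant of the metadata comparison: validate the mapped
     Projectx record against the source details and write one CSV row of
     per-field match results."""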
     source_details = source_meta_data().getting_source_details(
         source_id, show_type, self.source, thread_name, data)
     projectx_details = ott_meta_data_validation_modules(
     ).getting_projectx_details(projectx_id, show_type, self.source,
                                thread_name, self.projectx_programs_api,
                                self.token)
     try:
         if projectx_details != 'Null':
             # Run the field-level validations only when Projectx returned a
             # record.
             meta_data_validation_result = ott_meta_data_validation_modules(
             ).meta_data_validate_gracenote().meta_data_validation(
                 source_id, source_details, projectx_details, show_type)
             credits_validation_result = ott_meta_data_validation_modules(
             ).credits_validation(source_details, projectx_details)
             images_validation_result = ott_meta_data_validation_modules(
             ).images_validation(source_details, projectx_details)
             self.writer.writerow([
                 source_id, projectx_id, show_type,
                 source_details["source_title"],
                 projectx_details["px_long_title"],
                 projectx_details["px_episode_title"],
                 meta_data_validation_result["title_match"],
                 meta_data_validation_result["description_match"],
                 meta_data_validation_result["genres_match"],
                 meta_data_validation_result["aliases_match"],
                 meta_data_validation_result["release_year_match"],
                 meta_data_validation_result["duration_match"],
                 meta_data_validation_result["season_number_match"],
                 meta_data_validation_result["episode_number_match"],
                 meta_data_validation_result["px_video_link_present"],
                 meta_data_validation_result["source_link_present"],
                 images_validation_result[0], images_validation_result[1],
                 credits_validation_result[0], credits_validation_result[1],
                 only_mapped_ids["source_flag"]
             ])
         else:
             self.writer.writerow([
                 source_id, projectx_id, show_type, '', '', '', '', '', '',
                 '', '', '', '', '', '', '', '', '', '', '', '',
                 'Px_response_null'
             ])
     except Exception as e:
         self.logger.debug({
             "exception caught in meta_data_validation_ func........":
             type(e)
         })
 def main(self,start_id,thread_name,end_id):
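     """Page through the Showtime source collection 100 rows at a time,
     look up the Projectx mapping for each id, and run ott_checking on
     every record, writing results to a per-thread CSV."""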
     self.get_env_url()
     self.mongo_mysql_connection()
     result_sheet='/result/headrun_Showtime_ott_%s_checking%s.csv'%(thread_name,datetime.date.today())
     output_file=lib_common_modules().create_csv(result_sheet)
     with output_file as mycsvfile:
         fieldnames = ["%s_id"%self.source,"Projectx_id","show_type","px_video_link_present","Fetch_from","Series_id","Expiry_date","%s_link_present"%self.source,"ott_link_result","mapping","","Expired"]
          self.writer = csv.writer(mycsvfile,dialect="excel",lineterminator='\n')
         self.writer.writerow(fieldnames)
         for _id in range(start_id,end_id,100):
             print({"skip":_id})
              query_headrun_Showtime=self.sourcetable.aggregate([{"$match":{"$and":[{"item_type":{"$in":["movie","episode"]}},{"service":self.source.lower()}]}},
                  {"$project":{"id":1,"_id":0,"item_type":1,"series_id":1,"title":1,"episode_title":1,"release_year":1,"episode_number":1,"season_number":1,
                  "duration":1,"image_url":1,"url":1,"description":1,"cast":1,"directors":1,"writers":1,"categories":1,"genres":1,"maturity_ratings":1,
                  "purchase_info":1,"service":1,"expiry_date":1}},{"$skip":_id},{"$limit":100}])
             #query_headrun_Showtime=self.sourcetable.find({"service":"Showtime","item_type":"movie","id":"70301275"})  
             for data in query_headrun_Showtime:
                 if data.get("id")!="":
                     self.headrun_Showtime_id=data.get("id").encode("ascii","ignore")
                     if data.get("item_type")=="movie":
                         self.show_type="MO"
                     else:
                         self.show_type="SE"
                     self.count_headrun_Showtime_id+=1
                     print("\n")
                      print(datetime.datetime.now())
                     print("\n")
                     print ({"count_headrun_Showtime_id":self.count_headrun_Showtime_id,
                            "id":self.headrun_Showtime_id,"thread_name":thread_name})
                     only_mapped_ids=ott_meta_data_validation_modules().getting_mapped_px_id_mapping_api(str(self.headrun_Showtime_id),self.source_mapping_api,self.projectx_mapping_api,self.show_type,self.source,self.token)
                     self.ott_checking(only_mapped_ids,thread_name,data)              
         print("\n")                    
         print ({"count_headrun_Showtime_id":self.count_headrun_Showtime_id,"name":thread_name})  
     output_file.close()                      
     self.connection.close()
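 # The main(start_id, thread_name, end_id) entry points in these snippets are
 # written to be driven by worker threads that split an id range between them.
 # A minimal driver sketch, assuming a hypothetical OttChecker class that
 # wraps one of the checkers above (class name and range bounds are
 # illustrative only):
 #
 #     import threading
 #
 #     checker = OttChecker()
 #     threads = []
 #     for name, start, end in [("Thread-1", 0, 10000),
 #                              ("Thread-2", 10000, 20000)]:
 #         t = threading.Thread(target=checker.main, args=(start, name, end))
 #         threads.append(t)
 #         t.start()
 #     for t in threads:
 #         t.join()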
 def ott_checking(self, only_mapped_ids, thread_name, data):
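     """HBO Max variant: for a mapped id, check link expiry and OTT link
     presence on the Projectx record and write one CSV row; unmapped ids
     are written with 'Px_id_null'. Retries up to 5 times on network
     errors."""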
     try:
         if only_mapped_ids["source_flag"] == 'True' or only_mapped_ids[
                 "source_flag"] == 'True(Rovi+others)':
             self.total += 1
             projectx_id = only_mapped_ids["px_id"]
             print("\n")
             print({
                 "total_only_headrun_HBOMAX_mapped": self.total,
                 "%s_id" % self.source: self.headrun_HBOMAX_id,
                 "Px_id": projectx_id,
                 "thread_name": thread_name
             })
             projectx_details = self.getting_projectx_ott_details(
                 projectx_id, self.show_type)
             if projectx_details != 'Null':
                 self.link_expired = lib_common_modules(
                 ).link_expiry_check_(self.expired_api,
                                      self.projectx_domain,
                                      self.headrun_HBOMAX_id,
                                      self.source.lower(),
                                      self.expired_token)
                 ott_validation_result = ott_meta_data_validation_modules(
                 ).ott_validation(projectx_details, self.headrun_HBOMAX_id)
                 self.writer.writerow([
                     self.headrun_HBOMAX_id, projectx_id, self.show_type,
                     projectx_details["px_video_link_present"],
                     data["series_id"], data["expiry_date"], '',
                     ott_validation_result, only_mapped_ids["source_flag"],
                     '', self.link_expired,
                     projectx_details["appletv_id_format"],
                     projectx_details["androidtv_id_format"],
                     projectx_details["fetched_from"]
                 ])
             else:
                 self.writer.writerow([
                     self.headrun_HBOMAX_id, projectx_id, self.show_type,
                     projectx_details, data["series_id"],
                     data["expiry_date"], '', '',
                     only_mapped_ids["source_flag"], 'Px_response_null'
                 ])
         else:
             self.total += 1
             self.writer.writerow([
                 self.headrun_HBOMAX_id, '', self.show_type, '',
                 data["series_id"], data["expiry_date"], '',
                 only_mapped_ids, 'Px_id_null'
             ])
     except (Exception, httplib.BadStatusLine, urllib2.HTTPError,
             socket.error, urllib2.URLError, RuntimeError) as e:
         self.retry_count += 1
         print("Retrying..............................", self.retry_count)
         print("\n")
         print(
             "exception/error caught in ott_checking func...................",
             type(e), self.headrun_HBOMAX_id, thread_name)
         if self.retry_count <= 5:
             self.ott_checking(only_mapped_ids, thread_name, data)
         else:
             self.retry_count = 0
 def main(self, start_id, thread_name, end_id):
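     """Read Netflix ids from the input CSV, look each one up in the
     source collection, and run ott_checking on every id in the given
     range."""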
     self.get_env_url()
     self.mongo_mysql_connection()
     input_file = "/input/ids"
     input_data = lib_common_modules().read_csv(input_file)
     result_sheet = '/result/headrun_netflix_ott_%s_checking%s.csv' % (
         thread_name, datetime.date.today())
     output_file = lib_common_modules().create_csv(result_sheet)
     with output_file as mycsvfile:
         fieldnames = [
             "%s_id" % self.source, "Series_id", "Projectx_id", "show_type",
             "px_video_link_present",
             "%s_link_present" % self.source, "ott_link_result", "mapping",
             "", "Expired"
         ]
         self.writer = csv.writer(mycsvfile,
                                  dialect="excel",
                                  lineterminator='\n')
         self.writer.writerow(fieldnames)
         for _id in range(start_id, end_id):
             self.headrun_netflix_id = str(input_data[_id][0])
             query_headrun_netflix = self.sourcetable.find(
                 {"service": "netflix", "id": self.headrun_netflix_id})
             for data in query_headrun_netflix:
                 if data.get("id") != "":
                     if data.get("item_type") == "movie":
                         self.show_type = "MO"
                     else:
                         self.show_type = "SE"
                     self.count_headrun_netflix_id += 1
                     print("\n")
                      print(datetime.datetime.now())
                     print("\n")
                     print({
                         "count_headrun_netflix_id":
                         self.count_headrun_netflix_id,
                         "id": self.headrun_netflix_id,
                         "thread_name": thread_name
                     })
                     only_mapped_ids = ott_meta_data_validation_modules(
                     ).getting_mapped_px_id_mapping_api(
                         str(self.headrun_netflix_id),
                         self.source_mapping_api, self.projectx_mapping_api,
                         self.show_type, self.source, self.token)
                     self.ott_checking(only_mapped_ids, thread_name, data)
         print("\n")
         print({
             "count_headrun_netflix_id": self.count_headrun_netflix_id,
             "name": thread_name
         })
     output_file.close()
     self.connection.close()
# Example #7
    def main(self,start_id,thread_name,end_id):
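        """Page through the Netflix source collection 1000 rows at a time
        and run the metadata comparison for every id whose mapping flag is
        'True'."""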
        self.get_env_url()
        self.mongo_mysql_connection()
        result_sheet='/result/headrun_netflix_meta_data_checking%s_%s.csv'%(thread_name,datetime.date.today())
        output_file=lib_common_modules().create_csv(result_sheet)
        with output_file as mycsvfile:
            fieldnames = ["%s_id"%self.source,"Projectx_id","show_type","%s_title"%self.source,"Px_title"
                        ,"Px_episode_title","title_match","description_match","genres_match","aliases_match",
                        "release_year_match","duration_match","season_number_match","episode_number_match",
                        "px_video_link_present","%s_link_present"%self.source,"image_url_missing","Wrong_url",
                        "credit_match","credit_mismatch"]
            self.writer = csv.writer(mycsvfile,dialect="excel",lineterminator = '\n')
            self.writer.writerow(fieldnames)
            projectx_id=0   
            source_id=0
            for _id in range(start_id,end_id,1000):
                query_headrun_netflix=self.sourcetable.aggregate([{"$match":{"$and":[{"item_type":{"$in":[ "movie","episode","tvshow" ]}},{"service":"netflix"}]}}
                    ,{"$project":{"id":1,"_id":0,"item_type":1,"series_id":1,"title":1,"episode_title":1,"release_year":1,
                    "episode_number":1,"season_number":1,"duration":1,"image_url":1,"url":1,"description":1,"cast":1,"directors":1,"writers":1,
                    "categories":1,"genres":1,"maturity_ratings":1,"purchase_info":1,"service":1}},{"$skip":_id},{"$limit":1000}])
                #query_headrun_netflix=self.sourcetable.find({"service":"netflix","item_type":"episode","id":"80192967"})
                for data in query_headrun_netflix:
                    if data.get("id")!="":
                        headrun_netflix_id=data.get("id")
                        if data.get("item_type")=="movie":
                            show_type="MO"
                        elif data.get("item_type")=="tvshow":
                            show_type="SM"
                        else:
                            show_type="SE"    
                        self.count_headrun_netflix_id+=1
                        print("\n")
                        print(datetime.datetime.now())
                        print ("\n")
                        print("%s_id:"%self.source,"id:",headrun_netflix_id,"count_headrun_netflix_id:"
                            +str(self.count_headrun_netflix_id),"name:"+str(thread_name))
                        only_mapped_ids=ott_meta_data_validation_modules().getting_mapped_px_id_mapping_api(headrun_netflix_id,self.source_mapping_api
                                                           ,self.projectx_mapping_api,show_type,self.source,self.token)
                        if only_mapped_ids["source_flag"]=='True':
                            self.total+=1
                            projectx_id=only_mapped_ids["px_id"]
                            source_id=only_mapped_ids["source_id"]
                            print("\n")
                            print ({"total":self.total,"id":headrun_netflix_id,"Px_id":projectx_id,
                                "%s_id"%self.source:source_id,"thread_name":thread_name,"source_map":only_mapped_ids['source_map']})
                            self.meta_data_validation_(data,projectx_id,source_id,show_type,thread_name,only_mapped_ids)                    

        output_file.close()
        self.connection.close()                    
 def main(self, start_id, thread_name, end_id):
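     """Page through the USA Network (nbc) source collection 10 rows at a
     time and run ott_checking on every id returned by executed_query."""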
     self.get_env_url()
     self.mongo_mysql_connection()
     result_sheet = '/result/headrun_usanetwork_ott_%s_checking%s.csv' % (
         thread_name, datetime.date.today())
     output_file = lib_common_modules().create_csv(result_sheet)
     with output_file as mycsvfile:
         self.writer = csv.writer(mycsvfile,
                                  dialect="excel",
                                  lineterminator='\n')
         self.writer.writerow(self.fieldnames)
         for _id in range(start_id, end_id, 10):
             print({"skip": _id})
             query_headrun_usanetwork = self.executed_query(_id)
             #query_headrun_usanetwork=self.sourcetable.find({"service":"nbc","item_type":"movie","id":"70301275"})
             for data in query_headrun_usanetwork:
                 if data.get("id") != "":
                     self.headrun_usanetwork_id = data.get("id").encode(
                         "ascii", "ignore")
                     if data.get("item_type") == "movie":
                         self.show_type = "MO"  #data.get("item_type")
                     else:
                         self.show_type = "SE"
                     self.count_headrun_usanetwork_id += 1
                     print("\n")
                      print(datetime.datetime.now())
                     print("\n")
                     print({
                         "count_headrun_usanetwork_id":
                         self.count_headrun_usanetwork_id,
                         "show_type": self.show_type,
                         "thread_name": thread_name,
                         "nbc_id": self.headrun_usanetwork_id
                     })
                     only_mapped_ids = ott_meta_data_validation_modules(
                     ).getting_mapped_px_id_mapping_api(
                         str(self.headrun_usanetwork_id),
                         self.source_mapping_api, self.projectx_mapping_api,
                         self.show_type, self.source, self.token)
                     self.ott_checking(only_mapped_ids, thread_name, data)
         print("\n")
         print({
             "count_headrun_usanetwork_id":
             self.count_headrun_usanetwork_id,
             "name": thread_name
         })
     output_file.close()
     self.connection.close()
 def main(self, start_id, thread_name, end_id):
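     """Page through the Gracenote table 100 rows at a time and run
     ott_checking for every record whose Videos field is non-empty."""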
     self.get_env_url()
     self.mongo_mysql_connection()
     result_sheet = '/result/gracenote_ott_%s_checking%s.csv' % (
         thread_name, datetime.date.today())
     output_file = lib_common_modules().create_csv(result_sheet)
     with output_file as mycsvfile:
         self.writer = csv.writer(mycsvfile,
                                  dialect="excel",
                                  lineterminator='\n')
         self.writer.writerow(self.fieldnames)
         for _id in range(start_id, end_id, 100):
             self.logger.debug({"skip": _id})
             query_gracenote = self.executed_query(_id, 100)
             for data in query_gracenote:
                 if data.get("Videos") != {}:
                     gracenote_id = data.get("sequence_id").encode(
                         "ascii", "ignore")
                     if data.get("show_type") == "SM":
                         show_type = "SE"
                     else:
                         show_type = "MO"
                     self.count_gracenote_id += 1
                     self.logger.debug("\n")
                     self.logger.debug(datetime.datetime.now())
                     self.logger.debug("\n")
                     self.logger.debug({
                         "count_gracenote_id": self.count_gracenote_id,
                         "thread_name": thread_name,
                         "details": data
                     })
                     only_mapped_ids = ott_meta_data_validation_modules(
                     ).getting_mapped_px_id_mapping_api(
                         str(gracenote_id), self.source_mapping_api,
                         self.projectx_mapping_api, show_type, self.source,
                         self.token)
                      # TODO: sub_main - the OTT validation result is written here
                     self.ott_checking(only_mapped_ids, gracenote_id,
                                       show_type, thread_name, data)
         self.logger.debug("\n")
         self.logger.debug({
             "count_gracenote_id": self.count_gracenote_id,
             "name": thread_name
         })
     output_file.close()
     self.connection.close()
# Example #10
 def main(self, start_id, thread_name, end_id):
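     """Page through the starz_programs MySQL table 100 rows at a time and
     run ott_checking on every source_program_id."""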
     self.get_env_url()
     self.mongo_mysql_connection()
     result_sheet = '/result/%s_ott_%s_checking%s.csv' % (
         self.service, thread_name, datetime.date.today())
     output_file = lib_common_modules().create_csv(result_sheet)
     with output_file as mycsvfile:
         self.writer = csv.writer(mycsvfile,
                                  dialect="excel",
                                  lineterminator='\n')
         self.writer.writerow(self.fieldnames)
         for _id in range(start_id, end_id, 100):
             print({"skip": _id})
             query_starz = self.cur.execute(
                 "SELECT * FROM starz_programs where item_type in ('episode','movie') limit %d,100"
                 % (_id))
             query_starz_result = self.cur.fetchall()
             for data in query_starz_result:
                 if data.get("source_program_id") != "" and data.get(
                         "source_program_id") is not None:
                     self.starz_id = data.get("source_program_id")
                     self.show_type = data.get("item_type")
                     self.show_type = 'MO' if self.show_type == 'movie' else self.show_type
                     self.show_type = 'SE' if self.show_type == 'episode' else self.show_type
                     self.count_starz_id += 1
                     print("\n")
                      print(datetime.datetime.now())
                     print("\n")
                     print({
                         "count_starz_id": self.count_starz_id,
                         "show_type": self.show_type,
                         "id": self.starz_id,
                         "thread_name": thread_name
                     })
                     only_mapped_ids = ott_meta_data_validation_modules(
                     ).getting_mapped_px_id_mapping_api(
                         str(self.starz_id), self.source_mapping_api,
                         self.projectx_mapping_api, self.show_type,
                         self.source, self.token)
                     self.ott_checking(only_mapped_ids, thread_name, data)
         print("\n")
         print({"count_starz_id": self.count_starz_id, "name": thread_name})
     output_file.close()
     self.connection.close()
     self.cur.close()
# Example #11
 def to_check_only_mapping_to_source(self, gracenote_id, show_type,
                                     thread_name):
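     """Fetch the Projectx mapping for a Gracenote id and, when
     source_flag == 'True', also fetch the Gracenote source details.

     Judging by the keys used throughout these snippets, the mapping result
     looks like {"source_flag": ..., "px_id": ..., "source_id": ...,
     "source_map": ...}. Returns a dict of projectx_id, only_mapped_ids and
     source_details, or None (implicitly) when the id is not mapped."""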
     try:
         only_mapped_ids = ott_meta_data_validation_modules(
         ).getting_mapped_px_id_mapping_api(str(gracenote_id),
                                            self.source_mapping_api,
                                            self.projectx_mapping_api,
                                            show_type, self.source,
                                            self.token)
         self.logger.debug({
             "%s_id" % self.source:
             gracenote_id,
             "count_gracenote_id":
             str(self.count_gracenote_id),
             "name":
             str(thread_name),
             "only_mapped_ids":
             only_mapped_ids["source_flag"]
         })
         if only_mapped_ids["source_flag"] == 'True':
             source_details = lib_common_modules().fetch_response_for_api_(
                 self.gracenote_api % (show_type, gracenote_id), self.token)
             self.total += 1
             projectx_id = only_mapped_ids["px_id"]
             source_id = only_mapped_ids["source_id"]
             self.logger.debug("\n")
             self.logger.debug({
                 "total": self.total,
                 "MO_id": gracenote_id,
                 "Px_id": projectx_id,
                 "%s_id" % self.source: source_id,
                 "thread_name": thread_name
             })
             #self.meta_data_validation_(projectx_id,source_id,show_type,thread_name,only_mapped_ids,source_details)
             return ({
                 "projectx_id": projectx_id,
                 "only_mapped_ids": only_mapped_ids,
                 "source_details": source_details
             })
     except (Exception, URLError, HTTPError,
             httplib.BadStatusLine) as error:
         self.logger.debug({
             "exception caught in to_check_only_mapping_to_source func.. ":
             '{},{},{}'.format(type(error), gracenote_id, show_type)
         })
         # Return the retried result to the caller; note there is no retry
         # cap here, so a persistently failing id recurses indefinitely.
         return self.to_check_only_mapping_to_source(gracenote_id, show_type,
                                                     thread_name)
    def main(self,start_id,thread_name,end_id):
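        """Page through the hbogo_programs MySQL table 1000 rows at a time
        and run the metadata comparison for every non-expired launch_id
        whose mapping flag is 'True'."""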
        self.get_env_url()
        self.mongo_mysql_connection()
        result_sheet='/result/hbogo_meta_data_checking%s_%s.csv'%(thread_name,datetime.date.today())
        output_file=lib_common_modules().create_csv(result_sheet)
        with output_file as mycsvfile:
            self.writer = csv.writer(mycsvfile,dialect="excel",lineterminator='\n')
            self.writer.writerow(self.fieldnames)
            projectx_id=0   
            for _id in range(start_id,end_id,1000):
                query_hbogo=self.cur.execute("SELECT * FROM hbogo_programs where (expired_at is null or expired_at > '%s') and expired='0' limit %d,1000 "%(self.running_datetime,_id))
                #query_hbogo=self.cur.execute("SELECT * FROM hbogo_programs where launch_id='urn:hbo:feature:GVU4OxQ8lp47DwvwIAb24' and (expired_at is null or expired_at > '%s') "%(self.running_datetime))
                query_hbogo_result=self.cur.fetchall()
                for data in query_hbogo_result:
                    if data.get("launch_id")!="" and data.get("launch_id") is not None:
                        self.hbogo_id=data.get("launch_id")    
                        self.show_type=data.get("show_type")
                        if self.show_type is not None and self.show_type!='SN':
                            self.show_type='MO' if self.show_type=='OT' else self.show_type
                            self.count_hbogo_id+=1
                            print("\n")
                            print(datetime.datetime.now())
                            print ("\n")
                            print("%s_id:"%self.source,"id:",self.hbogo_id,"count_hbogo_id:"
                                +str(self.count_hbogo_id),"show_type:"+self.show_type,"name:"+str(thread_name))
                            only_mapped_ids=ott_meta_data_validation_modules().getting_mapped_px_id_mapping_api(self.hbogo_id,self.source_mapping_api,self.projectx_mapping_api,self.show_type,self.source,self.token)
                            try:
                                if only_mapped_ids["source_flag"]=='True':
                                    self.total+=1
                                    projectx_id=only_mapped_ids["px_id"]
                                    print("\n")
                                    print ({"total":self.total,"id":self.hbogo_id,"Px_id":projectx_id,
                                        "thread_name":thread_name,"source_map":only_mapped_ids['source_map']})
                                    self.meta_data_validation_(data,projectx_id,thread_name,only_mapped_ids)
                            except Exception as e:
                                print ("got exception in main....",self.hbogo_id
                                                       ,self.show_type,only_mapped_ids,type(e),thread_name)
                                pass                            

        output_file.close()
        self.connection.close()                    
        self.cur.close()
    def main(self,start_id,thread_name,end_id):
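        """Page through the Showtime source collection 100 rows at a time
        and run the metadata comparison for every id whose mapping flag is
        'True'."""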
        self.get_env_url()
        self.mongo_mysql_connection()
        result_sheet='/result/headrun_showtime_meta_data_checking%s_%s.csv'%(thread_name,datetime.date.today())
        output_file=lib_common_modules().create_csv(result_sheet)
        with output_file as mycsvfile:
            self.writer = csv.writer(mycsvfile,dialect="excel",lineterminator='\n')
            self.writer.writerow(self.fieldnames)
            projectx_id=0   
            for _id in range(start_id,end_id,100):
                query_headrun_showtime=self.sourcetable.aggregate([{"$match":{"$and":[{"item_type":{"$in":[ "movie","episode","tvshow" ]}},{"service":"showtime"}]}},{"$project":{"id":1,"_id":0,"item_type":1,"series_id":1,"title":1,"episode_title":1,"release_year":1,"episode_number":1,"season_number":1,"duration":1,"image_url":1,"url":1,"description":1,"cast":1,"directors":1,"writers":1,
                    "categories":1,"genres":1,"maturity_ratings":1,"purchase_info":1,"service":1}},{"$skip":_id},{"$limit":100}])
                #query_headrun_showtime=self.sourcetable.find({"service":"showtime","id":"3481853"})
                for data in query_headrun_showtime:
                    if data.get("id")!="":
                        self.headrun_showtime_id=data.get("id")
                        if data.get("item_type")=="movie":
                            self.show_type="MO"
                        elif data.get("item_type")=="tvshow":
                            self.show_type="SM"
                        else:
                            self.show_type="SE"    
                        self.count_headrun_showtime_id+=1
                        print("\n")
                        print(datetime.datetime.now())
                        print ("\n")
                        print("%s_id:"%self.source,"id:",self.headrun_showtime_id,"count_headrun_showtime_id:"
                            +str(self.count_headrun_showtime_id),"name:"+str(thread_name))
                        only_mapped_ids=ott_meta_data_validation_modules().getting_mapped_px_id_mapping_api(self.headrun_showtime_id,self.source_mapping_api,self.projectx_mapping_api,self.show_type,self.source,self.token)
                        try:
                            if only_mapped_ids["source_flag"]=='True':
                                self.total+=1
                                projectx_id=only_mapped_ids["px_id"]
                                print("\n")
                                print ({"total":self.total,"id":self.headrun_showtime_id,"Px_id":projectx_id,
                                    "thread_name":thread_name,"source_map":only_mapped_ids['source_map']})
                                self.meta_data_validation_(data,projectx_id,thread_name,only_mapped_ids)
                        except Exception as e:
                            print ("got exception in main....",self.headrun_showtime_id
                                                   ,self.show_type,only_mapped_ids,type(e),thread_name)
                            pass                            

        output_file.close()
        self.connection.close()                    
 def check_ott(self, projectx_details, link_id, service):
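     """Validate one OTT link id against the Projectx record, collect it
     into the present/not-present lists, and return a summary dict.
     Retries up to 5 times on network errors."""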
     try:
         ott_validation_result = ott_meta_data_validation_modules(
         ).ott_validation(projectx_details, link_id)
         if ott_validation_result == "Present":
             self.present_otts.append({service: link_id})
         elif ott_validation_result == "Not_Present":
             self.not_present_otts.append({service: link_id})
         else:
             self.Comment = "px_videos_null"
         self.logger.debug("\n")
         if self.not_present_otts:
             ott_validation_result = "Not_Present"
         self.logger.debug({
             "ott_validation_result": ott_validation_result,
             "present_ott_list": self.present_otts,
             "not_present_otts": self.not_present_otts,
             "comment": self.Comment
         })
         return {
             "ott_validation_result": ott_validation_result,
             "present_ott_list": self.present_otts,
             "not_present_otts": self.not_present_otts,
             "comment": self.Comment
         }
     except (Exception, httplib.BadStatusLine, urllib2.HTTPError,
             socket.error, urllib2.URLError, RuntimeError) as error:
         self.retry_count += 1
         self.logger.debug({
             "Retrying...................................":
             self.retry_count
         })
         self.logger.debug("\n")
         self.logger.debug({
             "exception/error caught in check_ott func..............":
             '{},{},{}'.format(type(error), projectx_details, link_id)
         })
         if self.retry_count <= 5:
             return self.check_ott(projectx_details, link_id, service)
         else:
             self.retry_count = 0
 def main(self, start_id, thread_name, end_id):
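     """Read HBO GO ids and show types from the input CSV and run
     ott_checking on every id in the given range."""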
     self.get_env_url()
     self.mongo_mysql_connection()
     input_file = "/input/hbogo_ids"
     input_data = lib_common_modules().read_csv(input_file)
     result_sheet = '/result/%s_ott_%s_checking%s.csv' % (
         self.service, thread_name, datetime.date.today())
     output_file = lib_common_modules().create_csv(result_sheet)
     with output_file as mycsvfile:
         self.writer = csv.writer(mycsvfile,
                                  dialect="excel",
                                  lineterminator='\n')
         self.writer.writerow(self.fieldnames)
         for _id in range(start_id, end_id):
             print({"skip": _id})
             self.hbogo_id = str(input_data[_id][0])
             self.show_type = str(input_data[_id][2])
             self.show_type = 'MO' if self.show_type == 'OT' else self.show_type
             self.count_hbogo_id += 1
             print("\n")
             print(datetime.datetime.now())
             print("\n")
             print({
                 "count_hbogo_id": self.count_hbogo_id,
                 "show_type": self.show_type,
                 "id": self.hbogo_id,
                 "thread_name": thread_name
             })
             only_mapped_ids = ott_meta_data_validation_modules(
             ).getting_mapped_px_id_mapping_api(str(self.hbogo_id),
                                                self.source_mapping_api,
                                                self.projectx_mapping_api,
                                                self.show_type, self.source,
                                                self.token)
             self.ott_checking(only_mapped_ids, thread_name)
         print("\n")
         print({"count_hbogo_id": self.count_hbogo_id, "name": thread_name})
     output_file.close()
     self.connection.close()
     self.cur.close()
# Example #16
 def main(self, start_id, thread_name, end_id):
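     """Page through the Disney+ source collection 100 rows at a time,
     extract the link id ("sk") from purchase_info, and run ott_checking
     on every mapped id."""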
     self.get_env_url()
     self.mongo_mysql_connection()
     result_sheet = '/result/disneyplus_ott_%s_checking%s.csv' % (
         thread_name, datetime.date.today())
     output_file = lib_common_modules().create_csv(result_sheet)
     with output_file as mycsvfile:
         self.writer = csv.writer(mycsvfile,
                                  dialect="excel",
                                  lineterminator='\n')
         self.writer.writerow(self.fieldnames)
         for _id in range(start_id, end_id, 100):
             print({"skip": _id})
             query_disneyplus = self.sourcetable.aggregate([{
                 "$match": {
                     "$and": [{
                         "item_type": {
                             "$in": ["movie", "episode"]
                         }
                     }, {
                         "service": "%s" % self.source.lower()
                     }]
                 }
             }, {
                 "$project": {
                     "id": 1,
                     "_id": 0,
                     "item_type": 1,
                     "series_id": 1,
                     "title": 1,
                     "episode_title": 1,
                     "release_year": 1,
                     "episode_number": 1,
                     "season_number": 1,
                     "duration": 1,
                     "image_url": 1,
                     "url": 1,
                     "description": 1,
                     "cast": 1,
                     "directors": 1,
                     "writers": 1,
                     "categories": 1,
                     "genres": 1,
                     "maturity_ratings": 1,
                     "purchase_info": 1,
                     "service": 1,
                     "expiry_date": 1
                 }
             }, {
                 "$skip": _id
             }, {
                 "$limit": 100
             }])
             #query_disneyplus=self.sourcetable.aggregate([{"$match":{"$and":[{"item_type":{"$in":["movie","episode"]}},{"service":"%s"%self.source.lower(),"id":{"$in":array}}]}}])
             for data in query_disneyplus:
                 if data.get("id") != "":
                     self.disneyplus_id = data.get("id").encode(
                         "ascii", "ignore")
                     self.disneyplus_link_id = json.loads(
                         data.get("purchase_info").encode(
                             "ascii", "ignore")
                     )[0]["template_platforms"]["template_values"]["sk"]
                     if data.get("item_type") == "movie":
                         self.show_type = "MO"
                     else:
                         self.show_type = "SE"
                     self.count_disneyplus_id += 1
                     print("\n")
                      print(datetime.datetime.now())
                     print("\n")
                     print({
                         "count_disneyplus_id": self.count_disneyplus_id,
                         "id": self.disneyplus_id,
                         "thread_name": thread_name,
                         "show_type": self.show_type
                     })
                     only_mapped_ids = ott_meta_data_validation_modules(
                     ).getting_mapped_px_id_mapping_api(
                         str(self.disneyplus_id), self.source_mapping_api,
                         self.projectx_mapping_api, self.show_type,
                         self.source, self.token)
                     self.ott_checking(only_mapped_ids, thread_name, data)
         print("\n")
         print({
             "count_disneyplus_id": self.count_disneyplus_id,
             "name": thread_name
         })
     output_file.close()
     self.connection.close()
# Example #17
    def main(self, start_id, thread_name, end_id):
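        """Page through the USA Network (nbc) source collection 10 rows at
        a time and run the metadata comparison for every id whose mapping
        flag is 'True'."""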
        self.get_env_url()
        self.mongo_mysql_connection()
        result_sheet = '/result/headrun_usanetwork_meta_data_checking%s_%s.csv' % (
            thread_name, datetime.date.today())
        output_file = lib_common_modules().create_csv(result_sheet)
        with output_file as mycsvfile:
            self.writer = csv.writer(mycsvfile,
                                     dialect="excel",
                                     lineterminator='\n')
            self.writer.writerow(self.fieldnames)
            projectx_id = 0
            source_id = 0
            for _id in range(start_id, end_id, 10):
                query_headrun_usanetwork = self.executed_query(_id)
                #query_headrun_usanetwork=self.sourcetable.find({"service":"nbc","item_type":"episode","id":"80192967"})
                for data in query_headrun_usanetwork:
                    if data.get("id") != "":
                        self.headrun_usanetwork_id = data.get("id")
                        if data.get("item_type") == "movie":
                            self.show_type = "MO"
                        elif data.get("item_type") == "tvshow":
                            self.show_type = "SM"
                        else:
                            self.show_type = "SE"
                        self.count_headrun_usanetwork_id += 1
                        print("\n")
                        print(datetime.datetime.now())
                        print("\n")
                        print(
                            "%s_id:" % self.source, "id:",
                            self.headrun_usanetwork_id,
                            "count_headrun_usanetwork_id:" +
                            str(self.count_headrun_usanetwork_id),
                            "show_type:", self.show_type,
                            "name:" + str(thread_name))
                        only_mapped_ids = ott_meta_data_validation_modules(
                        ).getting_mapped_px_id_mapping_api(
                            self.headrun_usanetwork_id,
                            self.source_mapping_api, self.projectx_mapping_api,
                            self.show_type, self.source, self.token)
                        if only_mapped_ids["source_flag"] == 'True':
                            self.total += 1
                            projectx_id = only_mapped_ids["px_id"]
                            source_id = only_mapped_ids["source_id"]
                            print("\n")
                            print({
                                "total": self.total,
                                "id": self.headrun_usanetwork_id,
                                "Px_id": projectx_id,
                                "%s_id" % self.source: source_id,
                                "thread_name": thread_name,
                                "source_map": only_mapped_ids['source_map']
                            })
                            self.meta_data_validation_(data, projectx_id,
                                                       thread_name,
                                                       only_mapped_ids)

        output_file.close()
        self.connection.close()
# Example #18
    def main(self, start_id, thread_name, end_id):
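        """Read Gracenote ids from the input sheet; validate movies
        directly, and for series validate the show record and then every
        episode returned with the source details."""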
        try:
            projectx_id = 0
            source_id = 0
            self.get_env_url()
            input_sheet = '/input/gracenote_only_mapped_ids'
            data = lib_common_modules().read_csv(input_sheet)

            result_sheet = '/output/gracenote_meta_data_checking%s_%s.csv' % (
                thread_name, datetime.date.today())
            output_file = lib_common_modules().create_csv(result_sheet)
            with output_file as mycsvfile:
                fieldnames = [
                    "%s_id" % self.source, "Projectx_id", "show_type",
                    "%s_title" % self.source, "Px_title", "Px_episode_title",
                    "title_match", "description_match", "genres_match",
                    "aliases_match", "release_year_match", "duration_match",
                    "season_number_match", "episode_number_match",
                    "px_video_link_present",
                    "%s_link_present" % self.source, "image_url_missing",
                    "Wrong_url", "credit_match", "credit_mismatch"
                ]
                self.writer = csv.writer(mycsvfile,
                                         dialect="excel",
                                         lineterminator='\n')
                self.writer.writerow(fieldnames)
                for input_data in range(start_id, end_id):
                    gracenote_episode_id = []
                    gracenote_id = (data[input_data][0])
                    show_type = (data[input_data][1])
                    self.count_gracenote_id += 1
                    if show_type == "MO":
                        self.logger.debug("\n")
                        self.logger.debug(datetime.datetime.now())
                        self.logger.debug("\n")
                        result_movies = self.to_check_only_mapping_to_source(
                            gracenote_id, show_type, thread_name)
                        if result_movies:
                            self.meta_data_validation_(
                                result_movies["projectx_id"], gracenote_id,
                                show_type, thread_name,
                                result_movies["only_mapped_ids"],
                                result_movies["source_details"])
                    else:
                        self.logger.debug("\n")
                        self.logger.debug(datetime.datetime.now())
                        self.logger.debug("\n")
                        result_series = self.to_check_only_mapping_to_source(
                            gracenote_id, show_type, thread_name)
                        if result_series:
                            self.meta_data_validation_(
                                result_series["projectx_id"], gracenote_id,
                                show_type, thread_name,
                                result_series["only_mapped_ids"],
                                result_series["source_details"]["showMeta"])
                            episodes_details = result_series["source_details"][
                                "episodes"]
                            for episode in episodes_details:
                                gracenote_id = episode["program"]["id"].encode(
                                    "utf-8")
                                show_type = episode["showType"]["id"].encode(
                                    "utf-8")
                                self.logger.debug("\n")
                                self.logger.debug(datetime.datetime.now())
                                self.logger.debug("\n")
                                only_mapped_ids = ott_meta_data_validation_modules(
                                ).getting_mapped_px_id_mapping_api(
                                    str(gracenote_id), self.source_mapping_api,
                                    self.projectx_mapping_api, show_type,
                                    self.source, self.token)
                                self.logger.debug({
                                    "%s_SE_id" % self.source:
                                    gracenote_id,
                                    "count_gracenote_id":
                                    str(self.count_gracenote_id),
                                    "name":
                                    str(thread_name),
                                    "only_mapped_ids":
                                    only_mapped_ids["source_flag"]
                                })
                                if only_mapped_ids["source_flag"] == 'True':
                                    self.total += 1
                                    projectx_id = only_mapped_ids["px_id"]
                                    source_id = only_mapped_ids["source_id"]
                                    self.logger.debug("\n")
                                    self.logger.debug({
                                        "total":
                                        self.total,
                                        "SE_id":
                                        gracenote_id,
                                        "Px_id":
                                        projectx_id,
                                        "%s_id" % self.source:
                                        source_id,
                                        "thread_name":
                                        thread_name
                                    })
                                    self.meta_data_validation_(
                                        projectx_id, source_id, show_type,
                                        thread_name, only_mapped_ids, episode)
            output_file.close()
        except Exception as error:
            self.logger.debug({
                "exception caught in main func":
                '{},{},{},{}'.format(type(error), gracenote_id, show_type,
                                     thread_name)
            })