def handle(self, *args, **options):
    task_name = self.get_task_name(options)

    try:
        start_time = time.time()
        self.run_task(options)
        runtime = round(time.time() - start_time, 2)

        # Monitor the duration of the task
        logger.info("Task " + task_name + " executed in " + str(runtime) + "s", extra={
            'task': task_name,
            'success': True,
            'runtime': runtime
        })
    except Exception:
        if not settings.DEBUG:
            # Say whether we are on PROD or PRE-PROD and report to Sentry
            # that a problem has been detected
            client.user_context({'prod_status': settings.PROD_STATUS})
            client.captureException()

            # Log the error
            logger.error("Task " + task_name + " failed", extra={
                'task': task_name,
                'success': False
            })
        else:
            raise
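# All snippets in this section assume a module-level logger and a Raven
# (legacy Sentry) client roughly like the sketch below; the SENTRY_DSN
# settings key is an assumption, not from the original code:
import logging
import re
import time

import requests
from django.conf import settings
from raven import Client
from requests.exceptions import Timeout

logger = logging.getLogger(__name__)
client = Client(settings.SENTRY_DSN)  # assumed settings key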
def send_fb_message(self, url, message):
    if self.client_send:
        try:
            response = requests.post(url,
                                     headers={"Content-Type": "application/json"},
                                     data=message)

            if response.status_code != 200:
                logger.warning("Facebook message sending error", extra={
                    'url': url,
                    'content': str(response.content),
                    'response_code': response.status_code
                })
            else:
                logger.info("Facebook message sending success", extra={
                    'url': url,
                    'content': str(response.content),
                    'response_code': response.status_code
                })
        except Timeout:
            logger.error("Facebook message sending timed out", extra={
                'url': url,
                'data': message
            })
        except Exception:
            logger.error("Facebook message sending failed", extra={
                'url': url,
                'data': message
            })
    else:
        self.messages.append(message)
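# Hedged usage sketch for send_fb_message: the URL and payload follow the
# Messenger Platform Send API format; PAGE_ACCESS_TOKEN, recipient_id and
# the sender object are placeholders, not from the original code.
import json

url = ('https://graph.facebook.com/v2.6/me/messages'
       '?access_token=' + PAGE_ACCESS_TOKEN)
message = json.dumps({
    'recipient': {'id': recipient_id},
    'message': {'text': 'Here are your highlights!'}
})
sender.send_fb_message(url, message)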
def fetch(site):
    highlights = []

    fetcher = FETCHERS.get(site)

    if not fetcher:
        raise Exception("Fetcher for " + site + " does not exist!")

    num_pagelet = fetcher['num_pagelet']
    max_days_ago = fetcher['max_days_ago']

    try:
        highlights += fetcher['fetch'](num_pagelet=num_pagelet,
                                       max_days_ago=max_days_ago)
    except Exception:
        # Say which fetcher failed and the prod status
        client.user_context({
            'prod_status': settings.PROD_STATUS,
            'highlights_fetcher': site
        })

        # Report to Sentry that a problem was detected
        client.captureException()

        logger.error("Error while fetching for " + str(site))

    # Update scraping status in database
    scrapping_status_manager.update_scrapping_status(site, bool(highlights))

    # Tell Sentry a scraping problem occurred
    if not highlights:
        raise ScrappingException("Failed to scrape " + site)

    return highlights
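# Illustrative shape of the FETCHERS registry that fetch() reads from;
# the site key, fetcher module and parameter values below are
# hypothetical:
FETCHERS = {
    'footyroom': {
        'fetch': footyroom_fetcher.fetch,
        'num_pagelet': 3,
        'max_days_ago': 7,
    },
}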
def get_video_info(link):
    # Make sure the video is from Ok.ru
    if providers.OK_RU not in link:
        return None

    response = None

    try:
        response = requests.get(link)
    except Exception:
        client.captureException()
        logger.error('Ok.ru status: error | Error url: ' + link)
        return None

    duration_search_result = re.compile(
        'duration\\\\":\\\\"(.*?)\\\\"', 0).search(response.text)

    if not duration_search_result:
        return None

    duration = duration_search_result.groups()[0]

    info = {'duration': int(duration), 'video_url': None}

    return info
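# Quick illustration of the escaped-quote pattern above: the duration is
# embedded as JSON escaped inside an HTML attribute, so the page text
# contains \" rather than bare quotes (the sample text is made up):
sample = 'data-options="{\\"duration\\":\\"95\\"}"'
match = re.compile('duration\\\\":\\\\"(.*?)\\\\"', 0).search(sample)
print(match.groups()[0])  # -> 95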
def add_image_for_highlight(highlight):
    img_link = highlight.img_link

    # No need to upload in debug mode
    if settings.DEBUG:
        return

    # No need to upload default images
    if _is_default_highlight_img(img_link):
        return

    # No need to upload if the image already exists
    if HighlightImage.objects.filter(match_id=highlight.id, img_link=img_link):
        return

    # Upload the image to the cloud
    try:
        img_uploaded_link = uploader.upload_image(img_link)
        logger.info("Image added for img_link: " + img_link, extra={
            'img_link': img_link,
            'img_uploaded_link': img_uploaded_link
        })
    except Exception:
        logger.error("Image failed uploading: " + img_link)
        return

    HighlightImage.objects.update_or_create(match_id=highlight.id,
                                            img_link=img_link,
                                            img_uploaded_link=img_uploaded_link,
                                            source=highlight.source)
def _check_validity(highlights):
    for h in highlights:
        try:
            is_valid = ressource_checker.check(h.link)

            if not is_valid:
                logger.info("Invalidated highlight: " + h.link)
                latest_highlight_manager.set_invalid(h)
        except Exception:
            logger.error("Failed to validate link: {}".format(h.link))
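# Hedged sketch of what a link checker like ressource_checker.check could
# look like; the real implementation is not shown in this excerpt:
def check(link):
    try:
        response = requests.head(link, allow_redirects=True, timeout=10)
        return response.status_code == 200
    except requests.RequestException:
        return False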
def process_exception(self, request, exception):
    sender_id = HighlightsBotView.LATEST_SENDER_ID
    manager_response.send_error_message(sender_id)

    # Make sure the exception signal is fired for Sentry
    client.user_context({'user_id': sender_id})
    got_request_exception.send(sender=self, request=request)

    # Log the error
    logger.error("An error occurred: " + str(exception), extra={
        'user_id': sender_id,
        'method': request.method,
        'full_path': request.get_full_path()
    })

    return HttpResponse()
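# process_exception is a Django middleware hook; returning an
# HttpResponse swallows the exception, which is why the
# got_request_exception signal is sent manually so Raven still records
# it. A settings sketch for wiring it up (the module path is
# hypothetical):
MIDDLEWARE = [
    'django.middleware.common.CommonMiddleware',
    'bot.middleware.HighlightsErrorMiddleware',  # hypothetical path
]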
def get_video_info(link):
    # Make sure the video is from matchat.online or videostreamlet.net
    if providers.MATCHAT_ONLINE not in link \
            and providers.CONTENT_VENTURES not in link \
            and providers.VIDEO_STREAMLET not in link \
            and providers.VEUCLIPS not in link:
        return None

    # Temporarily disable matchat.online, as it is not working anymore
    if providers.MATCHAT_ONLINE in link or providers.CONTENT_VENTURES in link:
        return None

    try:
        page = requests.get(link)

        regex = "settings.bitrates = {hls:\"(.*?)\""
        streaming_link_search_result = re.compile(regex, 0).search(page.text)
        streaming_link = 'https://' + streaming_link_search_result.groups()[0] \
            .replace('//', '').replace('0.m3u8', '360p.m3u8')

        text = requests.get(streaming_link).text

        regex = "#EXTINF:(.*?),"
        durations_search_result = re.findall(regex, text)
        duration = int(sum([float(d) for d in durations_search_result]))

        info = {'duration': duration, 'video_url': None}

        scrapping_status_manager.update_scrapping_status('m3u8', True)
        logger.info('matchat.online SUCCESS | url: ' + link +
                    ' | duration: ' + str(duration))

        return info
    except Exception:
        client.captureException()
        logger.error("Failed to fetch info for link {}".format(link))

        try:
            if ressource_checker.check(link):
                scrapping_status_manager.update_scrapping_status('m3u8', False)
                logger.error('matchat.online FAILURE | url: ' + link)

                return {
                    'duration': 0,  # Allow retries if the link is valid but scraping is not working
                    'video_url': None
                }
            else:
                return None
        except Exception:
            client.captureException()
            logger.error(
                "Failed to fetch info for link {} and resource check failed".format(link))

            return {
                'duration': 0,  # Allow retries if the link is valid but scraping is not working
                'video_url': None
            }
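# Small self-contained illustration of the #EXTINF summing above: an HLS
# playlist lists one EXTINF duration per segment, so the clip length is
# the sum of the segment durations (the playlist text is a made-up sample):
sample_playlist = """#EXTM3U
#EXTINF:10.0,
seg0.ts
#EXTINF:10.0,
seg1.ts
#EXTINF:4.5,
seg2.ts
#EXT-X-ENDLIST"""
durations = re.findall("#EXTINF:(.*?),", sample_playlist)
print(int(sum(float(d) for d in durations)))  # -> 24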