def upload_image_to_s3(bucket_name, public_url, url=None, data=None, filename=None, raise_exception=False):
    """
    Upload an image to an S3 bucket and return its public url.

    Pass either `url` (the image is downloaded first) or `data` (raw image bytes).

    :param bucket_name: target S3 bucket name
    :param public_url: base url the bucket is served from; joined with the filename
    :param url: source url to download the image from
    :param data: raw image bytes
    :param filename: object name in S3; generated via `generate_image_name()` when omitted
    :param raise_exception: when True, re-raise any failure instead of only logging it
    :return: public url of the uploaded image, or None on failure when `raise_exception` is False
    """
    assert url or data, 'Must pass url or data'
    source = url if url else str(ImageData(data))
    debug_logger.debug("Uploading image to S3 from %s" % source)
    try:
        if not data:
            response = requests.get(url, timeout=10)
            # Fail early on 4xx/5xx instead of treating an error page as image bytes
            response.raise_for_status()
            data = response.content
        if not filename:
            filename = generate_image_name()
        # Check if an actual image; raises if the bytes are not decodable
        Image.open(StringIO(data))
        # Connect to S3
        s3 = boto3.resource('s3')
        # Upload
        s3.Object(bucket_name, filename).put(
            Body=data, ContentType='image/jpg'
        )
        # Todo (mo): set actual content type or convert to jpg
        # Construct public url
        s3_image_url = '%s/%s' % (public_url, filename)
        debug_logger.debug("Uploaded image to S3: %s" % s3_image_url)
        return s3_image_url
    except Exception:
        if raise_exception:
            # bare `raise` preserves the original traceback; `raise e` loses it on Python 2
            raise
        else:
            error_logger.warn("Uploading image to S3 failed", exc_info=True)
def error_response(self, error, **kwargs):
    """
    Return an error response to the client with default status code of *400* stating
    the error as outlined in :rfc:`5.2`.
    """
    oauth_client = kwargs.get('client')
    grant = kwargs.get('grant_type', 'no-grant')
    if oauth_client:
        name = oauth_client.name
    else:
        name = 'no-client'
    # Log with the client / grant context so failures can be traced per consumer
    error_logger.warn("oAuth2 Error - %s - %s - %s" % (name, grant, error),
                      extra={'request': self.request._request}, exc_info=True)
    return Response(error, status=400)
def add_videos_to_item(item, videos=None, remove_existing=False):
    """
    Create Video objects from the `videos` dicts and attach them to `item`.

    Does nothing when `videos` is None. At most `settings.MAX_VIDEOS_PER_ITEM`
    entries are processed; entries with missing keys or integrity errors are
    logged and skipped.
    """
    if videos is None:
        return
    if remove_existing:
        item.videos.all().delete()
    for entry in videos[:settings.MAX_VIDEOS_PER_ITEM]:
        # todo: better handling
        try:
            item.videos.add(Video.objects.create(
                url=entry['url'],
                thumbnail_url=entry['thumbnail_url'],
                provider=entry['provider'],
                id_on_provider=entry['id_on_provider'],
                duration=entry['duration']))
        except (KeyError, IntegrityError) as err:
            error_logger.warn(str(err), exc_info=True)
def from_location_index(lat, lon, ip=None, ip_location=None):
    """
    Resolve a location dict from coordinates, falling back through several sources.

    Order: ip lookup when lat/lon are both 0 → geo-distance search of saved
    LocationIndex entries → ip_location / from_ip / DEFAULT_LOCATION on search
    failure → Google geocoding. The original (rounded) lat/lon are written back
    into the result unless DEFAULT_LOCATION was used.

    :param lat: latitude (anything float() accepts)
    :param lon: longitude (anything float() accepts)
    :param ip: optional client ip used for ip-based fallback
    :param ip_location: optional pre-resolved ip location dict used as first fallback
    :return: a location dict
    """
    lat = round(float(lat), 6)
    lon = round(float(lon), 6)
    location = {}
    # 0 - if lat and lon are 0, use the ip to determine location
    if lat == 0 and lon == 0:
        location = from_ip(ip, use_location_index=True)
    # 1 - search for saved locations ordered by distance
    if not location:
        try:
            # Closest indexed location only ([:1])
            indexed_locations = LocationIndex.search().sort({
                "_geo_distance": {
                    "location": {
                        'lat': lat,
                        'lon': lon
                    },
                    'unit': 'km',
                    'order': 'asc'
                }
            }).execute()[:1]
        except (ElasticsearchException, KeyError):
            # Search failed entirely: fall back to ip_location, then ip, then default
            error_logger.warn("Location Index searching failed", exc_info=True)
            if ip_location:
                location = ip_location
            elif ip:
                location = from_ip(ip)
            else:
                location = DEFAULT_LOCATION
        else:
            # 2 - check if there are 'correct' results
            if indexed_locations and isinstance(indexed_locations[0], LocationIndex):
                # 3 - check closest location, if closer than x km return its attributes
                # meta['sort'][0] is the geo distance in km from the sort above
                closest_location = indexed_locations[0]
                if closest_location.meta['sort'][0] < 5.0:
                    location = closest_location.location_dict
        # 4 - else (no close-enough indexed hit): ask Google geocoding
        if not location:
            latlng = "%s,%s" % (lat, lon)
            location = from_google_geocode_response(latlng, ip, ip_location)
    # 5 - use original lat, lon if there were not 0 and we did not use DEFAULT_LOCATION
    if location != DEFAULT_LOCATION:
        if lat != 0.0:
            location['latitude'] = round(float(lat), 6)
        if lon != 0.0:
            location['longitude'] = round(float(lon), 6)
    return location
def filter_location(self, queryset):
    """
    Narrow `queryset` by country, then state, then city from self.data.

    Each level uses an existence fallback: if filtering by the provided value
    yields nothing, the value is reset to '' and the filter is retried with the
    empty string (records with no value at that level). Deeper levels are only
    attempted when the shallower value survived its fallback. When no value was
    provided at a level, the queryset is filtered by '' at that level instead.
    Logs a warning when the final queryset is empty.
    """
    country = self.data.get('country', '')
    state = self.data.get('state', '')
    city = self.data.get('city', '')
    # country
    if country:
        country_qs = queryset.filter(country=country)
        if not country_qs:
            # No match for the given country: retry with '' (unscoped records)
            country = ''
            country_qs = queryset.filter(country=country)
        queryset = country_qs
        # state - only descend when the country value survived the fallback
        if country:
            if state:
                state_qs = queryset.filter(state=state)
                if not state_qs:
                    state = ''
                    state_qs = queryset.filter(state=state)
                queryset = state_qs
                # city - only descend when the state value survived the fallback
                if state:
                    if city:
                        city_qs = queryset.filter(city=city)
                        if not city_qs:
                            city = ''
                            city_qs = queryset.filter(city=city)
                        queryset = city_qs
                    else:
                        # No city provided: restrict to records with empty city
                        queryset = queryset.filter(city=city)
            else:
                # No state provided: restrict to records with empty state
                queryset = queryset.filter(state=state)
    else:
        # No country provided: restrict to records with empty country
        queryset = queryset.filter(country=country)
    if not queryset:
        # NOTE(review): filtering an already-empty queryset again is a no-op;
        # presumably kept so the empty result is logged with its filter context
        queryset = queryset.filter(country='')
        error_logger.warn("Discover returned 0 Featured Tags", extra={
            'country': country,
            'state': state,
            'city': city,
        })
    return queryset
def ready(self):
    """
    App start-up hook: create the global Elasticsearch connection and make sure
    the LocationIndex and ShoutIndex indexes exist.
    """
    from elasticsearch import Elasticsearch, RequestsHttpConnection, RequestError, ConnectionTimeout
    from elasticsearch_dsl.connections import connections
    from requests_aws4auth import AWS4Auth
    from django.conf import settings
    from shoutit.models import LocationIndex, ShoutIndex
    from shoutit.utils import error_logger
    import shoutit

    # Todo (Nour): Cleanup!
    # Define a default global Elasticsearch client
    if 'es.amazonaws.com' in settings.ES_URL:
        # Connect using IAM based authentication on AWS
        awsauth = AWS4Auth(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY, 'eu-west-1', 'es')
        ES = Elasticsearch(hosts=[settings.ES_URL], http_auth=awsauth, use_ssl=True, verify_certs=True,
                           connection_class=RequestsHttpConnection)
        connections.add_connection(alias='default', conn=ES)
    else:
        ES = connections.create_connection(hosts=[settings.ES_URL])
    shoutit.ES = ES

    # Initiate each index if not initiated
    for index_cls in (LocationIndex, ShoutIndex):
        try:
            index_cls.init()
        except RequestError:
            # presumably the index already exists -- ignored, as before
            pass
        except ConnectionTimeout:
            error_logger.warn("ES Server is down", exc_info=True)
def save_message_attachments(message, attachments):
    """
    Persist the attachment dicts of a message as MessageAttachment records.

    Each attachment dict may carry (non-exclusively) a shout, a profile, a
    shared location, and/or media (images/videos); every matching key produces
    its own MessageAttachment for `message` and its conversation.
    """
    conversation = message.conversation
    for attachment in attachments:
        # todo: map the content types to models
        if MESSAGE_ATTACHMENT_TYPE_SHOUT.text in attachment:
            object_id = attachment[MESSAGE_ATTACHMENT_TYPE_SHOUT.text]['id']
            content_type = ContentType.objects.get_for_model(Shout)
            ma_type = MESSAGE_ATTACHMENT_TYPE_SHOUT
            # NOTE(review): .create(...).save() looks like a double save if
            # `create` already persists (it does below for media) -- confirm
            MessageAttachment.create(message_id=message.id, conversation_id=conversation.id,
                                     content_type=content_type, object_id=object_id, type=ma_type).save()
        if MESSAGE_ATTACHMENT_TYPE_PROFILE.text in attachment:
            object_id = attachment[MESSAGE_ATTACHMENT_TYPE_PROFILE.text]['id']
            content_type = ContentType.objects.get_for_model(User)
            ma_type = MESSAGE_ATTACHMENT_TYPE_PROFILE
            MessageAttachment.create(message_id=message.id, conversation_id=conversation.id,
                                     content_type=content_type, object_id=object_id, type=ma_type).save()
        if MESSAGE_ATTACHMENT_TYPE_LOCATION.text in attachment:
            # Shared location: persist the coordinates first, then attach them
            location = attachment['location']
            sl = SharedLocation(latitude=location['latitude'], longitude=location['longitude'])
            sl.save()
            object_id = sl.id
            content_type = ContentType.objects.get_for_model(SharedLocation)
            ma_type = MESSAGE_ATTACHMENT_TYPE_LOCATION
            # NOTE(review): this branch passes model instances (message=/conversation=)
            # while the two above pass ids -- inconsistent but presumably equivalent
            MessageAttachment.create(message=message, conversation=conversation, content_type=content_type,
                                     object_id=object_id, type=ma_type)
        if any_in(['images', 'videos'], attachment):
            ma_type = MESSAGE_ATTACHMENT_TYPE_MEDIA
            # `or []` also guards against explicit None values in the payload
            images = attachment.get('images', []) or []
            videos = attachment.get('videos', []) or []
            ma = MessageAttachment.create(type=ma_type, message=message, conversation=conversation,
                                          images=images)
            for v in videos:
                # todo: better handling
                try:
                    video = Video.create(url=v['url'], thumbnail_url=v['thumbnail_url'],
                                         provider=v['provider'], id_on_provider=v['id_on_provider'],
                                         duration=v['duration'])
                    ma.videos.add(video)
                except Exception:
                    # Skip malformed video entries; keep the remaining ones
                    error_logger.warn("Error creating video", exc_info=True)
def from_google_geocode_response(latlng, ip=None, ip_location=None):
    """
    Reverse-geocode `latlng` ("lat,lng" string) using the Google Geocoding API.

    On any failure (0,0 coordinates, non-OK API status, network error) falls
    back to `ip_location`, then `from_ip(ip)`, then DEFAULT_LOCATION.

    :param latlng: "lat,lng" string passed straight to the API
    :param ip: optional client ip for the fallback lookup
    :param ip_location: optional pre-resolved ip location dict, first fallback
    :return: a location dict
    """
    params = {
        'latlng': latlng,
        'language': "en"
    }
    try:
        if latlng in ['0,0', '0.0,0.0', '0.0,0', '0,0.0']:
            raise ValueError("Ignoring 0,0 lat lng")
        # timeout added so a slow Google endpoint cannot hang the caller;
        # matches the timeouts used by the other http calls in this module
        geocode_response = requests.get("https://maps.googleapis.com/maps/api/geocode/json", params,
                                        timeout=10).json()
        if geocode_response.get('status') != 'OK':
            raise Exception("Make sure you have a valid latlng param")
        location = parse_google_geocode_response(geocode_response)
    except Exception:
        error_logger.warn("Google geocoding failed", exc_info=True)
        if ip_location:
            location = ip_location
        elif ip:
            location = from_ip(ip)
        else:
            location = DEFAULT_LOCATION
    return location
def exchange_code(request, code):
    """
    Exchange a Facebook OAuth `code` for an access token.

    :param request: the Django request the code arrived on; its query string
        (up to '&code') is reused to rebuild the redirect_uri Facebook expects
    :param code: the OAuth code returned by Facebook
    :return: dict with 'accessToken' and 'expiresIn', or None on any failure
    """
    # Get Access Token using the Code then make an authResponse
    try:
        qs = request.META['QUERY_STRING'].split('&code')[0]
        # redirect_uri must match the one used when requesting the code
        # redirect_uri = urllib.quote('%s%s?%s' % (settings.SITE_LINK, request.path[1:], qs))
        redirect_uri = settings.SITE_LINK + request.path[1:] + qs
        exchange_url = FB_GRAPH_ACCESS_TOKEN_URL
        params = {
            'client_id': settings.FACEBOOK_APP_ID,
            'client_secret': settings.FACEBOOK_APP_SECRET,
            'redirect_uri': redirect_uri,
            'code': code
        }
        response = requests.get(exchange_url, params=params, timeout=20)
        params = dict(urlparse.parse_qsl(response.content))
        # Build the response inside the try: when Facebook rejects the code it
        # returns an error document with no 'access_token', which previously
        # raised an uncaught KeyError here instead of returning None
        auth_response = {
            'accessToken': params['access_token'],
            'expiresIn': params['expires'],
        }
    except Exception as e:
        # str(e) instead of the deprecated, often-empty e.message
        error_logger.warn(str(e), exc_info=True)
        return None
    return auth_response
def publish_shout_to_facebook(shout):
    """
    Publish `shout` as an Open Graph 'shout' action on its owner's Facebook feed.

    Skips silently when the user has no linked Facebook account or the account
    lacks the 'publish_actions' scope. On success stores the Facebook id on
    `shout.published_on` and tracks the share; on failure logs the API response.
    """
    la = getattr(shout.user, 'linked_facebook', None)
    if not la:
        debug_logger.debug(
            'No linked_facebook, skip publishing Shout %s on Facebook' % shout)
        return
    if 'publish_actions' not in la.scopes:
        debug_logger.debug(
            'No publish_actions in scopes, skip publishing Shout %s on Facebook' % shout)
        return
    prod = settings.SHOUTIT_ENV == 'prod'
    # Non-prod environments use their own namespaced Open Graph app
    namespace = 'shoutitcom' if prod else 'shoutitcom-' + settings.SHOUTIT_ENV
    actions_url = 'https://graph.facebook.com/v2.6/me/%s:shout' % namespace
    params = {
        'access_token': la.access_token,
        shout.get_type_display(): shout.web_url,
        'privacy': "{'value':'EVERYONE'}"
    }
    if prod:
        params['fb:explicitly_shared'] = True
    # timeout added so a slow Graph API call cannot hang the caller;
    # matches the timeouts used elsewhere in this module
    res = requests.post(actions_url, params=params, timeout=20).json()
    id_on_facebook = res.get('id')
    if id_on_facebook:
        shout.published_on['facebook'] = id_on_facebook
        shout.save(update_fields=['published_on'])
        # Track
        mixpanel_controller.track(shout.user.pk, 'share_shout_on_fb', shout.track_properties)
        debug_logger.debug('Published shout %s on Facebook' % shout)
    else:
        error_logger.warn('Error publishing shout on Facebook', extra={
            'res': res,
            'shout': shout
        })
def paginate_queryset(self, index_queryset, request, view=None):
    """
    Paginate a queryset using Elasticsearch index.

    Runs the page's slice of `index_queryset` against ES, then fetches the
    matching database objects from `view.get_queryset()` preserving the ES
    ordering, and sets `self.page`, `self._num_results`, `self._num_pages` and
    `self.page_number`. Returns the page (list of db objects) or None when no
    page size applies.

    NOTE(review): relies on Python 2 semantics -- `map`/`filter` returning
    lists and `/` doing integer division for `max_page_number`.
    """
    self.page_size = self.get_page_size(request)
    if not self.page_size:
        return None
    # Integer division (Py2): highest page that stays within max_results
    max_page_number = self.max_results / self.page_size
    page_number = request.query_params.get(self.page_query_param, 1)
    page_number = self.get_valid_page_number(page_number)
    index_response = []
    if page_number > max_page_number:
        # Requested page is beyond the hard cap: skip the ES query entirely
        self._max_page_number_exceeded = True
        self._num_results = index_queryset.count()
    else:
        _from = (page_number - 1) * self.page_size
        _to = page_number * self.page_size
        try:
            index_response = index_queryset[_from:_to].execute()
            # if there are no results for this [_from:_to], check if there are ones at all
            if not index_response:
                self._num_results = index_queryset.count()
                if self._num_results:
                    # there are results meaning provided page number exceeded max possible one
                    self._max_possible_page_number_exceeded = True
            else:
                if isinstance(index_response[0], Result):
                    raise SerializationError("Results from different index")
                # Logging success calls - to compare
                # extra = {'request': request._request, 'query_dict': index_queryset.__dict__}
                # error_logger.info('ES Success', extra=extra)
        except (ElasticsearchException, KeyError) as e:
            msg = "ES Exception: " + str(type(e))
            extra = {'detail': str(e), 'request': request._request, 'query_dict': index_queryset.__dict__}
            error_logger.warn(msg, exc_info=True, extra=extra)
            # possible errors
            # SerializationError: returned data was corrupted
            # KeyError: some bug in the elasticsearch-dsl library.
            # ConnectionTimeout
            # todo: handle returned data are from different index! report bug issue
            index_response = []
    # Save the index order. `None` is used to later filter out the objects that do not exist in db query
    index_tuples = map(lambda s: (s.meta.id, None), index_response)
    objects_dict = OrderedDict(index_tuples)
    # Fetch objects from database
    ids = objects_dict.keys()
    if ids:
        qs = view.get_queryset().filter(id__in=ids)
    else:
        qs = []
    # Replace the values of objects_dict with the actual db objects and filter out the non existing ones
    # str(pk) is used to make sure the ids are converted to strings otherwise setitem will create new keys
    map(lambda s: objects_dict.__setitem__(str(s.pk), s), qs)
    self.page = filter(None, objects_dict.values())
    # Total hit count from ES, but only when this page actually has objects
    self._num_results = index_response.hits.total if self.page else 0
    self._num_pages = int(math.ceil(self._num_results / (self.page_size * 1.0)))
    if self._max_page_number_exceeded or self._max_possible_page_number_exceeded:
        # Clamp the reported page number to one past the last real page
        self.page_number = min(self._num_pages + 1, max_page_number + 1)
    else:
        self.page_number = page_number
    self.request = request
    if (len(self.page) > 1 or self._num_results) and self.template is not None:
        # The browsable API should display pagination controls.
        self.display_page_controls = True
    return self.page