def get(self, request, **kwargs):
    """Approve (optionally feature or queue) the video named by the
    request, give it an author account, and record it in the session's
    exclusion sets so later listings skip it.

    Returns an ``HttpResponse`` — 400 on bad input, 402 when the tier's
    video limit blocks approval, otherwise ``'SUCCESS'``.
    """
    try:
        video = self.get_object()
    except ValueError:
        return HttpResponseBadRequest("Invalid video_id parameter.")
    if not self.form.is_valid():
        return HttpResponseBadRequest("Invalid query.")
    if video is None:
        return HttpResponseBadRequest("No video found for that video_id.")

    queueing = bool(request.GET.get('queue'))
    if not queueing:
        # Only outright approval counts against the tier's video limit;
        # queueing for review does not.
        tier = SiteLocation.objects.get_current().get_tier()
        if not tier.can_add_more_videos():
            return HttpResponse(
                content="You are over the video limit. You "
                        "will need to upgrade to approve "
                        "that video.",
                status=402)

    # Attribute the video either to the saved search it came from or,
    # failing that, to the requesting user.
    site = Site.objects.get_current()
    try:
        search = SavedSearch.objects.get(
            site=site, query_string=self.form.cleaned_data['q'])
    except SavedSearch.DoesNotExist:
        video.user = request.user
    else:
        video.search = search

    if request.GET.get('feature'):
        video.status = Video.ACTIVE
        video.last_featured = datetime.now()
    elif queueing:
        video.status = Video.UNAPPROVED
    else:
        video.status = Video.ACTIVE
    video.save()

    # Ensure a User exists for the originating video-service account.
    username = video.video_service_user
    try:
        author = User.objects.get(username=username)
    except User.DoesNotExist:
        author = User(username=username)
        author.set_unusable_password()
        author.save()
        utils.get_profile_model().objects.create(
            user=author, website=video.video_service_url)
    video.authors.add(author)

    # Exclude this video from future listings.
    session_key = self._get_cache_key()
    seen = request.session.get(session_key)
    if seen is not None:
        for attr, bucket in (('website_url', 'website_urls'),
                             ('file_url', 'file_urls')):
            value = getattr(video, attr)
            if value:
                seen[bucket].add(value)
        # Reassign so the session backend notices the mutation.
        request.session[session_key] = seen
    return HttpResponse('SUCCESS')
def get(self, request, **kwargs):
    """Approve the video identified by the request's ``video_id``.

    GET flags (both optional): ``feature`` marks the video featured now;
    ``queue`` leaves it unapproved for later review instead of
    activating it.  Returns 400 on invalid input, 402 when the site
    tier's video limit is reached, or ``'SUCCESS'``.
    """
    try:
        video = self.get_object()
    except ValueError:
        return HttpResponseBadRequest("Invalid video_id parameter.")
    if not self.form.is_valid():
        return HttpResponseBadRequest("Invalid query.")
    if video is None:
        return HttpResponseBadRequest("No video found for that video_id.")
    if not request.GET.get('queue'):
        # Approving (not queueing) consumes tier quota, so check first.
        sitelocation = SiteLocation.objects.get_current()
        if not sitelocation.get_tier().can_add_more_videos():
            return HttpResponse(
                content="You are over the video limit. You "
                        "will need to upgrade to approve "
                        "that video.",
                status=402)
    current_site = Site.objects.get_current()
    try:
        # Attribute the video to the saved search that produced it,
        # if one exists for this query on the current site...
        saved_search = SavedSearch.objects.get(
            site=current_site,
            query_string=self.form.cleaned_data['q'])
    except SavedSearch.DoesNotExist:
        # ...otherwise attribute it to the approving user.
        video.user = request.user
    else:
        video.search = saved_search
    video.status = Video.ACTIVE
    if request.GET.get('feature'):
        video.last_featured = datetime.now()
    elif request.GET.get('queue'):
        # Queueing overrides the ACTIVE status set above.
        video.status = Video.UNAPPROVED
    video.save()
    try:
        user = User.objects.get(username=video.video_service_user)
    except User.DoesNotExist:
        # First video from this service account: create a login-less
        # User plus a profile pointing at the service URL.
        user = User(username=video.video_service_user)
        user.set_unusable_password()
        user.save()
        utils.get_profile_model().objects.create(
            user=user,
            website=video.video_service_url)
    video.authors.add(user)
    # Exclude this video from future listings.
    cache_key = self._get_cache_key()
    exclusions = request.session.get(cache_key)
    if exclusions is not None:
        if video.website_url:
            exclusions['website_urls'].add(video.website_url)
        if video.file_url:
            exclusions['file_urls'].add(video.file_url)
        # Reassign so the session is marked dirty and persisted.
        request.session[cache_key] = exclusions
    return HttpResponse('SUCCESS')
def facebookuserprofile_created(sender, instance=None, raw=None,
                                created=False, **kwargs):
    """Signal handler: mirror a freshly created Facebook profile record
    into the site's profile model.  Updates are ignored.
    """
    if created:
        # Empty/None Facebook fields are normalized to ''.
        fields = {
            'user_id': instance.user_id,
            'location': instance.location or '',
            'description': instance.about_me or '',
            'website': instance.url or '',
        }
        get_profile_model().objects.create(**fields)
def save_m2m():
    """Attach the scraped author to the video, then run the wrapped
    ``save_m2m``.

    Closure: ``instance``, ``vidscraper_video`` and ``old_m2m`` come
    from the enclosing scope (not visible in this chunk).
    """
    video = instance
    if vidscraper_video.user:
        # Reuse an existing User for this service account, or create a
        # login-less one (first_name defaults to the username).
        author, created = User.objects.get_or_create(
            username=vidscraper_video.user,
            defaults={'first_name': vidscraper_video.user})
        if created:
            author.set_unusable_password()
            author.save()
            get_profile_model().objects.create(
                user=author,
                website=vidscraper_video.user_url)
        video.authors.add(author)
    # Delegate to the original save_m2m callback.
    old_m2m()
def get(self, request, **kwargs):
    """Approve the video identified by ``video_id``.

    GET flags: ``feature`` stamps ``last_featured``; ``queue`` leaves
    the video UNAPPROVED instead of ACTIVE.  Returns 400 on bad input,
    otherwise ``'SUCCESS'``.

    NOTE(review): unlike sibling variants of this view, there is no
    tier/video-limit check here — confirm that is intentional.
    """
    try:
        video = self.get_object()
    except ValueError:
        return HttpResponseBadRequest("Invalid video_id parameter.")
    if not self.form.is_valid():
        return HttpResponseBadRequest("Invalid query.")
    if video is None:
        return HttpResponseBadRequest("No video found for that video_id.")
    current_site = Site.objects.get_current()
    try:
        # Attribute the video to a matching saved search on this site,
        # falling back to the approving user.
        saved_search = SavedSearch.objects.get(
            site=current_site,
            query_string=self.form.cleaned_data["query"])
    except SavedSearch.DoesNotExist:
        video.user = request.user
    else:
        video.search = saved_search
    video.status = Video.ACTIVE
    if request.GET.get("feature"):
        video.last_featured = datetime.now()
    elif request.GET.get("queue"):
        # Queueing overrides the ACTIVE status set above.
        video.status = Video.UNAPPROVED
    video.save()
    try:
        user = User.objects.get(username=video.video_service_user)
    except User.DoesNotExist:
        # First video from this service account: create a login-less
        # User plus a profile pointing at the service URL.
        user = User(username=video.video_service_user)
        user.set_unusable_password()
        user.save()
        utils.get_profile_model().objects.create(
            user=user, website=video.video_service_url)
    video.authors.add(user)
    # Exclude this video from future listings.
    cache_key = self._get_cache_key()
    exclusions = request.session.get(cache_key)
    if exclusions is not None:
        if video.website_url:
            exclusions["website_urls"].add(video.website_url)
        if video.file_url:
            exclusions["file_urls"].add(video.file_url)
        # Reassign so the session is marked dirty and persisted.
        request.session[cache_key] = exclusions
    return HttpResponse("SUCCESS")
def twitter_profile_data(sender, user, response, details, **kwargs):
    """Build and save a site profile from a Twitter auth ``response``.

    Missing/None fields are normalized to ''.  If the response carries a
    ``profile_image_url``, the avatar is fetched best-effort: any fetch
    failure is swallowed and the profile is saved without a logo.

    Fix: the original never closed the handle returned by
    ``urllib.urlopen``, leaking the HTTP connection; it is now closed in
    a ``finally`` while keeping the best-effort semantics.
    """
    profile = get_profile_model()(
        user=user,
        location=response.get('location') or '',
        description=response.get('description') or '',
        website=response.get('url') or '')
    if response.get('profile_image_url'):
        try:
            fp = urllib.urlopen(response['profile_image_url'])
            try:
                data = fp.read()
            finally:
                # Always release the HTTP handle, even if read() fails.
                fp.close()
        except Exception:
            # Best-effort: a missing avatar must not block the profile.
            pass
        else:
            cf = ContentFile(data)
            cf.name = response['profile_image_url']
            profile.logo = cf
    profile.save()
def twitter_profile_data(sender, user, response, details, **kwargs):
    """Create and persist a site profile from Twitter OAuth data.

    Text fields fall back to '' when absent; the avatar download is
    best-effort — any failure leaves the profile without a logo.
    """
    field_map = (('location', 'location'),
                 ('description', 'description'),
                 ('website', 'url'))
    attrs = dict((attr, response.get(key) or '') for attr, key in field_map)
    profile = get_profile_model()(user=user, **attrs)

    if response.get('profile_image_url'):
        try:
            raw = urllib.urlopen(response['profile_image_url']).read()
        except Exception:
            # Swallow fetch errors; the profile is still saved below.
            pass
        else:
            logo = ContentFile(raw)
            logo.name = response['profile_image_url']
            profile.logo = logo
    profile.save()
def twitteruserprofile_created(sender, instance=None, raw=None,
                               created=False, **kwargs):
    """Signal handler: mirror a newly created Twitter profile record
    into the site's profile model, fetching the avatar best-effort.

    Fix: the original never closed the handle returned by
    ``urllib.urlopen``, leaking the HTTP connection; it is now closed in
    a ``finally`` while keeping the best-effort semantics.
    """
    if not created:
        return  # we don't care about updates
    profile = get_profile_model().objects.create(
        user_id=instance.user_id,
        location=instance.location or '',
        description=instance.description or '',
        website=instance.url or '')
    if instance.profile_image_url:
        try:
            fp = urllib.urlopen(instance.profile_image_url)
            try:
                data = fp.read()
            finally:
                # Always release the HTTP handle, even if read() fails.
                fp.close()
        except Exception:
            # Best-effort: a failed avatar fetch leaves the profile
            # (already created above) without a logo.
            pass
        else:
            cf = ContentFile(data)
            cf.name = instance.profile_image_url
            profile.logo = cf
            # Second save persists the logo on the created profile.
            profile.save()
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.client import Client
from django.core.files.base import File
from django.core.urlresolvers import reverse

from notification import models as notification

from localtv.tests.legacy.test_localtv import BaseTestCase
from localtv.user_profile import forms
from localtv import utils

# Concrete profile model resolved once at import time.
Profile = utils.get_profile_model()


class ProfileFormTestCase(TestCase):
    """Tests for the user-profile form against fixture data."""

    fixtures = ['site', 'users']

    def setUp(self):
        # One known fixture user with a fully populated profile.
        self.user = User.objects.get(username='******')
        self.profile = Profile.objects.create(
            user=self.user,
            description='Description',
            location='Location',
            website='http://www.pculture.org/')

    def test_save(self):
        """
        Filling the ProfileForm with data should cause the Profile to be
        updated.
        """
        # NOTE(review): the body of this test appears truncated in this
        # chunk — only the docstring is visible here.
def video_from_vidscraper_video(vidscraper_video, site_pk,
                                import_app_label=None, import_model=None,
                                import_pk=None, status=None,
                                author_pks=None, category_pks=None,
                                clear_rejected=False, using='default'):
    """Create a ``Video`` from ``vidscraper_video`` for one import run.

    Loads the scraped data, skips (via ``source_import.handle_error``)
    videos that fail to load, lack basic data or a usable file/embed, or
    duplicate an existing guid/link on the site, then builds the Video
    with its categories and author(s) on the ``using`` database.

    Appears to be a Celery task — it calls ``.retry()`` on itself when
    the expected import row is missing (registered elsewhere; confirm).
    """
    import_class = get_model(import_app_label, import_model)
    try:
        source_import = import_class.objects.using(using).get(
            pk=import_pk, status=import_class.STARTED)
    except import_class.DoesNotExist:
        # The import row should exist and still be STARTED; retry later.
        # (logging.warn is a deprecated alias of logging.warning.)
        logging.warn('Retrying %r: expected %s instance (pk=%r) missing.',
                     vidscraper_video.url, import_class.__name__, import_pk)
        video_from_vidscraper_video.retry()
    try:
        try:
            vidscraper_video.load()
        except Exception:
            source_import.handle_error(
                ('Skipped %r: Could not load video data.'
                 % vidscraper_video.url),
                using=using, is_skip=True, with_exception=True)
            return
        if not vidscraper_video.title:
            source_import.handle_error(
                ('Skipped %r: Failed to scrape basic data.'
                 % vidscraper_video.url),
                is_skip=True, using=using)
            return
        # A video is only usable with a non-expiring file URL or an
        # embed code.
        if ((vidscraper_video.file_url_expires or
             not vidscraper_video.file_url)
                and not vidscraper_video.embed_code):
            source_import.handle_error(
                ('Skipping %r: no file or embed code.'
                 % vidscraper_video.url),
                is_skip=True, using=using)
            return
        site_videos = Video.objects.using(using).filter(site=site_pk)
        if vidscraper_video.guid:
            guid_videos = site_videos.filter(guid=vidscraper_video.guid)
            if clear_rejected:
                # Rejected duplicates may be replaced by this import.
                guid_videos.filter(status=Video.REJECTED).delete()
            if guid_videos.exists():
                source_import.handle_error(
                    ('Skipping %r: duplicate guid.'
                     % vidscraper_video.url),
                    is_skip=True, using=using)
                return
        if vidscraper_video.link:
            videos_with_link = site_videos.filter(
                website_url=vidscraper_video.link)
            if clear_rejected:
                videos_with_link.filter(status=Video.REJECTED).delete()
            if videos_with_link.exists():
                source_import.handle_error(
                    ('Skipping %r: duplicate link.'
                     % vidscraper_video.url),
                    is_skip=True, using=using)
                return
        categories = Category.objects.using(using).filter(
            pk__in=category_pks)
        if author_pks:
            authors = User.objects.using(using).filter(pk__in=author_pks)
        else:
            if vidscraper_video.user:
                # Derive first/last name from the scraped username;
                # fields are truncated to the 30-char User limits.
                name = vidscraper_video.user
                if ' ' in name:
                    first, last = name.split(' ', 1)
                else:
                    first, last = name, ''
                author, created = \
                    User.objects.db_manager(using).get_or_create(
                        username=name[:30],
                        defaults={'first_name': first[:30],
                                  'last_name': last[:30]})
                if created:
                    author.set_unusable_password()
                    author.save()
                    utils.get_profile_model().objects.db_manager(
                        using).create(
                            user=author,
                            website=vidscraper_video.user_url or '')
                authors = [author]
            else:
                authors = []
        # Since we check above whether the vidscraper_video is valid, we
        # don't catch InvalidVideo here, since it would be unexpected.
        # We don't update the index because this is expected to be run
        # as part of the import process; the video will be indexed in
        # bulk after the feed import is complete.
        video = Video.from_vidscraper_video(vidscraper_video,
                                            status=status,
                                            using=using,
                                            source_import=source_import,
                                            authors=authors,
                                            categories=categories,
                                            site_pk=site_pk,
                                            update_index=False)
        logging.debug('Made video %i: %r', video.pk, video.name)
        if video.thumbnail_url:
            # Thumbnail download is deferred to another task.
            video_save_thumbnail.delay(video.pk, using=using)
    except Exception:
        source_import.handle_error(
            ('Unknown error during import of %r' % vidscraper_video.url),
            is_skip=True, using=using, with_exception=True)
        raise  # so it shows up in the Celery log
authors = User.objects.using(using).filter(pk__in=author_pks) else: if vidscraper_video.user: name = vidscraper_video.user if " " in name: first, last = name.split(" ", 1) else: first, last = name, "" author, created = User.objects.db_manager(using).get_or_create( username=name[:30], defaults={"first_name": first[:30], "last_name": last[:30]} ) if created: author.set_unusable_password() author.save() utils.get_profile_model().objects.db_manager(using).create( user=author, website=vidscraper_video.user_url or "" ) authors = [author] else: authors = [] # Since we check above whether the vidscraper_video is valid, we don't # catch InvalidVideo here, since it would be unexpected. video = Video.from_vidscraper_video( vidscraper_video, status=status, using=using, source_import=source_import, authors=authors, categories=categories, site_pk=site_pk,
def facebook_profile_data(sender, user, response, details, **kwargs):
    """Persist a site profile populated from a Facebook auth response."""
    def text(key):
        # Normalize missing/None/empty values to ''.
        return response.get(key) or ''

    get_profile_model().objects.create(
        user=user,
        location=text('location'),
        description=text('about_me'),
        website=text('url'))
def video_from_vidscraper_video(vidscraper_video, site_pk,
                                import_app_label=None, import_model=None,
                                import_pk=None, status=None,
                                author_pks=None, category_pks=None,
                                clear_rejected=False, using='default'):
    """Import one vidscraper video into the site as a ``Video``.

    Skippable failure modes (reported through
    ``source_import.handle_error`` with ``is_skip=True``): load failure,
    missing title, no usable file URL or embed code, duplicate guid,
    duplicate link.  Authors come from ``author_pks`` when given,
    otherwise a User is created from the scraped username.  Calls
    ``.retry()`` on itself when the import row is gone — presumably a
    Celery task; confirm at the registration site.
    """
    import_class = get_model(import_app_label, import_model)
    try:
        source_import = import_class.objects.using(using).get(
            pk=import_pk, status=import_class.STARTED)
    except import_class.DoesNotExist:
        logging.warn('Retrying %r: expected %s instance (pk=%r) missing.',
                     vidscraper_video.url, import_class.__name__, import_pk)
        video_from_vidscraper_video.retry()
    try:
        try:
            vidscraper_video.load()
        except Exception:
            source_import.handle_error(
                ('Skipped %r: Could not load video data.'
                 % vidscraper_video.url),
                using=using, is_skip=True, with_exception=True)
            return
        if not vidscraper_video.title:
            source_import.handle_error(
                ('Skipped %r: Failed to scrape basic data.'
                 % vidscraper_video.url),
                is_skip=True, using=using)
            return
        # Usable videos need a non-expiring file URL or an embed code.
        if ((vidscraper_video.file_url_expires or
             not vidscraper_video.file_url)
                and not vidscraper_video.embed_code):
            source_import.handle_error(
                ('Skipping %r: no file or embed code.'
                 % vidscraper_video.url),
                is_skip=True, using=using)
            return
        site_videos = Video.objects.using(using).filter(site=site_pk)
        if vidscraper_video.guid:
            guid_videos = site_videos.filter(guid=vidscraper_video.guid)
            if clear_rejected:
                # A rejected duplicate may be replaced on re-import.
                guid_videos.filter(status=Video.REJECTED).delete()
            if guid_videos.exists():
                source_import.handle_error(
                    ('Skipping %r: duplicate guid.'
                     % vidscraper_video.url),
                    is_skip=True, using=using)
                return
        if vidscraper_video.link:
            videos_with_link = site_videos.filter(
                website_url=vidscraper_video.link)
            if clear_rejected:
                videos_with_link.filter(status=Video.REJECTED).delete()
            if videos_with_link.exists():
                source_import.handle_error(
                    ('Skipping %r: duplicate link.'
                     % vidscraper_video.url),
                    is_skip=True, using=using)
                return
        categories = Category.objects.using(using).filter(
            pk__in=category_pks)
        if author_pks:
            authors = User.objects.using(using).filter(pk__in=author_pks)
        else:
            if vidscraper_video.user:
                # Split the scraped username into first/last; truncate
                # to the 30-char User field limits.
                name = vidscraper_video.user
                if ' ' in name:
                    first, last = name.split(' ', 1)
                else:
                    first, last = name, ''
                author, created = \
                    User.objects.db_manager(using).get_or_create(
                        username=name[:30],
                        defaults={'first_name': first[:30],
                                  'last_name': last[:30]})
                if created:
                    author.set_unusable_password()
                    author.save()
                    utils.get_profile_model().objects.db_manager(
                        using).create(
                            user=author,
                            website=vidscraper_video.user_url or '')
                authors = [author]
            else:
                authors = []
        # Since we check above whether the vidscraper_video is valid, we
        # don't catch InvalidVideo here, since it would be unexpected.
        # We don't update the index because this is expected to be run
        # as part of the import process; the video will be indexed in
        # bulk after the feed import is complete.
        video = Video.from_vidscraper_video(vidscraper_video,
                                            status=status,
                                            using=using,
                                            source_import=source_import,
                                            authors=authors,
                                            categories=categories,
                                            site_pk=site_pk,
                                            update_index=False)
        logging.debug('Made video %i: %r', video.pk, video.name)
        if video.thumbnail_url:
            # Thumbnail fetch is handed off to a separate task.
            video_save_thumbnail.delay(video.pk, using=using)
    except Exception:
        source_import.handle_error(
            ('Unknown error during import of %r' % vidscraper_video.url),
            is_skip=True, using=using, with_exception=True)
        raise  # so it shows up in the Celery log
def facebook_profile_data(sender, user, response, details, **kwargs):
    """Create a site profile for ``user`` from a Facebook auth
    ``response``; absent/None fields are normalized to ''.
    """
    get_profile_model().objects.create(
        user=user,
        location=response.get('location') or '',
        description=response.get('about_me') or '',
        website=response.get('url') or '')