Example #1
def profile_detail(request, username):
    # The templates for this view are in templates/profiles/
    profiled_user = get_object_or_404(User, username=username)

    # Restrict to the code objects the current user can see that belong to the profiled user
    extra_context = { }
    owned_code_objects = scraper_search_query(request.user, None).filter(usercoderole__user=profiled_user)
    extra_context['owned_code_objects'] = owned_code_objects
    extra_context['emailer_code_objects'] = owned_code_objects.filter(Q(usercoderole__user__username=username) & Q(usercoderole__role='email'))
    return profile_views.profile_detail(request, username=username, extra_context=extra_context)
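Every example on this page calls scraper_search_query(user, query), a helper imported from codewiki.models (the import is visible in Example #8 below). Its implementation is not shown here; from the call sites it evidently returns a Django QuerySet of code objects the given user is allowed to see, optionally narrowed by a free-text term. A minimal sketch of what such a helper might look like, assuming a Code model with the privacy_status values and usercoderole relation used throughout these examples (the title/description search fields are guesses):

# Hypothetical sketch only -- the real scraper_search_query lives in codewiki.models.
from django.db.models import Q

def scraper_search_query(user, query):
    # 'Code' is the assumed shared base model (table codewiki_code, cf. the tag() example).
    qs = Code.objects.exclude(privacy_status='deleted')
    if user is not None and user.is_authenticated():
        # is_authenticated() as a method call matches the Django 1.x era of these examples.
        # Hide private code objects unless the user holds a role on them.
        qs = qs.exclude(Q(privacy_status='private') & ~Q(usercoderole__user=user))
    else:
        qs = qs.exclude(privacy_status='private')
    if query:
        # Optional free-text filter; the searched fields are assumptions.
        qs = qs.filter(Q(title__icontains=query) | Q(description__icontains=query))
    return qs.distinct()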
Example #2
def dashboard(request, privacy_status=None, page_number=1):
    user = request.user
    owned_or_edited_code_objects = scraper_search_query(request.user, None).filter(usercoderole__user=user)
    
    if privacy_status == 'private':
        owned_or_edited_code_objects = owned_or_edited_code_objects.filter(privacy_status="private")
    elif privacy_status == 'nonprivate':
        owned_or_edited_code_objects = owned_or_edited_code_objects.filter(Q(privacy_status="public")|Q(privacy_status="visible"))
    
    context = {'object_list': owned_or_edited_code_objects,
               'language': 'python'}
    return render_to_response('frontend/dashboard.html', context, context_instance=RequestContext(request))
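Because Django QuerySets are lazy, the conditional filter() calls above merely add WHERE clauses to the pending query; the database is only hit once the dashboard template iterates over object_list.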
Example #3
def search(request, q=""):
    if q != "":
        form = SearchForm(initial={'q': q})
        q = q.strip()

        # If q looks like a url then we should just pass it through to search_urls
        # and return that instead.
        if re.match(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', q):
            return search_urls(request, q)

        tags = Tag.objects.filter(name__icontains=q)
        scrapers = scraper_search_query(request.user, q)

        # The following line used to exclude private scrapers, but these were already excluded in
        # the call to scraper_search_query above.
        scrapers = scrapers.exclude(usercoderole__role='email')
        scrapers_num_results = tags.count() + scrapers.count()

        users = user_search_query(request.user, q)
        users_num_results = users.count()

        return render_to_response('frontend/search_results.html',
            {
                'scrapers': scrapers,
                'users': users,
                'tags': tags,
                'scrapers_num_results': scrapers_num_results,
                'users_num_results': users_num_results,
                'form': form,
                'query': q},
            context_instance=RequestContext(request))

    # If the form has been submitted, redirect to the nice /search/<q>/ URL
    elif request.POST:
        form = SearchForm(request.POST)
        if form.is_valid():
            q = form.cleaned_data['q']
            # Process the data in form.cleaned_data
            # Redirect after POST
            return HttpResponseRedirect('/search/%s/' % urllib.quote(q.encode('utf-8')))
        else:
            form = SearchForm()
            return render_to_response('frontend/search_results.html', {'form': form},
                context_instance=RequestContext(request))
    else:
        form = SearchForm()
        return render_to_response('frontend/search_results.html', {'form': form}, context_instance=RequestContext(request))
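The call to urllib.quote marks this as Python 2 code. On Python 3 the redirect would use urllib.parse instead, and quote() accepts a str directly, so the explicit UTF-8 encode is unnecessary:

from urllib.parse import quote
return HttpResponseRedirect('/search/%s/' % quote(q))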
Example #4
def tag(request, tag):
    ttag = get_tag(tag)
    code_objects = None
    
    if ttag:
        # query set of code objects this user can see
        user_visible_code_objects = scraper_search_query(request.user, None)

        # inlining of tagging.models.get_by_model() but removing the content_type_id condition so that tags 
        # attached to scrapers and views get interpreted as tags on code objects
        code_objects = user_visible_code_objects.extra(
            tables=['tagging_taggeditem'],
            where=['tagging_taggeditem.tag_id = %s', 'codewiki_code.id = tagging_taggeditem.object_id'], 
            params=[ttag.pk])

    return render_to_response('frontend/tag.html', {'tag_string': tag, 'tag': ttag, 'scrapers': code_objects}, context_instance=RequestContext(request))
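The extra() call injects a raw join against the tagging_taggeditem table, deliberately dropping the usual content_type_id check so that a tag attached to either a Scraper or a View matches its shared codewiki_code row. The %s placeholder is filled from params, so the tag id is passed to the database safely rather than interpolated into the SQL string. Roughly the same result could be written without raw SQL, assuming object_id holds the code object's primary key:

code_objects = user_visible_code_objects.filter(
    id__in=TaggedItem.objects.filter(tag=ttag).values('object_id'))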
Example #5
def browse(request, page_number=1, wiki_type=None, special_filter=None, ff=None):
    all_code_objects = scraper_search_query(request.user, None).select_related('owner','owner__userprofile_set')

    if wiki_type:
        all_code_objects = all_code_objects.filter(wiki_type=wiki_type)

    # One last check because this is a slightly convoluted way of building this page.
    if not ff:
        ff = request.GET.get('forked_from', None)

    if ff:
        try:
            s = Scraper.objects.get(short_name=ff)
            if s.privacy_status not in ('private', 'deleted'):
                all_code_objects = all_code_objects.filter(forked_from=s)
        except Scraper.DoesNotExist:
            # Just ignore the forked_from if the scraper does not exist
            pass

    # Extra filters (broken scraper lists etc.)
    if special_filter == 'sick':
        all_code_objects = all_code_objects.filter(status='sick')
    elif special_filter == 'no_description':
        all_code_objects = all_code_objects.filter(description='')
    elif special_filter == 'no_tags':
        # Hack to get scrapers with no tags (tagging does not recognise model inheritance)
        if wiki_type == 'scraper':
            all_code_objects = TaggedItem.objects.get_no_tags(Scraper.objects.exclude(privacy_status="deleted").order_by('-created_at'))
        else:
            all_code_objects = TaggedItem.objects.get_no_tags(View.objects.exclude(privacy_status="deleted").order_by('-created_at'))


    # filter out scrapers that have no records unless we are looking at the forked_from list
    if not ff and not special_filter:
        pass
    #    all_code_objects = all_code_objects.exclude(wiki_type='scraper', scraper__record_count=0)

    form = SearchForm()

    dictionary = {'ff': ff, 'scrapers': all_code_objects, 'wiki_type': wiki_type, 'form': form, 'special_filter': special_filter, 'language': 'python'}
    return render_to_response('frontend/browse.html', dictionary, context_instance=RequestContext(request))
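Note that this version of browse() has the zero-record filter switched off: the if branch falls through to pass and the exclude() call is commented out. The next example shows the same view with the filter active.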
Example #6
def browse(request, page_number=1, wiki_type=None, special_filter=None, ff=None):
    all_code_objects = scraper_search_query(request.user, None).select_related('owner','owner__userprofile_set')

    if wiki_type:
        all_code_objects = all_code_objects.filter(wiki_type=wiki_type) 

    # One last check because this is a slightly convoluted way of building this page.
    if not ff:
        ff = request.GET.get('forked_from', None)
        
    if ff:
        try:
            s = Scraper.objects.get(short_name=ff)
            if s.privacy_status not in ('private', 'deleted'):
                all_code_objects = all_code_objects.filter(forked_from=s)
        except Scraper.DoesNotExist:
            # Just ignore the forked_from if the scraper does not exist
            pass

    # Extra filters (broken scraper lists etc.)
    if special_filter == 'sick':
        all_code_objects = all_code_objects.filter(status='sick')
    elif special_filter == 'no_description':
        all_code_objects = all_code_objects.filter(description='')
    elif special_filter == 'no_tags':
        # Hack to get scrapers with no tags (tagging does not recognise model inheritance)
        if wiki_type == 'scraper':
            all_code_objects = TaggedItem.objects.get_no_tags(Scraper.objects.exclude(privacy_status="deleted").order_by('-created_at'))
        else:
            all_code_objects = TaggedItem.objects.get_no_tags(View.objects.exclude(privacy_status="deleted").order_by('-created_at'))


    # filter out scrapers that have no records unless we are looking at the forked_from list
    if not ff and not special_filter:
        all_code_objects = all_code_objects.exclude(wiki_type='scraper', scraper__record_count=0)
    
    form = SearchForm()

    dictionary = {'ff': ff, 'scrapers': all_code_objects, 'wiki_type': wiki_type, 'form': form, 'special_filter': special_filter, 'language': 'python'}
    return render_to_response('frontend/browse.html', dictionary, context_instance=RequestContext(request))
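Here the zero-record filter is live. A single exclude() with two keyword arguments removes only rows matching both conditions at once, i.e. scrapers whose record_count is zero; views are unaffected.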
Example #7
    def items(self, obj):
        code_objects = scraper_search_query(None, obj)
        return code_objects[:settings.RSS_ITEMS]
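Slicing the QuerySet translates to a SQL LIMIT, so at most settings.RSS_ITEMS rows are fetched for the feed. RSS_ITEMS is a project-level setting; a plausible value (assumed, not shown on this page):

# settings.py
RSS_ITEMS = 20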
Example #8
    def owned_code_objects(self, user):
        from codewiki.models import scraper_search_query
        return scraper_search_query(user, None).filter(usercoderole__user=self.user)
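Importing scraper_search_query inside the method body rather than at module level is presumably done to avoid a circular import between the app that defines this model and codewiki.models; the import only runs when the method is first called.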