Example #1
        def http_request(self, request):
            scheme = request.get_type()
            if scheme not in ["http", "https"]:
                # robots exclusion only applies to HTTP
                return request

            if request.get_selector() == "/robots.txt":
                # /robots.txt is always OK to fetch
                return request

            host = request.get_host()

            # robots.txt requests don't need to be allowed by robots.txt :-)
            origin_req = getattr(request, "_origin_req", None)
            if (origin_req is not None
                    and origin_req.get_selector() == "/robots.txt"
                    and origin_req.get_host() == host):
                return request

            if host != self._host:
                self.rfp = self.rfp_class()
                try:
                    self.rfp.set_opener(self.parent)
                except AttributeError:
                    debug("%r instance does not support set_opener" %
                          self.rfp.__class__)
                self.rfp.set_url(scheme + "://" + host + "/robots.txt")
                self.rfp.read()
                self._host = host

            ua = request.get_header("User-agent", "")
            if self.rfp.can_fetch(ua, request.get_full_url()):
                return request
            else:
                # XXX This should really have raised URLError.  Too late now...
                msg = "request disallowed by robots.txt"
                raise RobotExclusionError(
                    request,
                    request.get_full_url(),
                    403, msg,
                    self.http_response_class(StringIO()), StringIO(msg))
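In urllib.request terms, a handler method named http_request() is a request pre-processor that the opener runs before sending each request. A class exposing the method above (together with the rfp_class and http_response_class collaborators it assumes) would be wired in roughly like this; RobotsHandler is a hypothetical name for that class:

import urllib.request

# hypothetical: RobotsHandler is a BaseHandler subclass providing the
# http_request() pre-processor shown above
opener = urllib.request.build_opener(RobotsHandler())
response = opener.open("http://example.com/")  # raises RobotExclusionError if disallowed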
Example #2
def register(request):
    context = {}
    if request.method == 'GET':
        context['form'] = Registration_form()
        return render(request, 'register.html', context)

    form = Registration_form(request.POST)
    context['form'] = form

    if not form.is_valid():
        return render(request, 'register.html', context)
    # create a user object
    new_user = User.objects.create_user(
        username=form.cleaned_data['username'],
        password=form.cleaned_data['confirm_password'],
        email=form.cleaned_data['email'],
        first_name=form.cleaned_data['first_name'],
        last_name=form.cleaned_data['last_name'])
    # create a profile object
    profile = User_profile(user=new_user)
    new_user.is_active = False

    # save these two objects
    new_user.save()
    profile.save()

    token = default_token_generator.make_token(new_user)

    # define the email body
    email_body = """Welcome to 42. Please click the link below to verify your email address and
    complete the registration of your count:
    http://%s%s """ % (request.get_host(),
                       reverse('confirm_registration',
                               args=(new_user.email, token)))
    # send mail
    send_mail(subject="Verify your email address",
              message=email_body,
              from_email="*****@*****.**",
              recipient_list=[new_user.email])

    context['email'] = form.cleaned_data['email']
    return render(request, 'confirm.html', context)
Example #3
def get_base_url(request):
    proto = 'https' if request.is_secure() else 'http'
    addr = request.META.get("SERVER_ADDR")
    if not addr:
        addr = request.get_host()
    else:
        port = int(request.META.get("SERVER_PORT", 80))
        if (
            (proto == 'http' and port != 80) or
            (proto == 'https' and port != 443)
        ):
            addr = "%s:%d" % (addr, port)

    try:
        # bracket bare IPv6 literals so the result is a valid URL host
        IPv6Address(addr)
        addr = "[%s]" % addr
    except ValueError:
        # hostname, IPv4, or host:port values are left unchanged
        pass

    return "%s://%s" % (proto, addr)
Example #4
def extract_pdf(request):
    urlParams = request.get_full_path()
    # keep only the tail of the path; the first 40 characters are
    # presumably a fixed URL prefix
    urlParams = urlParams[40:]

    path_wkthmltopdf_krax = r'/app/bin/wkhtmltopdf'
    config = pdfkit.configuration(wkhtmltopdf=path_wkthmltopdf_krax)
    resumeUrl = ('https://' + request.get_host() +
                 '/generate_pdf/resume_create/resume' + urlParams)

    if not os.path.exists('download/'):
        os.mkdir('download/')

    pdfkit.from_url(resumeUrl, 'download/resume.pdf', configuration=config)

    with open("download/resume.pdf", 'rb') as pdf_file:
        response = HttpResponse(pdf_file.read())
    response['Content-Type'] = 'application/pdf'
    response['Content-Disposition'] = 'attachment; filename=resume.pdf'
    if os.path.exists('upload/'):
        shutil.rmtree('upload/')
    return response
Example #5
 def _prepare_viewinscr_plain_get_response(q, z_bibids, specific_sources,
                                           current_display_status, inscrid,
                                           request, view_xml_url,
                                           current_url, log_id):
     """ Returns view-inscription response-object for regular GET.
         Called by viewinscr() """
     log.debug(u'in _prepare_viewinscr_plain_get_response(); starting')
     context = {
         'inscription': q,
         'z_ids': z_bibids,
         'biblDiplomatic': specific_sources['diplomatic'],
         'biblTranscription': specific_sources['transcription'],
         'biblTranslation': specific_sources['translation'],
         'biblioFull': True,
         'chosen_display_status': current_display_status,
         'inscription_id': inscrid,
         'session_authz_info': request.session['authz_info'],
         'admin_links': common.make_admin_links(
             session_authz_dict=request.session[u'authz_info'],
             url_host=request.get_host(),
             log_id=log_id),
         'view_xml_url': view_xml_url,
         'current_url': current_url,
     }
     # log.debug( u'in _prepare_viewinscr_plain_get_response(); context, %s' % pprint.pformat(context) )
     return_response = render(request,
                              u'iip_search_templates/old_viewinscr.html',
                              context)
     return return_response
Example #6
def handleData(request, year, ids, coefs, opers):

    data = []

    for i in range(len(ids)):
        my_id = ids[i]
        my_weight = coefs[i]

        my_url = "http://" + request.get_host() + "/api/" + my_id + "/" + str(
            year) + "/" + str(my_weight)

        with urllib.request.urlopen(my_url) as url:
            result = json.loads(url.read().decode())['result']
            data.append(result)

    data = beautify(data, ids, coefs, opers)
    data = onlyAvailable(data, ids)
    data = sortFormat(data)
    data = setUNDP(request, data, year)

    return data
Example #7
    def do_request_(self, request):
        host = request.get_host()
        if not host:
            raise URLError('no host given')

        if request.has_data():  # POST
            data = request.get_data()
            if not request.has_header('Content-type'):
                request.add_unredirected_header(
                    'Content-type', 'application/x-www-form-urlencoded')

        scheme, sel = urllib.parse.splittype(request.get_selector())
        sel_host, sel_path = urllib.parse.splithost(sel)
        if not request.has_header('Host'):
            request.add_unredirected_header('Host', sel_host or host)
        for name, value in self.parent.addheaders:
            name = name.capitalize()
            if not request.has_header(name):
                request.add_unredirected_header(name, value)

        return request
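Note that urllib.parse.splittype() and splithost(), used above, are deprecated as of Python 3.8. A rough equivalent of the Host derivation using the public urlsplit() API might look like this (a sketch, not the original handler's code):

from urllib.parse import urlsplit

sel_host = urlsplit(request.get_full_url()).netloc  # e.g. 'example.com:8080'
if not request.has_header('Host'):
    request.add_unredirected_header('Host', sel_host or host)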
Example #8
def send_account_activation_email(request, user):
    text_content = 'Account Activation Email'
    subject = 'Email Activation'
    template_name = "emails/account/activation.html"
    from_email = settings.DEFAULT_FROM_EMAIL
    recipients = [user.email]
    kwargs = {
        "uidb64": urlsafe_base64_encode(force_bytes(user.pk)).decode(),
        "token": default_token_generator.make_token(user)
    }
    activation_url = reverse("app:activate_user_account", kwargs=kwargs)

    activate_url = "{0}://{1}{2}".format(request.scheme, request.get_host(), activation_url)

    context = {
        'user': user,
        'activate_url': activate_url
    }
    html_content = render_to_string(template_name, context)
    email = EmailMultiAlternatives(subject, text_content, from_email, recipients)
    email.attach_alternative(html_content, "text/html")
    email.send()
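The view on the receiving end of activate_url is not shown here. A minimal counterpart, assuming the same "app:activate_user_account" route with uidb64 and token kwargs, could be sketched as:

from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.http import HttpResponse, HttpResponseBadRequest
from django.utils.http import urlsafe_base64_decode

def activate_user_account(request, uidb64, token):
    try:
        # recover the primary key encoded by urlsafe_base64_encode(force_bytes(user.pk))
        uid = urlsafe_base64_decode(uidb64).decode()
        user = User.objects.get(pk=uid)
    except (ValueError, User.DoesNotExist):
        return HttpResponseBadRequest('Invalid activation link')
    if not default_token_generator.check_token(user, token):
        return HttpResponseBadRequest('Invalid or expired token')
    user.is_active = True
    user.save()
    return HttpResponse('Account activated')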
Example #9
def transDownloadView(request, slug):
    import urllib.request
    try:
        query_data = Transmission.objects.filter(slug=slug)
        if not query_data:
            raise Http404
    except Transmission.DoesNotExist:
        raise Http404
    query_data2 = limit_transmission_history(request, query_data)
    if not query_data2:
        raise Http404  # Just raise 404 if it's too old
    restricted, new_query = restrict_talkgroups(request, query_data)
    if not new_query:
        raise Http404
    trans = new_query[0]
    if trans.audio_file_type == 'm4a':
        audio_type = 'audio/m4a'
    else:
        audio_type = 'audio/mp3'
    response = HttpResponse(content_type=audio_type)
    start_time = timezone.localtime(
        trans.start_datetime).strftime('%Y%m%d_%H%M%S')
    filename = '{}_{}.{}'.format(start_time, trans.talkgroup_info.slug,
                                 trans.audio_file_type)
    response['Content-Disposition'] = 'attachment; filename="{}"'.format(
        filename)
    url = 'https:{}{}.{}'.format(trans.audio_url, trans.audio_file,
                                 trans.audio_file_type)
    if trans.audio_url[:2] != '//':
        url = 'http:'
        if request.is_secure():
            url = 'https:'
        url += '//{}/{}{}.{}'.format(request.get_host(), trans.audio_url,
                                     trans.audio_file, trans.audio_file_type)
    req = urllib.request.Request(url)
    with urllib.request.urlopen(req) as web_response:
        response.write(web_response.read())
    return response
Example #10
def getSignPackage(request):
    # Get the jsapi_ticket
    jsapiTicket = getJsApiTicket()

    # NOTE: the URL must be obtained dynamically; it cannot be hard-coded.
    # Build the URL of the current page
    url = 'http://' + request.get_host() + request.get_full_path()

    # Get the timestamp
    timestamp = int(time.time())
    # Generate the noncestr (random string)
    nonceStr = createNonceStr()

    # The parameters must be sorted in ascending ASCII order of their keys
    # before being joined into the string that gets signed
    ret = {
        'nonceStr': nonceStr,
        'jsapi_ticket': jsapiTicket,
        'timestamp': timestamp,
        'url': url
    }

    string = '&'.join(['%s=%s' % (key.lower(), ret[key]) for key in sorted(ret)])
    sha1 = hashlib.sha1()
    sha1.update(string.encode('utf-8'))
    signature = sha1.hexdigest()

    signPackage = {
        "appId": 'wx8bc079dad4b03645',
        "nonceStr": nonceStr,
        "timestamp": timestamp,
        "url": url,
        "signature": signature,
        "rawString": string
    }
    return signPackage
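For reference, the hashed string follows WeChat's JS-SDK signature convention: the four fields are sorted by key in ascending ASCII order and joined as jsapi_ticket=...&noncestr=...&timestamp=...&url=..., and lowercasing each key is what turns nonceStr into the noncestr field name the API expects.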
Example #11
 def post(self, request):
     # TODO implement post creation by API Call
     # Reference
     # https://www.django-rest-framework.org/tutorial/3-class-based-views/
     # http://www.chenxm.cc/article/244.html
     # http://webdocs.cs.ualberta.ca/~hindle1/2014/07-REST.pdf
     #profile = get_object_or_404(Profile, pk=pk)
     if request.user.is_authenticated:
         new_data = request.data.copy()
         user_id = str(
             UserProfile.objects.filter(
                 user_id=request.user).first().author_id)
         new_data["user_id"] = user_id
         host = request.scheme + "://" + request.get_host() + "/"
         new_data["host"] = host
         serializer = PostSerializer(data=new_data)
         if not serializer.is_valid():
             return Response({'serializer': serializer})
         serializer.save()
         # TODO Response cannot allow a redirect so just use redirect('/') now
         return redirect('/')
     else:
         return HttpResponse('Unauthorized', status=401)
Example #12
def oauth_login(request):
    assert oauth is not None

    consumer = oauth.Consumer(OAUTH.CONSUMER_KEY.get(),
                              OAUTH.CONSUMER_SECRET.get())
    client = oauth.Client(consumer)
    resp, content = client.request(OAUTH.REQUEST_TOKEN_URL.get(),
                                   "POST",
                                   body=urllib_urlencode({
                                       'oauth_callback':
                                       'http://' + request.get_host() +
                                       '/login/oauth_authenticated/'
                                   }))

    if resp['status'] != '200':
        raise Exception(_("Invalid response from OAuth provider: %s") % resp)

    request.session['request_token'] = dict(cgi.parse_qsl(content))

    url = "%s?oauth_token=%s" % (OAUTH.AUTHENTICATE_URL.get(), request.
                                 session['request_token']['oauth_token'])

    return HttpResponseRedirect(url)
Example #13
def example(request):
    # GNI per capita, PPP (constant 2011 international $) (NY.GNP.PCAP.PP.KD)
    # GDP per capita, PPP (constant 2011 international $) (NY.GDP.PCAP.PP.KD)
    # Life expectancy at birth, total (years) (SP.DYN.LE00.IN)
    # UIS: Mean years of schooling of the population age 25+. Male (UIS.EA.MEAN.1T6.AG25T99.M) - NOT AVAILABLE
    # Government expenditure on education, total (% of GDP) (SE.XPD.TOTL.GD.ZS)
    # Computer, communications and other services (% of commercial service exports) (TX.VAL.OTHR.ZS.WT)
    # Access to electricity (% of population) (EG.ELC.ACCS.ZS)
    # Adjusted net national income per capita (constant 2010 US$) (NY.ADJ.NNTY.PC.KD)
    # Armed forces personnel, total (MS.MIL.TOTL.P1)
    # Commercial bank branches (per 100,000 adults) (FB.CBK.BRCH.P5)
    # Completeness of birth registration (%) (SP.REG.BRTH.ZS)
    # Compulsory education, duration (years) (SE.COM.DURS)

    ids = ["NY.GDP.PCAP.PP.KD","SP.DYN.LE00.IN","NY.GNP.PCAP.PP.KD"]

    year = getRecentOfAll(ids)

    url = "http://" + request.get_host() + "/api/" + ids[0] + "/" + str(year)
    data = requests.get(url=url).json()

    dump = json.dumps({"result": data})

    return HttpResponse(dump, content_type='application/json')
Example #14
def get_overlayed_category_image(request, category_label_id):
    category_label = CategoryLabel.objects.filter(id=category_label_id)
    if not category_label:
        return HttpResponseBadRequest('Bad category_label_id: ' + str(category_label_id))
    category_label = category_label[0]
    image = category_label.parent_label.parentImage
    try:
        blob = render_SVG_from_label(category_label)
    except RuntimeError as e:
        print(e, file=sys.stderr)
        return HttpResponseServerError(str(e))
    foreground = PILImage.open(io.BytesIO(blob))
    #path = re.match(re_image_path, image.path).groups(1)[0]
    path = image.path
    #background = PILImage.open(path + image.name).convert('RGB')
    #print(request.get_host())
    #fd = urllib.request.urlopen(path+image.name)
    #image_file = io.BytesIO(fd.read())
    url = 'http://' + request.get_host() + path + image.name
    background = PILImage.open(urlopen(url))
    background.paste(foreground, (0, 0), foreground)
    output = io.BytesIO()
    background.save(output, format='png')
    return HttpResponse(output.getvalue(), content_type="image/png")
Example #15
 def get_redirect_url(cls, request):
     protocol = "http" + ("s" if request.is_secure() else "") + "://"
     return protocol + request.get_host() + reverse(
         "utils:facebook_connect")
Example #16
def collects(request):
    engit_env = os.environ["DJANGO_SETTINGS_MODULE"].split(',')[2]
    # Collecting Article by NewsAPI
    newsapi_key = os.environ["NEWSAPI_KEY"]
    newsapi_url = ('https://newsapi.org/v2/top-headlines?' + request.GET.urlencode() + '&apikey=' + newsapi_key)
    articles = requests.get(newsapi_url)
    now_time = datetime.utcnow().replace(microsecond=0).isoformat()

    # Set time of before working using time_file like flag file
    time_file = str(os.path.join(settings.STATICFILES_DIRS[0], 'engit', 'time_file'))
    if os.path.isfile(time_file):
        with open(time_file, 'r') as tf:
            lines = tf.readlines()
            oldest = datetime.strptime(lines[0], '%Y-%m-%dT%H:%M:%S')
    else:
        oldest = datetime(1970, 1, 1)

    # for article in articles.get('articles'):
    for article in articles.json()['articles']:
        publishedat = datetime.strptime(article['publishedAt'], '%Y-%m-%dT%H:%M:%SZ')

        if publishedat <= oldest:
            continue

        tmp_file_name = os.path.basename(article['url'].rstrip('/'))
        tmp_output_audio = str(os.path.join(settings.TMP_AUDIO_DIR[0], tmp_file_name + '-tmp.mp3'))
        audio_file_name = tmp_file_name + '.mp3'
        output_audio = str(os.path.join(settings.AUDIOFILES_DIR[0], audio_file_name))

        if article['source']['name'] == 'TechCrunch':
            # crawling (Get Body of an Article)
            html = urllib.request.urlopen(article['url'])
            soup = BeautifulSoup(html, 'html.parser')
            ## Get Contents
            contents_html = soup.find("div",{"class":"article-content"})

            # Convert text to audio
            len_paragraph = len(contents_html.find_all(["p","h2"])) - 1
            tmp_body_html = contents_html.find_all(["p","h2"])
            body_html = BeautifulSoup( '\n\n'.join(str(tb) for tb in tmp_body_html), 'html.parser')

            for n_paragraph, paragraph in enumerate(contents_html.find_all(["p","h2"]), 1):
                client = texttospeech.TextToSpeechClient()
                input_text = texttospeech.types.SynthesisInput(text=paragraph.get_text())

                voice = texttospeech.types.VoiceSelectionParams(
                    language_code='en-US',
                    ssml_gender=texttospeech.enums.SsmlVoiceGender.FEMALE)
            
                audio_config = texttospeech.types.AudioConfig(
                    audio_encoding=texttospeech.enums.AudioEncoding.MP3)

                response = client.synthesize_speech(input_text, voice, audio_config)

                ## The response's audio_content is binary.
                with open(tmp_output_audio, 'wb') as out:
                    out.write(response.audio_content)

                if n_paragraph == 1:
                    print("Title: {}".format(article['title']))
                    print("Start Converting")
                    audio = AudioSegment.from_file(tmp_output_audio, "mp3")
                else:
                    audio = audio + AudioSegment.from_file(tmp_output_audio, "mp3")

                print("In progress: ({}/{}) paragraph have finished to convert text to audio.".format(str(n_paragraph), str(len_paragraph + 1)))
        
        ## Create a audio file
        audio.export(output_audio, format="mp3")

        ## Delete Temporary Audio File
        if os.path.isfile(tmp_output_audio):
            os.remove(tmp_output_audio)
        else:
            print("Error: Temporary Audio File {} not found".format(tmp_output_audio))

        # Update File for production

        # remove img tag
        regex_img = r"<img .*?/>"
        
        # Add record to Model 
        record = Article(title=str(article['title']),
                         body=re.sub(regex_img, "", str(body_html)),
                         author=str(article['author']),
                         published_at=datetime.strptime(article['publishedAt'], '%Y-%m-%dT%H:%M:%SZ'),
                         source_url=str(article['url']),
                         is_published=False)
        record.save()

        ## Update record with Audio URL
        if str(settings.AUDIOFILES_STORE) == 'LOCAL':
            #Article.objects.filter(title=str(article['title'])).update(audio_url='https://'+ request.get_host() + '/static/engit/audio/' + audio_file_name)
            Article.objects.filter(title=str(article['title'])).update(audio_url='http://engit-' + engit_env + '.japaneast.cloudapp.azure.com' + request.get_host() + '/static/engit/audio/' + audio_file_name)
            Article.objects.filter(title=str(article['title'])).update(is_published=True)

    # update time file
    with open(time_file, 'w') as tf:
        tf.write(now_time)

    return render(request, 'engit/collects.json', {})
Example #17
    def get(self, request, author_id):
        if not request.user.is_authenticated:
            return HttpResponse('Unauthorized', status=401)
        resp = {}
        # public posts that are made by this author
        author = UserProfile.objects.filter(author_id=author_id).first()
        posts = Post.objects.filter(user_id=author).filter(
            visibility="PUBLIC").all()
        posts = list(posts)
        request_user = UserProfile.objects.filter(user_id=request.user).first()
        if request_user:
            if str(request_user.author_id) == str(author_id):
                posts += list(
                    Post.objects.filter(user_id=author).exclude(
                        visibility="PUBLIC").all())
        # TODO add friend stuff to this
        thisRequestUserUrl = request.META.get(
            'HTTP_X_REQUEST_USER_ID'
        )  # this is the CUSTOM header we shared within connected group
        if thisRequestUserUrl:
            # get all visibility = "FRIENDS"
            all_user_who_follow_requestUser = Follow.objects.filter(
                following_url=thisRequestUserUrl).all().values_list(
                    'follower_url', flat=True)
            # add all request user 's follower
            for userurl in all_user_who_follow_requestUser:
                authorid = userurl.rstrip("/").split("/")[
                    -1]  # this was a URL, so extract the author id from it
                if authorid == str(author_id):
                    # find this user's "friend"(follower) post
                    posts += list(
                        Post.objects.filter(visibility="FRIENDS").filter(
                            user_id=authorid).all())
                    break
        else:
            all_user_who_follow_requestUser = Follow.objects.filter(
                following_url=request_user.url).all().values_list(
                    'follower_url', flat=True)
            # add all request user 's follower
            for userurl in all_user_who_follow_requestUser:
                authorid = userurl.rstrip("/").split("/")[
                    -1]  # this was a URL, so extract the author id from it
                if authorid == str(author_id):
                    # find this user's "friend"(follower) post
                    posts += list(
                        Post.objects.filter(visibility="FRIENDS").filter(
                            user_id=authorid).all())
                    break

        # TODO implement visible_to
        # Get all visibility as "PRIVATE"
        if thisRequestUserUrl:
            all_private_posts = Post.objects.filter(visibility="PRIVATE").all()
            for private_post in all_private_posts:
                if thisRequestUserUrl in private_post.visibleTo:
                    posts.append(private_post)
        count = len(posts)
        resp['count'] = count
        pageSize = request.GET.get('size')
        if not pageSize:
            pageSize = 50
        pageSize = int(pageSize)
        resp['size'] = pageSize
        posts.sort(key=lambda post: post.published, reverse=True)
        paginator = Paginator(posts, pageSize)
        posts = paginator.get_page(request.GET.get('page'))
        # No need to return next if last page
        # No need to return previous if page is 0
        # next = None;
        # previous = None;
        if posts.has_next():
            resp['next'] = str(request.scheme) + "://" + str(
                request.get_host()) + "/author/" + str(
                    author_id) + "/posts?page=" + str(posts.next_page_number())
        if posts.has_previous():
            resp['previous'] = str(request.scheme) + "://" + str(
                request.get_host()) + "/author/" + str(
                    author_id) + "/posts?page=" + str(
                        posts.previous_page_number())
        serializer = PostSerializer(posts, many=True)
        # paginate comments and add friend list
        for post in serializer.data:
            post['size'] = pageSize
            comments = Comment.objects.filter(
                post_id=post['id']).order_by("-published").all()
            commentPaginator = Paginator(comments, pageSize)
            comments = commentPaginator.get_page(0)
            post['visibleTo'] = post['visibleTo'].split(",")
            post['categories'] = post['categories'].split()
            post['next'] = str(request.scheme) + "://" + str(
                request.get_host()) + "/posts/" + str(post['id']) + "/comments"
            post['origin'] = str(request.scheme) + "://" + str(
                request.get_host()) + "/posts/" + str(post['id'])
            post['source'] = str(request.scheme) + "://" + str(
                request.get_host()) + "/posts/" + str(post['id'])
            comments = GETCommentSerializer(comments, many=True).data
            post['comments'] = comments
        resp['posts'] = serializer.data
        resp['query'] = 'posts'
        return Response(resp)
Example #18
    def __write_capture(self, request, response):

        ohandle = io.StringIO()
        response_body = b''
        saved_exception = None
        try:
            ohandle.write('<capture>\n')
            ohandle.write('<request>\n')
            method = request.get_method()
            url = request.get_full_url() 
            parsed = urlparse.urlsplit(url)
            relative_url = parsed.path
            if parsed.query:
                relative_url += '?' + parsed.query
            if parsed.fragment:
                # TODO: will this ever happen?
                relative_url += '#' + parsed.fragment

            host = None
            request_body = None

            if hasattr(request, 'get_host'):
                host = request.get_host()
                # support 3.3
                if request.has_data():
                    request_body = request.get_data()
            else:
                host = request.host
                request_body = request.data
            
            ohandle.write('<method>%s</method>\n' % escape(method))
            ohandle.write('<url>%s</url>\n' % escape(url))
            ohandle.write('<host>%s</host>\n' % escape(host))
            try:
                # ghetto
                addr = response.fp.raw._sock.getpeername()
                if addr:
                    ohandle.write('<hostip>%s</hostip>\n' % escape(addr[0]))
            except Exception as error:
                pass
            ohandle.write('<datetime>%s</datetime>\n' % escape(time.asctime(time.gmtime())+' GMT')) # TODO: can we calculate request time and elapsed?
            request_headers = '%s %s HTTP/1.1\r\n' % (method, relative_url) # TODO: is there access to the HTTP version?
            for item in request.header_items():
                request_headers += item[0] + ': ' + '\r\n\t'.join(item[1:]) + '\r\n'

            if self.re_nonprintable_str.search(request_headers):
                ohandle.write('<headers encoding="base64">%s</headers>\n' % base64.b64encode(request_headers.encode('utf-8')).decode('ascii'))
            else:
                ohandle.write('<headers>%s</headers>\n' % escape(request_headers))
            if request_body is not None:
                if self.re_nonprintable.search(request_body):
                    ohandle.write('<body encoding="base64">%s</body>\n' % base64.b64encode(request_body).decode('ascii'))
                else:
                    ohandle.write('<body>%s</body>\n' % escape(request_body.decode('ascii')))
            ohandle.write('</request>\n')
            ohandle.write('<response>\n')
            status = int(response.getcode())
            ohandle.write('<status>%d</status>\n' % status)
            headers = response.info()
            if 'HEAD' == method or status < 200 or status in (204, 304,):
                response_body = b''
            else:
                try:
                    response_body = response.read()
                except urllib2.IncompleteRead as e:
                    saved_exception = e
            response_headers = 'HTTP/1.1 %d %s\r\n' % (status, response.msg) # TODO: is there access to the HTTP version?
            response_headers += headers.as_string()
            content_type = headers.get('Content-Type')
            content_length = headers.get('Content-Length')

            if content_type:
                ohandle.write('<content_type>%s</content_type>\n' % escape(content_type))
            if content_length:
                ohandle.write('<content_length>%d</content_length>\n' % int(content_length))

            if self.re_nonprintable_str.search(response_headers):
                ohandle.write('<headers encoding="base64">%s</headers>\n' % base64.b64encode(response_headers.encode('utf-8')).decode('ascii'))
            else:
                ohandle.write('<headers>%s</headers>\n' % escape(response_headers))
            if response_body:
                if self.re_nonprintable.search(response_body):
                    ohandle.write('<body encoding="base64">%s</body>\n' % base64.b64encode(response_body).decode('ascii'))
                else:
                    ohandle.write('<body>%s</body>\n' % escape(response_body.decode('ascii')))

            ohandle.write('</response>\n')
            ohandle.write('</capture>\n')

            self.ofhandle.write(ohandle.getvalue().encode('utf-8'))
            ohandle.close()
            
            self.write_count += 1
            if 0 == (self.write_count % self.cut_count):
                self.close()
                self.open_file()

        except Exception as e:
            sys.stderr.write('*** unhandled error in RaftCaptureProcessor: %s\n' % (e))

        if saved_exception:
            raise saved_exception

        return response_body
Example #19
def api_main(request, moudle):
    context = {}
    context['static_url'] = request.get_host()
    return api_process.process(request, moudle)
Example #20
def old_results(request):
    """ Handles /results/ GET, POST, and ajax-GET. """
    def _get_results_context(request, log_id):
        """ Returns correct context for POST.
            Called by results() """
        log.debug('starting')
        context = {}
        request.encoding = u'utf-8'

        form = old_forms.SearchForm(
            request.POST)  # form bound to the POST data

        resultsPage = 1
        qstring_provided = None
        if request.method == u'GET':
            qstring_provided = request.GET.get("q", None)
            resultsPage = int(request.GET.get('resultsPage', resultsPage))

        if form.is_valid() or qstring_provided:
            initial_qstring = ""
            if qstring_provided:
                initial_qstring = qstring_provided
            else:
                initial_qstring = form.generateSolrQuery()

            updated_qstring = common.updateQstring(
                initial_qstring=initial_qstring,
                session_authz_dict=request.session['authz_info'],
                log_id=common.get_log_identifier(
                    request.session))['modified_qstring']
            context = common.paginateRequest(qstring=updated_qstring,
                                             resultsPage=resultsPage,
                                             log_id=common.get_log_identifier(
                                                 request.session))
            log.debug('context, ```%s```' % pprint.pformat(context))
            context[u'session_authz_info'] = request.session[u'authz_info']
            context[u'admin_links'] = common.make_admin_links(
                session_authz_dict=request.session[u'authz_info'],
                url_host=request.get_host(),
                log_id=log_id)
            context[u'initial_qstring'] = initial_qstring
        log.debug('context.keys(), ```%s```' %
                  pprint.pformat(sorted(context.keys())))
        log.debug('type(context), `%s`' % type(context))

        # results = context['iipResult']
        # log.debug( 'type(results), `%s`' % type(results) )
        # for (i, result) in enumerate(results.object_list):
        #     log.debug( 'type(result), `%s`' % type(result) )
        #     log.debug( 'result, `%s`' % result )
        #     if i > 0:
        #         break
        #     1/0

        return context

    def _get_ajax_unistring(request):
        """ Returns unicode string based on ajax update.
            Called by results() """
        log_id = common.get_log_identifier(request.session)
        log.info('id, `%s`; starting' % log_id)
        initial_qstring = request.GET.get(u'qstring', u'*:*')
        updated_qstring = common.updateQstring(initial_qstring,
                                               request.session[u'authz_info'],
                                               log_id)[u'modified_qstring']
        resultsPage = int(request.GET[u'resultsPage'])
        context = common.paginateRequest(qstring=updated_qstring,
                                         resultsPage=resultsPage,
                                         log_id=log_id)
        return_str = ajax_snippet.render_block_to_string(
            u'iip_search_templates/old_results.html', u'content', context)
        return unicode(return_str)

    def _get_searchform_context(request, log_id):
        """ Returns correct context for GET.
            Called by results() """
        log.debug('_get_searchform_context() starting')
        if u'authz_info' not in request.session:
            request.session[u'authz_info'] = {u'authorized': False}
        # form = SearchForm()  # an unbound form
        form = old_forms.SearchForm()  # an unbound form
        log.debug('form, `%s`' % repr(form))
        # place_field_object = form.fields['place']
        # place_field_object.choices = [(item, item) for item in sorted( common.facetResults('placeMenu').keys()) if item]
        # form.fields['place'] = place_field_object
        context = {
            u'form':
            form,
            u'session_authz_info':
            request.session[u'authz_info'],
            u'settings_app':
            settings_app,
            u'admin_links':
            common.make_admin_links(
                session_authz_dict=request.session[u'authz_info'],
                url_host=request.get_host(),
                log_id=log_id)
        }
        log.debug('context, ```%s```' % pprint.pformat(context))
        return context

    log_id = common.get_log_identifier(request.session)
    log.info('id, `%s`; starting' % log_id)
    if u'authz_info' not in request.session:
        request.session[u'authz_info'] = {u'authorized': False}
    if request.method == u'POST':  # form has been submitted by user
        log.debug('POST, search-form was submitted by user')
        request.encoding = u'utf-8'
        form = old_forms.SearchForm(request.POST)
        if not form.is_valid():
            log.debug('form not valid, redirecting')
            redirect_url = '%s://%s%s?q=*:*' % (
                request.META[u'wsgi.url_scheme'], request.get_host(),
                reverse(u'results_url'))
            log.debug('redirect_url for non-valid form, ```%s```' %
                      redirect_url)
            return HttpResponseRedirect(redirect_url)
        qstring = form.generateSolrQuery()
        # e.g. http://library.brown.edu/cds/projects/iip/results?q=*:*
        redirect_url = '%s://%s%s?q=%s' % (request.META[u'wsgi.url_scheme'],
                                           request.get_host(),
                                           reverse(u'results_url'), qstring)
        log.debug('redirect_url for valid form, ```%s```' % redirect_url)
        return HttpResponseRedirect(redirect_url)
    if request.method == u'GET' and request.GET.get(u'q', None) is not None:
        log.debug('GET, with params, hit solr and show results')
        return render(request, u'iip_search_templates/old_results.html',
                      _get_results_context(request, log_id))
    elif request.is_ajax():  # user has requested another page, a facet, etc.
        log.debug('request.is_ajax() is True')
        return HttpResponse(_get_ajax_unistring(request))
    else:  # regular GET, no params
        log.debug('GET, no params, show search form')
        return render(request, u'iip_search_templates/search_form.html',
                      _get_searchform_context(request, log_id))
Example #21
def status(request, status_id):
    image_entry_list = ImageEntry.objects.filter(status_id=status_id) \
                                         .order_by('image_number')
    if not image_entry_list:
        url = '{}://{}/register/{}'.format(request.scheme, request.get_host(),
                                           status_id)
        t = threading.Thread(target=register_status, args=(url, ))
        t.start()
        return render(
            request, 'hello/status.html', {
                'title': 'ツイート詳細 - にじさーち',
                'status_id': status_id,
                'screen_name': 'unknown'
            })
    else:
        t = threading.Thread(target=update_like_count,
                             args=(image_entry_list, status_id))
        t.start()
        hashtags = []
        usertags = []
        i2vtags_list = []
        is_illust = []
        for image_entry in image_entry_list:
            is_illust.append(image_entry.is_illust)
            tags = image_entry.tags.all()
            i2vtags = []
            rating = None
            for tag in tags:
                if tag.tag_type == 'HS':
                    hashtags.append(tag)
                elif tag.tag_type == 'UR':
                    usertags.append(tag)
                elif tag.tag_type == 'IV':
                    if tag.name in ['safe', 'questionable', 'explicit']:
                        rating = tag
                    else:
                        i2vtags.append(tag)
            if rating is not None:
                i2vtags.insert(0, rating)
            i2vtags = [{
                "name": t.name,
                "name_escape": quote(t.name)
            } for t in i2vtags]
            i2vtags_list.append(i2vtags)
        hashtags = list(set(hashtags))
        hashtags = [{
            "name": t.name,
            "name_escape": quote(t.name)
        } for t in hashtags]
        usertags = [{
            "name": t.name,
            "name_escape": quote(t.name)
        } for t in usertags]

        all_usertags = Tag.objects.filter(tag_type='UR')

        return render(
            request, 'hello/status.html', {
                'title': 'ツイート詳細 - にじさーち',
                'status_id': status_id,
                'screen_name': image_entry_list[0].author_screen_name,
                'hashtags': hashtags,
                'usertags': usertags,
                'all_usertags': all_usertags,
                'i2vtags_list': i2vtags_list,
                'is_illust': is_illust
            })
Example #22
def cleanUpAndFixImages(request):
    helper_ops.fixAllImagePaths()
    helper_ops.updateAllImageSizes(request.scheme, request.get_host())
    return HttpResponse("All images rows cleaned up and fixed.")
Example #23
    def test(request, *args, **kwargs):
        # redirect
        if request.get_host() == 'www.chalaoshi.cn':
            return HttpResponsePermanentRedirect('https://chalaoshi.cn' +
                                                 request.get_full_path())

        test_ua(request)
        if request.ua_is_pc:
            copyright = True
            return render_to_response('pc.html', locals())

        # add uuid
        uuid = -1
        ip = request.META['REMOTE_ADDR']

        if 'uuid' not in request.session and 'uuid' in request.COOKIES:
            request.session['uuid'] = request.COOKIES['uuid']

        if 'uuid' not in request.session:
            uuid = generate_uuid(ip)
        else:
            uuid = request.session['uuid']
            try:
                uuid = int(uuid)
            except (TypeError, ValueError):
                uuid = generate_uuid(ip)
        request.session['uuid'] = uuid

        # check new openid
        redirect = request.GET.get('redirect', '')
        if redirect == 'openid_callback':
            pass

        # SNS visit log
        fr = request.GET.get('from', '')
        if not fr == '':
            # save visit log if from SNS
            log = SNSVisitLog()
            log.ip = request.META['REMOTE_ADDR']
            log.source = fr
            log.path = request.get_full_path()
            log.uuid = uuid
            log.save()

        # add wx js signature
        request.wx = wx_js_sign('https://' + request.get_host() +
                                request.get_full_path())

        # redirect to OpenID url
        response = None
        #if 'openid' not in request.session and request.ua_is_wx:
        #    from urllib import quote
        #    callback_url = quote(settings.HOST_NAME+'/wechat/wx_userinfo_callback')
        #    request.session['redirect'] = 'https://'+request.get_host()+request.get_full_path()
        #    response = HttpResponseRedirect('https://open.weixin.qq.com/connect/oauth2/authorize?appid=%s&redirect_uri=%s&response_type=code&scope=snsapi_base&state=%s#wechat_redirect' % (settings.WECHAT['APPID'], callback_url, settings.WECHAT['TOKEN']))
        #else:
        #    if request.ua_is_wx:
        #        oid = OpenID.get_or_create(request.session['openid'], uuid)
        #        request.session['uuid'] = oid.uuid

        response = func(request, *args, **kwargs)
        response.set_cookie('uuid', '', expires=-1)

        return response
Example #24
def query(url,
          method="GET",
          params=None,
          data=None,
          data_file=None,
          header_dict=None,
          header_list=None,
          header_file=None,
          username=None,
          password=None,
          auth=None,
          decode=False,
          decode_type="auto",
          status=False,
          headers=False,
          text=False,
          cookies=None,
          cookie_jar=None,
          cookie_format="lwp",
          persist_session=False,
          session_cookie_jar=None,
          data_render=False,
          data_renderer=None,
          header_render=False,
          header_renderer=None,
          template_dict=None,
          test=False,
          test_url=None,
          node="minion",
          port=80,
          opts=None,
          backend=None,
          ca_bundle=None,
          verify_ssl=None,
          cert=None,
          text_out=None,
          headers_out=None,
          decode_out=None,
          stream=False,
          streaming_callback=None,
          header_callback=None,
          handle=False,
          agent=USERAGENT,
          hide_fields=None,
          raise_error=True,
          formdata=False,
          formdata_fieldname=None,
          formdata_filename=None,
          decode_body=True,
          **kwargs):
    """
    Query a resource, and decode the return data
    """
    ret = {}

    if opts is None:
        if node == "master":
            opts = salt.config.master_config(
                os.path.join(salt.syspaths.CONFIG_DIR, "master"))
        elif node == "minion":
            opts = salt.config.minion_config(
                os.path.join(salt.syspaths.CONFIG_DIR, "minion"))
        else:
            opts = {}

    if not backend:
        backend = opts.get("backend", "tornado")

    match = re.match(
        r"https?://((25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)($|/)",
        url,
    )
    if not match:
        salt.utils.network.refresh_dns()

    if backend == "requests":
        if HAS_REQUESTS is False:
            ret["error"] = ("http.query has been set to use requests, but the "
                            "requests library does not seem to be installed")
            log.error(ret["error"])
            return ret
        else:
            requests_log = logging.getLogger("requests")
            requests_log.setLevel(logging.WARNING)

    # Some libraries don't support separation of url and GET parameters
    # Don't need a try/except block, since Salt depends on tornado
    url_full = salt.ext.tornado.httputil.url_concat(url,
                                                    params) if params else url

    if ca_bundle is None:
        ca_bundle = get_ca_bundle(opts)

    if verify_ssl is None:
        verify_ssl = opts.get("verify_ssl", True)

    if cert is None:
        cert = opts.get("cert", None)

    if data_file is not None:
        data = _render(data_file, data_render, data_renderer, template_dict,
                       opts)

    # Make sure no secret fields show up in logs
    log_url = sanitize_url(url_full, hide_fields)

    log.debug("Requesting URL %s using %s method", log_url, method)
    log.debug("Using backend: %s", backend)

    if method == "POST" and log.isEnabledFor(logging.TRACE):
        # Make sure no secret fields show up in logs
        if isinstance(data, dict):
            log_data = data.copy()
            if isinstance(hide_fields, list):
                for item in data:
                    for field in hide_fields:
                        if item == field:
                            log_data[item] = "XXXXXXXXXX"
            log.trace("Request POST Data: %s", pprint.pformat(log_data))
        else:
            log.trace("Request POST Data: %s", pprint.pformat(data))

    if header_file is not None:
        header_tpl = _render(header_file, header_render, header_renderer,
                             template_dict, opts)
        if isinstance(header_tpl, dict):
            header_dict = header_tpl
        else:
            header_list = header_tpl.splitlines()

    if header_dict is None:
        header_dict = {}

    if header_list is None:
        header_list = []

    if cookie_jar is None:
        cookie_jar = os.path.join(
            opts.get("cachedir", salt.syspaths.CACHE_DIR), "cookies.txt")
    if session_cookie_jar is None:
        session_cookie_jar = os.path.join(
            opts.get("cachedir", salt.syspaths.CACHE_DIR), "cookies.session.p")

    if persist_session is True and salt.utils.msgpack.HAS_MSGPACK:
        # TODO: This is hackish; it will overwrite the session cookie jar with
        # all cookies from this one connection, rather than behaving like a
        # proper cookie jar. Unfortunately, since session cookies do not
        # contain expirations, they can't be stored in a proper cookie jar.
        if os.path.isfile(session_cookie_jar):
            with salt.utils.files.fopen(session_cookie_jar, "rb") as fh_:
                session_cookies = salt.utils.msgpack.load(fh_)
            if isinstance(session_cookies, dict):
                header_dict.update(session_cookies)
        else:
            with salt.utils.files.fopen(session_cookie_jar, "wb") as fh_:
                salt.utils.msgpack.dump("", fh_)

    for header in header_list:
        comps = header.split(":")
        if len(comps) < 2:
            continue
        header_dict[comps[0].strip()] = comps[1].strip()

    if not auth:
        if username and password:
            auth = (username, password)

    if agent == USERAGENT:
        agent = "{} http.query()".format(agent)
    header_dict["User-agent"] = agent

    if backend == "requests":
        sess = requests.Session()
        sess.auth = auth
        sess.headers.update(header_dict)
        log.trace("Request Headers: %s", sess.headers)
        sess_cookies = sess.cookies
        sess.verify = verify_ssl
    elif backend == "urllib2":
        sess_cookies = None
    else:
        # Tornado
        sess_cookies = None

    if cookies is not None:
        if cookie_format == "mozilla":
            sess_cookies = http.cookiejar.MozillaCookieJar(cookie_jar)
        else:
            sess_cookies = http.cookiejar.LWPCookieJar(cookie_jar)
        if not os.path.isfile(cookie_jar):
            sess_cookies.save()
        sess_cookies.load()

    if test is True:
        if test_url is None:
            return {}
        else:
            url = test_url
            ret["test"] = True

    if backend == "requests":
        req_kwargs = {}
        if stream is True:
            if requests.__version__[0] == "0":
                # 'stream' was called 'prefetch' before 1.0, with flipped meaning
                req_kwargs["prefetch"] = False
            else:
                req_kwargs["stream"] = True

        # Client-side cert handling
        if cert is not None:
            if isinstance(cert, str):
                if os.path.exists(cert):
                    req_kwargs["cert"] = cert
            elif isinstance(cert, list):
                if os.path.exists(cert[0]) and os.path.exists(cert[1]):
                    req_kwargs["cert"] = cert
            else:
                log.error(
                    "The client-side certificate path that"
                    " was passed is not valid: %s",
                    cert,
                )

        if formdata:
            if not formdata_fieldname:
                ret["error"] = "formdata_fieldname is required when formdata=True"
                log.error(ret["error"])
                return ret
            result = sess.request(method,
                                  url,
                                  params=params,
                                  files={
                                      formdata_fieldname:
                                      (formdata_filename, io.StringIO(data))
                                  },
                                  **req_kwargs)
        else:
            result = sess.request(method,
                                  url,
                                  params=params,
                                  data=data,
                                  **req_kwargs)
        result.raise_for_status()
        if stream is True:
            # fake a HTTP response header
            header_callback("HTTP/1.0 {} MESSAGE".format(result.status_code))
            # fake streaming the content
            streaming_callback(result.content)
            return {
                "handle": result,
            }

        if handle is True:
            return {
                "handle": result,
                "body": result.content,
            }

        log.debug("Final URL location of Response: %s",
                  sanitize_url(result.url, hide_fields))

        result_status_code = result.status_code
        result_headers = result.headers
        result_text = result.content
        result_cookies = result.cookies
        body = result.content
        if not isinstance(body, str) and decode_body:
            body = body.decode(result.encoding or "utf-8")
        ret["body"] = body
    elif backend == "urllib2":
        request = urllib.request.Request(url_full, data)
        handlers = [
            urllib.request.HTTPHandler,
            urllib.request.HTTPCookieProcessor(sess_cookies),
        ]

        if url.startswith("https"):
            hostname = request.get_host()
            handlers[0] = urllib.request.HTTPSHandler(1)
            if not HAS_MATCHHOSTNAME:
                log.warning(
                    "match_hostname() not available, SSL hostname checking "
                    "not available. THIS CONNECTION MAY NOT BE SECURE!")
            elif verify_ssl is False:
                log.warning("SSL certificate verification has been explicitly "
                            "disabled. THIS CONNECTION MAY NOT BE SECURE!")
            else:
                if ":" in hostname:
                    hostname, port = hostname.split(":")
                else:
                    port = 443
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.connect((hostname, int(port)))
                sockwrap = ssl.wrap_socket(sock,
                                           ca_certs=ca_bundle,
                                           cert_reqs=ssl.CERT_REQUIRED)
                try:
                    match_hostname(sockwrap.getpeercert(), hostname)
                except CertificateError as exc:
                    ret["error"] = "The certificate was invalid. Error returned was: {}".format(
                        pprint.pformat(exc))
                    return ret

                # Client-side cert handling
                if cert is not None:
                    cert_chain = None
                    if isinstance(cert, str):
                        if os.path.exists(cert):
                            cert_chain = cert
                    elif isinstance(cert, list):
                        if os.path.exists(cert[0]) and os.path.exists(cert[1]):
                            cert_chain = cert
                    else:
                        log.error(
                            "The client-side certificate path that was "
                            "passed is not valid: %s",
                            cert,
                        )
                        return
                    if hasattr(ssl, "SSLContext"):
                        # Python >= 2.7.9
                        context = ssl.SSLContext.load_cert_chain(*cert_chain)
                        handlers.append(
                            urllib.request.HTTPSHandler(context=context))  # pylint: disable=E1123
                    else:
                        # Python < 2.7.9
                        cert_kwargs = {
                            "host": request.get_host(),
                            "port": port,
                            "cert_file": cert_chain[0],
                        }
                        if len(cert_chain) > 1:
                            cert_kwargs["key_file"] = cert_chain[1]
                        handlers[0] = http.client.HTTPSConnection(
                            **cert_kwargs)

        opener = urllib.request.build_opener(*handlers)
        for header in header_dict:
            request.add_header(header, header_dict[header])
        request.get_method = lambda: method
        try:
            result = opener.open(request)
        except urllib.error.URLError as exc:
            # keep the error key consistent with the other backends
            ret["error"] = str(exc)
            return ret
        if stream is True or handle is True:
            return {
                "handle": result,
                # http.client responses have no .content attribute; read()
                # drains the body, mirroring what requests' .content does
                "body": result.read(),
            }

        result_status_code = result.code
        result_headers = dict(result.info())
        result_text = result.read()
        if "Content-Type" in result_headers:
            res_content_type, res_params = cgi.parse_header(
                result_headers["Content-Type"])
            if (res_content_type.startswith("text/")
                    and "charset" in res_params
                    and not isinstance(result_text, str)):
                result_text = result_text.decode(res_params["charset"])
        if isinstance(result_text, bytes) and decode_body:
            result_text = result_text.decode("utf-8")
        ret["body"] = result_text
    else:
        # Tornado
        req_kwargs = {}

        # Client-side cert handling
        if cert is not None:
            if isinstance(cert, str):
                if os.path.exists(cert):
                    req_kwargs["client_cert"] = cert
            elif isinstance(cert, list):
                if os.path.exists(cert[0]) and os.path.exists(cert[1]):
                    req_kwargs["client_cert"] = cert[0]
                    req_kwargs["client_key"] = cert[1]
            else:
                log.error(
                    "The client-side certificate path that "
                    "was passed is not valid: %s",
                    cert,
                )

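        # Tornado expects the request body as a string, so form-encode
        # any dict payload before handing it over.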
        if isinstance(data, dict):
            data = urllib.parse.urlencode(data)

        if verify_ssl:
            req_kwargs["ca_certs"] = ca_bundle

        max_body = opts.get("http_max_body",
                            salt.config.DEFAULT_MINION_OPTS["http_max_body"])
        connect_timeout = opts.get(
            "http_connect_timeout",
            salt.config.DEFAULT_MINION_OPTS["http_connect_timeout"],
        )
        timeout = opts.get(
            "http_request_timeout",
            salt.config.DEFAULT_MINION_OPTS["http_request_timeout"],
        )

        client_argspec = None

        proxy_host = opts.get("proxy_host", None)
        if proxy_host:
            # tornado requires a str for proxy_host, cannot be a unicode str in py2
            proxy_host = salt.utils.stringutils.to_str(proxy_host)
        proxy_port = opts.get("proxy_port", None)
        proxy_username = opts.get("proxy_username", None)
        if proxy_username:
            # tornado requires a str, cannot be unicode str in py2
            proxy_username = salt.utils.stringutils.to_str(proxy_username)
        proxy_password = opts.get("proxy_password", None)
        if proxy_password:
            # tornado requires a str, cannot be unicode str in py2
            proxy_password = salt.utils.stringutils.to_str(proxy_password)
        no_proxy = opts.get("no_proxy", [])

        # Tornado doesn't support no_proxy, so we always hand it either empty
        # or valid proxy settings, and strip the valid ones when the URL's
        # hostname appears in no_proxy.
        if urllib.parse.urlparse(url_full).hostname in no_proxy:
            proxy_host = None
            proxy_port = None
            proxy_username = None
            proxy_password = None

        # We want to use curl_http if we have a proxy defined
        if proxy_host and proxy_port:
            if HAS_CURL_HTTPCLIENT is False:
                ret["error"] = (
                    "proxy_host and proxy_port has been set. This requires pycurl and tornado, "
                    "but the libraries does not seem to be installed")
                log.error(ret["error"])
                return ret

            salt.ext.tornado.httpclient.AsyncHTTPClient.configure(
                "tornado.curl_httpclient.CurlAsyncHTTPClient")
            client_argspec = salt.utils.args.get_function_argspec(
                salt.ext.tornado.curl_httpclient.CurlAsyncHTTPClient.initialize
            )
        else:
            salt.ext.tornado.httpclient.AsyncHTTPClient.configure(None)
            client_argspec = salt.utils.args.get_function_argspec(
                salt.ext.tornado.simple_httpclient.SimpleAsyncHTTPClient.
                initialize)

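        # Introspect the chosen client's initialize() signature so that
        # max_body_size is only passed when the client supports it.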
        supports_max_body_size = "max_body_size" in client_argspec.args

        req_kwargs.update({
            "method": method,
            "headers": header_dict,
            "auth_username": username,
            "auth_password": password,
            "body": data,
            "validate_cert": verify_ssl,
            "allow_nonstandard_methods": True,
            "streaming_callback": streaming_callback,
            "header_callback": header_callback,
            "connect_timeout": connect_timeout,
            "request_timeout": timeout,
            "proxy_host": proxy_host,
            "proxy_port": proxy_port,
            "proxy_username": proxy_username,
            "proxy_password": proxy_password,
            "raise_error": raise_error,
            "decompress_response": False,
        })

        # Unicode types will cause a TypeError when Tornado's curl HTTPClient
        # invokes setopt. Therefore, make sure all arguments we pass which
        # contain strings are str types.
        req_kwargs = salt.utils.data.decode(req_kwargs, to_str=True)

        try:
            download_client = (HTTPClient(max_body_size=max_body)
                               if supports_max_body_size else HTTPClient())
            result = download_client.fetch(url_full, **req_kwargs)
        except salt.ext.tornado.httpclient.HTTPError as exc:
            ret["status"] = exc.code
            ret["error"] = str(exc)
            return ret
        except (socket.herror, OSError, socket.timeout,
                socket.gaierror) as exc:
            if status is True:
                ret["status"] = 0
            ret["error"] = str(exc)
            log.debug("Cannot perform 'http.query': %s - %s", url_full,
                      ret["error"])
            return ret

        if stream is True or handle is True:
            return {
                "handle": result,
                "body": result.body,
            }

        result_status_code = result.code
        result_headers = result.headers
        result_text = result.body
        if "Content-Type" in result_headers:
            res_content_type, res_params = cgi.parse_header(
                result_headers["Content-Type"])
            if (res_content_type.startswith("text/")
                    and "charset" in res_params
                    and not isinstance(result_text, str)):
                result_text = result_text.decode(res_params["charset"])
        if isinstance(result_text, bytes) and decode_body:
            result_text = result_text.decode("utf-8")
        ret["body"] = result_text
        if "Set-Cookie" in result_headers and cookies is not None:
            result_cookies = parse_cookie_header(result_headers["Set-Cookie"])
            for item in result_cookies:
                sess_cookies.set_cookie(item)
        else:
            result_cookies = None

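    # From here on the handling is backend-agnostic: normalize headers,
    # log, decompress and optionally decode the response body.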
    if isinstance(result_headers, list):
        result_headers_dict = {}
        for header in result_headers:
            comps = header.split(":")
            result_headers_dict[comps[0].strip()] = ":".join(comps[1:]).strip()
        result_headers = result_headers_dict

    log.debug("Response Status Code: %s", result_status_code)
    log.trace("Response Headers: %s", result_headers)
    log.trace("Response Cookies: %s", sess_cookies)
    # log.trace("Content: %s", result_text)

    coding = result_headers.get("Content-Encoding", "identity")

    # Requests will always decompress the content, and working around that is annoying.
    if backend != "requests":
        result_text = __decompressContent(coding, result_text)

    try:
        log.trace("Response Text: %s", result_text)
    except UnicodeEncodeError as exc:
        log.trace(
            "Cannot Trace Log Response Text: %s. This may be due to "
            "incompatibilities between requests and logging.",
            exc,
        )

    if text_out is not None:
        with salt.utils.files.fopen(text_out, "w") as tof:
            tof.write(result_text)

    if headers_out is not None and os.path.exists(headers_out):
        with salt.utils.files.fopen(headers_out, "w") as hof:
            # result_headers is dict-like; file.write() needs a string
            hof.write(str(result_headers))

    if cookies is not None:
        sess_cookies.save()

    if persist_session is True and salt.utils.msgpack.HAS_MSGPACK:
        # TODO: See persist_session above
        if "set-cookie" in result_headers:
            with salt.utils.files.fopen(session_cookie_jar, "wb") as fh_:
                session_cookies = result_headers.get("set-cookie", None)
                if session_cookies is not None:
                    salt.utils.msgpack.dump({"Cookie": session_cookies}, fh_)
                else:
                    salt.utils.msgpack.dump("", fh_)

    if status is True:
        ret["status"] = result_status_code

    if headers is True:
        ret["headers"] = result_headers

    if decode is True:
        if decode_type == "auto":
            content_type = result_headers.get("content-type",
                                              "application/json")
            if "xml" in content_type:
                decode_type = "xml"
            elif "json" in content_type:
                decode_type = "json"
            elif "yaml" in content_type:
                decode_type = "yaml"
            else:
                decode_type = "plain"

        valid_decodes = ("json", "xml", "yaml", "plain")
        if decode_type not in valid_decodes:
            ret["error"] = "Invalid decode_type specified. Valid decode types are: {}".format(
                pprint.pformat(valid_decodes))
            log.error(ret["error"])
            return ret

        if decode_type == "json":
            ret["dict"] = salt.utils.json.loads(result_text)
        elif decode_type == "xml":
            ret["dict"] = []
            items = ET.fromstring(result_text)
            for item in items:
                ret["dict"].append(xml.to_dict(item))
        elif decode_type == "yaml":
            ret["dict"] = salt.utils.data.decode(
                salt.utils.yaml.safe_load(result_text))
        else:
            text = True

        if decode_out:
            with salt.utils.files.fopen(decode_out, "w") as dof:
                dof.write(result_text)

    if text is True:
        ret["text"] = result_text

    return ret
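
A minimal usage sketch for a query helper like the one above, assuming it is Salt's salt.utils.http.query; the endpoint URL and the keyword values below are illustrative, not taken from the example:

import salt.utils.http

result = salt.utils.http.query(
    "https://example.com/api",   # hypothetical endpoint
    method="GET",
    backend="tornado",           # or "requests" / "urllib2"
    decode=True,                 # parse the body according to Content-Type
    decode_type="auto",
    status=True,                 # include the HTTP status code in the result
)
print(result.get("status"), result.get("dict"))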
Example #27
0
    def get(self, request):
        resp = {}
        # look for the userprofile if this is our own server user
        if request.user.is_authenticated:
            user = UserProfile.objects.filter(user_id=request.user).first()
        else:
            return HttpResponse('Unauthorized', status=401)
        # All the public posts
        posts = Post.objects.filter(visibility="PUBLIC").all()
        posts = list(posts)
        # Only for our own server's users. Authentication was already
        # enforced above, so checking for a local profile is enough: a
        # UserProfile on our server means this is a local user, not an
        # external endpoint user.
        if user:
            # include the user's own posts, excluding PUBLIC ones to
            # avoid duplicates
            posts += list(
                Post.objects.filter(user_id=user.author_id).exclude(
                    visibility="PUBLIC").all())
        # collect FRIENDS posts whose authors follow the requesting user
        thisRequestUserUrl = request.META.get(
            'HTTP_X_REQUEST_USER_ID'
        )  # the custom header shared within the connected group of servers
        if thisRequestUserUrl:
            # find everyone who follows the requesting user
            all_user_who_follow_requestUser = Follow.objects.filter(
                following_url=thisRequestUserUrl).all().values_list(
                    'follower_url', flat=True)
            # walk every follower of the requesting user
            for userurl in all_user_who_follow_requestUser:
                authorid = userurl.rstrip("/").split("/")[
                    -1]  # the value is a URL; the author id is its last segment
                # collect this follower's FRIENDS posts
                posts += list(
                    Post.objects.filter(visibility="FRIENDS").filter(
                        user_id=authorid).all())
                # if the requesting user is also a local user, include this
                # follower's SERVERONLY posts as well
                if user:
                    posts += list(
                        Post.objects.filter(visibility="SERVERONLY").filter(
                            user_id=authorid).all())
        # TODO add post_visible_to stuff
        # Get all visibility as "PRIVATE"
        if thisRequestUserUrl:
            all_private_posts = Post.objects.filter(visibility="PRIVATE").all()
            for private_post in all_private_posts:
                if thisRequestUserUrl in private_post.visibleTo:
                    posts.append(private_post)
        count = len(posts)
        resp['count'] = count
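        # page size comes from the ?size= query parameter, defaulting to 50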
        pageSize = request.GET.get('size')
        if not pageSize:
            pageSize = 50
        pageSize = int(pageSize)
        resp['size'] = pageSize
        # newest posts first
        posts.sort(key=lambda post: post.published, reverse=True)
        paginator = Paginator(posts, pageSize)
        posts = paginator.get_page(request.GET.get('page'))
        # No need to return next on the last page,
        # nor previous on the first page.
        if posts.has_next():
            resp['next'] = str(request.scheme) + "://" + str(
                request.get_host()) + "/author/posts?page=" + str(
                    posts.next_page_number())
            if pageSize != 50:
                resp['next'] += "&size=" + str(pageSize)
        if posts.has_previous():
            resp['previous'] = str(request.scheme) + "://" + str(
                request.get_host()) + "/author/posts?page=" + str(
                    posts.previous_page_number())
            if pageSize != 50:
                resp['previous'] += "&size=" + str(pageSize)
        serializer = PostSerializer(posts, many=True)
        # paginate each post's comments and build absolute URLs for it
        for post in serializer.data:
            post['size'] = pageSize
            comments = Comment.objects.filter(
                post_id=post['id']).order_by("-published").all()
            commentPaginator = Paginator(comments, pageSize)
            comments = commentPaginator.get_page(1)  # pages are 1-indexed
            post['next'] = str(request.scheme) + "://" + str(
                request.get_host()) + "/posts/" + str(post['id']) + "/comments"
            post['visibleTo'] = post['visibleTo'].split(",")
            post['categories'] = post['categories'].split()
            post['origin'] = str(request.scheme) + "://" + str(
                request.get_host()) + "/posts/" + str(post['id'])
            post['source'] = str(request.scheme) + "://" + str(
                request.get_host()) + "/posts/" + str(post['id'])
            comments = GETCommentSerializer(comments, many=True).data
            post['comments'] = comments

        resp['posts'] = serializer.data
        resp['query'] = 'posts'
        return Response(resp)
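
A hypothetical wiring sketch for a view like this one; the module layout, the class name AuthorPostsView, and the URL pattern are assumptions, not taken from the example:

# urls.py (hypothetical)
from django.urls import path

from .views import AuthorPostsView  # assumed name of the view class above

urlpatterns = [
    path("author/posts", AuthorPostsView.as_view()),
]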