Example 1
def normalise_url(url):
    """
    Take a URL and turn it into a form which is suitable for normalised comparison with other normalised
    URLs.

    The function does the following:
    * strips leading/trailing whitespace
    * validates the URL is realistic
    * strips the scheme (so, removes http, https, ftp, ftps, etc)

    :param url:
    :return:
    """
    if url is None:
        return url

    schemes = ["http", "https", "ftp", "ftps"]
    url = url.strip()
    if url.startswith("//"):
        url = "http:" + url

    if "://" not in url:
        url = "http://" + url

    u = urlparse.urlparse(url)

    if u.netloc is None or u.netloc == "":
        raise ValueError(
            "Could not extract a normalised URL from '{x}'".format(x=url))

    if u.scheme not in schemes:
        raise ValueError(
            "URL scheme must be http(s) or ftp(s), found '{x}'".format(x=u.scheme))

    n = urlparse.ParseResult(None, u.netloc, u.path, u.params, u.query,
                             u.fragment)
    return urlparse.urlunparse(n)
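
A minimal usage sketch (assuming Python 2, where urlparse is a top-level stdlib module, and the normalise_url function above; the inputs are hypothetical):

import urlparse

print normalise_url("  http://example.com/path  ")  # //example.com/path
print normalise_url("example.com/path")             # //example.com/path (http assumed, then stripped)
print normalise_url("//example.com/path")           # //example.com/path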
Example 2
def __sanitize_url(server_url):
    """
    Parses a URL and makes sure it has a scheme and no extra / and path.

    ..note:: Calling this method only once might yield incorrect result. Always call
        the sanitize_url function instead.

    :param str server_url: URL to clean up.

    :returns: The cleaned up URL.
    """

    # The given url https://192.168.1.250:30/path?a=b is parsed such that
    # scheme => https
    # netloc => 192.168.1.250:30
    # path = /path
    # query = a=b

    # As such, when sanitizing a url, we want to keep only the scheme and
    # network location

    # Break the url up into its components
    tokens_parsed = urlparse.urlparse(server_url)

    # Then extract the good parts from the url
    clean_url_tokens = urlparse.ParseResult(
        # We want https when there is no specified scheme.
        scheme=tokens_parsed.scheme or "https",
        # If only a host has been provided, path will be set.
        # If a scheme was set, then use the netloc
        netloc=tokens_parsed.netloc or tokens_parsed.path,
        path="",
        params="",
        query="",
        fragment="")

    return urlparse.urlunparse(clean_url_tokens)
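
A quick behavioural sketch (hypothetical inputs; the double-underscore prefix makes this module-private, and per the docstring it should normally be reached via sanitize_url):

print __sanitize_url("https://192.168.1.250:30/path?a=b")  # https://192.168.1.250:30
print __sanitize_url("host.example.com")                   # https://host.example.com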
Example 3
 def can_fetch(self, url):
     # construct URL of robots.txt
     parts = urlparse.urlparse(url)
     parts2 = urlparse.ParseResult(parts.scheme, parts.netloc,
                                   '/robots.txt', None, None, None)
     robotsurl = parts2.geturl()
     print url, robotsurl
     # create new parser, if there is none in cache
     if robotsurl not in self.cache:
         rp = robotparser.RobotFileParser()
         rp.set_url(robotsurl)
         self.cache[robotsurl] = rp
     # fetch and parse robots.txt if existing one is too old
     if time.time() - self.cache[robotsurl].mtime() > 60 * 60 * 24:
         rp = self.cache[robotsurl]
         try:
             status, header, content = self.http_get(robotsurl)
         except Exception:
             return False
         if status >= 400:
             return True
         rp.parse(content)
         rp.modified()
     # lookup current given url with current parser
     return self.cache[robotsurl].can_fetch(SPIDERAGENT, url)
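
The robots.txt URL derivation can be exercised in isolation; a minimal sketch with a hypothetical URL:

import urlparse

parts = urlparse.urlparse("http://example.com/a/b?x=1")
parts2 = urlparse.ParseResult(parts.scheme, parts.netloc, '/robots.txt', None, None, None)
print parts2.geturl()  # http://example.com/robots.txt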
Example 4
def _build_network_relative_path(url):
    p = urlparse.urlparse(url)
    return urlparse.urlunparse(
        urlparse.ParseResult(p.scheme, p.netloc, os.path.dirname(p.path), '',
                             '', ''))
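
A hedged usage sketch (hypothetical URL), showing that only the directory part of the path survives:

print _build_network_relative_path("http://example.com/dir/file.txt")  # http://example.com/dir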
Example 5
 def geturl(self):
     return urlparse.ParseResult(*self.gettuple()).geturl()
Example 6
 def get_url(self):
     query = werkzeug.url_encode(self._query)
     return urlparse.ParseResult(self.scheme, self.netloc, self.path,
                                 self.params, query,
                                 self.fragment).geturl()
Example 7
def pdns_reverse_request_handler(search_value,
                                 result_format,
                                 **dynamic_fields):
    """method to allow for custom pdns reverse requests handler
       for DNSDB as a pdns source

    Note: the method name must be "pdns_reverse_request_handler"
    """
    scheme = "https"
    netloc = "api.dnsdb.info"
    path = "lookup/rdata".split('/')
    query = ["limit=%d" % int(dynamic_fields['limit'])]

    results = {'success': False}
    if not config.myConfig['apikey']:
        results['error'] = 'No DNSDB key.'
        return results

    try:
        value = _verify_type(search_value, dynamic_fields['type'])
    except Exception as e:
        results['error'] = 'Unable to verify input'
        return results

    path.extend([dynamic_fields['type'], urllib.quote(value)])

    # If 'any' was requested, keep only 'any' (plus 'any-dnssec' if that was also requested)
    if 'any' in dynamic_fields['rrtypes']:
        if 'any-dnssec' in dynamic_fields['rrtypes']:
            dynamic_fields['rrtypes'] = ['any', 'any-dnssec']
        else:
            dynamic_fields['rrtypes'] = ['any']

    results['data'] = {}
    for rrtype in dynamic_fields['rrtypes']:
        local_path = list(path) + [rrtype]
        local_path = "/".join(local_path)

        local_url = urlparse.ParseResult(scheme,
                                         netloc,
                                         local_path,
                                         "",
                                         "&".join(query),
                                         "")

        url = urlparse.urlunparse(local_url)
        try:
            headers = {'Accept': 'application/json',
                       'X-API-Key': config.myConfig['apikey']}
            r = requests.get(url,
                             proxies=settings.PROXIES,
                             headers=headers,
                             verify=config.myConfig["ssl_verify"])
        except Exception as e:
            results['error'] = str(e)
            return results

        if r.status_code not in [200, 404]:
            return check_return_code(r)

        if r.status_code == 200:
            # Each line of the response is an individual JSON blob.
            for line in r.text.split('\n'):
                # Skip empty lines.
                if not line:
                    continue

                try:
                    tmp = json.loads(line)
                except Exception as e:
                    results['error'] = \
                        "%s: %s" % (str(e), cgi.escape(line, quote=True))
                    return results

                # Convert epoch timestamps to human readable.
                for key in ['time_first', 'time_last']:
                    if key in tmp:
                        tmp[key] = time.strftime("%Y-%m-%d %H:%M:%S",
                                                 time.gmtime(tmp[key]))

                rrtype = tmp['rrtype']
                # Strip the MX weight
                if rrtype == 'MX':
                    tmp['rdata'] = [tmp['rdata'].split()[1]]
                else:
                    tmp['rdata'] = [tmp['rdata']]

                if result_format in ['none', 'list']:
                    if tmp['rrname'][-1] == ".":
                        tmp['rrname'] = tmp['rrname'][:-1]
                    for i in range(len(tmp['rdata'])):
                        if tmp['rdata'][i][-1] == ".":
                            tmp['rdata'][i] = tmp['rdata'][i][:-1]

                try:
                    results['data'][rrtype].append(tmp)
                except KeyError:
                    results['data'][rrtype] = [tmp]

    results['success'] = True
    rate = {'limit': r.headers['X-RateLimit-Limit'],
            'remaining': r.headers['X-RateLimit-Remaining'],
            'reset': r.headers['X-RateLimit-Reset']}
    if rate['reset'] != 'n/a':
        rate['reset'] = datetime.datetime.utcfromtimestamp(
            float(rate['reset'])).strftime('%Y-%m-%d %H:%M:%S GMT')
    results['rate'] = rate

    if result_format != 'none':
        results = _format_results(results, result_format, dynamic_fields)

    return results
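
The lookup URL assembly can be reproduced on its own; a minimal sketch with hypothetical values for search_value ("198.51.100.1"), dynamic_fields['type'] ("ip") and rrtype ("any"):

import urlparse

path = "/".join("lookup/rdata".split('/') + ["ip", "198.51.100.1", "any"])
local_url = urlparse.ParseResult("https", "api.dnsdb.info", path, "", "limit=100", "")
print urlparse.urlunparse(local_url)
# https://api.dnsdb.info/lookup/rdata/ip/198.51.100.1/any?limit=100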
Example 8
        urls = defaultdict(list)
        for s in (a, b):
            filename = os.path.join(args.prefix, domain, s)
            html = read_file(filename)

            url = original_url(html)
            url = "http://" + url
            # print url

            parsed_url = urlparse.urlparse(url)
            stripped_path = stripper.strip(parsed_url.path).replace("//", "/")
            stripped_query = stripper.strip(parsed_url.query).replace(
                "//", "/")
            stripped_url = urlparse.ParseResult(parsed_url.scheme,
                                                parsed_url.netloc,
                                                stripped_path,
                                                parsed_url.params,
                                                stripped_query,
                                                parsed_url.fragment).geturl()

            urls[stripped_url].append(url)
            if stripped_url != url:
                was_stripped += 1
        if len(urls) == 1:
            correct += 1

        print len(urls), was_stripped, correct, urls.items()

    print "correct: ", correct
Example 9
def delayed_shipping(request, template):
    apitoolsusername = '******' + request.user.username
    appname_id = MainSDKApp.objects.using('apiserver_db').all()
    if request.method == 'GET':
        form = DelayedShippingForm()
        return render(
            request, template, {
                'form': form,
                'username': apitoolsusername,
                'appname_id': appname_id,
                'guestlist': settings.GUEST
            })
    elif request.method == 'POST':
        if apitoolsusername in settings.GUEST:
            form = DelayedShippingForm(request.POST)
            return render(
                request, template, {
                    'form': form,
                    'username': apitoolsusername,
                    'appname_id': appname_id,
                    'guestlist': settings.GUEST
                })
        form = DelayedShippingForm(request.POST)
        if form.is_valid():
            cleaned_data = form.cleaned_data
            if cleaned_data['captcha'].lower() != request.session['captcha']:
                form.add_error('captcha', '验证码错误')  # "captcha incorrect"
            else:
                appid = cleaned_data['appid']
                orderid = cleaned_data['orderid']
                price = cleaned_data['price'] * 100
                productid = cleaned_data['productid']
                try:
                    app = MainSDKApp.objects.using('apiserver_db').get(
                        appid=appid)
                except MainSDKApp.DoesNotExist:
                    form.add_error('appid', 'appid在SDK服务器中不存在')  # "appid does not exist on the SDK server"
                else:
                    try:
                        U8Uorder.objects.using('u8server_db').get(
                            orderId=orderid)
                    except Exception:
                        form.add_error('orderid', 'U8服务器上不存在此订单')  # "no such order on the U8 server"
                        return render(
                            request, template, {
                                'form': form,
                                'username': apitoolsusername,
                                'appname_id': appname_id,
                                'guestlist': settings.GUEST
                            })
                    pay_callback_url = app.pay_callback_url
                    parsed_u8_callback_url = urlparse.urlparse(
                        pay_callback_url)
                    user = MainSDKUser.objects.using('apiserver_db').get(
                        username='******')
                    request_args = [('AppOrderID', orderid), ('Price', price),
                                    ('Uid', user.id),
                                    ('ChannelOrderID', uuid.uuid4().get_hex()),
                                    ('ProductID', productid)]
                    request_query_str = '&'.join(
                        ['='.join(map(str, item)) for item in request_args])
                    new_u8_parsed_callback_url = urlparse.ParseResult(
                        scheme=parsed_u8_callback_url.scheme,
                        netloc=parsed_u8_callback_url.netloc,
                        path=parsed_u8_callback_url.path,
                        params=parsed_u8_callback_url.params,
                        query=request_query_str,
                        fragment=parsed_u8_callback_url.fragment)
                    new_u8_callback_url = urlparse.urlunparse(
                        new_u8_parsed_callback_url)
                    callback_sign = get_signature(
                        app.appsecret.encode('utf-8'), new_u8_callback_url)
                    request_args.append(('sign', callback_sign))
                    request_args_map = dict(request_args)
                    request_obj = urllib2.Request(pay_callback_url)
                    request_obj.add_data(urllib.urlencode(request_args_map))
                    try:
                        response = urllib2.urlopen(request_obj,
                                                   timeout=6).read()
                        messages.info(request, 'U8 said: %s' % response)
                    except Exception as e:
                        messages.error(request, 'Exception: %s' % str(e))
                    else:
                        if json.loads(response)['status'] == 'success':
                            infostr = '订单发货成功'  # "order shipped successfully"
                            try:
                                ApiToolsShippingHistory2.objects.using(
                                    'default').create(
                                        username=apitoolsusername[1:],
                                        time=time.strftime(
                                            '%Y-%m-%d %H:%M:%S',
                                            time.localtime(time.time() +
                                                           28800)),
                                        orderid=orderid,
                                        price=price,
                                        appid=appid)
                            except Exception:
                                infostr = infostr + ',写入缓存失败'  # ", failed to write history"
                            messages.success(request, infostr)
                        else:
                            messages.error(request, '修改U8服务器失败')  # "failed to update the U8 server"
        return render(
            request, template, {
                'form': form,
                'username': apitoolsusername,
                'appname_id': appname_id,
                'guestlist': settings.GUEST
            })
Example 10
    def get(self, sid):
        memcache_client = memcache.Client()
        results = memcache_client.get(sid)
        if results:
            try:
                results = json.loads(zlib.decompress(results))
            except Exception:
                results = None
        if not results:
            try:
                access_token = crypto.decrypt(sid,
                                              self.app.config["SECRET_KEY"])
            except (ValueError, TypeError):
                self.response.status_int = 403
                return

            logging.debug("access_token: %s", access_token)
            content = urlfetch.fetch(
                "https://api.weibo.com/2/statuses/home_timeline.json?" +
                urllib.urlencode({
                    "count": 100,
                    "base_app": 0,
                    "feature": 0,
                    "trim_user": 0,
                    "isGetLongText": "1",
                }),
                headers={
                    "Authorization": "OAuth2 " + access_token
                }).content
            body = json.loads(content)
            if "error" in body:
                logging.error("error: %s", content)
                self.response.status_int = 500
                self.response.write(body["error"])
                return
            results = body["statuses"]
            # Fetch the full text of statuses longer than 140 characters
            long_text_ids = []
            long_text_map = {}
            for status in results:
                if status.get("isLongText", True):
                    long_text_ids.append(status["idstr"])
                status = status.get("retweeted_status")
                if status is not None:
                    if status.get("isLongText", True):
                        long_text_ids.append(status["idstr"])
            logging.debug("long_text_ids before cache size=%d",
                          len(long_text_ids))
            if len(long_text_ids):
                cached_result = memcache_client.get_multi(
                    long_text_ids, "long#")
                long_text_map.update(cached_result)
                long_text_ids = filter(lambda x: x not in long_text_map,
                                       long_text_ids)
            logging.debug("long_text_ids after cache size=%d",
                          len(long_text_ids))
            long_text_ids = sorted(list(set(long_text_ids)))
            if len(long_text_ids) > 0:
                rpcs = []
                max_size = 50
                for chunk in (
                        long_text_ids[x:x + max_size]
                        for x in xrange(0, len(long_text_ids), max_size)):
                    ids = ",".join(chunk)
                    rpc = urlfetch.create_rpc()
                    urlfetch.make_fetch_call(
                        rpc,
                        "https://api.weibo.com/2/statuses/show_batch.json?" +
                        urllib.urlencode({
                            "ids": ids,
                            "isGetLongText": "1",
                        }),
                        headers={"Authorization": "OAuth2 " + access_token})
                    rpc.ids = ids
                    rpcs.append(rpc)
                for rpc in rpcs:
                    try:
                        body = json.loads(rpc.get_result().content)
                    except Exception as e:
                        body = {"error": str(e)}
                    if "error" in body:
                        logging.warn("show_batch %s error: %s", rpc.ids,
                                     str(body["error"]))
                    elif "statuses" in body:
                        for status in body["statuses"]:
                            if "longText" in status:
                                long_text_map[status["idstr"]] = status[
                                    "longText"]["longTextContent"]
            logging.debug("long_text_map size=%d", len(long_text_map))
            if len(long_text_map) > 0:
                memcache_client.set_multi(long_text_map,
                                          time=86400,
                                          key_prefix="long#")

                def expand_long_text(status):
                    if status.get("isLongText", True):
                        text = long_text_map.get(status["idstr"])
                        if text is not None:
                            logging.debug("replace long text for %s",
                                          status["idstr"])
                            status["text"] = text
                            status["isLongText"] = False

                for status in results:
                    expand_long_text(status)
                    status = status.get("retweeted_status")
                    if status is not None:
                        expand_long_text(status)
            # Expand t.cn short links
            tcn_id2url = defaultdict(set)
            all_tcn_urls = set()

            def extract_tcn_urls(status):
                tcn_urls = tcn_regex.findall(status["text"])
                idstr = status["idstr"]
                tcn_id2url[idstr].update(tcn_urls)
                all_tcn_urls.update(tcn_urls)

            for status in results:
                extract_tcn_urls(status)
                status = status.get("retweeted_status")
                if status is not None:
                    extract_tcn_urls(status)
            logging.debug("all_tcn_urls before cache size=%d",
                          len(all_tcn_urls))
            all_tcn_urls = list(all_tcn_urls)
            tcn_short2long = {}
            cached_result = memcache_client.get_multi(all_tcn_urls, "tcn#")
            tcn_short2long.update(cached_result)
            all_tcn_urls = filter(lambda x: x not in tcn_short2long,
                                  all_tcn_urls)
            logging.debug("all_tcn_urls after cache size=%d",
                          len(all_tcn_urls))
            rpcs = []
            max_size = 20
            for chunk in (all_tcn_urls[x:x + max_size]
                          for x in xrange(0, len(all_tcn_urls), max_size)):
                rpc = urlfetch.create_rpc()
                rpc.chunk = chunk
                urlfetch.make_fetch_call(
                    rpc,
                    "https://api.weibo.com/2/short_url/expand.json?" +
                    urllib.urlencode([("url_short", x) for x in chunk]),
                    headers={"Authorization": "OAuth2 " + access_token})
                rpcs.append(rpc)
            for rpc in rpcs:
                try:
                    result = json.loads(rpc.get_result().content)
                except Exception as e:
                    result = {"error": str(e)}
                if "error" in result:
                    logging.warn("expand %s error: %s", str(rpc.chunk),
                                 result["error"])
                elif "urls" in result:
                    for u in result["urls"]:
                        url_long = u["url_long"]
                        if u["result"] and url_long != "":
                            o = urlparse.urlparse(url_long,
                                                  scheme="http",
                                                  allow_fragments=True)
                            qsl = filter(lambda x: x[0] not in utm_queries,
                                         urlparse.parse_qsl(o.query, True))
                            o = urlparse.ParseResult(
                                o.scheme, o.netloc, o.path, o.params,
                                urllib.urlencode(encode_obj(qsl)), o.fragment)
                            url_long = urlparse.urlunparse(o)
                            if url_long:
                                tcn_short2long[u["url_short"]] = url_long
            logging.debug("tcn_short2long size=%d", len(tcn_short2long))
            if len(tcn_short2long) > 0:
                memcache_client.set_multi(tcn_short2long,
                                          time=86400,
                                          key_prefix="tcn#")

                def expand_url(status):
                    idstr = status["idstr"]
                    tcn_urls = tcn_id2url[idstr]
                    if len(tcn_urls) > 0:
                        text = status["text"]
                        for short_url in tcn_urls:
                            long_url = tcn_short2long.get(short_url)
                            if long_url is not None:
                                text = text.replace(short_url, long_url)
                        status["text"] = text

                for status in results:
                    expand_url(status)
                    status = status.get("retweeted_status")
                    if status is not None:
                        expand_url(status)

            # Cache the results
            memcache_client.set(sid,
                                zlib.compress(json.dumps(results), 9),
                                time=120)
        else:
            logging.debug("sid %s from cache", sid)
        self.response.headers[
            "Content-Type"] = "application/rss+xml; charset=utf-8"
        self.render_response("rss.xml",
                             results=results,
                             abs_uri_for=functools.partial(self.uri_for,
                                                           _full=True,
                                                           _scheme="https"))
Example 11
        stripped_path = language_stripper.strip_path(parsed_uri.path)
        stripped_path = re.sub(r'//+', '/', stripped_path)
        stripped_path = re.sub(r'__+', '_', stripped_path)
        stripped_path = re.sub(r'--+', '-', stripped_path)

        stripped_query = language_stripper.strip_query(parsed_uri.query)

        stripped_netloc = parsed_uri.netloc
        if '@' in stripped_netloc:
            stripped_netloc = stripped_netloc.split('@')[1]
        if ':' in stripped_netloc:
            stripped_netloc = stripped_netloc.split(':')[0]
        if not stripped_netloc:
            continue

        stripped_uri = urlparse.ParseResult(scheme="http",
                                            netloc=stripped_netloc,
                                            path=stripped_path,
                                            params='',
                                            query=stripped_query,
                                            fragment='').geturl()

        # remove new trailing /
        if stripped_uri and stripped_uri[-1] == '/' \
                and parsed_uri.path and parsed_uri.path[-1] != '/':
            stripped_uri = stripped_uri[:-1]

        if stripped_uri in candidates:
            print_match(stripped_uri, uri, args.crawl, candidates)
            continue
Example 12
def add_wfs_layer(endpoint,
                  version,
                  typename,
                  metadata_string=None,
                  title=None,
                  bbox=None,
                  user=None,
                  password=None):
    endpoint_parsed = urlparse.urlparse(endpoint)
    q_dict = {
        'version': version,
        'typename': typename,
        'outputFormat': 'shape-zip',
        'request': 'GetFeature',
        'service': 'WFS',
        'srsName': 'EPSG:4326',
    }
    if bbox:
        q_dict['bbox'] = ','.join(bbox)
    parsed_url = urlparse.ParseResult(scheme=endpoint_parsed.scheme,
                                      netloc=endpoint_parsed.netloc,
                                      path=endpoint_parsed.path,
                                      params=None,
                                      query=urllib.urlencode(q_dict),
                                      fragment=None)
    tmpfile = download_file(parsed_url.geturl(), user=user, password=password)

    # args = [
    #     'ogr2ogr',
    #     '-nlt POLYGON',
    #     '-skipfailures',
    #     '%s.shp' % tmpfile,
    #     tmpfile,
    #     'OGRGeoJSON'
    # ]
    #
    # retval = subprocess.call(args)

    # # get metadata file
    # if metadata_string:
    #     if not isinstance(metadata_string, unicode):
    #         metadata_string = unicode(metadata_string, 'utf-8')
    #     metadata_file = '%s.xml' % tmpfile
    #     with io.open(metadata_file, mode='w', encoding='utf-8') as f:
    #         f.write(metadata_string)
    #
    # saved_layer = None
    # if retval == 0:
    #     saved_layer = file_upload(
    #         '%s.shp' % tmpfile,
    #         overwrite=True)
    #     saved_layer.set_default_permissions()
    #     saved_layer.title = title or typename
    #     saved_layer.save()
    #
    # # cleanup
    # dir_name = os.path.dirname(tmpfile)
    # for root, dirs, files in os.walk(dir_name):
    #     for f in files:
    #         if tmpfile in f:
    #             try:
    #                 os.remove(os.path.join(root, f))
    #             except:
    #                 pass

    dir_name = os.path.dirname(tmpfile)
    saved_layer = None
    metadata_file = None

    with ZipFile(tmpfile) as zf:
        zf.extractall(path=dir_name)
        for name in zf.namelist():
            basename, ext = os.path.splitext(name)
            if '.shp' in ext:
                # get metadata file
                if metadata_string:
                    if not isinstance(metadata_string, unicode):
                        metadata_string = unicode(metadata_string, 'utf-8')
                    metadata_file = '%s.xml' % basename
                    metadata_file = os.path.join(dir_name, metadata_file)
                    metadata_string = cleanup_metadata(metadata_string)
                    with io.open(metadata_file, mode='w',
                                 encoding='utf-8') as f:
                        f.write(metadata_string)

                # process shapefile layer
                saved_layer = file_upload(os.path.join(dir_name, name),
                                          overwrite=True)
                saved_layer.set_default_permissions()
                saved_layer.title = title or typename
                saved_layer.save()
                break

        # cleanup
        for name in zf.namelist():
            filepath = os.path.join(dir_name, name)
            try:
                os.remove(filepath)
            except BaseException:
                pass

        if metadata_file:
            try:
                os.remove(metadata_file)
            except BaseException:
                pass

    # cleanup
    try:
        os.remove(tmpfile)
    except BaseException:
        pass
    return saved_layer
Example 13
            list_only = True
        elif a[0] == '-i':
            interactive = True
        elif a[0] == '-h':
            usage()
            sys.exit(0)

    try:
        in_app = urlparse.urlparse(leftover[0])
    except IndexError:
        error("%s is not a valid Acre app" % leftover[0])

    try:
        out_app = urlparse.urlparse(leftover[1])
    except IndexError:
        out_app = urlparse.ParseResult('', '', in_app.path.split('/')[-1], '', '', '')

    if in_app.scheme != "" and in_app.scheme != "freebase":
        error("%s is not a valid acre URL" % leftover[0])

    if out_app.scheme != "" and out_app.scheme != "freebase":
        error("%s is not a valid acre URL" % leftover[1])

    if in_app.scheme == "" and in_app.scheme == out_app.scheme:
        error("You must specify at least one URL")

    if in_app.netloc:
        username = in_app.username or username
        password = in_app.password or password
        msb = MetawebServicesBundle(in_app.netloc, None, username, password)
        if version:
Example 14
 def unauth(self):
     if not self.is_auth():
         return self
     return URL.objectify(urlparse.ParseResult(
         self.scheme, '%s:%s' % (self.hostname, self.port or {'https': 443, 'http': 80}[self.scheme]),
         self.path.replace('//', '/'), self.params, self.query, self.fragment))
Example 15
    def _parse_url(self, rawUrl):
        """
        Get direct image URL for web pages.
        """
        url = urlparse.urlparse(rawUrl)

        # check whether the url is accessible
        req = urllib2.Request(rawUrl,
                              headers={'User-Agent': credentials.USER_AGENT})
        fd = urllib2.urlopen(req, context=self.context)

        p = url.path.lower()
        if p.endswith(".jpg") or p.endswith(".jpeg") or p.endswith(
                ".png") or p.endswith(".gif"):
            return rawUrl

        # get direct url from imgur (skip sets and albums)
        if "imgur.com" in url.netloc and "a/" not in url.path and (
                "," not in url.path) and ("gifv" not in url.path):
            newloc = "i." + url.netloc
            newpath = url.path
            if newpath.endswith("/new"):
                # str.replace returns a new string; the result must be assigned
                newpath = newpath.replace("/new", "")
            newpath += ".jpg"
            newpath = newpath.replace("gallery/", "")
            newUrl = urlparse.ParseResult(url.scheme, newloc, newpath,
                                          url.params, url.query, url.fragment)

            return newUrl.geturl()

        # get direct url from flickr
        if "flickr.com" in url.netloc:
            path = filter(lambda x: x != '', url.path.split('/'))
            if path[0] == 'photos' and len(path) >= 3:
                newpath = '/photos/%s/%s/sizes/l' % (path[1], path[2])
                newUrl = urlparse.ParseResult(url.scheme, url.netloc, newpath,
                                              url.params, url.query,
                                              url.fragment)

                try:
                    file = urllib2.urlopen(newUrl.geturl(),
                                           context=self.context)
                    tree = etree.HTML(file.read())
                    staticUrl = tree.xpath(
                        '//div[@id="allsizes-photo"]/img/@src')
                    if len(staticUrl):
                        return staticUrl[0]
                except:
                    pass

        if "apod.nasa.gov" in url.netloc:
            try:
                file = urllib2.urlopen(url.geturl(), context=self.context)
                tree = etree.HTML(file.read())
                directUrl = tree.xpath('//img/@src')
                if len(directUrl):
                    return "http://apod.nasa.gov/apod/" + directUrl[0]
            except:
                pass

        if "wikipedia.org" in url.netloc and "File:" in url.path:
            try:
                file = urllib2.urlopen(url.geturl(), context=self.context)
                tree = etree.HTML(file.read())
                directUrl = tree.xpath('//div[@class="fullMedia"]/a/@href')[0]
                if len(directUrl):
                    return "http:" + directUrl
            except:
                pass

        return None
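
Taken in isolation, the imgur branch rewrites a gallery page URL into a direct image URL; a sketch with a hypothetical image id:

import urlparse

url = urlparse.urlparse("http://imgur.com/gallery/AbCdEf")
newUrl = urlparse.ParseResult(url.scheme, "i." + url.netloc,
                              url.path.replace("gallery/", "") + ".jpg",
                              url.params, url.query, url.fragment)
print newUrl.geturl()  # http://i.imgur.com/AbCdEf.jpg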
Example 16
     try:
         developer_key = developer_key_file.read().strip()
     except IOError, io_error:
         print 'Error loading developer key from file %s' % (
                 FLAGS.developer_key_file)
         print 'Error details: %s' % str(io_error)
         sys.exit()
     finally:
         developer_key_file.close()
     s = urlparse.urlparse(uri)
     query = 'key=' + developer_key
     if s.query:
         query = s.query + '&key=' + developer_key
     d = urlparse.ParseResult(s.scheme,
                              s.netloc,
                              s.path,
                              s.params,
                              query,
                              s.fragment)
     uri = urlparse.urlunparse(d)
 if FLAGS.dump_request:
     print '--request-start--'
     print '%s %s' % (method, uri)
     if headers:
         for (h, v) in headers.iteritems():
             print '%s: %s' % (h, v)
     print ''
     if body:
         print json.dumps(json.loads(body),
                          sort_keys=True,
                          indent=2)
     print '--request-end--'
Example 17
    def upload(request):
        """
            view that handles file upload via Ajax
        """

        # check upload permission
        error = ''
        new_file_name = ''
        allowed_upload_file_types = ALLOWED_UPLOAD_FILE_TYPE
        max_upload_file_size = MAX_UPLOAD_FILE_SIZE
        try:
            base_file_name = str(time.time()).replace(
                '.', str(random.randint(0, 100000)))
            file_storage, new_file_name = store_uploaded_file(
                request,
                'file-upload',
                allowed_upload_file_types,
                base_file_name,
                max_file_size=max_upload_file_size)
            logger.info(
                "docreaderxblock FileStoreAPI.upload try uploaded {}: success".
                format(str(new_file_name)))
        except Exception as e:
            logger.warning(
                "docreaderxblock FileStoreAPI.upload try uploaded {}: fail -- {}"
                .format(str(new_file_name), e))
            file = request.POST["file-upload"].file
            request.FILES = {}
            request.FILES["file-upload"] = file
            try:
                file_storage, new_file_name = store_uploaded_file(
                    request,
                    'file-upload',
                    allowed_upload_file_types,
                    base_file_name,
                    max_file_size=max_upload_file_size)
                logger.info(
                    "docreaderxblock FileStoreAPI.upload try uploaded {}: success"
                    .format(str(new_file_name)))
            except Exception as e:
                error = str(type(e)) + " - " + str(e)
                logger.error(
                    "docreaderxblock FileStoreAPI.upload except uploaded {}: fail -- {}"
                    .format(str(new_file_name), e))
                raise Exception
        if error == '':
            result = 'SUCCESS'
            file_url = file_storage.url(new_file_name)
            parsed_url = urlparse.urlparse(file_url)
            file_url = urlparse.urlunparse(
                urlparse.ParseResult(parsed_url.scheme, parsed_url.netloc,
                                     parsed_url.path, '', '', ''))
        else:
            result = ''
            file_url = ''

        return HttpResponse(json.dumps({
            'result': "SUCCESS",
            'msg': result,
            'error': error,
            'file_url': file_url,
        }),
                            content_type="application/json")
Example 18
    def parseUri(packageUri, path=None, packageloc=None):
        """ static method to parse the uri
        @throws - AgentException
        """

        if path is None:
            from agent.lib.packagemgr import PackageMgr
            path = PackageMgr.packagePath()

        uri = urlparse.urlparse(packageUri)

        if (uri.scheme != 'http'):
            raise AgentException(
                Errors.PACKAGE_SCHEME_ERROR,
                'uri (%s) scheme(%s) not supported' % (packageUri, uri.scheme))

        if (uri.path == ''):
            raise AgentException(Errors.PACKAGE_PATH_ERROR,
                                 'uri (%s) path is empty' % (packageUri))

        # now parse the path.  get the name and then verify that it matches the convention
        if packageloc is not None:
            # check if torrent download
            if uri.path.endswith('.torrent'):
                if not packageloc.endswith('.torrent'):
                    packageloc = packageloc + '.torrent'
            packName = packageloc
        else:
            packName = uri.path.rpartition('/')[2]

        match = PackageUtil.nameRe.match(packName)
        if match is None or match.group(0) != packName:
            raise AgentException(
                Errors.PACKAGE_PATH_ERROR,
                'cannot find package name in path %s' % (uri.path))

        # ok now we can fill out the dictionary
        uriDict = {}
        uriDict['uri'] = packageUri
        uriDict['scheme'] = uri.scheme
        uriDict['uripath'] = uri.path
        uriDict['package'] = match.group(1)
        uriDict['packageNameVersion'] = match.group(2)
        uriDict['inProgressPackage'] = PackageUtil.inProgressPath(
            uriDict['package'])
        uriDict['packageName'] = match.group(3)
        uriDict['propName'] = uriDict['package'] + '.prop'
        uriDict['packageVersion'] = match.group(4)
        uriDict['packageVersionMajor'] = match.group(5)
        uriDict['packageVersionMinor'] = match.group(6)
        uriDict['packageVersionBuild'] = match.group(7)
        uriDict['packagePlat'] = match.group(8)

        # path specific attributes
        uriDict['packagePath'] = os.path.join(path, uriDict['package'])
        uriDict['inProgressPackagePath'] = os.path.join(
            path, uriDict['inProgressPackage'])
        uriDict['propPath'] = os.path.join(path, uriDict['propName'])

        # check if bittorrent transfer and define the necessary keys
        if match.group(9) is not None and match.group(9) == '.torrent':
            uriDict['scheme'] = 'bittorrent'
        uriDict['torrentName'] = uriDict['package'] + '.torrent'
        uriDict['torrentPath'] = os.path.join(path, uriDict['torrentName'])

        # calculate prop url
        # append path with .prop - mons: leaving the below code in place in case we support other protocols in future
        uriScheme = uri.scheme
        if uriScheme != "http":  # only use http to download .prop and .torrent files
            uriScheme = "http"

        if uri.path.endswith('.torrent'):
            # str.strip('.torrent') removes characters, not the suffix; slice it off instead
            uripath = uri.path[:-len('.torrent')]
        else:
            uripath = uri.path

        propParseResult = urlparse.ParseResult(uriScheme, uri.netloc,
                                               uripath + '.prop', uri.params,
                                               uri.query, uri.fragment)
        propUri = urlparse.urlunparse(propParseResult)
        uriDict['propUri'] = propUri

        # calculate the torrent url
        # append path with .torrent
        torrentParseResult = urlparse.ParseResult(uriScheme, uri.netloc,
                                                  uripath + '.torrent',
                                                  uri.params, uri.query,
                                                  uri.fragment)
        torrentUri = urlparse.urlunparse(torrentParseResult)
        uriDict['torrentUri'] = torrentUri

        return uriDict
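
The derived .prop URL can be checked with a standalone sketch (hypothetical package URL; real package names must also match PackageUtil.nameRe, which is not shown here):

import urlparse

uri = urlparse.urlparse("http://repo.example.com/packages/mypackage.tar.gz")
prop = urlparse.ParseResult("http", uri.netloc, uri.path + ".prop",
                            uri.params, uri.query, uri.fragment)
print urlparse.urlunparse(prop)  # http://repo.example.com/packages/mypackage.tar.gz.prop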
Example 19
def googlepay_info2u8(self, context, meta, appid, packagename, productid,
                      token, userid, app_order_id, amount, game_callback_url,
                      good_name, passthrough, platform):
    pay_channel = '96'
    context['pay_channel'] = pay_channel  # Google Pay, channel code 96
    event_name = settings.API_IMPORTANT_EVENTS.ORDER_CREATED
    try:
        user = User.objects.get(id=userid)
        app = App.objects.get(appid=appid)
    except Exception:
        context['reason'] = 'user or app error'
        _track(event_name, context, meta)
        return  # without this, the code below would reference an undefined user/app

    # Create the local order
    order = UserGameOrder.create_order(user=user,
                                       app=app,
                                       game_order_id=app_order_id,
                                       amount=amount,
                                       real_amount=amount,
                                       callback_url=game_callback_url,
                                       good_name=good_name,
                                       passthrough=passthrough,
                                       platform=platform,
                                       pay_channel=pay_channel)
    # The local system generates a trade id for this order
    trade_id = uuid.uuid4().get_hex()
    order.trade_id = trade_id
    order.order_status = "I"  # payment initiated; "I" means awaiting payment
    order.save()

    event_name = settings.API_IMPORTANT_EVENTS.GOOGLE_PAY_VERIFY_INFO
    try:
        servertoken = googlepay_token_refresh(appid)
    except Exception:
        event_name = settings.API_IMPORTANT_EVENTS.GOOGLE_PAY_REFRESH_TOKEN
        context['reason'] = 'google refresh token error'
        _track(event_name, context, meta)
        return  # servertoken is undefined past this point
    try:
        purchaseState = googlepay_product_status(packagename, productid, token,
                                                 servertoken)
    except Exception:
        event_name = settings.API_IMPORTANT_EVENTS.GOOGLE_PAY_VERIFY_ERROR
        context['reason'] = 'google server connect error'
        _track(event_name, context, meta)
    else:
        if str(purchaseState) == '0':
            event_name = settings.API_IMPORTANT_EVENTS.REUQEST_U8
            # Only notify the U8 server when the purchase state is "purchased"
            request_args = get_callback_arg_tuples(order,
                                                   others=[('ProductID', '')])
            request_query_str = '&'.join(
                ['='.join(item) for item in request_args])
            pay_callback_url = order.app.pay_callback_url
            parsed_u8_callback_url = urlparse.urlparse(pay_callback_url)
            new_u8_parsed_callback_url = urlparse.ParseResult(
                scheme=parsed_u8_callback_url.scheme,
                netloc=parsed_u8_callback_url.netloc,
                path=parsed_u8_callback_url.path,
                params=parsed_u8_callback_url.params,
                query=request_query_str,
                fragment=parsed_u8_callback_url.fragment)
            new_u8_callback_url = urlparse.urlunparse(
                new_u8_parsed_callback_url)
            callback_sign = get_signature(order.app.appsecret.encode('utf-8'),
                                          new_u8_callback_url)
            request_args.append(('sign', callback_sign))
            request_args_map = dict(request_args)
            request_obj = urllib2.Request(pay_callback_url)
            request_obj.add_data(urllib.urlencode(request_args_map))
            try:
                response = urllib2.urlopen(request_obj, timeout=6).read()
            except Exception:
                context['reason'] = 'u8 connect error'
                _track(event_name, context, meta)
            else:
                # If U8 reports success, set the local order status to "S"
                if json.loads(response)['status'] == 'success':
                    order.order_status = "S"
                    order.save()
                    event_name = settings.API_IMPORTANT_EVENTS.PAY_SUCCESS
                    _track(event_name, context, meta)
                else:
                    context['reason'] = 'u8 response is failed'
                    _track(event_name, context, meta)
        elif str(purchaseState) == '1':
            # The order state is "cancelled"
            context['reason'] = 'order state is cancelled'
            _track(event_name, context, meta)
        else:
            # In theory this case should never happen
            context['reason'] = 'order state error'
            _track(event_name, context, meta)
Example 20
def pdns_request_handler(domain, result_format, **dynamic_data):
    scheme = "https"
    netloc = "api.dnsdb.info"
    path = "lookup/rrset/name".split('/')
    query = ["limit=%d" % int(dynamic_data['limit'])]

    results = {'success': False}
    if not config.myConfig['apikey']:
        results['error'] = 'No DNSDB key.'
        return results

    # If 'any' was requested, keep only 'any' (plus 'any-dnssec' if that was also requested)
    if 'any' in dynamic_data['rrtypes']:
        if 'any-dnssec' in dynamic_data['rrtypes']:
            dynamic_data['rrtypes'] = ['any', 'any-dnssec']
        else:
            dynamic_data['rrtypes'] = ['any']

    results['data'] = {}
    wildcard = "*."

    if dynamic_data['absolute']:
        wildcard = ""

    owner_name = wildcard + urllib.quote(domain)
    path.append(owner_name)

    for rrtype in dynamic_data['rrtypes']:
        local_path = list(path) + [rrtype]
        local_path = "/".join(local_path)

        local_url = urlparse.ParseResult(scheme,
                                         netloc,
                                         local_path,
                                         "",
                                         "&".join(query),
                                         "")

        url = urlparse.urlunparse(local_url)
        try:
            headers = {'Accept': 'application/json',
                       'X-API-Key': config.myConfig['apikey']}
            r = requests.get(url,
                             proxies=settings.PROXIES,
                             headers=headers,
                             verify=config.myConfig["ssl_verify"])
        except Exception as e:
            results['error'] = str(e)
            return results

        if r.status_code not in [200, 404]:
            return check_return_code(r)

        if r.status_code == 200:
            # Each line of the response is an individual JSON blob.
            for line in r.text.split('\n'):
                # Skip empty lines.
                if not line:
                    continue
                try:
                    tmp = json.loads(line)
                except Exception as e:
                    results['error'] = \
                        "%s: %s" % (str(e), cgi.escape(line, quote=True))
                    return results

                # Convert epoch timestamps to human readable.
                for key in ['time_first', 'time_last']:
                    if key in tmp:
                        tmp[key] = time.strftime("%Y-%m-%d %H:%M:%S",
                                                 time.gmtime(tmp[key]))
                rrtype = tmp['rrtype']
                # Strip the MX weight.
                if rrtype == 'MX':
                    tmp['rdata'] = [rd.split()[1] for rd in tmp['rdata']]

                if result_format in ['none', 'list']:
                    if tmp['rrname'][-1] == ".":
                        tmp['rrname'] = tmp['rrname'][:-1]

                    for (idx, rdat) in enumerate(list(tmp['rdata'])):
                        if rdat and rdat[-1] == ".":
                            tmp['rdata'][idx] = rdat[:-1]

                try:
                    results['data'][rrtype].append(tmp)
                except KeyError:
                    results['data'][rrtype] = [tmp]

    results['success'] = True
    rate = {'limit': r.headers['X-RateLimit-Limit'],
            'remaining': r.headers['X-RateLimit-Remaining'],
            'reset': r.headers['X-RateLimit-Reset']}
    if rate['reset'] != 'n/a':
        rate['reset'] = datetime.datetime.utcfromtimestamp(
            float(rate['reset'])).strftime('%Y-%m-%d %H:%M:%S GMT')
    results['rate'] = rate

    if result_format != 'none':
        results = _format_results(results, result_format, dynamic_data)

    return results
Example 21
    except exceptions.PermissionDenied, err:
        error = unicode(err)
    except Exception, err:
        print err
        logging.critical(unicode(err))
        error = _(
            'Error uploading file. Please contact the site administrator. Thank you.'
        )

    if error == '':
        result = _('Good')
        file_url = file_storage.url(new_file_name)
        parsed_url = urlparse.urlparse(file_url)
        file_url = urlparse.urlunparse(
            urlparse.ParseResult(parsed_url.scheme, parsed_url.netloc,
                                 parsed_url.path, '', '', ''))
    else:
        result = ''
        file_url = ''

    return JsonResponse(
        {'result': {
            'msg': result,
            'error': error,
            'file_url': file_url,
        }})


@require_GET
@login_required
def users(request, course_id):
Example 22
def strip_path(input_url):
    """srips path, params and hash fragments of the url"""
    purl = urlparse.urlparse(input_url)
    return urlparse.urlunparse(
        urlparse.ParseResult(purl.scheme, purl.netloc, '', '', '', ''))
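
A one-line usage sketch (hypothetical URL):

print strip_path("https://example.com/a/b;params?q=1#frag")  # https://example.com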
Example 23
    def parse(self):
        url_dict = {}
        url_parse = urlparse.urlparse(self.url)
        base_url = urlparse.ParseResult(url_parse.scheme, url_parse.netloc,
                                        '/', None, None, None).geturl()
        base_url_domain = urlutils.getdomain(self.url)
        LOG.info(
            _("Url: %(url)s, base url: %(baseurl)s, base domain: %(basedomain)s"), {
                  'url': self.url,
                  'baseurl': base_url,
                  'basedomain': base_url_domain
              })

        soup = BeautifulSoup(self.content)
        for a_tag in soup.findAll('a'):
            if not a_tag.has_key('href'):
                continue
            if a_tag['href'].lower().find('javascript') != -1:
                continue
            if CONF.filter_no_follow and a_tag.has_key('nofollow'):
                continue
            if CONF.filter_onclick and a_tag.has_key('onclick'):
                continue

            new_url = a_tag['href']
            if base_url and not new_url.startswith("http"):
                if new_url.startswith('/'):
                    new_url = new_url[1:]
                new_url = base_url + new_url
            ret, reason = self.__filter.filter(new_url)
            if ret:
                LOG.info(_("Filter Url: %(url)s,Reason: %(reason)s"), {
                    'url': new_url,
                    'reason': reason
                })
                continue
            if CONF.extract_indomain_link:
                domain = urlutils.getdomain(new_url)
                if not domain.lower() == base_url_domain.lower():
                    LOG.info(_("Filter Url: %(url)s,Reason: NotInDomain"),
                             {'url': new_url})
                    continue
            if new_url in url_dict:
                if a_tag.string not in url_dict[new_url]:
                    url_dict[new_url].append(a_tag.string)
                    LOG.debug(
                        _("Add outlink Text Url: %(url)s,value: %(value)s"), {
                            'url': new_url,
                            'value': url_dict[new_url]
                        })
            else:
                url_dict[new_url] = [a_tag.string]
            LOG.debug(_("Extract Outlink: url: %(url)s,text: %(text)s "), {
                'url': new_url,
                'text': a_tag.string
            })
        for key, value in url_dict.iteritems():
            ol = OutLink(url=key, text='$@$'.join(value))
            self.outlinks.append(ol)
Example 24
def iap_verify_receipt(self, verify_from, appid, amount, app_order_id,
                       good_name, pay_channel, userid, **kwargs):
    '''Verify the receipt (raw_data) against Apple's servers and fetch the payment receipt info.'''
    args_string = '\t'.join([
        verify_from, appid, amount, app_order_id, good_name, pay_channel,
        userid
    ])  # for logging
    raw_data = kwargs.get('raw_data')

    raw_data_digest = hashlib.sha1(
        raw_data).hexdigest()  # used to detect a reused payment receipt; duplicates are rejected outright
    iap_receipt_history, created = IAPReceiptHistory2.objects.get_or_create(
        iap_digest=raw_data_digest)

    # Important events involved in this handler
    IAP_VERIFY_ERROR = settings.API_IMPORTANT_EVENTS.IAP_VERIFY_ERROR
    IAP_VERIFY_INFO = settings.API_IMPORTANT_EVENTS.IAP_VERIFY_INFO
    REUQEST_U8 = settings.API_IMPORTANT_EVENTS.REUQEST_U8
    raw_args = {
        'appid': appid,
        'amount': amount,
        'app_order_id': app_order_id,
        'good_name': good_name,
        'pay_channel': pay_channel,
        'userid': userid,
    }
    context = {}
    context['args_map'] = raw_args
    context['pay_channel'] = pay_channel

    if (not created) and (iap_receipt_history.state == 1):
        context['reason'] = 'duplicated receipt'
        _track(IAP_VERIFY_ERROR, context)
        return

    if verify_from != '1':
        context['reason'] = 'verify_from should be 1'
        _track(IAP_VERIFY_ERROR, context)
        return

    try:
        IS_SANDBOX = True
        response = None
        try:
            with itunesiap.env.review:
                response = itunesiap.verify(raw_data)
        except itunesiap.exceptions.InvalidReceipt as e:
            context['reason'] = 'exception when request iap endpoint %s' % str(
                e)
            _track(IAP_VERIFY_ERROR, context)

        if not response:
            context['reason'] = 'response from iap is none'
            _track(IAP_VERIFY_ERROR, context)
            return

        if response.status == 0:
            IS_SANDBOX = response['environment'].lower() == 'sandbox'
            NEED_NOTIFY = True

            try:
                app = App.objects.get(appid=appid)
            except App.DoesNotExist:
                context['reason'] = 'app not exists %s' % appid
                _track(IAP_VERIFY_ERROR, context)
                return

            ###### Validate the bundle id
            try:
                response.receipt.in_app.sort(
                    key=lambda x: int(getattr(x, 'original_purchase_date_ms')))
                last_in_app = response.receipt.last_in_app
                ios_receipt_logger.info('{}\t{}\t{}\t{}'.format(
                    raw_data_digest, raw_data, args_string,
                    response))  # log the mapping between the receipt digest and its raw value
                # Use original_transaction_id to prevent duplicate shipping
                try:
                    _original_transaction_id = getattr(
                        last_in_app, 'original_transaction_id')
                except:
                    _original_transaction_id = None
                original_transaction_ids = IAPReceiptHistory2.objects.filter(
                    original_transaction_id=_original_transaction_id)
                if original_transaction_ids.exists():
                    context['reason'] = 'duplicated original_transaction_id'
                    context[
                        'original_transaction_id'] = _original_transaction_id
                    _track(IAP_VERIFY_ERROR, context)
                    return
                # Use original_transaction_id to prevent duplicate shipping END
            except IndexError:
                context['reason'] = 'no last_in_app found'
                _track(IAP_VERIFY_ERROR, context)
                return

            try:
                bundle_id = response.receipt['bundle_id']
            except:
                bundle_id = None

            if not bundle_id:
                try:
                    bundle_id = last_in_app.bid
                except AttributeError:
                    bundle_id = None

            if not bundle_id:
                context['reason'] = 'bundle_id is empty'
                _track(IAP_VERIFY_ERROR, context)
                return
            else:
                package_names = app.package_names
                try:
                    package_names_info = json.loads(package_names)
                except Exception as e:
                    context['reason'] = 'package_names not configured properly'
                    _track(IAP_VERIFY_ERROR, context)
                    return

                if bundle_id not in package_names_info:  # first, make sure the bundle id is valid
                    context['reason'] = 'invalid bundle_id'
                    _track(IAP_VERIFY_ERROR, context)
                    return
                else:
                    # The bundle_id is valid; next extract this order's real price from product_id (bundleid-currency-goodid-realmoney)
                    SEP = '_'
                    product_id = last_in_app.product_id
                    try:
                        _bundleid, _currency, _goodid, realmoney = product_id.split(
                            SEP)
                    except ValueError:
                        context[
                            'reason'] = 'cannot extract the 4 parts from product_id %s' % product_id
                        _track(IAP_VERIFY_ERROR, context)
                        return

                    if bundle_id != _bundleid:
                        context[
                            'info'] = 'bundle_id from iap does not match bundle_id extracted from product_id %s %s' % (
                                bundle_id, _bundleid)
                        _track(IAP_VERIFY_INFO, context)
                    try:
                        realmoney = float(realmoney)  # in yuan
                        real_amount = int(realmoney * 100)  # convert yuan to fen
                    except Exception:
                        context[
                            'reason'] = 'error when reading realmoney from product_id'
                        _track(IAP_VERIFY_ERROR, context)
                        return
            ###### Validate the bundle id END
            ###### Check the package review status; once approved for production, disable sandbox payments
            package_online = package_names_info[bundle_id]['production'] == '1'
            if not IS_SANDBOX:
                order_status = 'S'
            else:
                if not package_online:
                    order_status = 'SS'
                else:
                    NEED_NOTIFY = False  # if the package is already live, disable sandbox payments
                    order_status = 'E'  # and mark the order status as abnormal
            ###### Check the package review status; once approved for production, disable sandbox payments END

            try:
                user = User.objects.get(id=userid)
            except User.DoesNotExist:
                context['reason'] = 'user does not exist'
                _track(IAP_VERIFY_ERROR, context)
                return

            if pay_channel != '99':  # Apple iTunes payment channel
                context['reason'] = 'invalid pay_channel'
                _track(IAP_VERIFY_ERROR, context)
                return

            orders = UserGameOrder.objects.filter(game_order_id=app_order_id)
            if not orders.exists():  # create the order if it does not exist
                platform = 2  # mobile game
                passthrough = kwargs.get('passthrough', '')
                game_callback_url = kwargs.get('game_callback_url', '')
                # Create the local order
                order = UserGameOrder.create_order(
                    user=user,
                    real_amount=real_amount,
                    currency=_currency,
                    app=app,
                    game_order_id=app_order_id,
                    amount=amount,
                    callback_url=game_callback_url,
                    good_name=good_name,
                    passthrough=passthrough,
                    platform=platform,
                    pay_channel=pay_channel)
                # The local system generates a trade id for this order
                order.trade_id = uuid.uuid4().hex
            else:
                order = orders[0]
            order.order_status = order_status  # update the order status based on the context above

            try:  # save the order
                order.save()

                # State update to prevent duplicate in-app purchase fraud
                def _getattr(name):
                    try:
                        return getattr(last_in_app, name)
                    except AttributeError:
                        return None

                attr_names = [
                    'quantity', 'product_id', 'transaction_id',
                    'purchase_date_ms', 'original_transaction_id',
                    'original_purchase_date_ms'
                ]
                for name in attr_names:
                    setattr(iap_receipt_history, name, _getattr(name))

                iap_receipt_history.bundle_id = bundle_id
                iap_receipt_history.trade_id = order.trade_id
                iap_receipt_history.is_sandbox = IS_SANDBOX
                iap_receipt_history.state = 0  # currently unverified
                iap_receipt_history.save()
                # State update to prevent duplicate in-app purchase fraud END
            except Exception:
                context['info'] = 'failed to create local order'
                _track(IAP_VERIFY_INFO, context)
                # Even though the local order failed to save, the U8 server still has to be notified, so do not return here

            if not NEED_NOTIFY:
                context[
                    'reason'] = 'sandbox receipt trying to buy in production environment'
                _track(IAP_VERIFY_ERROR, context)
                return
            request_args = get_callback_arg_tuples(order,
                                                   others=[
                                                       ('ProductID',
                                                        product_id)
                                                   ])  # build the callback args for the U8 server request
            request_query_str = '&'.join(
                ['='.join(item) for item in request_args])
            pay_callback_url = app.pay_callback_url
            context['pay_callback_url'] = pay_callback_url  # for logging
            parsed_u8_callback_url = urlparse.urlparse(pay_callback_url)
            new_u8_parsed_callback_url = urlparse.ParseResult(
                scheme=parsed_u8_callback_url.scheme,
                netloc=parsed_u8_callback_url.netloc,
                path=parsed_u8_callback_url.path,
                params=parsed_u8_callback_url.params,
                query=request_query_str,
                fragment=parsed_u8_callback_url.fragment)
            new_u8_callback_url = urlparse.urlunparse(
                new_u8_parsed_callback_url)
            callback_sign = get_signature(app.appsecret.encode('utf-8'),
                                          new_u8_callback_url)
            request_args.append(('Sign', callback_sign))
            args_map = dict(request_args)
            request_obj = urllib2.Request(pay_callback_url)  # create the request object
            request_obj.add_data(urllib.urlencode(args_map))  # attach the request parameters

            response = urllib2.urlopen(
                request_obj, timeout=settings.PAY_CALLBACK_TIMEOUT).read()
            response_map = json.loads(response)
            context['response_map'] = response_map  # for logging
            if response_map['status'] == 'success':
                _track(REUQEST_U8, context)
                iap_receipt_history.state = 1  # mark as verified successfully
                iap_receipt_history.save()
                set_user_ispay_cache(app.appid, user.id, real_amount)  # set the paid-user flag
                event_name = settings.API_IMPORTANT_EVENTS.PAY_SUCCESS
                _track(event_name, context)
            else:
                iap_receipt_history.state = 2  # mark as verification failed
                iap_receipt_history.save()
                raise U8ResponseException(
                    response_map['description'])  # propagate this exception outward to trigger the retry mechanism
        else:
            context['reason'] = 'iap endpoint returned a non-zero status code'
            _track(IAP_VERIFY_ERROR, context)
    except Exception as ee:
        # On any exception, the retry mechanism takes over
        raise self.retry(
            exc=ee,
            max_retries=settings.CELERY_TASK_RETRY_POLICY_MAX_RETRIES,
            countdown=settings.CELERY_TASK_RETRY_POLICY[self.request.retries])
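
The U8 notification step above rebuilds the configured callback URL with a freshly encoded query string before signing it. Below is a minimal, self-contained sketch of that ParseResult round-trip in the same Python 2 style as these examples; the helper name and the HMAC-SHA1 signature are illustrative stand-ins, since get_signature is defined outside this snippet.

import hashlib
import hmac
import urllib
import urlparse

def build_signed_callback_url(callback_url, args, secret):
    # Encode the callback arguments into a query string.
    query = urllib.urlencode(args)
    # Keep scheme/netloc/path of the configured URL; replace only the query.
    p = urlparse.urlparse(callback_url)
    rebuilt = urlparse.urlunparse(urlparse.ParseResult(
        p.scheme, p.netloc, p.path, p.params, query, p.fragment))
    # Hypothetical signature; the original code delegates to get_signature().
    sign = hmac.new(secret, rebuilt, hashlib.sha1).hexdigest()
    return rebuilt, sign

url, sign = build_signed_callback_url(
    'https://u8.example.com/pay/callback',
    [('OrderID', '123'), ('ProductID', 'com.example.app_CNY_1_6.00')],
    'app-secret')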
Esempio n. 25
0
def mod_repo(repo, **kwargs):
    '''
    Modify one or more values for a repo. If the repo does not exist, it will
    be created, so long as the following values are specified:

    repo or alias
        alias by which zypper refers to the repo

    url or mirrorlist
        the URL for zypper to reference

    enabled
        Enable or disable (True or False) the repository,
        but do not remove it if disabled.

    refresh
        Enable or disable (True or False) auto-refresh of the repository.

    cache
        Enable or disable (True or False) RPM files caching.

    gpgcheck
        Enable or disable (True or False) GPG check for this repository.

    Key/Value pairs may also be removed from a repo's configuration by setting
    a key to a blank value. Bear in mind that a name cannot be deleted, and a
    URL can only be deleted if a mirrorlist is specified (or vice versa).

    CLI Examples:

    .. code-block:: bash

        salt '*' pkg.mod_repo alias alias=new_alias
        salt '*' pkg.mod_repo alias url= mirrorlist=http://host.com/
    '''

    repos_cfg = _get_configured_repos()
    added = False

    # An attempt to add a new one?
    if repo not in repos_cfg.sections():
        url = kwargs.get("url", kwargs.get("mirrorlist"))
        if not url:
            raise CommandExecutionError(
                'Repository "{0}" not found and no URL passed to create one.'.format(repo))

        if not urlparse.urlparse(url).scheme:
            raise CommandExecutionError(
                'Repository "{0}" not found and passed URL looks wrong.'.format(repo))

        # Is there already such repo under different alias?
        for alias in repos_cfg.sections():
            repo_meta = _get_repo_info(alias, repos_cfg=repos_cfg)

            # Complete the user's URL, in case it is incomplete
            new_url = urlparse.urlparse(url)
            if not new_url.path:
                new_url = urlparse.ParseResult(scheme=new_url.scheme,  # pylint: disable=E1123
                                               netloc=new_url.netloc,
                                               path='/',
                                               params=new_url.params,
                                               query=new_url.query,
                                               fragment=new_url.fragment)
            base_url = urlparse.urlparse(repo_meta["baseurl"])

            if new_url == base_url:
                raise CommandExecutionError(
                    'Repository "{0}" already exists as "{1}".'.format(repo, alias))

        # Add new repo
        doc = None
        try:
            # Try to parse the output and find the error,
            # but this does not always work (depends on the Zypper version)
            doc = dom.parseString(__salt__['cmd.run'](("zypper -x ar {0} '{1}'".format(url, repo)),
                                                      output_loglevel='trace'))
        except Exception:
            # No XML output available, and the state of the result is still unknown.
            pass

        if doc:
            msg_nodes = doc.getElementsByTagName("message")
            if msg_nodes:
                msg_node = msg_nodes[0]
                if msg_node.getAttribute("type") == "error":
                    raise CommandExecutionError(msg_node.childNodes[0].nodeValue)

        # Verify the repository has been added
        repos_cfg = _get_configured_repos()
        if repo not in repos_cfg.sections():
            raise CommandExecutionError(
                'Failed to add new repository "{0}" for an unknown reason. Please look into the Zypper logs.'.format(repo))
        added = True

    # Modify added or existing repo according to the options
    cmd_opt = []

    if "enabled" in kwargs:
        cmd_opt.append(kwargs["enabled"] and "--enable" or "--disable")

    if "refresh" in kwargs:
        cmd_opt.append(kwargs["refresh"] and "--refresh" or "--no-refresh")

    if "cache" in kwargs:
        cmd_opt.append(kwargs["cache"] and "--keep-packages" or "--no-keep-packages")

    if "gpgcheck" in kwargs:
        cmd_opt.append(kwargs["gpgcheck"] and "--gpgcheck" or "--no-gpgcheck")

    if cmd_opt:
        __salt__['cmd.run'](("zypper -x mr {0} '{1}'".format(' '.join(cmd_opt), repo)),
                            output_loglevel='trace')

    # If the repo was neither added nor modified, an error should be raised
    if not added and not cmd_opt:
        raise CommandExecutionError('Modification of the repository "{0}" was not specified.'.format(repo))

    return {}
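
The duplicate-repo check above only fires when both URLs carry an explicit path, which is why a missing path is normalised to '/'. The same normalisation in isolation (Python 2; the function name is ours):

import urlparse

def normalise_repo_url(url):
    # Default the path to '/' so that a bare host compares equal to the
    # same URL written with a trailing slash.
    parts = urlparse.urlparse(url)
    if not parts.path:
        parts = urlparse.ParseResult(parts.scheme, parts.netloc, '/',
                                     parts.params, parts.query, parts.fragment)
    return parts

assert normalise_repo_url('http://host.com') == urlparse.urlparse('http://host.com/')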
Esempio n. 26
0
 def mk_url(self, **params):
     query = dict(urlparse.parse_qsl(self.url_params.query))
     query.update(params)
     prlist = list(self.url_params)
     prlist[4] = urllib.urlencode(query)
     return urlparse.ParseResult(*prlist).geturl()
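
mk_url merges new parameters over the existing query before rebuilding the URL with ParseResult.geturl(). The same pattern as a free function, assuming a URL string as input rather than the pre-parsed self.url_params used above (Python 2):

import urllib
import urlparse

def with_params(url, **params):
    # Merge the new parameters over the existing query string.
    parts = urlparse.urlparse(url)
    query = dict(urlparse.parse_qsl(parts.query))
    query.update(params)
    # A ParseResult is a 6-tuple; index 4 is the query component.
    prlist = list(parts)
    prlist[4] = urllib.urlencode(query)
    return urlparse.ParseResult(*prlist).geturl()

print with_params('http://example.com/api?page=1', page='2', size='50')
# e.g. http://example.com/api?page=2&size=50 (parameter order may vary)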
Esempio n. 27
0
 def CreateFirstSearchPageUrl(self):
     query_string = {'q': self.keyword, 'count': '50'}
     url = urlparse.ParseResult('https', self.hostname, '/search', '',
                                urllib.urlencode(query_string), '')
     return url.geturl()
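
CreateFirstSearchPageUrl assembles the first results page from components rather than concatenating strings. A standalone equivalent (Python 2; the hostname and keyword below are illustrative, since self.hostname is set elsewhere in the class):

import urllib
import urlparse

def first_search_page_url(hostname, keyword, count=50):
    # ParseResult takes (scheme, netloc, path, params, query, fragment).
    query_string = urllib.urlencode({'q': keyword, 'count': str(count)})
    return urlparse.ParseResult('https', hostname, '/search', '',
                                query_string, '').geturl()

print first_search_page_url('www.bing.com', 'urlparse')
# e.g. https://www.bing.com/search?q=urlparse&count=50 (parameter order may vary)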