Example #1
    def _addDir(self, name, action_key, action_value, iconimage, is_folder):
        """
        .. py:function:: _addDir(self, name, action_key, action_value,
                            iconimage, is_folder)

        Creates a link in xbmc.

        :param name: Name of the link
        :param action_key: Name of the action to take when link selected
        :param action_value: Parameter to use with the action
        :param iconimage: Icon to use for the link
        :param is_folder: Whether the link leads to a folder or to a playable item

        """
        formatparams = {
            "base_url": self.base_url,
            "key": quote(str(action_key)),
            "value": quote(str(action_value)),
            "name": quote(str(name))
        }

        url = "{base_url}?action_key={key}&action_value={value}&name={name}".format(**formatparams)

        listitem = xbmcgui.ListItem(name,
                                    iconImage=iconimage,
                                    thumbnailImage='')
        listitem.setInfo(type="Video", infoLabels={"Title": name})

        xbmcplugin.addDirectoryItem(
            handle=self.addon_handle,
            url=url,
            listitem=listitem,
            isFolder=is_folder)
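For reference, a minimal sketch of the plugin URL this method assembles; the base URL, action and name values below are made up, and the add-on handle / ListItem calls are left out:

# Illustration only: hypothetical values showing how the quoted pieces combine.
from urllib import quote  # Python 2

base_url = "plugin://plugin.video.example/"
url = "{base_url}?action_key={key}&action_value={value}&name={name}".format(
    base_url=base_url,
    key=quote(str("play")),
    value=quote(str("http://example.com/stream.m3u8")),
    name=quote(str("My Stream")))
# url == "plugin://plugin.video.example/?action_key=play"
#        "&action_value=http%3A//example.com/stream.m3u8&name=My%20Stream"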
Example #2
    def any(self, provider_name):
        logging.info("logging in -- PROVIDER NAME IS: %s", provider_name)

        result = authomatic.login(Webapp2Adapter(self), provider_name)
        self.response.set_cookie('result', str(result))
        # user = User(result=result)
        # user.put()
        pprint.pprint('RESULT IS %s' % result)

        if result:
            if result.user:
                result.user.update()
                # Save the user name and ID to cookies so that we can use them in other handlers.
                self.response.set_cookie('user_id', result.user.id)
                logging.info('SET COOKIE WITH VALUE OF: %s', result.user.id)
                self.response.set_cookie('user_name', urllib.quote(result.user.name))

                if result.user.credentials:
                    # Serialize the credentials and store them as well.
                    serialized_credentials = result.user.credentials.serialize()
                    self.response.set_cookie('credentials', serialized_credentials)

            elif result.error:
                self.response.set_cookie('error', urllib.quote(result.error.message))

            self.redirect('/')
Example #3
    def make_request(self, env, apiCredentials):
        # prepare the post data
        data = ""
        # add payKey
        data += "payKey=" + urllib.quote(self.payKey)
        # add currencyCode
        data += "&currencyCode=" + urllib.quote(self.currencyCode)

        # add trackingId
        if hasattr(self, "trackingId"):
            data += "&trackingId=" + urllib.quote(self.trackingId)

        # add transactionId
        if hasattr(self, "transactionId"):
            data += "&transactionId=" + urllib.quote(self.transactionId)

        # add receivers
        if hasattr(self, "receiverList"):
            index = 0
            for receiver in self.receiverList:
                data += "&"
                data += receiver.serialize(index)
                index += 1

        response = PayPalRequest.make_request(self, env, apiCredentials, "Refund", data)
        payResponse = ExecutePaymentResponse(response)
        return payResponse
Example #4
def application(req):
    #doGzip = 0
    #try:
    #    if string.find(os.environ["HTTP_ACCEPT_ENCODING"], "gzip") != -1:
    #        doGzip = 1
    #except:
    #    pass

    treeid = req.params.get("treeid")
    module = req.params.get("module")
    branch = req.params.get("branch")
    mindate = req.params.get("mindate")
    maxdate = req.params.get("maxdate")
    xml_nofiles = req.params.get("xml_nofiles")

    if not treeid or not module or not branch or not mindate or not maxdate:
        raise exc.HTTPBadRequest("ERROR")

    url = bonsai + "?" + "branchtype=match&sortby=Date&date=explicit&cvsroot=%2Fcvsroot&xml=1"
    url += "&treeid=%s&module=%s&branch=%s&mindate=%s&maxdate=%s" % (quote(treeid), quote(module), quote(branch), quote(mindate), quote(maxdate))

    if xml_nofiles:
        url += "&xml_nofiles=1"

    urlstream = urllib.urlopen(url)
    resp = Response(content_type='text/xml')
    for s in urlstream:
        resp.write(s)
    urlstream.close()
    return resp
Example #5
def handle_wowwiki(bot, ievent):
    """ wikipedia <what> .. search wikipedia for <what> """
    if not ievent.rest:
        ievent.missing('<what>')
        return
    what = ""
    lang = 'en'
    for i in ievent.rest.split():
        first = i[0].upper()
        rest = i[1:]
        if i.startswith('-'):
            if len(i) != 3:
                ievent.reply('invalid option')
                return
            lang = i[1:]
            continue
        what += "%s%s " % (first, rest)
    what = what.strip().replace(' ', '_')
    url = 'http://wowwiki.com/wiki/Special:Export/%s' % quote(what.encode('utf-8'))
    url2 = 'http://wowwiki.com/wiki/%s' % quote(what.encode('utf-8'))
    txt = getwikidata(url, ievent)
    if not txt:
        return
    if '#REDIRECT' in txt or '#redirect' in txt:
        redir = ' '.join(txt.split()[1:])
        url = 'http://wowwiki.com/wiki/Special:Export/%s' % quote(redir.encode('utf-8'))
        url2 = 'http://wowwiki.com/wiki/%s' % quote(redir.encode('utf-8'))
        txt = getwikidata(url, ievent)
    if not txt:
        return
    res = ['%s ===> ' % url2, ]
    res += splittxt(striphtml(txt).strip())
    ievent.reply(res)
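A side note on the quoting used above: urllib.quote percent-encodes the UTF-8 bytes of the page title, so spaces and non-ASCII characters survive the round trip into the URL. A tiny standalone illustration (the titles are made up):

# Illustration only: Python 2 urllib.quote on UTF-8-encoded titles.
import urllib

print urllib.quote(u'Lich King'.encode('utf-8'))     # Lich%20King
print urllib.quote(u'Zul\u2019jin'.encode('utf-8'))  # Zul%E2%80%99jin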
Example #6
    def getBuildDetails(self, request, builderName, build):
        """Returns an HTML list of failures for a given build."""
        details = {}
        if not build.getLogs():
            return details
        
        for step in build.getSteps():
            (result, reason) = step.getResults()
            if result == builder.FAILURE:
                name = step.getName()

                # Remove html tags from the error text.
                stripHtml = re.compile(r'<.*?>')
                strippedDetails = stripHtml.sub('', ' '.join(step.getText()))
                
                details['buildername'] = builderName
                details['status'] = strippedDetails
                details['reason'] = reason
                logs = details['logs'] = []

                if step.getLogs():
                    for log in step.getLogs():
                        logname = log.getName()
                        logurl = request.childLink(
                          "../builders/%s/builds/%s/steps/%s/logs/%s" % 
                            (urllib.quote(builderName),
                             build.getNumber(),
                             urllib.quote(name),
                             urllib.quote(logname)))
                        logs.append(dict(url=logurl, name=logname))
        return details
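The stripHtml pattern above is a simple non-greedy tag remover. A quick illustration of what it does to step text (the input string is made up):

# Illustration only: behaviour of the non-greedy tag-stripping regex.
import re

stripHtml = re.compile(r'<.*?>')
print stripHtml.sub('', '<b>compile</b> failed, see <a href="log">log</a>')
# -> 'compile failed, see log'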
Example #7
def tag_feed(request, tag, full_text = False):
    # horrible special-case for the tag feed
    server = "http://%s"%(request.META['SERVER_NAME'] or 'localhost')
    feedclass = feed_engine( 'atom' )

    feed = feedclass(
      title = 'things tagged %s on jerakeen.org'%tag,
      link = "%s/tags/%s/"%(server, urllib.quote(tag.encode('utf-8'))),
      description = "",
      author_name = "Tom Insam",
      author_email = "*****@*****.**",
      author_link = "http://jerakeen.org/",
      feed_url = "%s/tags/%s/feed/"%(server,urllib.quote(tag.encode('utf-8'))),
    );

    tags = filter( lambda x: len(x), tag.split('+') )
    page_list = Page.pages_tagged(tags)[:15]

    for p in page_list:
        if full_text:
            data = p.body
        else:
            data = p.excerpt or p.body
        image = '<img src="%s" style="float:left" width="75" height="75">'%p.thumb_url()
        feed.add_item(
            p.title,
            p.really_absolute_url(),
            image + pagefilter( data ),
            pubdate = p.date )

    return HttpResponse(
        feed.writeString('utf-8'), mimetype = feed.mime_type )
Example #8
def create_subtitle_files(request, overwrite = True):
    rows = TutorialResource.objects.filter(Q(status = 1) | Q(status = 2))
    for row in rows:
        code = 0
        if row.language.name == 'English':
            if row.timed_script and row.timed_script != 'pending':
                script_path = settings.SCRIPT_URL.strip('/') + '?title=' + quote(row.timed_script) + '&printable=yes'
            elif row.script and row.script != 'pending':
                script_path = settings.SCRIPT_URL.strip('/') + '?title=' + quote(row.script + '-timed') + '&printable=yes'
            else:
                continue
        else:
            if row.script and row.script != 'pending':
                script_path = settings.SCRIPT_URL.strip('/') + '?title=' + quote(row.script) + '&printable=yes'
            else:
                continue
        srt_file_path = settings.MEDIA_ROOT + 'videos/' + str(row.tutorial_detail.foss_id) + '/' + str(row.tutorial_detail_id) + '/'
        srt_file_name = row.tutorial_detail.tutorial.replace(' ', '-') + '-' + row.language.name + '.srt'
        # print srt_file_name
        if not overwrite and os.path.isfile(srt_file_path + srt_file_name):
            continue
        try:
            code = urlopen(script_path).code
        except Exception, e:
            code = e.code
        result = ''
        if(int(code) == 200):
            if generate_subtitle(script_path, srt_file_path + srt_file_name):
                print 'Success: ', row.tutorial_detail.foss.foss + ',', srt_file_name
            else:
                print 'Failed: ', row.tutorial_detail.foss.foss + ',', srt_file_name
Example #9
    def urlencode(self, safe=None):
        """
        Returns an encoded string of all query string arguments.

        :arg safe: Used to specify characters which do not require quoting, for
            example::

                >>> q = QueryDict('', mutable=True)
                >>> q['next'] = '/a&b/'
                >>> q.urlencode()
                'next=%2Fa%26b%2F'
                >>> q.urlencode(safe='/')
                'next=/a%26b/'

        """
        output = []
        if safe:
            encode = lambda k, v: '%s=%s' % ((quote(k, safe), quote(v, safe)))
        else:
            encode = lambda k, v: urlencode({k: v})
        for k, list_ in self.lists():
            k = smart_str(k, self.encoding)
            output.extend([encode(k, smart_str(v, self.encoding))
                           for v in list_])
        return '&'.join(output)
Example #10
    def create_auth_url(repo_info):
        # Accepted repo url formats
        # "https://host.com/path/to/repo.git"
        # "https://[email protected]/path/to/repo.git"
        # "https://*****:*****@host.org/path/to/repo.git" NOT RECOMMENDED
        # IMPORTANT: if the credentials are provided in the repo url, they must be url encoded
        if repo_info.repo_username is not None or repo_info.repo_password is not None:
            # credentials provided, have to modify url
            repo_url = repo_info.repo_url
            url_split = repo_url.split("://", 1)

            # urlencode repo username and password
            urlencoded_username = urllib.quote(repo_info.repo_username.strip(), safe='')
            urlencoded_password = urllib.quote(repo_info.repo_password.strip(), safe='')
            if "@" in url_split[1]:
                # credentials seem to be in the url, check
                at_split = url_split[1].split("@", 1)
                if ":" in at_split[0]:
                    # both username and password are in the url, return as is
                    return repo_info.repo_url
                else:
                    # only username is provided, need to include password
                    username_in_url = at_split[0].split(":", 1)[0]
                    return str(url_split[0] + "://" + username_in_url + ":" + urlencoded_password
                               + "@" + at_split[1])
            else:
                # no credentials in the url, need to include username and password
                return str(url_split[0] + "://" + urlencoded_username + ":" + urlencoded_password + "@" + url_split[1])
        # no credentials specified, return as is
        return repo_info.repo_url
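A minimal sketch of the branch that injects credentials, assuming a hypothetical repository whose URL does not contain credentials yet; note how quote(..., safe='') also escapes '@' and '/' inside the password:

# Illustration only: hypothetical values for the no-credentials-in-URL branch.
import urllib

repo_url = "https://host.com/path/to/repo.git"
username = urllib.quote("ci-bot".strip(), safe='')
password = urllib.quote("p@ss/word".strip(), safe='')
scheme, rest = repo_url.split("://", 1)
auth_url = scheme + "://" + username + ":" + password + "@" + rest
# auth_url == "https://ci-bot:p%40ss%2Fword@host.com/path/to/repo.git"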
Example #11
def _encode_multipart(vars, content_type):
    """Encode a multipart request body into a string"""
    boundary_match = re.search(r'boundary=([^ ]+)', content_type, re.I)
    if not boundary_match:
        raise ValueError('Content-type: %r does not contain boundary' % content_type)
    boundary = boundary_match.group(1).strip('"')
    lines = []
    for name, value in vars.iteritems():
        lines.append('--%s' % boundary)
        ## FIXME: encode the name like this?
        assert name is not None, 'Value associated with no name: %r' % value
        disp = 'Content-Disposition: form-data; name="%s"' % urllib.quote(name)
        if getattr(value, 'filename', None):
            disp += '; filename="%s"' % urllib.quote(value.filename)
        lines.append(disp)
        ## FIXME: should handle value.disposition_options
        if getattr(value, 'type', None):
            ct = 'Content-type: %s' % value.type
            if value.type_options:
                ct += ''.join(['; %s="%s"' % (ct_name, urllib.quote(ct_value))
                               for ct_name, ct_value in sorted(value.type_options.items())])
            lines.append(ct)
        lines.append('')
        if hasattr(value, 'value'):
            lines.append(value.value)
        else:
            lines.append(value)
    lines.append('--%s--' % boundary)
    return '\r\n'.join(lines)
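A short usage sketch of the helper above, assuming it is importable and that vars maps plain field names to string values (no file uploads), with a made-up boundary already present in the content type:

# Illustration only: encoding two simple form fields.
body = _encode_multipart({'name': 'Alice', 'color': 'blue'},
                         'multipart/form-data; boundary=BOUNDARY123')
# body is a CRLF-joined string of the form:
#   --BOUNDARY123
#   Content-Disposition: form-data; name="name"
#
#   Alice
#   ... (one part per field) ...
#   --BOUNDARY123--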
Example #12
def write_installed_packages_list():
    """
    Saves the installed package list and
    their locations (which file gets overwritten
    depends on whether we run as root or as a normal user)
    """
    global installed_packages_list
    try:
        tmp=open(os.path.join(dataset_conf_path,"installed.lst.2"),"w")
    except IOError:
        raise RuntimeError("[cf] fatal: cannot create temp file")
    else:
        # ok, probably worked?
        for package in installed_packages_list.values():
            # adds only packages that are readable for
            # this user (maybe some site-installed datasets
            # are out of his reach)
            #
            if package.where!=None and \
                    file_access_rights(os.path.join(package.where,package.name),
                                      os.F_OK | os.R_OK):
                print(
                    " ".join(map(str,[ package.name,
                                       package.timestamp,
                                       package.readable_size,
                                       urllib.quote(package.source,"/:~"),
                                       urllib.quote(package.where,"/:~") ] )),
                    file=tmp)

        # replace the installed.lst in
        # a safe way
        atomic_replace(os.path.join(dataset_conf_path,"installed.lst.2"),
                       os.path.join(dataset_conf_path,"installed.lst"))
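The two quote calls above pass a custom safe set, "/:~", so URLs and home-directory paths stay readable in installed.lst while spaces and other characters are still escaped. A small illustration (the paths are made up):

# Illustration only: effect of the "/:~" safe set.
import urllib

print(urllib.quote("http://example.org/~user/data set", "/:~"))
# -> http://example.org/~user/data%20set
print(urllib.quote("/srv/data sets/foo", "/:~"))
# -> /srv/data%20sets/foo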
Example #13
    def _gen_signed_data(self, base_url, method="GET", sign_method='HMAC-SHA1', **params):
        args = {'oauth_consumer_key': self.consumer_key,
                'oauth_timestamp': self.__timestamp(),
                'oauth_nonce': self.__nonce(),
                'oauth_version': '1.0'}
                
        args.update(params)

        if sign_method == 'HMAC-SHA1':
            args['oauth_signature_method'] = 'HMAC-SHA1'

            key = self.consumer_secret + "&"

            if self.token is not None:
                args['oauth_token'] = self.token.oauth_token
                key += urllib.quote(self.token.oauth_token_secret, '')

            #would use urlencode, but it doesn't sort arguments
            #pargs = [sorted('%s=%s' % (k,v) for k,v in args.values())]
            message = '&'.join(
                    urllib.quote(i, '') for i in [method.upper(), base_url,
                                    urllib.urlencode(sorted(args.iteritems()))])

            args['oauth_signature'] = hmac.new(key, message, hashlib.sha1
                                                ).digest().encode('base64')[:-1]

        # Add other sign_methods here   
        else:
            raise self.UnknownSignatureException("Unknown signature method %s" % sign_method)

        return args
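The signing code relies on urllib.quote(value, '') to percent-encode everything except letters, digits and '_.-', whereas the default safe argument ('/') leaves slashes alone. A quick illustration of the difference:

# Illustration only: default quoting vs. the empty safe set used for OAuth.
import urllib

print urllib.quote('a/b c=d')        # a/b%20c%3Dd   ('/' left alone by default)
print urllib.quote('a/b c=d', '')    # a%2Fb%20c%3Dd ('/' percent-encoded too)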
Example #14
    def __init__(self, environ, start_response, charset='utf-8'):
        self.charset = charset
        self.start_response = start_response
        self.environ = environ
        self.environ['REQUEST_URI'] = get_full_url(self.environ)

        # copy a reference to the request object
        # into the environ so wsgi middlewares
        # can communicate with it.
        environ['colubrid.request'] = self

        # get absolute path to script
        root = self.environ.get('SCRIPT_NAME', '/')
        if not root or not root.startswith('/'):
            root = '/' + root
        self.environ['SCRIPT_ROOT'] = root

        # get the full application request
        url = ''.join([
            quote(self.environ['SCRIPT_NAME']),
            quote(self.environ.get('PATH_INFO', ''))
        ])
        if not url.startswith('/'):
            url = '/' + url
        self.environ['APPLICATION_REQUEST'] = url
Example #15
 def test_default_quoting(self):
     # Make sure all characters that should be quoted are by default sans
     # space (separate test for that).
     should_quote = [chr(num) for num in range(32)]  # For 0x00 - 0x1F
     should_quote.append('<>#%"{}|\^[]`')
     should_quote.append(chr(127))  # For 0x7F
     should_quote = "".join(should_quote)
     for char in should_quote:
         result = urllib.quote(char)
         self.assertEqual(
             hexescape(char),
             result,
             "using quote(): %s should be escaped to %s, not %s" % (char, hexescape(char), result),
         )
         result = urllib.quote_plus(char)
         self.assertEqual(
             hexescape(char),
             result,
             "using quote_plus(): " "%s should be escapes to %s, not %s" % (char, hexescape(char), result),
         )
     del should_quote
     partial_quote = "ab[]cd"
     expected = "ab%5B%5Dcd"
     result = urllib.quote(partial_quote)
     self.assertEqual(expected, result, "using quote(): %s != %s" % (expected, result))
     result = urllib.quote_plus(partial_quote)
     self.assertEqual(expected, result, "using quote_plus(): %s != %s" % (expected, result))
     self.assertRaises(TypeError, urllib.quote, None)
Example #16
 def render(self, wiki, page, request, wikiURL='..'):
     if self.protocol == 'mailto':
         return u'<a href="%s:%s">%s</a>'%(
             self.protocol, quote(self.text), self.text)
     else:
         return u'<a href="%s://%s">%s</a>'%(
             self.protocol, quote(self.text), self.text)
Example #17
def get_current_url(environ, root_only=False, strip_querystring=False,
                    host_only=False):
    """A handy helper function that recreates the full URL for the current
    request or parts of it.  Here is an example:

    >>> from werkzeug import create_environ
    >>> env = create_environ("/?param=foo", "http://localhost/script")
    >>> get_current_url(env)
    'http://localhost/script/?param=foo'
    >>> get_current_url(env, root_only=True)
    'http://localhost/script/'
    >>> get_current_url(env, host_only=True)
    'http://localhost/'
    >>> get_current_url(env, strip_querystring=True)
    'http://localhost/script/'

    :param environ: the WSGI environment to get the current URL from.
    :param root_only: set to `True` if you only want the root URL.
    :param strip_querystring: set to `True` if you don't want the querystring.
    :param host_only: set to `True` if the host URL should be returned.
    """
    tmp = [environ['wsgi.url_scheme'], '://', get_host(environ)]
    cat = tmp.append
    if host_only:
        return ''.join(tmp) + '/'
    cat(urllib.quote(environ.get('SCRIPT_NAME', '').rstrip('/')))
    if root_only:
        cat('/')
    else:
        cat(urllib.quote('/' + environ.get('PATH_INFO', '').lstrip('/')))
        if not strip_querystring:
            qs = environ.get('QUERY_STRING')
            if qs:
                cat('?' + qs)
    return ''.join(tmp)
Example #18
def login( username, password,site="www.planet-lab.org"):

   print "Using site:",site

   if site == "www.planet-lab.org":
      authtype = "PLauthenticate"
   elif site == "www.planet-lab.eu":
      authtype = "PLEauthenticate"
   else:
      raise ValueError, "Unrecognized site: "+site

   # the curl command to login
   command = arizonaconfig.get_option("curlpath")+" -L -k -d \"%USERINFO%\" -D GetCookie "+repository+":8081/stork/login.php?"

   # build the user info string
   info = "username="******"&password="******"&authmethod="+urllib.quote(authtype)
   command  = command.replace( "%USERINFO%", info )

   # try to run the command and see what we get
   (sin, sout, serr) = os.popen3( command )

   outstring = sout.read()
   errstring = serr.read()

   sout.close()
   serr.close()

   if "incorrect username" not in outstring:
      return True

   else:
      return False
Example #19
  def testShellCommand(self):
    class TestClient(WebSocketBaseClient):
      def __init__(self, *args, **kwargs):
        super(TestClient, self).__init__(*args, **kwargs)
        self.answer = subprocess.check_output(['uname', '-r'])
        self.test_run = False

      def closed(self, code, reason=None):
        if not self.test_run:
          raise RuntimeError('test exit before being run: %s' % reason)

      def handshake_ok(self):
        pass

      def received_message(self, msg):
        self.test_run = True
        assert msg.data == self.answer
        raise CloseWebSocket

    clients = self._GetJSON('/api/agents/list')
    assert len(clients) > 0

    for client in clients:
      ws = TestClient('ws://' + _HOST + '/api/agent/shell/%s' %
                      urllib.quote(client['mid']) + '?command=' +
                      urllib.quote('uname -r'))
      ws.connect()
      try:
        ws.run()
      except TestError as e:
        raise e
      except CloseWebSocket:
        ws.close()
Example #20
 def GenStepBox(stepstatus):
   """Generates a box for one step."""
   class_ = build_get_class(stepstatus)
   style = ''
   if class_ and class_ in styles:
     style = styles[class_]
   stepname = stepstatus.getName()
   text = stepstatus.getText() or []
   text = text[:]
   base_url = '%sbuilders/%s/builds/%d/steps' % (
       waterfall_url,
       urllib.quote(stepstatus.getBuild().getBuilder().getName(), safe=''),
       stepstatus.getBuild().getNumber())
   for steplog in stepstatus.getLogs():
     name = steplog.getName()
     log.msg('name = %s' % name)
     url = '%s/%s/logs/%s' % (
         base_url,
         urllib.quote(stepname, safe=''),
         urllib.quote(name))
     text.append('<a href="%s">%s</a>' % (url, html.escape(name)))
   for name, target in stepstatus.getURLs().iteritems():
     text.append('<a href="%s">%s</a>' % (target, html.escape(name)))
   fmt = '<tr><td style="%s">%s</td></tr>'
   return fmt % (style, '<br/>'.join(text))
Example #21
 def _urlencode(self, h):
     rv = []
     for k,v in h.iteritems():
         rv.append('%s=%s' %
             (urllib.quote(k.encode("utf-8")),
             urllib.quote(v.encode("utf-8"))))
     return '&'.join(rv)
Example #22
    def request(self, type, data, update=True):
        query = []

        # if user has defined their own _t, then include necessary _d
        if "_t" in data:
            data["_d"] = 1
        else:
            data["_t"] = self.now().strftime("%s")

        # add customer key to data sent
        data["_k"] = self._key

        if update:
            data["_p"] = self._id

        for key, val in data.items():
            query.append(urllib.quote(str(key)) + "=" + urllib.quote(str(val)))

        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            host, port = self._host.split(":")
            sock.connect((host, int(port)))
            sock.setblocking(0)  # 0 is non-blocking

            get = "GET /" + type + "?" + "&".join(query) + " HTTP/1.1\r\n"
            out = get
            out += "Host: " + host + "\r\n"
            out += "Connection: Close\r\n\r\n"
            sock.send(out)
            sock.close()
        except:
            self.logm("Could not transmit to " + self._host)
Example #23
def replace_or_add_query(url, query, exclusions=None):
    """
    Adds a field/value pair to the provided url as a query string if the
    key isn't already in the url, or replaces its value otherwise.

    Appends the proper pair separator (?&) based on the input url

    Inputs:
    :url: URL that query string should be appended to
    :query: Query string(s) to add to :url:
    :exclusions: List of keys that should not be copied; common keys
        include 'vs' and 'z'

    Outputs:
    :url: Input url with query string appended
    """
    if not exclusions:
        exclusions = []
    if len(query) > 1 and query[0] in ['?', '&']:
        query = query[1:]
        query = query.encode('utf-8')
        url = url.encode('utf-8')
        url = urlparse.urlparse(url)
        old_query = urlparse.parse_qsl(url.query, keep_blank_values=True)
        old_keys = [q[0] for q in old_query]
        # make a lower-case copy of old_keys so we can do some comparisons
        insensitive_keys = map(str.lower, old_keys)

        new_query = urlparse.parse_qsl(query, keep_blank_values=True)

        # For each source code that we are going to add
        for new_index in range(len(new_query)):
            # Make sure we are not adding a source code that should be excluded
            if new_query[new_index][0] not in exclusions:
                try:
                    # Case-insensitively determine if the new source code
                    # is already applied
                    old_index = insensitive_keys.index(
                        new_query[new_index][0].lower())
                except ValueError:
                    # The current source code is not applied; apply it
                    old_query.append(new_query[new_index])
                else:
                    # The current source code is already applied; replace its
                    # value, keeping the case of the old parameter
                    old_query[old_index] = (old_query[old_index][0],
                                            new_query[new_index][1])

        # parse_qsl unencodes the query that you pass it; Re-encode the query
        # parameters when reconstructing the string.
        old_query = '&'.join(['='.join([urllib.quote(k, safe=','),
                                        urllib.quote(v, safe=',')])
                             for k, v in old_query])
        url = url._replace(query=old_query)
        url = urlparse.urlunparse(url)
    else:
        parts = url.split('#')
        parts[0] += query
        url = '#'.join(parts)
    return url
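A brief usage sketch, assuming the function behaves as its docstring describes (existing keys replaced, new keys appended, the leading separator stripped from the incoming query); the URL and parameters are made up:

# Illustration only: replacing one parameter and adding another.
url = replace_or_add_query('http://example.com/jobs/?src=old&page=2',
                           '&src=new&utm=email')
# -> 'http://example.com/jobs/?src=new&page=2&utm=email'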
Example #24
    def execute_request(self, method, *stuff) :

        enc_method = urllib.quote(method)
        url = self.host + self.endpoint + enc_method + ".json"

        if len(stuff) :

            enc_stuff = []
            
            for thing in stuff :
                if not thing :
                    continue
                
                enc_stuff.append(urllib.quote(thing))

            if len(enc_stuff) :
                url = "%s/%s" % (url, "/".join(enc_stuff))

        data = None
        
        headers = {
            'api_key' : self.api_key,
            }

        try :
            req = urllib2.Request(url, data, headers)
            res = urllib2.urlopen(req)
        except Exception, e :
            raise e
Example #25
    def Configure(self, prefix="XBMC-Event", xbmcip="192.168.1.1", xbmchttpport=8080, zone="224.0.0.2", port=8278, selfXbmceventbroadcast=False, payDelim="<b></b>"):
        panel = eg.ConfigPanel(self)

        editCtrl = panel.TextCtrl(prefix)
        xbmcipCtrl = panel.TextCtrl(xbmcip)
        xbmchttpportCtrl = panel.SpinIntCtrl(xbmchttpport, min=1, max=65535)
        zoneCtrl = panel.TextCtrl(zone)
        portCtrl = panel.SpinIntCtrl(port, min=1, max=65535)
        selfXbmceventbroadcastCtrl=panel.CheckBox(selfXbmceventbroadcast)
        payDelimCtrl = panel.TextCtrl(payDelim)

        panel.AddLine(self.text.eventPrefix, editCtrl)
        panel.AddLine(self.text.xbmcip, xbmcipCtrl)
        panel.AddLine(self.text.xbmchttpport, xbmchttpportCtrl)
        panel.AddLine(self.text.zone, zoneCtrl)
        panel.AddLine(self.text.port, portCtrl)

        panel.AddLine(self.text.selfXbmceventbroadcast,selfXbmceventbroadcastCtrl)
        panel.AddLine("Payload Delimiter", payDelimCtrl)

        while panel.Affirmed():
            panel.SetResult(editCtrl.GetValue(),xbmcipCtrl.GetValue(),int(xbmchttpportCtrl.GetValue()),zoneCtrl.GetValue(),int(portCtrl.GetValue()),selfXbmceventbroadcastCtrl.GetValue(), payDelimCtrl.GetValue() )
            v_header = urllib.quote("This is the Header")

            v_message = urllib.quote("This is the Message")

            host_xbmc = xbmcipCtrl.GetValue()
            port_xbmc = int(xbmchttpportCtrl.GetValue())
            udp_xbmc = int(portCtrl.GetValue())
            url_xbmc = "http://" + str(host_xbmc) + ":" + str(port_xbmc) + "/xbmcCmds/xbmcHttp?command=SetBroadcast&parameter=2;" + str(udp_xbmc) + "(Notification(" + v_header + "," + v_message + "))"
            print str(url_xbmc)
            try:
                urllib.urlopen(url_xbmc)
            except IOError:
                print 'Connection error'
Example #26
 def _get_all(self, element_map, initial_query_string='',
              headers=None, **params):
     l = []
     for k,v in params.items():
         k = k.replace('_', '-')
         if  k == 'maxkeys':
             k = 'max-keys'
         if isinstance(v, unicode):
             v = v.encode('utf-8')
         if v is not None and v != '':
             l.append('%s=%s' % (urllib.quote(k), urllib.quote(str(v))))
     if len(l):
         s = initial_query_string + '&' + '&'.join(l)
     else:
         s = initial_query_string
     response = self.connection.make_request('GET', self.name,
             headers=headers, query_args=s)
     body = response.read()
     boto.log.debug(body)
     if response.status == 200:
         rs = ResultSet(element_map)
         h = handler.XmlHandler(rs, self)
         xml.sax.parseString(body, h)
         return rs
     else:
         raise self.connection.provider.storage_response_error(
             response.status, response.reason, body)
Example #27
 def get_authenticated_user(self, server_ticket):
     """
     Requests the user's information from the CAS server using the given
     *server_ticket* and calls ``self._on_auth`` with the resulting user
     dict.
     """
     cas_version = self.settings.get('cas_version', 2)
     cas_server = self.settings.get('cas_server')
     ca_certs = self.settings.get('cas_ca_certs', None)
     if not cas_server.endswith('/'):
         cas_server += '/'
     service_url = "%sauth" % self.base_url
     #validate the ST
     validate_suffix = 'proxyValidate'
     if cas_version == 1:
         validate_suffix = 'validate'
     validate_url = (
         cas_server +
         validate_suffix +
         '?service=' +
         quote(service_url) +
         '&ticket=' +
         quote(server_ticket)
     )
     logging.debug("Fetching CAS URL: %s" % validate_url)
     validate_cert = False
     if ca_certs:
         validate_cert = True
     http_client = tornado.httpclient.AsyncHTTPClient()
     http_client.fetch(
         validate_url, validate_cert=validate_cert, callback=self._on_auth)
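For reference, a sketch of the validation URL the code above builds, with made-up CAS server, service URL and ticket values:

# Illustration only: shape of the CAS proxyValidate URL.
from urllib import quote

cas_server = 'https://cas.example.edu/cas/'
service_url = 'https://app.example.edu/auth'
server_ticket = 'ST-12345-abcde'
validate_url = (cas_server + 'proxyValidate' +
                '?service=' + quote(service_url) +
                '&ticket=' + quote(server_ticket))
# -> https://cas.example.edu/cas/proxyValidate?service=https%3A//app.example.edu/auth&ticket=ST-12345-abcde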
Example #28
 def handle_upload(self):
   results = []
   blob_keys = []
   for name, fieldStorage in self.request.POST.items():
       if type(fieldStorage) is unicode:
           continue
       result = {}
       result['name'] = re.sub(r'^.*\\','',fieldStorage.filename)
       result['type'] = fieldStorage.type
       result['size'] = self.get_file_size(fieldStorage.file)
       if self.validate(result):
           blob_key = str(self.write_blob(fieldStorage.value, result))
           blob_keys.append(blob_key)
           result['deleteType'] = 'DELETE'
           result['deleteUrl'] = self.request.host_url +\
               '/?key=' + urllib.quote(blob_key, '')
           if (IMAGE_TYPES.match(result['type'])):
               try:
                   result['url'] = images.get_serving_url(
                       blob_key,
                       secure_url=self.request.host_url.startswith(
                           'https'
                       )
                   )
                   result['thumbnailUrl'] = result['url'] +\
                       THUMBNAIL_MODIFICATOR
               except:  # Could not get an image serving url
                   pass
           if not 'url' in result:
               result['url'] = self.request.host_url +\
                   '/' + blob_key + '/' + urllib.quote(
                       result['name'].encode('utf-8'), '')
       results.append(result)
   deferred.defer(cleanup,blob_keys,_countdown=EXPIRATION_TIME)
   return results
Example #29
 def find_books(self, search_text):
     if _NEW_TOOLBAR_SUPPORT:
         self.enable_button(False)
     else:
         self._books_toolbar.enable_button(False)
     self.clear_downloaded_bytes()
     textbuffer = self.textview.get_buffer()
     textbuffer.set_text(_('Performing lookup, please wait') + '...')
     self.book_selected = False
     self.ls.clear()
     search_tuple = search_text.lower().split()
     if len(search_tuple) == 0:
         self._alert(_('Error'), _('You must enter at least one search word.'))
         if _NEW_TOOLBAR_SUPPORT:
             self.search_entry.grab_focus()
         else:
             self._books_toolbar.search_entry.grab_focus()
         return
     FL = urllib.quote('fl[]')
     SORT = urllib.quote('sort[]')
     self.search_url = 'http://www.archive.org/advancedsearch.php?q=' +  \
         urllib.quote('(title:(' + search_text.lower() + ') OR creator:(' + search_text.lower() +')) AND format:(DJVU)')
     self.search_url += '&' + FL + '=creator&' + FL + '=description&' + FL + '=format&' + FL + '=identifier&'  \
         + FL + '=language'
     self.search_url += '&' + FL +  '=publisher&' + FL + '=subject&' + FL + '=title&' + FL + '=volume'
     self.search_url += '&' + SORT + '=title&' + SORT + '&' + SORT + '=&rows=500&save=yes&fmt=csv&xmlsearch=Search'
     GObject.idle_add(self.download_csv,  self.search_url)
Example #30
File: obj.py Project: saebyuk/swift
 def _listing_pages_iter(self, lcontainer, lprefix, env):
     lpartition, lnodes = self.app.container_ring.get_nodes(
         self.account_name, lcontainer)
     marker = ''
     while True:
         lreq = Request.blank('i will be overridden by env', environ=env)
         # Don't quote PATH_INFO, by WSGI spec
         lreq.environ['PATH_INFO'] = \
             '/%s/%s' % (self.account_name, lcontainer)
         lreq.environ['REQUEST_METHOD'] = 'GET'
         lreq.environ['QUERY_STRING'] = \
             'format=json&prefix=%s&marker=%s' % (quote(lprefix),
                                                  quote(marker))
         lnodes = self.app.sort_nodes(lnodes)
         lresp = self.GETorHEAD_base(
             lreq, _('Container'), lpartition, lnodes, lreq.path_info,
             len(lnodes))
         if 'swift.authorize' in env:
             lreq.acl = lresp.headers.get('x-container-read')
             aresp = env['swift.authorize'](lreq)
             if aresp:
                 raise ListingIterNotAuthorized(aresp)
         if lresp.status_int == HTTP_NOT_FOUND:
             raise ListingIterNotFound()
         elif not is_success(lresp.status_int):
             raise ListingIterError()
         if not lresp.body:
             break
         sublisting = json.loads(lresp.body)
         if not sublisting:
             break
         marker = sublisting[-1]['name'].encode('utf-8')
         yield sublisting
Example #31
 def test_euc_jp(self):
     url = urllib.quote(self.raw_url.encode('euc_jp'))
     eq_(render('{{ url|display_url }}', {'url': url}), self.raw_url)
Example #32
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url1 = urlparse.urljoin(self.base_link, url)

            result, headers, content, cookie = client.request(url1, output='extended')

            try:
                auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
                auth = 'Bearer %s' % urllib.unquote_plus(auth)
            except:
                auth = 'Bearer false'

            headers['Authorization'] = auth
            headers['X-Requested-With'] = 'XMLHttpRequest'
            #headers['Content-Type']='application/x-www-form-urlencoded; charset=UTF-8'
            #headers['Accept'] = 'application/json, text/javascript, */*; q=0.01'
            headers['Cookie'] = cookie

            u = '/ajax/nembeds.php'
            u = urlparse.urljoin(self.base_link, u)

            #action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
            if '/episode/' in url:
                url = urlparse.urljoin(self.base_link,  '/tv-series'+ url)
                action = 'getEpisodeEmb'
            else:
                action = 'getMovieEmb'
                url = urlparse.urljoin(self.base_link, '/tv-series' + url)

            headers['Referer'] = url
            control.sleep(200)

            elid = urllib.quote(base64.encodestring(str(int(time.time()))).strip())

            token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]

            idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]

            post = {'action': action, 'idEl': idEl, 'token': token, 'elid': elid}
            post = urllib.urlencode(post)
            print post
            print headers


            r = client.request(u, post=post, headers=headers, output='cookie2')
            print("####",r)
            r = str(json.loads(r))
            r = client.parseDOM(r, 'iframe', ret='.+?') + client.parseDOM(r, 'IFRAME', ret='.+?')

            links = []

            for i in r:
                try: links += [{'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'url': i}]
                except: pass

            links += [{'source': 'openload', 'quality': 'SD', 'url': i} for i in r if 'openload.co' in i]
            links += [{'source': 'videomega', 'quality': 'SD', 'url': i} for i in r if 'videomega.tv' in i]
            for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'MoviesHD', 'url': i['url']})

            return sources
        except:
            return sources
Example #33
    def sources(self, url):
        sources = []
        try:
            # print("ONEMOVIES SOURCES", url)

            if url == None: return sources
            referer = url
            headers = {'User-Agent': random_agent()}
            url = url.replace('/watching.html', '')
            html = requests.get(url, headers=headers).content
            # print ("ONEMOVIES Source", html)
            try:
                url, episode = re.findall('(.+?)\?episode=(\d*)$', url)[0]
            except:
                episode = None
            vid_id = re.findall('-(\d+)', url)[-1]
            # print ("ONEMOVIES", vid_id)
            quality = re.findall('<span class="quality">(.*?)</span>', html)
            quality = str(quality)
            if quality == 'cam' or quality == 'ts':
                quality = 'CAM'
            elif quality == 'hd':
                quality = '720'
            else:
                quality = '480'
            try:
                headers = {'X-Requested-With': 'XMLHttpRequest'}
                headers['Referer'] = referer
                headers['User-Agent'] = random_agent()
                u = urlparse.urljoin(self.base_link, self.server_link % vid_id)
                # print("SERVERS", u)
                r = BeautifulSoup(requests.get(u, headers=headers).content)
                # print("SERVERS", r)
                containers = r.findAll('div', attrs={'class': 'les-content'})
                for result in containers:
                    links = result.findAll('a')
                    # print("ONEMOVIES", links)
                    for link in links:
                        title = str(link['title'])
                        # print("ONEMOVIES TITLE", title)
                        if not episode == None:
                            title = re.findall('Episode\s+(\d+):', title)[0]
                            title = '%01d' % int(title)
                            if title == episode:
                                episode_id = str(link['episode-id'])
                            # print("ONEMOVIES EPISODE", episode_id)
                            else:
                                continue

                        else:
                            episode_id = str(link['episode-id'])
                        onclick = str(link['onclick'])

                        key_gen = ''.join(
                            random.choice(string.ascii_lowercase +
                                          string.digits) for x in range(16))
                        ################# FIX FROM MUCKY DUCK & XUNITY TALK ################
                        key = '87wwxtp3dqii'
                        key2 = '7bcq9826avrbi6m49vd7shxkn985mhod'
                        cookie = hashlib.md5(episode_id +
                                             key).hexdigest() + '=%s' % key_gen
                        a = episode_id + key2
                        b = key_gen
                        i = b[-1]
                        h = b[:-1]
                        b = i + h + i + h + i + h
                        hash_id = uncensored(a, b)
                        ################# FIX FROM MUCKY DUCK & XUNITY TALK ################

                        serverurl = self.base_link + '/ajax/v2_get_sources/' + episode_id + '?hash=' + urllib.quote(
                            hash_id)
                        # print ("playurl ONEMOVIES", serverurl)

                        headers = {
                            'Accept-Language': 'en-US',
                            'Cookie': cookie,
                            'Referer': referer,
                            'User-Agent':
                            'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.89 Safari/537.36',
                            'X-Requested-With': 'XMLHttpRequest'
                        }
                        # print ("playurl ONEMOVIES", headers)
                        result = requests.get(serverurl,
                                              headers=headers).content
                        # print ("RESULT ONEMOVIES", result)
                        result = result.replace('\\', '')
                        # print ("ONEMOVIES Result", result)
                        url = re.findall('"?file"?\s*:\s*"(.+?)"', result)
                        url = [googletag(i) for i in url]
                        url = [i[0] for i in url if len(i) > 0]
                        u = []
                        try:
                            u += [[i for i in url
                                   if i['quality'] == '1080p'][0]]
                        except:
                            pass
                        try:
                            u += [[i for i in url if i['quality'] == '720'][0]]
                        except:
                            pass
                        try:
                            u += [[i for i in url if i['quality'] == '480'][0]]
                        except:
                            pass
                        url = replaceHTMLCodes(u[0]['url'])
                        quality = googletag(url)[0]['quality']

                        # print ("ONEMOVIES PLAY URL", quality, url)

                        sources.append({
                            'source': 'google video',
                            'quality': quality,
                            'scraper': self.name,
                            'url': url,
                            'direct': True
                        })
            except:
                pass

        except:
            pass
        return sources
Example #34
 def path_to(self, course_id, filename):
     """Return the full path to a given file for a given course."""
     return os.path.join(
         self.root_path,
         urllib.quote(course_id.to_deprecated_string(), safe=''), filename)
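Because safe='' is passed, even the slashes inside a course id are percent-encoded, so the whole id stays a single path component. A small illustration with a made-up root path and course id:

# Illustration only: quoting a slash-separated course id into one path segment.
import os
import urllib

course_id = 'MITx/6.00x/2013_Spring'
print os.path.join('/edx/var/reports', urllib.quote(course_id, safe=''), 'grades.csv')
# -> /edx/var/reports/MITx%2F6.00x%2F2013_Spring/grades.csv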
Example #35
def get_request_form_sp(request_users,
                        submit_value,
                        title,
                        body,
                        callbackurl,
                        mobile_url,
                        touch_url,
                        option_params={}):
    u"""
    リクエストサービスのリンクを作成し、返す
    スマートフォン版

    外部テンプレート: gsocial/templates/opensocial/request_sp.html

    使い方::

        {% get_request_form <request_users> <submit_value> <title> <body> <callbackurl> <mobile_url> <touch_url> <option_params> %}

    引数:

        :request_users: リクエストを送信したいユーザリスト
                    (to_user_id[]に渡すパラメータ)
        :submit_value: 送信ボタンの文言
        :title: リクエストのタイトル(必須)
        :body: 本文
        :callbackurl: リクエストした後に遷移するURL
        :mobile_url: リクエストをユーザがクリックした際の飛び先のURL(FP)
        :touch_url: リクエストをユーザがクリックした際の飛び先のURL(SP)
        :option_params: オプションディクショナリ

    option_params:

        :backto_url: リクエスト送信確認画面からアプリへ戻るためのURL
        :mobile_image: メッセージに含める画像のURL(FP)
        :touch_image: メッセージに含める画像のURL(SP)
        :list_type: リクエストの対象となるユーザの種別
        :editable: メッセージをユーザに入力させる
        :expire_time: リクエストが期限切れとなる日時(UTC FORMAT)

    Create the link for the request service and return it.(For SP)

    Arguments:

        :request_users: A Userlist whom you want to send request.(A parameter to be handed to to_user_id[])
        :submit_value: A letter on submit botton
        :title: The title of request(indispensable)
        :body: Message
        :callbackurl: A URL which will be redirected after the request.
        :mobile_url: A URL which will be redirected after the click(FP).
        :touch_url: A URL which will be redirected after the click(SP).
        :option_params: a optional dictionary

    option_params:

         :backto_url: A URL of the application from the "request sent confirmation"screen
         :mobile_image: URL of the image which will be contain in the message(FP)
         :touch_image: URL of the image which will be contain in the message(SP)
         :list_type: The type of subjected users.
         :editable: Whether User is allowed to edit or not.
         :expire_time: The expire date of the request(UTC FORMAT)
    """
    # check the contents of option_params
    # TODO: this could be cleaner
    keys = option_params.keys()
    backto_url = option_params['backto_url'] if 'backto_url' in keys else None
    mobile_image = option_params[
        'mobile_image'] if 'mobile_image' in keys else None
    touch_image = option_params[
        'touch_image'] if 'touch_image' in keys else None
    list_type = option_params['list_type'] if 'list_type' in keys else None
    editable = option_params['editable'] if 'editable' in keys else None
    expire_time = option_params[
        'expire_time'] if 'expire_time' in keys else None

    # title is required, so validate it
    if not title:
        Log.error('title is empty.', body)
        raise

    # if list_type was not set, default to "specified"
    if list_type == None:
        list_type = 'specified'

    # body is required unless editable is True, so validate it
    # TODO: when editable == True, let the user enter the message themselves,
    # which means the form-generating HTML needs to render an input field for it
    if editable != True:
        if body == None:
            Log.error('body is empty.', body)
            raise

    # URL-encode the callback URL
    callbackurl = urllib.quote(callbackurl)

    # create the parameter to be handed to user_to_id
    request_user_ids_str = ','.join(request_users)

    return {
        'request_users': request_user_ids_str,
        'submit_value': submit_value,
        'title': title,
        'body': body,
        'callbackurl': callbackurl,
        'mobile_url': mobile_url,
        'touch_url': touch_url,
        'backto_url': backto_url,
        'mobile_image': mobile_image,
        'touch_image': touch_image,
        'list_type': list_type,
        'editable': editable,
        'expire_time': expire_time,
    }
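A small illustration of the callbackurl encoding step above (the URL is made up); the default safe characters leave slashes alone but escape the scheme's colon and any query delimiters:

# Illustration only: what urllib.quote does to a typical callback URL.
import urllib

print urllib.quote('http://sp.example.jp/callback?next=/home')
# -> http%3A//sp.example.jp/callback%3Fnext%3D/home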
Example #36
def make_path(path, **kwargs):
    return path % dict(((k, urllib.quote(v)) for (k, v) in kwargs.iteritems()))
Example #37
    def save(self, contents, **kwargs):

        logger.info("Saving incident settings contents...")

        user = cherrypy.session['user']['name']
        sessionKey = cherrypy.session.get('sessionKey')
        splunk.setDefault('sessionKey', sessionKey)

        eh = EventHandler(sessionKey=sessionKey)

        config = {}
        config['index'] = 'alerts'

        restconfig = entity.getEntities('configs/alert_manager',
                                        count=-1,
                                        sessionKey=sessionKey)
        if len(restconfig) > 0:
            if 'index' in restconfig['settings']:
                config['index'] = restconfig['settings']['index']

        logger.debug("Global settings: %s" % config)

        # Parse the JSON
        contents = json.loads(contents)

        logger.debug("Contents: %s" % json.dumps(contents))

        # Get key
        query = {}
        query['incident_id'] = contents['incident_id']
        logger.debug("Filter: %s" % json.dumps(query))

        uri = '/servicesNS/nobody/alert_manager/storage/collections/data/incidents?query=%s' % urllib.quote(
            json.dumps(query))
        serverResponse, incident = rest.simpleRequest(uri,
                                                      sessionKey=sessionKey)
        logger.debug("Settings for incident: %s" % incident)
        incident = json.loads(incident)

        # Update incident
        uri = '/servicesNS/nobody/alert_manager/storage/collections/data/incidents/' + incident[
            0]['_key']
        logger.debug("URI for incident update: %s" % uri)

        # Prepared new entry
        now = datetime.datetime.now().isoformat()
        changed_keys = []
        for key in incident[0].keys():
            if (key in contents) and (incident[0][key] != contents[key]):
                changed_keys.append(key)
                logger.info(
                    "%s for incident %s changed. Writing change event to index %s."
                    % (key, incident[0]['incident_id'], config['index']))
                event_id = hashlib.md5(incident[0]['incident_id'] +
                                       now).hexdigest()
                event = 'time=%s severity=INFO origin="incident_posture" event_id="%s" user="******" action="change" incident_id="%s" %s="%s" previous_%s="%s"' % (
                    now, event_id, user, incident[0]['incident_id'], key,
                    contents[key], key, incident[0][key])
                logger.debug("Change event will be: %s" % event)
                input.submit(event,
                             hostname=socket.gethostname(),
                             sourcetype='incident_change',
                             source='incident_settings.py',
                             index=config['index'])
                incident[0][key] = contents[key]

            else:
                logger.info("%s for incident %s didn't change." %
                            (key, incident[0]['incident_id']))

        del incident[0]['_key']
        contentsStr = json.dumps(incident[0])
        logger.debug("content for update: %s" % contentsStr)
        serverResponse, serverContent = rest.simpleRequest(
            uri, sessionKey=sessionKey, jsonargs=contentsStr)

        logger.debug("Response from update incident entry was %s " %
                     serverResponse)
        logger.debug("Changed keys: %s" % changed_keys)

        if len(changed_keys) > 0:
            ic = IncidentContext(sessionKey, contents['incident_id'])
            if "owner" in changed_keys:
                eh.handleEvent(alert=incident[0]["alert"],
                               event="incident_assigned",
                               incident=incident[0],
                               context=ic.getContext())
            elif "status" in changed_keys and contents["status"] == "resolved":
                eh.handleEvent(alert=incident[0]["alert"],
                               event="incident_resolved",
                               incident=incident[0],
                               context=ic.getContext())
            else:
                eh.handleEvent(alert=incident[0]["alert"],
                               event="incident_changed",
                               incident=incident[0],
                               context=ic.getContext())

        if contents['comment'] != "":
            contents['comment'] = contents['comment'].replace(
                '\n', '<br />').replace('\r', '')
            event_id = hashlib.md5(incident[0]['incident_id'] +
                                   now).hexdigest()
            event = 'time=%s severity=INFO origin="incident_posture" event_id="%s" user="******" action="comment" incident_id="%s" comment="%s"' % (
                now, event_id, user, incident[0]['incident_id'],
                contents['comment'])
            logger.debug("Comment event will be: %s" % event)
            event = event.encode('utf8')
            input.submit(event,
                         hostname=socket.gethostname(),
                         sourcetype='incident_change',
                         source='incident_settings.py',
                         index=config['index'])

        return 'Done'
Example #38
def get_youtube_subtitle(videoID, lang_code="en", allow_translate=False):
    """
    videoID - a string of alphanumeric characters in the Youtube URL after watch?v=
    lang_code - a string representing the language code of the desired subtitle
    allow_translate - a boolean specifying whether using Google Translate is OK
    Returns an ET, or None if 404 error occurs or if no subtitles are available.
    """
    sub_langs = get_subtitle_languages(videoID)
    
    if sub_langs.is_empty():
        return None
    
    is_translated = sub_langs.dict[lang_code] is None # if name is none, then it's translated

    if is_translated and not allow_translate:
        return None
    
    try:
        if is_translated:
            url = u"https://www.youtube.com/api/timedtext?lang={}&name={}&tlang={}&v={}".format(sub_langs.default_lang, urllib.quote(sub_langs.dict[sub_langs.default_lang].encode("utf8")), lang_code, videoID).encode('ascii', 'xmlcharrefreplace')
            response = urllib2.urlopen(url)
        else:
            url = u"https://www.youtube.com/api/timedtext?lang={}&name={}&v={}".format(lang_code, urllib.quote(sub_langs.dict[lang_code].encode("utf8")), videoID).encode('ascii', 'xmlcharrefreplace')
            response = urllib2.urlopen(url)
    except HTTPError:
        return None
    xml = response.read()
    response.close()
    return ET.fromstring(xml)
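A brief usage sketch, assuming a hypothetical video id and that the timedtext response parses into a <transcript> root with <text> children (as it historically did):

# Illustration only: fetching an English track (the video id is made up).
tree = get_youtube_subtitle('dQw4w9WgXcQ', lang_code='en', allow_translate=False)
if tree is not None:
    for text_node in tree.findall('text'):
        print text_node.get('start'), text_node.text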
Example #39
 def _prepare_text_api_call(self, terminal_ref, count_after=False):
     return 'api/texts/{}?count_after={}&index_after=0'.format(urllib.quote(terminal_ref.replace(" ", "_").encode('utf-8')), int(count_after))
Example #40
 def test_unicode(self):
     url = urllib.quote(self.raw_url.encode('utf8'))
     url = unicode(url, 'utf8')
     eq_(render('{{ url|display_url }}', {'url': url}), self.raw_url)
Example #41
    def analyse(self, tags, wikipediaTag="wikipedia"):
        err = []
        if wikipediaTag in tags:
            m = self.wiki_regexp.match(tags[wikipediaTag])
            if (tags[wikipediaTag].startswith("http://")
                    or tags[wikipediaTag].startswith("https://")) and not m:
                # tag 'wikipedia' starts with 'http://' but it's not a wikipedia url
                return [{"class": 30310, "subclass": 0}]
            elif m:
                # tag 'wikipedia' seems to be a URL
                return [{
                    "class": 30311,
                    "subclass": 1,
                    "text": T_(u"Use wikipedia=%s:*", m.group(2)),
                    "fix": {
                        wikipediaTag:
                        "%s:%s" % (m.group(2), self.human_readable(m.group(3)))
                    }
                }]

            if not self.lang_regexp.match(tags[wikipediaTag]):
                err.append({"class": 30312, "subclass": 2})
            else:
                prefix = tags[wikipediaTag].split(':', 1)[0]
                tag = wikipediaTag + ':' + prefix
                if tag in tags:
                    err.append({
                        "class": 30316,
                        "subclass": 6,
                        "fix": {
                            '-': [tag]
                        }
                    })
            if "%" in tags[wikipediaTag] or "_" in tags[wikipediaTag]:
                err.append({
                    "class": 30313,
                    "subclass": 3,
                    "fix": {
                        wikipediaTag: self.human_readable(tags[wikipediaTag])
                    }
                })

        interwiki = False
        missing_primary = []
        for tag in [t for t in tags if t.startswith(wikipediaTag + ":")]:
            suffix = tag[len(wikipediaTag) + 1:]
            if ":" in suffix:
                suffix = suffix.split(":")[0]

            if wikipediaTag in tags:
                if interwiki == False:
                    try:
                        lang, title = tags[wikipediaTag].split(':')
                        json_str = urlread(
                            "https://" + lang +
                            ".wikipedia.org/w/api.php?action=query&prop=langlinks&titles="
                            + urllib.quote(title.encode('utf-8')) +
                            "&redirects=&lllimit=500&format=json", 30)
                        interwiki = json.loads(json_str)
                        interwiki = dict(
                            map(
                                lambda x: [x["lang"], x["*"]],
                                interwiki["query"]["pages"].values()[0]
                                ["langlinks"]))
                    except:
                        interwiki = None

                    if interwiki and suffix in interwiki and interwiki[
                            suffix] == self.human_readable(tags[tag]):
                        err.append({
                            "class":
                            30317,
                            "subclass":
                            7,
                            "fix": [{
                                '-': [tag]
                            }, {
                                '-': [tag],
                                '~': {
                                    wikipediaTag:
                                    suffix + ':' + interwiki[suffix]
                                }
                            }]
                        })

            if suffix in tags:
                # wikipedia:xxxx only authorized if tag xxxx exists
                err.extend(self.analyse(tags, wikipediaTag + ":" + suffix))

            elif self.lang_restriction_regexp.match(suffix):
                if not wikipediaTag in tags:
                    m = self.wiki_regexp.match(tags[tag])
                    if m:
                        value = self.human_readable(m.group(3))
                    elif tags[tag].startswith(suffix + ":"):
                        value = tags[tag][len(suffix) + 1:]
                    else:
                        value = self.human_readable(tags[tag])
                    missing_primary.append({
                        '-': [tag],
                        '+': {
                            wikipediaTag: "%s:%s" % (suffix, value)
                        }
                    })
            else:
                err.append({
                    "class": 30315,
                    "subclass": 5,
                    "text": T_(u"Invalid wikipedia suffix '%s'", suffix)
                })

        if missing_primary != []:
            if self.Language:
                missing_primary = sorted(
                    missing_primary,
                    key=lambda x: x['+'][wikipediaTag][0:2]
                    if x['+'][wikipediaTag][0:2] != self.Language else '')
            err.append({"class": 30314, "subclass": 4, "fix": missing_primary})

        return err
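The fixes above rely on self.human_readable, which is not shown in this snippet. As a hedged, standalone approximation (the project's real helper may differ), it essentially percent-decodes the article title and restores spaces, assuming Python 2's urllib:

import urllib

def human_readable(title):
    # approximation of the helper referenced above: percent-decode the
    # UTF-8 bytes of the title and turn underscores back into spaces
    return urllib.unquote(title.encode("utf-8")).decode("utf-8").replace("_", " ")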
Example #42
0
 def _prepare_version_attrs_api_call(self, title, lang, vtitle):
     return "api/version/flags/{}/{}/{}".format(urllib.quote(title), urllib.quote(lang), urllib.quote(vtitle))
Example #43
0
 def search_payload_cmd(self,payload):
     "Generate a command execution payload"
     encoded = urllib.quote(base64.b64encode(payload))
     encodedpl = """search index=_internal source=*splunkd.log |mappy x=eval("sys.modules['os'].system(base64.b64decode('%s'))")""" % encoded
     #print encodedpl
     return encodedpl
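The payload is base64-encoded and then percent-encoded so it survives being embedded inside the Splunk search string; reversing the two steps recovers the original command. A minimal round-trip check, assuming Python 2's urllib and base64:

import base64, urllib

encoded = urllib.quote(base64.b64encode("id"))
print encoded                                            # aWQ%3D
assert base64.b64decode(urllib.unquote(encoded)) == "id"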
Example #44
0
 def _upload_term(self, name):
     t = Term().load({'name': name})
     if t is None:
         raise AttributeError("Necessary Term not Present on this Environment")
     self._make_post_request_to_server('api/terms/{}'.format(urllib.quote(name)), t.contents())
Example #45
0
def search_results(params):

    item_type = params.get('item_type')
    query_string = params.get('query')
    if query_string:
        log.debug("query_string : {0}", query_string)
        query_string = urllib.unquote(query_string)
        log.debug("query_string : {0}", query_string)

    item_type = item_type.lower()

    if item_type == 'movie':
        heading_type = string_load(30231)
        content_type = 'movies'
    elif item_type == 'series':
        heading_type = string_load(30229)
        content_type = 'tvshows'
    elif item_type == 'episode':
        heading_type = string_load(30235)
        content_type = 'episodes'
        params["name_format"] = "Episode|episode_name_format"
    elif item_type == "music" or item_type == "audio" or item_type == "musicalbum":
        heading_type = 'Music'
        content_type = 'songs'
    elif item_type == "person":
        heading_type = 'Artists'
        content_type = 'artists'
    else:
        heading_type = item_type
        content_type = 'video'

    handle = int(sys.argv[1])

    if not query_string:
        home_window = HomeWindow()
        last_search = home_window.getProperty("last_search")
        kb = xbmc.Keyboard()
        kb.setHeading(heading_type.capitalize() + ' ' +
                      string_load(30246).lower())
        kb.setDefault(last_search)
        kb.doModal()

        if kb.isConfirmed():
            user_input = kb.getText().strip()
        else:
            return

        home_window.setProperty("last_search", user_input)
        log.debug('searchResults Called: {0}', params)
        query = user_input

    else:
        query = query_string

    query = urllib.quote(query)
    log.debug("query : {0}", query)

    if (not item_type) or (not query):
        return

    limit = int(params.get('limit', 20))
    content_url = ('{server}/emby/Search/Hints?searchTerm=' + query +
                   '&UserId={userid}' + '&Limit=' + str(limit) +
                   '&IncludeItemTypes=' + item_type +
                   '&ExcludeItemTypes=LiveTvProgram' + '&IncludePeople=false' +
                   '&IncludeMedia=true' + '&IncludeGenres=false' +
                   '&IncludeStudios=false' + '&IncludeArtists=false')

    if item_type == "person":
        content_url = ('{server}/emby/Search/Hints?searchTerm=' + query +
                       '&UserId={userid}' + '&Limit=' + str(limit) +
                       '&IncludePeople=true' + '&IncludeMedia=false' +
                       '&IncludeGenres=false' + '&IncludeStudios=false' +
                       '&IncludeArtists=false')

    # show a progress indicator if needed
    settings = xbmcaddon.Addon()
    progress = None
    if settings.getSetting('showLoadProgress') == "true":
        progress = xbmcgui.DialogProgress()
        progress.create(string_load(30112))
        progress.update(0, string_load(30113))

    search_hints_result = dataManager.GetContent(content_url)
    log.debug('SearchHints jsonData: {0}', search_hints_result)

    if search_hints_result is None:
        search_hints_result = {}

    search_hints = search_hints_result.get('SearchHints')
    if search_hints is None:
        search_hints = []

    total_results = int(search_hints_result.get('TotalRecordCount', 0))
    log.debug('SEARCH_TOTAL_RESULTS: {0}', total_results)

    # what type of search was it
    if item_type == "person":
        log.debug("Item Search Result")
        server = downloadUtils.getServer()
        list_items = []
        for item in search_hints:
            person_id = item.get('ItemId')
            person_name = item.get('Name')
            image_tag = item.get('PrimaryImageTag')
            person_thumbnail = downloadUtils.imageUrl(person_id,
                                                      "Primary",
                                                      0,
                                                      400,
                                                      400,
                                                      image_tag,
                                                      server=server)

            action_url = sys.argv[
                0] + "?mode=NEW_SEARCH_PERSON&person_id=" + person_id

            list_item = xbmcgui.ListItem(label=person_name)
            list_item.setProperty("id", person_id)
            if person_thumbnail:
                art_links = {}
                art_links["thumb"] = person_thumbnail
                art_links["poster"] = person_thumbnail
                list_item.setArt(art_links)

            item_tuple = (action_url, list_item, True)
            list_items.append(item_tuple)

        xbmcplugin.setContent(handle, 'artists')
        xbmcplugin.addDirectoryItems(handle, list_items)
        xbmcplugin.endOfDirectory(handle, cacheToDisc=False)

    else:
        # extract IDs for details query
        log.debug("Item Search Result")
        id_list = []
        for item in search_hints:
            item_id = item.get('ItemId')
            id_list.append(str(item_id))

        if len(id_list) > 0:
            Ids = ",".join(id_list)
            details_url = ('{server}/emby/Users/{userid}/items' + '?Ids=' +
                           Ids + '&Fields={field_filters}' + '&format=json')
            '''
            details_result = dataManager.GetContent(details_url)
            log.debug("Search Results Details: {0}", details_result)
            '''

            # set content type
            xbmcplugin.setContent(handle, content_type)

            dir_items, detected_type = processDirectory(
                details_url, progress, params)
            if dir_items is not None:
                xbmcplugin.addDirectoryItems(handle, dir_items)
                xbmcplugin.endOfDirectory(handle, cacheToDisc=False)

        elif not query_string:
            xbmcgui.Dialog().ok(string_load(30335), string_load(30336))

    if progress is not None:
        progress.update(100, string_load(30125))
        progress.close()
Example #46
0
import sys, urllib, time, hashlib

def ShowMenu():
    menu = '''
	./sms.py mobile content
	./sms.py 13426332010 test
'''	    
    print '#' * 10
    print menu
    print '#' * 10
    
def GetStringMd5(str):
    h = hashlib.md5(str)
    return h.hexdigest()

if __name__ == "__main__":
    if len(sys.argv) != 3:
        ShowMenu()
    else:
        # get localtime
        ltime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))

        # get values
        mobile = str(sys.argv[1])
        content = urllib.quote(str(sys.argv[2]))
        md5key = GetStringMd5(mobile + 'ASDxcv8234sfsj12' + time.strftime('%y-%m', time.localtime(time.time())))
        apiurl = 'http://interface.kaixin001.com/interface/sms/send.php?mobile=%s&content="%s"&sig=%s&subnum=566002&monitor=base'

        f = urllib.urlopen(apiurl % (mobile, content, md5key))
Example #47
0
 def get_encode_param(param):
     return quote(TeaConverter.to_str(param), safe='~')
Example #48
0
import sys, urllib, urlparse, httplib
# md5(state=..., count=...) and padding() are assumed to come from a
# hash length extension helper module such as pymd5.

url = sys.argv[1]

tokenLoc = url.find("token=") + 6
endLoc = url.find("&user=")
msgLoc = endLoc + 1
m_hash = url[tokenLoc:endLoc]
m_len = len(m_hash)

m_bits = (m_len + len(padding(m_len * 8))) * 8

h = md5(state="402a574d265dc212ee64970f159575d0".decode("hex"), count=m_bits)

x = "&command3=UnlockAllSafes"
h.update(x)

x_padding = urllib.quote(padding(m_len * 8))

padded_msg = url[msgLoc:] + x_padding + x

updatedURL = url[:tokenLoc] + h.hexdigest() + "&" + padded_msg

print updatedURL

parsedUrl = urlparse.urlparse(updatedURL)
conn = httplib.HTTPSConnection(parsedUrl.hostname, parsedUrl.port)
conn.request("GET", parsedUrl.path + "?" + parsedUrl.query)
print conn.getresponse().read()
Example #49
0
def query():
    """Queries ComicBookDB to get issue details.

    This doctest is broken :(  It needs to dynamically replace the version
    string.

        >>> GROO = '''<Book>
        ...   <field name="title">Groo the Wanderer #3</field>
        ...   <field name="publisher">Pacific Comics</field>
        ... </Book>'''
        >>> XMEN = '''<Book>
        ...   <field name="title">X-Men #188</field>
        ...   <field name="publisher">Marvel Comics</field>
        ... </Book>'''
        >>> CABLE = '''<Book>
        ...   <field name="title">Cable &amp; Deadpool #45</field>
        ...   <field name="publisher">Marvel Comics</field>
        ... </Book>'''
        >>> file = open('/tmp/books-quickfill.xml', 'w')
        >>> file.write(GROO)
        >>> file.close()
        >>> query()
        <?xml version="1.0" encoding="UTF-8"?>
        <importedData>
          <List name="ComicBookDB Import" version="1.1">
            <Book title="Groo the Wanderer">
              <field name="title">
                Groo the Wanderer #3
              </field>
              <field name="series">
                Groo the Wanderer
              </field>
              <field name="publisher">
                Pacific Comics
              </field>
              <field name="publishDate">
                April 1, 1983
              </field>
              <field name="CoverImageURL">
                http://www.comicbookdb.com/graphics/comic_graphics/1/24/15903_20051209094425_large.jpg
              </field>
              <field name="link">
                http://www.comicbookdb.com/issue.php?ID=15903
              </field>
            </Book>
            <Book title="Groo the Wanderer">
              <field name="title">
                Groo the Wanderer #3
              </field>
              <field name="series">
                Groo the Wanderer
              </field>
              <field name="authors">
                Mark Evanier
              </field>
              <field name="illustrators">
                Sergio Aragonés
              </field>
              <field name="publishDate">
                May 1, 1985
              </field>
              <field name="CoverImageURL">
                http://www.comicbookdb.com/graphics/comic_graphics/1/4/287_20050924142121_large.jpg
              </field>
              <field name="link">
                http://www.comicbookdb.com/issue.php?ID=287
              </field>
            </Book>
          </List>
        </importedData>
        >>> file = open('/tmp/books-quickfill.xml', 'w')
        >>> file.write(XMEN)
        >>> file.close()
        >>> query()
        <?xml version="1.0" encoding="UTF-8"?>
        <importedData>
          <List name="ComicBookDB Import" version="1.1">
            <Book title="X-Men">
              <field name="title">
                X-Men #188
              </field>
              <field name="series">
                X-Men
              </field>
              <field name="authors">
                Mike Carey
              </field>
              <field name="illustrators">
                Chris Bachalo; Jaime Mendoza; Tim Townsend
              </field>
              <field name="editors">
                Mike Marts
              </field>
              <field name="publisher">
                Marvel Comics
              </field>
              <field name="publishDate">
                September 1, 2006
              </field>
              <field name="CoverImageURL">
                http://www.comicbookdb.com/graphics/comic_graphics/1/95/51001_20060713214406_large.jpg
              </field>
              <field name="link">
                http://www.comicbookdb.com/issue.php?ID=51001
              </field>
            </Book>
            <Book title="X-Men">
              <field name="title">
                X-Men #188
              </field>
              <field name="series">
                X-Men
              </field>
              <field name="authors">
                Mike Carey
              </field>
              <field name="illustrators">
                Chris Bachalo; Jaime Mendoza; Tim Townsend
              </field>
              <field name="editors">
                Mike Marts
              </field>
              <field name="publisher">
                Marvel Comics
              </field>
              <field name="publishDate">
                September 1, 2006
              </field>
              <field name="CoverImageURL">
                http://www.comicbookdb.com/graphics/comic_graphics/1/154/77478_20061231044347_large.jpg
              </field>
              <field name="link">
                http://www.comicbookdb.com/issue.php?ID=77478
              </field>
            </Book>
          </List>
        </importedData>
        >>> file = open('/tmp/books-quickfill.xml', 'w')
        >>> file.write(CABLE)
        >>> file.close()
        >>> query()
        <?xml version="1.0" encoding="UTF-8"?>
        <importedData>
          <List name="ComicBookDB Import" version="1.1">
            <Book title="Cable &amp; Deadpool">
              <field name="title">
                Cable &amp; Deadpool #45
              </field>
              <field name="series">
                Cable &amp; Deadpool
              </field>
              <field name="authors">
                Fabian Nicieza
              </field>
              <field name="illustrators">
                Reilly Brown; Jeremy Freeman
              </field>
              <field name="editors">
                Nicole Boose
              </field>
              <field name="publisher">
                Marvel Comics
              </field>
              <field name="publishDate">
                November 1, 2007
              </field>
              <field name="CoverImageURL">
                http://www.comicbookdb.com/graphics/comic_graphics/1/213/106331_20070929112004_large.jpg
              </field>
              <field name="link">
                http://www.comicbookdb.com/issue.php?ID=106331
              </field>
            </Book>
          </List>
        </importedData>
        >>> 
    """
    title_ids = []

    (title, issue_number, publisher, octothorp) = parse_books_quickfill()

    title_list = get_page('http://www.comicbookdb.com/search.php?'
            'form_search=%s&form_searchtype=Title' % urllib.quote(title))

    title_list = title_list.replace('&amp;', '&')

    matches = TITLE.findall(title_list)

    for match in matches:
        # match looks like ('title_id', 'title', 'year', 'publisher')
        t = match[1]
        try:
            t = t.decode('utf-8')
        except UnicodeDecodeError:
            t = t.decode('iso-8859-1')
        if t == title:
            title_ids.append(match[0])

    if not title_ids:
        print_output()
    else:
        print_output(title, title_ids, issue_number, octothorp)
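The docstring above notes that the expected output embeds a version string that changes between runs of the tool. One common way to make such a doctest tolerant of that, sketched here rather than taken from the project, is doctest's ELLIPSIS option so the version attribute is not matched literally:

import doctest

# with ELLIPSIS enabled, '...' in the expected output matches any text,
# so the List element in the doctest can be written as:
#     <List name="ComicBookDB Import" version="...">
doctest.testmod(optionflags=doctest.ELLIPSIS)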
Example #50
0
def show_menu(params):
    log.debug("showMenu(): {0}", params)

    item_id = params["item_id"]

    url = "{server}/emby/Users/{userid}/Items/" + item_id + "?format=json"
    data_manager = DataManager()
    result = data_manager.GetContent(url)
    log.debug("Playfile item info: {0}", result)

    if result is None:
        return

    action_items = []

    if result["Type"] in [
            "Episode", "Movie", "Music", "Video", "Audio", "TvChannel",
            "Program"
    ]:
        li = xbmcgui.ListItem(string_load(30314))
        li.setProperty('menu_id', 'play')
        action_items.append(li)

    if result["Type"] in ["Season", "MusicAlbum"]:
        li = xbmcgui.ListItem(string_load(30317))
        li.setProperty('menu_id', 'play_all')
        action_items.append(li)

    if result["Type"] in ["Episode", "Movie", "Video", "TvChannel", "Program"]:
        li = xbmcgui.ListItem(string_load(30275))
        li.setProperty('menu_id', 'transcode')
        action_items.append(li)

    if result["Type"] == "Movie":
        li = xbmcgui.ListItem(string_load(30307))
        li.setProperty('menu_id', 'play_trailer')
        action_items.append(li)

    if result["Type"] == "Episode" and result["ParentId"] is not None:
        li = xbmcgui.ListItem(string_load(30327))
        li.setProperty('menu_id', 'view_season')
        action_items.append(li)

    if result["Type"] == "Series":
        li = xbmcgui.ListItem(string_load(30354))
        li.setProperty('menu_id', 'view_series')
        action_items.append(li)

    user_data = result.get("UserData", None)
    if user_data:
        progress = user_data.get("PlaybackPositionTicks", 0) != 0
        played = user_data.get("Played", False)
        if not played or progress:
            li = xbmcgui.ListItem(string_load(30270))
            li.setProperty('menu_id', 'mark_watched')
            action_items.append(li)
        if played or progress:
            li = xbmcgui.ListItem(string_load(30271))
            li.setProperty('menu_id', 'mark_unwatched')
            action_items.append(li)

        if user_data.get("IsFavorite", False) == False:
            li = xbmcgui.ListItem(string_load(30272))
            li.setProperty('menu_id', 'emby_set_favorite')
            action_items.append(li)
        else:
            li = xbmcgui.ListItem(string_load(30273))
            li.setProperty('menu_id', 'emby_unset_favorite')
            action_items.append(li)

    li = xbmcgui.ListItem(string_load(30274))
    li.setProperty('menu_id', 'delete')
    action_items.append(li)

    li = xbmcgui.ListItem(string_load(30281))
    li.setProperty('menu_id', 'refresh_images')
    action_items.append(li)

    #xbmcplugin.endOfDirectory(int(sys.argv[1]), cacheToDisc=False)

    action_menu = ActionMenu("ActionMenu.xml", PLUGINPATH, "default", "720p")
    action_menu.setActionItems(action_items)
    action_menu.doModal()
    selected_action_item = action_menu.getActionItem()
    selected_action = ""
    if selected_action_item is not None:
        selected_action = selected_action_item.getProperty('menu_id')
    log.debug("Menu Action Selected: {0}", selected_action_item)
    del action_menu

    if selected_action == "play":
        log.debug("Play Item")
        #list_item = populate_listitem(params["item_id"])
        #result = xbmcgui.Dialog().info(list_item)
        #log.debug("xbmcgui.Dialog().info: {0}", result)
        PLAY(params)

    elif selected_action == "play_all":
        PLAY(params)

    elif selected_action == "play_trailer":
        playTrailer(item_id)

    elif selected_action == "transcode":
        params['force_transcode'] = 'true'
        PLAY(params)

    elif selected_action == "emby_set_favorite":
        markFavorite(item_id)

    elif selected_action == "emby_unset_favorite":
        unmarkFavorite(item_id)

    elif selected_action == "mark_watched":
        markWatched(item_id)

    elif selected_action == "mark_unwatched":
        markUnwatched(item_id)

    elif selected_action == "delete":
        delete(result)

    elif selected_action == "view_season":
        xbmc.executebuiltin("Dialog.Close(all,true)")
        parent_id = result["ParentId"]
        series_id = result["SeriesId"]
        u = ('{server}/emby/Shows/' + series_id + '/Episodes'
             '?userId={userid}' + '&seasonId=' + parent_id +
             '&IsVirtualUnAired=false' + '&IsMissing=false' +
             '&Fields={field_filters}' + '&format=json')
        action_url = ("plugin://plugin.video.embycon/?url=" + urllib.quote(u) +
                      "&mode=GET_CONTENT&media_type=Season")
        built_in_command = 'ActivateWindow(Videos, ' + action_url + ', return)'
        xbmc.executebuiltin(built_in_command)

    elif selected_action == "view_series":
        xbmc.executebuiltin("Dialog.Close(all,true)")
        u = ('{server}/emby/Shows/' + item_id + '/Seasons'
             '?userId={userid}' + '&Fields={field_filters}' + '&format=json')
        action_url = ("plugin://plugin.video.embycon/?url=" + urllib.quote(u) +
                      "&mode=GET_CONTENT&media_type=Series")
        built_in_command = 'ActivateWindow(Videos, ' + action_url + ', return)'
        xbmc.executebuiltin(built_in_command)

    elif selected_action == "refresh_images":
        CacheArtwork().delete_cached_images(item_id)
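Both the view_season and view_series branches above embed a complete Emby URL as a single query parameter of the plugin:// URL, which is why the inner URL is percent-encoded first. A minimal sketch of that nesting, assuming Python 2's urllib (the inner URL is illustrative):

import urllib

inner = '{server}/emby/Shows/123/Seasons?userId={userid}&format=json'
outer = ('plugin://plugin.video.embycon/?url=' + urllib.quote(inner) +
         '&mode=GET_CONTENT&media_type=Series')
# the '?' and '&' inside the inner URL are escaped, so Kodi passes it
# through as one opaque 'url' parameter value
print outer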
Example #51
0
def main():
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(default='present', choices=['present', 'absent'], type='str'),
            name = dict(required=True, aliases=[ "src", "source" ], type='str'),
            login_user = dict(default='guest', type='str'),
            login_password = dict(default='guest', type='str', no_log=True),
            login_host = dict(default='localhost', type='str'),
            login_port = dict(default='15672', type='str'),
            vhost = dict(default='/', type='str'),
            destination = dict(required=True, aliases=[ "dst", "dest"], type='str'),
            destination_type = dict(required=True, aliases=[ "type", "dest_type"], choices=[ "queue", "exchange" ],type='str'),
            routing_key = dict(default='#', type='str'),
            arguments = dict(default=dict(), type='dict')
        ),
        supports_check_mode = True
    )

    if module.params['destination_type'] == "queue":
        dest_type="q"
    else:
        dest_type="e"

    if module.params['routing_key'] == "":
        props = "~"
    else:
        props = urllib.quote(module.params['routing_key'],'')

    url = "http://%s:%s/api/bindings/%s/e/%s/%s/%s/%s" % (
        module.params['login_host'],
        module.params['login_port'],
        urllib.quote(module.params['vhost'],''),
        urllib.quote(module.params['name'],''),
        dest_type,
        urllib.quote(module.params['destination'],''),
        props
    )

    # Check if exchange already exists
    r = requests.get( url, auth=(module.params['login_user'],module.params['login_password']))

    if r.status_code==200:
        binding_exists = True
        response = r.json()
    elif r.status_code==404:
        binding_exists = False
        response = r.text
    else:
        module.fail_json(
            msg = "Invalid response from RESTAPI when trying to check if exchange exists",
            details = r.text
        )

    if module.params['state']=='present':
        change_required = not binding_exists
    else:
        change_required = binding_exists

    # Exit if check_mode
    if module.check_mode:
        module.exit_json(
            changed= change_required,
            name = module.params['name'],
            details = response,
            arguments = module.params['arguments']
        )

    # Do changes
    if change_required:
        if module.params['state'] == 'present':
            url = "http://%s:%s/api/bindings/%s/e/%s/%s/%s" % (
                module.params['login_host'],
                module.params['login_port'],
                urllib.quote(module.params['vhost'],''),
                urllib.quote(module.params['name'],''),
                dest_type,
                urllib.quote(module.params['destination'],'')
            )

            r = requests.post(
                url,
                auth = (module.params['login_user'],module.params['login_password']),
                headers = { "content-type": "application/json"},
                data = json.dumps({
                    "routing_key": module.params['routing_key'],
                    "arguments": module.params['arguments']
                    })
            )
        elif module.params['state'] == 'absent':
            r = requests.delete( url, auth = (module.params['login_user'],module.params['login_password']))

        if r.status_code == 204 or r.status_code == 201:
            module.exit_json(
                changed = True,
                name = module.params['name'],
                destination = module.params['destination']
            )
        else:
            module.fail_json(
                msg = "Error creating exchange",
                status = r.status_code,
                details = r.text
            )

    else:
        module.exit_json(
            changed = False,
            name = module.params['name']
        )
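In the module above, every path segment of the management-API URL is quoted with safe='' so that characters like the default vhost '/' and the default routing key '#' survive as %2F and %23. A small sketch of the resulting binding URL, assuming Python 2's urllib and placeholder connection values:

import urllib

vhost, exchange, queue, routing_key = '/', 'logs', 'audit', '#'
props = urllib.quote(routing_key, '') if routing_key else '~'
url = "http://localhost:15672/api/bindings/%s/e/%s/q/%s/%s" % (
    urllib.quote(vhost, ''),       # '/' -> %2F
    urllib.quote(exchange, ''),
    urllib.quote(queue, ''),
    props)                         # '#' -> %23
print url                          # .../api/bindings/%2F/e/logs/q/audit/%23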
Example #52
0
 def get_encode_path(path):
     return quote(TeaConverter.to_str(path), safe='/~')
Example #53
0
def urlquote(value, safe=''):
    return quote(value.encode('utf8'), safe)
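Several wrappers in this collection (get_encode_param, get_encode_path, urlquote, encode_url) differ only in the safe argument handed to quote. A quick comparison of what each policy leaves unescaped, assuming Python 2's urllib.quote:

from urllib import quote

raw = "a b/c~d"
print quote(raw)             # a%20b/c%7Ed   ('/' is safe by default, '~' is not)
print quote(raw, safe='')    # a%20b%2Fc%7Ed (only letters, digits and '_.-' kept)
print quote(raw, safe='~')   # a%20b%2Fc~d   (the policy used by get_encode_param)
print quote(raw, safe='/~')  # a%20b/c~d     (the policy used by get_encode_path)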
Example #54
0
def generateFoursquareAuthUri(client_id):
  redirect_uri = generateRedirectUri()
  server = CONFIG['foursquare_server']
  url = '%s/oauth2/authenticate?client_id=%s&response_type=code&redirect_uri=%s'
  return url % (server, client_id, urllib.quote(redirect_uri))
Example #55
0
 def encode_url(kls, url):
     return quote(url, '/:?%=&()~",\'$')
Example #56
0
File: api.py Project: dulems/hue-1
def utf_quoter(what):
  return urllib.quote(unicode(what).encode('utf-8'), safe='~@#$&()*!+=;,.?/\'')
Example #57
0
 def build_path_base(self, bucket, key=''):
     key = boto.utils.get_utf8_value(key)
     return '/%s' % urllib.quote(key)
Example #58
0
        except crawler.CrawlerException, e:
            output.print_msg("徽章%s解析失败,原因:%s" % (badges_detail_url, e.message))
            raise
        if len(wanted_card_list) > 0:
            game_id = badges_detail_url.split("/")[-2]
            output.print_msg("game id: %s" % game_id, False)
            # get the market price of every trading card
            try:
                market_card_list = steamCommon.get_market_game_trade_card_price(game_id, login_cookie)
            except crawler.CrawlerException, e:
                output.print_msg("游戏id%s的市场解析失败,原因:%s" % (game_id, e.message))
                raise 
            card_real_name_dict = {}
            for card_read_name in market_card_list:
                card_name = card_read_name.replace(" (Trading Card)", "")
                card_real_name_dict[card_name] = card_read_name
            for card_name in wanted_card_list:
                if card_name in card_real_name_dict:
                    card_read_name = card_real_name_dict[card_name]
                else:
                    card_read_name = card_name
                if card_read_name in market_card_list:
                    market_link = "http://steamcommunity.com/market/listings/753/%s-%s" % (game_id, urllib.quote(card_read_name))
                    output.print_msg("card: %s, wanted %s, min price: %s, link: %s" % (card_name, wanted_card_list[card_name], market_card_list[card_read_name], market_link), False)
                else:
                    output.print_msg("card: %s, wanted %s, not found price in market" % (card_name, wanted_card_list[card_read_name]), False)


if __name__ == "__main__":
    main(steamCommon.get_account_id_from_file())
Example #59
0
 def build_auth_path(self, bucket, key=''):
     key = boto.utils.get_utf8_value(key)
     path = ''
     if bucket != '':
         path = '/' + bucket
     return path + '/%s' % urllib.quote(key)
Example #60
0
    def search_movie_web(movie_list, movie_name, movie_year):
        try:
            #movie_list = []
            url = 'https://suggest-bar.daum.net/suggest?id=movie&cate=movie&multiple=1&mod=json&code=utf_in_out&q=%s' % (
                urllib.quote(movie_name.encode('utf8')))
            data = get_json(url)

            for index, item in enumerate(data['items']['movie']):
                tmps = item.split('|')
                score = 85 - (index * 5)
                if tmps[0].find(movie_name) != -1 and tmps[-2] == movie_year:
                    score = 95
                elif tmps[3] == movie_year:
                    score = score + 5
                if score < 10:
                    score = 10
                MovieSearch.movie_append(
                    movie_list, {
                        'id': tmps[1],
                        'title': tmps[0],
                        'year': tmps[-2],
                        'score': score
                    })
        except Exception as e:
            log_error('Exception:%s', e)
            log_error(traceback.format_exc())

        try:
            url = 'https://search.daum.net/search?nil_suggest=btn&w=tot&DA=SBC&q=%s%s' % (
                '%EC%98%81%ED%99%94+', urllib.quote(movie_name.encode('utf8')))
            ret = MovieSearch.get_movie_info_from_home(url)
            if ret is not None:

                # do not compare titles here because of subtitles/alternate titles
                # if the home-page result also matches the year, give it score 100 and skip further searches
                if ret['year'] == movie_year:
                    score = 100
                    need_another_search = False
                else:
                    score = 90
                    need_another_search = True
                MovieSearch.movie_append(
                    movie_list, {
                        'id': ret['daum_id'],
                        'title': ret['title'],
                        'year': ret['year'],
                        'score': score,
                        'country': ret['country'],
                        'more': ret['more']
                    })

                log_debug('need_another_search : %s' % need_another_search)

                movie = ret['movie']

                if need_another_search:
                    # movies with the same title
                    tmp = movie.find('div[@class="coll_etc"]')
                    log_debug('coll_etc : %s' % tmp)
                    if tmp is not None:
                        first_url = None
                        tag_list = tmp.findall('.//a')
                        for tag in tag_list:
                            match = re.compile(r'(.*?)\((.*?)\)').search(
                                tag.text_content())
                            if match:
                                daum_id = tag.attrib['href'].split('||')[1]
                                score = 80
                                if match.group(
                                        1) == movie_name and match.group(
                                            2) == movie_year:
                                    first_url = 'https://search.daum.net/search?%s' % tag.attrib[
                                        'href']
                                elif match.group(
                                        2
                                ) == movie_year and first_url is not None:
                                    first_url = 'https://search.daum.net/search?%s' % tag.attrib[
                                        'href']
                                MovieSearch.movie_append(
                                    movie_list, {
                                        'id': daum_id,
                                        'title': match.group(1),
                                        'year': match.group(2),
                                        'score': score
                                    })
                                #results.Append(MetadataSearchResult(id=daum_id, name=match.group(1), year=match.group(2), score=score, lang=lang))
                        log_debug('first_url : %s' % first_url)
                        if need_another_search and first_url is not None:
                            #log_debug('RRRRRRRRRRRRRRRRRRRRRR')
                            new_ret = MovieSearch.get_movie_info_from_home(
                                first_url)
                            MovieSearch.movie_append(
                                movie_list, {
                                    'id': new_ret['daum_id'],
                                    'title': new_ret['title'],
                                    'year': new_ret['year'],
                                    'score': 100,
                                    'country': new_ret['country'],
                                    'more': new_ret['more']
                                })

                    # series entries
                    tmp = movie.find('.//ul[@class="list_thumb list_few"]')
                    if tmp is None:
                        tmp = movie.find(
                            './/ul[@class="list_thumb list_more"]')

                    log_debug('SERIES:%s' % tmp)
                    if tmp is not None:
                        tag_list = tmp.findall('.//div[@class="wrap_cont"]')
                        first_url = None
                        score = 80
                        for tag in tag_list:
                            a_tag = tag.find('a')
                            daum_id = a_tag.attrib['href'].split('||')[1]
                            daum_name = a_tag.text_content()
                            span_tag = tag.find('span')
                            year = span_tag.text_content()
                            log_debug('daum_id:%s %s %s' %
                                      (daum_id, year, daum_name))
                            if daum_name == movie_name and year == movie_year:
                                first_url = 'https://search.daum.net/search?%s' % a_tag.attrib[
                                    'href']
                            elif year == movie_year and first_url is not None:
                                first_url = 'https://search.daum.net/search?%s' % tag.attrib[
                                    'href']
                            MovieSearch.movie_append(
                                movie_list, {
                                    'id': daum_id,
                                    'title': daum_name,
                                    'year': year,
                                    'score': score
                                })
                            log_debug('first_url : %s' % first_url)
                        if need_another_search and first_url is not None:
                            #log_debug('RRRRRRRRRRRRRRRRRRRRRR')
                            new_ret = MovieSearch.get_movie_info_from_home(
                                first_url)
                            MovieSearch.movie_append(
                                movie_list, {
                                    'id': new_ret['daum_id'],
                                    'title': new_ret['title'],
                                    'year': new_ret['year'],
                                    'score': 100,
                                    'country': new_ret['country'],
                                    'more': new_ret['more']
                                })

        except Exception as e:
            log_error('Exception:%s', e)
            log_error(traceback.format_exc())
        movie_list = list(
            reversed(sorted(movie_list, key=lambda k: k['score'])))
        return movie_list
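Both request URLs above percent-encode the UTF-8 bytes of the (typically Korean) movie title before splicing it into the Daum query string; the hard-coded '%EC%98%81%ED%99%94+' prefix in the second URL is simply the pre-encoded word '영화' ('movie'). A minimal illustration, assuming Python 2's urllib:

# -*- coding: utf-8 -*-
import urllib

movie_name = u"괴물"
print urllib.quote(movie_name.encode("utf8"))   # %EA%B4%B4%EB%AC%BC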