Example #1
def wave():
    try:
        waveid = binascii.unhexlify(request.args[0])
    except IndexError:
        #?w= old query string method supported
        waveid = urllib.unquote_plus(request.vars.get('w'))
    if not waveid:
        return 'No waveid specified'
    return dict(waveid=waveid,
                quoted_waveid=qp(qp(waveid))) # no idea why i need to quote twice
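Here qp is urllib's quote_plus (several examples below import it as `from urllib import quote_plus as qp`). A minimal sketch of what the double quoting does, with a made-up wave ID:

from urllib import quote_plus as qp

waveid = 'googlewave.com!w+abc123'  # hypothetical wave ID
print(qp(waveid))      # googlewave.com%21w%2Babc123
print(qp(qp(waveid)))  # googlewave.com%2521w%252Babc123 -- the % signs get escaped again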
Example #2
File: bunny1.py Project: dreiss/bunny1
 def _promote(self, arg):
     """prints out the given url, but with "promote" stripped"""
     target = self._my_url() + '?' + qp(arg)
     raise Content(
         target + "<br>" +
         '<a href="' + cgi.escape(target) + '">' + target + "</a>"
         )
Example #3
    def search(q, raw=False, strict=False):
        """ Search nrk for stuff

            Params:
                    raw(bool): used by cli,
                    strict(bool): limit the search to an exact match

            Returns:
                    If raw is false it will return a Program, Episode or series,
                    else json

        """
        s = _fetch('search/%s' % qp(q))

        if strict:
            s['hits'] = [item for item in s['hits']
                         if item['hit'].get('title', '').lower() == q.lower()]

        if s:
            if not raw:
                if s['hits'] is None:
                    return []
                return list(filter(None, map(_build, s['hits'])))
            else:
                return s
        else:
            return []
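A hypothetical pair of calls, assuming _fetch returns the parsed JSON from NRK's search endpoint:

matches = search('skam', strict=True)   # only hits whose title is exactly 'skam'
raw_json = search('skam', raw=True)     # the untouched API response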
Example #4
File: lazyme.py Project: Ar11rA/Lazyme
def band_top():
	goog_url = "https://www.google.co.in/search?q="
	query=raw_input("Enter band:")
	band_name=query
	query=query+" top tens"
	url = goog_url + qp(query)
	print url
	req = requests.get(url)
	result = req.content
	link_start = result.find("http://www.thetoptens.com")
	link_end = result.find("&amp",link_start)
	link = result[link_start:link_end]
	ctr=1
	print link
	req=requests.get(link)
	data=req.content
	text_file = open("Output.txt", "w")
	text_file.write(str(data))
	text_file.close()
	s2=''
	s3=''
	soup=BeautifulSoup(data,"html.parser")
	print "Top 10:"
	for s2 in soup.findAll("div", {"id" : re.compile('i[0-9]*')}):
	    s3=s2.find('b').text
	    s3 = s3.encode('utf-8')
	    if(ctr!=1):
	       dwn_url=yout_url(s3+band_name)
	       song_dwnld(dwn_url)
	    ctr=ctr+1
	    if ctr == 12:
	       break
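Several scripts below repeat this find/"&amp" slicing to pull the first result URL out of a raw Google page. A small hypothetical helper capturing the pattern:

def first_link(html, prefix):
    """Return the first URL in html that starts with prefix, cut at '&amp' (hypothetical helper)."""
    start = html.find(prefix)
    if start == -1:
        return None
    return html[start:html.find("&amp", start)]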
Example #5
File: nrkdl.py Project: esp0/nrkdl
    def search(q, raw=False, strict=False):
        """ Search nrk for stuff

            Params:
                    raw(bool): used by cli,
                    strict(bool): limit the search to an exact match

            Returns:
                    If raw is false it will return a Program, Episode or series,
                    else json

        """
        s = _fetch('search/%s' % qp(q))

        if strict:
            s['hits'] = [item for item in s['hits']
                         if item['hit'].get('title', '').lower() == q.lower()]

        if s:
            if not raw:
                if s['hits'] is None:
                    return []
                return filter(None, map(_build, s['hits']))
            else:
                return s
        else:
            return []
Example #6
File: views.py Project: yask123/api
def index(request):
    if request.method == 'GET':
        name = request.GET.get('name', '')
        location = request.GET.get('location')
        acc_token = request.GET.get('token')
        if location:
            geolocator = Nominatim()
            location = geolocator.geocode(location)
            location = str(location.latitude) + ',' + str(location.longitude)
        else:
            lat = request.GET.get('lat')
            lon = request.GET.get('lon')
            location = str(lat) + ',' + str(lon)

        print name, location, acc_token

        if acc_token:
            graph = GraphAPI(acc_token)
        else:
            graph = GraphAPI(
                'CAACEdEose0cBAPJRZA8xHkMmbokHYBCUyjcKxZBohVhzJnGlm2ETlOYESQpEjG1Gj6ykTV4FMmhqMUrgFsJp0HdH4TszHwCkoMA8PS8L2MRFth3w3Wm7ucx4xMglc9ZBZAMhnyrr3XNAlH6MHZBtGmeWusWvzu4GSt4Mt9oS2KIOkWh70WhQ3ktOUC40PgChklQN31X0EgAZDZD'
            )

        search = name
        search = qp(search)

        result = graph.get('search?type=place&q=' + search + '&center=' +
                           location)
        page_id = result['data'][0]['id']

        params = 'fields=phone,likes,current_location,about,website,food_styles,description,hours,awards,price_range,location,booking_agent,is_verified,offers,public_transit,founded,products,emails,parking'
        a = str(page_id) + '?' + params
        cache = {}
        cache['facebook'] = {}
        cache['google'] = {}

        cache['facebook'] = {'fb_page_url': 'http://facebook.com/' + page_id}
        params = params.split(',')
        for each in params:
            try:
                cache['facebook'][each] = str(graph.get(a)[each])
            except:
                pass

        # Google Data
        url = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=' + location + '&radius=5000&name=' + name + '&key=AIzaSyDAERlVmOrLWdq0pHF5fK3c2cHmCSvy55I'
        print url
        r = requests.get(url)
        google_result = json.loads(r.text)
        cache['google'] = google_result

        return HttpResponse(json.dumps(cache), content_type="application/json")

    elif request.method == 'POST':
        t = request.POST.get("task", "")
        a = Task(text=t, date=timezone.now())
        a.save()
        return redirect('/')
Example #7
File: api.py Project: hcvst/wavedirectory
def waves():
    try:
        version = int(request.vars.get(C_API_QS_VERSION))
        index = request.vars.get(C_API_QS_INDEX)
        order_by = request.vars.get(C_API_QS_ORDER, C_API_DEFAULT_ORDER)
        pagesize = int(request.vars.get(C_API_QS_PAGESIZE, C_API_DEFAULT_PAGESIZE))
    except:
        return T_API_INVALID_REQUEST
    if not version: return T_API_VERSION_REQUIRED 
    if version != C_API_VERSION: return T_API_UNKNOWN_VERSION
    if pagesize < 0: return T_API_PAGESIZE_TOO_SMALL
    if pagesize > C_API_MAX_SIZE: return T_API_PAGESIZE_TOO_LARGE
    waves, next_index = get_waves(order_by=order_by,
                                  pagesize=pagesize,
                                  index=index)
    if next_index:
        vars = request.vars
        vars[C_API_QS_INDEX] = next_index
        next_url = C_SERVER+URL(r=request, vars=vars)
    else:
        next_url = None
    d = dict()
    d['api'] = dict(api_name=C_API_NAME,
                    api_version=C_API_VERSION,
                    api_author='Hans Christian v. Stockhausen',
                    contact='http://xri.net/=hc',
                    twitter='http://twitter.com/wavedirectory',
                    wave_robot=C_BOT_ADDRESS)
    d['results'] = dict(next_url=next_url,
                        waves=[dict(waveid=w.key().name(),
                                    title=w.str_title,
                                    avatar=w.str_avatar,
                                    votes=w.str_votes,
                                    participants=w.str_participants,
                                    wave_url=C_WAVEBROWSER+\
                                    qp(qp(w.key().name())))
                               for w in waves])
    response.write(json(d),escape=False)
    response.headers['Content-Type']='text/json'
    response.body.seek(0)
    return response.body.read()
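A minimal client-side sketch for walking the paginated results, assuming the field names above and a hypothetical server URL:

import json
import urllib2

url = 'http://example.com/api/waves?v=1'  # hypothetical server URL
while url:
    data = json.loads(urllib2.urlopen(url).read())
    for wave in data['results']['waves']:
        print(wave['waveid'])
    url = data['results']['next_url']  # None once the last page is reached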
Example #8
def get_youtube_links(pl):
    base_url = "https://www.youtube.com/results?search_query="

    for i in pl:
        query = '' + qp(i["song_name"])

        artist_arr = [qp(j) for j in i["artists"]]
        artists = artist_arr[0]
        for j in artist_arr[1:]:
            artists = artists + '+' + j

        query += '+'+artists
        req_url = base_url + query

        if DEBUG:
            print 'Search Query: ' + req_url

        result = urllib2.urlopen(req_url)
        html = result.read()
        soup = BeautifulSoup(html,"lxml")

        links = soup.find_all('h3',class_='yt-lockup-title')
        # TODO scrape title
        title_texts = [link.a.string for link in links]

        if DEBUG:
            print "\nLINK TITLES\n"
            for title in title_texts:
                print title,type(title)

        links_arr = [link.a['href'] for link in links]

        if DEBUG:
            print "\nLINKS\n"
            for link in links_arr:
                print link,type(link)

        i['yt_link'] = links_arr[0]
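The artist loop above is a hand-rolled join; the same query string can be built in one line:

        query = '+'.join([qp(i["song_name"])] + [qp(j) for j in i["artists"]])
        req_url = base_url + query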
Example #9
File: lazyme.py Project: Ar11rA/Lazyme
def yout_url(query):
    you_url = "https://www.youtube.com/results?search_query=" #searching in youtube
    #query=raw_input("Enter song :") --- test purpose
    url = you_url + qp(query)
    #print url ---just checking if youtube urls are right
    req = requests.get(url)
    result = req.content
    soup=BeautifulSoup(result,"html.parser")
    link=""
    for link in soup.find_all('a'):
        if  "watch" in (link.get('href')):
           fin_you="https://www.youtube.com"+link.get('href')
           return(fin_you) # -- return youtube link for supplying to pafy
           break
Example #10
def grab(search=''):
	search = qp(search)
	site = "https://www.google.com/search?site=&tbm=isch&source=hp&biw=1112&bih=613&q="+search+"&oq=backst&gs_l=img.3.0.0l10.1011.3209.0.4292.8.7.1.0.0.0.246.770.0j3j1.4.0..3..0...1.1.64.img..3.5.772.KyXkrVfTLT4#tbm=isch&q=back+street+boys+I+want+it+that+way"
	hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
	       'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
	       'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
	       'Accept-Encoding': 'none',
	       'Accept-Language': 'en-US,en;q=0.8',
	       'Connection': 'keep-alive'}
	req = urllib2.Request(site, headers=hdr)

	try:
	    page = urllib2.urlopen(req)
	except urllib2.HTTPError, e:
	    print e.fp.read()
Example #11
def song_search(search_term):
    if search_term and search_term != '':
        search_term = qp(search_term)
        try:
            response = urlopen('https://www.youtube.com/results?search_query=' + search_term)

            html = response.read()
            soup = BeautifulSoup(html, 'html.parser')
            for link in soup.find_all('a'):
                if '/watch?v=' in link.get('href'):
                    video_link = link.get('href')
                    break

            title = soup.find("a", "yt-uix-tile-link").text
        except:
            title = 'Not Found'
            video_link = ''
        return {'title':title, 'video_link':video_link}
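A hypothetical call (note that video_link is a relative /watch?v= path):

result = song_search('bohemian rhapsody queen')
print(result['title'] + ' -> https://www.youtube.com' + result['video_link'])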
Example #12
def song_search(search_term):
    if search_term and search_term != '':
        search_term = qp(search_term)
        try:
            response = urlopen(
                'https://www.youtube.com/results?search_query=' + search_term)

            html = response.read()
            soup = BeautifulSoup(html, 'html.parser')
            for link in soup.find_all('a'):
                if '/watch?v=' in link.get('href'):
                    video_link = link.get('href')
                    break

            title = soup.find("a", "yt-uix-tile-link").text
        except:
            title = 'Not Found'
            video_link = ''
        return {'title': title, 'video_link': video_link}
Example #13
def grab(search=''):
    search = qp(search)
    site = "https://www.google.com/search?site=&tbm=isch&source=hp&biw=1112&bih=613&q=" + search + "&oq=backst&gs_l=img.3.0.0l10.1011.3209.0.4292.8.7.1.0.0.0.246.770.0j3j1.4.0..3..0...1.1.64.img..3.5.772.KyXkrVfTLT4#tbm=isch&q=back+street+boys+I+want+it+that+way"
    hdr = {
        'User-Agent':
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
        'Accept':
        'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
        'Accept-Encoding': 'none',
        'Accept-Language': 'en-US,en;q=0.8',
        'Connection': 'keep-alive'
    }
    req = urllib2.Request(site, headers=hdr)

    try:
        page = urllib2.urlopen(req)
    except urllib2.HTTPError, e:
        print e.fp.read()
Example #14
def listVideos(category):
    """
    Create the list of playable videos in the Kodi interface.
    """
    xbmcplugin.setPluginCategory(_handle, category)
    xbmcplugin.setContent(_handle, 'videos')
    eps_list = middle.getEpisodes(category, 'full')
    if eps_list:
        for ep in eps_list:
            item = xbmcgui.ListItem(label=ep['episode'])
            item.setProperty('IsPlayable', 'true')
            url = get_url(action='find_relevant', \
                          category=ep['episode'].encode('utf-8'), \
                          link=ep['episode_link'])

            # context menu
            commands = []
            args = '{}|{}|{}'.format(ep['episode'].encode('utf-8'),
                                     ep['episode_link'], _handle)
            args = qp(args)
            script = "special://home/addons/plugin.video.shikionline/" \
                     "resources/context/manual_select.py"
            runner = 'XBMC.RunScript({}, {})'.format(script, args)
            name = "Выбрать вручную"
            commands.append((
                name,
                runner,
            ))
            item.addContextMenuItems(commands)
            xbmcplugin.addDirectoryItem(_handle, url, item)

        xbmcplugin.endOfDirectory(_handle,
                                  succeeded=True,
                                  updateListing=False,
                                  cacheToDisc=True)
    else:
        alert('Упс!', 'Что-то пошло не так! Возможно до этого аниме ' \
                      'добрались правообладатели.')
        # "Oops! Something went wrong! The copyright holders may have gotten to this anime."
        xbmcplugin.endOfDirectory(_handle)
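manual_select.py is presumably the other half of this hand-off; a sketch of the receiving side, assuming the episode|link|handle packing above and that Kodi passes the quoted string as the first script argument:

import sys
from urllib import unquote_plus

raw = unquote_plus(sys.argv[1])                 # the string built with qp(args) above
episode, episode_link, handle = raw.split('|')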
Example #15
File: utils.py Project: c24b/scholarScape
def scholarize(query="", nr_results_per_page="100", exact="", at_least_one="",
               without="", where_words_occurs="", author="", publication="",
               start_date="", end_date="", areas=[]):
    """
    Build an advanced Google Scholar search URL.
    areas may contain "bio", Biology, Life Sciences, and Environmental Science
                      "med", Medicine, Pharmacology, and Veterinary Science
                      "bus", Business, Administration, Finance, and Economics
                      "phy", Physics, Astronomy, and Planetary Science
                      "chm", Chemistry and Materials Science
                      "soc", Social Sciences, Arts, and Humanities
                      "eng", Engineering, Computer Science, and Mathematics
    """
    return ("http://scholar.google.com/scholar?\
             as_q="+ qp(query) +"&num="+ nr_results_per_page +"& \
             as_epq="+ qp(exact) +"&as_oq="+ qp(at_least_one) +"& \
             as_eq="+ qp(without) +"&as_occt="+ qp(where_words_occurs) +"& \
             as_sauthors="+ qp(author) +"&as_publication="+ qp(publication) +"& \
             as_ylo="+ start_date +"&as_yhi="+ end_date +"& \
             btnG=Search+Scholar&hl=en& \
             as_subj=" + "&as_subj=".join(areas)).replace(" ","")
Example #16
def findRelevant(url):
    """Find relevant video to playback."""
    ass = None
    video = None  # stays None when locale_type is unrecognized
    if locale_type == 'dub':
        video = middle.getRelevantVideo(url, teams_priority)
    elif locale_type == 'sub':
        vass = middle.getRelevantSubVideo(url)
        if vass:
            video, ass = vass
        else:
            video = None
    if not video:
        alert('Нет локализации', 'Для данной серии отсутствует ' \
            'выбранная локализация типа %s. Вы можете попробовать выбрать ' \
            'эпизод вручную из контекстного меню.' % locale_type)
        # "No localization: the chosen localization type %s is missing for this
        # episode. You can try picking the episode manually from the context menu."
        return
    play_item = xbmcgui.ListItem(path=video)
    if ass:
        play_item.setSubtitles([ass])
    video = qp(video)
    xbmcplugin.setResolvedUrl(_handle, True, listitem=play_item)
    if re.search(r'127\.0\.0\.1', video):
        xbmcgui.Dialog().notification('Перемотка не работает', \
            'Для видео с сайта smotret-anime.ru перемотка не работает', \
            xbmcgui.NOTIFICATION_INFO, 5000)
        # "Seeking does not work for videos from smotret-anime.ru"
Example #17
File: lazyme.py Project: Ar11rA/Lazyme
def movie_ost():
	goog_url="https://www.google.co.in/search?q="
	movie_name=raw_input('Enter movie name:') 
	query=goog_url+qp(movie_name)
	req=requests.get(query)
	result=req.content
	link_start = result.find("http://www.imdb.com")
	link_end = result.find("&amp",link_start)
	link = result[link_start:link_end]
	text_file = open("Output.txt", "w")
	text_file.write(str(result))
	text_file.close()
	link=link+"soundtrack"
	req=requests.get(link)
	data=req.content
	soup=BeautifulSoup(data,"html.parser")
	print "Movie Songs : "
	for s2 in soup.findAll("div", {"id" : re.compile('sn[0-9]{7}')}):
	    s3=s2.text
	    s3 = s3.encode('utf-8')
	    song_end=s3.find('\n')
	    fin_son=s3[0:song_end]
	    dwn_url=yout_url(fin_son+movie_name)
	    song_dwnld(dwn_url)
Example #18
File: bsd4.py Project: Ar11rA/Lazyme
from bs4 import BeautifulSoup
import re
import requests
import os
import sys
import pafy
from urllib2 import urlopen
from urllib import quote_plus as qp
goog_url="https://www.google.co.in/search?q="
movie_name=raw_input('Enter movie name:') 
query=goog_url+qp(movie_name)
print query
req=requests.get(query)
result=req.content
link_start = result.find("http://www.imdb.com")
link_end = result.find("&amp",link_start)
link = result[link_start:link_end]
text_file = open("Output.txt", "w")
text_file.write(str(result))
text_file.close()
link=link+"soundtrack"
req=requests.get(link)
data=req.content
soup=BeautifulSoup(data,"html.parser")
print "Movie Songs : "
for s2 in soup.findAll("div", {"id" : re.compile('sn[0-9]{7}')}):
    s3=s2.text
    s3 = s3.encode('utf-8')
    song_end=s3.find('\n')
    fin_son=s3[0:song_end]
    print fin_son
Example #19
File: bsd2.py Project: Ar11rA/Lazyme
from bs4 import BeautifulSoup
import requests
import re
import youtube_dl
import os
import sys
from urllib2 import urlopen
from urllib import quote_plus as qp
you_url = "https://www.youtube.com/results?search_query="
query=raw_input("Enter song :")
url = you_url + qp(query)
print url
req = requests.get(url)
result = req.content
soup=BeautifulSoup(result,"html.parser")
link=""
for link in soup.find_all('a'):
    if  "watch" in (link.get('href')):
       fin_you="https://www.youtube.com"+link.get('href')
       print(fin_you)
       break
    else:
        {}
Example #20
def main(verbose=False, dry_run=False):
  """
    Core of the backup script which implements the backup strategy.
  """

  def isSSHPasswordLess(host, user=None, port=22):
    """
      This method tests whether SSH authentication on a remote machine can be done via
      an RSA key/certificate or requires a password.
    """
    # If no user given try "user-less" connection
    user_string = ''
    if user not in (None, ''):
      user_string = "%s@" % user
    TEST_STRING = "SSH KEY AUTH OK"
    test_cmd = """ssh -p %s %s%s "echo '%s'" """ % (port, user_string, host, TEST_STRING)
    if verbose:
      print " INFO - run `%s`..." % test_cmd
    ssh = pexpect.spawn(test_cmd, timeout=TIMEOUT)
    time.sleep(1)
    if verbose:
      import StringIO
      ssh_log = StringIO.StringIO()
      ssh.log_file = ssh_log
    ret_code = ssh.expect([TEST_STRING, '.ssword:*', pexpect.EOF, pexpect.TIMEOUT])
    time.sleep(1)
    password_less = None
    if ret_code == 0:
      password_less = True
    elif ret_code == 1:
      password_less = False
    else:
      print "ERROR - SSH server '%s:%s' is unreachable" % (host, port)
    if verbose:
      nice_log(ssh_log.getvalue(), 'ssh')
      ssh_log.close()
    ssh.close()
    if password_less:
      print " INFO - SSH connection to '%s:%s' is password-less" % (host, port)
    else:
      print " INFO - SSH connection to '%s:%s' require password" % (host, port)
    return password_less


  ######################
  # Self checking phase
  ######################

  # Announce the first phase
  print "=" * 40
  print "Backup script self-checking phase"
  print "=" * 40

  # Check that we are running this script on a UNIX system
  from os import name as os_name
  if os_name != 'posix':
    print "FATAL - This script doesn't support systems other than POSIX's"
    sys.exit(1)

  # Check that every command is installed
  checkCommand(['rdiff-backup', 'rm', 'tar', 'bzip2'])

  # Check existence of main backup folder
  if not exists(abspath(BACKUP_DIR)):
    print "FATAL - Main backup folder '%s' does't exist !" % BACKUP_DIR
    sys.exit(1)

  # This variable indicates if the pexpect module is required or not
  is_pexpect_required = False

  # Check data and requirements for each backup
  # Doing this right now is nicer to the user: he doesn't have to wait for the end of the Xth backup to get an error about the (X+1)th
  for backup in backup_list:
    # Normalize backup type
    backup_type = backup['type'].lower().strip()
    if backup_type.find('ftps') != -1:
      backup_type = 'FTPS'
    elif backup_type.find('ftp') != -1:
      backup_type = 'FTP'
    elif backup_type == 'ssh':
      backup_type = 'SSH'
    elif backup_type.find('mysql') != -1:
      if backup_type.find('ssh') != -1:
        backup_type = 'MYSQLDUMP+SSH'
      else:
        backup_type = 'MYSQLDUMP'
    else:
      print "ERROR - Backup type '%s' for '%s' is unrecognized: ignore it." % (backup['type'], title)
      # Reset backup type
      backup['type'] = ''
      continue
    backup['type'] = backup_type
    # Check if pexpect is required
    if backup_type.find('SSH') != -1:
      is_pexpect_required = True
    # Check requirements
    REQUIRED_COMMANDS = { 'FTP'          : 'lftp'
                        , 'FTPS'         : 'lftp'
                        , 'SSH'          : ['rsync', 'ssh']
                        , 'MYSQLDUMP'    : 'mysqldump'
                        , 'MYSQLDUMP+SSH': 'ssh' # TODO: How to check that 'mysqldump' is present on the distant machine ???
                        }
    checkCommand(REQUIRED_COMMANDS[backup_type])
    # Set default parameters if missing
    DEFAULT_PARAMETERS = { 'FTP'          : {'port': 21}
                         , 'FTPS'         : {'port': 21}
                         , 'SSH'          : {'port': 22}
                         , 'MYSQLDUMP'    : {'db_port': 3306}
                         , 'MYSQLDUMP+SSH': {'port': 22, 'db_port': 3306}
                         }
    default_config = DEFAULT_PARAMETERS.get(backup_type, {}).copy()
    default_config.update(backup)
    backup.update(default_config)

  # Import pexpect if necessary
  if is_pexpect_required:
    try:
      import pexpect
    except ImportError:
      print "FATAL - pexpect python module not found: it is required to make backup over SSH !"
      sys.exit(1)



  ######################
  # Proceed each backup
  ######################

  for backup in backup_list:

    # Announce the backup item
    title = backup['title']
    print ""
    print "=" * 40
    print "Backup item: %s" % title
    print "=" * 40

    # Create backup folder structure if needed
    main_folder = abspath(SEP.join([BACKUP_DIR, backup['local_dir']]))
    backup_folders = {
        'main'    : main_folder
      , 'archives': abspath(SEP.join([main_folder, 'monthly-archives']))  # Contains monthly archives
      , 'diff'    : abspath(SEP.join([main_folder, 'rdiff-repository']))  # Contains current month differential backup
      , 'mirror'  : abspath(SEP.join([main_folder, 'mirror']))            # Contains a mirror of the remote folder
      }
    for (folder_type, folder_path) in backup_folders.items():
      if not exists(folder_path):
        if not dry_run:
          makedirs(folder_path)
        print " INFO - '%s' folder created" % folder_path


    ##########
    # Step 1 - Mirror data with the right tool
    ##########

    ### Start of this step
    backup_type = backup['type']
    print " INFO - Start mirroring via %s method" % backup_type

    ### Mirror remote data via FTP or FTPS
    if backup_type in ['FTP', 'FTPS']:
      # Generate FTP url
      remote_url = "ftp://%s:%s@%s:%s/%s" % ( qp(backup['user'])
                                            , qp(backup['password'])
                                            , qp(backup['host'])
                                            , backup['port']
                                            , q(backup['remote_dir'])
                                            )
      # Force SSL layer for secure FTP
      secure_options = ''
      if backup_type == 'FTPS':
        secure_options = 'set ftp:ssl-force true && set ftp:ssl-protect-data true && '
      # Get a copy of the remote directory
      ftp_backup = """lftp -c '%sset ftp:list-options -a && open -e "mirror -e --verbose=3 --parallel=2 . %s" %s'""" % (secure_options, backup_folders['mirror'], remote_url)
      run(ftp_backup, verbose, dry_run)


    ### Mirror remote data via SSH
    elif backup_type == 'SSH':

      ## Test SSH password-less connection
      password_less = isSSHPasswordLess(backup['host'], backup['user'], backup['port'])
      if password_less == None:
        print "ERROR - Can't guess authentication method of '%s:%s'" % (backup['host'], backup['port'])
        continue
      if not password_less and not (backup.has_key('password') and len(backup['password']) > 0):
        print "ERROR - No password provided !"
        continue
      # Use rsync + ssh to make a mirror of the distant folder
      user_string = ''
      if backup['user'] not in (None, ''):
        user_string = "%s@" % backup['user']
      remote_url = "%s%s:%s" % (user_string, backup['host'], backup['remote_dir'])
      rsync_backup = """rsync -axHvz --numeric-ids --progress --stats --delete --partial --delete-excluded -e 'ssh -2 -p %s' %s %s""" % (backup['port'], remote_url, backup_folders['mirror'])

      # If it is passwordless, don't use pexpect but run() method instead
      if password_less:
        run(rsync_backup, verbose, dry_run)
      else:
        # In this case we use pexpect to send the password
        if verbose:
          print " INFO - Run `%s`..." % rsync_backup  # XXX Duplicate with 'run()' method
        if not dry_run:
          p = pexpect.spawn(rsync_backup)   # TODO: create a method similar to run() but that take a password as parameter to handle pexpect nicely
          import StringIO
          p_log = StringIO.StringIO()
          p.setlog(p_log)
          i = p.expect(['.ssword:*', pexpect.EOF, pexpect.TIMEOUT], timeout=TIMEOUT)
          time.sleep(1)
          # Password required
          if i == 0:
            # rsync ask for a password. Send it.
            p.sendline(backup['password'])
            print " INFO - SSH password sent"
            j = p.expect([pexpect.EOF, pexpect.TIMEOUT], timeout=TIMEOUT)
            time.sleep(1)
            if j == 1:
              print "ERROR - Backup via SSH reached timeout"
              continue
          elif i == 1:
            print "ERROR - Backup via SSH didn't end correctly"
            continue
          elif i == 2:
            print "ERROR - Backup via SSH reached timeout"
            continue
          # Terminate child process
          nice_log(p_log.getvalue(), 'rsync')
          p_log.close()
          p.close()


    ### Mirror remote mysql database
    elif backup_type in ['MYSQLDUMP', 'MYSQLDUMP+SSH']:
      # Build mysqldump command
      mysqldump = """mysqldump --host=%s --port=%s --user=%s --password=%s --opt""" % (backup['db_host'], backup['db_port'], backup['db_user'], backup['db_pass'])
      # if no database name provided, dump all databases
      db_to_dump = '--all-databases'
      if backup.has_key('db_name') and len(backup['db_name']) > 0:
        db_to_dump = '--databases %s' % backup['db_name']
      mysqldump += ' %s' % db_to_dump
      # Build final command
      sql_file = abspath(SEP.join([backup_folders['mirror'], SQL_FILENAME]))
      if backup_type == 'MYSQLDUMP+SSH':
        # Test SSH password-less connection
        password_less = isSSHPasswordLess(backup['host'], backup['user'], backup['port'])
        if password_less == None:
          print "FATAL - Can't guess authentication method of '%s:%s'" % (backup['host'], backup['port'])
          continue
        cmd = """ssh -C -2 -p %s %s@%s "%s" > %s""" % (backup['port'], backup['user'], backup['host'], mysqldump, sql_file)
      else:
        cmd = "%s > %s" % (mysqldump, sql_file)
      run(cmd, verbose, dry_run)


    ### Mirroring is successful
    print " INFO - %s mirroring succeed" % backup_type


    ##########
    # Step 2 - Update incremental backup
    ##########

    print " INFO - Add the mirror as increment"

    # Use rdiff-backup to do efficient incremental backups
    rdiff_cmd = """rdiff-backup "%s" "%s" """ % (backup_folders['mirror'], backup_folders['diff'])
    run(rdiff_cmd, verbose, dry_run)

    print " INFO - Increment added"


    ##########
    # Step 3 - Generate monthly archives
    ##########

    # Generate monthly archive name
    today_items   = datetime.date.today().timetuple()
    current_year  = today_items[0]
    current_month = today_items[1]
    monthly_archive = abspath("%s%s%04d-%02d.tar.bz2" % (backup_folders['archives'], SEP, current_year, current_month))
    snapshot_date = "%04d-%02d-01" % (current_year, current_month)

    # If month started, make a bzip2 archive
    if not exists(monthly_archive):
      print " INFO - Generate archive of previous month (= %s 00:00 snapshot)" % snapshot_date
      tmp_archives_path = abspath(backup_folders['archives'] + SEP + "tmp")
      if exists(tmp_archives_path):
        run("""rm -rf "%s" """ % tmp_archives_path, verbose, dry_run)
        print " INFO - Previous temporary folder '%s' removed" % tmp_archives_path
      if not dry_run:
        mkdir(tmp_archives_path)
      print " INFO - Temporary folder '%s' created" % tmp_archives_path
      rdiff_cmd = """rdiff-backup -r "%s" "%s" "%s" """ % ( snapshot_date
                                                          , backup_folders['diff']
                                                          , tmp_archives_path
                                                          )
      run(rdiff_cmd, verbose, dry_run)
      run("tar c -C %s ./ | bzip2 > %s" % (tmp_archives_path, monthly_archive), verbose, dry_run)
      # Delete the tmp folder
      run("""rm -vrf "%s" """ % tmp_archives_path, verbose, dry_run)
    else:
      print " INFO - No need to generate archive: previous month already archived"

    # Keep last 32 increments (31 days = 1 month + 1 day)
    print " INFO - Remove increments older than 32 days"
    rdiff_cmd = """rdiff-backup --force --remove-older-than 32B "%s" """ % backup_folders['diff']
    run(rdiff_cmd, verbose, dry_run)

    # Final message before next backup item
    print " INFO - Backup successful"
Example #21
def main(verbose=False, dry_run=False):
    """
    Core of the backup script which implements the backup strategy.
  """
    def isSSHPasswordLess(host, user=None, port=22):
        """
      This method tests whether SSH authentication on a remote machine can be done via
      an RSA key/certificate or requires a password.
    """
        # If no user given try "user-less" connection
        user_string = ''
        if user not in (None, ''):
            user_string = "%s@" % user
        TEST_STRING = "SSH KEY AUTH OK"
        test_cmd = """ssh -p %s %s%s "echo '%s'" """ % (port, user_string,
                                                        host, TEST_STRING)
        if verbose:
            print " INFO - run `%s`..." % test_cmd
        ssh = pexpect.spawn(test_cmd, timeout=TIMEOUT)
        time.sleep(1)
        if verbose:
            import StringIO
            ssh_log = StringIO.StringIO()
            ssh.log_file = ssh_log
        ret_code = ssh.expect(
            [TEST_STRING, '.ssword:*', pexpect.EOF, pexpect.TIMEOUT])
        time.sleep(1)
        password_less = None
        if ret_code == 0:
            password_less = True
        elif ret_code == 1:
            password_less = False
        else:
            print "ERROR - SSH server '%s:%s' is unreachable" % (host, port)
        if verbose:
            nice_log(ssh_log.getvalue(), 'ssh')
            ssh_log.close()
        ssh.close()
        if password_less:
            print " INFO - SSH connection to '%s:%s' is password-less" % (host,
                                                                          port)
        else:
            print " INFO - SSH connection to '%s:%s' require password" % (host,
                                                                          port)
        return password_less

    ######################
    # Self checking phase
    ######################

    # Announce the first phase
    print "=" * 40
    print "Backup script self-checking phase"
    print "=" * 40

    # Check that we are running this script on a UNIX system
    from os import name as os_name
    if os_name != 'posix':
        print "FATAL - This script doesn't support systems other than POSIX's"
        sys.exit(1)

    # Check that every command is installed
    checkCommand(['rdiff-backup', 'rm', 'tar', 'bzip2'])

    # Check existence of main backup folder
    if not exists(abspath(BACKUP_DIR)):
        print "FATAL - Main backup folder '%s' does't exist !" % BACKUP_DIR
        sys.exit(1)

    # This variable indicates if the pexpect module is required or not
    is_pexpect_required = False

    # Check data and requirements for each backup
    # Doing this right now is nicer to the user: he doesn't have to wait for the end of the Xth backup to get an error about the (X+1)th
    for backup in backup_list:
        # Normalize backup type
        backup_type = backup['type'].lower().strip()
        if backup_type.find('ftps') != -1:
            backup_type = 'FTPS'
        elif backup_type.find('ftp') != -1:
            backup_type = 'FTP'
        elif backup_type == 'ssh':
            backup_type = 'SSH'
        elif backup_type.find('mysql') != -1:
            if backup_type.find('ssh') != -1:
                backup_type = 'MYSQLDUMP+SSH'
            else:
                backup_type = 'MYSQLDUMP'
        else:
            print "ERROR - Backup type '%s' for '%s' is unrecognized: ignore it." % (
                backup['type'], title)
            # Reset backup type
            backup['type'] = ''
            continue
        backup['type'] = backup_type
        # Check if pexpect is required
        if backup_type.find('SSH') != -1:
            is_pexpect_required = True
        # Check requirements
        REQUIRED_COMMANDS = {
            'FTP': 'lftp',
            'FTPS': 'lftp',
            'SSH': ['rsync', 'ssh'],
            'MYSQLDUMP': 'mysqldump',
            'MYSQLDUMP+SSH':
            'ssh'  # TODO: How to check that 'mysqldump' is present on the distant machine ???
        }
        checkCommand(REQUIRED_COMMANDS[backup_type])
        # Set default parameters if missing
        DEFAULT_PARAMETERS = {
            'FTP': {
                'port': 21
            },
            'FTPS': {
                'port': 21
            },
            'SSH': {
                'port': 22
            },
            'MYSQLDUMP': {
                'db_port': 3306
            },
            'MYSQLDUMP+SSH': {
                'port': 22,
                'db_port': 3306
            }
        }
        default_config = DEFAULT_PARAMETERS.get(backup_type, {}).copy()
        default_config.update(backup)
        backup.update(default_config)

    # Import pexpect if necessary
    if is_pexpect_required:
        try:
            import pexpect
        except ImportError:
            print "FATAL - pexpect python module not found: it is required to make backup over SSH !"
            sys.exit(1)

    ######################
    # Proceed each backup
    ######################

    for backup in backup_list:

        # Announce the backup item
        title = backup['title']
        print ""
        print "=" * 40
        print "Backup item: %s" % title
        print "=" * 40

        # Create backup folder structure if needed
        main_folder = abspath(SEP.join([BACKUP_DIR, backup['local_dir']]))
        backup_folders = {
            'main': main_folder,
            'archives': abspath(SEP.join([main_folder, 'monthly-archives'])),  # Contains monthly archives
            'diff': abspath(SEP.join([main_folder, 'rdiff-repository'])),      # Contains current month differential backup
            'mirror': abspath(SEP.join([main_folder, 'mirror'])),              # Contains a mirror of the remote folder
        }
        for (folder_type, folder_path) in backup_folders.items():
            if not exists(folder_path):
                if not dry_run:
                    makedirs(folder_path)
                print " INFO - '%s' folder created" % folder_path

        ##########
        # Step 1 - Mirror data with the right tool
        ##########

        ### Start of this step
        backup_type = backup['type']
        print " INFO - Start mirroring via %s method" % backup_type

        ### Mirror remote data via FTP or FTPS
        if backup_type in ['FTP', 'FTPS']:
            # Generate FTP url
            remote_url = "ftp://%s:%s@%s:%s/%s" % (qp(
                backup['user']), qp(backup['password']), qp(
                    backup['host']), backup['port'], q(backup['remote_dir']))
            # Force SSL layer for secure FTP
            secure_options = ''
            if backup_type == 'FTPS':
                secure_options = 'set ftp:ssl-force true && set ftp:ssl-protect-data true && '
            # Get a copy of the remote directory
            ftp_backup = """lftp -c '%sset ftp:list-options -a && open -e "mirror -e --verbose=3 --parallel=2 . %s" %s'""" % (
                secure_options, backup_folders['mirror'], remote_url)
            run(ftp_backup, verbose, dry_run)

        ### Mirror remote data via SSH
        elif backup_type == 'SSH':

            ## Test SSH password-less connection
            password_less = isSSHPasswordLess(backup['host'], backup['user'],
                                              backup['port'])
            if password_less == None:
                print "ERROR - Can't guess authentication method of '%s:%s'" % (
                    backup['host'], backup['port'])
                continue
            if not password_less and not (backup.has_key('password')
                                          and len(backup['password']) > 0):
                print "ERROR - No password provided !"
                continue
            # Use rsync + ssh to make a mirror of the distant folder
            user_string = ''
            if backup['user'] not in (None, ''):
                user_string = "%s@" % backup['user']
            remote_url = "%s%s:%s" % (user_string, backup['host'],
                                      backup['remote_dir'])
            rsync_backup = """rsync -axHvz --numeric-ids --progress --stats --delete --partial --delete-excluded -e 'ssh -2 -p %s' %s %s""" % (
                backup['port'], remote_url, backup_folders['mirror'])

            # If it is passwordless, don't use pexpect but run() method instead
            if password_less:
                run(rsync_backup, verbose, dry_run)
            else:
                # In this case we use pexpect to send the password
                if verbose:
                    print " INFO - Run `%s`..." % rsync_backup  # XXX Duplicate with 'run()' method
                if not dry_run:
                    p = pexpect.spawn(
                        rsync_backup
                    )  # TODO: create a method similar to run() but that take a password as parameter to handle pexpect nicely
                    import StringIO
                    p_log = StringIO.StringIO()
                    p.setlog(p_log)
                    i = p.expect(['.ssword:*', pexpect.EOF, pexpect.TIMEOUT],
                                 timeout=TIMEOUT)
                    time.sleep(1)
                    # Password required
                    if i == 0:
                        # rsync ask for a password. Send it.
                        p.sendline(backup['password'])
                        print " INFO - SSH password sent"
                        j = p.expect([pexpect.EOF, pexpect.TIMEOUT],
                                     timeout=TIMEOUT)
                        time.sleep(1)
                        if j == 1:
                            print "ERROR - Backup via SSH reached timeout"
                            continue
                    elif i == 1:
                        print "ERROR - Backup via SSH didn't end correctly"
                        continue
                    elif i == 2:
                        print "ERROR - Backup via SSH reached timeout"
                        continue
                    # Terminate child process
                    nice_log(p_log.getvalue(), 'rsync')
                    p_log.close()
                    p.close()

        ### Mirror remote mysql database
        elif backup_type in ['MYSQLDUMP', 'MYSQLDUMP+SSH']:
            # Build mysqldump command
            mysqldump = """mysqldump --host=%s --port=%s --user=%s --password=%s --opt""" % (
                backup['db_host'], backup['db_port'], backup['db_user'],
                backup['db_pass'])
            # if no database name provided, dump all databases
            db_to_dump = '--all-databases'
            if backup.has_key('db_name') and len(backup['db_name']) > 0:
                db_to_dump = '--databases %s' % backup['db_name']
            mysqldump += ' %s' % db_to_dump
            # Build final command
            sql_file = abspath(
                SEP.join([backup_folders['mirror'], SQL_FILENAME]))
            if backup_type == 'MYSQLDUMP+SSH':
                # Test SSH password-less connection
                password_less = isSSHPasswordLess(backup['host'],
                                                  backup['user'],
                                                  backup['port'])
                if password_less == None:
                    print "FATAL - Can't guess authentication method of '%s:%s'" % (
                        backup['host'], backup['port'])
                    continue
                cmd = """ssh -C -2 -p %s %s@%s "%s" > %s""" % (
                    backup['port'], backup['user'], backup['host'], mysqldump,
                    sql_file)
            else:
                cmd = "%s > %s" % (mysqldump, sql_file)
            run(cmd, verbose, dry_run)

        ### Mirroring is successful
        print " INFO - %s mirroring succeed" % backup_type

        ##########
        # Step 2 - Update incremental backup
        ##########

        print " INFO - Add the mirror as increment"

        # Use rdiff-backup to do efficient incremental backups
        rdiff_cmd = """rdiff-backup "%s" "%s" """ % (backup_folders['mirror'],
                                                     backup_folders['diff'])
        run(rdiff_cmd, verbose, dry_run)

        print " INFO - Increment added"

        ##########
        # Step 3 - Generate monthly archives
        ##########

        # Generate monthly archive name
        today_items = datetime.date.today().timetuple()
        current_year = today_items[0]
        current_month = today_items[1]
        monthly_archive = abspath(
            "%s%s%04d-%02d.tar.bz2" %
            (backup_folders['archives'], SEP, current_year, current_month))
        snapshot_date = "%04d-%02d-01" % (current_year, current_month)

        # If a new month has started, make a bzip2 archive of the previous one
        if not exists(monthly_archive):
            print " INFO - Generate archive of previous month (= %s 00:00 snapshot)" % snapshot_date
            tmp_archives_path = abspath(backup_folders['archives'] + SEP +
                                        "tmp")
            if exists(tmp_archives_path):
                run("""rm -rf "%s" """ % tmp_archives_path, verbose, dry_run)
                print " INFO - Previous temporary folder '%s' removed" % tmp_archives_path
            if not dry_run:
                mkdir(tmp_archives_path)
            print " INFO - Temporary folder '%s' created" % tmp_archives_path
            rdiff_cmd = """rdiff-backup -r "%s" "%s" "%s" """ % (
                snapshot_date, backup_folders['diff'], tmp_archives_path)
            run(rdiff_cmd, verbose, dry_run)
            run(
                "tar c -C %s ./ | bzip2 > %s" %
                (tmp_archives_path, monthly_archive), verbose, dry_run)
            # Delete the tmp folder
            run("""rm -vrf "%s" """ % tmp_archives_path, verbose, dry_run)
        else:
            print " INFO - No need to generate archive: previous month already archived"

        # Keep last 32 increments (31 days = 1 month + 1 day)
        print " INFO - Remove increments older than 32 days"
        rdiff_cmd = """rdiff-backup --force --remove-older-than 32B "%s" """ % backup_folders[
            'diff']
        run(rdiff_cmd, verbose, dry_run)

        # Final message before next backup item
        print " INFO - Backup successful"
Example #22
def cmd_cj(verb):
    """Montre la conjugaison du verbe sur leconjugueur.com"""
    redirect ("http://www.leconjugueur.com/la/conjugaison/du/verbe/%s.html" % qp(verb))
Example #23
#!/usr/local/bin/python

import os
import glob
from bs4 import BeautifulSoup
import urllib2
from urllib import quote_plus as qp

# High Quality Songs, yeah baby!
DEFAULT_AUDIO_QUALITY = '320K'

search = ''
# We do not want to accept empty inputs :)
while search == '':
  search = raw_input('Enter songname/ lyrics/ artist.. or whatever \n>')
search = qp(search)

print('Making a Query Request! ')

# Magic happens here.
response = urllib2.urlopen('https://www.youtube.com/results?search_query='+search)
html = response.read()
soup = BeautifulSoup(html, 'html.parser')
for link in soup.find_all('a'):
    if '/watch?v=' in (link.get('href') or ''):
        # May change when Youtube is updated in the future.
        video_link = link.get('href')
        break

title = soup.find("a", "yt-uix-tile-link").text
print('Downloading ' + title + '...')
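The snippet stops before any download happens; given the otherwise unused DEFAULT_AUDIO_QUALITY constant, the missing tail presumably hands the link to an external downloader. A sketch of that step using youtube-dl's command line (an assumption, not part of the original):

import os

video_url = 'https://www.youtube.com' + video_link
os.system('youtube-dl -x --audio-format mp3 --audio-quality %s "%s"'
          % (DEFAULT_AUDIO_QUALITY, video_url))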
Example #24
def cmd_imdb(term):
    """Searches Imdb or goes to it"""
    if term:
        redirect("http://www.imdb.com/find?s=all&q=%s" % qp(term))
    else:
        redirect("http://www.imdb.com/")
Example #25
File: bsd.py Project: Ar11rA/Lazyme
from bs4 import BeautifulSoup
import requests
import re
import os
import sys
from urllib2 import urlopen
from urllib import quote_plus as qp

goog_url = "https://www.google.co.in/search?q="
query=raw_input("Enter band:")
query=query+" top tens"
url = goog_url + qp(query)
print url
req = requests.get(url)
result = req.content
link_start = result.find("http://www.thetoptens.com")
link_end = result.find("&amp",link_start)
link = result[link_start:link_end]
ctr=1
print link
req=requests.get(link)
data=req.content
#print data
s2=''
s3=''
soup=BeautifulSoup(data,"html.parser")
print "Top 10:"
for s2 in soup.findAll("div", {"id" : re.compile('i[0-9]*')}):
    s3=s2.find("b").text
    s3 = s3.encode('utf-8')
    if(ctr!=1):
Example #26
File: views.py Project: yask123/API_UI
def index(request):
	if request.method == 'GET':
		print 'test'
		return render(request,'task/index.html')
	elif request.method == 'POST':
		common = ['address','timing','latitude','AddressCountry','Og:type','zipcode','name','AddressRegion','longitude','source']
		print request.META['HTTP_USER_AGENT']
		name =  request.POST.get("name", "")
		location = request.POST.get("location", "")
		temp_location = location
		fbtoken = request.POST.get("fbtoken", "")
		gkey = request.POST.get("gkey", "")
		acc_token = fbtoken
		searched_data=''
		try:
			print 'heya in cache'
			searched_data = SavedCache.objects.filter(name=name,location=location)[0].data

			print 'Cached data, i am here'


			if 'Chrome' in request.META['HTTP_USER_AGENT']:
				#Browser
				d = searched_data
				d= ast.literal_eval(d)
				google = d['google']['results']
				facebook = d['facebook']
				g_mapped={}
				fb_mapped={}

				for each in common:
					g_mapped[each]=''
				for each in common:
					fb_mapped[each]=''

				try:
					g_mapped['address']=google[0]['vicinity']
				except:
					pass
				try:
					g_mapped['name'] = google[0]['name']
				except:
					pass
				try:
					g_mapped['timing'] = google[0]['opening_hours']
				except:
					pass
				try:
					g_mapped['longitude'] = google[0]['geometry']['location']['lng']
				except:
					pass
				try:
					g_mapped['latitude'] = google[0]['geometry']['location']['lat']
				except:
					pass
				try:
					g_mapped['Og:type'] = google[0]['types']
				except:
					pass	
				try:
					g_mapped['Og:image'] = google[0]['photos'][0]['photo_reference']
					g_mapped['Og:image:height'] = google[0]['photos'][0]['height']
					g_mapped['Og:image:width'] = google[0]['photos'][0]['width']
				except:
					pass	
				

				
				try:
					g_mapped['types'] = google[0]['types']
				except:
					pass
				try:
					g_mapped['icon'] = google[0]['icon']
				except:
					pass
				try:
					g_mapped['rating'] = google[0]['rating']
				except:
					pass
				try:
					fb_mapped['website'] = facebook['website']
				except:
					pass	
				try:				
					fb_mapped['fb_page_url'] = facebook['fb_page_url']
				except:
					pass
				try:				
					fb_mapped['fb_page_likes'] = facebook['likes']
				except:
					pass
				try:				
					fb_mapped['fbpage_is_verified'] = facebook['is_verified']
				except:
					pass
				try:
					fb_mapped['parking'] = facebook['parking']
				except:
					pass
				try:
					fb_mapped['name'] = facebook['name']
				except:
					pass	
				try:
					fb_mapped['phone'] = facebook['phone']
				except:
					pass
				fb_location = {}  # default so the loop below is safe if parsing fails
				try:
					fb_location = yaml.safe_load(facebook['location'].strip())
				except:
					pass
				for each in fb_location:
					try:
						if bool(each.replace('u','')[1:-1] == 'state'):
							fb_mapped['state']  = fb_location[each]	
						elif bool(each.replace('u','')[1:-1] == 'latitde'):		
							fb_mapped['latitude'] = fb_location[each]
						elif bool(each.replace('u','')[1:-1] == 'longitde'):		
							fb_mapped['longitude'] = fb_location[each]
						elif bool(each.replace('u','')[1:-1] == 'street'):		
							fb_mapped['address'] = fb_location[each]
						elif bool(each.replace('u','')[1:-1] == 'zip'):		
							fb_mapped['zipcode'] = fb_location[each]
						elif bool(each.replace('u','')[1:-1] == 'city'):		
							fb_mapped['city'] = fb_location[each]
						elif bool(each.replace('u','')[1:-1] == 'country'):		
							fb_mapped['AddressCountry'] = fb_location[each]
					except:
						pass					
				
				complete_map={}
				fb_uncommon=[]
				g_uncommon=[]
				for each in fb_mapped:
					if each not in common:
						fb_uncommon.append(each)
				for each in g_mapped:
					if each not in common:
						g_uncommon.append(each)	
				fb_mapped['source'] = 'Facebook Pages'
				g_mapped['source'] = 'Google Places API'			
				complete_map={'google':g_mapped,'facebook':fb_mapped,'common':common,'fb_uncommon':fb_uncommon,'g_uncommon':g_uncommon}
				fb_mapped['longitude'] = fb_mapped['longitude'][len("Decimal('"):-2]
				fb_mapped['latitude'] = fb_mapped['latitude'][len("Decimal('"):-2]
				fb_mapped['zipcode'] = fb_mapped['zipcode'][2:-1]	
				fb_mapped['address'] = fb_mapped['address'][2:-1]
				print facebook['name']

				return render(request,'task/temp_results.html',complete_map)
				pass
			else:	
				return StreamingHttpResponse(json.dumps(searched_data, sort_keys=True, indent=4, separators=(',', ': ')),   content_type='application/json')

		
		except Exception, e:
			print '---------'
			print searched_data
			print '---------'
			print "Couldn't do it: %s" % e
			print 'Damn'
			if 'lan:' in location:
				location = location.split(':')[1]
				print location
				lat = location.split(',')[0]
				lon = location.split(',')[1]
				location = lat+','+lon
				print 'lan: detected, formatted : ',location
			else:
				geolocator = Nominatim()
				location = geolocator.geocode(location,timeout=10)
				location = str(location.latitude)+','+str(location.longitude)

			if acc_token:
				graph = GraphAPI(acc_token)
			else:
				graph = GraphAPI('CAACEdEose0cBAKgyLsPPMIJy4ZB2UFIN3Q2a2XVgnqMY1ITNYyU6AQQCmQUBZAtNElA2NEkEl7R4ApavpbAf0QBoGY5MW3XD1kgPcpLhDYLqn3lQNA4Ih40mTFfgV7VjlWAPXZBY8R5EJdVTZCjJ84DVisSPJXfkKfgmhC0QWywjktClrQRNUNnGH22x96vmTxZBB8xBaBQZDZD')

			search=name
			search = qp(search)
			
			result = graph.get('search?type=place&q='+search+'&center='+location)
			page_id = result['data'][0]['id']

			params = 'fields=phone,likes,current_location,about,website,food_styles,description,hours,awards,price_range,location,booking_agent,is_verified,offers,public_transit,founded,products,emails,parking,phone,name'
			a =  str(page_id)+'?'+params
			cache={}
			cache['facebook'] = {}
			cache['google'] = {}

			cache['facebook'] = {'fb_page_url':'http://facebook.com/'+page_id}
			params = params.split(',')
			for each in params:
				try:
					cache['facebook'][each] = str(graph.get(a)[each]).encode('utf-8')
				except:
					pass
			# Google Data
			if gkey:
				url = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?location='+location+'&radius=50000&name='+name+'&key='+gkey
			else:
				url = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?location='+location+'&radius=50000&name='+name+'&key=AIzaSyDAERlVmOrLWdq0pHF5fK3c2cHmCSvy55I'
			r = requests.get(url)
			google_result = r.json()		
			cache['google']=google_result
			str_cache = str(cache)
			print name,location
			SavedCache.objects.create(name=name,location=temp_location,data = str_cache)
			if 'Chrome' in request.META['HTTP_USER_AGENT']:
				#Browser
				d = (str_cache)
				d =  ast.literal_eval(d)
				google = d['google']['results']
				facebook = d['facebook']
				g_mapped={}
				fb_mapped={}

				for each in common:
					g_mapped[each]=''
				for each in common:
					fb_mapped[each]=''

				try:
					g_mapped['address']=google[0]['vicinity']
				except:
					pass
				try:
					g_mapped['name'] = google[0]['name']
				except:
					pass
				try:
					g_mapped['timing'] = google[0]['opening_hours']
				except:
					pass
				try:
					g_mapped['longitude'] = google[0]['geometry']['location']['lng']
				except:
					pass
				try:
					g_mapped['latitude'] = google[0]['geometry']['location']['lat']
				except:
					pass
				try:
					g_mapped['Og:image'] = google[0]['photos'][0]['photo_reference']
					g_mapped['Og:image:height'] = google[0]['photos'][0]['height']
					g_mapped['Og:image:width'] = google[0]['photos'][0]['width']
				except:
					pass	
				

				
				try:
					g_mapped['types'] = google[0]['types']
				except:
					pass
				try:
					g_mapped['icon'] = google[0]['icon']
				except:
					pass
				try:
					g_mapped['rating'] = google[0]['rating']
				except:
					pass
				try:
					fb_mapped['website'] = facebook['website']
				except:
					pass	
				try:				
					fb_mapped['fb_page_url'] = facebook['fb_page_url']
				except:
					pass
				try:				
					fb_mapped['fb_page_likes'] = facebook['likes']
				except:
					pass
				try:				
					fb_mapped['fbpage_is_verified'] = facebook['is_verified']
				except:
					pass
				try:
					fb_mapped['parking'] = facebook['parking']
				except:
					pass
				try:
					fb_mapped['phone'] = facebook['phone']
				except:
					pass
				try:			
					fb_location = yaml.safe_load(facebook['location'])
				except:
					pass	
				try:
					fb_mapped['state'] = fb_location['state']
				except:
					pass	
				try:
					fb_mapped['latitude'] = fb_location['latitude']
				except:
					pass
				try:
					fb_mapped['longitude'] = fb_location['longitude']
				except:
					pass
				try:
					fb_mapped['AddressCountry'] = fb_location['country']
				except:
					pass
				try:
					fb_mapped['city'] = fb_location['city']
				except:
					pass
				try:
					fb_mapped['address'] = fb_location['street']
				except:
					pass
				try:
					fb_mapped['zipcode'] = fb_location['zip']
				except:
					pass
				complete_map={}
				fb_uncommon=[]
				g_uncommon=[]
				for each in fb_mapped:
					if each not in common:
						fb_uncommon.append(each)
				for each in g_mapped:
					if each not in common:
						g_uncommon.append(each)		
				complete_map={'google':g_mapped,'facebook':fb_mapped,'common':common,'fb_uncommon':fb_uncommon,'g_uncommon':g_uncommon}

				return render(request,'task/temp_results.html',complete_map)
				pass
			else:
				return StreamingHttpResponse(json.dumps(str_cache, sort_keys=True, indent=4, separators=(',', ': ')),   content_type='application/json')
Example #27
def cmd_wi(term):
    """Searches Wikipedia or goes to it"""
    if term:
        redirect("http://www.wikipedia.org/search-redirect.php?search=%s&language=en&go=++%%E2%%86%%92++&go=Go" % qp(term))
    else:
        redirect("http://www.wikipedia.org/")
Example #28
def index(request):
    if request.method == 'GET':
        print 'test'
        return render(request, 'task/index.html')
    elif request.method == 'POST':
        common = [
            'address', 'timing', 'latitude', 'AddressCountry', 'Og:type',
            'zipcode', 'name', 'AddressRegion', 'longitude', 'source'
        ]
        print request.META['HTTP_USER_AGENT']
        name = request.POST.get("name", "")
        location = request.POST.get("location", "")
        temp_location = location
        fbtoken = request.POST.get("fbtoken", "")
        gkey = request.POST.get("gkey", "")
        acc_token = fbtoken
        searched_data = ''
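        # Cache-first: try to reuse a SavedCache row for this (name, location)
        # pair; any failure below falls through to a live Facebook/Google fetch.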
        try:
            print 'heya in cache'
            searched_data = SavedCache.objects.filter(
                name=name, location=location)[0].data

            print 'Cached data, i am here'

            if 'Chrome' in request.META['HTTP_USER_AGENT']:
                # Browser client: render HTML; other agents get raw JSON
                d = searched_data
                d = ast.literal_eval(d)
                google = d['google']['results']
                facebook = d['facebook']
                g_mapped = {}
                fb_mapped = {}

                for each in common:
                    g_mapped[each] = ''
                for each in common:
                    fb_mapped[each] = ''

                try:
                    g_mapped['address'] = google[0]['vicinity']
                except:
                    pass
                try:
                    g_mapped['name'] = google[0]['name']
                except:
                    pass
                try:
                    g_mapped['timing'] = google[0]['opening_hours']
                except:
                    pass
                try:
                    g_mapped['longitude'] = google[0]['geometry']['location'][
                        'lng']
                except:
                    pass
                try:
                    g_mapped['latitude'] = google[0]['geometry']['location'][
                        'lat']
                except:
                    pass
                try:
                    g_mapped['Og:type'] = google[0]['types']
                except:
                    pass
                try:
                    g_mapped['Og:image'] = google[0]['photos'][0][
                        'photo_reference']
                    g_mapped['Og:image:height'] = google[0]['photos'][0][
                        'height']
                    g_mapped['Og:image:width'] = google[0]['photos'][0][
                        'width']
                except:
                    pass

                try:
                    g_mapped['types'] = google[0]['types']
                except:
                    pass
                try:
                    g_mapped['icon'] = google[0]['icon']
                except:
                    pass
                try:
                    g_mapped['rating'] = google[0]['rating']
                except:
                    pass
                try:
                    fb_mapped['website'] = facebook['website']
                except:
                    pass
                try:
                    fb_mapped['fb_page_url'] = facebook['fb_page_url']
                except:
                    pass
                try:
                    fb_mapped['fb_page_likes'] = facebook['likes']
                except:
                    pass
                try:
                    fb_mapped['fbpage_is_verified'] = facebook['is_verified']
                except:
                    pass
                try:
                    fb_mapped['parking'] = facebook['parking']
                except:
                    pass
                try:
                    fb_mapped['name'] = facebook['name']
                except:
                    pass
                try:
                    fb_mapped['phone'] = facebook['phone']
                except:
                    pass
                fb_location = {}  # guard: keeps the loop below from a NameError if parsing fails
                try:
                    fb_location = yaml.safe_load(facebook['location'].strip())
                except:
                    pass
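                # Cached values were saved via str(), so keys come back as
                # unicode reprs like "u'state'".  replace('u', '') strips the
                # prefix but also eats the 'u' inside 'latitude'/'longitude',
                # hence the 'latitde'/'longitde' comparisons below.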
                for each in fb_location:
                    try:
                        key = each.replace('u', '')[1:-1]
                        if key == 'state':
                            fb_mapped['state'] = fb_location[each]
                        elif key == 'latitde':
                            fb_mapped['latitude'] = fb_location[each]
                        elif key == 'longitde':
                            fb_mapped['longitude'] = fb_location[each]
                        elif key == 'street':
                            fb_mapped['address'] = fb_location[each]
                        elif key == 'zip':
                            fb_mapped['zipcode'] = fb_location[each]
                        elif key == 'city':
                            fb_mapped['city'] = fb_location[each]
                        elif key == 'country':
                            fb_mapped['AddressCountry'] = fb_location[each]
                    except:
                        pass

                complete_map = {}
                fb_uncommon = []
                g_uncommon = []
                for each in fb_mapped:
                    if each not in common:
                        fb_uncommon.append(each)
                for each in g_mapped:
                    if each not in common:
                        g_uncommon.append(each)
                fb_mapped['source'] = 'Facebook Pages'
                g_mapped['source'] = 'Google Places API'
                complete_map = {
                    'google': g_mapped,
                    'facebook': fb_mapped,
                    'common': common,
                    'fb_uncommon': fb_uncommon,
                    'g_uncommon': g_uncommon
                }
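                # Cached numerics were stringified, so values arrive as e.g.
                # "Decimal('12.34')" and "u'94301'"; slice the wrappers off.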
                fb_mapped['longitude'] = fb_mapped['longitude'][len("Decimal('"):-2]
                fb_mapped['latitude'] = fb_mapped['latitude'][len("Decimal('"):-2]
                fb_mapped['zipcode'] = fb_mapped['zipcode'][2:-1]
                fb_mapped['address'] = fb_mapped['address'][2:-1]
                print facebook['name']

                return render(request, 'task/temp_results.html', complete_map)
            else:
                return StreamingHttpResponse(json.dumps(searched_data,
                                                        sort_keys=True,
                                                        indent=4,
                                                        separators=(',',
                                                                    ': ')),
                                             content_type='application/json')

        except Exception, e:
            print '---------'
            print searched_data
            print '---------'
            print "Couldn't do it: %s" % e
            print 'Damn'
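            # 'lan:<lat>,<lon>' passes coordinates through as-is; anything else
            # is treated as a place name and geocoded with Nominatim.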
            if 'lan:' in location:
                location = location.split(':')[1]
                print location
                lat = location.split(',')[0]
                lon = location.split(',')[1]
                location = lat + ',' + lon
                print 'lan: detected, formatted : ', location
            else:
                geolocator = Nominatim()
                location = geolocator.geocode(location, timeout=10)
                location = str(location.latitude) + ',' + str(
                    location.longitude)

            if acc_token:
                graph = GraphAPI(acc_token)
            else:
                graph = GraphAPI(
                    'CAACEdEose0cBAKgyLsPPMIJy4ZB2UFIN3Q2a2XVgnqMY1ITNYyU6AQQCmQUBZAtNElA2NEkEl7R4ApavpbAf0QBoGY5MW3XD1kgPcpLhDYLqn3lQNA4Ih40mTFfgV7VjlWAPXZBY8R5EJdVTZCjJ84DVisSPJXfkKfgmhC0QWywjktClrQRNUNnGH22x96vmTxZBB8xBaBQZDZD'
                )

            search = name
            search = qp(search)

            result = graph.get('search?type=place&q=' + search + '&center=' +
                               location)
            page_id = result['data'][0]['id']

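            # 'params' does double duty: sent verbatim as the Graph API fields
            # list, then split(',') below to iterate the fetched fields.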
            params = 'fields=phone,likes,current_location,about,website,food_styles,description,hours,awards,price_range,location,booking_agent,is_verified,offers,public_transit,founded,products,emails,parking,phone,name'
            a = str(page_id) + '?' + params
            cache = {}
            cache['facebook'] = {}
            cache['google'] = {}

            cache['facebook'] = {
                'fb_page_url': 'http://facebook.com/' + page_id
            }
            params = params.split(',')
            for each in params:
                try:
                    cache['facebook'][each] = str(
                        graph.get(a)[each]).encode('utf-8')
                except:
                    pass
            # Google Data
            if gkey:
                url = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=' + location + '&radius=50000&name=' + name + '&key=' + gkey
            else:
                url = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=' + location + '&radius=50000&name=' + name + '&key=AIzaSyDAERlVmOrLWdq0pHF5fK3c2cHmCSvy55I'
            r = requests.get(url)
            google_result = r.json()
            cache['google'] = google_result
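            # Persist the payload as a Python-literal string; the cache read
            # path above revives it with ast.literal_eval.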
            str_cache = str(cache)
            print name, location
            SavedCache.objects.create(name=name,
                                      location=temp_location,
                                      data=str_cache)
            if 'Chrome' in request.META['HTTP_USER_AGENT']:
                # Browser client: render HTML; other agents get raw JSON
                d = str_cache
                d = ast.literal_eval(d)
                google = d['google']['results']
                facebook = d['facebook']
                g_mapped = {}
                fb_mapped = {}

                for each in common:
                    g_mapped[each] = ''
                for each in common:
                    fb_mapped[each] = ''

                try:
                    g_mapped['address'] = google[0]['vicinity']
                except:
                    pass
                try:
                    g_mapped['name'] = google[0]['name']
                except:
                    pass
                try:
                    g_mapped['timing'] = google[0]['opening_hours']
                except:
                    pass
                try:
                    g_mapped['longitude'] = google[0]['geometry']['location'][
                        'lng']
                except:
                    pass
                try:
                    g_mapped['latitude'] = google[0]['geometry']['location'][
                        'lat']
                except:
                    pass
                try:
                    g_mapped['Og:image'] = google[0]['photos'][0][
                        'photo_reference']
                    g_mapped['Og:image:height'] = google[0]['photos'][0][
                        'height']
                    g_mapped['Og:image:width'] = google[0]['photos'][0][
                        'width']
                except:
                    pass

                try:
                    g_mapped['types'] = google[0]['types']
                except:
                    pass
                try:
                    g_mapped['icon'] = google[0]['icon']
                except:
                    pass
                try:
                    g_mapped['rating'] = google[0]['rating']
                except:
                    pass
                try:
                    fb_mapped['website'] = facebook['website']
                except:
                    pass
                try:
                    fb_mapped['fb_page_url'] = facebook['fb_page_url']
                except:
                    pass
                try:
                    fb_mapped['fb_page_likes'] = facebook['likes']
                except:
                    pass
                try:
                    fb_mapped['fbpage_is_verified'] = facebook['is_verified']
                except:
                    pass
                try:
                    fb_mapped['parking'] = facebook['parking']
                except:
                    pass
                try:
                    fb_mapped['phone'] = facebook['phone']
                except:
                    pass
                try:
                    fb_location = yaml.safe_load(facebook['location'])
                except:
                    pass
                try:
                    fb_mapped['state'] = fb_location['state']
                except:
                    pass
                try:
                    fb_mapped['latitude'] = fb_location['latitude']
                except:
                    pass
                try:
                    fb_mapped['longitude'] = fb_location['longitude']
                except:
                    pass
                try:
                    fb_mapped['AddressCountry'] = fb_location['country']
                except:
                    pass
                try:
                    fb_mapped['city'] = fb_location['city']
                except:
                    pass
                try:
                    fb_mapped['address'] = fb_location['street']
                except:
                    pass
                try:
                    fb_mapped['zipcode'] = fb_location['zip']
                except:
                    pass
                complete_map = {}
                fb_uncommon = []
                g_uncommon = []
                for each in fb_mapped:
                    if each not in common:
                        fb_uncommon.append(each)
                for each in g_mapped:
                    if each not in common:
                        g_uncommon.append(each)
                complete_map = {
                    'google': g_mapped,
                    'facebook': fb_mapped,
                    'common': common,
                    'fb_uncommon': fb_uncommon,
                    'g_uncommon': g_uncommon
                }

                return render(request, 'task/temp_results.html', complete_map)
            else:
                return StreamingHttpResponse(json.dumps(str_cache,
                                                        sort_keys=True,
                                                        indent=4,
                                                        separators=(',',
                                                                    ': ')),
                                             content_type='application/json')
Example #29
File: routing.py Project: gbour/Mother
    def mergeargs(key, value):
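        # Serializes one query-string pair; list/tuple values repeat the key
        # once per element.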
        key = qp(key)
        if isinstance(value, (tuple, list)):
            return "&".join("%s=%s" % (key, qp(un_unicode(v))) for v in value)

        return "%s=%s" % (key, qp(un_unicode(value)))
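
    # A minimal usage sketch, assuming qp is urllib.quote_plus and un_unicode
    # byte-encodes unicode strings:
    #   mergeargs('tag', ['rock n roll', 'jazz'])  # -> 'tag=rock+n+roll&tag=jazz'
    #   mergeargs('q', u'caf\xe9')                 # -> 'q=caf%C3%A9'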
Example #30
def cmd_tv(term):
    """Searches tvrage or goes to it"""
    if term:
        redirect("http://www.tvrage.com/search.php?search=%s" % qp(term))
    else:
        redirect("http://www.tvrage.com/")