Example #1
 def twitter_callback(self, path, value):
     value = value[0]
     import tinyurl
     self.twitter = Twitter(self.twitter_key, self.twitter_secret)
     self.twitter_mode = value
     message = "Station " + self.channel_url + " : received OSC message '%s' with arguments '%d'" % (path, value)
     self.m3u_tinyurl = tinyurl.create_one(self.channel.url + '/m3u/' + self.m3u.split(os.sep)[-1])
     self.rss_tinyurl = tinyurl.create_one(self.channel.url + '/rss/' + self.rss_playlist_file.split(os.sep)[-1])
     self.logger.write_info(message)
Example #2
File: tnt.py Project: aseba/TNT
    def tweetWithCheck(self, message, reply=None):
        status = list()
        for text in message.split(" "):
            if text.startswith("http") and len(text) >= 27:
                newLink = tinyurl.create_one(text)
                message = message.replace(text, newLink)
        if self.oauthapi is None:
            raise NotAuthorizedException()
        if len(message) > 140:
            if self.guiHandle.askIfSplit():
                # http://snippets.dzone.com/posts/show/5641
                import math

                # v = value to split, l = size of each chunk
                f = lambda v, l: [v[i * l : (i + 1) * l] for i in range(int(math.ceil(len(v) / float(l))))]
                messages = f(message, 140)
                for message in messages:
                    Logger().log("tweetWithCheck() : Tweeting")
                    status.append(self.oauthapi.PostUpdates(message.encode("utf-8"), in_reply_to_status_id=reply))
            else:
                message = message[:140]
                Logger().log("tweetWithCheck() : Tweeting")
                status.append(self.oauthapi.PostUpdates(message.encode("utf-8"), in_reply_to_status_id=reply))
        else:
            Logger().log("tweetWithCheck() : Tweeting")
            status.append(self.oauthapi.PostUpdates(message.encode("utf-8"), in_reply_to_status_id=reply))
        return status
Example #3
    def send_context_facebook(self, context, token):
        logging.debug("Sending to facebook")

        url = 'https://graph.facebook.com/me/feed?access_token=%s' % token

        message = ''
        if context.get('status'):
            message += context.get('status')
            logging.debug("Context [status]: %s" % context.get('status'))

        if context.get('location'):
            map_url = 'http://maps.google.com/maps?z=18&q=%(location)s(%(text)s)' % {
                'location': context['location'],
                'text': context['status']
            }

            logging.debug("Context [map_url]: %s" % map_url)
            shortened = tinyurl.create_one(map_url)

            message += ' %s' % shortened
            logging.debug("Context [location]: %s" % shortened)

        try:
            return MSSCurl().post(url=url,
                                  port=None,
                                  postfields={'message': message})
        except Exception, e:
            logging.exception("Can not send post to Facebook: %s" % e)
Example #4
 def post_news(self, feed_name, title, url, date):
     """Cancel post if filter keyword is in title"""
     for keyword in self.filterkeywords:
         if keyword in title.lower():
             print datetime.datetime.now(), u"Found", keyword, "keyword in title. Aborting post."
             sys.stdout.flush()
             return
     """Try shortening url"""
     if self.__config.shorturls:
         try:
             post_url = tinyurl.create_one(url)
             if ("error" in post_url.lower()):
                 post_url = url
         except Exception as e:
             post_url = url
             print datetime.datetime.now(), e
             sys.stdout.flush()
     else:
         post_url = url
     """Posts a new announcement to the channel"""
     try:
         msg = self.__get_colored_text(
             self.color_feedname, feed_name + ": ") + self.get_bolded_text(
                 self.__get_colored_text(self.color_newstitle, title)
             ) + " > " + self.__get_colored_text(
                 self.color_url, post_url + ", ") + self.__get_colored_text(
                     self.color_date, str(date))
         self.send_msg(self.__config.CHANNEL, msg)
     except Exception as e:
         print datetime.datetime.now(), e
         sys.stdout.flush()
Example #5
 def shortenWithTinyURL(self, text):
     """
     Shortens the URL with tinyURL
     """
     print("in tinyurl")
     URL = tinyurl.create_one(text)
     self.outputURLTxt.SetValue(URL)
Example #6
def event_register(request):
    template = loader.get_template('event_register.html')
    successMsg = None
    # initialise context so template.render() below never receives an undefined name
    context = {'eventid': None, 'errorMsg': None, 'successMsg': None}
    if request.method == 'GET':
        if 'eventid' in request.GET:
            eid = request.GET["eventid"]
            if 'errorMsg' in request.GET:
                errorMsg = request.GET['errorMsg']
            else:
                errorMsg = None
            context = {
                'eventid': eid,
                'errorMsg': errorMsg,
                'successMsg': successMsg,
            }
        if 'success' in request.GET:
            theURL = "https://www.instamojo.com/api/1.1/payment-requests/" + request.GET[
                'payment_request_id'] + "/" + request.GET['payment_id']
            theURL = tinyurl.create_one(theURL)
            successMsg = "Done! Note down the URL " + theURL + ". Keep the message received as token"
            context = {
                'eventid': None,
                'errorMsg': None,
                'successMsg': successMsg,
            }
    return HttpResponse(
        renderContent(template.render(context, request), request))
Example #7
def player():
    if flask.request.method == 'POST':
        url = unicode(flask.request.form['url'])
        shorturl = tinyurl.create_one(url)
        tv.start('URL', shorturl, 'local')

    return flask.render_template('player.html')
Example #8
File: main.py Project: mattlokes/photodb
    def post(self, collection_name, primary):
        conn = db_connect.connect()

        if collection_name == "create":  #CREATE TABLE
            print("Creating New Table Entry....{0}".format(primary))
            query = conn.execute(
                "create table {0} ( photo_id integer PRIMARY KEY, photo_name text NOT NULL, photo_link text NOT NULL, photo_tiny_link text NOT NULL, photo_primary bool NOT NULL)"
                .format(primary))
            return
        elif collection_name == "delete":  #DELETE TABLE, not for normal use.
            print("Deleting Table Entry....{0}".format(primary))
            query = conn.execute("drop table {0}".format(primary))
            return
        else:  #INSERT ENTRY TO TABLE
            photo_name = request.json['photo_name']
            if "thumb" in photo_name:  #if thumb in picture dont register it
                return {'slink': 0}
            photo_link = request.json['photo_link']
            photo_tiny_link = tinyurl.create_one(str(photo_link))
            photo_primary = int(primary)

            query = conn.execute(
                "insert into {0} values(null,'{1}','{2}','{3}',{4})".format(
                    collection_name, photo_name, photo_link, photo_tiny_link,
                    photo_primary))
            return {'slink': photo_tiny_link}
Example #9
def new():
    #LOCATION = ['NE', 'NW', 'SOUTH', 'WEST']
    #app = []
    #for loc in LOCATION:
    app = []
    #loc = 'NE'
    LOCATION = ['HQ']
    #path = PATH % loc
    print("initiated")
    #[app.append("%s, %s" % ('%s_A%s' % (loc, i), tinyurl.create_one(URL % base64.b64encode('%s_A%s' % (loc, i))))) for i in range(1, 501)]

    for loc in LOCATION:
        path = PATH % loc
        for i in range(1, 2):
            print(i)
            app.append("%s, %s" %
                       ('%s_ubaM_%s' % (loc, i),
                        tinyurl.create_one(URL % base64.b64encode('%s_A%s' %
                                                                  (loc, i)))))

        with open(PATH % loc + '%sA500.csv' % loc, mode='w') as a500:
            new_csv = csv.writer(a500,
                                 delimiter=',',
                                 quotechar='"',
                                 quoting=csv.QUOTE_MINIMAL)
            for ap in app:
                new_csv.writerow([ap])
                create_qr(ap, path)
        app = []
Example #10
def get_last_calls_CBS():
    url = "http://riquelme.kope.cl/api/getLastCalls"
    r = requests.get(url)
    json_response = r.text
    j = json.loads(json_response)
    calls = j['data']['calls']
    text_buffer = "Last Calls CBS\n\n"

    for i in reversed(calls):
        text_buffer += i['call_date'] + "\n"
        text_buffer += i['call_code'] + ":  " + i['call_content'] + "\n"
        text_buffer += i['call_time'] + " - " + i['call_machines'] + "\n"
        url = "http://dongaby.pompefrance.cl/emergency?"
        url += "company=4&"
        url += "lat=" + i['call_lat'] + "&"
        url += "lon=" + i['call_lon'] + "&"
        url += "title=" + str(
            urllib.pathname2url(i['call_title'].encode('utf-8'))) + "&"
        url += "message=" + str(
            urllib.pathname2url(i['call_content'].encode('utf-8')))
        tiny = tinyurl.create_one(url)
        text_buffer += tiny
        text_buffer += "\n\n"

    return text_buffer
Example #11
def shorten(url):
    if url in cache:
        return cache[url]
    else:
        short = tinyurl.create_one(url)
        cache[url] = short
        return short
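The snippet above references a module-level `cache` that is not shown. A minimal self-contained sketch of the assumed setup (a plain dict is an assumption, the original module may use any mapping; it also assumes the tinyurl package is installed and network access is available):

import tinyurl

cache = {}  # assumed module-level store: long URL -> short URL

def shorten(url):
    if url in cache:
        return cache[url]
    short = tinyurl.create_one(url)
    cache[url] = short
    return short

# repeated calls for the same URL reuse the cached result instead of hitting the network
first = shorten('https://example.com/a/very/long/path')
second = shorten('https://example.com/a/very/long/path')
assert first == second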
Example #12
def make_tiny(msg):
	print msg
	fixed_msg = '+'.join(msg)
	lmgtfy_address = ('http://lmgtfy.com/?q=%s' % fixed_msg)
	lmgtfy_tiny = tinyurl.create_one(lmgtfy_address)

	return lmgtfy_tiny
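A brief usage sketch for the helper above, assuming `msg` arrives as a list of words (joining a plain string would put a '+' between every character); the example query is made up:

import tinyurl

def make_tiny(msg):
    fixed_msg = '+'.join(msg)  # msg is expected to be a sequence of words
    lmgtfy_address = 'http://lmgtfy.com/?q=%s' % fixed_msg
    return tinyurl.create_one(lmgtfy_address)

print(make_tiny(['how', 'to', 'shorten', 'a', 'url']))  # short link to the LMGTFY search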
Example #13
    def googleThatForMe(self, text):
        """Return shortened URL to LMGTFY search

        @param text: text to search for
        @return: shortened URL
        """
        url = 'http://lmgtfy.com/?%s' % (urllib.urlencode({'q': text}))
        return tinyurl.create_one(url)
Example #14
    def googleThatForMe(self, text):
        """Return shortened URL to LMGTFY search

        @param text: text to search for
        @return: shortened URL
        """
        url = 'http://lmgtfy.com/?%s' % (urllib.urlencode({'q': text}))
        return tinyurl.create_one(url)
Example #15
def maybe_market_to_twitter(bounty, event_name, txid):

    if not settings.TWITTER_CONSUMER_KEY:
        return False
    if event_name not in ['new_bounty', 'remarket_bounty']:
        return False
    if bounty.get_natural_value() < 0.0001:
        return False
    if bounty.network != settings.ENABLE_NOTIFICATIONS_ON_NETWORK:
        return False
    return False  # per 2018/01/22 convo with vivek / kevin, these tweets have low engagement
    # we are going to test manually promoting these tweets for a week and come back to revisit this

    api = twitter.Api(
        consumer_key=settings.TWITTER_CONSUMER_KEY,
        consumer_secret=settings.TWITTER_CONSUMER_SECRET,
        access_token_key=settings.TWITTER_ACCESS_TOKEN,
        access_token_secret=settings.TWITTER_ACCESS_SECRET,
    )
    tweet_txts = [
        "Earn {} {} {} now by completing this task: \n\n{}",
        "Oppy to earn {} {} {} for completing this task: \n\n{}",
        "Is today the day you (a) boost your OSS rep (b) make some extra cash? 🤔 {} {} {} \n\n{}",
    ]
    if event_name == 'remarket_bounty':
        tweet_txts = tweet_txts + [
            "Gitcoin open task of the day is worth {} {} {} ⚡️ \n\n{}",
            "Task of the day 💰 {} {} {} ⚡️ \n\n{}",
        ]
    if event_name == 'new_bounty':
        tweet_txts = tweet_txts + [
            "Extra! Extra 🗞🗞 New Funded Issue, Read all about it 👇  {} {} {} \n\n{}",
            "Hot off the blockchain! 🔥🔥🔥 There's a new task worth {} {} {} \n\n{}",
            "💰 New Task Alert.. 💰 Earn {} {} {} for working on this 👇 \n\n{}",
        ]

    random.shuffle(tweet_txts)
    tweet_txt = tweet_txts[0]

    new_tweet = tweet_txt.format(
        round(bounty.get_natural_value(), 4), bounty.token_name,
        ("(${})".format(bounty.value_in_usdt) if bounty.value_in_usdt else ""),
        tinyurl.create_one(bounty.get_absolute_url()))
    new_tweet = new_tweet + " " + github_org_to_twitter_tags(
        bounty.org_name)  #twitter tags
    if bounty.keywords:  #hashtags
        for keyword in bounty.keywords.split(','):
            _new_tweet = new_tweet + " #" + str(keyword).lower().strip()
            if len(_new_tweet) < 140:
                new_tweet = _new_tweet

    try:
        api.PostUpdate(new_tweet)
    except Exception as e:
        print(e)
        return False

    return True
Example #16
	def gnews(self):
		""" Returns the top ten news of the selected section """
		feed_url = self.get_feed()
		feed_data = feedparser.parse(feed_url)
		print " "
		for data in feed_data["items"]:
			tiny_url = tinyurl.create_one(data["link"])
			print('\033[33m' + data["title"] + " : " + Style.RESET_ALL + tiny_url)
			print " "
Example #17
    def handleInput(self, Matchlist):
        Source = Matchlist[0]
        Target = Matchlist[1]
        Text = Matchlist[2].split()

        try:
            URL = tinyurl.create_one(Text[0])
        except Exception:
            PrivMsg(Target,
                    "4Error in 'TINYURL.Modul' >> '" + str(Exception) + "'")
            return

        Nick = re.match("(.+?)!", Source).group(1)

        if (len(Text) >=
                2) or (re.search("(?:.+)youtube.com/(?:.+)v=(\w+)", Text[0])
                       and len(Text) == 1):  # description was provided
            x = "[" + Nick + "] "

            # determine extra info tag, e.g. [YouTube] [PNG] [TIF]
            if (re.search("(?:.+)youtube.com/(?:.+)v=(\w+)", Text[0])):
                x += "[YouTube] "
            elif (re.search("(\w+).rofl.to", Text[0])):
                r = re.search("(\w+).rofl.to", Text[0]).group(1)
                x += "[rofl.to] (" + str(r) + ") "
            elif (re.search("collegehumor.com/(\w+)", Text[0])):
                r = re.search("collegehumor.com/(\w+)", Text[0]).group(1)
                x += "[CollegeHumor] (" + str(r) + ")"
            elif (re.search("newgrounds.com/", Text[0])):
                x += "[Newsground] "
            else:
                try:
                    Tag = re.search(
                        "\.(bmp|jpg|gif|img|jp2|jpeg|png|psd|tga|tif|txt)$",
                        Text[0]).group(1)
                    x += "[" + Tag.upper() + "] "
                except:
                    pass

            if (len(Text) > 1):
                x += URL + " " + " ".join(Text[1:])
            else:
                r = re.search("(?:.+)youtube.com/(?:.+)v=([-_\w]+)",
                              Text[0]).group(1)
                t = self.YouTube.getInfo(r)
                x += URL + " " + t

            # Twitter tweets must not be longer than 140 characters
            if (len(x) <= 140):
                self.Twitter.sendTweet(x)
                PrivMsg(Target, "hinzugefügt! - http://twitter.com/fptlnk",
                        "15Funlink:07 ")
            else:
                PrivMsg(Target,"Beschreibung zu lang. Max 140 Zeichen. Dein Add war " \
                + str(len(x)) + " Zeichen lang.","15Funlink:07 ")
        else:  # no description given
            PrivMsg(Target, "Die Beschreibung fehlt!", "15Funlink:07 ")
Example #18
def shortenUrl(url):
    """
    function to shorten a long Url.
    """
    try:
        shortUrl = tinyurl.create_one(url)
    except:
        return False

    return shortUrl
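Since the helper above returns False on any failure, callers should check the result before using it. A minimal sketch of that pattern (falling back to the original URL is an assumption, not something the snippet prescribes):

import tinyurl

def shortenUrl(url):
    """Shorten a long URL, returning False on any failure."""
    try:
        return tinyurl.create_one(url)
    except Exception:
        return False

long_url = 'https://example.com/a/rather/long/path?with=query&params=1'
short = shortenUrl(long_url)
if not short:
    short = long_url  # keep the original URL when shortening fails
print(short)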
Example #19
File: tnt.py Project: aseba/TNT
 def tweet(self, message, reply=None):
     for text in message.split(" "):
         if text.startswith("http") and len(text) >= 27:  # len of a tinyurl
             newLink = tinyurl.create_one(text)
             message = message.replace(text, newLink)
     if self.oauthapi is None:
         raise NotAuthorizedException()
     Logger().log("tweet() : Tweeting")
     status = self.oauthapi.PostUpdates(message.encode("utf-8"), in_reply_to_status_id=reply)
     return status
Example #20
def shortenUrl(url):
    """
    function to shorten a long Url.
    """
    try:
        shortUrl = tinyurl.create_one(url)
    except:
        return False

    return shortUrl
Example #21
def updatePidgin():
    """This will check for youtube in tabs and retrieve the video name from gdata's URL using JSON"""
    firefoxTabs = readFirefoxTabs()
    for someValue in xrange(len(firefoxTabs)):
        eachURL = str(firefoxTabs[someValue]['entries'][-1]['url'])
        if "www.youtube.com/watch" in eachURL and eachURL != previousYoutubeURL[0]:
            video_id = findVideoID(eachURL)
            system("purple-remote 'setstatus?status=available&message=watching: %s on youtube[%s]'"\
            %(video_id['data']['title'],tinyurl.create_one(eachURL)))
            previousYoutubeURL[0] = eachURL
Example #22
    def request_authorized_url(self):
        self.url, self._secret = self.splitwise_handle.getAuthorizeURL()
        logger.info("Received url=%s and secrect=%s" %(self.url, self._secret))
        query_param = urlparse(self.url).query
        if 'oauth_token' in query_param:
            self.auth_token=query_param.split('=')[1]
            session = UserSession(userId=self.userId,
                                  oauth_token=self.auth_token,
                                  secret=self._secret)
            UserSessionManager().persist_user_session(session)

        return tinyurl.create_one(self.url)
Example #23
    def __fetch_feed(self, feed_info, callback, forever):
        """Fetches a RSS feed, parses it and updates the database and/or announces new news."""
        while 1:
            try:
                # Parse a feed's url
                news = feedparser.parse(feed_info['url'])

                # Reverse the ordering. Oldest first.
                for newsitem in news.entries[::-1]:
                    newstitle = newsitem.title
                    if self.__config.shorturls:
                        newsurl = tinyurl.create_one(
                            newsitem.link)  # Create a short link
                        if newsurl == "Error":  #If that fails, use the long version
                            newsurl = newsitem.link
                    else:
                        newsurl = newsitem.link

                    # Try to get the published or updated date. Otherwise set it to 'no date'
                    try:
                        # Get date and parse it
                        newsdate = dateutil.parser.parse(newsitem.published)
                        # Format date based on 'dateformat' in config.py
                        newsdate = newsdate.strftime(self.__config.dateformat)

                    except Exception as e:
                        try:
                            # Get date and parse it
                            newsdate = dateutil.parser.parse(newsitem.updated)
                            # Format date based on 'dateformat' in config.py
                            newsdate = newsdate.strftime(
                                self.__config.dateformat)

                        except Exception as e:
                            newsdate = "no date"

                    # Update the database. If it's a new issue, post it to the channel
                    is_new = self.__db.insert_news(feed_info['id'], newstitle,
                                                   newsitem.link, newsdate)
                    if is_new and callback is not None:
                        callback(feed_info['title'], newstitle, newsurl,
                                 newsdate)
                print "Updated: " + feed_info['title']
            except Exception as e:
                print e
                print "Failed: " + feed_info['title']

            if not forever:
                break

            # sleep frequency minutes
            time.sleep(int(feed_info['published']) * 60)
Example #24
def maybe_market_to_twitter(bounty, event_name, txid):

    if not settings.TWITTER_CONSUMER_KEY:
        return False
    if event_name not in ['new_bounty', 'remarket_bounty']:
        return False
    if bounty.get_natural_value() < 0.0001:
        return False
    if bounty.network != settings.ENABLE_NOTIFICATIONS_ON_NETWORK:
        return False

    api = twitter.Api(
        consumer_key=settings.TWITTER_CONSUMER_KEY,
        consumer_secret=settings.TWITTER_CONSUMER_SECRET,
        access_token_key=settings.TWITTER_ACCESS_TOKEN,
        access_token_secret=settings.TWITTER_ACCESS_SECRET,
    )
    tweet_txts = [
        "Earn {} {} {} now by completing this task: \n\n{}",
        "Oppy to earn {} {} {} for completing this task: \n\n{}",
    ]
    if event_name == 'remarket_bounty':
        tweet_txts = tweet_txts + [
            "Gitcoin open task of the day is worth {} {} {} ⚡️ \n\n{}",
            "Task of the day 💰 {} {} {} ⚡️ \n\n{}",
        ]
    if event_name == 'new_bounty':
        tweet_txts = tweet_txts + [
            "Hot off the blockchain! 🔥🔥🔥 There's a new task worth {} {} {} \n\n{}",
            "💰 New Task Alert.. 💰 Earn {} {} {} for working on this 👇 \n\n{}",
        ]

    random.shuffle(tweet_txts)
    tweet_txt = tweet_txts[0]

    new_tweet = tweet_txt.format(
        round(bounty.get_natural_value(), 4), bounty.token_name,
        ("(${})".format(bounty.value_in_usdt) if bounty.value_in_usdt else ""),
        tinyurl.create_one(bounty.get_absolute_url()))
    if bounty.keywords:
        for keyword in bounty.keywords.split(','):
            _new_tweet = new_tweet + " #" + str(keyword).lower().strip()
            if len(_new_tweet) < 140:
                new_tweet = _new_tweet

    try:
        api.PostUpdate(new_tweet)
    except Exception as e:
        print(e)
        return False

    return True
Example #25
 def handleInput(self,Matchlist):
     Source = Matchlist[0]
     Target = Matchlist[1]
     Text = Matchlist[2].split()
     
     try:
         URL = tinyurl.create_one(Text[0])
     except Exception:
         PrivMsg(Target,"4Error in 'TINYURL.Modul' >> '" + str(Exception) + "'")
         return
     
     Nick = re.match("(.+?)!", Source).group(1)
     
     if (len(Text) >= 2) or (re.search("(?:.+)youtube.com/(?:.+)v=(\w+)",Text[0]) and len(Text) == 1): # description was provided
         x = "[" + Nick + "] "
         
         # determine extra info tag, e.g. [YouTube] [PNG] [TIF]
         if (re.search("(?:.+)youtube.com/(?:.+)v=(\w+)",Text[0])):
             x += "[YouTube] "
         elif (re.search("(\w+).rofl.to",Text[0])):
             r = re.search("(\w+).rofl.to",Text[0]).group(1)
             x += "[rofl.to] (" + str(r) +") "
         elif (re.search("collegehumor.com/(\w+)",Text[0])):
             r = re.search("collegehumor.com/(\w+)",Text[0]).group(1)
             x += "[CollegeHumor] (" + str(r) + ")"
         elif (re.search("newgrounds.com/",Text[0])):
             x += "[Newsground] "
         else:
             try:
                 Tag = re.search("\.(bmp|jpg|gif|img|jp2|jpeg|png|psd|tga|tif|txt)$",Text[0]).group(1)
                 x += "[" + Tag.upper() + "] "
             except:
                 pass
         
         if (len(Text) > 1):
             x += URL + " " + " ".join(Text[1:])
         else:
             r = re.search("(?:.+)youtube.com/(?:.+)v=([-_\w]+)",Text[0]).group(1)
             t = self.YouTube.getInfo(r)
             x += URL + " " + t
         
         # Twitter tweets must not be longer than 140 characters
         if (len(x) <= 140):
             self.Twitter.sendTweet(x)
             PrivMsg(Target,"hinzugefügt! - http://twitter.com/fptlnk","15Funlink:07 ")
         else:
             PrivMsg(Target,"Beschreibung zu lang. Max 140 Zeichen. Dein Add war " \
             + str(len(x)) + " Zeichen lang.","15Funlink:07 ")
     else: # no description given
             PrivMsg(Target,"Die Beschreibung fehlt!","15Funlink:07 ")
Example #26
    def post(self, collection_name, primary):
        conn = db_connect.connect()
        print request.json
        #photo_id = ????
        photo_name = request.json['photo_name']
        photo_link = request.json['photo_link']
        photo_tiny_link = tinyurl.create_one(str(photo_link))
        photo_primary = int(primary)

        query = conn.execute(
            "insert into {0} values(null,'{1}','{2}','{3}',{4})".format(
                collection_name, photo_name, photo_link, photo_tiny_link,
                photo_primary))
        return {'slink': photo_tiny_link}
Example #27
def updateTweet(tweet, link, tokens):
		auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET, secure=True)
		auth.set_access_token(*tokens)
		api = tweepy.API(auth)
		try:
			if len(tweet) > 140:
				url = tinyurl.create_one(link)
				limit = 136 - len(url)
				tweet = tweet[:limit] + "...\"" + url
			api.update_status(tweet)
			print "Tweet updated"
		except:
			print "Something wrong with updating tweet"
			raise
Example #28
def checkNewFeeds():
    now = time.strftime("%c")
    print bcolors.OKBLUE + time.strftime("%c") + bcolors.ENDC
    d = feedparser.parse('https://www.ncsc.nl/rss/beveiligingsadviezen')
    global i
    while (i < len(d['entries'])):
        titel = d['entries'][i]['title']
        link = d['entries'][i]['link']
        url = tinyurl.create_one(link)
        # retry shortening the original link (not a failed result) until it succeeds
        while (url == "Error") or (url == "http://tinyurl.com/yp9ewb"):
            url = tinyurl.create_one(link)
        compleet = titel + " - " + url

        #Fix: UnicodeEncodeError
        compleet = compleet.encode('utf-8')

        hashCompleet = hashlib.md5(compleet).hexdigest()
        if hashCompleet in open('md5sum').read():
            print bcolors.OKGREEN + compleet + bcolors.ENDC
            #continue
        else:
            global api
            print bcolors.WARNING + compleet + bcolors.ENDC
            try:
                twitterLogin()
            except:
                print bcolors.WARNING + "Twitter limit exceeded" + bcolors.ENDC
                time.sleep(900)
                checkNewFeeds()
            status = api.PostUpdate(compleet)
            f = open('md5sum', 'a')
            f.write(hashCompleet + '\n')
            f.close()
        i = i + 1
    time.sleep(14400)
    i = 0
    checkNewFeeds()
Example #29
File: views.py Project: hecsf/hecserver
def build_body_for(user):
    unique_id = str(uuid.uuid4())
    now = timezone.now()
    
    survey = Survey(user=user, date_sent=now, external_id=unique_id)
    survey.save()
    
    url = tinyurl.create_one("http://www.surveygizmo.com/s3/%s/HEC-Alumni-Form?sguid=%s&employer=%s&role=%s&program=%s&name=%s" % (
        settings.SURVEY_GIZMO_SURVEY_ID, unique_id,
        quote(user.last_employer.name if user.last_employer is not None else "-"),
        quote(user.last_role if user.last_role is not None else ""),
        quote(user.last_program.name),
        quote("%s %s" % (user.first_name, user.last_name)),
        ))
    return "%s checking in! Time to complete the survey: %s. Thanks for staying in touch." % (user.last_program.name, url)
Example #30
    def __fetch_feed(self, feed_info, callback, forever):
        """Fetches a RSS feed, parses it and updates the database and/or announces new news."""
        while 1:
            try:
                # Parse a feed's url
                news = feedparser.parse( feed_info['url'] )

                # Reverse the ordering. Oldest first.
                for newsitem in news.entries[::-1]:
                    newstitle = newsitem.title
                    if self.__config.shorturls:
                        newsurl = tinyurl.create_one(newsitem.link) # Create a short link
                        if newsurl == "Error": #If that fails, use the long version
                            newsurl = newsitem.link
                    else:
                        newsurl = newsitem.link

                    # Try to get the published or updated date. Otherwise set it to 'no date'
                    try:
                        # Get date and parse it
                        newsdate = dateutil.parser.parse(newsitem.published)
                        # Format date based on 'dateformat' in config.py
                        newsdate = newsdate.strftime(self.__config.dateformat)

                    except Exception as e:
                        try:
                            # Get date and parse it
                            newsdate = dateutil.parser.parse(newsitem.updated)
                            # Format date based on 'dateformat' in config.py
                            newsdate = newsdate.strftime(self.__config.dateformat)

                        except Exception as e:
                            newsdate = "no date"

                    # Update the database. If it's a new issue, post it to the channel
                    is_new = self.__db.insert_news(feed_info['id'], newstitle, newsitem.link, newsdate)
                    if is_new and callback is not None:
                        callback(feed_info['title'], newstitle, newsurl, newsdate)
                print "Updated: " + feed_info['title']
            except Exception as e:
                print e
                print "Failed: " + feed_info['title']

            if not forever:
                break

            # sleep frequency minutes
            time.sleep(int(feed_info['published'])*60)
Example #31
def tinyurlShortener(url):
    shortened = tinyurl.create_one(url)
    print "\n\tShortened url is {}".format(shortened)
    clipboard.copy(shortened)
    print "\n\tDone, your shortened url is on clipboard.!"
    print "\n\tLaunch your browser and use 'Command-V' OR 'Ctrl + V' to paste the link in\n\tyour browser."
    time.sleep(5)
    print "\n\tWant to Fetch QRCode? press 1 else press 0"
    choice = int(input("\n\t"))
    if choice == 1:
        getOnlyQRCode(shortened)
    elif choice == 0:
        return
    else:
        print "Error!"
        return
Example #32
def failgraph(args):
    args = parse_args(args)
    targetlist = ''
    colorpairs = 0
    targets = None
    for target in args.tests:
        targets = get_targets(target, COLORS[colorpairs % len(COLORS)],
                              avg=args.smoothing)
        colorpairs += 1
        subtarglist = '&'.join(targets)
        targetlist = '&'.join([targetlist, subtarglist])

    if not targets:
        return 'No data'

    url = '&'.join((graphite_base_url(since=args.duration,
                                      avg=args.smoothing), targetlist))
    return tinyurl.create_one(url)
Example #33
def TweetCurrentProject():
    now   = datetime.utcnow()
    later = now + timedelta(hours = 1)

    ps = Period.objects.filter(state__abbreviation = 'S').filter(start__gte = now).filter(start__lt = later).filter(session__project__project_type__type = 'science').order_by('start')

    if len(ps) == 0: # Nothing sciency going on anytime soon.
        return

    title = ps[0].session.project.name[:100]
    url = tinyurl.create_one('https://dss.gb.nrao.edu/project/%s/public' % ps[0].session.project.pcode)
    update = 'Next observation: %s %s' % (title, url)
    twitter.Api(
        consumer_key        = settings.TWITTER['consumer_key']
      , consumer_secret     = settings.TWITTER['consumer_secret']
      , access_token_key    = settings.TWITTER['access_token_key']
      , access_token_secret = settings.TWITTER['access_token_secret']
    ).PostUpdate(update)
Example #34
def failgraph(args):
    args = parse_args(args)
    targetlist = ''
    colorpairs = 0
    targets = None
    for target in args.tests:
        targets = get_targets(target, COLORS[colorpairs % len(COLORS)],
                              avg=args.smoothing)
        colorpairs += 1
        subtarglist = '&'.join(targets)
        targetlist = '&'.join([targetlist, subtarglist])

    if not targets:
        return 'No data'

    url = '&'.join((graphite_base_url(since=args.duration,
                                      avg=args.smoothing), targetlist))
    return tinyurl.create_one(url)
Example #35
    def get_five(self, bot, sock, buffer):
        rss = buffer.msg.split()[0].split(".")[1]
        d = feedparser.parse(self.feeds[rss])

        more = buffer.msg.split()
        if len(more) > 1 and more[1] == "more":
            more = True
        else:
            more = False

        num = 0
        for e in d.entries:
            if e.link not in self.printed or not more:
                url = tinyurl.create_one(e.link)
                if e.link not in self.printed: self.printed.append(e.link)
                sock.msg(buffer.to, '\x0304%s\x03 \x0300-- Read more: %s' % (e.title, url))
                num += 1
                if num == 5: break
Example #36
def read_file(filename, api):
    try:
        f = open(filename, "r")
        for line in f:
            if not line.strip():
                continue
            data = None
            try:
                data = urllib2.urlopen(line)
            except urllib2.HTTPError:
                print "Error in the http get of ", line
                continue
            content = data.read()
            if line.find("egotastic"):
                try:
                    data = StringIO.StringIO(content)
                    gzipper = gzip.GzipFile(fileobj=data)
                    html = gzipper.read()
                except zlib.error:
                    print "error"
                except IOError:
                    html = content
            else:
                html = content
            soup = BeautifulSoup.BeautifulSoup(html)
            title = soup.title.string.lstrip(" \t\n\r").replace("\n", " ")
            i = title.find(":")
            turl = tinyurl.create_one(line)
            if (len(title) + len(turl)) > 140:
                if i > 0:
                    if (i + len(turl) + 1) > 140:
                        i = 140 - (len(turl) + 1)
                    else:
                        i = 140 - (len(turl) + 1)
            message = title[:i] + " " + turl
            print message
            try:
                api.update_status(message)
            except tweepy.error.TweepError:
                print "Duplicated item, skipping."
        f.close()
    except IOError as (errno, strerror):
        print "I/O error({0}): {1}".format(errno, strerror)
Example #37
    def send_context_twitter(self, context, token):
        logging.debug("Sending to Twitter")

        consumer_key = "f1j3JookvHIoe2MBL7HEg"
        consumer_secret = 'kdgLHtmyFh24UVIDIBtFRC9T5LUlRhgtCskIlG1P08'
        access_token_key = '353770828-OeTG1nMJEuMHIKEdVQvrFloXnI9hcUXBROZ8oyiX'
        access_token_secret = 'u30TQhtFmWB9bKgyXrhJ7SNLGuuxO2n3dJfswv66k'

        api = twitter.Api(consumer_key, consumer_secret, access_token_key, access_token_secret)

        map_url = 'http://maps.google.com/maps?z=18&q=%(location)s(%(text)s)' % {'location': context['location'],'text': context['status']}

        shortened = tinyurl.create_one(map_url)

        try:
            return api.PostUpdates("%s %s #mss" % (context['status'], shortened))

        except twitter.TwitterError, e:
            logging.exception("Can not send tweet on %s" % e)
Example #38
    def get_five(self, bot, sock, buffer):
        rss = buffer.msg.split()[0].split(".")[1]
        d = feedparser.parse(self.feeds[rss])

        more = buffer.msg.split()
        if len(more) > 1 and more[1] == "more":
            more = True
        else:
            more = False

        num = 0
        for e in d.entries:
            if e.link not in self.printed or not more:
                url = tinyurl.create_one(e.link)
                if e.link not in self.printed: self.printed.append(e.link)
                sock.msg(
                    buffer.to,
                    '\x0304%s\x03 \x0300-- Read more: %s' % (e.title, url))
                num += 1
                if num == 5: break
Example #39
File: lazycalc.py Project: berten/merlin
    def execute(self, message, user, params):
        tick = Updates.current_tick()
        url = Config.get("URL", "bcalc")
        i = 1
        coords, clazz = params.groups()
        for coord in re.findall(loadable.coord, coords):
            planet = Planet.load(coord[0], coord[2], coord[4])
            if planet:
                scan = planet.scan("A")

                if scan and (int(tick) <= scan.tick + 12):
                    url = scan.addPlanetToCalc(url, False, i, self.class_translation[clazz] if clazz in self.class_translation.keys() else None)
                else:
                    message.reply("Missing a scan for %d:%d:%d" % (
                        planet.x, planet.y, planet.z))
                    return
            i = i + 1

        message.reply("Calc: %s" % (
        tinyurl.create_one("%s&att_fleets=%d" % (url, i - 1))))
Example #40
 def check_tiny(self,conversation,url,message,group=True):
     if len(url) != 2:
         message.body = "<b>Please type /help for more help</b>"
     else:
         req = urllib2.Request(url[1])
         try:
             urllib2.urlopen(req)
             url = tinyurl.create_one(url[1])
             if group:
                 for conv in self.session.conversations.itervalues():
                     self._gui_message_(url,conv)
             else:
                 self._gui_message_(url,conversation)
                    
         except NameError:
             message.body = "<b>Please install tinyurl e.g: easy_install tinyurl or pip install tiny</b>"
         except ValueError:
             message.body = "<b>Check your URL please</b>"
         except urllib2.URLError:
             message.body = "<b>Check your URL please</b>"
Example #41
    def __fetch_feed(self, feed_info):
        """Fetches a RSS feed, parses it and updates the database and/or announces new news."""
        while 1:
            try:
                # Parse a feed's url
                news = feedparser.parse(feed_info[2])

                # Reverse the ordering. Oldest first.
                for newsitem in news.entries[::-1]:
                    newstitle = newsitem.title
                    if self.__config.shorturls:
                        newsurl = tinyurl.create_one(
                            newsitem.link)  # Create a short link
                        if newsurl == "Error":  #If that fails, use the long version
                            newsurl = newsitem.link
                    else:
                        newsurl = newsitem.link

                    # Try to get the published date. Otherwise set it to 'no date'
                    try:
                        newsdate = newsitem.published
                    except Exception as e:
                        try:
                            newsdate = newsitem.updated
                        except Exception as e:
                            newsdate = "no date"

                    # Update the database. If it's a new issue, post it to the channel
                    is_new = self.__db.insert_news(feed_info[0], newstitle,
                                                   newsitem.link, newsdate)
                    if is_new:
                        self.__irc.post_news(feed_info[1], newstitle, newsurl,
                                             newsdate)

                print "Updated: " + feed_info[1]
            except Exception as e:
                print e
                print "Failed: " + feed_info[1]

            # sleep frequency minutes
            time.sleep(int(feed_info[3]) * 60)
Example #42
def prepareText(*picurl):
  text = ''
  
  if picurl is not None:
    try:
        if len(picurl)>0:
            print "picurl: %s" % picurl
        else:
            print "len(picurl)=0"
    except Exception as err:
        print str(err)
        #return str(err)

  if picurl:
    print "picurl: %s" % picurl
    #randomly send or not link to comment on site (will have less space for BS on tweet)
    if random.randint(1,3)%3 == 0:
      SEND_LINK_TO_COMMENT=False
    else:
      SEND_LINK_TO_COMMENT=True
    
    if(SEND_LINK_TO_COMMENT):
      commenturl = buildCommentURL(picurl)
      commenturl = tinyurl.create_one(commenturl) #tiny url for comment link --19 char
      text += ' '
      text += commenturl
    
    else:
      text += ' www.creetic.io'
    
  else:
    print "picurl is None"
    text += ' www.creetic.io'
  
  available_length = 280-len(text) ##NOT USING TWITTER_CHARACTER_LIMIT
  print "Available length for BS: %d" %available_length
  
  text = bs_en.generatePhrase_short(available_length) + text
  print "Tweet length: %d characters" %len(text)
  return text
Example #43
File: lazycalc.py Project: berten/merlin
    def execute(self, message, user, params):
        tick = Updates.current_tick()
        url = Config.get("URL", "bcalc")
        i = 1
        coords, clazz = params.groups()
        for coord in re.findall(loadable.coord, coords):
            planet = Planet.load(coord[0], coord[2], coord[4])
            if planet:
                scan = planet.scan("A")

                if scan and (int(tick) <= scan.tick + 12):
                    url = scan.addPlanetToCalc(
                        url, False, i, self.class_translation[clazz]
                        if clazz in self.class_translation.keys() else None)
                else:
                    message.reply("Missing a scan for %d:%d:%d" %
                                  (planet.x, planet.y, planet.z))
                    return
            i = i + 1

        message.reply("Calc: %s" % (tinyurl.create_one("%s&att_fleets=%d" %
                                                       (url, i - 1))))
Example #44
def docify(prepend):
    """creates and uploads a shared google doc from the template in filename

    This method is only called when adding a new trip.

    This first alters the template document to have the trip info stored on the
    first line of the file. It then proceeds to set up a client for the google
    docs interactions using my secret credentials.

    The edited template is uploaded with a generic name and the link to the
    document is shortened to be returned. The document is then found in the
    DocsList and the ACL permissions are altered so anyone can edit the file.

    Before returning the shortened link, the template is restored by removing
    the trip description from the first line.

    """

    link = ''
    add_first(prepend)

    client = gdata.docs.client.DocsClient(source='trailbot')
    client.ClientLogin(sekret_username, sekret_password, client.source)

    entry = client.Upload(filename, 'trip', content_type='text/plain')
    link = tinyurl.create_one(entry.GetAlternateLink().href)

    feed = client.GetDocList(
        uri='https://docs.google.com/feeds/default/private/full')
    doc_entry = feed.entry[0]

    scope = gdata.acl.data.AclScope(type='default')
    role = gdata.acl.data.AclRole(value='writer')
    acl_entry = gdata.docs.data.Acl(scope=scope, role=role)
    client.Post(acl_entry, doc_entry.GetAclFeedLink().href)

    remove_first()

    return link
Example #45
def docify(prepend):
    """creates and uploads a shared google doc from the template in filename

    This method is only called when adding a new trip.

    This first alters the template document to have the trip info stored on the
    first line of the file. It then proceeds to set up a client for the google
    docs interactions using my secret credentials.

    The edited template is uploaded with a generic name and the link to the
    document is shortened to be returned. The document is then found in the
    DocsList and the ACL permissions are altered so anyone can edit the file.

    Before returning the shortened link, the template is restored by removing
    the trip description from the first line.

    """

    link = ''
    add_first(prepend)

    client = gdata.docs.client.DocsClient(source='trailbot')
    client.ClientLogin(sekret_username, sekret_password, client.source)

    entry = client.Upload(filename, 'trip', content_type='text/plain')
    link = tinyurl.create_one(entry.GetAlternateLink().href)

    feed = client.GetDocList(
        uri='https://docs.google.com/feeds/default/private/full')
    doc_entry = feed.entry[0]

    scope = gdata.acl.data.AclScope(type='default')
    role = gdata.acl.data.AclRole(value='writer')
    acl_entry = gdata.docs.data.Acl(scope=scope, role=role)
    client.Post(acl_entry, doc_entry.GetAclFeedLink().href)

    remove_first()

    return link
Example #46
    def alert_new_posts(self):
        """Compares self.last_post to each item in self.parsed_feed and sends
        alert texts for all new posts, updating self.last_post at the end."""

        for ind, post in enumerate(self.parsed_feed['items']):
            # Record when we match the last-seen post. We will send alerts for
            # all posts occuring after match.
            if not self.is_new_post(post):
                cutoff = ind
                break
        item_list = list(reversed(self.parsed_feed['items'][:ind]))
        if len(item_list) == 0:
            return
        print '%d posts to send alerts for' % len(item_list)
        for post in item_list:
            if self.last_post is None or self.is_new_post(post):
                # Set text body
                tiny_url = tinyurl.create_one(str(post['id']))
                text_body = str(post['title']) + ' - ' + tiny_url
                self.send_sms(text_body)
                print 'Sent text for %s' % tiny_url
            break
        self.set_last_post(post)
Example #47
    def send_context_facebook(self, context, token):
        logging.debug("Sending to facebook")

        url = 'https://graph.facebook.com/me/feed?access_token=%s' % token

        message = ''
        if context.get('status'):
            message += context.get('status')
            logging.debug("Context [status]: %s" % context.get('status'))

        if context.get('location'):
            map_url = 'http://maps.google.com/maps?z=18&q=%(location)s(%(text)s)' % {'location': context['location'],'text': context['status']}

            logging.debug("Context [map_url]: %s" % map_url)
            shortened = tinyurl.create_one(map_url)

            message += ' %s' % shortened
            logging.debug("Context [location]: %s" % shortened)

        try:
            return MSSCurl().post(url=url, port=None, postfields={'message': message})
        except Exception, e:
            logging.exception("Can not send post to Facebook: %s" % e)
Example #48
    def send_context_twitter(self, context, token):
        logging.debug("Sending to Twitter")

        consumer_key = "f1j3JookvHIoe2MBL7HEg"
        consumer_secret = 'kdgLHtmyFh24UVIDIBtFRC9T5LUlRhgtCskIlG1P08'
        access_token_key = '353770828-OeTG1nMJEuMHIKEdVQvrFloXnI9hcUXBROZ8oyiX'
        access_token_secret = 'u30TQhtFmWB9bKgyXrhJ7SNLGuuxO2n3dJfswv66k'

        api = twitter.Api(consumer_key, consumer_secret, access_token_key,
                          access_token_secret)

        map_url = 'http://maps.google.com/maps?z=18&q=%(location)s(%(text)s)' % {
            'location': context['location'],
            'text': context['status']
        }

        shortened = tinyurl.create_one(map_url)

        try:
            return api.PostUpdates("%s %s #mss" %
                                   (context['status'], shortened))

        except twitter.TwitterError, e:
            logging.exception("Can not send tweet on %s" % e)
Example #49
    def __fetch_feed(self, feed_info):
        """Fetches a RSS feed, parses it and updates the database and/or announces new news."""
        while 1:
            try:
                # Parse a feed's url
                news = feedparser.parse( feed_info[2] )

                # Reverse the ordering. Oldest first.
                for newsitem in news.entries[::-1]:
                    newstitle = newsitem.title
                    newsurl = tinyurl.create_one(newsitem.link) # Create a short link
                    if newsurl == "Error": #If that fails, use the long version
                        newsurl = newsitem.link
                    newsurl = Colours('', newsurl).get()

                    # Try to get the published date. Otherwise set it to 'no date'
                    try:
                        newsdate = newsitem.published
                    except Exception as e:
                        try:
                            newsdate = newsitem.updated
                        except Exception as e:
                            newsdate = "no date"

                    # Update the database. If it's a new issue, post it to the channel
                    is_new = self.__db.insert_news(feed_info[0], newstitle, newsurl, newsdate)
                    if is_new:
                        self.__irc.post_news(feed_info[1], newstitle, newsurl, newsdate)

                print Colours('7',"Updated: ").get() + feed_info[1]
            except Exception as e:
                print e
                print Colours('1',"Failed: ").get() + feed_info[1]

            # sleep frequency minutes
            time.sleep(int(feed_info[3])*60) 
Example #50
def bbgmore(phenny,input):
	url="http://www.bloomberg.com/quickview/"
	page=urllib2.urlopen(url)
	soup = BeautifulSoup(page.read())

	headlines=soup.findAll('li',{'class':'quick_view_bar'})

	newsmatrix = []

	for x in headlines:
		if x.find('a',{'data-type':'Story'}):
			# get the timestamp
			timestamp = x.find('p',{'class':'timestamp'})

			# if the timestamp has a string
			if timestamp.string:
				timestamp = timestamp.string
				tmin = timestamp[:2]
				tmin = int(tmin)

				# if the timestamp has minutes ago in the string
				if ('hour ago' in timestamp) or ('hours ago' in timestamp):

					# format the headline
					headline = x.find('a').string
					link = x.find('a')['href']
					link = "http://www.bloomberg.com%s" % link
					link = tinyurl.create_one(link)
					snippet = "%s - %s (%s)" % (headline,timestamp,link)
					derp = [float(tmin),snippet]
					newsmatrix.append(derp)

	newsmatrix.sort(key=lambda x: x[0])

	for x in newsmatrix:
		phenny.say(x[1])
Example #51
 def _tinyurl(self, url):
     return t.create_one(url)
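The one-liner above calls `t.create_one`, which suggests the module was imported under the alias `t`. A minimal sketch of that assumed setup (the host class name here is hypothetical; the snippet does not show the real one):

import tinyurl as t

class Shortener(object):
    # hypothetical wrapper class standing in for the snippet's original class
    def _tinyurl(self, url):
        return t.create_one(url)

print(Shortener()._tinyurl('https://example.com/some/long/path'))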
Example #52
from time import sleep

import tinyurl

try:
    f = open('ressources/URL_bot_views.txt', 'w+')
    urls = tuple(open('ressources/Links_bot_views.txt', 'r'))
    print('Get file...')
    for i in range(0, len(urls)):
        if 'http' in urls[i]:
            link_tinyurl = tinyurl.create_one(urls[i])
            print('Content TinyURL: ' + link_tinyurl + '\n')
            sleep(1)
            f.write(link_tinyurl + '\n')
        else:
            continue
except:
    pass
f.close()
Example #53
def noroeste_scraper():

	rss_page = urlopen('http://www.noroeste.com.mx/rss.php').read()
	rss_page_htmlel = html.fromstring(rss_page)

	rss_section_sel = CSSSelector('td > div:nth-child(5)')

	rss_links_sel = CSSSelector('.texto_normal')

	rss_section_match = rss_section_sel(rss_page_htmlel)

	rss_section_html = html.fromstring( html.tostring(rss_section_match[1]) )
	print html.tostring(rss_section_match[1])
	i=0
	count = 0
	db = MyDatabase()

	for rss_link in rss_links_sel(rss_section_html):
		if i%2 == 0:
			readable_section = rss_link.text
		if i%2 != 0:
			doc = etree.parse(rss_link.text)
			channel = doc.find("channel")
			items = channel.findall("item")

			for rss_item in items:
				readable_url = rss_item.findtext("link")
				readable_pubDate = unicode(rss_item.findtext("pubDate"))
				readable_title = unicode(rss_item.findtext("title"))

				tinyLink = tinyurl.create_one(str(readable_url))
				query_a = "SELECT count(1) from noroeste_articles WHERE url='"+str(readable_url)+"'"
				query_link = "SELECT count(1) from noroeste_noarticle_lnks WHERE link='"+tinyLink+"'"

				rl =db.query(query_link)
				rl = rl[0]['count(1)']
				ra = db.query(query_a)
				ra = ra[0]['count(1)']



				if readable_url and not ra and not rl:
					count+=1

					readingError = False

					docHtml = urllib.urlopen(readable_url).read()
					#using the lxml function html
					tree = html.fromstring(docHtml)

					newsArticle = CSSSelector('#tamano5 p')

					try:

						article_paragraphs = newsArticle(tree)

						article_p1 = article_paragraphs[0].text
						city_p = article_p1.encode("utf-8")

						readable_article = ""
						for x in article_paragraphs:
							 readable_article += x.text_content()


						city_match = re.search('([\w\s\/áéíóúüñçåÁÉÍÓÚÜÑÇÐ\-]+\._)',city_p)

						readable_city = city_match.group(0)
						readable_city = readable_city[:-2]

						readable_url = unicode(readable_url)

					except:
						print "Url no good: "+readable_url
						readingError = True
						count -=1

						tinyLink = tinyurl.create_one(str(readable_url))
						query_noarticle = "INSERT into noroeste_noarticle_lnks VALUES (%s)"
						params = (tinyLink,)
						#db.insert_row(query_noarticle, params)


					if not readingError and readable_title and readable_url and readable_article:
						query_article = """INSERT into noroeste_articles VALUES (%s,%s,now(),%s,%s,%s,%s,%s,%s)"""
						params = (None,
								readable_url.encode("utf-8"),
								readable_title.encode("utf-8"),
								readable_pubDate.encode("utf-8"),
								readable_city,
								readable_article,
								None,
								readable_section.encode("utf-8"))
						#db.insert_row(query_article, params)
						print str(count)+") "+readable_url


		i+=1

	print "Success! "+str(count)+"""items scraped!"""
Example #54
              'religio',
              'socialia',
              'percontatio',
              'opiniones',
              'insolita']

for cat in categories:
    tree = etree.parse('http://www.alcuinus.net/ephemeris/archi2012/rubric1.php?categ=' + cat, parser)

    news_titles = []
    for t in tree.xpath('//b/a[@class="a_txt1"]'):
        news_titles.append(t.text.lower())

    news_links = []
    for u in tree.xpath('//b/a[@class="a_txt1"]//@href'):
        tiny = tinyurl.create_one('http://www.alcuinus.net/ephemeris/archi2012/' + u)
        news_links.append(tiny.replace('http://', ''))

    news_dates = []
    for d in tree.xpath('//em'):
        published = d.text.split('-')[1].strip()
        news_dates.append(published)

    data = zip(news_titles, news_links, news_dates)

    tweets = []
    for d in data:
        headline = d[0]
        postlink = d[1]
        postdate = d[2]
        stamp = date.today()
Example #55
File: memes.py Project: om3rta/memegen
def shorten(address):
    """
    Creates a short url with TinyUrl
    """
    tiny = tinyurl.create_one(address)
    return tiny
Example #56
def generate_payment(company=company, currency=currency, currency_html=currency_html):
    if not active:
        return redirect(url_for('offline'))
    # Generate forms for link generation
    form = GeneratorForm()

    if form.validate_on_submit():
        # Form is valid, lets get the data and process them
        name_receiver = form.name_receiver.data
        email_receiver = form.email_receiver.data
        reference = form.reference.data
        amount = form.amount.data
        amount_paid_out = ''
        if amount != u'':
            fee = get_fee(amount, False) #IS THIS CORRECT, or should it be TRUE? check!
            amount_paid_out = str(float(amount) - float(fee))
            amount=two_digit_string(amount)
            amount_paid_out=two_digit_string(amount_paid_out)

           
        # Construct the link paths
        rel_link = '/custom/'
        rel_link += form.account_number.data + '/'
        rel_link += form.sort_code.data + '/'
        rel_link += form.name_receiver.data + '/'
        # Add the optional statements in a special way
        def optional_add(rel_link, to_add):
            if to_add == u'':
                rel_link += 'empty/'
            else:
                rel_link += to_add + '/'
            return rel_link

        rel_link = optional_add(rel_link, form.amount.data)
        rel_link = optional_add(rel_link, form.reference.data)
        rel_link = optional_add(rel_link, form.email_receiver.data)      
        # Remove empty cells at the end of link paths
        def remove_end(link, suffix):
            while link.endswith(suffix):
                link = link[:-len(suffix)]
            return link

        rel_link = remove_end(rel_link, 'empty/')
        rel_link = convert_white_space_in_link(rel_link)
        abs_link = domain + rel_link
        abs_tiny_link = tinyurl.create_one('http://' + abs_link)
        
        return render_template('custom_link.html',
                               name_receiver=name_receiver,
                               amount_charged=amount,
                               amount_paid_out=amount_paid_out,
                               email_receiver=email_receiver,
                               rel_link=rel_link,
                               abs_link=abs_link,
                               abs_tiny_link=abs_tiny_link,
                               reference=reference,
                               currency=currency,
                               currency_html=currency_html,
                               company=company)
    else:
        # Forms not validated, resubmit
        return render_template('generate_payment.html',
                               form=form,
                               currency_html=currency_html,
                               company=company)
Example #57
def milenio_scraper():

	rss_page = urlopen('http://www.milenio.com/rss-milenio.html').read()
	rss_page_htmlel = html.fromstring(rss_page)

	rss_section_sel = CSSSelector('table.canalesRSS')

	rss_names_sel = CSSSelector('td:nth-child(2)')
	rss_links_sel = CSSSelector('td~ td+ td a')

	rss_section_match = rss_section_sel(rss_page_htmlel)

	rss_section_html = html.fromstring( html.tostring(rss_section_match[0]) )

	i=0
	count = 0
	db = MyDatabase()
	rss_sections = []
	rss_links = []
	for rss_section in rss_names_sel(rss_section_html):
		rss_sections.append( rss_section.text )

	for rss_link in rss_links_sel(rss_section_html):
		rss_links.append(rss_link.text)
		readable_section = rss_sections[i]

		#doc = etree.parse(rss_link.text)
		doc = feedparser.parse(rss_link.text)
		#channel = doc.find("channel")
		#items = channel.findall("item")
		rss_items = doc["items"]

		for rss_item in rss_items:

			readable_url = rss_item["link"]
			readable_pubDate = unicode(rss_item["published"])
			readable_title = unicode(rss_item["title"])

			tinyLink = tinyurl.create_one(str(readable_url))
			query_a = "SELECT count(1) from milenio_articles WHERE url='"+str(readable_url)+"'"
			query_link = "SELECT count(1) from milenio_noarticle_lnks WHERE link='"+tinyLink+"'"

			rl =db.query(query_link)
			rl = rl[0]['count(1)']
			ra = db.query(query_a)
			ra = ra[0]['count(1)']



			if readable_url and not ra and not rl:
				count+=1

				readingError = False

				docHtml = urllib.urlopen(readable_url).read()
				#using the lxml function html
				tree = html.fromstring(docHtml)

				article_sel = CSSSelector('#folding div.mce-body.mce')
				city_sel = CSSSelector('p.pg-bkn-location small')

				try:

					article_paragraphs = article_sel(tree)
					city_p = city_sel(tree)

					article_p1 = city_p[0].text
					readable_city = article_p1.encode("utf-8")

					readable_article = ""
					for x in article_paragraphs:
						 readable_article += x.text_content()


					#city_match = re.search('([\w\s\/áéíóúüñçåÁÉÍÓÚÜÑÇÐ\-]+\._)',city_p)

					#readable_city = city_match.group(0)
					#readable_city = readable_city[:-2]

					readable_url = unicode(readable_url)

				except:
					print "Url no good: "+readable_url
					readingError = True
					count -=1

					tinyLink = tinyurl.create_one(str(readable_url))
					query_noarticle = "INSERT into milenio_noarticle_lnks VALUES (%s)"
					params = (tinyLink,)
					db.insert_row(query_noarticle, params)


				if not readingError and readable_title and readable_url and readable_article:
					query_article = """INSERT into milenio_articles VALUES (%s,%s,now(),%s,%s,%s,%s,%s,%s)"""
					params = (None,
							readable_url.encode("utf-8"),
							readable_title.encode("utf-8"),
							readable_pubDate.encode("utf-8"),
							readable_city,
							readable_article,
							None,
							readable_section.encode("utf-8"))
					db.insert_row(query_article, params)
					print str(count)+") "+readable_url

		i+=1

	print "Milenio finished! "+str(count)+"""items scraped!"""