def checkFeed(args):
    if len(args) != 1:
        print 'No feed given. Please specify a link to an RSS feed to re-classify.'
        return
    print 'Existing feed data:'
    feedStatus(args)
    feed = Feed(url=args[0])
    print 'Working with: %s' % feed.url
    print 'Attempting to download the feed.'
    couldDownload = feed.downloadFeed()
    if not couldDownload:
        print 'Could not download the feed. Is the URL correct? Is their site up? Is GTWifi working for you right now?'
        return
    print 'Successfully downloaded the feed.'
    print 'Attempting to parse the feed.'
    parseError, stats = feed.parseFeed()
    if parseError is None:
        print 'Successfully parsed the feed.'
        print stats
        if len(feed.articles) == 0:
            print 'No articles parsed. Something is wrong.'
def load(self, store):
    for group in store.childGroups():
        store.beginGroup(group)
        feed = Feed()
        feed.load(store)
        self.feeds.append(feed)
        store.endGroup()
def load_headlines():
    global headlines_url
    global response_headline
    global headlines_data
    global stories
    headlines_url = "http://sanfrancisco.giants.mlb.com/gen/sf/news/headlines.json"
    feed = Feed(headlines_url)
    feed.load_and_prepare()
    succeeded, loaded_headlines_json = feed.get_representation
    length = len(loaded_headlines_json["members"])
    story_elements = []
    story_blurbs = []
    story_url = []
    for index in range(length):
        story_elements.append(loaded_headlines_json["members"][index])
    length = len(story_elements)
    stories = []
    for index in range(length):
        try:
            item = NewsItem(story_elements[index]["althead"], story_elements[index]["url"])
            stories.append(item)
        except KeyError:
            print "No althead or url found at index %d; skipping to next item..." % index
            continue
def articles():
    from feed import Feed
    # feed = Feed('data/2014-04-05_16-54.atom')
    feed = Feed()
    feed.load()
    return feed.to_json()
def country_articles(country=None):
    from feed import Feed
    country = country.encode('ascii', 'ignore')
    country = country.replace("Dem.", "Democratic")
    country = country.replace("Rep.", "Republic")
    country = country.replace("W.", "West")
    country = country.replace("Lao PDR", "Laos")
    country = country.replace("Bosnia and Herz.", "Bosnia and Herzegovina")
    country = country.replace("Eq. Guinea", "Equatorial Guinea")
    country = country.replace("Cte d'Ivoire", "Ivory Coast")
    country = country.replace("Fr. S. Antarctic Lands", "French Southern and Antarctic Lands")
    country = country.replace("Is.", "Islands")
    country = country.replace("S. Sudan", "South Sudan")
    country = country.replace(" ", "_")
    print country
    url1 = "http://api.feedzilla.com/v1/categories/19/articles/search.atom?q=" + country + "&count=50"
    feed = Feed(url1)
    # url2 = "http://api.feedzilla.com/v1/categories/26/articles/search.atom?q=" + country + "&count=10"
    # feed.add_feed(url2)
    # feed = Feed()
    # feed.load()
    # feed.filter_country(country)
    feed.extract()
    return feed.to_json()
def __init__(self, cache_dir, status_change_handler):
    Feed.__init__(self)
    print "init icecast feed"
    self.handler = IcecastHandler()
    self.cache_dir = cache_dir
    self.filename = os.path.join(self.cache_dir, "icecast.xml")
    self.uri = "http://dir.xiph.org/yp.xml"
    self.status_change_handler = status_change_handler
def POST(self):
    feed = Feed()
    if feed.add(web.input(url="url")["url"]):
        raise web.seeother("/")
    else:
        user_id = session.user_id
        error_message = "Invalid feed url"
        return render.feed_add(error_message, user_id)
def __init__(self, cache_dir, status_change_handler):
    Feed.__init__(self)
    print "init local feed"
    self.handler = LocalHandler()
    self.cache_dir = cache_dir
    self.filename = os.path.join(self.cache_dir, "local.xml")
    self.uri = "http://www.programmierecke.net/programmed/local.xml"
    self.status_change_handler = status_change_handler
def __init__(self, cache_dir, status_change_handler):
    Feed.__init__(self)
    print("init shoutcast feed")
    self.handler = ShoutcastHandler()
    self.cache_dir = cache_dir
    self.filename = os.path.join(self.cache_dir, "shoutcast-genre.xml")
    self.uri = "http://www.shoutcast.com/sbin/newxml.phtml"
    self.status_change_handler = status_change_handler
def __init__(self, cache_dir, status_change_handler):
    Feed.__init__(self)
    print "init board feed"
    self.handler = BoardHandler()
    self.cache_dir = cache_dir
    self.filename = os.path.join(self.cache_dir, "board.xml")
    self.uri = "http://www.radio-browser.info/xml.php"
    self.status_change_handler = status_change_handler
def get_scoreboard_info():
    global year
    global month
    global day
    global giants_pitcher_name
    global giants_pitcher_era
    global end_game_message
    global current_game_status
    global current_game_inning
    master_scoreboard_url = "http://mlb.mlb.com/gdcross/components/game/mlb/year_%s/month_%s/day_%s/master_scoreboard.json" % (year, str(month).zfill(2), str(day).zfill(2))
    feed = Feed(master_scoreboard_url)
    feed.load_and_prepare()
    succeeded, loaded_schedule_json = feed.get_representation
    schedule_list = loaded_schedule_json["data"]["games"]["game"]
    send = client.sock.send
    for game in schedule_list:
        try:
            if game["away_team_name"] == "Giants" or game["home_team_name"] == "Giants":
                current_game_status = game["alerts"]["brief_text"]
                # if "Middle 7th" in game["alerts"]["brief_text"]:
                #     msg = "PRIVMSG " + input[2] + " :" + "When the lights.. go down.. in the cityyy... https://www.youtube.com/watch?v=tNG62fULYgI" + "\r\n"
                #     send(msg)
                # https://www.youtube.com/watch?v=tNG62fULYgI
        except KeyError:
            if "winning_pitcher" in game and (game["home_team_name"] == "Giants" or game["away_team_name"] == "Giants"):
                winning_pitcher = "%s %s" % (game["winning_pitcher"]["first"], game["winning_pitcher"]["last"])
                losing_pitcher = "%s %s" % (game["losing_pitcher"]["first"], game["losing_pitcher"]["last"])
                end_game_message = "Game over. Winning pitcher: %s. Losing pitcher: %s." % (winning_pitcher, losing_pitcher)
                current_game_status = ""
            else:
                current_game_status = "No active game."
        if game["away_team_name"] == "Giants":
            if "away_probable_pitcher" in game:
                giants_pitcher_name = "%s %s" % (game["away_probable_pitcher"]["first"], game["away_probable_pitcher"]["last"])
                giants_pitcher_era = game["away_probable_pitcher"]["era"]
                return
            elif "opposing_pitcher" in game:
                giants_pitcher_name = "%s %s" % (game["opposing_pitcher"]["first"], game["opposing_pitcher"]["last"])
                giants_pitcher_era = game["opposing_pitcher"]["era"]
                return
        elif game["home_team_name"] == "Giants":
            if "home_probable_pitcher" in game:
                giants_pitcher_name = "%s %s" % (game["home_probable_pitcher"]["first"], game["home_probable_pitcher"]["last"])
                giants_pitcher_era = game["home_probable_pitcher"]["era"]
                return
            elif "pitcher" in game:
                giants_pitcher_name = "%s %s" % (game["pitcher"]["first"], game["pitcher"]["last"])
                giants_pitcher_era = game["pitcher"]["era"]
                return
def _subscribe_to_feed(self, asset_id, subscriber_id, callback, sub_type=SUBTYPE_MID):
    if asset_id in self._feeds:
        f = self._feeds[asset_id]
    else:
        f = Feed(asset_id, 5)
        self._feeds[asset_id] = f
    if sub_type == SUBTYPE_MID:
        f.subscribe_to_mid_updates(subscriber_id, callback)
    elif sub_type == SUBTYPE_LAST:
        f.subscribe_to_last_updates(subscriber_id, callback)
def __init__(self, cache_dir, status_change_handler, genre):
    Feed.__init__(self)
    self.handler = ShoutcastHandler()
    self.cache_dir = cache_dir
    self.filename = os.path.join(self.cache_dir, "shoutcast-" + genre + ".xml")
    self.uri = "http://www.shoutcast.com/sbin/newxml.phtml?%s" % urllib.parse.urlencode({"genre": genre})
    self.status_change_handler = status_change_handler
    self.genre = genre
    self.setAutoDownload(False)
    self.setUpdateChecking(False)
def frontpage(request):
    add_video_url = request.route_url('add_video')
    user_id = request.authenticated_userid
    user = DBHelper.get_user_from_id(user_id)
    topics = DBHelper.get_all_topics()
    topic_ids = [x.id for x in topics]
    feed = Feed()
    all_videos = feed.build_feed(user_id, topic_ids)
    return {'videos': all_videos, 'logged_in': user, 'topics': topics}
def create_feed(self, init_data):
    """Create a feed object from the given parameters."""
    feed = Feed(init_data, feedhandler=self)
    feed.connect('updated', self.sig_feed_updated)
    feed.connect(
        'created',
        self._create_feed_deferred,
        init_data["url"],
        init_data["feed_name"]
    )
def vote_video(request):
    user_id = request.authenticated_userid
    vote = request.matchdict['vote']
    video_id = int(request.matchdict['video_id'])
    topic_id = DBHelper.get_video(video_id).topic_id
    change = DBHelper.vote_video(user_id, video_id, vote)
    feed = Feed()
    feed.update_video_score(video_id, topic_id, change)
    return {'change': change}
def __init__(self, cache_dir, status_change_handler):
    Feed.__init__(self)
    self.handler = RadioTimeHandler()
    self.handler.cache_dir = cache_dir
    self.handler.status_change_handler = status_change_handler
    self.cache_dir = cache_dir
    self.status_change_handler = status_change_handler
    self.filename = os.path.join(self.cache_dir, "radiotime.xml")
    self.uri = "http://opml.radiotime.com/Browse.ashx?%s" % urllib.urlencode(
        {"id": "r0", "formats": "ogg,mp3,aac,wma"})
    self._name = "RadioTime"
    self.setUpdateChecking(False)
def testSave(self):
    store = StoreMock()
    feed = Feed()
    feed.title = "T1"
    self.feedManager.feeds.append(feed)
    feed = Feed()
    feed.title = "T2"
    self.feedManager.feeds.append(feed)
    feed = Feed()
    self.feedManager.feeds.append(feed)
    self.feedManager.save(store)
    self.assertEqual(len(store.childGroups()), 3)
def gather_choices(self):
    temp = dict()
    # Create a new feed object dict from the possibly changed information
    for feed in self.feed_list:  # for each feed in our temporary ListStore
        feed_name = feed[0]
        feed_uri = feed[1]
        temp[feed_name] = Feed(feed_name, feed_uri)
        if feed_name in self.choices:
            if self.choices[feed_name].uri == feed_uri:
                temp[feed_name].items = self.choices[feed_name].items
    return temp
def htmlSource(request):
    sites = ['FourChan', 'Fukung', 'Senorgif', 'Knowyourmeme']
    random.shuffle(sites)
    images = globals()[sites[0]]().do().images()
    while not images:
        random.shuffle(sites)
        images = globals()[sites[0]]().do().images()
    random.shuffle(images)
    if not Feed.gql("WHERE url = :1", images[0]).fetch(1):
        feed = Feed()
        feed.url = images[0]
        feed.put()
    return render_to_response('boardTemplate.html', {'image': images[0]})
def post(self):
    taskqueue.Queue('feeds').purge()
    time.sleep(1)  # TODO: anything but this; we need to wait 1 second, but how?
    for feed in Feed.query():
        feed.schedule_update()
    self.response.write('done')
def setUp(self):
    import time
    self.feed = Feed()
    self.savefeed = Feed()
    self.savefeed.title = "MyTitle"
    self.savefeed.author = "MyAuthor"
    self.savefeed.url = "MyUrl"
    self.savefeed.homepage = "MyHomepage"
    self.savefeed.updated = time.gmtime(time.time())
    self.savefeed.deleted_entry_ids = ["123", "456"]
def print_today():
    full_date = "%s/%s/%s/" % (year, str(month).zfill(2), str(day).zfill(2))
    # print "%d %d %d" % (today[0], today[1], today[2])
    global today_game
    schedule_url = "http://sanfrancisco.giants.mlb.com/gen/schedule/sf/%s_%s.json" % (year, month)
    feed = Feed(schedule_url)
    feed.load_and_prepare()
    succeeded, loaded_schedule_json = feed.get_representation
    response_schedule = urllib2.urlopen(schedule_url)
    schedule_data = json.load(response_schedule)
    # pretty-printed json file object data
    schedule_data = json.dumps(schedule_data, sort_keys=True, indent=4, separators=(',', ': '))
    loaded_schedule_json = json.loads(schedule_data)
    start_time = ""
    for entry in loaded_schedule_json:
        try:
            if full_date in entry["game_id"]:
                versus = entry["away"]["full"] + " @ " + entry["home"]["full"]
                if entry["away"]["full"] == "Giants":
                    start_time = entry["away"]["start_time_local"]
                    home = False
                else:
                    start_time = entry["home"]["start_time_local"]
                start_time = start_time[11:]
                start_time = start_time[:5]
                t = time.strptime(start_time, "%H:%M")
                new_start_time = time.strftime("%I:%M %p", t)
                # print versus + " starting at " + new_start_time
                today_game = versus + " starting at " + new_start_time
        except KeyError:
            continue
def testEqual(self):
    feed = Feed()
    feed.title = self.feed.title
    feed.updated = self.feed.updated
    feed.entries = self.feed.entries
    feed.url = self.feed.url
    feed.homepage = self.feed.homepage
    feed.author = self.feed.author
    self.assertEqual(self.feed, feed)
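The equality check this test exercises is not shown in the collection. A minimal sketch of an __eq__ consistent with it, assuming Feed compares exactly the attributes the test copies (title, updated, entries, url, homepage, author); the real class may differ:

# Hypothetical sketch: structural equality as implied by testEqual above.
class Feed(object):
    _compared = ('title', 'updated', 'entries', 'url', 'homepage', 'author')

    def __eq__(self, other):
        if not isinstance(other, Feed):
            return NotImplemented
        # Compare attribute by attribute; missing attributes count as None.
        return all(getattr(self, a, None) == getattr(other, a, None)
                   for a in self._compared)

    def __ne__(self, other):  # needed on Python 2, where != is not derived from ==
        result = self.__eq__(other)
        return result if result is NotImplemented else not result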
def games(title=None):
    '''
    Usage:
        Get all games at /games
        Get one game at /games/title
    Returns json, or 404 if not found.
    '''
    feed = Feed(url=url)
    if title is None:
        return app.response_class(feed.get(), content_type='application/json')
    data = json.loads(feed.get())
    for item in data:
        if item['title'] == title:
            return app.response_class(json.dumps(item), content_type='application/json')
    return not_found()
def test_from_dict(self):
    example_name = 'Name here'
    example_uri = 'URI here'
    attribute_dict = {'name': example_name, 'uri': example_uri}
    f = Feed.from_dict(attribute_dict)
    self.assertEqual(f.name, example_name)
    self.assertEqual(f.uri, example_uri)
    # Unsupported attributes should just be ignored
    invalid = 'invalid'
    attribute_dict[invalid] = 'better not show up'
    f = Feed.from_dict(attribute_dict)
    with self.assertRaises(AttributeError):
        f.invalid
    attribute_dict.pop(invalid)
    # Missing attributes should trigger an error
    attribute_dict.pop('name')
    with self.assertRaises(RuntimeError):
        Feed.from_dict(attribute_dict)
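The from_dict under test is not included here. A minimal sketch that would satisfy the three assertions above (unknown keys ignored, missing required keys raise RuntimeError), offered as an assumption rather than the library's actual implementation:

# Hypothetical sketch of a from_dict matching the test's assertions.
class Feed(object):
    _required = ('name', 'uri')

    def __init__(self, name, uri):
        self.name = name
        self.uri = uri

    @classmethod
    def from_dict(cls, attributes):
        missing = [k for k in cls._required if k not in attributes]
        if missing:
            raise RuntimeError('missing attributes: %s' % ', '.join(missing))
        # Unknown keys are dropped rather than set on the instance, so
        # accessing them later raises AttributeError, as the test expects.
        return cls(**{k: attributes[k] for k in cls._required})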
def parse_lineup_feed():
    global starting_lineup_feed
    global team_id
    global lineup
    get_todays_date()
    starting_lineup_feed = "http://sanfrancisco.giants.mlb.com/gen/lineups/%s/%s/%s.json" % (year, str(month).zfill(2), str(day).zfill(2))
    feed = Feed(starting_lineup_feed)
    feed.load_and_prepare()
    succeeded, loaded_lineup_data = feed.get_representation
    global players
    if not succeeded:
        players = None
        return
    lineups_dict = loaded_lineup_data["list"]
    for entry in lineups_dict:
        # print "%s" % entry
        # print "%s" % entry["team_id"]
        if entry["team_id"] == team_id:
            lineup = entry
            break
    pos = 1
    if "players" in lineup:
        for player in lineup["players"]:
            name_pos = "%d. %s (%s)" % (pos, player.get("last_name"), player.get("position"))
            players.append(name_pos)
            pos += 1
        players = ", ".join(players)
    else:
        print "Players not found."
        players = None
        return
def entries(self):
    items = Feed.entries(self)
    if RadioTimeGenreList is None:
        self.loadGenreList()
    for item in items:
        if isinstance(item, RadioTimeRadioStation):
            # print "search for '" + item.genre_id + "'"
            if item.genre_id in RadioTimeGenreList:
                # print "found it!"
                item.genre = RadioTimeGenreList[item.genre_id]
    return items
def add_video(request):
    save_url = request.route_url('add_video')
    topics = DBHelper.get_all_topics()
    video = Video(title='', description='', url='', topic_id=0)
    message = None
    if 'form.submitted' in request.params:
        title = request.params['title']
        description = request.params['description']
        url = request.params['url']
        topic_id = request.params['topic']
        user_id = request.authenticated_userid
        video = Video(title=title, description=description, url=url, owner_id=user_id, topic_id=topic_id)
        if DBHelper.add_video(video):
            feed = Feed()
            # Assumes DBHelper.add_video populates video.id on insert; the
            # original referenced an undefined name here.
            feed.update_video_score(video.id, topic_id, 0)
            return HTTPFound(location=request.route_url('home'))
        else:
            message = "Error while adding video"
    return {'video': video, 'save_url': save_url, 'topics': topics, 'message': message}
def _subscribe_multi(uid, urls):
    sources = {url: ensure_source(url) for url in urls}
    # TODO: make 4 a shared value
    source_json_futures = {url: source.json(include_articles=True, return_promise=True, article_limit=4)
                           for url, source in sources.iteritems()}
    source_json = {url: f() for url, f in source_json_futures.iteritems()}
    source_json = {url: sj for url, sj in source_json.iteritems() if len(sj['articles'])}
    subscription_futures = {url: Subscription.get_or_insert_async(Subscription.id_for_subscription(source.url, uid))
                            for url, source in sources.iteritems()}
    subscriptions = {url: f.get_result() for url, f in subscription_futures.iteritems()}
    for url, sub in subscriptions.iteritems():
        canonical_url = sources[url].url
        sub.url = canonical_url
        sub.uid = uid
    ndb.put_multi(subscriptions.values())
    Feed.get_for_user(uid).update_in_place(just_added_sources_json=source_json.values())
    sub_json_promises = {url: sub.json(return_promise=True) for url, sub in subscriptions.iteritems()}
    sub_jsons = {url: f() for url, f in sub_json_promises.iteritems()}
    return {"source_jsons": source_json, "subscription_jsons": sub_jsons}
import json

from common.constants import Constants
from common.utils import Logger, printInfo, printWarning, printError
from common.scheduler.scheduler import JobScheduler
from feed import Feed
from brain import Brain

if __name__ == '__main__':
    # load config file
    with open(Constants.CONFIG_FILE) as config_file:
        config = json.load(config_file)
    printInfo('Loading config:')
    printInfo(config)
    # initialize logger
    Logger.init(Constants.LOG_FOLDER)
    # initialize trading brain
    Brain.init()
    # set up data feed pulling
    scheduler = JobScheduler(
        instance_id="data_feed",
        job_execution_interval_in_seconds=Constants.FEED_EXECUTION_FREEQUENCY)
    job_list = []
    job_list.append(Feed(config))
    scheduler.addJobs(job_list)
    printInfo('Setting up data feed pulling. Will execute every '
              + str(Constants.FEED_EXECUTION_FREEQUENCY) + ' seconds.')
    scheduler.run()
def feed_dal(self):
    return Feed()
def load_rss(self, feedUrl):
    self.feed_url = feedUrl
    print "Loading RSS Feed: " + self.feed_url
    feedparser.USER_AGENT = "FeedToYoutube/0.0.3 +http://ldk.net/"
    f = feedparser.parse(
        self.feed_url,
        request_headers={'content-type': 'text/html; charset=UTF-8'})
    if not hasattr(f, "etag"):
        if hasattr(f.feed, "updated"):
            etag = f.feed.updated
        else:
            raise LookupError(
                'Can\'t find any update indicator. Please contact the author.'
            )
    else:
        etag = f.etag
    if not self._feed_has_changed(etag):
        print "Nothing has changed"
        return {'status': "304", "message": "Not Modified"}
    if not hasattr(f.feed, "updated"):
        f.feed.updated = unicode(datetime.datetime.now())
    imageUrl = ""
    if hasattr(f.feed, 'image') and hasattr(f.feed.image, "href"):
        imageUrl = f.feed.image.href
    summary = f.feed.title
    if hasattr(f.feed, "summary"):
        summary = f.feed.summary
    feed = Feed(self.config, feedUrl, image=imageUrl, etag=etag,
                subtitle=summary, title=f.feed.title, updated=f.feed.updated)
    self.save_feed(feed)
    feed.episodes = []
    print "Importing " + feed.title
    for episode in f.entries:
        sys.stdout.write("  Episode " + episode.title)
        sys.stdout.flush()
        if self._is_known_episode(episode.id):
            print " - old"
            feed.episodes.append(
                self.load_episode_by_rss_id(rss_episode_id=episode.id))
            continue
        print " - new"
        is_new = True
        # chapter handling
        cs = []
        if hasattr(episode, "psc_chapters"):
            for chapter in episode.psc_chapters.chapters:
                link = ""
                image = ""
                if hasattr(chapter, 'href'):
                    link = chapter.href
                if hasattr(chapter, 'image'):
                    image = chapter.image
                c = Chapter(start=chapter.start, image=image, href=link,
                            title=chapter.title)
                print "\t" + c.start + ": " + c.title + " Image= " + c.image + " Href= " + c.href
                cs.append(c)
        image = ""
        duration = ""
        if hasattr(episode, 'image') and hasattr(episode.image, "href"):
            image = episode.image.href
        if hasattr(episode, 'itunes_duration'):
            duration = episode.itunes_duration
        # pick the first audio/mpeg enclosure; a distinct loop variable keeps
        # `link` None when no audio link exists
        link = None
        if hasattr(episode, "links"):
            for episode_link in episode.links:
                if episode_link.type == 'audio/mpeg':
                    link = episode_link.href
                    break
        if link is None:
            continue
        subtitle = ""
        if hasattr(episode, "subtitle"):
            subtitle = episode.subtitle
        e = Episode(feed_id=feed.feed_id, rss_episode_id=episode.id,
                    duration=duration, link=link, title=episode.title,
                    subtitle=subtitle, description=episode.summary,
                    published=episode.published, chapters=cs, image=image,
                    is_new=is_new)
        self._insert_episode(e)
        if hasattr(feed.episodes, 'append'):
            feed.episodes.append(e)
    self.feed = feed
    return self.feed
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.

You can email inquiries to [email protected]
"""
from db import Db
from util import Util
from feed import Feed

feed_worker = Feed()

# Process feed data pull
feed_worker.process_feed('cnbc')
feed_worker.process_feed('the_athletic')
feed_worker.process_feed('market_watch')
feed_worker.process_feed('business_insider')
feed_worker.process_feed('detroit_free_press')
feed_worker.process_feed('aol')
feed_worker.process_feed('cnn')
feed_worker.process_feed('new_york_post')
feed_worker.process_feed('the_hill')
feed_worker.process_feed('sputnik')
feed_worker.process_feed('vice')
def test_duplicate_articles(self):
    feed = Feed('data/2014-04-05_16-54.atom')
    feed.add_feed('data/2014-04-05_16-54.atom')
    self.assertEqual(len(feed.articles), 89)
def test_non_existing_user(self):
    feed = Feed()
    self.assertEqual([], feed.get_messages_of("John"))
#!/usr/bin/env python
from feed import Feed
from bs4 import BeautifulSoup
import tweepy, feedparser, urllib, sqlite3, time, os

# Separate keys.py file holds secrets
from keys import CONSUMER_KEY, CONSUMER_SECRET, ACCESS_KEY, ACCESS_SECRET

DATABASE = '../database/rss_entries.db'

# Initialize the list of desired feeds
# Feed(Name, XML, Media, Hashtags)
FEEDS = [
    Feed('New Zealand Gazette', 'https://gazette.govt.nz/home/NoticeSearch?noticeType=aw&rss=1', '', '#liquidations #commercial #OpenGovt #gazetteNZ'),
    Feed('New Zealand Gazette', 'https://gazette.govt.nz/home/NoticeSearch?noticeType=aa&rss=1', '', '#appointmentreleaseofadministrators #commercial #OpenGovt #gazetteNZ'),
    Feed('New Zealand Gazette', 'https://gazette.govt.nz/home/NoticeSearch?noticeType=al&rss=1', '', '#appointmentreleaseofliquidators #commercial #OpenGovt #gazetteNZ'),
    Feed('New Zealand Gazette', 'https://gazette.govt.nz/home/NoticeSearch?noticeType=ar&rss=1', '', '#appointmentreleaseofrecieversmanagers #commercial #OpenGovt #gazetteNZ'),
    Feed('New Zealand Gazette', 'https://gazette.govt.nz/home/NoticeSearch?noticeType=ba&rss=1', '', '#bankrupcy #commercial #OpenGovt #gazetteNZ'),
    Feed('New Zealand Gazette', 'https://gazette.govt.nz/home/NoticeSearch?noticeType=cb&rss=1', '', '#cessation #commercial #OpenGovt #gazetteNZ'),
    Feed('New Zealand Gazette', 'https://gazette.govt.nz/home/NoticeSearch?noticeType=ct&rss=1', '', '#charitabletrusts #commercial #OpenGovt #gazetteNZ'),
    Feed('New Zealand Gazette', 'https://gazette.govt.nz/home/NoticeSearch?noticeType=fs&rss=1', '', '#FriendlySocietiesCreditUnions #commercial #OpenGovt #gazetteNZ'),
    Feed('New Zealand Gazette', 'https://gazette.govt.nz/home/NoticeSearch?noticeType=gn&rss=1', '', '#GeneralNotices #commercial #OpenGovt #gazetteNZ'),
    Feed('New Zealand Gazette', 'https://gazette.govt.nz/home/NoticeSearch?noticeType=is&rss=1', '', '#IncorporatedSocieties #commercial #OpenGovt #gazetteNZ'),
    Feed('New Zealand Gazette', 'https://gazette.govt.nz/home/NoticeSearch?noticeType=lt&rss=1', '', '#LandTransfers #commercial #OpenGovt #gazetteNZ'),
    Feed('New Zealand Gazette', 'https://gazette.govt.nz/home/NoticeSearch?noticeType=md&rss=1', '', '#MeetingLastDates #commercial #OpenGovt #gazetteNZ'),
    Feed('New Zealand Gazette', 'https://gazette.govt.nz/home/NoticeSearch?noticeType=ot&rss=1', '', '#Other #commercial #OpenGovt #gazetteNZ'),
    Feed('New Zealand Gazette', 'https://gazette.govt.nz/home/NoticeSearch?noticeType=pn&rss=1', '', '#Partnerships #commercial #OpenGovt #gazetteNZ'),
    Feed('New Zealand Gazette', 'https://gazette.govt.nz/home/NoticeSearch?noticeType=ds&rss=1', '', '#Removals #commercial #OpenGovt #gazetteNZ'),
    Feed('New Zealand Gazette', 'https://gazette.govt.nz/home/NoticeSearch?noticeType=au&rss=1', '', '#Authorities #Government #OpenGovt #gazetteNZ'),
    Feed('New Zealand Gazette', 'https://gazette.govt.nz/home/NoticeSearch?noticeType=dl&rss=1', '', '#DelegatedLegislation #Government #OpenGovt #gazetteNZ'),
]
def test_get_wall_include_own_post(self):
    feed = Feed()
    own_message = "Message message message"
    feed.post_message("A", own_message)
    self.assertEqual([own_message], feed.get_wall_for("A"))
import os

import cv2
import numpy as np
from PIL import ImageGrab

from feed import Feed

if __name__ == '__main__':
    raw_feed = 'stream'
    if raw_feed[-3:] in ['jpg', 'mp4'] and not os.path.isfile(raw_feed):
        print('feed does not exist.')
        exit(-1)
    if raw_feed[-3:] == 'jpg':  # image
        feed = Feed(raw_feed, 'image')
        feed.robo_vis()
        feed.show_lanes()
    elif raw_feed[-3:] == 'mp4':  # video
        cap = cv2.VideoCapture(raw_feed)
        while cap.isOpened():
            _, frame = cap.read()
            feed = Feed(frame, 'video')
            feed.robo_vis()
            if feed.show_lanes() == -2:
                break
    elif raw_feed == 'stream':  # stream
        # It's getting the stream of the desktop from inside WSL,
        # not what's on my Windows display.
        while True:
            screen = np.array(ImageGrab.grab(bbox=(0, 0, 800, 640)))
            cv2.imshow('', cv2.cvtColor(screen, cv2.COLOR_BGR2RGB))
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
def test_get_wall_own_post_and_newest_order(self):
    feed = Feed()
    own_message = "Message message message"
    feed.post_message("A", own_message)
    feed.follow("A", "B")
    B_msg = "it's your birthday"
    feed.post_message("B", B_msg)
    own_new_msg = "speechless"
    feed.post_message("A", own_new_msg)
    self.assertEqual([own_new_msg, B_msg, own_message], feed.get_wall_for("A"))
import os
import logging

from feed import Feed
from feedsources import FeedSources


def init_logging():
    logfile = 'fetcher.log'
    if os.path.isfile(logfile):
        os.remove(logfile)
    logging.basicConfig(filename=logfile, level=logging.DEBUG)
    logger = logging.getLogger('')
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    logger.addHandler(console)


if __name__ == '__main__':
    init_logging()
    feedsources = FeedSources()
    feedsources.read('feeds.yml')
    for url in feedsources.urls:
        feed = Feed()
        feed.parse(url)
        entries = feed.entries
        for entry in entries:
            print(entry)
def test_none_token(self):
    feed_req_obj = Feed(FeedType.ITEM.value, FeedScope.BOOTSTRAP.value, '220', 'EBAY_US', None)
    get_response = feed_req_obj.get()
    self.assertEqual(get_response.status_code, FAILURE_CODE)
    self.assertIsNotNone(get_response.message)
    self.assertIsNone(get_response.file_path, 'file_path is not None in the response')
start = time.time()
if args.filteronly:
    # create the filtered file
    feed_filter_obj = FeedFilterRequest(args.downloadlocation, args.itemf, args.lf, args.sellerf,
                                        args.gtinf, args.epidf, args.pricelf, args.priceuf,
                                        args.locf, args.iepidf, args.qf, args.format)
    filter_response = feed_filter_obj.filter()
    if filter_response.status_code != SUCCESS_CODE:
        print(filter_response.message)
else:
    # download the feed file if --filteronly option is not set
    feed_obj = Feed(FeedType.ITEM.value, args.scope, args.c1, args.mkt, args.token,
                    args.dt, args.env, args.downloadlocation, args.format)
    get_response = feed_obj.get()
    if get_response.status_code != SUCCESS_CODE:
        logger.error(
            'Exception in downloading feed. Cannot proceed\nFile path: %s\n Error message: %s\n',
            get_response.file_path, get_response.message)
    else:
        # create the filtered file
        feed_filter_obj = FeedFilterRequest(get_response.file_path, args.itemf, args.lf, args.sellerf,
                                            args.gtinf, args.epidf, args.pricelf, args.priceuf,
                                            args.locf, args.iepidf, args.qf, args.format)
        filter_response = feed_filter_obj.filter()
        if filter_response.status_code != SUCCESS_CODE:
            print(filter_response.message)
def __init__(self):
    with open(CONFIG_FILE_PATH) as config:
        self.config = json.load(config)
    self.loop = asyncio.get_event_loop()
    self.feed = Feed(self.config)
    self.run_flag = True
def test_get_wall_msgs_when_following_multiple_users(self):
    feed = Feed()
    follower = "Alice"
    followed_b = "Bob"
    followed_c = "Charlie"
    feed.follow(follower=follower, followed=followed_b)
    feed.follow(follower=follower, followed=followed_c)
    old_msg_b = "Sup?"
    feed.post_message(followed_b, old_msg_b)
    old_msg_c = "Not much"
    feed.post_message(followed_c, old_msg_c)
    new_msg = "Busy day copy pasting from stackoverflow"
    feed.post_message(followed_b, new_msg)
    res = feed.get_wall_for(follower)
    self.assertEqual([new_msg, old_msg_c, old_msg_b], res)
def test_run_display_own_msg(self):
    spy = mock.Mock(wraps=Feed())
    cli = Cli(spy)
    cli.run(self.jonny_display_cmd)
    spy.get_messages_of.assert_called()
def test_follow_another_relationship(self):
    feed = Feed()
    feed.follow(follower="John", followed="Jamie")
    feed.follow(follower="John", followed="Jess")
    res = feed.users_followed_by(username="John")
    self.assertSetEqual({"Jess", "Jamie"}, res)
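None of the snippets in this group include the Feed implementation these wall and follow tests exercise. A minimal in-memory sketch that passes them, with all structural choices (a global post sequence, a follower-to-followed set map) being assumptions:

# Hypothetical in-memory Feed consistent with the wall/follow tests above.
class Feed(object):
    def __init__(self):
        self._posts = []      # (sequence, author, message), oldest first
        self._following = {}  # follower -> set of followed usernames

    def post_message(self, username, message):
        self._posts.append((len(self._posts), username, message))

    def follow(self, follower, followed):
        self._following.setdefault(follower, set()).add(followed)

    def users_followed_by(self, username):
        return self._following.get(username, set())

    def get_messages_of(self, username):
        # Newest first; an unknown user simply yields an empty list.
        return [m for _, author, m in reversed(self._posts) if author == username]

    def get_wall_for(self, username):
        # Own posts plus posts of followed users, newest first.
        visible = {username} | self.users_followed_by(username)
        return [m for _, author, m in reversed(self._posts) if author in visible]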
def test_run_follow_cmd(self):
    spy = mock.Mock(wraps=Feed())
    cli = Cli(spy)
    cli.run(Command(Action.FOLLOW, "Josh", "Bob"))
    spy.follow.assert_called_once_with(follower="Josh", followed="Bob")
    spy.get_messages_of.assert_not_called()
def test_from_json(self):
    # TODO: more thorough testing here...
    entity = util.example_feed()
    roundtrip = Feed.from_json(entity.json())
    self._sanity(roundtrip)
def test_run_display_wall_cmd(self):
    spy = mock.Mock(wraps=Feed())
    cli = Cli(spy)
    cli.run(Command(Action.DISPLAY_RELEVANT_POSTS, "Josh", None))
    spy.get_wall_for.assert_called_once_with("Josh")
    spy.get_messages_of.assert_not_called()
def run(self):
    feed = Feed(["Resources/feeds/0ba94a1ed2e0449c.json"],
                ["Resources/feeds/0ba94a1ed2e0449c-0.mov"],
                ["Resources/feeds/0ba94a1ed2e0449c-seg"])
    map_handler = MapAdaptor("Resources/data/high_res_full_UPB_hybrid.jpg")
    assessor = Assessor()
    _, segmentation, log = feed.fetch_vs()
    geographical_position = (log['lat'], log['long'])
    course = log['course']
    img_y, img_x, angle = map_handler.convert_from_map_to_image(
        geographical_position, course)
    pos = (img_x, img_y)
    _, filter_log = feed.fetch_pf(log['tp'])
    # TODO: replace these with commented raw location
    raw_lat = log['lat'] - random.uniform(0.00005, 0.00015)    # filter_log['lat']
    raw_long = log['long'] - random.uniform(0.00005, 0.00015)  # filter_log['long']
    screen = pygame_adaptor.init_screen(self.dimension)
    map_image, real_size = pygame_adaptor.init_map(
        self.dimension, "Resources/data/high_res_full_UPB_hybrid.jpg", self.scale)
    car_screen_position = pygame_adaptor.init_car(
        self.dimension, "Resources/images/car.png")
    seg_sprite = pygame_adaptor.display_segmentation(
        car_screen_position, segmentation)
    while not self.exit:
        self.check_event_queue()
        sleep(self.frame_delay)
        current_map_image = pygame_adaptor.blit_transform(
            screen, map_image, pos, real_size, self.dimension, self.scale, angle)
        raw_img_y, raw_img_x, angle = map_handler.convert_from_map_to_image(
            (raw_lat, raw_long), course)
        x_from_center, y_from_center = map_handler.convert_to_snippet(
            raw_img_x, raw_img_y, pos, angle)
        particles = init_particles(
            (x_from_center + self.dimension / 2, y_from_center + self.dimension / 2),
            angle, NO_PARTICLES)
        assessor.asses(particles, current_map_image, cv2.imread(segmentation))
        _, segmentation, log = feed.fetch_vs()
        geographical_position = (log['lat'], log['long'])
        course = log['course']
        img_y, img_x, angle = map_handler.convert_from_map_to_image(
            geographical_position, course)
        pos = (img_x, img_y)
        seg_sprite = pygame_adaptor.display_segmentation(
            car_screen_position, segmentation, seg_sprite)
        pygame_adaptor.update()
    pygame.quit()
""" Obsidian - News aggregation application Copyright 2021 Ben Milliron This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. You can email inquiries to [email protected] """ from db import Db from util import Util from feed import Feed feed_worker = Feed() #Process feed data pull feed_worker.process_feed('cbs')
def test_cray(self):
    url = "http://api.feedzilla.com/v1/categories/16/articles.atom?count=10"
    feed = Feed('data/2014-04-05_16-54.atom')
    feed.add_feed(url)
    self.assertEqual(len(feed.articles), 99)
def create_feed(feed: Feed) -> Feed:
    db.put_item(TableName=TABLE, Item=feed.to_record())
    return feed
def test_from_file(self):
    feed = Feed('data/2014-04-05_16-54.atom')
    self.assertEqual(len(feed.articles), 89)
def _feeds_modified_cb(self):
    if not self.feeds_list.selected:
        return
    # TODO: add support for special non-id entries (like e.g. 'All')
    f = Feed(self.feeds_list.selected.data)
    self.episodes_list.display(Episode.getbyfeed(f))
def createItemsOfInterestFeed(self):
    ioiFeed = Feed()
    ioiFeed.m_feedFavicon = getResourceFilePixmap(kStarIcon)
    ioiFeed.m_feedName = "Items of Interest"
    ioiFeed.m_feedId = kItemsOfInterestFeedId
    return ioiFeed
class Main(Base):
    @property
    def name(self):
        return 'main'

    def __init__(self):
        Base.__init__(self)
        self.style_sheets.append('css/feed.css')
        self.javascript_src.extend(['js/tags.js', 'js/feed.js'])
        self.feed = Feed(self)
        self.debug_cgi = 0

    def process(self):
        Base.process(self)
        self.feed.process()
        if 'search' in self.form:
            self.search = self.form['search'].value.strip()
        elif 'school_search' in self.form:
            self.school_id = int(self.form['school_search'].value)

    def _getBody(self):
        left = self._getSchoolPanel()
        if self.search:
            center = self.feed.getMessages(search=self.search)
        elif self.school_id:
            center = self.feed.getMessages(school_id=self.school_id)
        else:
            center = self.feed.getNewMessageCard() + self.feed.getMessages()
        right = self._getTagsPanel()
        return open('body-section.html', 'r').read() % (left, center, right)
        # 8/25/2016 - removed this form because there are inner forms
        # and you cannot nest forms. This was causing liking to toggle
        # when the user hit like, then posted a test.
        # Leaving commented out for a time to see if there are any
        # unexpected results.
        # return form(o, name='form1', method='POST')

    def _getSchoolPanel(self):
        def mk_link(name, sid):
            return p(name, onclick="javascript:school_search('%s')" % sid)

        school_header = p('Schools', id='school-header')
        schools = []
        for s in self.session.user.schools:
            pair = [s['school'], s['school_id']]
            if pair not in schools:
                schools.append(pair)
        links = ''
        for school, sid in schools:
            class_ = 'cursor-pointer'
            if self.school_id == sid:
                class_ += ' red'
            links += li(mk_link(school, sid), class_=class_)
        school_links = ul(links)
        school_search = input(type='hidden', name='school_search')
        form_ = form(school_header + school_links + school_search,
                     id='school-search-form', method='post')
        return div(form_, id='school-panel')

    def _getTagsPanel(self):
        def mk_button(tag, class_=''):
            return input(value=tag, type='button',
                         class_='btn btn-default btn-xs',
                         onclick="javascript: search('%s')" % tag)

        def mk_mock_button(tag, class_=''):
            class_ += ' btn btn-default btn-xs'
            return input(value=tag, type='button',
                         class_='btn btn-default btn-xs disabled')

        tags = ['Music', 'SAT', 'Devices', 'Special Ed', 'Football', 'Math',
                'Research', 'Honors Class', 'AP', 'Art', 'Life Skills', 'DIY',
                'Cub Scouts', 'After School', 'Transportation', 'Open House',
                'Back To School', 'Social Media', 'Olympics', 'Health',
                'Programming', 'Volunteer', 'Soccer', 'Supplies', 'Meals',
                'Futura', 'Health', 'Bullying', 'Kindergarten', 'PTA',
                'Library', 'Autism', 'Grit', 'Science', 'Spectrum', 'AOS',
                'Safety', 'Beach', 'Common Core', 'Economics', 'Cafeteria',
                'ESL', 'AP Latin', 'IB', 'Field Trip', 'ACT', 'Movies',
                'Parks', 'Technology', 'History', 'Geography', 'Engineering',
                'Economics', 'Literature', 'Drama', 'Swimming', 'Lacrosse',
                'Basketball', 'Softball', 'Drones', 'Halloween', 'Camping',
                'Internship', 'Weather', 'Culture', 'Projects', 'Space',
                'SnowDays', 'Teaching', 'Performance', 'Tennis', 'Aerobics',
                'FLE', 'SexEd', 'Leadership', 'White House', 'Character',
                'Team', 'Books', 'Moms', 'Recognition', 'Tweens']
        # The original repeated the identical list verbatim as real_tags;
        # aliasing it preserves the membership check below unchanged.
        real_tags = tags
        tag_buttons = ''
        for tag in tags:
            if tag in real_tags:
                tag_buttons += mk_button(tag)
            else:
                tag_buttons += mk_mock_button(tag)
        table = HtmlTable(class_='table borderless')
        table.addHeader(['Trending Topics'])
        table.addRow([tag_buttons])
        return table.getTable()
def main():
    # OrderedDict is used so that the order of capture_props is preserved
    # (setting exposure_absolute before setting exposure_auto will cause an
    # exception, so this is necessary)
    cfg = json.load(
        open(os.path.dirname(os.path.realpath(__file__)) + '/config.json'),
        object_pairs_hook=OrderedDict)

    # configure root logger
    logger = logging.getLogger()
    logger.setLevel(config_utils.to_log_level(cfg['log_level']))
    formatter = logging.Formatter(
        '[%(asctime)s][%(levelname)s] %(name)-8s: %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
    if cfg['log_to_console']:
        sh = logging.StreamHandler()
        sh.setFormatter(formatter)
        logger.addHandler(sh)
    if cfg['log_to_file']:
        filename = 'log/' + datetime.now().strftime('%Y%m%d-%H:%M') + '.log'
        fh = logging.FileHandler(filename)
        fh.setFormatter(formatter)
        logger.addHandler(fh)

    # get logger for this file
    logger = logging.getLogger(__name__)
    logger.info('Using OpenCV version %s, Python version %s',
                cv2.__version__, platform.python_version())

    # is the source a webcam or a video file?
    source_is_webcam = cfg['capture_source'].startswith('/dev')
    logger.info('Initializing capture from source %s (%s)',
                cfg['capture_source'],
                'webcam' if source_is_webcam else 'file')

    # continue trying to open the camera until it works; maybe the cord was
    # slightly unplugged or something? it's important not to error
    # irrecoverably during a competition
    while True:
        try:
            cap = v4l2.Capture(bytes(cfg['capture_source']))
            break
        except IOError:
            if source_is_webcam:
                logger.exception('Opening capture failed!')
                logger.info('Retrying in 3 seconds...')
                time.sleep(3)
            else:
                # if a file isn't found, there's no point in continuing to
                # retry, so we just exit
                logger.exception('File not found!')
                sys.exit(1)

    # set camera hardware properties only if the capture is a webcam
    # (setting the exposure on a video doesn't make sense, etc.)
    if source_is_webcam:
        logger.info('Setting V4L2 capture properties:')
        for prop in cfg['capture_props']:
            try:
                cap.set_control(config_utils.to_v4l2_prop(prop),
                                cfg['capture_props'][prop])
                logger.info('  * %s: %d', prop, cfg['capture_props'][prop])
            except TypeError:
                logger.error('  * Unable to set property %s to %d! Skipping...',
                             prop, cfg['capture_props'][prop])

    # initialize feed
    feed_queue = Queue()
    feed = Feed(feed_queue, port=cfg['feed_port'])
    if cfg['feed_enabled']:
        logger.info('Starting live feed')
        feed.start()

    # initialize comms
    context = zmq.Context()
    socket = context.socket(zmq.PUB)
    if cfg['comms_enabled']:
        logger.info('Initializing comms on socket %d', cfg['comms_port'])
        socket.bind('tcp://*:%d' % cfg['comms_port'])
    else:
        logger.warning('Comms not enabled!')

    # main targeting loop
    while True:
        frame = cap.get_frame().bgr
        # calculate the average of the value (brightness) channel to get a
        # dummy value so we can test comms
        average_brightness = cv2.mean(frame[2])[0]
        if cfg['comms_enabled']:
            msg = {'average_brightness': average_brightness}
            logger.debug('Sending message %s', msg)
            socket.send_json(msg)
        if cfg['feed_enabled']:
            feed_queue.put(frame)
        if cfg['gui_enabled']:
            cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            logger.info('Exiting main loop')
            break

    logger.info('Closing capture...')
    cap.close()
    if cfg['feed_enabled']:
        logger.info('Terminating live feed thread...')
        feed.terminate()
    logger.info('Done')
sort = {'$sort': {'milliSecondsUntilRedo': -1}}  # most important ones first
limit = {'$limit': config['feedsNum']}  # we only get the 150 most pressing sources :)
feedList = db.aggregateFeeds([match, project, match2, sort, limit])
newArticlesCount = 0
duplicateArticlesCount = 0
validArticlesCount = 0
feedsCount = 0
i = 0
newArticles = []
batchSize = config['batchSize']
while i < len(feedList):
    tempList = feedList[i:(i + batchSize)]
    feeds = [Feed(url=feed['feed'], stamp=feed.get('stamp', None)) for feed in tempList]
    feeds = downloadFeeds(feeds)
    feeds = parseFeeds(feeds)
    feeds = downloadArticlesInFeeds(feeds)
    newArticles = []
    for feed in feeds:
        newArticles.extend(feed.articles)
    newArticles = parseArticles(newArticles)
    validArticles = [article for article in newArticles if article.isValid()]
    duplicateArticlesC = [article.save() for article in validArticles].count(True)
    for feed in feeds:
        print '%s => +%d' % (feed.url, len(feed.articles))
        feed.save()
    i += batchSize
    newArticlesCount += len(newArticles)