def test_should_find_entries():
    """Entries: The articles can be retrieved by their path"""
    # Arrange: seed the repository with a single known entry.
    repo = Entries()
    repo._entries = {"path": Entry("Title", "23. Februar 2013", "path")}
    # Act
    found = repo.find("path")
    # Assert: the stored title and parsed date come back intact.
    assert_equal(found.title, "Title")
    assert_equal(found.date, Date.from_str("23. Februar 2013"))
def get_inventory():
    """Return the requesting user's inventory as a JSON HTTP response."""
    user_id = request.args.get('user_id')
    inventory = Entries().get_entries(user_id)
    body = json.dumps(inventory)
    return Response(body, mimetype='application/json')
def test_entries_should_cache_results():
    """Entries: The articles should be cached and only loaded once."""
    # Arrange: wrap load() so its invocations can be counted.
    entries = Entries(["11. April 2013, Artikel, article"])
    entries.load = register_call(entries.load)
    # Act: touch the collection several times.
    times(4, lambda: entries.all)
    # Assert: the expensive load ran a single time.
    assert_was_called_times(entries.load, once)
def test_should_sort_entries_by_date():
    """Entries (Sorting): Should sort entries by date"""
    collection = Entries()
    # Insertion order deliberately differs from date order.
    collection._entries = {
        "a": Entry("A", "24. Februar 2013", "Path"),
        "b": Entry("B", "01. Oktober 2013", "Path"),
        "c": Entry("C", "12. Januar 2013", "Path"),
    }
    ordered = collection.by_date()
    # NOTE(review): the comparison apparently normalizes the leading zero
    # ("01. Oktober" stored, "1. Oktober" expected) -- confirm in Date/Entry.
    expected = ["12. Januar 2013", "24. Februar 2013", "1. Oktober 2013"]
    for position, date in enumerate(expected):
        assert_equal(ordered[position].date, date)
def test_should_sort_entries_by_title():
    """Entries (Sorting): Should sort entries by title"""
    collection = Entries()
    # Same date everywhere so only the title can drive the ordering.
    collection._entries = {
        "a": Entry("C Title", "23. Februar 2013", "Path"),
        "b": Entry("A Title", "23. Februar 2013", "Path"),
        "c": Entry("B Title", "23. Februar 2013", "Path"),
    }
    ordered = collection.by_title()
    for position, title in enumerate(["A Title", "B Title", "C Title"]):
        assert_equal(ordered[position].title, title)
def __init__(self, teamName):
    """Open (creating if necessary) this team's SQLite database.

    A brand-new database gets the entries table created; an existing one
    is migrated whenever its stored schema version differs from the
    expected Entries.SCHEMA_VERSION.
    """
    self.teamName = teamName
    safeName = make_safe_name(self.teamName)
    self.databaseName = 'data/' + safeName + '.sqlite3'
    self.entries = Entries()
    firstRun = not os.path.exists(self.databaseName)
    if firstRun:
        # Fresh database: build the schema before anything else.
        with self.dbConnect() as connection:
            self.entries.createTable(connection)
    with self.dbConnect() as connection:
        row = connection.execute("PRAGMA schema_version").fetchone()
        storedVersion = row[0]
        if storedVersion != self.entries.SCHEMA_VERSION:
            self.entries.migrate(connection, storedVersion)
def main(args: List[str]):
    """The main function: diff watched sites against the cache and notify.

    args[0] selects the entries source; differences are emailed, and the
    fresh state is written back to the cache for the next run.
    """
    config = WatcherConfig(os.environ)
    cache = WatcherCache(config)
    entries = Entries(config, cache, args[0])

    # Fetch the sites and compare them with the cached state.
    comparisons = entries.get_comparison()

    # Send the email only when something actually changed.
    if comparisons:
        send_email(config, comparisons)
    else:
        print('No differences found; no need to send emails')

    # Persist the current state for the next run.
    entries.write_to_cache()
class Team(object):
    """A team backed by its own per-team SQLite database file."""

    def __init__(self, teamName):
        """Open the team database, creating or migrating its schema as needed."""
        self.teamName = teamName
        safeName = make_safe_name(self.teamName)
        self.databaseName = 'data/' + safeName + '.sqlite3'
        self.entries = Entries()
        if not os.path.exists(self.databaseName):
            # Brand-new database: create the entries table first.
            with self.dbConnect() as connection:
                self.entries.createTable(connection)
        with self.dbConnect() as connection:
            versionRow = connection.execute("PRAGMA schema_version").fetchone()
            if versionRow[0] != self.entries.SCHEMA_VERSION:
                self.entries.migrate(connection, versionRow[0])

    def dbConnect(self):
        """Open a connection to this team's database with declared-type parsing."""
        return sqlite3.connect(self.databaseName,
                               detect_types=sqlite3.PARSE_DECLTYPES)
def structure_json_formatted_output(matches):
    """Build the ranked JSON payload for a set of headline matches.

    For every match group a Headline_jclass is assembled (plus one Entries
    row per member article -- persistence is currently commented out), the
    groups are ranked by the fraction of the news list they cover, and the
    result is returned as a JSON string.

    :param matches: iterable of match groups; each group is a sequence of
        article objects whose first element supplies the headline title.
    :return: JSON string of the ranked headline dicts.
    """
    # conn = SQLConn()
    matched_output = []
    for match in matches:
        headline = Headline_jclass()
        headline.headline = match[0].title
        # Score = share of all known articles that landed in this group.
        headline.percentage = float(len(match)) / float(len(news_list))
        headline.stories_id = 1  # Entries().get_next_story_id(conn.s)
        for article in match:
            headline.agencies.append(article.publisher_code)
            ent = Entries()
            ent.story_id = headline.stories_id
            ent.headline = article.title
            ent.image_url = article.urlToImage
            ent.website_url = article.url
            # conn.s.add(ent)
            # conn.s.commit()
        matched_output.append(headline)

    # Rank by percentage, highest first.  sorted() is stable, so groups with
    # equal percentages keep their original relative order -- exactly the
    # ordering the previous hand-rolled insertion loop produced, but in
    # O(n log n) instead of O(n^2).
    ranked_match_output = sorted(matched_output,
                                 key=lambda h: h.percentage,
                                 reverse=True)

    # Return the matches in the correct order.
    return json.dumps([json.loads(match.unclassify())
                       for match in ranked_match_output])
logger = logging.getLogger(__name__)

# @tracker9000
# Read the bot token once at import time.  The previous bare
# open('token.txt').read() never closed the file handle; the context
# manager fixes that leak.
with open('token.txt') as _token_file:
    TOKEN = _token_file.read()

# conversation states are defined below
# NOTE(review): TRACK is a range object, not an int -- presumably used as a
# hashable conversation-state key by the handler; confirm at the call sites.
TRACK = range(1)

# interval to check for change
CHECK_INTERVAL = 60 * 60 * 12  # in seconds
# CHECK_INTERVAL = 10

# simulate check for changes
SIMULATION = False

entries = Entries()


def format_tracking_reply(track_results):
    """Format a tracking result into a human-readable (Albanian) reply.

    :param track_results: (date, event, office, location) tuple.
    :return: the reply text; the location line is omitted when the
        stripped location is empty.
    """
    date, event, office, location = track_results
    format_str = 'Data: {}\n'
    format_str += 'Eventi: {}\n'
    format_str += 'Zyra: {}'
    location = location.strip()
    if location != '':
        format_str += '\nVendndodhja: {}'
    # With a blank location, .format() simply ignores the unused argument.
    return format_str.format(date, event, office, location)
def getFeed(feedName):
    """Return the cached Entries instance for *feedName*, creating it lazily.

    :param feedName: key identifying the feed in the module-level cache.
    :return: the (possibly freshly constructed) Entries for that feed.
    """
    feed = entries.get(feedName)
    if feed is None:
        # First request for this feed: build it and cache it.  Assigning and
        # returning the same object removes the redundant third dict lookup
        # (entries.get right after the insert) the original performed.
        feed = entries[feedName] = Entries(feedName, config)
    return feed
return TaskStages.workingOn if __name__ == "__main__": DB_STRING = os.path.join(os.path.dirname(__file__), 'data/database.sqlite3') try: os.remove(DB_STRING) except IOError: pass #delete File, if it doesn't exist we don't care with sqlite3.connect(DB_STRING) as connection: tasks = Tasks() members = Members() entries = Entries() smugmugConfig = json.load(open('secrets.json', 'r')) #Create the Tables tasks.createTable(connection) members.createTable(connection) entries.createTable(connection) #Initate our Members Table membersList = [ "Andrew Vo", "Chirag Sreedhara", "Eric Wong", "Evan Spiering", "Izaak Kreykes", "Nithya Golla", "Philip Smith", "Preeti Thirukonda", "Rishi Maroju" ] members.insertMembers(connection, membersList)
def add_food_entry():
    """Persist a food entry posted as JSON and acknowledge it."""
    payload = json.loads(request.get_data())
    Entries().add_entry(payload)
    return 'success'