getGUID(item), getComments(item), getLink(item), getDescription(item)) itemContainers.append(tmpItem) return itemContainers def getItems(data): channels = separateChannels(data) itemContainers = "" if channels: itemStrings = separateItems(channels[0]) itemContainers = createItemContainers(itemStrings) return itemContainers if __name__ == "__main__": dataFile = open("testFeed.xml", "r") data = dataFile.read() dataFile.close() dlItems = autoDownloads.getAutoDownloads() channels = separateChannels(data) if channels: itemStrings = separateItems(channels[0]) itemContainers = createItemContainers(itemStrings) for item in itemContainers: for dlItem in dlItems: if dlItem.matchRegex(item): print(dlItem.regex, item.title)
# Main polling loop. Relies on state initialized above this chunk:
# running (loop flag), data (""), success (True), downloaded (list of links
# already fetched), loop (iteration counter), options (rss_url, sleeptime).
while running:
    # Fetch and decode the feed. The context manager closes the HTTP
    # response even on a partial failure (the original leaked it, and a
    # failed .read()/.decode() left `data` bound to the response object,
    # making the "fetched RSS" log line below lie).
    try:
        with urllib.request.urlopen(options.rss_url) as response:
            data = response.read().decode()
    except Exception as err:
        print(time.strftime("%Y-%m-%d %H:%M:%S Error:"), err, sep="\t")
        success = False
    if data:
        print(time.strftime("%Y-%m-%d %H:%M:%S"), "fetched RSS", sep="\t")
    else:
        print(time.strftime("%Y-%m-%d %H:%M:%S"), "no data", sep="\t")
    if success:
        # Match every auto-download rule against every feed item; skip
        # links we already downloaded this cycle window.
        itemContainers = feedParser.getItems(data)
        autoList = autoDownloads.getAutoDownloads()
        for autoItem in autoList:
            for item in itemContainers:
                if (autoItem.matchRegex(item)) and (item.link not in downloaded):
                    print("Match:", autoItem.name, item.title, sep="\t")
                    item.download()
                    downloaded.append(item.link)
    # Reset per-iteration state for the next poll.
    data = ""
    success = True
    time.sleep(options.sleeptime)
    loop += 1
    # Periodically forget the downloaded-links cache so long-running
    # instances do not grow without bound (and can re-fetch re-posted items).
    if loop >= 400:
        downloaded = []
        loop = 0