def ingestFromMRSSFile(mrssFile, accountId):
    """Create and ingest a media item for each entry in an MRSS feed."""
    count = 0
    limit = _CFG.get('limit', 0)
    mi = MediaItem(_CFG, _CFG['accountId'])

    feed = feedparser.parse(mrssFile)
    items = feed['items']
    log("Processing {0} items in MRSS feed".format(len(items)))
    for i in items:
        if limit > 0 and count >= limit:
            log("Defined limit of {0} reached.".format(limit))
            break
        count = count + 1

        # Uncomment to dump all fields of the feed entry for debugging:
        # for k in i.keys():
        #     print "{0}: {1}\n".format(k, i.get(k, ""))

        # Map feed entry fields to the media item metadata.
        itemData = {}
        itemData['name'] = i.get('title', "")[:256]
        itemData['description'] = i.get('summary', "")[:250]
        itemData['reference_id'] = i.get('id', "")
        #itemData['tags'] = i.get('media_tags', "").split(',')
        #itemData['custom_fields'] =

        res = mi.Create(itemData)
        if not res:
            # Error creating object, skip the ingest. (if goal is to re-ingest, don't skip)
            print "ERROR creating media item {0}, skipping {1}.".format(
                itemData['reference_id'], itemData)
            continue

        videoId = res.get('id', "")
        log("[{0}] created.".format(videoId))

        # Build the ingest request from the first media:content and
        # media:thumbnail elements of the entry.
        ingestData = {}
        master = {}
        thumb = {}
        master['url'] = i['media_content'][0].get('url', "")
        thumb['url'] = i['media_thumbnail'][0].get('url', "")
        ingestData['master'] = master
        ingestData['thumbnail'] = thumb
        ingestData['poster'] = thumb
        if _CFG.get('ingestprofile', ""):
            ingestData['profile'] = _CFG['ingestprofile']

        r = mi.Ingest(videoId, ingestData)
        if not r:
            log("Ingest Failed")
            continue
        log("[{0}] Ingested. JobId={1}".format(videoId, r.get('id', "")))
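
# Example invocation (illustrative; the feed path and account id are
# placeholders, and _CFG, log, MediaItem, and feedparser are assumed to be
# set up elsewhere in this script). feedparser.parse() accepts either a local
# file path or a URL, so mrssFile can be either. Each feed entry is expected
# to carry at least one media:content and one media:thumbnail element, which
# feedparser exposes as i['media_content'] and i['media_thumbnail'].
#
#   ingestFromMRSSFile('feed.rss', _CFG.get('accountId', ''))
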
def ingestFromJsonFile(jsonFile, accountId):
    """Create and ingest a media item for each entry in a JSON feed file."""
    if not accountId:
        log("\nError: This command requires accountId to be set.")
        sys.exit()

    try:
        data = open(jsonFile)
    except IOError as e:
        log("\nError: Could not open file: {0}: error {1}".format(jsonFile, e))
        sys.exit()
    try:
        jsonData = json.load(data)
    except ValueError as e:
        log("\nError: Invalid JSON file {0}: {1}".format(jsonFile, e))
        sys.exit()
    data.close()

    count = 0
    limit = _CFG.get('limit', 0)
    mi = MediaItem(_CFG, _CFG['accountId'])

    items = jsonData.get('items', [])
    for item in items:
        if limit > 0 and count >= limit:
            log("Defined limit of {0} reached.".format(limit))
            break
        count = count + 1

        # title and video_url are required; skip items missing either.
        if not item.get('title', ""):
            log("title required to process item, skipping: {0}".format(item))
            continue
        if not item.get('video_url', ""):
            log("video_url required to process item, skipping: {0}".format(
                item))
            continue

        itemData = {}
        customFields = {}
        customFields['rating'] = item.get('rating', "")
        customFields['publish_date'] = item.get('publish_date', "")
        customFields['original_publish_date'] = item.get(
            'original_publish_date', "")
        customFields['website'] = "CollegeHumor"

        itemData['name'] = item.get('title', "")[:256]
        itemData['description'] = item.get('description', "")[:250]

        # Schedule the item to start at its original publish date, if present.
        if item.get('original_publish_date', ""):
            schedule = {}
            date = dateutil.parser.parse(item.get('original_publish_date', ""))
            schedule['starts_at'] = date.isoformat()
            itemData['schedule'] = schedule

        # For electus, guid is /post/xxx or /video/xxx. Just use xxx as the refid.
        search = re.search(r'/.*/(\d+)', item.get('guid', ""), re.M | re.I)
        if search:
            itemData['reference_id'] = search.group(1)

        if item.get('tags', ""):
            itemData['tags'] = item.get('tags', "").split(',')

        itemData['custom_fields'] = customFields

        res = mi.Create(itemData)
        if not res:
            # Error creating object, skip the ingest. (if goal is to re-ingest, don't skip)
            print "ERROR creating media item {0}, skipping.".format(
                itemData.get('reference_id', ""))
            continue

        videoId = res.get('id', "")
        log("[{0}] created.".format(videoId))

        ingestData = {}
        master = {}
        thumb = {}
        master['url'] = item.get('video_url', "")
        thumb['url'] = item.get('thumbnail_url', "")
        ingestData['master'] = master
        ingestData['thumbnail'] = thumb
        ingestData['poster'] = thumb
        if _CFG.get('ingestprofile', ""):
            ingestData['profile'] = _CFG['ingestprofile']

        r = mi.Ingest(videoId, ingestData)
        if not r:
            log("Ingest Failed")
            continue
        log("[{0}] Ingested. JobId={1}".format(videoId, r.get('id', "")))
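
# Illustrative shape of the JSON feed file consumed above (field names come
# from the reads in ingestFromJsonFile; the values are made-up examples):
#
# {
#   "items": [
#     {
#       "title": "Example Video",
#       "description": "Short description of the video",
#       "guid": "/video/12345",
#       "video_url": "http://example.com/video.mp4",
#       "thumbnail_url": "http://example.com/thumb.jpg",
#       "tags": "comedy,college",
#       "rating": "PG",
#       "publish_date": "2015-06-01T12:00:00Z",
#       "original_publish_date": "2015-05-01T12:00:00Z"
#     }
#   ]
# }
#
# Example invocation (placeholder arguments):
#
#   ingestFromJsonFile('items.json', _CFG.get('accountId', ''))
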