def ingestFromMRSSFile(mrssFile, accountId):
    """Create and ingest media items from the entries of an MRSS feed.

    mrssFile  -- path or URL handed straight to feedparser.parse()
    accountId -- NOTE(review): unused; the account is read from
                 _CFG['accountId'] instead -- confirm which is intended

    Honors the optional _CFG['limit'] (0 = unlimited) and, when set,
    _CFG['ingestprofile'].
    """
    count = 0
    limit = _CFG.get('limit', 0)
    mi = MediaItem(_CFG, _CFG['accountId'])
    feed = feedparser.parse(mrssFile)
    items = feed['items']
    log("Processing {0} items in MRSS feed".format(len(items)))
    for i in items:
        if limit > 0 and count >= limit:
            log("Defined limit of {0} reached.".format(limit))
            break
        count = count + 1
        itemData = {}
        itemData['name'] = i.get('title', "")[:256]
        # BUG FIX: original read i.get('summary' "") -- the missing comma
        # concatenated the literals into the key 'summary' with NO default,
        # so a feed entry without a summary returned None and None[:250]
        # raised TypeError. Restore the intended default of "".
        itemData['description'] = i.get('summary', "")[:250]
        itemData['reference_id'] = i.get('id', "")
        res = mi.Create(itemData)
        if not res:
            # Error creating object, skip the ingest.
            # (if goal is to re-ingest, don't skip)
            print("ERROR creating media item {0}, skipping {1}.".format(
                itemData['reference_id'], itemData))
            continue
        videoId = res.get('id', "")
        log("[{0}] created.".format(videoId))
        ingestData = {}
        master = {}
        thumb = {}
        # assumes every entry carries at least one media_content and
        # media_thumbnail element -- TODO confirm; a feed without them
        # raises KeyError/IndexError here.
        master['url'] = i['media_content'][0].get('url', "")
        thumb['url'] = i['media_thumbnail'][0].get('url', "")
        ingestData['master'] = master
        ingestData['thumbnail'] = thumb
        ingestData['poster'] = thumb
        if _CFG.get('ingestprofile', ""):
            ingestData['profile'] = _CFG['ingestprofile']
        r = mi.Ingest(videoId, ingestData)
        if not r:
            log("Ingest Failed")
            continue
        log("[{0}] Ingested. JobId={1}".format(videoId, r.get('id', "")))
def copyItemFields():
    """Update every item matched by _CFG['query']: stamp
    custom_fields['website'] and derive schedule.starts_at from
    custom_fields['original_publish_date'].
    """
    q1 = MediaItem(_CFG, _CFG['accountId'])
    q1.Query(_CFG['query'])
    item = q1.Next()
    while item:
        print("[{0}]: id: {1} refid: {2} long: {3}".format(
            q1.returnedCount, item['id'], item['reference_id'],
            item['long_description']))
        # BUG FIX: the original advanced to the NEXT item here
        # (item = q1.Next()) before building the update, so every other
        # item was skipped and the loop crashed with AttributeError when
        # Next() returned None. Update the item we just printed instead.
        customFields = item.get('custom_fields', {})
        customFields['website'] = 'CollegeHumor'
        updateData = {}
        # Only build a schedule when a source date exists -- parsing an
        # empty string raises in dateutil.
        pubDate = customFields.get('original_publish_date', "")
        if pubDate:
            date = dateutil.parser.parse(pubDate)
            schedule = {}
            schedule['starts_at'] = date.isoformat()
            updateData['schedule'] = schedule
        updateData['custom_fields'] = customFields
        q1.Update(item['id'], updateData)
        print("{0}: Updated.".format(item['id']))
        item = q1.Next()
def testme():
    """Ad-hoc scratch test: run _CFG['query'] and print each matched item.

    The delete calls that used to live here are intentionally left out;
    this version only reports what the query returns.
    """
    query = MediaItem(_CFG, _CFG['accountId'])
    query.Query(_CFG['query'])
    log("Query returned [{0}] items.".format(query.QueryCount()))
    current = query.Next()
    while current:
        print("item: [{0}]: id: {1} refid: {2}".format(
            query.returnedCount, current['id'], current['reference_id']))
        current = query.Next()
def deleteMediaItems():
    """Delete every media item matched by _CFG['query'].

    Each deletion is announced before the call; a truthy return from
    Delete() is treated as a failure and logged.
    """
    query = MediaItem(_CFG, _CFG['accountId'])
    query.Query(_CFG['query'])
    log("Query returned [{0}] items.".format(query.QueryCount()))
    current = query.Next()
    while current:
        print("Deleting: [{0}]: id: {1} refid: {2}".format(
            query.returnedCount, current['id'], current['reference_id']))
        failure = query.Delete(current['id'])
        if failure:
            log("Delete failed: [{0}]".format(failure))
        current = query.Next()
def copyItemFields():
    """Backfill long_description from reference_id for items matched by
    _CFG['query'] that do not yet have a long_description.

    NOTE(review): an earlier copyItemFields() definition in this file is
    shadowed by this one.
    """
    query = MediaItem(_CFG, _CFG['accountId'])
    query.Query(_CFG['query'])
    current = query.Next()
    while current:
        print("[{0}]: id: {1} refid: {2} long: {3}".format(
            query.returnedCount, current['id'], current['reference_id'],
            current['long_description']))
        # The original's redundant inner truthiness check is folded into
        # the loop condition; only items lacking a long_description are
        # updated.
        if not current.get('long_description', None):
            payload = {'long_description': current.get('reference_id', "")}
            query.Update(current['id'], payload)
            print("Updated.")
        current = query.Next()
def ingestFromJsonFile(jsonFile, accountId):
    """Create and ingest media items described by a JSON file.

    jsonFile  -- path to a JSON document containing an "items" list
    accountId -- must be truthy; used only as a precondition check here
                 (the MediaItem itself is built from _CFG['accountId'])

    Exits the process when the file is missing, unreadable, or not valid
    JSON. Honors the optional _CFG['limit'] (0 = unlimited) and, when
    set, _CFG['ingestprofile'].
    """
    if not accountId:
        log("\nError: This command requires accountId be set.")
        sys.exit()
    try:
        data = open(jsonFile)
    except IOError as e:
        log("\nError: Cound not open file: {0}: error {1}".format(jsonFile, e))
        sys.exit()
    try:
        jsonData = json.load(data)
    except ValueError as e:
        log("\nError: Invalid Json File {0}: {1}".format(jsonFile, e))
        sys.exit()
    processed = 0
    limit = _CFG.get('limit', 0)
    mi = MediaItem(_CFG, _CFG['accountId'])
    for entry in jsonData.get('items', []):
        if limit > 0 and processed >= limit:
            log("Defined limit of {0} reached.".format(limit))
            break
        processed = processed + 1
        # Both title and video_url are mandatory for an ingest.
        if not entry.get('title', ""):
            log("title required to process item, skipping: {0}".format(entry))
            continue
        if not entry.get('video_url', ""):
            log("video_url required to process item, skipping: {0}".format(
                entry))
            continue
        customFields = {}
        customFields['rating'] = entry.get('rating', "")
        customFields['publish_date'] = entry.get('publish_date', "")
        customFields['original_publish_date'] = entry.get(
            'original_publish_date', "")
        customFields['website'] = "CollegeHumor"
        itemData = {}
        itemData['name'] = entry.get('title', "")[:256]
        itemData['description'] = entry.get('description', "")[:250]
        if entry.get('original_publish_date', ""):
            date = dateutil.parser.parse(
                entry.get('original_publish_date', ""))
            itemData['schedule'] = {'starts_at': date.isoformat()}
        # For electus guid is /post/xxx or /video/xxx. Just use xxx as the refid
        search = re.search(r'/.*/(\d+)', entry.get('guid', ""), re.M | re.I)
        if search:
            itemData['reference_id'] = search.group(1)
        if entry.get('tags', ""):
            # assumes 'tags' is a comma-separated string -- TODO confirm;
            # a list value would make .split() raise AttributeError (note
            # the mismatched "" vs [] defaults inherited from the original).
            itemData['tags'] = entry.get('tags', []).split(',')
        itemData['custom_fields'] = customFields
        res = mi.Create(itemData)
        if not res:
            # Error creating object, skip the ingest.
            # (if goal is to re-ingest, don't skip)
            print("ERROR creating media item {0}, skipping.".format(
                itemData['reference_id']))
            continue
        videoId = res.get('id', "")
        log("[{0}] created.".format(videoId))
        master = {'url': entry.get('video_url', "")}
        thumb = {'url': entry.get('thumbnail_url', "")}
        ingestData = {'master': master, 'thumbnail': thumb, 'poster': thumb}
        if _CFG.get('ingestprofile', ""):
            ingestData['profile'] = _CFG['ingestprofile']
        r = mi.Ingest(videoId, ingestData)
        if not r:
            log("Ingest Failed")
            continue
        log("[{0}] Ingested. JobId={1}".format(videoId, r.get('id', "")))