def flickr_download_media(job): logging.info("flickr_download_media started for %s" % job) print "aregeuhktdjqhkdat" jobinfo = JobInfo.objects.get(id=job.arg) try: if jobinfo.status.startswith == "Complete": # job finished previously return flickr = FlickrSearch() arg = simplejson.loads(jobinfo.arg) record = Record.objects.get(id=arg["record"], manager="flickr") url = arg["url"] storage = flickr.get_storage() file = urllib2.build_opener(urllib2.ProxyHandler({"http": "http://localhost:3128"})).open(url) setattr(file, "size", int(file.info().get("content-length"))) mimetype = file.info().get("content-type") media = Media.objects.create(record=record, storage=storage, name=record.name, mimetype=mimetype) media.save_file(record.name + guess_extension(mimetype), file) jobinfo.complete("Complete", "File downloaded") except Exception, ex: logging.info("flickr_download_media failed for %s (%s)" % (job, ex)) jobinfo.update_status("Failed: %s" % ex)
def shared_download_media(shared_id, record_id, url):
    """Fetch the remote file for a shared-collection record and store it.

    Looks up the Record owned by the shared source, downloads *url*
    (using HTTP basic auth when the share carries credentials) and
    attaches the content to the record as a new Media object.
    """
    shared = SharedSearch(shared_id)
    record = Record.objects.get(
        id=record_id,
        manager=shared.get_source_id(),
    )
    storage = shared.get_storage()

    # Only authenticate when the share has both a username and a password.
    username = shared.shared.username
    password = shared.shared.password
    if username and password:
        response = requests.get(url, auth=(username, password))
    else:
        response = requests.get(url)

    # Wrap the response body in a seekable file-like object; save_file
    # also expects a ``size`` attribute on it.
    payload = StringIO(response.content)
    payload.size = int(response.headers['content-length'])
    mimetype = response.headers['content-type']

    media = Media.objects.create(record=record, storage=storage,
                                 name=record.name, mimetype=mimetype)
    media.save_file(record.name + guess_extension(mimetype), payload)
def flickr_download_media(job): logging.info('flickr_download_media started for %s' % job) print "aregeuhktdjqhkdat" jobinfo = JobInfo.objects.get(id=job.arg) try: if jobinfo.status.startswith == 'Complete': # job finished previously return flickr = FlickrSearch() arg = simplejson.loads(jobinfo.arg) record = Record.objects.get(id=arg['record'], manager='flickr') url = arg['url'] storage = flickr.get_storage() file = urllib2.build_opener( urllib2.ProxyHandler({"http": "http://localhost:3128"})).open(url) setattr(file, 'size', int(file.info().get('content-length'))) mimetype = file.info().get('content-type') media = Media.objects.create(record=record, storage=storage, name=record.name, mimetype=mimetype) media.save_file(record.name + guess_extension(mimetype), file) jobinfo.complete('Complete', 'File downloaded') except Exception, ex: logging.info('flickr_download_media failed for %s (%s)' % (job, ex)) jobinfo.update_status('Failed: %s' % ex)
def shared_download_media(job): log.info('shared_download_media started for %s' % job) jobinfo = JobInfo.objects.get(id=job.arg) try: if jobinfo.status.startswith == 'Complete': # job finished previously return arg = json.loads(jobinfo.arg) shared = SharedSearch(arg['shared_id']) record = Record.objects.get(id=arg['record'], manager=shared.get_source_id()) url = arg['url'] storage = shared.get_storage() file = urllib2.urlopen(url) setattr(file, 'size', int(file.info().get('content-length'))) mimetype = file.info().get('content-type') media = Media.objects.create( record=record, storage=storage, name=record.name, mimetype=mimetype, ) media.save_file(record.name + guess_extension(mimetype), file) jobinfo.complete('Complete', 'File downloaded') except Exception, ex: log.info('shared_download_media failed for %s (%s)' % (job, ex)) jobinfo.update_status('Failed: %s' % ex)
def nasa_download_media(job): logging.info('nasa_download_media started for %s' % job) jobinfo = JobInfo.objects.get(id=job.arg) try: if jobinfo.status.startswith == 'Complete': # job finished previously return nasa = NasaImageExchange() arg = simplejson.loads(jobinfo.arg) record = Record.objects.get(id=arg['record'], manager='nasaimageexchange') url = arg['url'] storage = nasa.get_storage() file = urllib2.urlopen(url) setattr(file, 'size', int(file.info().get('content-length'))) mimetype = file.info().get('content-type') media = Media.objects.create(record=record, storage=storage, name=record.name, mimetype=mimetype) media.save_file(record.name + guess_extension(mimetype), file) jobinfo.complete('Complete', 'File downloaded') except Exception, ex: logging.info('nasa_download_media failed for %s (%s)' % (job, ex)) jobinfo.update_status('Failed: %s' % ex)
def flickr_download_media(job): logging.info('flickr_download_media started for %s' % job) jobinfo = JobInfo.objects.get(id=job.arg) try: if jobinfo.status.startswith == 'Complete': # job finished previously return flickr = FlickrSearch() arg = simplejson.loads(jobinfo.arg) record = Record.objects.get(id=arg['record'], manager='flickr') url = arg['url'] storage = flickr.get_storage() file = urllib2.urlopen(url) mimetype = file.info().get('content-type') media = Media.objects.create(record=record, storage=storage, name=record.name, mimetype=mimetype) # should be done better: loading file into StringIO object to make it # seekable file = StringIO(file.read()) media.save_file(record.name + guess_extension(mimetype), file) jobinfo.complete('Complete', 'File downloaded') except Exception, ex: logging.exception('flickr_download_media failed for %s (%s)' % (job, ex)) jobinfo.update_status('Failed: %s' % ex)
def flickr_download_media(job): logging.info('flickr_download_media started for %s' % job) jobinfo = JobInfo.objects.get(id=job.arg) try: if jobinfo.status.startswith == 'Complete': # job finished previously return flickr = FlickrSearch() arg = simplejson.loads(jobinfo.arg) record = Record.objects.get(id=arg['record'], manager='flickr') url = arg['url'] storage = flickr.get_storage() file = urllib2.urlopen(url) mimetype = file.info().get('content-type') media = Media.objects.create(record=record, storage=storage, name=record.name, mimetype=mimetype) # should be done better: loading file into StringIO object to make it # seekable file = StringIO(file.read()) file = rotateImageBasedOnExif(file) media.save_file(record.name + guess_extension(mimetype), file) jobinfo.complete('Complete', 'File downloaded') except Exception, ex: logging.exception('flickr_download_media failed for %s' % job) jobinfo.update_status('Failed: %s' % ex)
def flickr_download_media(record_id, url):
    """Download *url* for the Flickr record *record_id* and store it.

    The image is buffered in memory (so it is seekable) and rotated
    according to its EXIF orientation before being saved as Media.
    """
    flickr_source = FlickrSearch()
    record = Record.objects.get(id=record_id, manager='flickr')
    storage = flickr_source.get_storage()

    remote = urllib2.urlopen(url)
    mimetype = remote.info().get('content-type')
    media = Media.objects.create(record=record, storage=storage,
                                 name=record.name, mimetype=mimetype)

    # should be done better: loading file into StringIO object to make it
    # seekable
    buffered = StringIO(remote.read())
    oriented = rotateImageBasedOnExif(buffered)
    media.save_file(record.name + guess_extension(mimetype), oriented)
def shared_download_media(job): logging.info('shared_download_media started for %s' % job) jobinfo = JobInfo.objects.get(id=job.arg) try: if jobinfo.status.startswith == 'Complete': # job finished previously return arg = simplejson.loads(jobinfo.arg) shared = SharedSearch(arg['shared_id']) record = Record.objects.get(id=arg['record'], manager=shared.get_source_id()) url = arg['url'] storage = shared.get_storage() username = shared.shared.username password = shared.shared.password # do an authenticated request if we have a username and password if username and password: r = requests.get(url, auth=(username, password)) else: r = requests.get(url) # turn our conent into a "file-like" object :) file = StringIO(r.content) setattr(file, 'size', int(r.headers['content-length'])) mimetype = r.headers['content-type'] media = Media.objects.create( record=record, storage=storage, name=record.name, mimetype=mimetype, ) media.save_file(record.name + guess_extension(mimetype), file) jobinfo.complete('Complete', 'File downloaded') except Exception, ex: logging.info('shared_download_media failed for %s (%s)' % (job, ex)) jobinfo.update_status('Failed: %s' % ex)
# NOTE(review): fragment of a "dummy" download worker -- the enclosing
# ``def`` and the ``try`` that the leading ``print ex`` belongs to are not
# visible in this view.  ``jobinfo``, ``job`` and ``_get_storage`` are
# bound in the unseen surrounding scope.
print ex
try:
    print "** foo"
    # NOTE(review): ``startswith == 'Complete'`` compares the bound method
    # to a string and is always False -- presumably this was meant to be
    # ``jobinfo.status.startswith('Complete')``; confirm and fix.
    if jobinfo.status.startswith == 'Complete':
        return
    print "** bar"
    arg = simplejson.loads(jobinfo.arg)
    record = Record.objects.get(id=arg['record'], manager='dummy')
    url = arg['url']
    print "** baz"
    storage = _get_storage()
    # file = urllib2.urlopen(url)
    # Fetch through the local proxy on port 3128 instead of going direct.
    file = urllib2.build_opener(
        urllib2.ProxyHandler({"http": "http://localhost:3128"})).open(url)
    # save_file appears to require a ``size`` attribute on the file object.
    setattr(file, 'size', int(file.info().get('content-length')))
    mimetype = file.info().get('content-type')
    print "** 4"
    media = Media.objects.create(record=record, storage=storage,
                                 name=record.name, mimetype=mimetype)
    media.save_file(record.name + guess_extension(mimetype), file)
    print "** 5"
    jobinfo.complete('Complete', 'File downloaded')
    print "** 6"
except Exception, ex:
    logging.info('dummy download media failed for %s (%s)' % (job, ex))
    jobinfo.update_status('Failed: %s' % ex)
# NOTE(review): fragment of the "unitedsearch" download worker -- the
# enclosing ``def``, the outer ``try`` and the ``if`` completed by the
# ``else`` below are not visible in this view.  ``image_file``, ``size``,
# ``file``, ``storage``, ``record`` and ``jobinfo`` are bound in the
# unseen surrounding scope.
print 'have image file'
else:
    print 'do not have image file'
#size = file.info().get('content-length')
#setattr(file, 'size', int(size if size else 0))
# save_file appears to require a ``size`` attribute on the file object;
# falls back to 0 when no content-length was available.
setattr(image_file, 'size', int(size if size else 0))
mimetype = file.info().get('content-type')
print 'mimetype is :' + mimetype
file.close()
if storage:
    print 'storage is valid in workers.py before 53'
# NOTE(review): Media is created even when ``storage`` is falsy -- the
# ``if storage`` guard above only prints; confirm whether that is intended.
media = Media.objects.create(record=record, storage=storage,
                             name=record.name, mimetype=mimetype)
# NOTE(review): ``ext`` is computed but never used (``extension`` is the
# one consumed below).
ext = guess_extension(mimetype)
extension = guess_extension(mimetype)
if not extension :
    # wasn't a matched pattern
    extension = ""
#media.save_file(record.name + guess_extension(mimetype), file)
print "saving file " + record.name + extension
print record.name
print extension
media.save_file(record.name + extension, image_file)
#media.save_file(record.name + extension, file)
jobinfo.complete('Complete', 'File downloaded')
except Exception, ex:
    print traceback.print_exc()
    print('unitedsearch download media failed for %s (%s)' % (job, ex))
    jobinfo.update_status('Failed: %s' % ex)
def presentation_import(pres_ids, rc):
    """Import presentations from a remote MDID3 instance via its API.

    For each presentation id in *pres_ids*, fetches the presentation JSON
    from mdid3.temple.edu (authenticated with the request cookies *rc*),
    creates a local Presentation, and for every slide either links an
    existing Record (matched by its ID field value) or creates a new
    Record with the slide's metadata, title, collection membership and
    downloaded media file.

    NOTE(review): relies on names bound elsewhere in this module --
    ``target_user``, ``store``, ``fid``, ``collection``, ``mdid_base_url``,
    ``standardfield`` -- confirm they are initialised before this runs.
    """
    print pres_ids
    for pres_id in pres_ids:
        pres_url = ('http://mdid3.temple.edu/api/presentation/'
                    + str(pres_id) + '/')
        print 'fetching %s' % pres_url
        theShow = requests.get(pres_url, cookies=rc)
        #print theShow.json()
        jp = simplejson.loads(theShow.content)
        concat_description = jp['description']
        presentation = Presentation.objects.create(
            title=jp['title'], owner=target_user,
            description=concat_description)
        # jp['content'] contains every slide
        for order, slide in enumerate(jp['content']):
            #print order, slide
            rec_exists = False
            rec_id = None
            print 'using storage %s' % store.base
            # Scan the slide metadata for its 'ID' field and check whether
            # a record with that field value already exists locally.
            for metadata in slide['metadata']:
                #print 'metadata for slide %s, %s' % (slide['name'], str(metadata))
                #print metadata
                if metadata['label'] == 'ID':
                    print 'metadata for slide %s, %s' % (slide['name'],
                                                         str(metadata))
                    rec_id = metadata['value']
                    print '%s is an ID field' % rec_id
                    #print metadata['value']
                    if Record.by_fieldvalue(fid, rec_id):
                        rec_exists = True
                        print '%s already exists' % rec_id
                        break
            # when finished checking for ID either add existing record to
            # pres or create record and then add it
            if rec_exists:
                # note that record is the first record in the list that is
                # returned by by_fieldvalue, which should be checked for
                # accuracy in multiple tests if there's any chance that
                # there could be multiple records
                print 'Check the following list list of records for multiple values:'
                print Record.by_fieldvalue(fid, rec_id)
                record = Record.by_fieldvalue(fid, rec_id)[0]
                presentation.items.create(order=order, record=record)
                presentation.save()
                print 'adding %s to presentation at position %s' % (rec_id,
                                                                    order)
            else:
                print 'creating record for %s' % rec_id
                print 'metadata:'
                print slide['metadata']
                #record = Record.objects.create(name=rec_id, owner=target_user)
                record = Record.objects.create(owner=target_user)
                record.save()
                # Copy every metadata entry onto the new record, preferring
                # the 'aae' standard's field; fall back to the first field
                # with a matching label, otherwise skip the entry.
                for metadata in slide['metadata']:
                    try:
                        target = Field.objects.get(label=metadata['label'],
                                                   standard__prefix='aae')
                        record.fieldvalue_set.create(field=target,
                                                     value=metadata['value'],
                                                     label=metadata['label'], )
                    except Exception as e:
                        print e
                        try:
                            target = Field.objects.filter(
                                label=metadata['label'])
                            record.fieldvalue_set.create(
                                field=target[0],
                                value=metadata['value'],
                                label=metadata['label'], )
                            print "Ok, went with %s the first field I could find to go with!" % target[0]
                        except Exception as e_two:
                            print e_two
                            print "ok, giving up!"
                            continue
                        continue
                # Slides without a title get a placeholder.
                try:
                    title = slide['title']
                except:
                    title = 'Untitled'
                FieldValue.objects.create(record=record,
                                          field=standardfield('title'),
                                          order=0, value=title)
                col_i = CollectionItem.objects.create(collection=collection,
                                                      record=record)
                print 'collection item created: %s' % col_i
                ## file biz
                # media_req.content contains the image
                media_url = mdid_base_url + slide['image']
                print 'media_url: %s' % media_url
                media_req = requests.get(media_url, cookies=rc)
                mimetype = media_req.headers['content-type']
                file = StringIO(media_req.content)
                # Normalise '.jpeg' to the conventional '.jpg' filename.
                if guess_extension(mimetype) == '.jpeg':
                    filename = record.name + '.jpg'
                    extension = 'JPEG'
                else:
                    filename = os.path.join(record.name
                                            + guess_extension(mimetype))
                    # NOTE(review): splitext on a mimetype string looks
                    # wrong ('image/png' has no extension part); this value
                    # is only printed below -- confirm intent.
                    extension = os.path.splitext(mimetype)[0]
                print 'extension %s' % extension
                file_path = os.path.join(store.base, filename)
                print 'saving media file for %s to %s' % (record.name,
                                                          file_path)
                media = Media.objects.create(record=record,
                                             #name=os.path.splitext(file.name)[0],
                                             name=record.name,
                                             storage=store,
                                             mimetype=mimetype)
                media.save_file(filename, file)
                presentation.items.create(order=order, record=record)
                presentation.save()
# NOTE(review): fragment of the "unitedsearch" download worker -- the
# enclosing ``def``, the outer ``try`` and the ``if`` completed by the
# leading ``else`` are not visible in this view.  ``image_file``, ``size``,
# ``file``, ``storage``, ``record`` and ``jobinfo`` are bound in the
# unseen surrounding scope.
else:
    print 'do not have image file'
#size = file.info().get('content-length')
#setattr(file, 'size', int(size if size else 0))
# save_file appears to require a ``size`` attribute on the file object;
# falls back to 0 when no content-length was available.
setattr(image_file, 'size', int(size if size else 0))
mimetype = file.info().get('content-type')
print 'mimetype is :' + mimetype
file.close()
if storage:
    print 'storage is valid in workers.py before 53'
# NOTE(review): Media is created even when ``storage`` is falsy -- the
# ``if storage`` guard above only prints; confirm whether that is intended.
media = Media.objects.create(record=record, storage=storage,
                             name=record.name, mimetype=mimetype)
print "storage set!!!!!!11!!!1!!!!!!!11!!!1!!!!!1!!"
# NOTE(review): ``ext`` is computed but never used (``extension`` is the
# one consumed below).
ext = guess_extension(mimetype)
extension = guess_extension(mimetype)
if not extension:
    # wasn't a matched pattern
    extension = ""
#media.save_file(record.name + guess_extension(mimetype), file)
print "saving file " + record.name + extension
print record.name
print extension
media.save_file(record.name + extension, image_file)
#media.save_file(record.name + extension, file)
jobinfo.complete('Complete', 'File downloaded')
except Exception, ex:
    logging.info('unitedsearch download media failed for %s (%s)' % (job, ex))
    jobinfo.update_status('Failed: %s' % ex)
# NOTE(review): fragment of a "dummy" download worker -- the enclosing
# ``def`` and the ``try`` that the leading ``except`` clause belongs to are
# not visible in this view.  ``jobinfo``, ``job`` and ``_get_storage`` are
# bound in the unseen surrounding scope.
except Exception, ex:
    print ex
try:
    print "** foo"
    # NOTE(review): ``startswith == 'Complete'`` compares the bound method
    # to a string and is always False -- presumably this was meant to be
    # ``jobinfo.status.startswith('Complete')``; confirm and fix.
    if jobinfo.status.startswith == 'Complete':
        return
    print "** bar"
    arg = simplejson.loads(jobinfo.arg)
    record = Record.objects.get(id=arg['record'], manager='dummy')
    url = arg['url']
    print "** baz"
    storage = _get_storage()
    # file = urllib2.urlopen(url)
    # Fetch through the local proxy on port 3128 instead of going direct.
    file = urllib2.build_opener(
        urllib2.ProxyHandler({"http": "http://localhost:3128"})).open(url)
    # save_file appears to require a ``size`` attribute on the file object.
    setattr(file, 'size', int(file.info().get('content-length')))
    mimetype = file.info().get('content-type')
    print "** 4"
    media = Media.objects.create(record=record, storage=storage,
                                 name=record.name, mimetype=mimetype)
    media.save_file(record.name + guess_extension(mimetype), file)
    print "** 5"
    jobinfo.complete('Complete', 'File downloaded')
    print "** 6"
except Exception, ex:
    logging.info('dummy download media failed for %s (%s)' % (job, ex))
    jobinfo.update_status('Failed: %s' % ex)