def savevideo(list):
    """Persist two hard-coded test Video rows in a single bulk insert.

    NOTE(review): the `list` argument is immediately discarded; the
    parameter (which also shadows the builtin) is kept only to preserve
    the call signature for existing callers.
    """
    try:
        logger.info('start job')
        videos = [
            models.Video(**{'name': 'test3', 'type': 'tv', 'tag': 'omtv',
                            'url': 'http://www.baidu.com'}),
            models.Video(**{'name': 'test4', 'type': 'tv', 'tag': 'omtv',
                            'url': 'http://www.baidu.com'}),
        ]
        models.Video.objects.bulk_create(videos)
    except Exception as e:
        # was a bare `except:` that hid the actual failure; log it
        logger.error('save error: %s', e)
def post(self):
    """Create a Video from the parsed request args, queue an async trim
    job for it, and return the new row as a dict."""
    args = parser.parse_args()
    video = models.Video(**args)
    db.add(video)
    try:
        db.commit()
        # video.id is assigned by the commit above; hand it to the
        # background trim worker
        executor.submit(TrimVideoProcessor(video.id))
        return video.as_dict
    finally:
        # original closed the session only on the success path; make sure
        # it is released even when commit/submit raises
        db.close()
def add_video(video: schemas.VideoBase, db: Session = Depends(get_db)):
    """Insert a new Video row built from the request payload and return
    the persisted instance."""
    payload = {
        'cloudflare_video_id': video.cloudflare_video_id,
        'title': video.title,
        'thumbnail': video.thumbnail,
    }
    record = models.Video(**payload)
    db.add(record)
    db.commit()
    # pull server-generated fields (e.g. the primary key) back onto the
    # instance before returning it
    db.refresh(record)
    return record
def savetv(list):
    """Bulk-insert a 'tv'/'omtv' Video row for each anchor element.

    Each item in `list` is expected to be a BeautifulSoup-style tag whose
    text becomes the video name and whose href (prefixed with the module's
    `basepath`) becomes the url. The parameter name shadows the builtin but
    is kept to preserve the call signature.
    """
    try:
        batchlist = []
        # iterate the elements directly instead of range(len(list)) indexing
        for link in list:
            vname = link.string
            vurl = basepath + link.get('href')
            batchlist.append(models.Video(name=vname.encode('utf-8'),
                                          url=vurl, type='tv', tag='omtv'))
        models.Video.objects.bulk_create(batchlist)
    except Exception as e:
        # `except Exception, e` is Python-2-only syntax and `e.message` is
        # deprecated; log the exception object itself instead
        logger.error(e)
def addFile(self, txt):
    """Prompt the user for a file, derive a display name from its basename
    (extension stripped), add it to the collection, and refresh the list."""
    path = fd.askopenfilename(initialdir="/")
    # askopenfilename returns '' (or an empty tuple) when the dialog is
    # cancelled, so test truthiness instead of comparing against ''
    if path:
        s = str(path)
        # basename without the final extension; unlike the old
        # rfind('/')/rfind('.') slicing, this also works when the filename
        # has no '.' or when a directory component contains one
        base = s.rsplit('/', 1)[-1]
        name = base.rsplit('.', 1)[0]
        self.col.add(models.Video(name, path=path))
        self.listUpdate(txt)
    else:
        print('File was not specified.')
def post(self):
    """Handle a video submission.

    Validates the form fields, extracts a YouTube or Vimeo video id from
    the url, fetches a thumbnail from the provider API, rejects duplicates,
    then stores and scores the new Video and redirects to its page.
    """
    url = self.get_argument('url', '')
    title = self.get_argument('title', '')
    text = self.get_argument('text', '')
    # required-field validation; copyargs preserves the user's other inputs
    if not url:
        return self.reload(message='missing_url', copyargs=True)
    elif not title:
        return self.reload(message='missing_title', copyargs=True)
    elif not text:
        return self.reload(message='missing_text', copyargs=True)
    youtube = re.findall(r'youtube\.com\/watch\?\S*v=([^&\s]+)', url)
    vimeo = re.findall(r'vimeo\.com\/(\d+)', url)
    youtube = youtube[0] if youtube else None
    vimeo = vimeo[0] if vimeo else None
    if youtube:
        api = ('http://gdata.youtube.com/feeds/api/videos/' + youtube +
               '?v=2&alt=json&key=AI39si5nwWTo74H9slMDgT-fO_PkbBGIKzEMwJ53iWOUUKaHFmphCjXDoYFyagU5W15Q7ZB7wRpON-fHNLdMnGOWsiz18_kQig')
        response = urllib.urlopen(api).read()
        try:
            data = json.loads(response)
        except ValueError:
            logging.error(response)
            return self.reload(message='not_found', copyargs=True)
        thumbnail = data['entry']['media$group']['media$thumbnail'][1]['url']
    elif vimeo:
        api = 'http://vimeo.com/api/v2/video/' + vimeo + '.json'
        response = urllib.urlopen(api).read()
        # guard malformed JSON the same way as the YouTube branch (the
        # original would raise an unhandled ValueError here)
        try:
            data = json.loads(response)
        except ValueError:
            logging.error(response)
            return self.reload(message='not_found', copyargs=True)
        if not data:
            return self.reload(message='not_found', copyargs=True)
        thumbnail = data[0]['thumbnail_large']
    else:
        return self.reload(message='not_found', copyargs=True)
    # reject resubmission of a video we already have, by provider id
    if youtube:
        dupe = models.Video.all().filter('youtube =', youtube).get()
    elif vimeo:
        dupe = models.Video.all().filter('vimeo =', vimeo).get()
    if dupe:
        return self.reload(message='dupe', dupe=dupe.key().id(),
                           copyargs=True)
    video = models.Video(youtube=youtube, vimeo=vimeo, title=title,
                         text=text, thumbnail=thumbnail)
    video.update_score()
    video.put()
    self.redirect('/' + str(video.key().id()))
def Submitvideo(request):
    """Accept a POSTed `video` form, store it via Video.__addvideo__,
    and redirect to /L.

    NOTE(review): non-POST requests fall through and return None, which
    Django reports as an error — presumably this view is only routed for
    POST; confirm against the urlconf.
    """
    if request.method == 'POST':
        form = video(request.POST)
        cur = models.Video()
        description = form['description'].value()
        name = form['name'].value()
        htmlembedder = form['htmlembedder'].value()
        # (removed a leftover debug `print htmlembedder`)
        cur.__addvideo__(name, description, htmlembedder)
        return HttpResponseRedirect('/L')
def batchSave(list, parentId):
    """Bulk-insert Video rows built from the given anchor elements.

    For top-level items (parentId == 0) the url is `basepath` + href;
    otherwise the href is resolved through getthunderlink(). Items whose
    name is 6 characters or shorter are skipped. Returns True on success,
    False on any failure. The parameter name shadows the builtin but is
    kept to preserve the call signature.
    """
    try:
        logger.info('start batchsave---------------------------------------------------------')
        batchList = []
        # iterate the elements directly instead of range(len(list)) indexing
        for link in list:
            surl = basepath + link.get('href') if parentId == 0 else getthunderlink(link.get('href'))
            sname = link.string
            logger.info(len(surl))
            if len(sname) > 6:
                batchList.append(models.Video(name=sname.encode('utf-8'),
                                              url=surl, type=vtype, tag=vtag,
                                              parentid=parentId))
        printbatchlist(batchList)
        models.Video.objects.bulk_create(batchList)
        return True
    except Exception as e:
        # was a bare `except:` that returned False with no trace of why
        logger.error(e)
        return False
def index():
    """Render the upload form; on a valid POST, push the uploaded video
    file to S3, record a Video submission, and redirect home."""
    video_upload_form = models.video_upload_form(request.form)
    if request.method == 'POST' and video_upload_form.validate():
        uploaded_file = request.files['fileupload']
        if uploaded_file and allowed_file(uploaded_file.filename):
            filename = secure_filename(uploaded_file.filename)
            # connecting to s3
            s3 = boto.connect_s3(os.environ.get('AWS_ACCESS_KEY_ID'),
                                 os.environ.get('AWS_SECRET_ACCESS_KEY'))
            bucket = s3.get_bucket(os.environ.get('AWS_BUCKET'))
            # new_key() expects the key NAME; the original passed the bucket
            # object itself (harmless only because k.key was reassigned)
            k = bucket.new_key(filename)
            # NOTE(review): "Content Type" (with a space) sets custom
            # metadata, not the real Content-Type header — confirm intent
            k.set_metadata("Content Type", uploaded_file.mimetype)
            k.set_contents_from_string(
                uploaded_file.stream.read())  # file contents to be added
            k.set_acl('public-read')  # make publicly readable
            # if the file was actually uploaded:
            if k and k.size > 0:
                submission = models.Video()
                submission.title = request.form.get('title')
                submission.filename = filename
                submission.save()
                return redirect('/')
            else:
                return "Uh-oh, there was an error" + uploaded_file.filename
        else:
            # previously fell through and returned None (a 500 in Flask)
            return "Uh-oh, that file type is not allowed"
    else:
        templateData = {'form': video_upload_form}
        return render_template('main.html', **templateData)
def buildVideoBatch(cls, rows):
    """Create search documents and their Video datastore entities in batch.

    Takes a list of params dicts for NEW videos only (existing entities are
    not updated). Doc ids are not derived from the video ids; they come back
    from the index-add results and are copied onto the entities before the
    batch datastore put.
    """
    documents = []
    entities = []
    for row in rows:
        try:
            params = cls._normalizeParams(row)
            documents.append(cls._createDocument(**params))
            # build the matching video entity; its doc_id is filled in
            # after indexing succeeds below
            entities.append(models.Video(
                id=params[cls.UNIQUEID],
                duration_min=int(params[cls.DURATION_MIN]),
                category=params[cls.CATEGORY]))
        except errors.OperationFailedError:
            logging.error('error creating document from data: %s.', row)
    logging.debug('buildVideoBatch: docs=%s.', len(documents))
    logging.debug('buildVideoBatch: dbps=%s.', len(entities))
    try:
        add_results = cls.add(documents)
    except search.Error:
        logging.exception('Add failed')
        return
    if len(add_results) != len(entities):
        # should be unreachable: a partial failure raises search.Error above
        raise errors.OperationFailedError(
            'Error: wrong number of results returned from indexing operation'
        )
    # the indexer returns results in the same order as the submitted docs,
    # so pair them up positionally and persist
    for entity, result in zip(entities, add_results):
        entity.doc_id = result.id
    ndb.put_multi(entities)
def ref_id_check(res_container):
    """Filter result tuples into the global `presto_imports` list.

    Tuples containing any None field are skipped (a missing ref id is
    treated as "no Presto import"); everything inspected is printed for
    tracing.
    """
    for item in res_container:
        print(item)
        if None in item:
            print("None type found in tuple, we're assuming None is the lack of ref id == no Presto import")
        else:
            presto_imports.append(item)
            for stuff in item:
                print(stuff)
    print(presto_imports)


ref_id_check(res_container)

# create video table object
# NOTE(review): this instance appears unused below; kept in case the
# constructor has side effects
Video = models.Video()

# parsing out tuples in presto_imports list
for tupes in presto_imports:
    # tupes[0] is a Brightcove video id
    bc_id = tupes[0]
    foo = models.session.query(models.Video).filter_by(video_id=bc_id).all()
    print(foo)
    # implicit boolean-ness of an empty list: `if not foo` is equivalent
    # to len(foo) == 0, i.e. the video is not yet in our table
    if not foo:
        video = models.Video(video_id=tupes[0], name=tupes[1],
                             ref_id=tupes[2])
        models.session.add(video)
# -*- coding: utf-8 -*- # This is our Brightcove "module" containing both our oAuth procedure and the functions necessary for working with Brightcove's # Dynamic Ingest API print "Content-type: application/json\n\n" import httplib import urllib import base64 import json import requests import models # create video table object: Video = models.Video() # Read the oauth secrets and account ID from our oauth configuration file "brightcove_oauth.txt" located in # same directory as our Python scripts def loadSecret(): credsFile = open('brightcove_oauth.json') creds = json.load(credsFile) return creds # get the oauth 2.0 token def getAuthToken(creds): conn = httplib.HTTPSConnection("oauth.brightcove.com") url = "/v3/access_token"
# For Dynamic Ingest API, see here: # http://docs.brightcove.com/en/video-cloud/di-api/samples/batch-dynamic-ingest.html # Importing the other script in our app: brightcove.py — this contains all the logic for creating a video # object in the Brightcove CMS and using the Dynamic Ingest API to ingest the video asset via its # source URL import BC_01 import json import feedparser import models import re # create video table object: Video = models.Video() d = feedparser.parse({INSERT YOUR FEED HERE}) response_array = [] # For each item in the feed for index, post in enumerate(d.entries): if index >= 4: break print post.title + ":" # print post.link+"" # print post.description+":" print post.description