def postVideoNoMerge(user, url, tags, copy, pid, rank, repost_type):
    log(obj={'url': url, 'tags': tags, 'copy': copy, 'pid': pid, 'rank': rank})
    filterOperation('postVideo', user)
    tags = [tag.strip() for tag in tags]
    if not url:
        raise UserError('EMPTY_URL')
    if len(url) > VideoConfig.MAX_URL_LENGTH:
        raise UserError('URL_TOO_LONG')
    if len(tags) > VideoConfig.MAX_TAGS_PER_VIDEO:
        raise UserError('TAGS_LIMIT_EXCEEDED')
    obj, cleanURL = dispatch(url)
    if obj is None:
        log(level='WARN', obj={'url': url})
        raise UserError('UNSUPPORTED_WEBSITE')
    if not cleanURL:
        raise UserError('EMPTY_URL')
    uid = obj.unique_id(obj, cleanURL)
    vid_item = tagdb.retrive_item({'item.unique_id': uid})
    if vid_item is None:
        tags = filterTags(tags)
        log(obj={'url': cleanURL})
        task_id = postTask(_createJsonForPosting(
            cleanURL, tags, copy, pid, rank, [], repost_type, user, use_autotag=True))
        return task_id
    else:
        # the video already exists, so no posting task is created
        return 'no-such-task'
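# The merge check above relies on dispatch() mapping any supported URL to a
# (scraper, canonical URL) pair, and on unique_id() being stable across URL
# variants of the same video. A minimal sketch of that contract; ToyScraper,
# toy_dispatch and the regex are hypothetical illustrations, not the real
# dispatcher:
import re

class ToyScraper:
    NAME = 'toysite'
    PATTERN = re.compile(r'https://toysite\.example/v/(\w+)')

    def unique_id(self, link):
        return 'toysite:' + self.PATTERN.match(link).group(1)

def toy_dispatch(url):
    match = ToyScraper.PATTERN.match(url.split('?', 1)[0])
    return (ToyScraper, match.group(0)) if match else (None, None)

# A query-string variant still resolves to the same unique id, so a second
# post of the same video would hit the vid_item-exists branch above:
_obj, _cleanURL = toy_dispatch('https://toysite.example/v/abc123?t=30')
assert _obj.unique_id(_obj, _cleanURL) == 'toysite:abc123'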
def postVideoIPFS_new(user, url, tags, copy, pid, rank, desc, title, cover_file_key):
    log(obj={'url': url, 'tags': tags, 'copy': copy, 'pid': pid, 'rank': rank})
    filterOperation('postVideo', user)
    tags = [tag.strip() for tag in tags]
    # TODO: check title and desc length
    if not url:
        raise UserError('EMPTY_URL')
    if len(url) > VideoConfig.MAX_URL_LENGTH:
        raise UserError('URL_TOO_LONG')
    if len(tags) > VideoConfig.MAX_TAGS_PER_VIDEO:
        raise UserError('TAGS_LIMIT_EXCEEDED')
    if len(title) > VideoConfig.MAX_TITLE_LENGTH:
        raise UserError('TITLE_TOO_LONG')
    if len(desc) > VideoConfig.MAX_DESC_LENGTH:
        raise UserError('DESC_TOO_LONG')
    cover_file = None
    if cover_file_key.startswith("upload-image-"):
        filename = rdb.get(cover_file_key)
        if filename:
            cover_file = filename.decode('ascii')
    if cover_file is None:
        raise UserError('NO_COVER')
    obj, cleanURL = dispatch(url)
    if obj is None:
        raise UserError('UNSUPPORTED_WEBSITE')
    if not cleanURL:
        raise UserError('EMPTY_URL')
    if obj.NAME != 'ipfs':
        raise UserError('NOT_IPFS')
    _verifyTags(tags)
    log(obj={'url': cleanURL})
    task_id = postTask(_createJsonForPosting(
        cleanURL, tags, copy, pid, rank, [], user,
        field_overrides={'title': title, 'desc': desc, 'cover_image_override': cover_file, '__condition': 'any'}))
    return task_id
def postVideoBatch(user, videos, tags, copy, pid, rank, as_copies):
    log(obj={'urls': videos, 'tags': tags, 'copy': copy, 'pid': pid, 'rank': rank, 'as_copies': as_copies})
    filterOperation('postVideoBatch', user)
    tags = [tag.strip() for tag in tags]
    if not videos:
        raise UserError('EMPTY_LIST')
    if len(videos) > VideoConfig.MAX_BATCH_POST_COUNT:
        raise UserError('POST_LIMIT_EXCEEDED')
    if len(tags) > VideoConfig.MAX_TAGS_PER_VIDEO:
        raise UserError('TAGS_LIMIT_EXCEEDED')
    _verifyTags(tags)
    cleanURL_objs = []
    unique_ids = []
    for url in videos:
        url = url.strip()
        if not url:
            continue
        obj, cleanURL = dispatch(url)
        # Batch posts are allowed to succeed partially: unsupported URLs are
        # logged and skipped instead of failing the whole batch.
        if obj is not None:
            uid = obj.unique_id(obj, cleanURL)
            if uid not in unique_ids:  # remove duplicated items
                cleanURL_objs.append((obj, cleanURL))
                unique_ids.append(uid)
        else:
            log('dispatcher', level='WARN', obj={'failed_url': url})
    task_ids = []
    for idx, (obj, cleanURL) in enumerate(cleanURL_objs):
        log(obj={'url': cleanURL})
        # offset the rank per item only when an explicit rank was requested
        next_idx = idx if rank >= 0 else 0
        task_id = postTask(_createJsonForPosting(
            cleanURL, tags, copy, pid, rank + next_idx,
            unique_ids if as_copies else [], user, unique_ids))
        task_ids.append(task_id)
    return task_ids
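# A quick illustration of the rank-offset rule above. _batch_ranks is a
# hypothetical helper (not part of this module's API) that mirrors the
# arithmetic: next_idx = idx if rank >= 0 else 0, posted rank = rank + next_idx.
def _batch_ranks(rank, n):
    return [rank + (idx if rank >= 0 else 0) for idx in range(n)]

assert _batch_ranks(5, 3) == [5, 6, 7]      # explicit rank: items keep their order
assert _batch_ranks(-1, 3) == [-1, -1, -1]  # append rank: no per-item offset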
def getVideosByURLs(urls):
    ret = []
    for url in urls:
        obj, cleanURL = dispatch(url)
        if obj is None:
            ret.append({'url': url, 'exist': False, 'reason': 'UNSUPPORTED_WEBSITE'})
            continue
        if not cleanURL:
            ret.append({'url': url, 'exist': False, 'reason': 'EMPTY_URL'})
            continue
        uid = obj.unique_id(obj, cleanURL)
        item = db.retrive_item({'item.unique_id': uid})
        if item:
            ret.append({'url': url, 'exist': True, 'id': item['_id']})
        else:
            ret.append({'url': url, 'exist': False, 'reason': 'VIDEO_NOT_EXIST'})
    return ret
def getVideoByURL(url):
    obj, cleanURL = dispatch(url)
    if obj is None:
        raise UserError('UNSUPPORTED_WEBSITE')
    if not cleanURL:
        raise UserError('EMPTY_URL')
    uid = obj.unique_id(obj, cleanURL)
    item = db.retrive_item({'item.unique_id': uid})
    if item:
        return item
    raise UserError('VIDEO_NOT_EXIST')
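# Typical caller-side usage of getVideoByURL (a sketch: _example_lookup and
# the placeholder URL are illustrative, not part of this module's API):
def _example_lookup(url):
    try:
        return getVideoByURL(url)['_id']
    except UserError:
        # raised as 'UNSUPPORTED_WEBSITE', 'EMPTY_URL' or 'VIDEO_NOT_EXIST' above
        return None

# _example_lookup('https://www.example.com/watch?v=xyz')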
async def get_twitter_info(request):
    url = (await request.json())['url']
    #log(obj = {'url': url})
    obj, cleanURL = dispatch(url)
    # dispatch returns None for unsupported sites; guard before reading obj.NAME
    if obj is None or obj.NAME != 'twitter':
        #log(obj = {'msg': 'NOT_TWITTER'})
        return makeResponseFailed('NOT_TWITTER')
    info = await obj.get_metadata_async(obj, cleanURL, False)
    if info["status"] != 'SUCCEED':
        #log(obj = {'msg': 'FETCH_FAILED', 'info': info})
        return makeResponseFailed('FETCH_FAILED')
    return info
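# Smoke-test sketch for get_twitter_info: _StubRequest fakes the awaited
# request interface (an async json() returning the parsed body). The class
# and example URL are assumptions for illustration; a real run needs the
# dispatcher and network access, so the driver line stays commented out.
class _StubRequest:
    def __init__(self, payload):
        self._payload = payload

    async def json(self):
        return self._payload

# import asyncio
# asyncio.run(get_twitter_info(_StubRequest({'url': 'https://twitter.com/i/status/1'})))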
def postVideo(user, url, tags, copy, pid, rank):
    log(obj={'url': url, 'tags': tags, 'copy': copy, 'pid': pid, 'rank': rank})
    filterOperation('postVideo', user)
    tags = [tag.strip() for tag in tags]
    if not url:
        raise UserError('EMPTY_URL')
    if len(url) > VideoConfig.MAX_URL_LENGTH:
        raise UserError('URL_TOO_LONG')
    if len(tags) > VideoConfig.MAX_TAGS_PER_VIDEO:
        raise UserError('TAGS_LIMIT_EXCEEDED')
    obj, cleanURL = dispatch(url)
    if obj is None:
        raise UserError('UNSUPPORTED_WEBSITE')
    if not cleanURL:
        raise UserError('EMPTY_URL')
    _verifyTags(tags)
    log(obj={'url': cleanURL})
    task_id = postTask(_createJsonForPosting(cleanURL, tags, copy, pid, rank, [], user))
    return task_id
def inferTagsFromVideo(utags, title, desc, user_language, video_url: str = '', user_urls=None):
    log(obj={'title': title, 'desc': desc, 'utags': utags, 'lang': user_language,
             'video_url': video_url, 'user_urls': user_urls})
    user_urls = user_urls or []  # default to an empty list (avoids a shared mutable default)
    video_url = video_url.strip()
    tagids = []
    if video_url:
        # if the video already exists in the database, reuse its tags instead of inferring
        obj, cleanURL = dispatch(video_url)
        if obj is not None:
            uid = obj.unique_id(obj, cleanURL)
            vid_item = db.retrive_item({'item.unique_id': uid})
            if vid_item is not None:
                # keep only regular tag ids (below 0x80000000)
                tagids = list(filter(lambda x: x < 0x80000000, vid_item['tags']))
    if not tagids:
        utags = [u.lower() for u in utags]
        utags.append(title)
        utags.append(desc)
        # join all text fields with an unlikely separator token before inference
        all_text = ' 3e7dT2ibT7dM '.join(utags)
        tagids = inferTagidsFromText(all_text)
    matched_author_records, matched_author_tags = matchUserSpace(user_urls)
    matched_common_ids = itertools.chain.from_iterable(
        [x['common_tagids'] for x in matched_author_records])
    tagids = list(set(tagids) | set([x['id'] for x in matched_author_tags]) | set(matched_common_ids))
    return db.translate_tag_ids_to_user_language(tagids, user_language)[0]
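# Sketch of what the fallback inference path above feeds to
# inferTagidsFromText: lowercased user tags plus title and desc, joined with
# the rare separator token so one text pass sees every field. _toy_infer is a
# hypothetical stand-in for the real classifier, illustration only:
def _toy_infer(all_text):
    KNOWN = {'vocaloid': 1, 'miku': 2}  # hypothetical keyword -> tag id map
    return [tid for word, tid in KNOWN.items() if word in all_text]

_sample = ' 3e7dT2ibT7dM '.join(['miku', 'my first vocaloid upload', ''])
assert _toy_infer(_sample) == [1, 2]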