def api_get_media_hot(request):
    num = request.GET.get('num')
    curpage = request.GET.get('curpage')
    pagesize = request.GET.get('pagesize')
    total_count = 0
    last_page_or_not = 0
    # Paginated request
    if curpage and pagesize:
        curpage = int(curpage)
        pagesize = int(pagesize)
        offset = (curpage - 1) * pagesize
        dataList = yield from Media.getAssetHot(orderBy='hot desc',
                                                limit=[offset, pagesize])
        total_count = len(dataList)
        last_page_or_not = is_last_page(total_count, pagesize=pagesize)
    # Front-page request
    else:
        if not num:
            num = 3
        num = int(num)
        dataList = yield from Media.getAssetHot(orderBy='hot desc', limit=num)
        total_count = len(dataList)
    return dict(create_json_head(total_count, last_page_or_not), dataList=dataList)
def index_content(root_dir, file_types, content_type):
    """Scan the media directory, creating an index of file properties
    for display and serving."""
    logger.debug('indexing')
    content_dir = os.path.join(root_dir, app.config['CONTENT_DIR'])
    files = file_paths(filtered_walk(content_dir, included_files=file_types))
    for contentfile in files:
        rel_path = os.path.relpath(contentfile, root_dir)
        filepath = os.path.join(root_dir, rel_path)
        filename = os.path.split(contentfile)[1]
        local_path = os.path.relpath(filepath, root_dir)
        if os.path.exists(os.path.join(filepath, 'folder.jpg')):
            img = os.path.join(filepath, 'folder.jpg')
        else:
            img = ''
        # Hash each path with a fresh hasher; a single shared hasher would
        # accumulate every previous path into each digest.
        hasher = sha1()
        hasher.update(local_path.encode('utf-8'))
        file_key = hasher.hexdigest()
        tags = _get_tags(filepath)
        media = Media()
        media.type = content_type
        media.path = filepath
        media.filename = filename
        media.file_id = file_key
        media.tags = tags
        media.img = img
        media.save()
def update_media(id):
    payload = request.form.to_dict()
    if 'www.youtube.com/watch?v=' in payload['url']:
        payload['media_type'] = 'video'
    else:
        return jsonify(data={}, status={'code': 401, 'message': 'URL input is not valid.'})
    if payload['media_type'] == 'video':
        v_location = payload['url'].index('v')
        eq_location = payload['url'].index('=')
        if eq_location == v_location + 1:
            url_id = payload['url'][eq_location + 1: eq_location + 12]
        else:
            return jsonify(data={}, status={'code': 401, 'message': 'URL input is not valid.'})
        payload['full_html'] = ('<iframe width="560" height="315" '
                                'src="https://www.youtube.com/embed/{}" frameborder="0" '
                                'allow="accelerometer; autoplay; encrypted-media; gyroscope; '
                                'picture-in-picture" allowfullscreen></iframe>').format(url_id)
        payload['thumbnail_html'] = 'http://i.ytimg.com/vi/{}/maxresdefault.jpg'.format(url_id)
    query = Media.update(**payload).where(Media.id == id)
    query.execute()
    media_dict = model_to_dict(Media.get_by_id(id))
    return jsonify(data=media_dict, status={'code': 201, 'message': 'Success'})
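# The slicing above assumes a 'watch?v=' URL with an 11-character video id.
# A more defensive way to pull the id out is the standard library's URL
# parser; extract_youtube_id below is a hypothetical helper sketch, not part
# of the original handler.
from urllib.parse import urlparse, parse_qs

def extract_youtube_id(url):
    # parse_qs maps each query key to a list of values; a missing 'v'
    # key falls back to [None].
    query = parse_qs(urlparse(url).query)
    return query.get('v', [None])[0]

# extract_youtube_id('https://www.youtube.com/watch?v=dQw4w9WgXcQ')
# -> 'dQw4w9WgXcQ'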
def api_get_media_taglist(request):
    querytype = request.GET.get('querytype')
    curpage = request.GET.get('curpage')
    pagesize = request.GET.get('pagesize')
    # Not the last page by default
    last_page_or_not = 0
    total_count = 0
    # Paginated request
    if curpage and pagesize and querytype:
        curpage = int(curpage)
        pagesize = int(pagesize)
        offset = (curpage - 1) * pagesize
        dataList = yield from Media.getAssetList('`style` like ?', dict_style[querytype],
                                                 limit=(offset, pagesize))
        total_count = len(dataList)
        last_page_or_not = is_last_page(total_count, pagesize=pagesize)
    # Default request: first page of three items
    else:
        curpage = 1
        pagesize = 3
        dataList = yield from Media.getAssetList(limit=(0, pagesize))
        total_count = len(dataList)
        last_page_or_not = is_last_page(total_count, pagesize=pagesize)
    return dict(create_json_head(total_count, last_page_or_not), dataList=dataList)
def post(self):
    upload_files = self.get_uploads('gallery_images')
    id = self.request.get("team_id")
    team = Team.get_by_id(long(id))
    redirect_url = self.uri_for('view-team', team_id=id)
    logger.info('Uploaded files: ' + str(upload_files))
    if upload_files is not None and len(upload_files) > 0:
        files_count = len(upload_files)
        logger.info('no of files uploaded ' + str(files_count))
        for x in xrange(files_count):
            blob_info = upload_files[x]
            media_obj = Media()
            media_obj.type = constants.PHOTO
            media_obj.status = True
            media_obj.link = blob_info.key()
            media_obj.url = images.get_serving_url(blob_info.key())
            media_obj.entity_id = team.key
            media_obj.entity_type = constants.TEAM
            logger.info('Upload file detail: ' + str(media_obj))
            media_obj.put()
            logger.info('Link to picture file ' + images.get_serving_url(media_obj.link))
    return self.redirect(redirect_url)
def save_media(params):
    query = Media.insert(params).on_conflict(conflict_target=[Media.source],
                                             update=params)
    query.execute()
    mark_processed(params['source'])
    return Media.select().where(Media.digest == params['digest']).execute()[0]
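# Usage sketch for the upsert above. The params dict is assumed to carry at
# least the conflict key ('source') and the lookup key ('digest'); any other
# keys are whatever columns the Media model defines. Values here are
# illustrative only.
params = {
    'source': '/incoming/IMG_0001.jpg',
    'digest': 'd41d8cd98f00b204e9800998ecf8427e',
}
media = save_media(params)  # inserts, or updates the existing row for that source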
def mediamgnt():
    page = int(request.args.get("page", 1))
    perpage = g.config["ADMIN_ITEM_COUNT"]
    medialist = Media.get_page(perpage * (page - 1), perpage)
    pager = gen_pager(page, Media.count(), perpage, request.url)
    return render_template('admin/mediamgnt.html',
                           admin_url="mediamgnt",
                           medialist=medialist,
                           pager=pager)
def delete_user(id):
    User.delete().where(User.id == id).execute()
    Media.delete().where(Media.user_id == id).execute()
    Favorite.delete().where(Favorite.user_id == id).execute()
    Comment.delete().where(Comment.user_id == id).execute()
    return jsonify(data={}, status={
        'code': 200,
        'message': 'User deleted from all resources'
    })
def get_active_media(self, key, sport, type):
    logger.info('NdbMediaDao:: DBHIT: get_active_media for %s ' % type)
    media_query = Media.query(Media.entity_id == key,
                              Media.entity_type == type,
                              Media.status == True)
    media = media_query.fetch()
    if media is None or len(media) == 0:
        default_pic_url = get_default_media(type, sport)
        default_pic = Media()
        default_pic.url = default_pic_url
        media.append(default_pic)
    return media
def parse_tweet_media(tweet: Tweet, extended_entities: dict):
    for entity in extended_entities['media']:
        tweet.text = tweet.text.replace(entity['url'], '')
        if 'video_info' in entity:
            # Pick the variant with the highest bitrate.
            video_urls = entity['video_info']['variants']
            video_url = max([video for video in video_urls if 'bitrate' in video],
                            key=lambda x: x['bitrate'])['url']
            tweet.add_media(Media('video', video_url))
        else:
            photo_url = entity['media_url_https']
            tweet.add_media(Media('photo', photo_url))
def deduplicate():
    # Keep one row per digest: prefer organized rows, then the shortest
    # stem, then the oldest capture time.
    gold = Media.select(
        peewee.fn.first_value(Media.id).over(
            partition_by=Media.digest,
            order_by=[
                Media.organized_at.asc(nulls='LAST'),
                peewee.fn.length(Media.stem).asc(),
                Media.taken_at.asc(),
            ]).distinct())
    with Media.db:
        Media.update(duplicate=False).execute()
        Media.update(duplicate=True).where(Media.id.not_in(gold)).execute()
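# Minimal sketch of the first_value window used above, assuming peewee >= 3
# on a SQLite build with window-function support (>= 3.25). Photo is a
# hypothetical stand-in for the real Media model, with only the columns the
# window needs.
import peewee

db = peewee.SqliteDatabase(':memory:')

class Photo(peewee.Model):
    digest = peewee.CharField()
    stem = peewee.CharField()

    class Meta:
        database = db

db.create_tables([Photo])
Photo.create(digest='a', stem='IMG_0001')
Photo.create(digest='a', stem='IMG_0001-copy')

# Within each digest partition, first_value picks the id of the row with
# the shortest stem; everything outside that set is a duplicate.
gold = Photo.select(
    peewee.fn.first_value(Photo.id).over(
        partition_by=[Photo.digest],
        order_by=[peewee.fn.length(Photo.stem).asc()]).distinct())
dupes = Photo.select().where(Photo.id.not_in(gold))  # the '-copy' row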
def populate_data_into_db(media_list):
    for media in media_list:
        new_media = Media(media_dict=media)
        # Skip media that are already in the db.
        if Media.query.filter_by(name=new_media.name,
                                 artist_id=new_media.artist_id).first():
            continue
        # The artist has to be saved before any media referencing it.
        if Artist.query.filter_by(id=new_media.artist_id).first() is None:
            artist_dict = look_up_artist_info(new_media.artist_id)
            new_artist = Artist(artist_dict=artist_dict)
            new_artist.save_to_db()
        new_media.save_to_db()
    print("* Finish populating the data.")
def parse_tweet_media(self, tweet: Tweet, extended_entities: list):
    for entity in extended_entities.media:
        tweet.text = tweet.text.replace(entity.url, '')
        if 'video_info' in entity:
            video_urls = entity.video_info.variants
            video_url = max([video for video in video_urls if 'bitrate' in video],
                            key=lambda x: x.bitrate).url
            tweet.media_list.append(Media('video', video_url))
            self.logger.debug("- - Found video URL in tweet: " + video_url)
        else:
            photo_url = entity.media_url_https
            tweet.media_list.append(Media('photo', photo_url))
            self.logger.debug("- - Found photo URL in tweet: " + photo_url)
def setUp(self):
    # Set TEMPLATE_DEBUG to True to ensure {% include %} will raise
    # exceptions, since that is how inlines are rendered and #9498 will
    # bubble up if it is an issue.
    self.original_template_debug = settings.TEMPLATE_DEBUG
    settings.TEMPLATE_DEBUG = True
    self.client.login(username='******', password='******')
    # Can't load content via a fixture (since the GenericForeignKey
    # relies on content type IDs, which will vary depending on what
    # other tests have been run), thus we do it here.
    e = Episode.objects.create(name='This Week in Django')
    m = Media(content_object=e, url='http://example.com/podcast.mp3')
    m.save()
def upload_photos(self, photos):
    upload_files = self.get_uploads()
    if upload_files is not None and len(upload_files) > 0:
        files_count = len(upload_files)
        logger.info('no of files uploaded ' + str(files_count))
        for x in xrange(files_count):
            blob_info = upload_files[x]
            media_obj = Media()
            media_obj.name = self.form.media[x].data['name']
            media_obj.status = self.form.media[x].data['status']
            media_obj.link = blob_info.key()
            photos.append(media_obj)
            logger.info('Link to picture file ' + media_obj.name + ', ' +
                        images.get_serving_url(media_obj.link))
    return photos
def get(self):
    min_created_time = self.get_argument("since", 0)
    ids = Media.find_by_tag_and_created_time(self.tags[0], min_created_time)
    ret = dict(posts=ids, meta=dict(tags=self.tags))
    self.set_header("Content-Type", "application/json")
    self.write(json.dumps(ret))
def fetch(service, artist):
    if service not in services:
        return False
    try:
        metadata = utils.fetch(service, artist)
        db.connect('testing')
        artist_count = Artist.objects(name=artist, service=service).count()
        if not artist_count:
            mongo_artist = Artist(name=artist, service=service).save()
        else:
            mongo_artist = Artist.objects(name=artist, service=service).first()
        for track_title in metadata[artist]['tracks']:
            track = metadata[artist]['tracks'][track_title]
            media_count = Media.objects(artist=mongo_artist, title=track_title).count()
            if not media_count:
                # Close the file handle once the File document is saved.
                with open(track['track_folder'] + '/' + track['track_filename'], 'rb') as data:
                    f = File(name=track['track_filename'], data=data).save()
                Media(artist=mongo_artist, title=track_title, data=f).save()
        return True
    except Exception:
        return False
def test_02_00(self):
    dbsession = createdbsession('sqlite:///testdatabase.db',
                                sqlecho=False, cleardown=True)
    with open('testlocations.csv', mode='r') as fp:
        csvreader = csv.reader(fp)
        for data in csvreader:
            add_location = Locations(locationid=data[0], name=data[1],
                                     description=data[2], sublocation=data[3])
            dbsession.add(add_location)
            print(data)
    with open('testmedia.csv', mode='r') as fp:
        csvreader = csv.reader(fp)
        for data in csvreader:
            add_media = Media(mediaid=data[0], mediatype=data[1], title=data[2],
                              description=data[3], url=data[4], barcode=data[5],
                              locationid=data[6])
            dbsession.add(add_media)
            print(data)
    dbsession.commit()
def reverse():
    reverselist = request.json
    for item in reverselist:
        media = Media.get_media(fileid=item["fileid"])
        if media:
            media.display = not media.display
            media.save()
    return jsonify(success=True, message="success")
def publish_items_db(media_items, lang, headers):
    """Publish all items; if a post already exists, it is updated instead.

    :param media_items: list of items to publish
    """
    # Create session
    _session = app.apps.get("db_sqlalchemy")()
    for _item in media_items:
        _published_episodes = []
        for _episode in _item['new_episodes']:
            # Upload cover
            _cover = images.upload_images_from_urls(
                urls=[_episode['cover']],
                # watermark_code=watermark_code
            )
            # Get media type
            media_type = media.get_media_type(_episode['media_type'])
            # Prepare the chapter row
            _media_db = Media(
                media_id_reference=_episode['media_id'],
                collection_id_reference=_episode['collection_id'],
                number=_episode['episode_number'],
                title=_episode['title'],
                url=_episode['url'],
                cover=_cover['data']['images'][-1]['link'],
                serie=_item['serie'],
                media_type=media_type,
                status=constants.STATUS['initial'],
            )
            # Save the chapter in the database
            _session.add(_media_db)
            _session.flush()
            _published_episodes.append(_media_db.to_dict())
        _item['published'] = _published_episodes
    # Save in database, close the session and return the posts list
    _session.commit()
    _session.close()
    return media_items
def reverse():
    reverselist = request.json
    for item in reverselist:
        media = Media.get_by_id(item["fileid"])
        if media:
            media.display = not media.display
            media.save()
    return jsonify(success=True, message="success")
def upload_photos(self, key):
    upload_files = self.get_uploads()
    if upload_files is not None and len(upload_files) > 0:
        files_count = len(upload_files)
        logger.info('no of files uploaded ' + str(files_count))
        for x in xrange(files_count):
            blob_info = upload_files[x]
            media_obj = Media()
            media_obj.name = self.form.media[x].data['name']
            media_obj.type = constants.PHOTO
            media_obj.status = self.form.media[x].data['status']
            media_obj.primary = self.form.media[x].data['primary']
            media_obj.link = blob_info.key()
            media_obj.url = images.get_serving_url(blob_info.key())
            media_obj.entity_id = key
            media_obj.entity_type = constants.PLAYGROUND
            self.mediaDao.persist(media_obj)
            logger.info('Link to picture file ' + media_obj.name + ', ' +
                        images.get_serving_url(media_obj.link))
def post(self, request, *args, **kwargs):
    user = request.user
    media = Media.from_request(request, user)
    if media is None:
        return HttpResponse(status=413)
    resp = media.preview(user)
    return HttpResponse(json.dumps(resp), mimetype='application/json')
def api_get_media_detail(request):
    id = request.GET.get('id')
    if id:
        id = int(id)
        dataList = yield from Media.getAssetDetail('id=?', id)
        total_count = len(dataList)
    else:
        raise APIValueError('getAssetDetail', 'parse param error: id must be > 0.')
    return dict(create_json_head(total_count), dataList=dataList)
def mediamgnt(filename=None):
    if request.method == "GET":
        if "fileid" not in request.args:
            abort(400)
        else:
            media = Media.get_by_id(request.args["fileid"])
            if not media or media.filename != filename:
                abort(404)
            return send_from_directory(current_app.config["UPLOAD_FOLDER"],
                                       media.local_filename)
    elif request.method == "POST":
        f = request.files["files[]"]
        if f:
            filename = f.filename
            version = Media.get_version(filename)
            local_filename = Media.new_local_filename(filename, version)
            filepath = os.path.join(current_app.config['UPLOAD_FOLDER'], local_filename)
            f.save(filepath)
            filesize = os.stat(filepath).st_size
            now = datetime.now()
            hashstr = local_filename + now.strftime("%Y-%m-%d %H:%M:%S")
            hashstr = hashstr.encode("utf8")
            media = Media(
                fileid=hashlib.sha256(hashstr).hexdigest(),
                filename=filename,
                version=version,
                content_type=f.content_type,
                size=filesize,
                create_time=now,
                display=True
            )
            media.save()
        return json.dumps({"files": []})
    elif request.method == "DELETE":
        removelist = request.json
        for eachfile in removelist:
            fileid = eachfile["fileid"]
            filename = eachfile["filename"]
            onemedia = Media.get_by_id(fileid)
            if onemedia.filename != filename:
                continue
            onemedia.delete()
        return jsonify(success=True, message="success")
def api_get_media_recommend(request):
    type = request.GET.get('recommondtype')
    id = request.GET.get('id')
    total_count = 0
    # Asset-to-asset recommendations
    if type and id:
        id = int(id)
        if type == 'asset':
            dataList = yield from Media.getAssetRecommend('id=?', id, type='asset')
            total_count = len(dataList)
    # Front-page recommendations
    else:
        if type == 'index':
            dataList = yield from Media.getAssetRecommend("rmdposter<>'' ",
                                                          args=None, type='index')
            total_count = len(dataList)
        else:
            raise APIValueError('getAssetRecommend', 'parse param error.')
    return dict(create_json_head(total_count), dataList=dataList)
def medias():
    page = int(request.args.get("page", 1))
    perpage = g.config["ADMIN_ITEM_COUNT"]
    count, medias = Media.get_page(page, order_by=Media.id.desc(), limit=perpage)
    pager = gen_pager(page, count, perpage, request.url)
    return render_template('admin/medias.html', admin_url="medias",
                           medias=medias, pager=pager)
def getMedia(self, mediaMessageProtocolEntity, message):
    media = Media(
        type=MediaType.get_mediatype(mediaMessageProtocolEntity.getMediaType()),
        preview=mediaMessageProtocolEntity.getPreview())
    if mediaMessageProtocolEntity.getMediaType() in (
            MediaMessageProtocolEntity.MEDIA_TYPE_IMAGE,
            MediaMessageProtocolEntity.MEDIA_TYPE_AUDIO,
            MediaMessageProtocolEntity.MEDIA_TYPE_VIDEO):
        self.setDownloadableMediaData(mediaMessageProtocolEntity, media)
        media.encoding = mediaMessageProtocolEntity.encoding
        if mediaMessageProtocolEntity.getMediaType() != MediaMessageProtocolEntity.MEDIA_TYPE_AUDIO:
            message.content = mediaMessageProtocolEntity.getCaption()
    elif mediaMessageProtocolEntity.getMediaType() == MediaMessageProtocolEntity.MEDIA_TYPE_LOCATION:
        message.content = mediaMessageProtocolEntity.getLocationName()
        self.setLocationMediaData(mediaMessageProtocolEntity, media)
    elif mediaMessageProtocolEntity.getMediaType() == MediaMessageProtocolEntity.MEDIA_TYPE_VCARD:
        message.content = mediaMessageProtocolEntity.getName()
        self.setVCardMediaData(mediaMessageProtocolEntity, media)
    return media
def delete_media(id):
    try:
        Media.delete().where(Media.id == id).execute()
        Comment.delete().where(Comment.media_id == id).execute()
        Favorite.delete().where(Favorite.media_id == id).execute()
        return jsonify(data={}, status={"code": 200, "message": "resource deleted"})
    except Media.DoesNotExist:
        return jsonify(data={}, status={"code": 401, "message": "There is no media at that id"})
def fetch_media(user_pks, api, session, force_update=False):
    """
    Fetch the media for these user pks and insert them into the DB

    :user_pks a list of Instagram user primary keys
    :api the Instagram API
    :session the dbsession
    :force_update if true the queries will override previous user entries in the db
    :returns a list of media pk's
    """
    logger.info("Fetching media")
    pks = []
    for user_pk in user_pks:
        api.getUserFeed(user_pk)
        media = api.LastJson["items"]
        # The user likely has more photos, but older photos lead to less
        # currently relevant data.
        for medium in media:
            media_pk = medium["pk"]
            pks.append(media_pk)
            # Make sure the pk is not already in the db. We still want to
            # check for comments at this point, so the pk was appended anyway.
            if session.query(Media).get(media_pk) is not None and not force_update:
                continue
            is_picture = medium['media_type'] == 1
            instagram_media = Media(media_id=media_pk,
                                    instagram_user_id=user_pk,
                                    is_picture=is_picture)
            session.add(instagram_media)
            logger.debug("Got media " + str(media_pk) + " for user " + str(user_pk))
        # Can't make requests too fast
        time.sleep(config.SLEEP_TIME)
    session.commit()
    logger.info("Gathered media committed to database")
    return pks
def get_one_media(id):
    try:
        media = model_to_dict(Media.get_by_id(id))
        # Exclude the related user's password hash from the serialized rows.
        comments = Comment.select().where(Comment.media_id == media['id'])
        comments_dict = [model_to_dict(comment, exclude=[User.password])
                         for comment in comments]
        media['comments'] = comments_dict
        favorites = Favorite.select().where(Favorite.media_id == media['id'])
        favorites_dict = [model_to_dict(favorite, exclude=[User.password])
                          for favorite in favorites]
        media['favorites'] = favorites_dict
        return jsonify(data=media, status={"code": 200, "message": "Success"})
    except Media.DoesNotExist:
        return jsonify(data={}, status={"code": 401, "message": "There was an error getting the resource"})
def organize(args, pool, log):
    photos = Media.select()\
        .where(Media.organized_at.is_null(), ~Media.duplicate)\
        .order_by(Media.id.asc())
    # Create every destination directory up front.
    for parent in set(pathlib.Path(photo.path).parent for photo in photos):
        parent.mkdir(parents=True, exist_ok=True)
    for photo in photos:
        photo.organized_at = datetime.datetime.now()
        photo.save()
        source = pathlib.Path(photo.source)
        try:
            source.rename(photo.path)
            print('mv:', photo.source, photo.path)
        except FileNotFoundError as e:
            print('error:', str(e))
def sign_up_view():
    title = "Sign up page"
    form = MediaSignUpForm()
    if request.method == 'POST':
        name = form.media_name.data
        type = form.media_type.data
        email = form.media_email.data
        if Media.query.filter_by(media_name=name).first() is not None:
            message = "Media house already exists"
            return render_template("accounts/sign_up_page.html",
                                   title=title, form=form, message=message)
        password = bcrypt.generate_password_hash(form.media_password.data)  # .decode('utf-8')
        media = Media(media_name=name, media_type=type,
                      media_email=email, media_password=password)
        db.session.add(media)
        db.session.commit()
        media_id = media.id
        message = "Please confirm email to complete sign up"
        link = ('<html><head><title>C Email</title></head><body>'
                '<a href="http://127.0.0.1:5000/accounts/email-confirmation/{}">'
                'Confirm</a></body></html>').format(str(media_id))
        msg = Message(subject='Announcement Media confirmation',
                      sender=app.config.get("MAIL_USERNAME"),
                      recipients=[email],
                      body="Click here to confirm email {}".format(link))
        mail.send(msg)
        return render_template("accounts/sign_up_page.html", title=title,
                               form=form, message=message, media_id=media_id)
    else:
        return render_template("accounts/sign_up_page.html", title=title, form=form)
def import_csv():
    with open('media_list.csv') as csvfile:
        data = csv.reader(csvfile)
        # Display data in the console while importing
        for row in data:
            print(row)
            media_title = row[0]
            media_type = row[1]
            artist = row[2]
            genre = row[3]
            published_date = clean_date(row[4])
            price = clean_price(row[5])
            new_media = Media(Title=media_title, Type=media_type, Artist=artist,
                              Genre=genre, Date=published_date, Price=price)
            session.add(new_media)
        session.commit()
def main():
    logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
    url = 'http://feeds.feedburner.com/descargas2020new'
    feed = feedparser.parse(url)
    posts = []
    for entry in feed.entries:
        posts.append(Post(entry.title, entry.link))
    # Stop at the first post that is already in the database.
    for post in posts:
        contar = Media.select().where(Media.url == post.url).count()
        if contar == 0:
            scrapingRssPost(post)
            notificar.notificar(post.title)
        else:
            break
def add_media():
    try:
        payload = request.form.to_dict()
        payload['user_id'] = str(model_to_dict(current_user)['id'])
        if 'www.youtube.com/watch?v=' in payload['url']:
            payload['media_type'] = 'video'
        else:
            return jsonify(data={}, status={'code': 401, 'message': 'URL input is not valid.'})
        if payload['media_type'] == 'video':
            v_location = payload['url'].index('v')
            eq_location = payload['url'].index('=')
            if eq_location == v_location + 1:
                url_id = payload['url'][eq_location + 1: eq_location + 12]
            else:
                return jsonify(data={}, status={'code': 401, 'message': 'URL input is not valid.'})
            payload['full_html'] = ('<iframe width="560" height="315" '
                                    'src="https://www.youtube.com/embed/{}" frameborder="0" '
                                    'allow="accelerometer; autoplay; encrypted-media; gyroscope; '
                                    'picture-in-picture" allowfullscreen></iframe>').format(url_id)
            payload['thumbnail_html'] = 'http://i.ytimg.com/vi/{}/maxresdefault.jpg'.format(url_id)
        media = Media.create(**payload)
        media_dict = model_to_dict(media)
        return jsonify(data=media_dict, status={'code': 201, 'message': 'Success'})
    except Exception:
        return jsonify(data={}, status={'code': 401, 'message': 'Something went wrong!'})
def get(self):
    user, logout = check_user(users.get_current_user())
    if user:
        upload_url = blobstore.create_upload_url('/upload')
        template_args = {'logout_url': users.create_logout_url('/')}
        media_list = list()
        more = True
        curs = None
        while more:
            m, curs, more = Media.query().order(-Media.when).fetch_page(
                10, start_cursor=curs)
            for mitem in m:
                media_list.append(mitem)
        template_args['media'] = media_list
        template_args['upload_url'] = upload_url
        template = JINJA_ENVIRONMENT.get_template('media.html')
        self.response.write(template.render(template_args))
    else:
        self.redirect('/admin')
def get_primary_media(self, entities, type):
    logger.info('NdbMediaDao:: DBHIT: get_primary_media for %s' % type)
    media_map = dict()
    entity_ids = map(lambda entity: entity.key, entities)
    if entity_ids is not None and len(entity_ids) > 0:
        # Assign a default pic to every entity first; entities that have a
        # primary media will have it replaced below.
        for entity in entities:
            media_map[entity.key] = get_default_media(type, entity.sport)
        logger.debug('entity ids before dedupe %s ' % len(entity_ids))
        # Dedupe entity ids. Not doing this at the entities level, as
        # entities are not hashable.
        entity_ids = list(set(entity_ids))
        logger.debug('entity ids after dedupe %s ' % len(entity_ids))
        media_query = Media.query(Media.entity_id.IN(entity_ids),
                                  Media.entity_type == type,
                                  Media.primary == True)
        media_list = media_query.fetch()
        for media in media_list:
            media_map[media.entity_id] = media.url
    return media_map
def job_finished_cb(patero, job):
    tmp = []
    for filename in job['output']['files']:
        copy_or_link(filename, common.output_dir)
        os.unlink(filename)
        tmp.append(os.path.join(common.output_dir, os.path.basename(filename)))
    job['output']['files'] = tmp
    job.save()
    # Tell Caspa the file is ready.
    media = Media()
    for key in ['metadata', 'stat']:
        media.update(job['output'][key])
    media['_id'] = job['output']['checksum']
    media['checksum'] = job['output']['checksum']
    media['files'] = job['output']['files']
    media['file'] = job['output']['transcoded']
    media['stat'] = job['output']['stat']
    media.save()
def add_mediasource(self, data):
    mediasource = MediaSource(name=data['name'], user_id=data['user_id'],
                              module_id=data['module_id'])
    db.session.add(mediasource)
    db.session.commit()
    data['media_source_id'] = mediasource.id
    module_class = mediasources.__dict__[mediasource.module.name].get_module_class()
    module = module_class(data)
    db.session.add(module)
    db.session.commit()
    medias_model = []
    medias = module.get_all_medias()
    # Fetch each media's metadata on its own thread.
    for media in medias:
        content_type = db.session.query(ContentType).filter(
            ContentType.name == media['content_type']).first()
        if content_type:
            content_type_id = content_type.id
            content_type = '_'.join(content_type.name.split('/'))
            content_type = content_types.__dict__[content_type]
            media_url = module.get_media_url(media['url'])
            thread = Thread(target=self._get_media_dict, args=(content_type, media_url,))
            self._threads.append(thread)
            thread.start()
    for thread in self._threads:
        thread.join()
    for media_dict in self._medias_dict:
        artist = Artist.query.filter(Artist.name == media_dict['artist']).first()
        if not artist:
            artist = Artist(name=media_dict['artist'])
            db.session.add(artist)
            db.session.commit()
        media_model = Media(name=media_dict['name'], content_type_id=content_type_id,
                            url=media_dict['url'], source_id=mediasource.id, artist=artist)
        media_model.genre = media_dict['genre']
        if 'date' in media_dict:
            media_model.date = media_dict['date']
        if 'collection' in media_dict:
            media_model.collection = media_dict['collection']
        if 'collection_position' in media_dict:
            media_model.coll_pos = media_dict['collection_position']
        db.session.add(media_model)
        medias_model.append(media_model)
    db.session.commit()
    for media in medias_model:
        for genre in media.genre:
            genre_id = db.session.query(Genre.id).filter(Genre.name == genre).first()
            if genre_id:
                db.session.add(MediaGenre(genre_id=genre_id[0], media_id=media.id))
            else:
                genre_ = Genre(name=genre)
                db.session.add(genre_)
                db.session.commit()
                db.session.add(MediaGenre(genre_id=genre_.id, media_id=media.id))
        if hasattr(media, 'collection'):
            playlist_id = db.session.query(Playlist.id).filter(
                Playlist.collection == True, Playlist.name == media.collection).first()
            if playlist_id:
                db.session.add(MediaPlaylist(playlist_id=playlist_id[0], media_id=media.id))
            else:
                playlist = Playlist(name=media.collection, collection=True)
                db.session.add(playlist)
                db.session.commit()
                db.session.add(MediaPlaylist(playlist_id=playlist.id, media_id=media.id))
    db.session.commit()
    return True
def artist(name):
    artists = Artist.objects(name=name)
    return render_template('artist.html',
                           artists=artists,
                           media=Media.objects(artist=artists.first()),
                           user=login.current_user)
def app():
    app_running = True
    while app_running:
        choice = menu()
        if choice == '1':
            # Add a media item
            title = input('Media Title: ')
            media_type = input('Media Type: ')
            author = input('Media Author: ')
            genre = input('Genre: ')
            date_error = True
            while date_error:
                date = input('Published Date (use date format: January 1, 2021): ')
                date = clean_date(date)
                if type(date) == datetime.date:
                    date_error = False
            price_error = True
            while price_error:
                price = input('Price (use price format 22.99): £')
                price = clean_price(price)
                if type(price) == int:
                    price_error = False
            # Add the data to the db
            new_media_add = Media(media_title=title, media_type=media_type,
                                  artist=author, genre=genre,
                                  published_date=date, price=price)
            session.add(new_media_add)
            session.commit()
            # Confirm, then pause before the menu is displayed again
            print("\n\n** Media Successfully Added! **")
            time.sleep(1.3)
        elif choice == '2':
            # View all media
            for media in session.query(Media):
                print(f'{media.media_title} >> {media.media_type} >> {media.artist} '
                      f'>> {media.genre} >> {media.published_date} >> {media.price}\n')
            input("\nPress Enter to return to the main menu")
        elif choice == '3':
            # Search for a media item, then edit or delete it
            id_options = []
            for media in session.query(Media):
                id_options.append(media.id)
            id_error = True
            while id_error:
                id_choice = input(f'''
                    \rMedia ID Options: {id_options}
                    \rEnter Media id: ''')
                id_choice = clean_id(id_choice, id_options)
                if type(id_choice) == int:
                    id_error = False
            searched_media = session.query(Media).filter(Media.id == id_choice).first()
            # Display the media information (price is stored in pence)
            print(f'''
                \r{searched_media.media_title} by {searched_media.artist}
                \rPrice: £{searched_media.price / 100}''')
            sub_choice = sub_menu()
            if sub_choice == "1":
                # Edit the selected item
                searched_media.media_title = edit_check('Title', searched_media.media_title)
                searched_media.media_type = edit_check('Media Type', searched_media.media_type)
                searched_media.artist = edit_check('Artist', searched_media.artist)
                searched_media.genre = edit_check('Genre', searched_media.genre)
                searched_media.published_date = edit_check('Date', searched_media.published_date)
                searched_media.price = edit_check('Price', searched_media.price)
                session.commit()
                print("\nMedia Details Updated!")
                time.sleep(1.5)
            elif sub_choice == "2":
                # Delete the selected item
                session.delete(searched_media)
                session.commit()
                print('\nMedia Deleted!')
                time.sleep(1.5)
        elif choice == '4':
            # Analyse media item details
            oldest_media = session.query(Media).order_by(Media.published_date).first()
            newest_media = session.query(Media).order_by(Media.published_date.desc()).first()
            # Total count of all records
            total_media = session.query(Media).count()
            # Count of records whose title contains the given text
            search_media_count = session.query(Media).filter(
                Media.media_title.like('%Media%')).count()
            print('\n***** Analyse Media on DB *****')
            print(f'''
                \rOldest Media: {oldest_media}
                \rNewest Media: {newest_media}
                \rTotal Records in the Database: {total_media}
                \rSearch "%Media%" Count: {search_media_count}''')
            input('\nPress Enter to return to the main menu.')
        else:
            # Default option: exit the application
            print('\n\nGOODBYE')
            app_running = False
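# Hedged sketch of the clean_date/clean_price helpers the menu loops above
# rely on; the real implementations are not shown in this file. clean_price
# is assumed to return the price in pence as an int, which is why the detail
# view divides by 100.
import datetime

def clean_date(date_str):
    try:
        return datetime.datetime.strptime(date_str, '%B %d, %Y').date()
    except ValueError:
        print('Date format should look like: January 1, 2021')
        return None

def clean_price(price_str):
    try:
        return int(round(float(price_str) * 100))
    except ValueError:
        print('Price format should look like: 22.99')
        return None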
def get(media_id):
    media = Media.get_visible_or_404(media_id)
    image = Media.get_image_file(media, request.args.get('size'))
    return Response(image, mimetype='image/' + image.format,
                    direct_passthrough=True)
def put(media_id):
    return Media.put(media_id, request.files.get('image'))
def delete(media_id):
    Media.delete(media_id)
    return '', 204
def medias(filename=None):
    if request.method == "GET":
        media = Media.get_media(filename=filename)
        if not media:
            abort(404)
        return send_from_directory(current_app.config["UPLOAD_FOLDER"], media.filename)
    elif request.method == "POST":
        f = request.files["files[]"]
        if not f:
            return
        filename = f.filename
        # Save the file to the local folder; if it already exists, replace it.
        filepath = os.path.join(current_app.config['UPLOAD_FOLDER'], filename)
        if os.path.exists(filepath):
            os.remove(filepath)
        f.save(filepath)
        filesize = os.stat(filepath).st_size
        now = datetime.now()
        # If a record with the same name exists, update it in place.
        media = Media.get_one(Media.filename == filename)
        if not media:
            media = Media(filename=filename)
        media.fileid = hashlib.sha256(filename.encode("utf-8")).hexdigest()
        media.content_type = f.content_type
        media.size = filesize
        media.create_time = now
        media.save()
        return jsonify({"files": []})
    elif request.method == "DELETE":
        removelist = request.json
        for eachfile in removelist:
            fileid = eachfile["fileid"]
            filename = eachfile["filename"]
            onemedia = Media.get_one(Media.fileid == fileid)
            if onemedia.filename != filename:
                continue
            # Remove the file from the folder
            try:
                os.remove(onemedia.filepath(current_app.config["UPLOAD_FOLDER"]))
            except Exception:
                pass
            # Remove the record from the database
            onemedia.delete_instance()
        return jsonify(success=True, message="success")
def post():
    return Media.create(request.files.get('image')), 201
def get(self):
    bootstrap_data = dict(posts=Media.find_by_tag(self.tags[0], 15))
    self.render("index.html",
                bootstrap_data_json=json.dumps(bootstrap_data),
                tags=self.tags)
def import_blog():
    f = request.files["file"]
    try:
        data = f.stream.read().decode("utf-8")
        data = json.loads(data)
        links = data.pop("links", [])
        medias = data.pop("medias", [])
        posts = data.pop("posts", [])
        for link in links:
            new_link = Link.get_by_href(link["href"])
            if new_link:
                continue
            new_link = Link()
            for item in link:
                new_link.__dict__[item] = link[item]
            new_link.link_id = None
            new_link.create_time = datetime.fromtimestamp(new_link.create_time)
            new_link.save()
        for media in medias:
            new_media = Media.get_by_fileid(media["fileid"])
            if new_media:
                continue
            new_media = Media()
            for item in media:
                new_media.__dict__[item] = media[item]
            # Notice: the media id should not be set to None.
            new_media.media_id = None
            new_media.create_time = datetime.fromtimestamp(new_media.create_time)
            new_media.save()
        for post in posts:
            # If the post already exists, skip it.
            new_post = Post.get_by_url(post["url"], public_only=False)
            if new_post:
                continue
            new_post = Post()
            for item in post:
                new_post.__dict__[item] = post[item]
            new_post.post_id = None
            new_post.create_time = datetime.fromtimestamp(new_post.create_time)
            new_post.update_time = datetime.fromtimestamp(new_post.update_time)
            new_post.raw_content = re.sub('<[^<]+?>', "", new_post.content)
            newtags = new_post.tags
            new_post.tags = ""
            new_post.update_tags(newtags)
            new_post.save()
            # Restore the post's comments
            comments = post["commentlist"]
            for comment in comments:
                new_comment = Comment()
                for item in comment:
                    new_comment.__dict__[item] = comment[item]
                new_comment.post_id = new_post.post_id
                new_comment.comment_id = None
                new_comment.create_time = datetime.fromtimestamp(new_comment.create_time)
                new_comment.save()
    except Exception as e:
        return str(e)
    return "Done"