async def post(self, request: HTTPConnection):
    """Handle the new-challenge form submission.

    Validates the submitted ChallengeForm, rejects titles that cannot be
    slugged or that collide with an existing challenge's title/slug, then
    creates the challenge and redirects to its view page.  On validation
    failure, re-renders the creation template with errors attached.
    """
    form = await request.form()
    form = ChallengeForm(form)
    is_valid = form.validate()
    # The URL slug is derived from the title; an empty slug would produce
    # an unroutable URL, so treat that as a validation error.
    if not slug(form.title.data):
        is_valid = False
        form.title.errors.append(
            "A valid url-safe name cannot be generated for this title.")
    # Reject titles whose text OR derived slug collides with an existing row.
    if (await Challenge.query.where(
            sa.or_(
                Challenge.title == form.title.data,
                Challenge.slug == slug(form.title.data),
            )).gino.first() is not None):
        is_valid = False
        form.title.errors.append(
            f"A challenge with the title conflicting with '{form.title.data}' already exists."
        )
    if is_valid:
        # A challenge stores either a flag or a free-form answer, never both.
        f_a = form.flag_or_answer.data
        flag, answer = (f_a, None) if form.is_flag.data else (None, f_a)
        challenge = await Challenge.create_auto(
            title=form.title.data,
            content=form.content.data,
            flag=flag,
            answer=answer,
            hidden=form.hidden.data,
            depreciated=form.depreciated.data,
            points=form.points.data,
            tags=form.tags.data,
        )
        url = request.url_for("challenge_view", slug=challenge.slug)
        # Hidden challenges are created silently (no public announcement).
        if not challenge.hidden:
            await log_create("challenge", challenge.title,
                             request.user.username, url)
        return redirect_response(url=url)
    # Validation failed: re-render the form with image/tag helper data.
    images = await encoded_existing_images(request)
    # NOTE: `tags` here is the JSON-encoded tag list for the template,
    # unrelated to form.tags above.
    tags = orjson.dumps(await get_all_tags(include_hidden=True))
    return templates.TemplateResponse(
        "challenge/new.j2",
        {
            "request": request,
            "form": form,
            "existing_images": images,
            "existing_tags": tags,
        },
    )
async def post(self, request: HTTPConnection):
    """Handle a blog-edit form submission.

    Loads the blog by path id, checks edit permission, validates the
    PostForm (including slug generation and title uniqueness), then
    applies the update and redirects to the blog's view page.  On
    validation failure, re-renders the edit template with errors.
    """
    id = request.path_params["id"]
    blog = await Blog.get(id)
    if blog is None:
        return abort(404, "Blog not found")
    if not can_edit(request, blog.author_id):
        return abort(400)
    form = await request.form()
    form = PostForm(form)
    is_valid = form.validate()
    # An empty derived slug would make the blog unroutable.
    if not slug(form.title.data):
        is_valid = False
        form.title.errors.append(
            "A valid url-safe name cannot be generated for this title.")
    # Only run the uniqueness query when the title actually changed.
    # BUG FIX: the original compared blog.title != blog.title.data —
    # blog.title is a plain string with no .data attribute; the submitted
    # value lives on the form field.
    if blog.title != form.title.data:
        if (await Blog.query.where(
                sa.or_(Blog.title == form.title.data,
                       Blog.slug == slug(form.title.data))).gino.first()
                is not None):
            is_valid = False
            form.title.errors.append(
                f"A blog with the title '{form.title.data}' already exists."
            )
    if is_valid:
        await blog.update_auto(title=form.title.data,
                               tags=form.tags.data,
                               content=form.content.data).apply()
        url = request.url_for("blog_view", slug=blog.slug)
        await log_edit("blog", blog.title, request.user.username, url)
        return redirect_response(url=url)
    # Validation failed: re-render the form with helper data for the editor.
    images = await encoded_existing_images(request)
    tags = orjson.dumps(await get_all_tags())
    return templates.TemplateResponse(
        "blog/edit.j2",
        {
            "request": request,
            "form": form,
            "blog": blog,
            "existing_images": images,
            "existing_tags": tags,
        },
    )
def _save_pdf(self, district, year, release, filename, bcontent):
    """Save PDF bytes to disk under output/<district>/<year>/.

    BUG FIX: `filename` was slugged but never used in the output path, so
    different documents for the same district/year/release overwrote each
    other; the slugged filename is now part of the target file name.
    """
    district = slug(district)
    filename = slug(filename)
    # Reverse the '/'-separated release parts (e.g. a date like d/m/y) so
    # the most significant part leads, then collapse into one slug.
    release = release.strip().split('/')
    release.reverse()
    release = slug(''.join(release))
    os.makedirs(f'output/{district}/{year}', exist_ok=True)
    with open(f'output/{district}/{year}/{filename}-{release}.pdf', 'wb') as file:
        file.write(bcontent)
async def post(self, request: HTTPConnection):
    """Handle the new-writeup form submission.

    Validates the submitted WriteupForm, rejects titles that cannot be
    slugged or that collide with an existing writeup's title/slug, then
    creates the writeup (owned by the requesting user) and redirects to
    its view page.  On validation failure, re-renders the creation
    template with errors attached.
    """
    form = await request.form()
    form = WriteupForm(form)
    is_valid = form.validate()
    # The URL slug is derived from the title; an empty slug would produce
    # an unroutable URL, so treat that as a validation error.
    if not slug(form.title.data):
        is_valid = False
        form.title.errors.append(
            "A valid url-safe name cannot be generated for this title."
        )
    # Reject titles whose text OR derived slug collides with an existing row.
    if (
        await Writeup.query.where(
            sa.or_(
                Writeup.title == form.title.data,
                Writeup.slug == slug(form.title.data),
            )
        ).gino.first()
        is not None
    ):
        is_valid = False
        form.title.errors.append(
            f"A writeup with the title conflicting with '{form.title.data}' already exists."
        )
    if is_valid:
        writeup = await Writeup.create_auto(
            author_id=request.user.discord_id,
            title=form.title.data,
            tags=form.tags.data,
            content=form.content.data,
            private=form.private.data,
        )
        url = request.url_for("writeups_view", slug=writeup.slug)
        await log_create("writeup", writeup.title, request.user.username, url)
        return redirect_response(url=url)
    # Validation failed: re-render the form with image/tag helper data.
    images = await encoded_existing_images(request)
    # Positional True — presumably include_hidden; matches the keyword
    # form used by the challenge handler.
    tags = orjson.dumps(await get_all_tags(True))
    return templates.TemplateResponse(
        "writeups/new.j2",
        {
            "request": request,
            "form": form,
            "existing_images": images,
            "existing_tags": tags,
        },
    )
def writejob(job):
    """
    Converts job object into XML for upload into WPJobBoard
    :param job: job object
    :return: Job formatted as XML (lxml.etree.Element object)
    :raises TypeError: if ``job`` is not a ``Job`` instance
    """
    # Idiom fix: isinstance() is the canonical type check (and accepts
    # Job subclasses) instead of `type(job) == Job`.
    if not isinstance(job, Job):
        raise TypeError("Argument job must be type Job")
    # Creation of job xml format
    jobel = ET.Element('job')
    # add each subelement to the xml
    ET.SubElement(jobel, 'id').text = str(job.id)
    ET.SubElement(jobel, 'employer_id').text = str(job.company.id)
    ET.SubElement(jobel, 'company_name').text = str(job.company.name)
    ET.SubElement(jobel, 'company_url').text = str(job.company.url)
    ET.SubElement(jobel, 'company_email').text = str(job.company.email)
    ET.SubElement(jobel, 'job_title').text = str(job.title)
    # Slug combines company, title and start date so postings stay unique.
    ET.SubElement(jobel, 'job_slug').text = slug(
        str(job.company.name) + str(job.title) + str(job.startdate))
    # CDATA keeps embedded HTML in the description intact (lxml feature).
    ET.SubElement(jobel, 'job_description').text = ET.CDATA(str(job.desc))
    ET.SubElement(jobel, 'job_country').text = str(job.location.country)
    ET.SubElement(jobel, 'job_state').text = str(job.location.state)
    ET.SubElement(jobel, 'job_city').text = str(job.location.city)
    ET.SubElement(jobel, 'job_created_at').text = str(job.startdate)
    ET.SubElement(jobel, 'job_expires_at').text = str(job.enddate)
    ET.SubElement(jobel, 'is_active').text = str(job.active)
    ET.SubElement(jobel, 'is_approved').text = str(job.approved)
    ET.SubElement(jobel, 'is_filled').text = str(job.filled)
    ET.SubElement(jobel, 'is_featured').text = str(job.feat)
    # Only add tags to XML if tags are present
    if job.tags:
        tags = ET.SubElement(jobel, 'tags')
        # For each tag in list, add subelement
        for tg in job.tags:
            tag = ET.SubElement(tags, 'tag')
            ET.SubElement(tag, 'type').text = str(tg.type)
            ET.SubElement(tag, 'title').text = str(tg.title)
            ET.SubElement(tag, 'slug').text = slug(str(tg.title))
    # Only add metas to XML if metas are present
    if job.metas:
        metas = ET.SubElement(jobel, 'metas')
        for mt in job.metas:
            # For each meta in list, add subelement
            meta = ET.SubElement(metas, 'meta')
            ET.SubElement(meta, 'name').text = str(mt.name)
            ET.SubElement(meta, 'value').text = str(mt.value)
    return jobel
def CreateShop():
    """Create a Shop from the JSON request body and a public folder for it.

    Returns the saved shop as a JSON response.

    BUG FIX: os.mkdir was called with mode 755 (decimal, i.e. 0o1363);
    the intended permission mask is octal 0o755.
    """
    body = request.get_json()
    body['slug'] = slug.slug(body['name'])
    items = Shop(**body).save()
    # Folder named after the new shop's database id.
    path = '%s/public/%s' % (os.getcwd(), items.id)
    os.mkdir(path, 0o755)
    return Response(items.to_json(), mimetype="application/json", status=200)
def _handle_web_page(web_page_url: str) -> str:
    """Render a web page to PDF and return the output file path.

    The PDF is named after the slug of the page's <title> (falling back
    to the URL itself when no title is available).
    """
    html = fetch_html_page(web_page_url)
    parsed = html_parser_from(html)
    if parsed.title and parsed.title.string:
        title_source = parsed.title.string
    else:
        title_source = web_page_url
    output_pdf = OUTPUT_DIR / f"{slug(title_source)}.pdf"
    command = f'./webpage_to_pdf.py -i "{web_page_url}" -o "{output_pdf}" --headless'
    py_executable_checklist.workflow.run_command(command)
    return output_pdf.as_posix()
def get_filename(key, value):
    """Build a 'YYYY-MM-DD-<title-slug>.md' filename from a timestamped
    key and a record's title; return None if anything is malformed."""
    try:
        epoch_seconds = int(key[10:]) // 1000
        moment = datetime.datetime.utcfromtimestamp(epoch_seconds)
        title_part = slug.slug(value.get('title'))
        return '%s-%s.md' % (moment.strftime('%Y-%m-%d'), title_part)
    except Exception:
        # Best-effort: any parse/slug failure yields no filename.
        return None
def page_title_from(self, child_links_folder, link_in_comment):
    """Return the linked page's <title>, or the link itself when absent.

    The fetched HTML is cached under child_links_folder as <slug>.html.
    """
    cached_page = child_links_folder / f"{slug(link_in_comment)}.html"
    html = fetch_html(link_in_comment, cached_page)
    parsed = html_parser_from(html)
    if parsed.title and parsed.title.string:
        return parsed.title.string
    return link_in_comment
def create_component_view(self):
    """Build a ComponentView covering every component of this container."""
    view = ComponentView()
    view.title = f"Component view for {self.name}"
    view.key = slug.slug(self.name)
    view.containerId = self.id
    for item in self.components:
        view.addComponent(item)
    return view
def create_project(project):
    """creates a new project and creates the corresponding folder

    Assigns the project a slug-based id, persists it, and ensures the
    output directory <LIBINTEL_DATA_DIR>/out/<project_id> exists.
    """
    with app.app_context():
        location = app.config.get("LIBINTEL_DATA_DIR")
        project.project_id = slug.slug(project.name)
        save_project(project)
        # os.path.join + exist_ok avoids both manual '/'-concatenation and
        # the check-then-create race of the original exists()/makedirs() pair.
        path = os.path.join(location, 'out', project.project_id)
        os.makedirs(path, exist_ok=True)
        return project
def create_container_view(self):
    """Build a ContainerView covering this system's containers and the
    destinations of their relationships."""
    view = ContainerView()
    view.title = f"Container view for {self.name}"
    view.key = slug.slug(self.name)
    view.softwareSystemId = self.id
    for item in self.containers:
        view.addContainer(item)
        # Pull in every container this one talks to, so edges render.
        for rel in item.relationships:
            view.addContainer(rel.destinationId)
    return view
def get_current_user_name():
    """Return the slugged login name of the current OS user."""
    def from_unix():
        # POSIX: read the passwd entry for the current user.
        return get_current_user_info().pw_name

    def from_windows():
        import getpass
        return getpass.getuser()

    raw_name = exec_by_platform(from_unix, from_windows, from_unix)
    return slug(raw_name)
def _load_buffers(buffers_path=None):
    """Load newline-stripped line lists for every file in buffers_path,
    keyed by the slug of each file's base name (extension dropped).

    BUG FIX: the file handle was never closed (resource leak); a `with`
    block now guarantees closure.  The lazy map() was replaced by a list
    comprehension so all lines are materialised before the file closes
    (under Python 3, map() is lazy and would read from a closed file).
    """
    buffers = {}
    for b_filename in glob.glob(os.path.join(buffers_path, '*')):
        with open(b_filename, 'r') as buffer_file:
            lines = [line.replace('\n', '') for line in buffer_file.readlines()]
        b_filename = os.path.basename(b_filename.split('.')[0])
        buffer_slug = slug.slug(u"%s" % b_filename)
        buffers[buffer_slug] = lines
    return buffers
def add_success(self, appstruct):
    """Create a Song from the validated upload form, store it under a
    slug of its title, and redirect to the management view."""
    title = appstruct['title']
    artist = appstruct['artist']
    timing = appstruct['timing']
    stream = appstruct['file']['fp']
    key = slug.slug(title)
    factory = self.request.registry.content
    song = factory.create('Song', title, artist, timing, stream)
    self.context[key] = song
    return HTTPFound(self.request.sdiapi.mgmt_path(self.context))
def run(self, context):
    """Read the thread topic and page count from the open browser page,
    storing derived file names in the pipeline context."""
    browser = context["browser"]
    topic = browser.find_elements_by_css_selector("h2.topic-title")[0].text
    slugged = slug(topic)
    context["thread_topic"] = topic
    context["slugged_thread_topic"] = slugged
    context["complete_html_page"] = "{}.html".format(slugged)
    context["complete_pdf_page"] = slugged + ".pdf"
    # The second-to-last pagination link holds the last page number.
    pagination = browser.find_elements_by_css_selector("div.pagination")[0]
    last_page_link = pagination.find_elements_by_css_selector("li a")[-2]
    context["total_pages"] = int(last_page_link.text)
def handle(self, *args, **options):
    """Import Phone rows from phones.csv (semicolon-separated, with a
    header line that is skipped)."""
    with open('phones.csv', 'r') as csvfile:
        rows = csv.reader(csvfile, delimiter=';')
        next(rows)  # discard the header row
        for row in rows:
            Phone.objects.create(
                name=row[1],
                image=row[2],
                price=float(row[3]),
                release_date=row[4],
                lte_exists=row[5],
                slug=slug.slug(row[1]),
            )
def _load_samples(samples_path=None, ext_list=None):
    """Index sample files under samples_path by slug, plus a 'packs'
    mapping of sub-directories of the global samples folder.

    BUG FIX: ``ext_list`` used a mutable default argument ([]), which is
    shared across calls; it now defaults to None and is normalised locally.

    NOTE(review): the 'packs' scan globs ``cfg.samples_path`` rather than
    the ``samples_path`` argument — looks intentional (global packs dir),
    but confirm.
    """
    if ext_list is None:
        ext_list = []
    samples = {}
    if samples_path:
        samples_path = os.path.abspath(samples_path)
        if os.path.isdir(samples_path):
            samples_dir = slug.slug(u"%s" % os.path.basename(samples_path))
            samples[samples_dir] = {}
            for ext in ext_list:
                s_path = os.path.join(samples_path, '*.%s' % ext)
                for sample in glob.glob(s_path):
                    s_filename = os.path.basename(sample).replace(ext, '')
                    sample_slug = slug.slug(u"%s" % s_filename)
                    samples[samples_dir][sample_slug] = O(path=sample)
    samples['packs'] = {}
    for directory in glob.glob(os.path.join(cfg.samples_path, "*/")):
        d_name = os.path.basename(re.sub("/$", "", directory))
        d_slug = slug.slug(u"%s" % d_name)
        samples['packs'][d_slug] = {}
    return samples
def get_current_user_name() -> str:
    """Return a slug combining the OS user name with a url-safe hash of
    the hostname."""
    host_hash = generate_urlsafe_hash(node())

    def from_unix():
        # POSIX: read the passwd entry for the current user.
        return get_current_user_info().pw_name

    def from_windows():
        import getpass
        return getpass.getuser()

    user = exec_by_platform(from_unix, from_windows, from_unix)
    return slug("%s-%s" % (user, host_hash))
def _init_tables(self, source_key):
    """Normalise and validate the table configs of one database source.

    Merges each table entry over the built-in and per-source defaults,
    folds 'all'-scope excluded fields into every method/endpoint scope,
    derives model_name and slug, and rejects duplicate model names.

    Fixes: removed a leftover debug ``print(_table)`` and the unused
    ``key`` loop variable.
    """
    if "tables" in self.database[source_key] and len(
            self.database[source_key]["tables"]) > 0:
        _global_defaults = copy.deepcopy(
            self.database[source_key]["defaults"]["tables"])
        # audit_logger is a per-table opt-in; never inherit it globally.
        if "audit_logger" in _global_defaults["extensions"]:
            del _global_defaults["extensions"]["audit_logger"]
        # Layered merge: built-in defaults <- source defaults <- table entry.
        self.database[source_key]["tables"] = [
            dict(
                dict_deepmerge(
                    {},
                    dict_deepmerge(
                        {},
                        _table_default_config,
                        _global_defaults,
                    ),
                    t,
                )) for t in self.database[source_key]["tables"]
        ]
        for _table in self.database[source_key]["tables"]:
            if "fields_excluded" in _table:
                # Fold the 'all' exclusions into every method/endpoint
                # scope without duplicating entries already listed there.
                for _method in _table["fields_excluded"].keys():
                    if _method == "all":
                        continue
                    for _endpoint in _table["fields_excluded"][
                            _method].keys():
                        if _table["fields_excluded"][_method][_endpoint]:
                            _table["fields_excluded"][_method][
                                _endpoint].extend(
                                    x for x in _table["fields_excluded"]
                                    ["all"]
                                    if x not in _table["fields_excluded"]
                                    [_method][_endpoint])
                        else:
                            _table["fields_excluded"][_method][
                                _endpoint] = _table["fields_excluded"][
                                    "all"]
            # The alias, when set, wins over the raw table name.
            _model_name = self.get_class_name_from_model_name(
                _table["name"] if not _table["alias"] else _table["alias"])
            _table["model_name"] = _model_name
            _table["slug"] = slug.slug(
                _table["name"] if not _table["alias"] else _table["alias"])
            if _model_name in self.model_names:
                raise ConfigError(f"""
                Table '{_table['name']}' with alias '{_table['alias']}' is already defined.
                Please add or change the alias
                """)
            self.model_names.append(_model_name)
def accessible(self, link, child_links_folder):
    """Return True when the link's HTML is already cached or can be
    fetched; False (with a logged error) on any failure."""
    cached_page = child_links_folder / f"{slug(link)}.html"
    try:
        if cached_page.exists():
            return True
        fetch_html(link, cached_page)
    except Exception as e:
        # Best-effort probe: any failure just marks the link inaccessible.
        logging.error(f"💥 {e}")
        return False
    return True
def CreateProduct():
    """Create a Product from the JSON body; when a file is attached,
    save it into the fixed public folder instead.

    BUG FIX: werkzeug's FileStorage.save(dst, buffer_size) takes a single
    destination path — the original passed the directory and the filename
    as two arguments, so the filename was consumed as buffer_size.  They
    are now joined into one destination path.
    """
    name = ''
    path = '%s/public/5ef26c251a8d3f34c2351c66/' % (os.getcwd())
    print(path)
    body = request.get_json()
    if request.files:
        f = request.files['file']
        # NOTE(review): f.filename is client-controlled; consider
        # werkzeug.utils.secure_filename before joining into a path.
        f.save(os.path.join(path, f.filename))
        return 'file uploaded successfully'
    # body['image'] = hashlib.md5(slug.slug(body['name']).encode()).hexdigest()[:15]
    body['slug'] = slug.slug(body['name'])
    print(body)
    items = Product(**body).save()
    return Response(items.to_json(), mimetype="application/json", status=200)
def run(self, context):
    """Create the working folder tree for this post under ./.temp and
    publish the paths into the pipeline context."""
    title_slug = slug(self.post_title)
    base = Path(f"{os.getcwd()}/.temp") / title_slug
    links = base / "links"
    thumbs = base / "thumbnails"
    for folder in (base, links, thumbs):
        folder.mkdir(parents=True, exist_ok=True)
    # output
    context["target_folder"] = base
    context["child_links_folder"] = links
    context["thumbnails_folder"] = thumbs
def on_click(self):
    """Run the scan for the entered city/category, then show the outcome
    in a message box and clear the inputs on success."""
    city_slug = slug.slug(self.proc.transliterate(self.citytext.text()))
    category_slug = slug.slug(self.proc.transliterate(self.categorytext.text()))
    # Both slugs must be non-empty before starting the scan.
    if not (city_slug and category_slug):
        return False
    tasks = [
        asyncio.ensure_future(
            self.proc.result_hub(city_slug, category_slug, self.loop),
            loop=self.loop),
        asyncio.ensure_future(self.progress_checked()),
    ]
    self.loop.run_until_complete(asyncio.wait(tasks))
    if self.proc.status:
        message = ('Сканирование завершено успешно! \n резултат: ~/Desktop/avito_parser/'
                   + city_slug + '_' + category_slug + '.xslx')
        QMessageBox.information(self, 'успешно', message,
                                QMessageBox.Ok, QMessageBox.Ok)
        self.citytext.setText("")
        self.categorytext.setText("")
    else:
        message = 'Сканирование незавершено. Вероятно введены неверные данные.'
        QMessageBox.information(self, 'что-то пошло не так', message,
                                QMessageBox.Ok, QMessageBox.Ok)
def thumbnail(self, thumbnails_folder, page_link):
    """Generate (or reuse) a PNG thumbnail for page_link.

    Returns the cached path as a str when the file already exists, else a
    Path after attempting generation (NOTE(review): this str/Path
    asymmetry is preserved from the original — confirm callers rely on it).

    BUG FIX: the bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt; narrowed to ``except Exception``.
    """
    page_slug = slug(page_link)
    target_path = thumbnails_folder / f"{page_slug}.png"
    cmd = f"./thumbnail_generator.py -i '{page_link}' -o {target_path} --headless"
    if target_path.exists():
        logging.info(f"🌕 Thumbnail already exists for {page_link}. Run {cmd} to update it")
        return target_path.as_posix()
    try:
        run_command(cmd)
    except Exception:
        # Generation is best-effort; tell the user how to retry by hand.
        logging.info(f"❌ Command failed. Try running it again {cmd}")
    return target_path
def run(self, context):
    # Build a Hugo-style TOML front-matter header from the markdown's
    # first heading, then publish the slugged file name and the final
    # post body into the pipeline context.
    post_date = datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
    # First markdown line is the title heading; strip the '#' markers.
    post_title = self.markdown_text.splitlines()[0].replace("#", "").strip()
    post_header = f"""+++
date = {post_date}
title = "{post_title}"
description = ""
slug = ""
tags = ["hacker-news-links"]
categories = []
externalLink = ""
series = []
+++
"""
    # [2:] skips the title line and the blank line that follows it.
    post_with_header = post_header + os.linesep + os.linesep.join(self.markdown_text.splitlines()[2:])
    # output
    post_file_name = slug(post_title) + ".md"
    context["post_file_name"] = post_file_name
    context["post_with_header"] = post_with_header
def init_db(db: Session) -> None:
    """Seed the database with the default organization, first superuser
    and the admin role (schema itself comes from Alembic migrations)."""
    # Tables should be created with Alembic migrations
    # But if you don't want to use migrations, create
    # the tables un-commenting the next line
    # Base.metadata.create_all(bind=engine)
    # insert_organizations(db=db)
    existing_org = organization.get_by_name(db, name=settings.ORGANIZATION)
    if not existing_org:
        new_org = OrganizationCreate(
            name=settings.ORGANIZATION,
            slug=slug.slug(settings.ORGANIZATION),
            mode="private",
        )
        existing_org = organization.create(db, obj_in=new_org)
    existing_user = user.get_by_email(db, email=settings.FIRST_SUPERUSER)
    if not existing_user:
        superuser = UserCreate(
            full_name=settings.FIRST_SUPERUSER_FULLNAME,
            email=settings.FIRST_SUPERUSER,
            password=settings.FIRST_SUPERUSER_PASSWORD,
        )
        existing_user = user.create(db, obj_in=superuser)  # noqa: F841
        # Promote the freshly created account to a verified superuser.
        existing_user.is_superuser = True
        existing_user.is_verified = True
        db.add(existing_user)
        db.commit()
        db.refresh(existing_user)
    # Grant the admin role in domain "1" to user "1" once.
    if not enforcer.get_roles_for_user_in_domain("1", "1"):
        enforcer.add_role_for_user_in_domain("1", "admin", "1")
def main():
    """Download a Yoast academy course: log in, walk every chapter,
    lesson and topic, and save each video/PDF under the course folder.

    BUG FIX: the bare ``except:`` around the course-title extraction also
    swallowed SystemExit/KeyboardInterrupt; narrowed to AttributeError
    (raised when soup.find(...) returns None).
    """
    parser = argparse.ArgumentParser(description='', usage=f'\npython {argv[0]} [options]')
    parser._optionals.title = "Basic Help"
    basicFuncs = parser.add_argument_group('Actions')
    basicFuncs.add_argument('-u', '--email', action="store", dest="email",
                            default=False, help='Email for Yoast')
    basicFuncs.add_argument('-p', '--password', action="store", dest="password",
                            default=False, help='Password for Yoast')
    basicFuncs.add_argument('-c', '--course-url', action="store", dest="course_url",
                            default=False, help='Course URL')
    args = parser.parse_args()
    if not (args.email and args.password and args.course_url):
        parser.print_help()
        return
    COURSE_URL = args.course_url
    MAIN_SITE = f"{urlparse(COURSE_URL).scheme}://{urlparse(COURSE_URL).netloc}"
    ses = login(args.email, args.password)
    r = ses.get(COURSE_URL)
    soup = bs(r.text, 'lxml')
    try:
        courseTitle = soup.find(class_='gtm__course-title').get_text(
            strip=True)
    except AttributeError:
        print("[-] Unable to Download the Course")
        return
    print("[+] Course:", courseTitle)
    if not os.path.exists(courseTitle):
        os.mkdir(courseTitle)
    # Videos embedded directly on the overview page.
    if soup.findAll(class_='wistia_embed'):
        print("[>] Video Found in Overview Page")
        for video in soup.findAll(class_='wistia_embed'):
            print("[+] Getting JSON Data")
            videoUrl, videoName, videoExt = getVideo(video)
            print("[>] Downloading Video")
            download_file(
                videoUrl,
                os.path.join(courseTitle, videoName + '.' + videoExt))
        print()
    chapters = [(chapter, chapter.find('a').get_text(strip=True))
                for chapter in soup.findAll(class_='list_lessons')]
    chapterId = 1
    for chapter in chapters:
        chapterData = chapter[0]
        chapterName = chapter[1]
        print("[+] Chapter:", chapterName)
        chapterName = slug.slug(chapterName)
        path = os.path.join(courseTitle, chapterName)
        if not os.path.exists(path):
            os.mkdir(path)
        lessons = [(topic.find('a').get('href'),
                    topic.find('a').get_text(strip=True))
                   for topic in chapterData.findAll(class_='topic_item')]
        for lesson in lessons:
            lessonUrl = lesson[0]
            lessonName = lesson[1]
            print("[+] Lesson:", lessonName)
            r = ses.get(lessonUrl)
            soup = bs(r.text, 'lxml')
            # The last h3 is navigation, not a topic — drop it.
            topics = soup.findAll(class_='h3')[:-1]
            for topic in topics:
                topicName = topic.get_text(strip=True)
                print("[+] Topic:", topicName)
                # The media container sits two elements after the heading.
                videoTag = topic.next_sibling.next_sibling.next_sibling.next_sibling
                if not videoTag.find(class_='wistia_embed'):
                    # No embedded video: the asset is a PDF linked just
                    # before the heading's sibling container.
                    videoTag = videoTag.previous_sibling.previous_sibling
                    print("[+] PDF File Found")
                    videoUrl = videoTag.find('a').get('href')
                    videoName = '.'.join(
                        videoUrl.split('/')[-1].split('.')[:-1])
                    videoExt = videoUrl.split('.')[-1]
                else:
                    print("[+] Video Found")
                    videoUrl, videoName, videoExt = getVideo(
                        videoTag.find(class_='wistia_embed'))
                videoName = slug.slug(videoName)
                print("[>] Downloading")
                download_file(videoUrl,
                              os.path.join(path, videoName + '.' + videoExt))
                print("[+] Downloaded")
                print()
    print("Completed")
import os import slug import urllib2 ########### Edit From Here ########### #This list is used to search keywords. You can edit this list to search for google images of your choice. You can simply add and remove elements of the list. search_keyword = ['green nuclear'] #This list is used to further add suffix to your search term. Each element of the list will help you download 100 images. First element is blank which denotes that no suffix is added to the search keyword of the above list. You can edit the list by adding/deleting elements from it.So if the first element of the search_keyword is 'Australia' and the second element of keywords is 'high resolution', then it will search for 'Australia High Resolution' keywords = [''] ########### End of Editing ########### directory = slug.slug(unicode('/'+search_keyword[0], 'UTF-8')) if not os.path.exists(directory): os.makedirs(directory) #Downloading entire Web Document (Raw Page Content) def download_page(url): version = (3,0) cur_version = sys.version_info if cur_version >= version: #If the Current Version of Python is 3.0 or above import urllib.request #urllib library for Extracting web pages try: headers = {} headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17" req = urllib.request.Request(url, headers = headers)
def run(self, edit, separator):
    """Replace every selected region with its slugged form, using the
    given separator character."""
    for sel in self.view.sel():
        original = self.view.substr(sel)
        slugged = slug.slug(original, -1, separator)
        self.view.replace(edit, sel, slugged)
'accept-language': 'en-US,en;q=0.9', } cookies = login() import json js = json.load(open('response.json')) if not os.path.exists('Advance SEO Course'): os.mkdir('Advance SEO Course') chaps = {} for chap in js['chapters']: chaps[chap['id']] = chap['name'] contents = {} for cont in js['contents']: if contents.get(chaps[cont['chapter_id']]): contents[chaps[cont['chapter_id']]].append(cont['contentable_id']) else: contents[chaps[cont['chapter_id']]] = [cont['contentable_id']] for chap in chaps.values(): if not os.path.exists(os.path.join('Advance SEO Course', slug.slug(chap))): os.mkdir(os.path.join('Advance SEO Course', slug.slug(chap))) print("[+] Chapter:", chap) for lesson in contents[chap]: r = get( f'https://craigs-school-3964.thinkific.com/api/course_player/v2/lessons/{lesson}', headers=headers, cookies=cookies) if r.json().get('videos'): for video in r.json().get('videos'): download_file('Advance SEO Course', video['url'], os.path.join(chap, video['url'].split('/')[-1]))
def slug(self):
    """Return the url-safe slug of this object's title."""
    title_text = unicode(self.title)
    return slug.slug(title_text)
def writejob(element, eid, cname, curl, cemail, jobtitle, jobdesc, jobcountry,
             jobstate, jobcity, jobzip="", jobdate=None, jobexp=None,
             active=1, approved=0, filled=0, feat=0, tags="", metas=""):
    """Append a <job> element (WPJobBoard format) to ``element``.

    Fixes:
    * ``jobdate``/``jobexp`` defaults were evaluated once at import time
      (the default-argument pitfall); they now default to None and are
      computed per call (today / today + 30 days).
    * The ``tags``/``metas`` parameters were shadowed by the ET.SubElement
      containers of the same names, so the child loops iterated the empty
      new elements and never emitted any <tag>/<meta> entries.
    * File handles are now closed, and ``hash`` no longer shadows the
      builtin.

    NOTE(review): both id files are opened with 'w+', which truncates them
    before they are read, so the duplicate check only sees ids written in
    this handle's lifetime.  Preserved as-is — confirm against the master
    script referenced below.
    """
    if jobdate is None:
        jobdate = datetime.today().strftime('%Y-%m-%d')
    if jobexp is None:
        jobexp = (datetime.today() + timedelta(days=30)).strftime('%Y-%m-%d')
    # Creates hash for id. If the hashinput is the same as a previous job on
    # the site, it will overwrite. Can change the hash input if there are
    # multiple jobs in the same place
    hashinput = str(jobtitle + cname + jobdesc + jobcity)
    id = int(hashlib.sha1(hashinput.encode("ascii")).hexdigest(), 16)
    newhash = open("newidlist.txt", "w+")
    newhashtxt = loadtxt("newidlist.txt")
    open("idlist.txt", "w+").close()
    known_ids = loadtxt("idlist.txt")
    if id in known_ids or id in newhashtxt:
        newhash.close()
        return
    newhash.write("%s\n" % id)
    newhash.close()
    # MASTER SCRIPT MUST CHANGE OVERWRITE OLD HASHTABLE WITH NEW, COMPLETED HASHTABLE
    job = ET.SubElement(element, 'job')
    # add each subelement to the xml
    ET.SubElement(job, 'id').text = str(id)
    ET.SubElement(job, 'employer_id').text = str(eid)
    ET.SubElement(job, 'company_name').text = str(cname)
    ET.SubElement(job, 'company_url').text = str(curl)
    ET.SubElement(job, 'company_email').text = str(cemail)
    ET.SubElement(job, 'job_title').text = str(jobtitle)
    ET.SubElement(job, 'job_slug').text = slug(str(jobtitle))
    ET.SubElement(job, 'job_description').text = str(jobdesc)
    ET.SubElement(job, 'job_country').text = str(jobcountry)
    ET.SubElement(job, 'job_state').text = str(jobstate)
    ET.SubElement(job, 'job_city').text = str(jobcity)
    # only add jobzip to XML if zipcode is present
    if jobzip:
        ET.SubElement(job, 'job_zip_code').text = str(jobzip)
    ET.SubElement(job, 'job_created_at').text = str(jobdate)
    ET.SubElement(job, 'job_expires_at').text = str(jobexp)
    ET.SubElement(job, 'is_active').text = str(active)
    ET.SubElement(job, 'is_approved').text = str(approved)
    ET.SubElement(job, 'is_filled').text = str(filled)
    ET.SubElement(job, 'is_featured').text = str(feat)
    # Only add tags to XML if tags are present
    if tags:
        tags_element = ET.SubElement(job, 'tags')
        # For each tag in list, add subelement
        for tg in tags:
            if len(tg) != 2:
                raise Exception(
                    'Wrong number of parameters for tags. Each tag must have 2 values in the list - 1 for type and 1 for title')
            tag = ET.SubElement(tags_element, 'tag')
            ET.SubElement(tag, 'type').text = str(tg[0])
            ET.SubElement(tag, 'title').text = str(tg[1])
            ET.SubElement(tag, 'slug').text = slug(str(tg[1]))
    if metas:
        metas_element = ET.SubElement(job, 'metas')
        for mt in metas:
            # For each meta in list, add subelement
            if len(mt) != 2:
                raise Exception(
                    'Wrong number of parameters for metas. Each meta must have 2 values in the list - 1 for name and 1 for value')
            meta = ET.SubElement(metas_element, 'meta')
            ET.SubElement(meta, 'name').text = str(mt[0])
            ET.SubElement(meta, 'value').text = str(mt[1])
    return
def create_auto(cls, *args, **kwargs):
    """Create a record, deriving the slug from the title when the caller
    did not supply one explicitly."""
    has_slug = "slug" in kwargs
    if not has_slug:
        kwargs["slug"] = slug(kwargs["title"])
    return cls.create(*args, **kwargs)
def update_auto(self, *args, **kwargs):
    """Update this record, re-deriving the slug from the title when the
    caller did not supply one explicitly."""
    has_slug = "slug" in kwargs
    if not has_slug:
        kwargs["slug"] = slug(kwargs["title"])
    return self.update(*args, **kwargs)
def main(argv=sys.argv):
    """Import a set of video files into the songs folder.

    For each input file: hash its bytes, extract title/artist metadata
    via ffmpeg, and store it as a restricted Song named
    ``<title-slug>-<md5hex>``.

    Fixes:
    * Missing ``args[0]`` raises IndexError, not KeyError — both are now
      caught so a missing config_uri prints usage as intended.
    * The input file is hashed via ``with`` so the handle is closed.
    """
    def usage(msg):
        print(msg)
        sys.exit(2)

    description = "Import a set of video files into the songs folder"
    parser = optparse.OptionParser(
        "usage: %prog config_uri input_filenames",
        description=description
    )
    parser.add_option(
        '-d', '--dir', dest='directory',
        help='Use this directory as working directory instead of a tempdir'
    )
    parser.add_option(
        '-o', '--overwrite', dest='overwrite',
        help='Overwrite songs in the songs folder instead of skipping dupes',
        action='store_true',
    )
    parser.add_option(
        '-a', '--av-only', dest='av_only',
        help='Overwrite audio/video of songs only in songs folder (not metadata)',
        action='store_true',
    )
    opts, args = parser.parse_args(argv[1:])
    overwrite = opts.overwrite
    av_only = opts.av_only
    try:
        config_uri = args[0]
    except (IndexError, KeyError):
        usage('Requires a config_uri as an argument')
    outdir = opts.directory or tempfile.mkdtemp()
    setup_logging(config_uri)
    env = bootstrap(config_uri)
    root = env['root']
    registry = env['registry']
    songs = root['songs']
    restricted = songs.get('restricted')
    if restricted is None:
        # First run: create the restricted folder with an ACL that hides
        # it from anonymous users.
        restricted = registry.content.create('Folder')
        songs['restricted'] = restricted
        set_acl(
            restricted,
            [(Allow, 'system.Authenticated', ['view']),
             (Allow, 'system.Authenticated', ['yss.indexed']),
             (Deny, 'system.Everyone', ['view']),
             (Deny, 'system.Everyone', ['yss.indexed'])]
        )
    try:
        for input_filename in args[1:]:
            logging.info(input_filename)
            # Content hash gives each import a stable, unique name suffix.
            md5 = hashlib.md5()
            with open(input_filename, 'rb') as f:
                while True:
                    data = f.read(1 << 19)
                    if not data:
                        break
                    md5.update(data)
            hexdigest = md5.hexdigest()
            # Extract container metadata (title/artist) via ffmpeg.
            command = [
                'ffmpeg', '-i', input_filename, '-f', 'ffmetadata', 'pipe:1',
            ]
            print(' '.join([shlex.quote(s) for s in command]))
            proc = subprocess.Popen(
                command,
                universal_newlines=True,
                stdout=subprocess.PIPE,
            )
            stdout, _ = proc.communicate()
            md = {}
            for line in stdout.split('\n'):
                if '=' in line:
                    k, v = line.strip().split('=', 1)
                    md[k.lower()] = v[1:-1]
            name = slug.slug(md['title'])
            title = md['title']
            artist = md['artist']
            name = '%s-%s' % (name, hexdigest)
            if name in restricted and not (overwrite or av_only):
                logging.info('Not overwriting %s' % name)
                continue
            stream = open(input_filename, 'rb')
            if name in restricted and av_only:
                # NOTE(review): `logger` is not defined in this function —
                # assumed module-level; elsewhere logging.info is used.
                logger.info('replacing video for %s' % title)
                song = restricted[name]
                song.upload(stream)
                song.mimetype = 'video/webm'
            else:
                try:
                    del restricted[name]
                except KeyError:
                    pass
                song = registry.content.create(
                    'Song',
                    title=title,
                    artist=artist,
                    lyrics='',
                    timings='',
                    stream=stream,
                    mimetype='video/webm',
                )
                restricted[name] = song
            blameme = root['performers']['blameme']
            song.uploader = blameme
            event = ObjectModified(song)
            registry.subscribers((event, song), None)
            print('done %s, %s, %s' % (name, title, artist))
            transaction.commit()
            songs._p_jar.sync()
    finally:
        if not opts.directory:
            shutil.rmtree(outdir, ignore_errors=True)
def upload(self):
    # Render and process the song-upload form: validate, transcode the
    # uploaded audio to Opus with ffmpeg, create a Song resource keyed by
    # "<title-slug>-<md5>", then redirect to the retiming view.
    context = self.context
    request = self.request
    schema = SongUploadSchema().bind(request=request, context=context)
    form = deform.Form(schema, buttons=('Save',))
    rendered = None
    if 'Save' in request.POST:
        controls = request.POST.items()
        try:
            appstruct = form.validate(controls)
        except deform.ValidationFailure as e:
            # Re-render the form with inline validation errors.
            rendered = e.render()
        else:
            audio_file = appstruct['audio_file']
            tmpdir = request.registry.settings['substanced.uploads_tempdir']
            # Per-upload scratch directory, removed in the finally below.
            job = uuid.uuid4().hex
            jobdir = os.path.join(tmpdir, job)
            try:
                try:
                    os.makedirs(jobdir)
                except FileExistsError:
                    pass
                inputfn = os.path.join(jobdir, 'inputfile')
                inputfile = open(inputfn, 'wb')
                fp = audio_file['fp']
                fp.seek(0)
                shutil.copyfileobj(fp, inputfile)
                # NOTE(review): M is 1024**3 (1 GiB) despite the name
                # suggesting megabytes — hashing chunk size.
                M = 1024 * 1024 * 1024
                md5 = hashlib.md5()
                f = open(inputfn, 'rb')
                while True:
                    data = f.read(M)
                    if not data:
                        break
                    md5.update(data)
                # Transcode the raw upload to 48 kHz Opus.
                opus_filename = os.path.join(jobdir, 'output.opus')
                ffmpeg.input(inputfn).output(
                    opus_filename, ar=48000).run()
                song = request.registry.content.create(
                    'Song',
                    appstruct['title'],
                    appstruct['artist'],
                    appstruct['lyrics'],
                    timings='',
                    audio_stream=open(opus_filename, 'rb'),
                    audio_mimetype='audio/opus',
                    language=appstruct['language'],
                )
            finally:
                shutil.rmtree(jobdir, ignore_errors=True)
            request.session.flash(
                'Song uploaded. Now voice lyrics like William Shatner in '
                'rhythm with the song in order to time the karaoke '
                'display text.',
                'info')
            song.language = appstruct['language']
            song.genre = appstruct['genre']
            song.year = appstruct['year']
            # Name the resource by title slug + content hash for uniqueness.
            songname = slug.slug(appstruct['title'])
            hashval = md5.hexdigest()
            songname = f'{songname}-{hashval}'
            if songname in self.context:
                request.session.flash('this song has already been uploaded')
                raise HTTPFound(request.resource_url(self.context))
            self.context[songname] = song
            song.uploader = request.performer
            # NB must be after seating
            set_acl(song, [
                (Allow, request.user.__oid__, ['yss.edit']),
                (Deny, Everyone, ['yss.indexed']),
            ]
            )
            event = ObjectModified(song)
            self.request.registry.subscribers((event, song), None)
            return HTTPFound(request.resource_url(song, '@@retime'))
    else:
        # GET: render an empty form.
        appstruct = {
            'title': colander.null,
            'artist': colander.null,
            'audio_file': colander.null,
            'genre': colander.null,
            'language': colander.null,
            'lyrics': colander.null,
            'year': colander.null,
        }
    if rendered is None:
        rendered = form.render(appstruct, readonly=False)
    return {'form': rendered, 'page_title': 'Upload Song'}