def generate_usable_slug(brew):
    """
    Generate a usable slug for a given brew. This method will try to slugify
    the brew date + owner and then append an integer if needed, increasing
    this integer until no existing brew would be overwritten.
    """
    base = brew.started.strftime('%d-%b-%Y') + '-' + brew.owner.name
    slug = slugify(base)

    # Reuse existing slug if we can
    if brew.slug and brew.slug == slug:
        return brew.slug

    append = 0
    while True:
        count = Brew.all()\
                    .filter('owner =', brew.owner)\
                    .filter('recipe =', brew.recipe)\
                    .filter('slug =', slug)\
                    .count()
        if not count:
            break

        append += 1
        slug = slugify(base) + str(append)

    return slug
def generate_usable_slug(recipe):
    """
    Generate a usable slug for a given recipe. This method will try to slugify
    the recipe name and then append an integer if needed, increasing this
    integer until no existing recipe would be overwritten.
    """
    slug = slugify(recipe.name)

    # Reuse existing slug if we can
    if recipe.slug and recipe.slug == slug:
        return recipe.slug

    append = 0
    while True:
        count = Recipe.all()\
                      .filter('owner =', recipe.owner)\
                      .filter('slug =', slug)\
                      .count()
        if not count:
            break

        append += 1
        slug = slugify(recipe.name) + str(append)

    return slug
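# A minimal, self-contained sketch (not from the source) of the
# collision-suffix pattern shared by the two generate_usable_slug()
# variants above: the datastore query is replaced with an in-memory set
# of taken slugs, and _slugify() is an assumed stand-in for the
# project's own slugify helper.
import re

def _slugify(text):
    return re.sub(r'[^a-z0-9]+', '-', text.lower()).strip('-')

def unique_slug(base, taken):
    slug = _slugify(base)
    append = 0
    while slug in taken:
        append += 1
        slug = _slugify(base) + str(append)
    return slug

# unique_slug('Pale Ale', {'pale-ale', 'pale-ale1'}) == 'pale-ale2'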
def get_gplay_channels():
    soup = bs(get_page(GLOBOSAT_URL))
    # get lists
    # uls = soup.find('ul', attrs={'class': 'lista-canais'}).findAll('li')
    # uls = soup.find('ul', attrs={'id': 'mobile-submenu-canais-on-demand'}).findAll('li')[1:]
    channels, live = soup.findAll('ul', attrs={'class': 'submenu-desktop'})
    # get children tags and filter as imgs
    channels = dict([
        (util.slugify(img['alt']),
         (img['alt'],
          img['src'].replace(img['src'][7:img['src'].index('=/') + 2], '')))
        for img in channels.findChildren()[2::3]
    ])
    # build live data
    live = dict([
        (util.slugify(img['alt']),
         {
             'name': img['alt'],
             'logo': json['canal_logotipo'],
             # 'plot': ', '.join(reversed(json['programacao'].values())),
             # some items have a null value for programacao
             'plot': '',
             'id': json['midia']['id_midia'],
         })
        for img, json in zip(live.findChildren()[2::3],
                             get_page(GLOBOSAT_LIVE_JSON))
    ])
    return (channels, live)
def newCatalogItem():
    catalog = session.query(Catalog).filter_by(parent_id=1).all()
    catalog_all = session.query(Catalog)
    newest_catalog = session.query(Catalog).filter(Catalog.id != 1).order_by(
        Catalog.id.desc()).limit(1).first()
    try:
        parentid = int(request.args.get('parentid'))
    except (ValueError, TypeError):
        parentid = 1
    # print u'new_catalog_item: ', login_session
    if request.method == 'POST':
        if not session.query(Catalog).all():
            Catalogitem1 = Catalog(name="(ROOT)", id=1, lvl=0,
                                   description="Real Root(does not display)",
                                   slug="/")
            session.add(Catalogitem1)
            try:
                session.commit()
            except:  # (IntegrityError, InvalidRequestError):
                session.rollback()
        if request.form['name']:
            newname = request.form['name']
        else:
            newname = 'No Name #' + str(newest_catalog.id + 1)
        if request.form['slug']:
            newslug = slugify(request.form['slug'])
        else:
            newslug = str(newest_catalog.id)
        new_item_slug = slugify(newslug)
        parent_node = catalog_all.filter_by(
            id=int(request.form['moveto'])).first()
        createdby = session.query(User).filter_by(
            email=login_session['email']).first()
        newItem = Catalog(name=newname,
                          description=request.form['description'],
                          parent_id=parent_node.id,
                          slug=new_item_slug,
                          lvl=parent_node.lvl + 1,
                          user_id=createdby.id)
        if valid_item(newItem, session, catalog_all.all()):
            session.add(newItem)
            session.commit()
        else:
            response = make_response(
                json.dumps('422 Unprocessable Entity: Duplicated Slug?'), 422)
            response.headers['Content-Type'] = 'application/json'
            return response
        return redirect(url_for('catalogItem', catalog_path=newItem.slug))
    else:
        return render_template('newCatalogItem.html', catalog=catalog,
                               parentid=parentid)
def run(self, parent, blocks):
    accumulated_tex_blocks = []

    # Get all the tex from the document
    while blocks:
        block = blocks.pop(0)
        accumulated_tex_blocks.append(block)
        # Does this block also end the latex environment?
        if ("\n%s" % block).rstrip().endswith(LaTeXBlockProcessor.ENV_END):
            break

    latex_raw = "\n\n".join(accumulated_tex_blocks)
    latex_match = LaTeXBlockProcessor.latex_re.match(latex_raw)

    # Check the latex environment conforms to spec!
    if latex_match is None:
        raise Exception("Invalid latex environment:\n%s" % latex_raw)

    alt = latex_match.group(1)
    src = latex_match.group(2)

    # Check to see if this is a block defining the preamble for latex blocks
    # in this document.
    if alt == "<preamble>":
        # For the preamble section
        self.preamble += src
    else:
        # A normal LaTeX file to render

        # Make the image a link to the PDF?
        link_pdf = False
        if "--pdf" in alt:
            alt = alt.replace("--pdf", "").strip()
            link_pdf = True

        img = os.path.join(self.configs["latex_img_dir"],
                           "%s.png" % (slugify(alt, "_")))
        if link_pdf:
            pdf = os.path.join(self.configs["latex_img_dir"],
                               "%s.pdf" % (slugify(alt, "_")))
        else:
            pdf = None

        self.render_latex(src, img, pdf)

        # Add the image of the latex supplied
        if link_pdf:
            blocks.insert(0, "[![%s](file://%s)](file://%s)" % (alt, img, pdf))
        else:
            blocks.insert(0, "![%s](file://%s)" % (alt, img))
def check_word(self):
    solution = self.current_card.solution
    solution_attempt = self.solution_text_box.text()
    if util.slugify(solution) == util.slugify(solution_attempt):
        self.result_label.setText("Richtig!")  # German: "Correct!"
    else:
        # German: "Unfortunately wrong, solution: "
        self.result_label.setText("Leider falsch, Lösung: " + solution)
    self.enter_button.setEnabled(False)
    self.solution_text_box.setEnabled(False)
    Timer(3.0, self.next_word).start()
def clone_infra(plan, environment, name, team, project, description,
                task=None, clone=None):
    if not plan.provider == plan.CLOUDSTACK:
        dbinfra = DatabaseInfra.best_for(
            plan=plan, environment=environment, name=name)
        if dbinfra:
            database = Database.provision(databaseinfra=dbinfra, name=name)
            database.team = team
            database.description = description
            database.project = project
            database.save()
            return build_dict(databaseinfra=dbinfra, database=database,
                              created=True)
        return build_dict(databaseinfra=None, created=False)

    workflow_dict = build_dict(
        name=slugify(name),
        plan=plan,
        environment=environment,
        steps=get_clone_settings(plan.engine_type.name),
        qt=get_vm_qt(plan=plan),
        dbtype=str(plan.engine_type),
        team=team,
        project=project,
        description=description,
        clone=clone,
    )
    start_workflow(workflow_dict=workflow_dict, task=task)
    return workflow_dict
def error_handler(e):
    logging.exception(e)
    try:
        e.code
    except AttributeError:
        e.code = 500
        e.name = "Internal Server Error"

    if flask.request.path.startswith("/_s/"):
        return (
            util.jsonpify(
                {
                    "status": "error",
                    "error_code": e.code,
                    "error_name": util.slugify(e.name),
                    "error_message": e.name,
                    "error_class": e.__class__.__name__,
                }
            ),
            e.code,
        )

    return (
        flask.render_template(
            "error.html",
            title="Error %d (%s)!!1" % (e.code, e.name),
            html_class="error-page",
            error=e,
        ),
        e.code,
    )
def make_infra(plan, environment, name, team, project, description,
               subscribe_to_email_events=True, task=None, is_protected=False):
    if not plan.provider == plan.CLOUDSTACK:
        dbinfra = DatabaseInfra.best_for(
            plan=plan, environment=environment, name=name
        )
        if dbinfra:
            database = Database.provision(databaseinfra=dbinfra, name=name)
            database.team = team
            database.description = description
            database.project = project
            database.subscribe_to_email_events = subscribe_to_email_events
            database.save()
            return build_dict(
                databaseinfra=dbinfra, database=database, created=True
            )
        return build_dict(databaseinfra=None, created=False)

    workflow_dict = build_dict(
        name=slugify(name),
        plan=plan,
        environment=environment,
        steps=get_deploy_settings(plan.replication_topology.class_path),
        qt=get_vm_qt(plan=plan),
        dbtype=str(plan.engine_type),
        team=team,
        project=project,
        description=description,
        subscribe_to_email_events=subscribe_to_email_events,
        is_protected=is_protected,
    )
    start_workflow(workflow_dict=workflow_dict, task=task)
    return workflow_dict
def create_setlist(cls, name, description, sharing, tasks, based_on=None):
    # tasks: [ ( id, text, parent, prev, next, first, last, notes ) ]
    new_setlist = cls(
        name=name,
        slug=slugify(name),
        description=description,
        # creator=users.get_current_user(),
        sharing=sharing,
        deleted=False,
        based_on=based_on,
    )
    new_setlist.put()
    new_setlist.enqueue_indexing(url='/tick/tasks/searchindexing')

    max_id = 0
    mappings = {}
    for task in tasks:
        mappings[task[0]] = max_id
        max_id += 1
        try:
            parent_id = mappings[task[2]]
        except KeyError:
            parent_id = None
        SetTask(
            id=mappings[task[0]],
            setlist=new_setlist,
            text=task[1],
            parent_task=parent_id,
            notes=task[7],
            parent=new_setlist,  # belongs to the model's entity group
        ).put()

    new_setlist.put()
    return new_setlist
def _smart(url, tries):
    # let's get the content of the page
    soup = BeautifulSoup(urllib2.urlopen(url))

    # we want to build a short with the page title
    if soup.title is None:
        return _random(url)
    title = soup.title.string
    if title is None:
        return _random(url)

    words = [word.lower()
             for word in [word.strip() for word in title.split()]
             if len(word) > 4 and word[0] != '&']

    # that works for Sphinx :)
    if len(words) > 1:
        short = words[0] + '-' + words[1]
    elif len(words) == 1:
        short = words[0]
    else:
        return _random(url)

    if tries > 0:
        short += '-' + str(tries)

    return slugify(short)
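# Hedged sketch (not from the source) of the title-based shortening in
# _smart() above, with the network fetch stubbed out. The word filter
# and the '-<tries>' suffix mirror the function's own logic; the final
# slugify() pass is omitted here.
def short_from_title(title, tries=0):
    words = [w.lower() for w in (w.strip() for w in title.split())
             if len(w) > 4 and w[0] != '&']
    if not words:
        return None  # the caller would fall back to _random(url)
    short = '-'.join(words[:2])
    if tries > 0:
        short += '-' + str(tries)
    return short

# short_from_title('Sphinx Search Engine Documentation') == 'sphinx-search'
# short_from_title('Sphinx Search Engine Documentation', 2) == 'sphinx-search-2'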
def database_pre_save(sender, **kwargs):
    database = kwargs.get('instance')
    if database.is_in_quarantine:
        if database.quarantine_dt is None:
            database.quarantine_dt = datetime.datetime.now().date()
        if not database.quarantine_user:
            from dbaas.middleware import UserMiddleware
            database.quarantine_user = UserMiddleware.current_user()
    else:
        database.quarantine_dt = None
        database.quarantine_user = None

    if database.id:
        saved_object = Database.objects.get(id=database.id)
        if database.name != saved_object.name:
            raise AttributeError(_("Attribute name cannot be edited"))
    else:
        # new database
        if database_name_evironment_constraint(database.name,
                                               database.environment.name):
            raise AttributeError(
                _('%s already exists in production!') % database.name)
        LOG.debug("slugifying database's name for %s" % database.name)
        database.name = slugify(database.name)
def wrap(self, gen: Generator[Tuple[Book, Chapter], None, None]
         ) -> Generator[Tuple[Book, Chapter], None, None]:
    unique_id = slugify(self.novel.title)
    filepath = self.join_to_path(self.filename)
    with EpubFile(
            file=filepath,
            unique_id=unique_id,
            title=self.novel.title,
            language=self.novel.language if self.novel.language else '',
            identifier=str(self.novel.url),
            rights=self.novel.rights if self.novel.rights else '',
            publisher=self.novel.url.hostname,
            subject=' / '.join(
                ['Web Novel', *(self.novel.tags if self.novel.tags else [])]),
            date=self.novel.release_date,
            description=self.novel.description.text,
            creator=self.novel.author if self.novel.author
            else self.novel.translator if self.novel.translator else '',
            cover_image=self.novel.cover,
            mode='w') as epub:
        self.log.debug(f"Opened file '{filepath}'")
        last_book = None
        for book, chapter in gen:
            if book != last_book:  # New book
                last_book = book
                book = chapter.book
                book_file = BookFile(book)
                epub.add_book(book_file)
                self.log.debug(
                    f"Saved book {book} to ({book_file.unique_id}): "
                    f"{book_file.filepath}")
            chapter_file = ChapterFile(chapter)
            epub.add_chapter(book_file, chapter_file)
            self.log.debug(
                f"Saved chapter {chapter} to ({chapter_file.unique_id}): "
                f"{chapter_file.filepath}")
            yield book, chapter
def __init__(self, module_path):
    self.module_path = module_path
    self.filename = filename = os.path.basename(module_path)
    module_name = filename.split('.')[0].lstrip('_01234567890')
    imp_desc = ('', 'r', imp.PY_SOURCE)
    with open(module_path) as module_file:
        self.module = imp.load_module(module_name, module_file,
                                      module_path, imp_desc)
        module_file.seek(0)
        self.module_src = module_file.read()
    self.title = self.module.title
    self.author = self.module.author  # TODO: settings.AUTHORS lookup
    self.tags = getattr(self.module, 'tags', ())
    self.is_draft = getattr(self.module, 'draft', False)
    self.pub_date = datetime.datetime(*self.module.pub_date)
    updated = getattr(self.module, 'updated', self.pub_date)
    self.updated = updated or datetime.datetime(*updated)
    try:
        self.id = int(getattr(self.module, int_id_name, None))
    except (TypeError, ValueError):
        # int(None) raises TypeError when the attribute is missing
        raise ValueError('Internal IDs should be integers.' + str(module_path))
    self.slug = slugify(unicode(self.title))
    self.parts = get_parts(self.module_src)
    self.run_examples()
def clone_infra(plan, environment, name, team, project, description,
                task=None, clone=None):
    if not plan.provider == plan.CLOUDSTACK:
        dbinfra = DatabaseInfra.best_for(
            plan=plan, environment=environment, name=name)
        if dbinfra:
            database = Database.provision(databaseinfra=dbinfra, name=name)
            database.team = team
            database.description = description
            database.project = project
            database.save()
            return build_dict(databaseinfra=dbinfra, database=database,
                              created=True)
        return build_dict(databaseinfra=None, created=False)

    workflow_dict = build_dict(name=slugify(name),
                               plan=plan,
                               environment=environment,
                               steps=get_clone_settings(plan.engine_type.name),
                               qt=get_vm_qt(plan=plan),
                               dbtype=str(plan.engine_type),
                               team=team,
                               project=project,
                               description=description,
                               clone=clone)
    start_workflow(workflow_dict=workflow_dict, task=task)
    return workflow_dict
def __init__(self, module_path):
    self.module_path = module_path
    self.filename = filename = os.path.basename(module_path)
    module_name = filename.split('.')[0].lstrip('_01234567890')
    imp_desc = ('', 'r', imp.PY_SOURCE)
    with open(module_path) as module_file:
        self.module = imp.load_module(module_name, module_file,
                                      module_path, imp_desc)
        module_file.seek(0)
        self.module_src = module_file.read()
    self.title = self.module.title
    self.author = self.module.author  # TODO: settings.AUTHORS lookup
    self.tags = getattr(self.module, 'tags', ())
    self.is_draft = getattr(self.module, 'draft', False)
    self.pub_date = datetime.datetime(*self.module.pub_date)
    updated = getattr(self.module, 'updated', self.pub_date)
    self.updated = updated or datetime.datetime(*updated)
    try:
        self.id = int(getattr(self.module, int_id_name, None))
    except (TypeError, ValueError):
        # int(None) raises TypeError when the attribute is missing
        raise ValueError('Internal IDs should be integers.' + str(module_path))
    self.slug = slugify(unicode(self.title))
    self.parts = get_parts(self.module_src)
    self.run_examples()
def parse_section(self, res):
    title = res.html.find('.breadcrumb li:last-child span a span',
                          first=True).text
    content = self.parse_content(res, title)
    for a in content.find('a'):
        href = a.attrs.get('href')
        if not href:
            continue
        section_prefix = (
            "https://campus.exactas.uba.ar/course/view.php"
            "?id={}&section=".format(self._course_id))
        if '/mod/resource' in href:
            self.fetch_resource(href, slugify(title))
        elif '/mod/forum' in href:
            pass  # ignoring forum
        elif '/mod/url' in href:
            self.fetch_shortened_url(href, a.text)
        elif '/mod/page' in href:
            self.fetch_page_resource(href)
        elif href.startswith(section_prefix):
            self.fetch_section(href)
        else:
            print("unhandled resource", href, title, file=sys.stderr)
def parse_content(self, res, title):
    content = res.html.find('#region-main .content', first=True)
    if content is None:
        content = res.html.find('#region-main [role="main"]', first=True)

    extra = []
    for iframe in content.find('iframe'):
        src = iframe.attrs.get('src')
        if not src:
            continue
        extra.append("- iframe: URL=" + src)

    h = HTML2Text(baseurl='')
    h.ul_item_mark = '-'
    md_content = h.handle(content.html)
    if extra:
        md_extra_content = '\n\n'.join(extra)
        md_content += md_extra_content

    if md_content.strip() != '':
        with open(self.path(slugify(title) + '.md'), 'w') as f:
            # "fuente" is Spanish for "source"
            f.write('# ' + title + '\n([fuente](' + res.url + '))\n---\n')
            f.write(md_content)

    return content
def clean(self):
    if not self.pk:
        self.name = slugify(self.name)

    if self.name in self.__get_database_reserved_names():
        raise ValidationError(
            _("{} is a reserved database name".format(self.name)))
def handle_error(e):
    if not e:
        e = {}
    else:
        logging.exception(e)
    try:
        e.code
    except AttributeError:
        e.code = 500
        e.name = e.description = 'Internal Server Error'

    result = {
        'status': 'error',
        'error_code': e.code,
        'error_name': util.slugify(e.name),
        'error_message': e.name,
        'error_class': e.__class__.__name__,
        'description': e.description,
        'data': None,
        'validations': None,
    }
    if hasattr(e, 'data'):
        result['data'] = e.data
    if hasattr(e, 'validations'):
        result['validations'] = e.validations
    return util.jsonpify(result), e.code
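# Illustrative payload (an assumption, not captured output) that
# handle_error() above would build for a 404 whose name is 'Not Found',
# assuming util.slugify lowercases and hyphenates:
#
# {
#     'status': 'error',
#     'error_code': 404,
#     'error_name': 'not-found',
#     'error_message': 'Not Found',
#     'error_class': 'NotFound',
#     'description': '...',
#     'data': None,
#     'validations': None,
# }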
def _build_globo(self, channel=None):
    categories, shows = scraper.get_globo_shows()
    data = {'globo': {}}
    for cat, show_list in zip(categories, shows):
        slug = util.slugify(cat)
        data['globo'].update({slug: (cat, None)})
        data[slug] = show_list
    return data
def get_folder_name(self, dialog_id, dialog_name=None):
    known_dialogs = self.store.dialog_names
    if dialog_id in known_dialogs:
        return known_dialogs[dialog_id]['folder']
    assert dialog_name is not None, "Unknown dialog, give me its name first"
    logger.info(f"Adding new dialog info: {dialog_name}")
    folder_name = slugify(dialog_name)
    return folder_name
def __init__(self, index, display, interface, change=2, data_rate=64):
    self.index = index
    self.display = display
    self.id = util.slugify(display)
    self.change = change
    self.data_rate = data_rate
    self.interface = interface
    self.logger = util.get_logger(
        "%s.%s" % (self.__module__, self.__class__.__name__))
def before_first_run(self, device, path, *args, **kwargs):
    super(NativeExperiment, self).before_first_run(device, path)
    filename = op.basename(path)
    paths.OUTPUT_DIR = op.join(paths.OUTPUT_DIR, slugify(filename))
    makedirs(paths.OUTPUT_DIR)
    self.logger.info('APK: %s' % filename)
    device.install(path)
    self.package = op.splitext(op.basename(path))[0]
def clean(self):
    # slugify name
    if not self.pk:
        # new database
        self.name = slugify(self.name)

    if self.name in self.__get_database_reserved_names():
        raise ValidationError(
            _("%s is a reserved database name" % self.name))
def get_gplay_channels():
    # import rpdb2; rpdb2.start_embedded_debugger('pw')
    page = 1
    headers = {'Authorization': GLOBOSAT_API_AUTHORIZATION}
    channel_info = get_page(GLOBOSAT_API_CHANNELS % page, headers=headers)
    results = channel_info['results']

    # loop through pages
    while channel_info['next'] is not None:
        page += 1
        channel_info = get_page(GLOBOSAT_API_CHANNELS % page, headers=headers)
        results.update(channel_info['results'])

    # create channels index
    channels = dict([(result['slug'],
                      (result['title'], result['color_logo'], result['id']))
                     for result in results])

    soup = bs(get_page(GLOBOSAT_URL))
    # get lists
    # uls = soup.find('ul', attrs={'class': 'lista-canais'}).findAll('li')
    # uls = soup.find('ul', attrs={'id': 'mobile-submenu-canais-on-demand'}).findAll('li')[1:]
    channels_dummy, live, dummy = soup.findAll(
        'ul', attrs={'class': 'submenu-desktop'})
    '''
    # get children tags and filter as imgs
    channels = dict([(util.slugify(img['alt']),
                      (img['alt'],
                       img['src'].replace(img['src'][7:img['src'].index('=/') + 2], '')))
                     for img in channels.findChildren()[2::3]])
    '''
    # build live data
    live = dict([(util.slugify(img['alt'])
                  if util.slugify(img['alt']) != 'sportv' else 'sportvlive',
                  {
                      'name': img['alt'],
                      'logo': json['canal_logotipo'],
                      'playable': json['status'] == 'ativa',
                      'plot': ', '.join(reversed(json['programacao'].values()))
                              if json['programacao'] is not None else '',
                      'id': json['midia']['id_midia'],
                  })
                 for img, json in zip(live.findChildren()[2::3],
                                      get_page(GLOBOSAT_LIVE_JSON))])
    return (channels, live)
def render_author_pages(self):
    author_dict = self.author_dict
    for author, posts in author_dict.items():
        author_slug = slugify(unicode(author))
        with open(os.path.join(OUTPUT_DIR, 'author', author_slug + '.html'),
                  'w') as a_file:
            a_file.write(render_to('post_list.html', posts=posts,
                                   list_desc="Posts by " + author))
def clone_infra(plan, environment, name, team, backup_hour,
                maintenance_window, maintenance_day, project, description,
                subscribe_to_email_events, task=None, clone=None):
    if not plan.provider == plan.CLOUDSTACK:
        infra = DatabaseInfra.best_for(
            plan=plan,
            environment=environment,
            name=name,
            backup_hour=backup_hour,
            maintenance_window=maintenance_window,
            maintenance_day=maintenance_day,
        )
        if infra:
            database = Database.provision(databaseinfra=infra, name=name)
            database.team = team
            database.description = description
            database.project = project
            database.save()
            return build_dict(
                databaseinfra=infra,
                database=database,
                created=True,
                subscribe_to_email_events=subscribe_to_email_events)
        return build_dict(databaseinfra=None, created=False,
                          subscribe_to_email_events=subscribe_to_email_events)

    workflow_dict = build_dict(
        name=slugify(name),
        plan=plan,
        environment=environment,
        steps=get_clone_settings(plan.replication_topology.class_path),
        qt=get_vm_qt(plan=plan),
        dbtype=str(plan.engine_type),
        team=team,
        backup_hour=backup_hour,
        maintenance_window=maintenance_window,
        maintenance_day=maintenance_day,
        project=project,
        description=description,
        clone=clone,
        subscribe_to_email_events=subscribe_to_email_events,
    )
    start_workflow(workflow_dict=workflow_dict, task=task)
    return workflow_dict
def clean(self):
    # slugify name
    if not self.pk:
        # new database
        self.name = slugify(self.name)

    if self.name in self.__get_database_reserved_names():
        raise ValidationError(
            _("%s is a reserved database name" % self.name))
def get_sportv_live(logo):
    live = dict([(util.slugify(json['title']),
                  {
                      'name': json['title'],
                      'logo': logo,
                      'playable': True,
                      'plot': '',
                      'id': json['videoId'],
                  })
                 for json in get_page(SPORTV_LIVE_JSON)])
    return live
def add_post(self, title, post, tags):
    url = slugify(title)
    self.collection.save(
        dict(title=title, post=post, tags=tags, comments=[],
             time=datetime.now(), url=url))
def add_post(self, title, post, tags):
    url = slugify(title)
    self.collection.save(dict(title=title,
                              post=post,
                              tags=tags,
                              comments=[],
                              time=datetime.now(),
                              url=url))
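# Hypothetical usage of the two add_post() variants above, assuming
# 'self.collection' wraps a MongoDB collection (e.g. via pymongo) and
# that slugify('Hello World') == 'hello-world':
#
# blog.add_post('Hello World', 'First post body', ['meta'])
# # stores {'title': 'Hello World', ..., 'url': 'hello-world'}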
def create_database(name, plan, environment, team, project, description,
                    task, backup_hour, maintenance_window, maintenance_day,
                    subscribe_to_email_events=True, is_protected=False,
                    user=None, retry_from=None):
    topology_path = plan.replication_topology.class_path

    name = slugify(name)
    base_name = gen_infra_names(name, 0)
    infra = get_or_create_infra(base_name, plan, environment, backup_hour,
                                maintenance_window, maintenance_day,
                                retry_from)
    instances = get_instances_for(infra, topology_path)

    database_create = DatabaseCreate()
    database_create.task = task
    database_create.name = name
    database_create.plan = plan
    database_create.environment = environment
    database_create.team = team
    database_create.project = project
    database_create.description = description
    database_create.subscribe_to_email_events = subscribe_to_email_events
    database_create.is_protected = is_protected
    database_create.user = user.username if user else task.user
    database_create.infra = infra
    database_create.database = infra.databases.first()
    database_create.save()

    steps = get_deploy_settings(topology_path)

    since_step = None
    if retry_from:
        since_step = retry_from.current_step

    if steps_for_instances(steps, instances, task,
                           database_create.update_step,
                           since_step=since_step):
        database_create.set_success()
        task.set_status_success('Database created')
        database_create.database.finish_task()
    else:
        database_create.set_error()
        task.set_status_error('Could not create database\n'
                              'Please check error message and do retry')
def create_new_credential(cls, user, database):
    credential = Credential()
    credential.database = database
    credential.user = user[:cls.USER_MAXIMUM_LENGTH_NAME]
    credential.user = slugify(credential.user)
    credential.password = make_db_random_password()
    credential.full_clean()
    credential.driver.create_user(credential)
    credential.save()
    return credential
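# Self-contained sketch (assumed values, not from the source) of the
# normalization order in create_new_credential() above: truncate the
# username to the maximum length first, then slugify the result. The
# maximum length and the underscore separator are illustrative only.
import re

USER_MAXIMUM_LENGTH_NAME = 16  # assumed value, not from the source

def normalize_db_user(user):
    user = user[:USER_MAXIMUM_LENGTH_NAME]
    return re.sub(r'[^a-z0-9]+', '_', user.lower()).strip('_')

# normalize_db_user('Alice Smith-Jones Jr.') == 'alice_smith_jone'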
def clean(self):
    if not self.pk:
        self.name = slugify(self.name)

    if self.name in self.__get_database_reserved_names():
        raise ValidationError(
            _("{} is a reserved database name".format(self.name)))
def get_globo_shows():
    soup = bs(get_page(GLOBOTV_MAIS_URL))
    content = soup.findAll('div', attrs={'class': re.compile('trilho-tag')})
    categories = [c.find('h2').text for c in content]
    shows = [dict([(util.slugify(img['alt']),
                    (img['alt'],
                     img['data-src'].replace(
                         img['data-src'][7:img['data-src'].index('=/') + 2],
                         '')))
                   for img in c.findAll('img') if '=/' in img['data-src']])
             for c in content]
    return (categories, shows)
def database_pre_save(sender, **kwargs):
    database = kwargs.get('instance')
    # slugify name
    database.name = slugify(database.name)
    if database.id:
        saved_object = Database.objects.get(id=database.id)
        if database.name != saved_object.name:
            raise AttributeError(_("Attribute name cannot be edited"))
def __init__(self, novel: Novel, ext: str, out_path: str = 'out'):
    super(Output, self).__init__()
    if not novel.success:
        self.log.warning("Novel wasn't parsed successfully.")
    self.novel = novel
    self.ext = ext
    self.out_path = out_path
    self.slug_title = slugify(novel.title, lowercase=False)
    self.filename = f"{self.slug_title}.{self.ext}"
    self.path = os.path.join(out_path, self.novel.url.hostname)
    make_sure_dir_exists(self.path)
def get_premiere_live(logo):
    # provider_id is hardcoded right now.
    provider_id = '520142353f8adb4c90000008'
    live = dict([(util.slugify(json['time_mandante']['sigla'] + 'x' +
                               json['time_visitante']['sigla']),
                  {
                      'name': json['time_mandante']['sigla'] + ' x ' +
                              json['time_visitante']['sigla'],
                      'logo': logo,
                      'playable': True,
                      'plot': json['campeonato'] + ': ' +
                              json['time_mandante']['nome'] + ' x ' +
                              json['time_visitante']['nome'] +
                              ' (' + json['estadio'] + '). ' + json['data'],
                      'id': json['id_midia'],
                  })
                 for json in get_page(PREMIERE_LIVE_JSON % provider_id)['jogos']])
    return live
def run(self, device, path, run):
    for browser in self.browsers:
        paths.OUTPUT_DIR = op.join(paths.OUTPUT_DIR,
                                   slugify(unicode(browser.package_name)))
        self.before_run(device, path, run, browser)
        self.after_launch(device, path, run, browser)
        self.start_profiling(device, path, run, browser)
        self.interaction(device, path, run, browser)
        self.stop_profiling(device, path, run, browser)
        self.before_close(device, path, run, browser)
        self.after_run(device, path, run, browser)
        paths.OUTPUT_DIR = op.abspath(op.join(paths.OUTPUT_DIR, os.pardir))
def __remove_user(database):
    from logical.models import Credential
    from util import slugify

    credential = Credential()
    credential.database = database
    credential.user = '******'.format(
        database.name[:Credential.USER_MAXIMUM_LENGTH_NAME])
    credential.user = slugify(credential.user)
    try:
        credential.driver.remove_user(credential)
    except InvalidCredential:
        pass
def __init__(self, *arg, **kwargs):
    self.name = util.slugify(kwargs.pop('name', 'my-camera'))
    self.directory = kwargs.pop('directory',
                                path.dirname(path.realpath(__file__)))
    self.width = kwargs.pop('width', 640)
    self.height = kwargs.pop('height', 480)
    self.rotation = kwargs.pop('rotation', 0)

    self.init_camera()
    if self.is_working():
        util.print_out('CAMERA LOADED', self.full_name())
    else:
        util.print_out('CAMERA FAILED', self.full_name())
def render_tag_pages(self):
    tag_dict = self.tag_dict
    for tag, posts in tag_dict.items():
        tag_slug = slugify(unicode(tag))
        with open(os.path.join(OUTPUT_DIR, 'tag', tag_slug + '.html'),
                  'w') as t_file:
            t_file.write(render_to('post_list.html', posts=posts,
                                   list_desc="Posts tagged <em>" + tag + "</em>"))
    with open(os.path.join(OUTPUT_DIR, 'tag', 'tag_cloud.html'),
              'w') as t_file:
        t_file.write(render_to('tag_cloud.html', tag_dict=tag_dict))
def download(self):
    """Download all images in the index."""
    for image, photoset in self:
        subdir = "__photostream"
        subdir = (slugify(photoset.title['_content'])
                  if photoset is not None else subdir)
        out_path = os.path.join(self.out, subdir)
        current_set_count = self.__photoset_counts.get(subdir, 0)
        self.__photoset_counts[subdir] = current_set_count + 1
        prefix = "%.4d" % self.__photoset_counts[subdir]
        image.save(outdir=out_path, prefix=prefix)
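# Minimal runnable sketch (not from the source) of the per-photoset
# numbering used by download() above: one counter per subdirectory
# yields zero-padded prefixes such as '0001', '0002', ...
from collections import defaultdict

_counts = defaultdict(int)

def next_prefix(subdir):
    _counts[subdir] += 1
    return "%.4d" % _counts[subdir]

# next_prefix('vacation') == '0001'; next_prefix('vacation') == '0002'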
def get_gplay_channels():
    soup = bs(get_page(GLOBOSAT_URL))
    # get lists
    # uls = soup.find('ul', attrs={'class': 'lista-canais'}).findAll('li')
    # uls = soup.find('ul', attrs={'id': 'mobile-submenu-canais-on-demand'}).findAll('li')[1:]
    channels, live, dummy = soup.findAll('ul',
                                         attrs={'class': 'submenu-desktop'})
    # get children tags and filter as imgs
    channels = dict([(util.slugify(img['alt']),
                      (img['alt'],
                       img['src'].replace(
                           img['src'][7:img['src'].index('=/') + 2], '')))
                     for img in channels.findChildren()[2::3]])
    # build live data
    live = dict([(util.slugify(img['alt']),
                  {
                      'name': img['alt'],
                      'logo': json['canal_logotipo'],
                      'playable': json['status'] == 'ativa',
                      'plot': ', '.join(reversed(json['programacao'].values()))
                              if json['programacao'] is not None else '',
                      'id': json['midia']['id_midia'],
                  })
                 for img, json in zip(live.findChildren()[2::3],
                                      get_page(GLOBOSAT_LIVE_JSON))])
    return (channels, live)
def credential_pre_save(sender, **kwargs):
    credential = kwargs.get('instance')
    # slugify user
    credential.user = slugify(credential.user)

    if credential.id:
        saved_object = Credential.objects.get(id=credential.id)
        if credential.user != saved_object.user:
            raise AttributeError(_("Attribute user cannot be edited"))
        if credential.database != saved_object.database:
            raise AttributeError(_("Attribute database cannot be edited"))
def handle_error(e):
    logging.exception(e)
    try:
        e.code
    except AttributeError:
        e.code = 500
        e.name = e.description = 'Internal Server Error'
    return util.jsonpify({
        'status': 'error',
        'error_code': e.code,
        'error_name': util.slugify(e.name),
        'error_message': e.name,
        'error_class': e.__class__.__name__,
        'description': e.description,
    }), e.code
def convertToDict(graph, resource, visited=None, lang=None):
    """
    Converts RDF @graph (instance of rdflib.graph.Graph) into dict.
    The dict's root is @resource.
    """
    # Mutable default arguments gotcha
    # http://docs.python-guide.org/en/latest/writing/gotchas/#what-you-should-do-instead
    if visited is None:
        visited = []

    resultDict = defaultdict(dict)
    for pred in set(graph.predicates(resource)):
        predicateTransformed = transformNode(pred)
        for obj in set(graph.objects(resource, pred)):
            if list(graph.predicates(obj)) and (obj not in visited):
                # Node with child nodes
                visited.append(obj)
                # Special handling of :extras that need to have values in the
                # form of [key, value]
                if predicateTransformed == "extras":
                    extraKey = list(graph.predicates(obj))[0]
                    extraValue = list(graph.objects(obj, extraKey))[0]
                    obj = [transformNode(item)
                           for item in [extraKey, extraValue]]
                # Usual handling of embedded resources
                else:
                    obj = convertToDict(graph, obj, visited)
            else:
                # Leaf node
                if predicateTransformed in RESOLVED_PROPERTIES:
                    try:
                        obj = resolveResource(obj, lang)
                    except TimeoutError:
                        pass
                obj = transformNode(obj)
                # Tags must be slugified
                if predicateTransformed == "tags":
                    obj = util.slugify(obj)
            if predicateTransformed in resultDict:
                prevObjects = resultDict[predicateTransformed]
                if isinstance(prevObjects, list):
                    resultDict[predicateTransformed].append(obj)
                else:
                    resultDict[predicateTransformed] = [prevObjects, obj]
            else:
                if predicateTransformed == "extras":
                    obj = [obj]
                resultDict[predicateTransformed] = obj
    return resultDict
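# Hedged usage sketch for convertToDict() above, assuming rdflib is
# installed; the namespace, predicate, and expected key are illustrative
# only (the real key depends on how transformNode() shortens predicates).
from rdflib import Graph, Literal, URIRef

g = Graph()
dataset = URIRef('http://example.org/dataset/1')
g.add((dataset, URIRef('http://purl.org/dc/terms/title'), Literal('Sample')))

# convertToDict(g, dataset) would yield something like {'title': 'Sample'}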
def database_pre_save(sender, **kwargs):
    database = kwargs.get("instance")
    if database.is_in_quarantine:
        if database.quarantine_dt is None:
            database.quarantine_dt = datetime.datetime.now().date()
    else:
        database.quarantine_dt = None

    if database.id:
        saved_object = Database.objects.get(id=database.id)
        if database.name != saved_object.name:
            raise AttributeError(_("Attribute name cannot be edited"))
    else:
        # new database
        LOG.debug("slugifying database's name for %s" % database.name)
        database.name = slugify(database.name)
def database_pre_save(sender, **kwargs):
    from notification.tasks import TaskRegister
    database = kwargs.get('instance')
    if database.is_in_quarantine:
        if database.quarantine_dt is None:
            database.quarantine_dt = datetime.datetime.now().date()
        if not database.quarantine_user:
            from dbaas.middleware import UserMiddleware
            database.quarantine_user = UserMiddleware.current_user()
    else:
        database.quarantine_dt = None
        database.quarantine_user = None

    if database.id:
        saved_object = Database.objects.get(id=database.id)
        if database.name != saved_object.name:
            raise AttributeError(_("Attribute name cannot be edited"))
        if database.team and saved_object.team:
            if database.team.organization != saved_object.team.organization:
                TaskRegister.update_organization_name_monitoring(
                    database=database,
                    organization_name=database.team.organization.name)
                if saved_object.team.external:
                    TaskRegister.update_database_monitoring(
                        database=database,
                        hostgroup=saved_object.team.organization.grafana_hostgroup,
                        action='remove')
                if database.team.external:
                    TaskRegister.update_database_monitoring(
                        database=database,
                        hostgroup=database.team.organization.grafana_hostgroup,
                        action='add')
    else:
        # new database
        if database_name_evironment_constraint(
                database.name, database.environment.name):
            raise AttributeError(
                _('%s already exists in production!') % database.name
            )
        LOG.debug("slugifying database's name for %s" % database.name)
        database.name = slugify(database.name)
def create_list(cls, name, sharing='private'):
    owner = TickUser.get_current_user(keys_only=True)
    slug = slugify(name)
    exists = cls.all(keys_only=True) \
                .filter('slug =', slug) \
                .filter('owner =', owner) \
                .filter('deleted =', False) \
                .count(1)
    if exists > 0:
        raise Exception('active list of this name already exists')
    new_list = cls(
        name=name,
        slug=slug,
        next_task_id=0,
        owner=owner,
        open=True,
        num_tasks=0,
        num_completed_tasks=0,
        sharing=sharing,
    )
    new_list.put()
    new_list.enqueue_indexing(url='/tick/tasks/searchindexing',
                              condition=('version', new_list.version))
    return new_list
def database_pre_save(sender, **kwargs):
    database = kwargs.get('instance')
    if database.is_in_quarantine:
        if database.quarantine_dt is None:
            database.quarantine_dt = datetime.datetime.now().date()
    else:
        database.quarantine_dt = None

    if database.id:
        saved_object = Database.objects.get(id=database.id)
        if database.name != saved_object.name:
            raise AttributeError(_("Attribute name cannot be edited"))
    else:
        # new database
        if database_name_evironment_constraint(
                database.name, database.environment.name):
            raise AttributeError(
                _('%s already exists in production!') % database.name
            )
        LOG.debug("slugifying database's name for %s" % database.name)
        database.name = slugify(database.name)
def error_handler(e):
    logging.exception(e)
    try:
        e.code
    except AttributeError:
        e.code = 500
        e.name = 'Internal Server Error'

    if flask.request.path.startswith('/_s/'):
        return util.jsonpify({
            'status': 'error',
            'error_code': e.code,
            'error_name': util.slugify(e.name),
            'error_message': e.name,
            'error_class': e.__class__.__name__,
        }), e.code

    return flask.render_template(
        'error.html',
        title='Error %d (%s)!!1' % (e.code, e.name),
        html_class='error-page',
        error=e,
    ), e.code