Example No. 1
 def fetch(self):
     categories = []
     zendesk_categories = self.req.get_items(model.Category)
     for zendesk_category in zendesk_categories:
         category_filename = utils.slugify(zendesk_category['name'])
         category = model.Category(zendesk_category['name'],
                                   zendesk_category['description'],
                                   category_filename)
         print('Category %s created' % category.name)
         category.meta = zendesk_category
         zendesk_sections = self.req.get_items(model.Section, category)
         categories.append(category)
         for zendesk_section in zendesk_sections:
             section_filename = utils.slugify(zendesk_section['name'])
             section = model.Section(category, zendesk_section['name'],
                                     zendesk_section['description'],
                                     section_filename)
             print('Section %s created' % section.name)
             section.meta = zendesk_section
             zendesk_articles = self.req.get_items(model.Article, section)
             category.sections.append(section)
             for zendesk_article in zendesk_articles:
                 logging.debug('Article Info:' + zendesk_article['title'])
                 if zendesk_article['body']:
                     body = h2t.handle(zendesk_article.get('body', ''))
                     article_filename = utils.slugify(
                         zendesk_article['title'])
                     article = model.Article(section,
                                             zendesk_article['title'], body,
                                             article_filename)
                     print('Article %s created' %
                           article.name.encode('utf-8').strip())
                     article.meta = zendesk_article
                     section.articles.append(article)
     return categories
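Note: every example on this page relies on some slugify helper (a project-local utils.slugify, Django's slugify, or the python-slugify package) whose implementation is not shown here. As a rough, hypothetical reference only, and not the code used by any of the projects listed, a minimal slugify could look like this:

import re
import unicodedata

def slugify(value):
    """Turn arbitrary text into a lowercase, hyphen-separated slug (illustrative sketch only)."""
    # Map accented characters to their closest ASCII equivalents.
    value = unicodedata.normalize('NFKD', str(value)).encode('ascii', 'ignore').decode('ascii')
    # Drop everything that is not alphanumeric, underscore, whitespace or a hyphen.
    value = re.sub(r'[^\w\s-]', '', value).strip().lower()
    # Collapse runs of whitespace and hyphens into single hyphens.
    return re.sub(r'[-\s]+', '-', value)

# slugify('Café com Leite!')  ->  'cafe-com-leite'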
Example No. 2
 def fetch(self):
     categories = []
     zendesk_categories = self.req.get_items(model.Category)
     for zendesk_category in zendesk_categories:
         category_filename = utils.slugify(zendesk_category['name'])
         category = model.Category(zendesk_category['name'], zendesk_category['description'], category_filename)
         print('Category %s created' % category.name)
         category.meta = zendesk_category
         zendesk_sections = self.req.get_items(model.Section, category)
         categories.append(category)
         for zendesk_section in zendesk_sections:
             section_filename = utils.slugify(zendesk_section['name'])
             section = model.Section(category, zendesk_section['name'],
                                     zendesk_section['description'], section_filename)
             print('Section %s created' % section.name)
             section.meta = zendesk_section
             zendesk_articles = self.req.get_items(model.Article, section)
             category.sections.append(section)
             for zendesk_article in zendesk_articles:
                 body = html2text.html2text(zendesk_article.get('body', ''))
                 article_filename = utils.slugify(zendesk_article['title'])
                 article = model.Article(section, zendesk_article['title'], body, article_filename)
                 print('Article %s created' % article.name)
                 article.meta = zendesk_article
                 section.articles.append(article)
     return categories
Example No. 3
def add_task():
    form = TaskForm(request.form)
    if form.validate():
        tags = [slugify(t) for t in form.tags.data.split(' ')]
        statusp = [slugify(t) for t in form.statusp.data.split(' ')]
        new_task = Task(task=form.task.data,
                        user=auth.get_logged_in_user(),
                        due=form.due.data)
        new_task.save()
        for t in tags:
            try:
                new_tag = Tag.get(tag=t)
            except Tag.DoesNotExist:
                new_tag = Tag(tag=t)
                new_tag.save()
            tasktag = TaskTag(task=new_task.id, tag=new_tag.id)
            tasktag.save()

        for t in statusp:
            try:
                new_status = Status.get(status=t)
            except Status.DoesNotExist:
                new_status = Status(status=t)
                new_status.save()
            statustag = TaskStatus(task=new_task.id, status=new_status.id)
            statustag.save()

        flash("New Task: %s" % (new_task.task))
    return redirect(url_for('home'))
Example No. 4
File: logr.py Project: rtorr/Logr
def index():
    """
    Lists all articles, separated by category. This method maps to the front
    page.
    """
    # Create a dictionary `files` that separates articles by category.
    for file_ in listdir(ARTICLE_DIR):
        if isfile(ARTICLE_DIR + file_) and file_ != 'empty':
            files = dict(Miscellaneous=[])
            break
        files = dict()

    for file_ in listdir(ARTICLE_DIR):
        if isdir(os.path.join(ARTICLE_DIR, file_)):
            files[file_] = []
            for f in listdir(os.path.join(ARTICLE_DIR, file_)):
                if f.endswith('.md'):
                    with open(os.path.join(ARTICLE_DIR, file_, f), 'r') as f_open:
                        title=f_open.readline()
                        files[file_].append(dict(file_=f, slug=slugify(title), title=title.decode('utf-8')))
        else:
            if file_.endswith('.md'):
                with open(os.path.join(ARTICLE_DIR, file_), 'r') as f_open:
                    title=f_open.readline()
                    files['Miscellaneous'].append(dict(file_=file_, slug=slugify(title), title=title.decode('utf-8')))

    blurb = open('pages/front.md', 'r').read()

    return render_template('index.html', files=files, blurb=blurb)
Example No. 5
 def fetch(self):
     categories = []
     zendesk_categories = self.req.get_items(model.Category)
     for zendesk_category in zendesk_categories:
         category_filename = utils.slugify(zendesk_category['name'])
         category = model.Category(zendesk_category['name'],
                                   zendesk_category['description'],
                                   category_filename)
         print('Category %s created' % category.name)
         category.meta = zendesk_category
         zendesk_sections = self.req.get_items(model.Section, category)
         categories.append(category)
         for zendesk_section in zendesk_sections:
             section_filename = utils.slugify(zendesk_section['name'])
             section = model.Section(category, zendesk_section['name'],
                                     zendesk_section['description'],
                                     section_filename)
             print('Section %s created' % section.name)
             section.meta = zendesk_section
             zendesk_articles = self.req.get_items(model.Article, section)
             category.sections.append(section)
             for zendesk_article in zendesk_articles:
                 body = html2text.html2text(zendesk_article.get('body', ''))
                 article_filename = utils.slugify(zendesk_article['title'])
                 article = model.Article(section, zendesk_article['title'],
                                         body, article_filename)
                 print('Article %s created' % article.name)
                 article.meta = zendesk_article
                 section.articles.append(article)
     return categories
Example No. 6
def convert_to_yaml(year, series, slug, pretalx_results, yaml_filename):
    with open(pretalx_results) as json_file:
        talks = json.load(json_file)

        for index, talk in enumerate(talks['results']):
            slug = slugify(talk['title'] + '-' + talk['speakers'][0]['name'])
            yamldoc.append(
                OrderedDict([
                    ('title', talk['title']),
                    ('slug', slug),
                    ('series', series),
                    ('series_slug', slug),
                    ('year', int(year)),
                    ('speakers', []),
                    ('abstract', markdown.markdown(talk['abstract'])),
                ]))

            for s in talk['speakers']:
                yamldoc[index]['speakers'].append(
                    OrderedDict([
                        ('name', s['name']),
                        ('slug', slugify(s['name'])),
                        ('avatar', s['avatar']),
                    ]))

        yaml_obj = yaml.YAML(typ='safe')
        yaml_obj.Representer = RoundTripRepresenter
        yaml_obj.default_flow_style = False
        with open(yaml_filename, 'w+') as yaml_file:
            yaml_obj.dump(yamldoc, yaml_file)
Example No. 7
 def create_question(self, title, description, userprofile, tags):
     if self.exists(slug=slugify(title)):
         raise QuestionAlreadyExistsException
     question = Question(title=title, slug=slugify(title),
                         description=description, raised_by=userprofile)
     question.save()
     question.tags.add(*tags)
     return question 
Example No. 8
 def find_or_create(cls, continent, realm, name):
     key_name = cls.key_name( continent, realm, name )
     c = cls.get_by_key_name( key_name )
     if c:
         return c
     c = Character( key_name = key_name, continent = continent, realm = realm, name = name )
     c.urltoken = slugify(name)
     c.realm_urltoken = slugify(realm)
     return c
Example No. 9
 def create_question(self, title, description, userprofile, tags):
     if self.exists(slug=slugify(title)):
         raise QuestionAlreadyExistsException
     question = Question(title=title,
                         slug=slugify(title),
                         description=description,
                         raised_by=userprofile)
     question.save()
     question.tags.add(*tags)
     return question
Example No. 10
    def __init__(self, file_path, extra_data={}):
        self.text_file = TextFile(file_path)
        self.title = self.text_file.title
        self.extra_data = extra_data
        self.filename, self.ext = path.splitext(self.text_file.file_path)
        self.slug = self.text_file.headers.get('slug') or slugify(self.title) or \
                                                          slugify(self.filename)
        self.body = self.markup()

        super(Post, self).__init__(self.slug)
        self.headers = self.text_file.headers
Example No. 11
    def __init__(self, file_path, extra_data={}):
        self.text_file = TextFile(file_path)
        self.title = self.text_file.title
        self.extra_data = extra_data
        self.filename, self.ext = path.splitext(self.text_file.file_path)
        self.slug = self.text_file.headers.get('slug') or slugify(self.title) or \
                                                          slugify(self.filename)
        self.body = self.markup()

        super(Post, self).__init__(self.slug)
        self.headers = self.text_file.headers
Example No. 12
 def post(self):
     assert self.creator
     name = self.read('name')
     if not (name and name.strip()): raise PostingError("Please enter a name for your Boragle") 
     slug = utils.slugify(self.read('url'))
     if slug == '': slug = utils.slugify(name)
     if Boragle.find_by_slug(slug): raise PostingError('This url is already in use.')
     new_boragle = Boragle(name = self.read('name'),
             slugs = [slug],
             desc = self.read('desc'),
             creator = self.creator)
     new_boragle.put()
     self.redirect(new_boragle.url)
Example No. 13
 def __init__(self, req, fs):
     self.req = req
     self.fs = fs
     self.users = {}
     self.user_segments = {
         utils.slugify(segment['name']): segment['id']
         for segment in self.req.get_user_segments()
     }
     self.user_segments['all'] = None
     self.permission_groups = {
         utils.slugify(segment['name']): segment['id']
         for segment in self.req.get_permission_groups()
     }
Example No. 14
def read_player_line(line):
    n1 = line.index('"')
    n2 = line.index('"', n1 + 1)
    nick = line[n1 + 1 : n2]
    p = Player()
    setattr(p, "nick", nick)
    setattr(p, "slug_nick", slugify(p.nick))
    attribs = line[n2 + 2 :].split(" ")
    for attr in attribs[2:]:
        attr = attr.split(":")
        if len(attr) == 2:
            key, val = attr
            if key in _LOG_FLOAT_PROPERTIES:
                setattr(p, key, float(val))
            else:
                assert key in _LOG_INT_PROPERTIES
                setattr(p, key, int(val))
        elif len(attr) == 5:
            weapon, shots, hits, kills, deaths = attr
            try:
                hitrate = float(hits) / float(shots)
            except ZeroDivisionError:
                hitrate = 0.0
            setattr(
                p, weapon, dict(shots=int(shots), hits=int(hits), kills=int(kills), deaths=int(deaths), hitrate=hitrate)
            )
        else:
            print(attr)
    for prop in _LOG_INT_PROPERTIES:
        if not hasattr(p, prop):
            setattr(p, prop, 0)
    for prop in _LOG_FLOAT_PROPERTIES:
        if not hasattr(p, prop):
            setattr(p, prop, 0.0)
    return p
Example No. 15
def save_aviso(form):
    entity=exists_entity(Aviso, 'titulo', form.titulo)
    slug = slugify(form.titulo)
    if (entity is not None):
        entity.titulo=form.titulo
        entity.slug=slug
        entity.data_publicacao=datetime.strptime(form.data_publicacao,'%Y-%m-%d %H:%M:%S')
        entity.author=users.get_current_user()
        entity.texto=form.texto
        entity.ativo=form.ativo
        db.put(entity)
    elif (str(form.key) != ''):
        entity=db.get(form.key)
        entity.titulo=form.titulo
        entity.slug=slug
        entity.data_publicacao=datetime.strptime(form.data_publicacao,'%Y-%m-%d %H:%M:%S')
        entity.author=users.get_current_user()
        entity.texto=form.texto
        entity.ativo=form.ativo
        db.put(entity)
    else:
        while find_slug(Aviso, slug):
            slug = versionate(slug)
        db.put(Aviso(
            titulo=form.titulo,
            slug=slug,
            data_publicacao = datetime.strptime(form.data_publicacao,'%Y-%m-%d %H:%M:%S'),
            author = users.get_current_user(),
            texto=form.texto,
            ativo=form.ativo))
Example No. 16
def new_job():
    if not g.site.domain == g.user:
        abort(403)

    j = Job()
    if request.method == "POST":
        portfolio = Portfolio.objects.get(site=g.site.domain)
        job_name = request.form.get("name")
        slugs = [__j.slug for __j in Job.objects.filter(site=g.site.domain)]
        counter = 1
        slug = slugify(job_name)
        __slug = slug
        while __slug in slugs:
            counter += 1
            __slug = "%s_%d" % (slug, counter)
        j.slug = __slug
        j.name = job_name
        j.site = g.site.domain
        j.categories = [ c.strip() for c in request.form.get("categories").split(",") ]
        j.intro = request.form.get("intro")
        j.description = request.form.get("description")
        j.slides = []
        texts = request.form.getlist("text")
        image_urls = request.form.getlist("image_url")
        captions = request.form.getlist("caption")
        caption_links = request.form.getlist("caption_link")
        for text, image_url, caption, caption_link in zip(texts, image_urls, captions, caption_links):
            if text or image_url:
                j.slides.append(Slide(text=text, image_url=image_url, caption=caption, caption_link=caption_link))
        j.save()
        portfolio.jobs.append(j)
        portfolio.save()
        return redirect(url_for(".job", slug=j.slug))
    return render_template("edit_job.html", job=j)
Example No. 17
 def test_college_valid_creation(self):
     data = {'name':'SVPCET'}
     College.objects.create_college(name=data['name'])
     college = College.objects.latest()
     self.assertTrue(college)
     self.assertEquals(college.name, data['name'])
     self.assertEquals(college.slug, slugify(data['name']))
Example No. 18
 def __init__(self, dflog_path, messaging=None, **time_kwargs):
     #calculate the beginning of the week from logfile name for ublox timestamps
     week_epoch = utils.logpath2dt(dflog_path)
     self.read_dflog(dflog_path, epoch=week_epoch)
     self.set_dtype_from_fmt()
     self.flight_name = utils.slugify(dflog_path.split('/')[-1])
     self.messaging = messaging
Example No. 19
def remove(configuration, env, branch):
    # Init Fabric
    fabric_initer(configuration, env)

    # Params
    branch_slug = utils.slugify(branch)
    domain_path = utils.get_domain_path(configuration, env)

    if api.env.role == "local":
        return False

    else:
        with api.cd(domain_path):
            domain_filename = utils.get_domain_filename(configuration, env, branch)
            domain_available = domain_path + "/sites-available/" + domain_filename
            domain_available_response = False
            if ffiles.exists(domain_available):
                api.run("rm %s" % domain_available)
                domain_available_response = True

            domain_enabled = domain_path + "/sites-enabled/" + domain_filename
            domain_enabled_response = False
            if ffiles.exists(domain_enabled):
                api.run("rm %s" % domain_enabled)
                domain_enabled_response = True

            return (domain_available_response and domain_enabled_response)
Example No. 20
def save_grupo(form):
    entity=exists_entity(Grupo, 'titulo', form.titulo)
    slug = slugify(form.titulo)
    if (entity is not None):
        entity.titulo=form.titulo
        entity.ordem=int(form.ordem)
        entity.slug=slug
        entity.thumb_banner=form.thumb_banner
        entity.endereco_link=form.endereco_link
        db.put(entity)
    elif (str(form.key) != ''):
        entity=db.get(form.key)
        entity.titulo=form.titulo
        entity.ordem=int(form.ordem)
        entity.slug=slug
        entity.thumb_banner=form.thumb_banner
        entity.endereco_link=form.endereco_link
        db.put(entity)
    else:
        while find_slug(Grupo, slug):
            slug = versionate(slug)
        db.put(Grupo(
            titulo=form.titulo,
            ordem=int(form.ordem),
            slug=slug,
            thumb_banner = form.thumb_banner,
            endereco_link = form.endereco_link))
Example No. 21
 def _get_group_attributes_and_filename(self, group):
     attributes = {
         'name': group['name'],
         'description': group['description']
     }
     filename = utils.slugify(group['name'])
     return attributes, filename
Example No. 22
def convert_to_yaml(year, series, series_slug, yaml_output, pretalx_slug):
    if not os.environ.get('PRETALX_TOKEN'):
        print('Error: PRETALX_TOKEN not found in environment variables.')
        return
    http_headers = {'Authorization': 'Token ' + os.environ['PRETALX_TOKEN']}
    submissions_url = f'https://pretalx.com/api/events/{pretalx_slug}/submissions/?state=confirmed'
    print(f'Loading submissions from {submissions_url}...')
    submissions = requests.get(submissions_url, headers=http_headers)
    if submissions.status_code != 200:
        print(f'Error: submissions request failed: {submissions.status_code}: {submissions.text}')
        return

    for index, talk in enumerate(submissions.json()['results']):
        slug = slugify(talk['title'] + '-' + talk['speakers'][0]['name'])
        print(f'Processing talk {slug}...')

        speaker_info = retrieve_speaker_info([s['code'] for s in talk['speakers']], http_headers, pretalx_slug)
        if not speaker_info:
            print(f'Error: failed to retrieve info for speakers {[s["code"] for s in talk["speakers"]]}')
            return

        yamldoc.append(OrderedDict([
            ('title', talk['title']),
            ('speakers', speaker_info),
        ]))

    print(f'Writing output to YAML file {yaml_output}')
    yaml_obj = yaml.YAML(typ='safe')
    yaml_obj.Representer = RoundTripRepresenter
    yaml_obj.default_flow_style = False
    with open(yaml_output, 'w+') as yaml_file:
        yaml_obj.dump(yamldoc, yaml_file)
    print('Completed!')
Example No. 23
def retrieve_speaker_info(speaker_codes, http_headers, pretalx_slug):
    result = []
    for speaker_code in speaker_codes:
        speaker_url = f'https://pretalx.com/api/events/{pretalx_slug}/speakers/{speaker_code}/'
        print(f'Loading speaker info from {speaker_url}...')
        speaker_response = requests.get(speaker_url, headers=http_headers)
        if speaker_response.status_code != 200:
            print(f'Error: speaker request failed: {speaker_response.status_code}: {speaker_response.text}')
            return

        def search_answers(speaker_dict, search_string):
            for answer in speaker_dict['answers']:
                if search_string in answer['question']['question']['en'].lower():
                    return answer['answer']

        speaker = speaker_response.json()
        speaker_slug = slugify(speaker['name'])

        result.append(OrderedDict([
            ('name', speaker['name']),
            ('pronouns', search_answers(speaker, 'pronouns')),
            ('facts', search_answers(speaker, 'facts')),
            ('pronounce', search_answers(speaker, 'pronounce')),
        ]))
    return result
Example No. 24
File: logr.py Project: durden/Logr
def show(slug):
    """
    Search the `articles` directory for an article whose slug matches the URL
    parameter. When we find the article, render it.
    """
    article = None

    # Searching articles ..
    for file_ in listdir(ARTICLE_DIR):
        if file_.endswith(EXTENSIONS):
            with open(os.path.join(ARTICLE_DIR, file_), 'r') as f:
                if slug == slugify(f.readline()):
                    article = os.path.join(ARTICLE_DIR, file_)
                    break

    # If we didn't find the article, it doesn't exist.
    if not article:
        article = os.path.join(PAGES_DIR, 'article-404.md')

    with open(article, 'r') as f:
        lines = f.read().split('\n')
        # Title should be the first line of the file. 
        title = lines.pop(0).strip().decode('utf8')
        # Category should be second.
        category = lines.pop(0).strip().decode('utf8')
        # The rest is the article itself.
        source = '\n'.join(lines).decode('utf8')
        
    return render_template('show.html', article=dict(title=title, source=source))
Example No. 25
def process_item_fn(row):
    links = row.xpath('.//a[contains(@class,"title")]')
    for link in links:
        try:
            votes = row.xpath('.//div[contains(@class, "score likes")]')[0].text_content().strip()
            row_link = (
                link.attrib["href"]
                if link.attrib["href"].startswith("http")
                else "http://reddit.com" + link.attrib["href"]
            )
            if int(votes) < 20:
                return False
            comment_a = row.xpath('.//a[contains(text(), "comment")]')[0]
            comments = comment_a.text.split()[0]
            comments = "0" if "comment" in comments else comments
            title = normalize(link.text_content())
            tagline = row.xpath('.//p[@class="tagline"]')[0].text_content().split("by")
            date = row.xpath(".//time/@datetime")[0]
            author = tagline[1].split()[0]
            return {
                "_id": slugify(title),
                "title": title,
                "author": author,
                "likes": {"at": datetime.datetime.now().isoformat()[:19], "n": int(votes)},
                "comments": comments,
                "date": date,
                "url": row_link,
                "description": "",
                "comment_link": comment_a.attrib["href"],
            }
        except ValueError as e:
            print("reddit error", e)
    return False
Example No. 26
    def dedup(self):

        seen = set()
        rules = []
        tokens = defaultdict(set)

        for rule in self.rules:

            if rule.is_terminal:

                for tar in rule.tars:
                    tokens[rule.src].add(tar)

            else:

                key = hash(slugify( [rule.src] + list(rule.tars) ))

                if key not in seen:

                    rules.append(rule)
                    seen.add(key)

        for (src, tars) in tokens.items():
            rules.append(Rule(src, tars, True))

        self.rules = rules
Example No. 27
	def clean_subject(self):
		try:
			p = Post.objects.get(slug = slugify(self.cleaned_data['subject']))
			raise ValidationError(_("Post with identical slug already exists, try modifying your post subject."))
		except ObjectDoesNotExist:
			pass
		return self.cleaned_data['subject']
Example No. 28
def generate_place_slug(sender, instance, **kwargs):
    if not instance.slug:
        slug_proposal = slugify(u'%s' % (instance.name))
        prev_slug = Place.objects.filter(slug__startswith=slug_proposal).exclude(pk=instance.pk)
        if prev_slug:
            slug_proposal += u'-%s' % len(prev_slug)
        instance.slug = slug_proposal
Example No. 29
 def create_college(self, name):
     slug = slugify(name)
     if self.exists(slug=slug):
         raise CollegeAlreadyExistsException
     college = College(name=name, slug=slug)
     college.save()
     return college
Example No. 30
def buildDualFaced(cardData):
    cachedImages = os.listdir('cache')
    sluggedFileName = '{}.jpg'.format(utils.slugify(cardData['name']))
    if sluggedFileName in cachedImages:
        print("file in cache")
        return sluggedFileName
    else:
        print("file not in cache")
        imageRequests = map(get, cardData['imageurls'])
        images = [
            Image.open(img)
            for img in [BytesIO(image.content) for image in imageRequests]
        ]
        widths, heights = zip(*(i.size for i in images))
        total_width = 976
        max_height = 680
        new_im = Image.new('RGB', (total_width, max_height))

        x_offset = 0

        for im in images:
            new_im.paste(im, (x_offset, 0))
            x_offset += im.size[0]
        new_im.save('./cache/{}'.format(sluggedFileName))
        return sluggedFileName
Example No. 31
def generate_location_slug(sender, instance, **kwargs):
    if not instance.slug:
        slug_proposal = slugify(u'%s' % (instance.name))
        prev_slug = Location.objects.filter(slug__startswith=slug_proposal)
        if prev_slug:
            slug_proposal += u'-%s' % len(prev_slug)
        instance.slug = slug_proposal
Example No. 32
 def validate_id(form, field):
     form.id.data = slugify(field.data)
     if Project.query.get(form.id.data):
         raise ValidationError(
             Markup(
                 _("The project identifier is used to log in and for the URL of the project. We tried to generate an identifier for you but a project with this identifier already exists. Please create a new identifier you will be able to remember."
                   )))
Example No. 33
def process_item_fn(row):
    lnk = row.xpath('.//div[@class="summary"]/h3/a/@href')
    if not lnk:
        return
    lnk = str(lnk[0])
    row_link = lnk if lnk.startswith('http') else 'https://stackoverflow.com' + lnk
    title = row.xpath('.//div[@class="summary"]/h3/a')[0].text
    user_details = row.xpath('.//div[@class="user-details"]/a/@href')[0].split('/')
    author, author_profile = user_details[1], user_details[2]
    author_src = str(row.xpath('.//div[contains(@class, "gravatar-wrapper-32")]/img/@src')[0])
    bounty = row.xpath('.//div[@class="bounty-indicator"]')[0].text[1:]
    date = str(row.xpath('.//span[@class = "relativetime"]/@title')[0][:10])
    votes = row.xpath('.//span[contains(@class, "vote-count-post")]/strong')[0].text
    answers = row.xpath('.//div[@class="stats"]/div[contains(@class, "answered")]/strong')[0].text
    views = row.xpath('.//div[contains(@class, "views")]')[0].text
    desc = row.xpath(
        './/div[@class = "summary"]/div[@class = "excerpt"]')[0].text.replace('\r\n', ' ').replace('\n', ' ')
    tags = [x.split('/')[-1] for x in row.xpath('.//a[@class = "post-tag"]/@href')]
    sohub_item = {'_id': slugify(title),
                  'title': title,
                  'author': author,
                  'author_src': author_src,
                  'author_profile': author_profile,
                  'bounty': bounty,
                  'date': date,
                  'likes': {'at': datetime.datetime.now().isoformat()[:19], 'n': int(votes)},
                  'views': views,
                  'answers': answers,
                  'description': desc,
                  'tags': tags,
                  'url': row_link}
    return sohub_item
Example No. 34
def page_edit(page_id=None):
    """view edits/creates a page (if called with no page.id)"""
    if page_id == None:
        try:
            page = Page(author=g.user_id, content="")
        except:
            flash("Problems creating a new page", category="danger")
            return redirect(url_for('index'))
    else:
        page = get_object_or_404(Page, page_id)

    if request.method == 'POST':
        title = request.form.get('title', '')
        slug = request.form.get('slug', '')
        author = g.user_id
        content = request.form.get('content', '')
        is_published = request.form.get('is_published') == 'on'
        show_sidebar = request.form.get('show_sidebar') == 'on'
        show_title = request.form.get('show_title') == 'on'
        show_nav = request.form.get('show_nav') == 'on'
        if len(title) > 0 and len(content) > 0:
            page.title = title
            page.slug = slugify(slug)
            page.content = content
            page.is_published = is_published
            page.show_sidebar = show_sidebar
            page.show_nav = show_nav
            page.show_title = show_title
            page.save()
            flash("Page saved.", category="success")
            return redirect(url_for('index'))
        else:
            flash("Please fill in BOTH title and content.", category="danger")

    return render_template('page_edit.html', page=page)
Example No. 35
 def test_company_valid_creation(self):
     data = {'name':'Infosys'}
     Company.objects.create_company(**data)
     company = Company.objects.latest()
     self.assertTrue(company)
     self.assertEquals(company.name, data['name'])
     self.assertEquals(company.slug, slugify(data['name']))
Example No. 36
 def create_college(self, name):
     slug = slugify(name)
     if self.exists(slug=slug):
         raise CollegeAlreadyExistsException
     college = College(name=name, slug=slug)
     college.save()
     return college 
Example No. 37
    def new_post(post_pages, is_post=True):
        # Guess where we should put this
        for path, _, _, use_in_rss in post_pages:
            if use_in_rss == is_post:
                break
        else:
            path = post_pages[0][0]

        print "Creating New Post"
        print "-----------------\n"
        title = raw_input("Enter title: ").decode(sys.stdin.encoding)
        slug = utils.slugify(title)
        data = u"\n".join([title, slug, datetime.datetime.now().strftime("%Y/%m/%d %H:%M")])
        output_path = os.path.dirname(path)
        meta_path = os.path.join(output_path, slug + ".meta")
        pattern = os.path.basename(path)
        if pattern.startswith("*."):
            suffix = pattern[1:]
        else:
            suffix = ".txt"
        txt_path = os.path.join(output_path, slug + suffix)

        if os.path.isfile(meta_path) or os.path.isfile(txt_path):
            print "The title already exists!"
            exit()

        with codecs.open(meta_path, "wb+", "utf8") as fd:
            fd.write(data)
        with codecs.open(txt_path, "wb+", "utf8") as fd:
            fd.write(u"Write your post here.")
        print "Your post's metadata is at: ", meta_path
        print "Your post's text is at: ", txt_path
Example No. 38
def process_item_fn(row):
    lnk = row.xpath('h3/a')[0]
    row_link = lnk.attrib['href'] if lnk.attrib['href'].startswith(
        'http') else 'https://github.com' + lnk.attrib['href']
    res = normalize(row.text_content()).split('\n')
    github_item = {
        '_id': slugify(res[1] + '-' + res[3]),
        'name': res[3],
        'author': res[1],
        'date': datetime.datetime.now().isoformat()[:19],
        'contributors': [{'src': k, 'name': v}
                         for k, v in zip(row.xpath('.//a/img/@src'),
                                         row.xpath('.//a/img/@title'))],
        'description': res[4],
        'url': "https://github.com/{}/{}/".format(res[1], res[3])
    }
    github_item.update(get_repo_page(row_link))
    return github_item
Example No. 39
def process_item_fn(row):
    links = row.xpath('.//a[contains(@class,"title")]')
    for link in links:
        try:
            votes = row.xpath('.//div[contains(@class, "score likes")]'
                              )[0].text_content().strip()
            row_link = link.attrib['href'] if link.attrib['href'].startswith(
                'http') else 'http://reddit.com' + link.attrib['href']
            if int(votes) < 20:
                return False
            comment_a = row.xpath('.//a[contains(text(), "comment")]')[0]
            comments = comment_a.text.split()[0]
            comments = '0' if 'comment' in comments else comments
            title = normalize(link.text_content())
            tagline = row.xpath(
                './/p[@class="tagline"]')[0].text_content().split('by')
            date = row.xpath('.//time/@datetime')[0]
            author = tagline[1].split()[0]
            return {
                '_id': slugify(title),
                'title': title,
                'author': author,
                'likes': {
                    'at': datetime.datetime.now().isoformat()[:19],
                    'n': int(votes)
                },
                'comments': comments,
                'date': date,
                'url': row_link,
                'description': '',
                'comment_link': comment_a.attrib['href']
            }
        except ValueError as e:
            print('reddit error', e)
    return False
Example No. 40
 def __init__(self, dflog_path, messaging=None, **time_kwargs):
     #calculate the beginning of the week from logfile name for ublox timestamps
     week_epoch=utils.logpath2dt(dflog_path)
     self.read_dflog(dflog_path, epoch=week_epoch)
     self.set_dtype_from_fmt()
     self.flight_name=utils.slugify(dflog_path.split('/')[-1])
     self.messaging=messaging
Example No. 41
def save_foto(form):
    entity=exists_entity(Foto, 'titulo', form.titulo)
    slug = slugify(form.titulo)
    if (entity is not None):
        entity.titulo=form.titulo
        entity.slug=slug
        entity.data_publicacao=datetime.strptime(form.data_publicacao,'%Y-%m-%d %H:%M:%S')
        entity.thumb=form.thumb
        entity.foto=form.foto
        entity.status=form.status
        entity.link_acao_foto=form.link_acao_foto
        db.put(entity)
    elif (str(form.key) != ''):
        entity=db.get(form.key)
        entity.titulo=form.titulo
        entity.slug=slug
        entity.data_publicacao=datetime.strptime(form.data_publicacao,'%Y-%m-%d %H:%M:%S')
        entity.thumb=form.thumb
        entity.foto=form.foto
        entity.status=form.status
        entity.link_acao_foto=form.link_acao_foto
        db.put(entity)
    else:
        while find_slug(Foto, slug):
            slug = versionate(slug)
        db.put(Foto(
            titulo=form.titulo,
            slug=slug,
            data_publicacao = datetime.strptime(form.data_publicacao,'%Y-%m-%d %H:%M:%S'),
            thumb = form.thumb,
            foto = form.foto,
            status = form.status,
            link_acao_foto=form.link_acao_foto))
Example No. 42
def show(slug):
    """
    Search the `articles` directory for an article whose slug matches the URL
    parameter. When we find the article, render it.
    """
    # Find the right article
    for file_ in listdir(ARTICLE_DIR):
        if file_.endswith(EXTENSIONS):
            with open(os.path.join(ARTICLE_DIR, file_), 'r') as f:
                if slug == slugify(f.readline()):
                    article = os.path.join(ARTICLE_DIR, file_)
                    break

    # Now that we've found the right article, let's process it.
    with open(article, 'r') as f:
        lines = f.read().split('\n')
        
        # We don't need title or category, but it's easier to explicitly state
        # why we're popping the first two lines.
        title = lines.pop(0).strip() # Title should appear on the first line
        category = lines.pop(0).strip() # Category should appear on the second

        source = '\n'.join(lines).decode('utf8')
        
    return render_template('show.html', article=dict(source=source))
Example No. 43
def edit_entry(slug):
    """Allows the user to edit the current entry."""
    try:
        entry = models.Entry.get(
            models.Entry.slug == slug
        )
    except models.DoesNotExist:
        abort(404)
    else:
        form = forms.EditEntry()
        if form.validate_on_submit():
            edited_entry = models.Entry.update(
                title=form.title.data.strip(),
                date=form.date.data,
                time=form.time.data,
                learned=form.learned.data.strip(),
                resources=form.resources.data.strip(),
                tags=form.tags.data.strip()
            ).where(models.Entry.slug == slug)
            edited_entry.execute()
            new_entry = models.Entry.get(
                models.Entry.slug == slug
            )
            new_entry.slug = utils.slugify(entry.title)
            new_entry.save()
            flash("Entry edited successfully", "success")
            return redirect(url_for('show_entries'))
        return render_template('edit.html', form=form)
Example No. 44
 def __init__(self, title=None, created_at=None):
     if title:
         self.title = title
         self.slug = slugify(title)
     if created_at:
         self.created_at = created_at
         self.updated_at = created_at
Example No. 45
def process_item_fn(row, conf):
    links = row.xpath('.//a[contains(@class,"title")]')
    for link in links:
        try:
            votes = row.xpath('.//div[contains(@class, "score likes")]')[0].text_content().strip()
            row_link = link.attrib['href'] if link.attrib['href'].startswith(
                'http') else 'http://reddit.com' + link.attrib['href']
            if int(votes) < conf['reddit_minimum_votes']:
                return False
            comment_a = row.xpath('.//a[contains(text(), "comment")]')[0]
            comments = comment_a.text.split()[0]
            comments = '0' if 'comment' in comments else comments
            title = normalize(link.text_content())
            tagline = row.xpath('.//p[@class="tagline"]')[0].text_content().split('by')
            date = row.xpath('.//time/@datetime')[0]
            author = tagline[1].split()[0]
            return {'_id': slugify(title),
                    'title': title,
                    'author': author,
                    'likes': {'at': datetime.datetime.now().isoformat()[:19], 'n': int(votes)},
                    'comments': comments,
                    'date': date,
                    'url': row_link,
                    'description': '',
                    'comment_link': comment_a.attrib['href']}
        except ValueError as e:
            print('reddit error', e)
    return False
Example No. 46
  def update(self, body, is_draft=False):
    if is_draft:
      self.draft = body
    else:
      memcache.flush_all()
      self.updated = datetime.datetime.now()
      self.draft = None
      self.body = body

    if not self.path and not is_draft:
      # Post is being published for the first time
      self.published = self.updated = datetime.datetime.now()
      same_path = True
      count = 0
      while same_path:
        path = utils.format_post_path(self, count)
        same_path = BlogPost.get_by_key_name(path)
        count += 1

      self.path = path
      if self.is_saved() or not is_draft:
        new_post = self.set_key_name(path)
        new_post.put()
        self.is_saved() and self.delete()
        BlogDate.create_for_post(new_post)
        return new_post

    if not self.is_saved():
      new_post = self.set_key_name('/draft:' + utils.slugify(self.title))
      new_post.put()
      return new_post

    self.put()
    return self
Example No. 47
    def build_key(self, data):

        values = (i for i in data)
        text_values = (value if value else 'none' for value in values)
        joined_values = '_'.join(text_values)

        return utils.slugify(joined_values)
Example No. 48
 def _pre_put_hook(self):
     self.key = ndb.Key(self.__class__, self.identifier)
     self.search_fields = self.name.split(' ')
     self.search_fields.extend(
         [name.lower() for name in self.name.split(' ')])
     self.search_fields.extend(slugify(self.name).split('-'))
     self.search_fields = set(self.search_fields)
Example No. 49
 def create_company(self, name):
     slug = slugify(name)
     if self.exists(slug=slug):
         raise CompanyAlreadyExistsException
     company = Company(name=name, slug=slug)
     company.save()
     return company
Example No. 50
File: views.py Project: gcr/twg
def create_new_story(request):
    if request.method == "POST":
        # They attempted to create a story
        story_form = forms.NewStoryForm(request.POST)
        frag_form = forms.AddFragmentForm(request.POST)
        if story_form.is_valid() and frag_form.is_valid():
            # Store the new story and the fragment
            story = Story(
                name = story_form.cleaned_data['story_name'],
                last_update_date = datetime.datetime.now()
            )
            # Give 'er a slug!
            story.slug = slugify(story.name, instance=story)
            story.save()
            story.add_fragment(frag_form.cleaned_data['fragment_text'], request.user)
            return HttpResponseRedirect(reverse('story_detail', kwargs={'slug':story.slug}))
    else:
        # Return a blank form page
        story_form = forms.NewStoryForm()
        frag_form = forms.AddFragmentForm()
        
    return render_to_response("stories/add_new_story.html",
        RequestContext(request, {
            'story_form':story_form,
            'frag_form':frag_form,
        })
    )
Example No. 51
def handle_upload():
    document_name = request.form['document-name']
    f = request.files['input-file']

    # Upload to S3 synchronously
    s3_key = s3_upload(f)

    # Create slug for document
    document_slug = slugify(document_name)

    # Insert counts into DB
    upload_meta = insert_file_upload_meta(
        document_name = document_name,
        document_slug = document_slug,
        s3_key = s3_key,
        filename = f.filename,
    )

    put_doc_on_queue(
        document_slug = document_slug,
        time_uploaded = upload_meta['time_uploaded'],
        s3_key = s3_key,
    )

    flash(
        '"{document_name}" uploaded to S3 as <a href="{dst}">{dst}</a>'
        '<br><br>Check out your word cloud at '
        '<a href="/wordcloud/{slug}">{base}wordcloud/{slug}</a>!'
        .format(
            document_name = document_name,
            dst = get_s3_path(s3_key),
            slug = document_slug,
            base = request.base_url,
    ))
    return render_template('index.html')
Example No. 52
def remove(configuration, env, branch):
    # Init Fabric
    fabric_initer(configuration, env)

    # Params
    branch_slug = utils.slugify(branch)
    domain_path = utils.get_domain_path(configuration, env)

    if api.env.role == "local":
        return False

    else:
        with api.cd(domain_path):
            domain_filename = utils.get_domain_filename(
                configuration, env, branch)
            domain_available = domain_path + "/sites-available/" + domain_filename
            domain_available_response = False
            if ffiles.exists(domain_available):
                api.run("rm %s" % domain_available)
                domain_available_response = True

            domain_enabled = domain_path + "/sites-enabled/" + domain_filename
            domain_enabled_response = False
            if ffiles.exists(domain_enabled):
                api.run("rm %s" % domain_enabled)
                domain_enabled_response = True

            return (domain_available_response and domain_enabled_response)
Example No. 53
def generate_location_slug(sender, instance, **kwargs):
    if not instance.slug:
        slug_proposal = slugify(u'%s' % (instance.name))
        prev_slug = Location.objects.filter(slug__startswith=slug_proposal)
        if prev_slug:
            slug_proposal += u'-%s' % len(prev_slug)
        instance.slug = slug_proposal
Example No. 54
def process_item_fn(row):
    try:
        desc = row.xpath('.//p[contains(@class, "tweet-text")]'
                         )[0].text_content().encode('utf8').strip()
        date = int(row.xpath('.//span/@data-time')[0])
        url = row.xpath('.//a[contains(@href, "status/")]/@href')
        if not url:
            return False
        url = 'https://twitter.com' + url[-1]
        handle_counter = Counter([
            x.text_content() for x in row.xpath(
                '//span[@class="username js-action-profile-name"]//b')
        ])
        name = handle_counter.most_common(1)[0][0] if handle_counter else ''
        likes = row.xpath(
            './/span[@class="ProfileTweet-actionCountForPresentation"]/text()')
        twitter_item = {
            '_id': slugify(desc.decode()),
            'name': name,
            'date': str(arrow.get(date)),
            'description': desc.decode(),
            'url': url,
            'likes': int(likes[-1]) if likes else 0
        }
        return twitter_item
    except:
        return False
Example No. 55
def show(slug):
    """
    Search the `articles` directory for an article whose slug matches the URL
    parameter. When we find the article, render it.
    """
    # Find the right article
    for file_ in listdir(ARTICLE_DIR):
        if file_.endswith(EXTENSIONS):
            with open(os.path.join(ARTICLE_DIR, file_), 'r') as f:
                if slug == slugify(f.readline()):
                    article = os.path.join(ARTICLE_DIR, file_)
                    break

    # Now that we've found the right article, let's process it.
    with open(article, 'r') as f:
        lines = f.read().split('\n')

        # We don't need title or category, but it's easier to explicitly state
        # why we're popping the first two lines.
        title = lines.pop(0).strip()  # Title should appear on the first line
        category = lines.pop(0).strip()  # Category should appear on the second

        source = '\n'.join(lines).decode('utf8')

    return render_template('show.html', article=dict(source=source))
Example No. 56
 def join_workplace(self,
                    workplace_name,
                    workplace_type,
                    designation='',
                    years_of_exp=''):
     workplace_slug = slugify(workplace_name)
     if workplace_type and (self.is_employee or self.is_professor):
         if workplace_type == 'Company':
             if Company.objects.exists(slug=workplace_slug):
                 workplace = Company.objects.get(slug=workplace_slug)
             else:
                 workplace = Company.objects.create_company(
                     name=workplace_name)
         elif workplace_type == 'College':
             if College.objects.exists(slug=workplace_slug):
                 workplace = College.objects.get(slug=workplace_slug)
             else:
                 workplace = College.objects.create_college(
                     name=workplace_name)
         WorkInfo.objects.filter(userprofile=self).delete()
         WorkInfo.objects.create_workinfo(self,
                                          workplace=workplace,
                                          designation=designation,
                                          years_of_exp=years_of_exp)
     else:
         raise Exception("Student can't join a Workplace. He needs to be an Employee")
Example No. 57
 def create_company(self, name):
     slug = slugify(name)
     if self.exists(slug=slug):
         raise CompanyAlreadyExistsException
     company = Company(name=name, slug=slug)
     company.save()
     return company
Example No. 58
def get_posts():
    url = 'https://pypi.python.org/pypi?%3Aaction=packages_rss'

    response = requests.get(url)

    if hasattr(response.content, 'decode'):
        tree = xml.etree.ElementTree.fromstring(
            response.content.decode('utf8'))
    else:
        tree = xml.etree.ElementTree.fromstring(response.content)

    channel = tree.find('channel')
    items = channel.findall('item')

    trending_posts = []
    for item in items:
        i_dict = {
            '_id': slugify(item[0].text.split()[0]),
            'name': item[0].text.split()[0],
            'url': item[1].text,
            'description': item[3].text or '',
            'date': str(arrow.get(item[4].text.split(' GMT')[0],
                                  'DD MMM YYYY HH:mm:ss')),
            'likes': []
        }
        trending_posts.append(i_dict)

    return trending_posts
Example No. 59
def generate_place_slug(sender, instance, **kwargs):
    if not instance.slug:
        slug_proposal = slugify(u'%s' % (instance.name))
        prev_slug = Place.objects.filter(
            slug__startswith=slug_proposal).exclude(pk=instance.pk)
        if prev_slug:
            slug_proposal += u'-%s' % len(prev_slug)
        instance.slug = slug_proposal
Example No. 60
def scrape(output_format, ids, outfile='', indent=1, processes=2):
    # Start with including the old MP list (those not on Parlamento.pt)
    # TODO
    # from utils import getpage, load_csv
    # csvkeys = ('leg', 'constituency_code', 'constituency', 'party', 'name',
    #            'date_start', 'date_end')
    # data = load_csv('deputados-antigos.csv', keys=csvkeys, header=True)
    # return data

    pool = multiprocessing.Pool(processes=processes)
    mprows = {}
    active_ids = get_active_mps()

    processed_mps = []
    try:
        processed_mps = (
            processed_mp
            for processed_mp in pool.map(process_mp, ids, chunksize=4)
            if processed_mp)
    except KeyboardInterrupt:
        pool.terminate()

    for processed_mp in processed_mps:
        shortname = processed_mp['shortname']
        if shortname in mprows:
            log.warning(
                "Duplicate shortname: %s (%s, %s)" %
                (shortname, mprows[shortname]['id'], processed_mp['id']))
        mprows[shortname] = processed_mp

    for k in mprows.keys():
        if mprows[k]['id'] in active_ids:
            mprows[k]['active'] = True
        else:
            mprows[k]['active'] = False

    # Sort by shortname (we sort on the slug so that accented characters
    # don't mess up the ordering)
    mprows = OrderedDict(sorted(mprows.items(), key=lambda x: slugify(x[0])))

    logger.info("Saving to file %s..." % outfile)
    if output_format == "json":
        depsfp = io.open(outfile, 'w+')
        depsfp.write(dumps(mprows, ensure_ascii=False, indent=indent))
        depsfp.close()
    elif output_format == "csv":
        depsfp = open(outfile, 'w+')
        writer = csv.DictWriter(depsfp, fieldnames=FIELDNAMES)
        writer.writeheader()
        for rownumber in mprows:
            row = mprows[rownumber]
            row.pop("mandates")
            for key in row:
                if type(row[key]) == list:
                    # convert lists to ;-separated strings
                    row[key] = "; ".join(row[key])
            row = {k: str(v).strip() for k, v in row.items()}
            writer.writerow(row)