Example no. 1
def load_articles():
    """Load articles into database."""

    print("Articles")

    Article.query.delete()

    for row in open("seed_data/articles_seed.txt"):
        row = row.rstrip()
        owner_id, type_id, size, color, material, notes, is_private, is_loanable, is_giveaway = row.split("|")

        is_private = (is_private == "T")
        is_loanable = (is_loanable == "T")
        is_giveaway = (is_giveaway == "T")

        article = Article(owner_id=owner_id,
                    type_id=type_id,
                    size=size,
                    color=color,
                    material=material,
                    notes=notes,
                    is_private=is_private,
                    is_loanable=is_loanable,
                    is_giveaway=is_giveaway)

        # We need to add to the session or it won't ever be stored
        db.session.add(article)

    # Once we're done, we should commit our work
    db.session.commit()
Example no. 2
    def _get_full_article(self, short_article):
        url = short_article.url
        response = requests.get(url)
        soup = BeautifulSoup(response.content, 'html.parser')

        if "article-lock" in response.content.decode('utf-8'):
            logging.error("Pay-wall: %s" % url)
            return None

        try:
            article_text = soup.find('div', class_='article-body article-wrap')
            if article_text.find('article') is not None:
                article_text = article_text.find('article')
            text = self.get_formatted_article(text=article_text,
                                              lead=soup.find('p',
                                                             class_='lead'))
            author = soup.find('div', class_='article-source')
            if author is None:
                author = ""
            else:
                author = author.text

            comments = self._get_comments()
            full_article = Article(short_article, text, author, comments)
            return full_article
        except AttributeError:
            logging.error("Invalid URL: %s" % url)
        return None
Example no. 3
    def post(self):
        class_id = self.get_body_argument('class_id', None)
        title = self.get_body_argument('title', None)
        image_url = self.get_body_argument('image_url', None)
        note = self.get_body_argument('note', None)
        content = self.get_body_argument('content', None)

        now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        data = {
            'class_id': class_id,
            'title': title,
            'image_url': image_url,
            'note': note,
            'content': content,
            'author': 'LiJiaF',
            'create_date': now,
            'write_date': now
        }

        log.info('添加文章:' + json.dumps(data))  # "添加文章" = "adding article"

        try:
            session = DBSession()
            new_article = Article(**data)
            session.add(new_article)
            session.commit()
            session.close()
        except Exception as e:
            log.error(e)
            return self.finish(json.dumps({'code': -1, 'msg': '添加失败'}))  # '添加失败' = "failed to add"

        return self.finish(json.dumps({'code': 0, 'msg': '添加成功'}))  # '添加成功' = "added successfully"
Example no. 4
    def _get_full_article(self, short_article):
        url = short_article.url
        response = requests.get(url)
        soup = BeautifulSoup(response.content, 'html.parser')
        try:
            text = self.get_formatted_article(text=soup.find('div', class_='article-content mt3 mb3'),
                                              lead=soup.find('div', class_='h4 mt0 mb2 regular roboto-slab'))
            author_tag = soup.find('meta', attrs={'name': 'author'})
            author = author_tag['content'] if 'content' in author_tag.attrs else ""

            foreign_id_tag = soup.find('form', class_='clearfix mxn1 comment-form')
            if foreign_id_tag is not None:
                foreign_id = foreign_id_tag['data-foreign-key']
                comments = self._get_comments(foreign_id)
            else:
                comments = []
                logging.warning("Foreign ID is None.")
            total_comments = int(soup.find('a', class_='px1 light-blue').text)
            if total_comments != len(comments) and len(comments) > 0:
                logging.warning("Scraped wrong number of comments")
            full_article = Article(short_article, text, author, comments)
            return full_article
        except AttributeError:
            with open(r'log/politika_errors.txt', 'a') as f:
                f.write("%s\n" % url)
            logging.error("Invalid URL: %s" % url)
        return None
Example no. 5
def combine_articles(group_id, articles):
    if len(articles) == 1:
        return articles[0]

    # Build the merged article's fields in a dict, then construct the Article
    new_article = {}

    keywords = set()
    for article in articles:
        keywords |= set(kw.strip() for kw in article.keywords.split(','))
    new_article['keywords'] = ','.join(keywords)

    titles = defaultdict(int)
    for article in articles:
        titles[article.title] += 1

    _, title = max(
        (count * len(title)**2, title) for title, count in titles.items())
    new_article['title'] = title

    for name in 'pmc pmid doi'.split():
        identifiers = defaultdict(int)
        for article in articles:
            if getattr(article, name):
                identifiers[getattr(article, name)] += 1
        if identifiers:
            _, identifier = max(map(swap, identifiers.items()))
            new_article[name] = identifier

    return Article(**new_article)
Example no. 6
def run(input_filename, output_filename):
    articles = defaultdict(set)

    without_identifiers = set()

    reader = csv.reader(open(input_filename, 'r'))

    try:
        biggest = 0

        for i, article in enumerate(reader):
            article = Article(*article)
            identifiers = [(k, v) for k, v in article._asdict().items()
                           if k in IDENTIFIERS and v]
            data = None  # dict(identifiers)
            if not identifiers:
                without_identifiers.add(article.id)
                continue
            articles[identifiers[0]].add(article.id)
            for identifier in identifiers[1:]:
                if articles[identifiers[0]] is not articles[identifier]:
                    articles[identifiers[0]] |= articles[identifier]
                    articles[identifier] = articles[identifiers[0]]
                    if len(articles[identifier]) > biggest:
                        biggest = len(articles[identifier])

            if i % 10000 == 0:
                print("%7d" % i, resource.getrusage(
                    resource.RUSAGE_SELF)[2], biggest)
                if resource.getrusage(resource.RUSAGE_SELF)[2] > 1e7:
                    print("Using too much memory")
                    raise Exception
    except Exception as e:
        print(e)
Example no. 7
    def _get_full_article(self, short_article):
        url = short_article.url
        response = requests.get(url)
        soup = BeautifulSoup(response.content, 'html.parser')
        try:
            text = self.get_formatted_article(
                text=soup.find('div', class_='itemFullText'),
                lead=soup.find('h2', class_='itemSubtitle'))
            author = soup.find('span', class_='itemAuthor')
            if author is None:
                author = ""
            else:
                author = author.text.split('Piše:')[-1].strip()

            facebook_id = soup.find('meta', {'property': 'fb:app_id'})
            if facebook_id is not None:
                facebook_id = facebook_id['content']
                domain = self._generic_url.split('https://')[1].split('/')[0]
                comments = self._get_facebook_comments(url=url,
                                                       facebook_id=facebook_id,
                                                       domain=domain)
            else:
                comments = []

            if len(comments) > 0:
                logging.info('Total comments: %d' % len(comments))
            full_article = Article(short_article, text, author, comments)
            return full_article
        except AttributeError:
            logging.error("Invalid URL: %s" % url)
        return None
Example no. 8
def watcher():
    try:
        # Check for email every 10 seconds. 
        print(">> Listening for new emails")
        messages = inbox.Items
        message = messages.GetLast()
        if "[article]" in str.lower(message.subject):
            # Article is found

            # Check if the article is pre-existing 

            db_search = session.query(Article).filter(
                Article.name == str(message.Sender),
                Article.urlslug == generate_slug(message.subject)
            ).first()
            if db_search is not None:
                print("   - Found existing article")
            else:
                sub = message.subject.lower()
                new_article = Article(
                    title=sub.split("[article]")[1].strip(),
                    name=str(message.Sender),
                    body=message.HTMLBody,
                    alias=get_alias(message.Sender.GetExchangeUser().PrimarySmtpAddress),
                    time=dateutil.parser.parse(str(message.SentOn)),
                    urlslug=generate_slug(message.subject)
                )
                session.add(new_article)
                session.commit()
            
        time.sleep(10)
    except Exception as e:
        print(e)
        time.sleep(10)
Example no. 9
def run_decode():
    logging.debug('decode your input by our pretrained model')
    try:
        source = request.get_json()['source'] # GET request with String from frontend directly
        logging.debug('input: {}'.format(source)) # GET String-type context from the backend
        try:
            logging.debug('using the pretrained model.')
            sentNums, summary = summarizationModel.decode.run_(source)
        except Exception as e:
            logging.error(e)
            # Bail out early: `summary` and `results` are undefined if the model fails
            return json.dumps({'message': 'Summarization model failed.'})
        else:
            logging.debug('The number of sentences is {}'.format(sentNums))
            logging.debug('The abstract is that {}'.format(summary))
            results = {'sent_no': sentNums, 'final': summary}
            
        try:
            article = Content(text=source)
            abstract = Summary(text=summary)
            pair = Article(article=article.id, abstract=abstract.id)
            article.save()
            abstract.save()
            pair.save()
        except Exception as e:
            logging.error(e)

        return json.dumps(results)
    except:
        message = {'message' : 'Fail to catch the data from client.'}
        return json.dumps(message)
Example no. 10
    def _get_full_article(self, short_article: ShortArticle):
        url = short_article.url
        response = requests.get(url)
        soup = BeautifulSoup(response.content, 'html.parser')

        try:
            text = self.get_formatted_article(text=soup.find(
                'div', class_='article-text article-video-scroll clearfix'),
                                              lead=soup.find(
                                                  'p',
                                                  {'itemprop': 'description'}))
            author = soup.find('span', class_='inline-flex items-center')
            if author is None:
                author = ""
            else:
                author = author.text.strip()

            facebook_id = soup.find('meta',
                                    {'property': 'fb:app_id'})['content']
            domain = self._generic_url.split('https://')[1].split('/')[0]

            comments = self._get_facebook_comments(url=url,
                                                   facebook_id=facebook_id,
                                                   domain=domain)

            full_article = Article(short_article, text, author, comments)
            return full_article
        except AttributeError:
            logging.error("Invalid URL: %s" % url)
        return None
Example no. 11
    def _get_full_article(self, short_article):
        url = short_article.url
        response = requests.get(url)
        soup = BeautifulSoup(response.content, 'html.parser')
        try:
            text = self.get_formatted_article(text=soup.find('div', {'id': 'newsContent'}),
                                              lead=soup.find('p', class_='lead'))
            author_tag = soup.find('span', attrs={'class': 'article-author'})
            author = author_tag.text if author_tag is not None else ""

            comments = []
            # TODO: scrape title and date if not exists
            total_comments = int(soup.find('div', {'class': 'all-comments-link'}).text.split(" ")[-1][1:-1])
            if total_comments > 0:
                tag = soup.find('li', {'id': 'main-comment'})
                # Add the first comment and its sub-comments from the page
                c, last_comment_id = self._get_comment_and_sub_comments(tag)
                comments.extend(c)
                article_id = soup.find('input', {'type': 'hidden', 'id': 'articleId'})['value']
                # Add all other comments
                comments.extend(self._get_comments(article_id=article_id, last_comment_id=last_comment_id))
            # Check whether the number of comments on the page matches the number scraped
            if total_comments != len(comments) and len(comments) > 0:
                logging.warning("Scraped wrong number of comments: %d/%d" % (len(comments), total_comments))

            full_article = Article(short_article, text, author, comments)
            return full_article
        except AttributeError:
            with open(r'log/alo_errors.txt', 'a') as f:
                f.write("%s\n" % url)
            logging.error("Invalid URL: %s" % url)
        return None
Example no. 12
def run(input_filename, output_filename):
    # Python 3: csv works on text directly, so no per-field decode/encode is needed
    reader = map(lambda a: Article(*a),
                 csv.reader(open(input_filename, 'r', encoding='utf-8')))
    writer = csv.writer(open(output_filename, 'w', encoding='utf-8', newline=''))

    group_counter, split_counter = 0, 0

    for i, (group_id,
            articles) in enumerate(itertools.groupby(reader,
                                                     lambda a: a.group)):
        group_id, articles = int(group_id), list(articles)
        if i % 1000 == 0 and i:
            print("%8i %8i %8i %8i %8.5f%%" % (i, group_id, split_counter,
                                               group_counter, split_counter /
                                               group_counter * 100))

        groups = list(recluster(articles))

        groups.sort(key=lambda g: -len(g))
        if len(groups) > 1 or sum(len(g) for g in groups[1:]) > 8:
            split_counter += 1
            print(len(groups), sorted(map(len, groups)))
            #            for identifier in IDENTIFIERS:
            #                print "  ", identifier, [count(getattr(a, identifier) for a in g) for g in groups]
            split(groups)

        for group in groups:
            gc = str(group_counter)
            for article in group:
                article = article._replace(group=gc)
                writer.writerow(list(article))
            group_counter += 1
Example no. 13
def load_articles():
    """Load articles from seed-article.txt into database."""

    print("Articles")

    # Delete all rows in table, so if we need to run this a second time,
    # we won't be trying to add duplicate users
    Article.query.delete()

    # Read seed category file and insert data
    for row in open("seed/seed-article-2.txt"):
        row = row.rstrip()
        # Works for original seed data
        # user_id, category_id, description = row.split("|")

        # Lines starting with '--' are metadata; skip them
        if not row.startswith('--'):
            article_id, description, image, purchase_price, times_worn, sell_price, user_id, category_id = row.split(
                "|")

            # Prevent passing an empty string into field expecting float
            if not purchase_price:
                purchase_price = None

            article = Article(
                article_id=int(article_id),
                description=description,
                image=image,
                purchase_price=purchase_price,
                times_worn=times_worn,
                user_id=int(user_id),
                category_id=int(category_id),
            )
            db.session.add(article)
    db.session.commit()
Example no. 14
    def _get_full_article(self, short_article):
        url = short_article.url
        response = requests.get(url)

        while response.status_code == 429:
            time.sleep(5)
            print('Retry')
            response = requests.get(url)

        soup = BeautifulSoup(response.content, 'html.parser')
        try:
            text = self.get_formatted_article(
                text=soup.find('div', class_='itemFullText'),
                lead=soup.find('h2', class_='itemSubTitle'))
            author = soup.find('div', class_='col-authorname')
            if author is None:
                author = ""
            else:
                author = author.text
            facebook_id = soup.find('meta',
                                    {'property': 'fb:app_id'})['content']
            domain = self._generic_url.split('https://')[1].split('/')[0]

            comments = self._get_facebook_comments(url=url,
                                                   facebook_id=facebook_id,
                                                   domain=domain)
            if len(comments) > 0:
                logging.info('Total comments: %d' % len(comments))
            full_article = Article(short_article, text, author, comments)
            return full_article
        except AttributeError:
            logging.error("Invalid URL: %s" % url)
        return None
Example no. 15
def get_NYT_articles():

    parameters = {
        "api-key": os.environ["nytimes_api"],
        "section": "World",
        "time-period": "1"
    }

    article_request_string = "https://api.nytimes.com/svc/mostpopular/v2/mostviewed/{}/{}.json".format(
        parameters["section"], parameters["time-period"])
    geo_request_string = "https://api.nytimes.com/svc/semantic/v2/geocodes/query.json"

    article_response = requests.get(article_request_string, params=parameters)
    article_response_dict = article_response.json()
    articles = article_response_dict['results']

    filtered_articles = [
        article for article in articles if len(article["geo_facet"]) > 0
    ]

    for article in filtered_articles:
        geo_facet = article["geo_facet"][0]
        geo_facet = geo_facet.split('(')[0].title()

        geo_parameters = {
            "api-key": os.environ["nytimes_api"],
            "name": geo_facet
        }

        geo_response = requests.get(geo_request_string, params=geo_parameters)
        geo_response_dict = geo_response.json()

        if ('results' in geo_response_dict
                and len(geo_response_dict['results']) > 0
                and not Geo.query.filter_by(
                    geo_facet=geo_response_dict['results'][0]['name']).first()
            ):

            new_geo = Geo(
                geo_facet=geo_response_dict['results'][0]['name'],
                lat=geo_response_dict['results'][0]['latitude'],
                longt=geo_response_dict['results'][0]['longitude'],
                country_name=geo_response_dict['results'][0]['country_name'])
            db.session.add(new_geo)

            if not Article.query.filter_by(article_id=article['id']).first():
                new_article = Article(
                    article_id=article['id'],
                    article_title=article['title'],
                    news_source="nytimes",
                    abstract=article['abstract'],
                    geo_facet=geo_response_dict['results'][0]['name'],
                    lat=geo_response_dict['results'][0]['latitude'],
                    longt=geo_response_dict['results'][0]['longitude'],
                    category=article['section'],
                    url=article['url'])
                db.session.add(new_article)

    db.session.commit()
Example no. 16
 def combine_articles(self, articles):
     #        for article in articles:
     #            if article.id.count(':') == 1:
     #                return article
     article = {}
     for field_name in article_fields:
         article[field_name] = getattr(self, field_name)(articles)
     return Article(**article)
Example no. 17
async def index(request):
    summary = 'test string'
    articles = [
        Article(id='1',
                name='Test Blog',
                summary=summary,
                create_at=time.time() - 120),
        Article(id='1',
                name='Test Blog',
                summary=summary,
                create_at=time.time() - 120),
        Article(id='1',
                name='Test Blog',
                summary=summary,
                create_at=time.time() - 120),
    ]
    return {'__template__': 'blog.html', 'articles': articles}
Example no. 18
def get_MVVoices_articles():

    quote_page = 'https://www.mv-voice.com/news/'
    page = urllib2.urlopen(quote_page)
    soup = BeautifulSoup(page, 'html.parser')

    mountainview_news = {}
    article_counter = 0

    for line in soup.findAll('span', attrs={'id': 'slider_blurb'}):
        if "abstract" not in mountainview_news:
            mountainview_news["abstract"] = [line.text]
            article_counter += 1
        else:
            mountainview_news["abstract"].append(line.text)
            article_counter += 1

    all_links = soup.findAll('a', attrs={'href': re.compile("^/news/")})

    # for a in soup.find_all('a', href=True):
    #     if a.text:
    #         print(a['href'])

    for tag in all_links:
        for title in tag.findAll('span', attrs={'id': 'slider_headline'}):
            if "url" not in mountainview_news:
                mountainview_news["url"] = [tag['href']]
            else:
                mountainview_news["url"].append(tag['href'])

            if "article_title" not in mountainview_news:
                mountainview_news["article_title"] = [title.text]
            else:
                mountainview_news["article_title"].append(title.text)
    print(mountainview_news)

    if not Geo.query.filter_by(geo_facet="Mountain View").first():

        new_geo = Geo(geo_facet="Mountain View",
                      lat=37.386052,
                      longt=-122.083851,
                      country_name="United States")

        db.session.add(new_geo)

    for i in range(article_counter):
        new_article = Article(
            article_title=mountainview_news["article_title"][i],
            news_source="mountainviewvoice",
            abstract=mountainview_news['abstract'][i],
            geo_facet="Mountain View",
            lat=37.386052,
            longt=-122.083851,
            url='https://www.mv-voice.com' + mountainview_news['url'][i])

        db.session.add(new_article)

    db.session.commit()
Example no. 19
def _create_structure():
    category = Category('test category', 'category test', 'test_category')
    category.meta = {'id': 1, 'webtranslateit_ids': {'content': 1}}
    section = Section(category, 'test section', 'section test', 'test_section')
    section.meta = {'id': 2, 'webtranslateit_ids': {'content': 2}}
    category.sections.append(section)
    article = Article(section, 'test article', 'article body', 'test_article')
    article.meta = {'id': 3, 'webtranslateit_ids': {'content': 3, 'body': 4}}
    section.articles.append(article)
    return category, section, article
Example no. 20
def update_article():
  title = request.args.get('Title')
  author = request.args.get('Author')
  email = request.args.get('Email')
  date = request.args.get('Date')
  url = request.args.get('URL')
  content = request.args.get('Content')
  status = request.args.get('Status')
  article = Article()
  article.create(title, author, email, date, url, content, status)
  return redirect('/blog', code=302)
Example no. 21
 def _createArticle(self, row):
     article = Article()
     article.title = self._getFieldValue(row, 'article', 'title')
     article.pagination = self._getFieldValue(row, 'article', 'pagination')
     for i in ['author', 'author2']:
         author_fullname = self._getFieldValue(row, 'article', i)
         if author_fullname:
             author = self.authorFctry.fromFullName(author_fullname)
             article.authors.append(author)
     article.periodique = self.periodique
     return article
Example no. 22
def add_article():
    """Adds new clothing article and redirects to the previous category page."""

    category_id = request.form.get('category')
    description = request.form.get('article-description')
    file = request.files['article-image-upload']
    tag_ids = request.form.getlist('article-tags')
    new_tag_string = request.form.get('new-tags')
    purchase_price = request.form.get('purchase-price')

    category = Category.query.get(category_id)

    if not allowed_file(file.filename):
        flash(f'File extension .{file.filename.rsplit(".", 1)[1]} not allowed')
    if file and allowed_file(file.filename):

        # Sanitizes user input
        filename = secure_filename(file.filename)

        # Cloudinary upload function: 1) folders by user and category name,
        # 2) unique filename is true,
        # 3) use cloudinary's AI to remove background
        # ^ (commented out b/c paid service)
        upload_file = upload(
            file,
            folder=f"user/{session['user_email']}/{category.name}",
            unique_filename=1,
            # background_removal = "cloudinary_ai",
        )

        # For purchase_price, an empty string is not OK, but None is
        new_article = Article(user_id=session['user_id'],
                              category_id=category_id,
                              image=upload_file['secure_url'],
                              description=description,
                              purchase_price=purchase_price or None)

        all_tags = []
        for tag_id in tag_ids:
            all_tags.append(Tag.query.filter_by(tag_id=tag_id).one())

        # Any newly created tags should be added to this as well
        all_tags += Tag.parse_str_to_tag(new_tag_string, session['user_id'])

        # Then create all the tag relationships
        for tag in all_tags:
            new_article.add_tag(tag)

        db.session.add(new_article)
        db.session.commit()
        flash(f"Created new item in {category.name}")

    return redirect(f'/categories/{category_id}')
Example no. 23
def publish():
    if request.method != 'POST':
        return render_template('publish.html')

    title, content = request.form.get('title', ''), request.form.get('content', '')
    a = Article()
    a.title = title
    a.content = content
    a.uid = session['uid']
    db.session.add(a)  # insert
    db.session.commit()
    return '发布成功'  # "发布成功" = "published successfully"
Example no. 24
def load_articles():
    """Load articles into articles table. Adds categories to categories table."""

    for url in category_urls:
        #creates a newspaper object.
        category_newspaper = newspaper.build(url, memoize_articles=False)
        #gets the category code from the url.
        category_name = url[7:-11]

        #Queries for the category in the database.
        result = Category.query.filter_by(category_code=category_name).first()

        # If the category is not already in the database, add it
        # to the categories table.
        if not result:
            #Adds category to the database in the categories table.
            db_category = Category(
                category_code=category_name,
                url=url,
                english_category=category_dict[category_name])

            #Verifying that the category has been added.
            db.session.add(db_category)
            db.session.commit()

        print "\n\n\n\n\nArticle Category: %s \n\n\n\n\n" % (category_name)

        #creates a list of article objects.
        category_articles = category_newspaper.articles[:21]

        #iterates over the list of article objects.
        for article in category_articles:
            #downloads and parses through the article.
            article.download()
            print('after download')
            article.parse()
            print('after parse')

            #instantiates an instance in the articles table.
            db_article = Article(mainsite=url,
                                 title=article.title,
                                 authors=article.authors,
                                 language='es',
                                 url=article.url,
                                 category_code=category_name,
                                 top_image=article.top_image)

            #adds the article content to the database.
            db.session.add(db_article)
            db.session.commit()
            #Verifying article is committed.
            print "commited %s" % (db_article)
Example no. 25
def add_article(request):
    if 'form.submitted' in request.POST:
        with transaction.manager:
            article = Article(title=request.POST['title'],
                              body=request.POST['body'])
            DBSession.add(article)

        return HTTPFound(location=route_url('article_list', request))
    else:
        return render_to_response(
            'templates/add_article.pt',
            {'back_url': route_url('article_list', request)},
            request=request)
Example no. 26
def update_articles(session):
    articles = download_data_from_url()

    counter = 0
    for key, value in articles.items():
        for node, article_data in value.items():
            for q in article_data:
                exist = session.query(Article).filter_by(
                    article_id=q["ID"]).all()
                if not exist:
                    counter = counter + 1
                    new_article = Article(
                        article_id=q["ID"],
                        published_date=q["CREATED_DATE"],
                        title=q["TITLE"],
                        creator=q["AUTHOR"],
                        image_src=q["IMAGE"],
                        link_src=q["LINK"],
                        text=q["PEREX"],
                        keywords="TODO",
                        media_name="cti-doma",
                    )
                    session.add(new_article)
                    session.commit()
                    session.refresh(new_article)

                    new_question = Questions(
                        news_id=new_article.id,
                        question_text=q["QUIZ"]["QUIZ_TITLE"],
                        question_type=1,
                        order=1,
                    )
                    session.add(new_question)
                    session.commit()
                    session.refresh(new_question)

                    order = 1  # TODO
                    for key, answer in q["QUIZ"]["QUIZ_OPTIONS"].items():
                        for option in answer:
                            new_answer = Answers(
                                question_id=new_question.id,
                                answer_text=option["OPTION_LABEL"],
                                correct_answer_text="TBD",
                                correct_answers=True
                                if option["CORRECT"] is not None else False,
                                order=1,
                            )
                            session.add(new_answer)
                            session.commit()
    print(f"Added {counter} articles.")
Example no. 27
 def test_articles_to_xml(self):
     articles = []
     per = Periodique()
     per.id = 1
     per.name = "Le Seigneur des anneaux"
     bull = Bulletin()
     bull.title = "La communaute de l anneau"
     bull.number = "1"
     bull.period = "1960"
     bull.periodique = per
     article = Article()
     article.title = "Concerning hobbit"
     article.pagination = "p. 1-100"
     article.language = 'fre'
     author = Author()
     author.last_name = 'TOLKIEN'
     author.first_name = 'J.R.R'
     article.authors.append(author)
     article.bulletin = bull
     article.periodique = per
     articles.append(article)
     article = Article()
     article.title = "La comte"
     article.pagination = "p. 101-200"
     article.language = 'fre'
     article.authors.append(author)
     article.bulletin = bull
     article.periodique = per
     articles.append(article)
     conv = XmlConverter()
     flow = conv._toXml(articles)
     xml = len(etree.tostring(flow))
     xml_proof = len(
         '<unimarc><notice><rs>n</rs><dt>a</dt><bl>a</bl><hl>2</hl><el>1</el><rs>i</rs><f c="200"><s c="a">Concerning hobbit</s></f><f c="101"><s c="a">fre</s></f><f c="215"><s c="a">p. 1-100</s></f><f c="700"><s c="a">TOLKIEN</s><s c="b">J.R.R</s><s c="4">070</s></f><f c="461"><s c="t">Le Seigneur des anneaux</s><s c="9">id:1</s><s c="9">lnk:perio</s></f><f c="463"><s c="t">La communaute de l anneau</s><s c="e">1960</s><s c="v">1</s><s c="9">lnk:bull</s></f></notice><notice><rs>n</rs><dt>a</dt><bl>a</bl><hl>2</hl><el>1</el><rs>i</rs><f c="200"><s c="a">La comte</s></f><f c="101"><s c="a">fre</s></f><f c="215"><s c="a">p. 101-200</s></f><f c="700"><s c="a">TOLKIEN</s><s c="b">J.R.R</s><s c="4">070</s></f><f c="461"><s c="t">Le Seigneur des anneaux</s><s c="9">id:1</s><s c="9">lnk:perio</s></f><f c="463"><s c="t">La communaute de l anneau</s><s c="e">1960</s><s c="v">1</s><s c="9">lnk:bull</s></f></notice></unimarc>'
     )
     self.assertEqual(xml, xml_proof)
Example no. 28
def create_article(session: helpers.extend.session, user: hug.directives.user, response, warehouse_id: int, reference_id: int, quantity: int, expiry: fields.Date(allow_none=True)=None, location_id: fields.Int(allow_none=True)=None, tags=None):
    """Creates a article"""
    db_tags = queries.get_tags_from_ids(session, user, warehouse_id, tags)
    if db_tags is None:
        return response.error("invalid_tag_ids", falcon.HTTP_400)
    try:
        warehouse = queries.with_editor_role(queries.user_warehouse(session, user, warehouse_id)).one()
        location = queries.with_editor_role(queries.user_location(session, user, location_id)).one() if location_id else None
        reference = queries.with_editor_role(queries.user_reference(session, user, reference_id)).one()
        if warehouse.id != reference.warehouse.id or (location is not None and warehouse.id != location.warehouse.id):
            return helpers.response.error("bad_referenece_and_or_location", falcon.HTTP_400)
        article = Article(warehouse=warehouse, location=location, reference=reference, quantity=quantity, expiry=expiry, tags=db_tags)
        return article
    except NoResultFound:
        return helpers.response.error("warehouse_location_or_reference_not_found", falcon.HTTP_401)
Example no. 29
def article_add_confirm():
	# print(request.files)
	user_id = session["current_user"]
	type_id=request.form.get("type_id")
	image_file_1=request.files.get("image")
	image_file_2=request.files.get("image_2")
	image_file_3=request.files.get("image_3")
	image_file_4=request.files.get("image_4")
	size=request.form.get("size")
	color=request.form.get("color")
	material=request.form.get("material")
	notes=request.form.get("notes")
	is_private=request.form.get("is_private")
	is_loanable=request.form.get("is_loanable")
	is_giveaway=request.form.get("is_giveaway")

	bool_convert = {"True": True, "False": False}
	article = Article(
				owner_id=user_id,
				type_id=type_id,
				size=size,
				color=color,
				material=material,
				notes=notes,
				is_private=bool_convert[is_private],
				is_loanable=bool_convert[is_loanable],
				is_giveaway=bool_convert[is_giveaway]
				)

	#check if images are there
	img_1 = upload_to_s3(image_file_1)
	image = Image(img_url=img_1)
	article.images.append(image)
	
	#check if images exist and if yes add them to the database
	img_in_form = [image_file_2, image_file_3, image_file_4]
	for img_file in img_in_form:
		if img_file is not None:
			img = upload_to_s3(img_file)
			image = Image(img_url=img)
			article.images.append(image)

	
	db.session.add(article)
	db.session.commit()
	
	flash('New article added.')
	return redirect('/my_closet')
Example no. 30
def parse_article(record):
    # For some attributes, directly extract text from XML element
    pmid = record.findtext('.//PMID')
    print(pmid)
    vol = record.findtext('.//JournalIssue/Volume')
    issue = record.findtext('.//JournalIssue/Issue')
    journal_iso = record.findtext('.//Journal/ISOAbbreviation')
    doi = record.findtext('.//ArticleIdList/ArticleId[@IdType="doi"]')
    pages = record.findtext('.//Pagination/MedlinePgn')

    # For other attributes, process XML elements
    title = parse_title(record)
    pub_year = parse_pub_year(record)
    journal = parse_journal(record)
    # Parse authors and collective authors
    authors = record.find('.//AuthorList')
    parsed_authors = []
    collective_authors = []
    # NOTE The current method below uses total programming. Might consider
    # using Defensive programming. At the moment, no distinction is made
    # between an author that was not parsed correctly (for some reason) and
    # an author that is not an Author but rather a CollectiveAuthor.
    for author in authors:
        collective_author = parse_collective_author(author)
        author = parse_author(author)
        # TODO Should we get rid of duplicate authors/collective authors at
        # this stage, or after creating an Author/CollectiveAuthor model
        # object?
        if collective_author and (collective_author not in collective_authors):
            collective_authors.append(collective_author)
        if author:
            parsed_authors.append(author)

    # Initialize an Article
    article = Article(
                title=title,
                pub_year=pub_year,
                journal=journal,
                journal_iso=journal_iso,
                vol=vol,
                issue=issue,
                pages=pages,
                doi=doi,
                pmid=pmid,
                authors=parsed_authors,
                collective_authors=collective_authors
    )
    return article