def get_all_entries():
    with sqlite3.connect("./daily-journal.db") as conn:

        conn.row_factory = sqlite3.Row
        db_cursor = conn.cursor()

        db_cursor.execute("""
        SELECT
            e.id,
            e.concept,
            e.entry,
            e.date,
            e.moodId,
            m.id,
            m.label
        FROM entries e
        JOIN moods m
            ON m.id = e.moodId
        """)

        entries = []

        dataset = db_cursor.fetchall()

        for row in dataset:
            entry = Entry(row['id'], row['concept'], row['entry'], row['date'],
                          row['moodId'])

            # Both e.id and m.id are selected, so row['id'] is ambiguous;
            # use e.moodId, which equals m.id via the JOIN condition.
            mood = Mood(row['moodId'], row['label'])

            entry.mood = mood.__dict__
            entries.append(entry.__dict__)

    return json.dumps(entries)
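
The journal snippets in this collection assume small Entry and Mood model classes whose __init__ parameters line up with the selected columns. A minimal sketch of what such classes might look like (hypothetical; attribute names vary between the snippets):

class Entry:
    # Hypothetical model matching the positional arguments used above.
    def __init__(self, id, concept, entry, date, mood_id):
        self.id = id
        self.concept = concept
        self.entry = entry
        self.date = date
        self.mood_id = mood_id

class Mood:
    # Hypothetical model: a mood id plus its human-readable label.
    def __init__(self, id, label):
        self.id = id
        self.label = label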
def get_single_entry(id):
    with sqlite3.connect("./daily-journal.db") as conn:
        conn.row_factory = sqlite3.Row
        db_cursor = conn.cursor()

        db_cursor.execute(
            """
        SELECT
            e.id,
            e.concept,
            e.entry,
            e.date,
            e.moodId,
            m.id,
            m.label
        FROM entries e
        JOIN moods m
            ON m.id = e.moodId
        WHERE e.id = ?
        """, (id, ))

        data = db_cursor.fetchone()

        entry = Entry(data['id'], data['concept'], data['entry'], data['date'],
                      data['moodId'])

        # As above, data['id'] is ambiguous; e.moodId equals m.id via the JOIN.
        mood = Mood(data['moodId'], data['label'])

        entry.mood = mood.__dict__

        return json.dumps(entry.__dict__)
Example #3
 def post(self):
     url = self.request.get("url", "")
     if url:
         slug = nice_guid()
         entry = Entry(time=datetime.datetime.now(),
                       url=url, slug=slug, packaged=False)
         entry.put()
         self.render_template("code.html", slug=slug)
     else:
         return self.get()
Example #4
    def post(self):
        upload_files = self.get_uploads('package')
        blob_info = upload_files[0]

        slug = nice_guid()
        entry = Entry(time=datetime.datetime.now(),
                      url=str(blob_info.key()), slug=slug, packaged=True)
        entry.put()

        self.render_template("code.html", slug=slug)
Example #5
    def test_insert_entry__invalid_entry_type__expect_raise_error(self):
        connection = Connection('postgres', 'root', '127.0.0.1:5432', 'planting_manager_teste')
        session = connection.session()
        meeiro_id = create_meeiro(name='tadeu', cpf='55584447213', rg='50658045x')
        session.expunge_all()
        session.close()

        with self.assertRaises(RowNotFound):
            Entry.insert(meeiro_id=meeiro_id, entry_date=datetime(2018, 10, 1),
                         entry_type_id=23, entry_value=100.0,
                         description='veneno', db_session=connection.session())
Example #6
    def test_insert_entry__invalid_meeiro__expect_raise_error(self):
        connection = Connection('postgres', 'root', '127.0.0.1:5432', 'planting_manager_teste')
        session = connection.session()
        entry_type_id = create_entry_type('despesas', connection)
        session.expunge_all()
        session.close()

        with self.assertRaises(RowNotFound):
            Entry.insert(meeiro_id=21, entry_date=datetime(2018, 10, 1),
                         entry_type_id=entry_type_id, entry_value=100.0,
                         description='veneno', db_session=connection.session())
Example #7
    def create(self, patch: Patch, spoiler: Spoiler, settings: Settings) -> str:
        if not self.enabled:
            raise EnvironmentError("Database not enabled")

        if spoiler is not None:
            entry = Entry(settings.seed, patch.version, patch.patch, patch.patchName, spoiler.spoiler, spoiler.spoilerName, settings)
        else:
            entry = Entry(settings.seed, patch.version, patch.patch, patch.patchName, None, None, settings)

        result = self.collection.insert_one(entry.__dict__)
        return str(result.inserted_id)
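
insert_one returns an InsertOneResult whose inserted_id becomes the string key above. A standalone round-trip sketch (the connection URI and database/collection names here are illustrative, not the project's):

from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")  # illustrative URI
collection = client["seeds_db"]["entries"]

result = collection.insert_one({"seed": "123", "version": "6.2"})
print(str(result.inserted_id))
print(collection.find_one({"_id": result.inserted_id}))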
Example #8
 def post(self):
     entry = Entry(
         author   = users.get_current_user().email(),
         title    = self.request.get('title'),
         slug     = self.request.get("slug"),
         # tags     = self.request.get("tags"),
         template = self.request.get("template"),
         body     = self.request.get("body"),
         draft    = bool(self.request.get("draft"))
     )
     entry.put()
     entry.index()
     self.redirect("/" + entry.category.slug + "/" + entry.slug)
def search_entry(entry):
    with sqlite3.connect('./daily-journal.db') as conn:
        conn.row_factory = sqlite3.Row
        db_cursor = conn.cursor()

        # Bind the search term with a ? parameter instead of interpolating it
        # into the SQL string, which would allow SQL injection.
        db_cursor.execute(
            """
        SELECT
            e.id,
            e.concept,
            e.entry,
            e.date,
            e.moodId
        FROM entries e
        WHERE e.entry LIKE ?
        """, (f'%{entry}%', ))

        entries = []

        dataset = db_cursor.fetchall()

        for row in dataset:
            entry = Entry(
                row['id'],
                row['concept'],
                row['entry'],
                row['date'],
                row['moodId'],
            )
            entries.append(entry.__dict__)

        return json.dumps(entries)
Example #10
def wp_editPage(blogid, pageid, struct, publish):

    entry = Entry.get_by_id(int(pageid))

    if 'wp_slug' in struct:
        entry.slug = struct['wp_slug']

    if 'wp_page_order' in struct:
        entry.menu_order = int(struct['wp_page_order'])

    entry.title = struct['title']
    entry.content = struct['description']
    if 'mt_text_more' in struct:
        entry.content = entry.content + '<!--more-->' + struct['mt_text_more']
    if publish:
        entry.publish(True)
    else:
        entry.save()

    return True
Example #11
def entries_query(query):
    my_query = '%{}%'.format(query)

    with sqlite3.connect('./daily_journal.db') as conn:
        conn.row_factory = sqlite3.Row
        db_cursor = conn.cursor()

        db_cursor.execute(
            """
        SELECT
            e.id,
            e.concept,
            e.entry,
            e.date,
            e.moodId
        FROM entries e
        WHERE e.entry LIKE ?
        """, (my_query, ))

        entries = []

        dataset = db_cursor.fetchall()

        for row in dataset:
            entry = Entry(row['id'], row['concept'], row['entry'], row['date'],
                          row['moodId'])

            entries.append(entry.__dict__)

    return json.dumps(entries)
Example #12
    def action_updatelink(self):
        link_format = self.param('linkfmt')

        if link_format:
            link_format = link_format.strip()
            g_blog.link_format = link_format
            g_blog.save()
            for entry in Entry.all():
                vals = {
                    'year': entry.date.year,
                    'month': str(entry.date.month).zfill(2),
                    'day': entry.date.day,
                    'postname': entry.slug,
                    'post_id': entry.post_id,
                }

                if entry.slug:
                    newlink = link_format % vals
                else:
                    newlink = '?p=%(post_id)s' % vals

                if entry.link != newlink:
                    entry.link = newlink
                    entry.put()
            self.write('"ok"')
        else:
            self.write('"Please input url format."')
def get_data(session,
             limit,
             offset=None,
             order_descending=False,
             entry_id=False,
             entry_type=False,
             handled_utc=False,
             original_url=False,
             canonical_url=False,
             note=False):
    log.info(f"Getting data of type {entry_type} from {Table.__tablename__},"
             f" limit = {limit}, order_descending = {order_descending}")

    # Store the values in a dict
    filter_options = {
        Table.entry_id: entry_id,
        Table.entry_type: entry_type,
        Table.handled_utc: handled_utc,
        Table.original_url: original_url,
        Table.canonical_url: canonical_url,
        Table.note: note
    }

    # Create a new query
    q = session.query(Table)

    # Loop through the dict and add it to the query if a value was specified
    for attr, value in filter_options.items():
        log.debug(f"attr= {attr}, value={value}")
        if value is True:
            q = q.filter(attr.isnot(None))
        elif value is not False:
            q = q.filter(attr == value)

    # Sort descending (returns most recent rows)
    if order_descending:
        q = q.order_by(Table.entry_id.desc())

    if offset:
        q = q.offset(offset)

    # Set a limit
    q = q.limit(limit)
    log.info(q)
    log.info(f"Received data, number of rows returned: {q.count()}")

    # Generate entry instance for each returned row, add these to a list
    entries = []
    for entry in q:
        entries.append(
            Entry(entry_id=entry.entry_id,
                  entry_type=entry.entry_type,
                  handled_utc=entry.handled_utc,
                  original_url=entry.original_url,
                  canonical_url=entry.canonical_url,
                  note=entry.note))

    log.info("Generated entry instances for each row")

    return entries
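
The skip / IS-NOT-NULL / equality filter pattern above can be exercised on its own; a minimal self-contained sketch against an in-memory SQLite database (the model and values here are illustrative, not the project's):

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Demo(Base):
    __tablename__ = "demo"
    entry_id = Column(Integer, primary_key=True)
    note = Column(String, nullable=True)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([Demo(entry_id=1, note="x"), Demo(entry_id=2, note=None)])
    session.commit()

    q = session.query(Demo)
    # False -> skip the column; True -> require non-NULL; anything else -> equality
    for attr, value in {Demo.note: True, Demo.entry_id: False}.items():
        if value is True:
            q = q.filter(attr.isnot(None))
        elif value is not False:
            q = q.filter(attr == value)

    print([row.entry_id for row in q])  # -> [1]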
Example #14
 def action_updatecomments(self):
     for entry in Entry.all():
         cnt = entry.comments().count()
         if cnt != entry.commentcount:
             entry.commentcount = cnt
             entry.put()
     self.write('"ok"')
Example #15
def get_single_entry(id):
    with sqlite3.connect("./dailyjournal.db") as conn:
        conn.row_factory = sqlite3.Row
        db_cursor = conn.cursor()

        # Use a ? parameter to inject a variable's value
        # into the SQL statement.
        db_cursor.execute(
            """
        SELECT
            a.id,
            a.concept,
            a.entry,
            a.date,
            a.moods_id
        FROM JournalEntries a
        WHERE a.id = ?
        """, (id, ))

        # Load the single result into memory
        data = db_cursor.fetchone()

        # Create an entry instance from the current row
        entry = Entry(data['id'], data['concept'], data['entry'], data['date'],
                      data['moods_id'])

        return json.dumps(entry.__dict__)
Example #16
    def get(self):

        urls = []

        def addurl(loc, lastmod=None, changefreq=None, priority=None):
            url_info = {
                'location': loc,
                'lastmod': lastmod,
                'changefreq': changefreq,
                'priority': priority,
            }
            urls.append(url_info)

        addurl(g_blog.baseurl, changefreq='daily', priority=1)

        entries = Entry.all().filter('published =', True).order('-date').fetch(g_blog.sitemap_entries)

        for item in entries:
            loc = '%s/%s' % (g_blog.baseurl, item.link)
            addurl(loc, item.date, 'daily', 0.9)

        if g_blog.sitemap_include_category:
            cats = Category.all()
            for cat in cats:
                loc = '%s/category/%s' % (g_blog.baseurl, cat.slug)
                addurl(loc, None, 'weekly', 0.8)

        if g_blog.sitemap_include_tag:
            tags = Tag.all()
            for tag in tags:
                loc = '%s/tag/%s' % (g_blog.baseurl, urlencode(tag.tag))
                addurl(loc, None, 'weekly', 0.8)

        self.response.headers['Content-Type'] = 'text/xml; charset=utf-8'
        self.render2('views/sitemap.xml', {'urlset': urls})
Example #17
    def doget(self, page):
        try:

            page = int(page)
            max_page = (self.blog.entrycount - 1) // self.blog.posts_per_page
            
            if max_page < 0:
                max_page = 0

            if page < 0 or page > max_page:
                return self.error(404)

            query = Entry.gql("WHERE entrytype = 'post' AND published = TRUE ORDER BY date DESC")
            entries = query.fetch(self.blog.posts_per_page, page * self.blog.posts_per_page)

            show_prev = entries and page != 0
            show_next = entries and page != max_page

            logging.info("generated main page")
        
            return self.render('index', {
                'entries':      entries,
                'show_prev':    show_prev,
                'show_next':    show_next,
                'pageindex':    page,
                'ishome':       True,
                })
        except:
            logging.error(traceback.format_exc())
            return self.error(404)    
Example #18
    def get(self, tags=None):
        entries = Entry.all().order('-date')
        cates = Category.all()
        tags = Tag.all()

        self.response.headers['Content-Type'] = 'binary/octet-stream'  # 'application/atom+xml'
        self.render2('views/wordpress.xml', {'entries': entries, 'cates': cates, 'tags': tags})
Example #19
def get_all_entries():
    with sqlite3.connect("./daily_journal.db") as conn:

        conn.row_factory = sqlite3.Row
        db_cursor = conn.cursor()

        db_cursor.execute("""
    SELECT
        e.id,
        e.concept,
        e.entry,
        e.date,
        e.mood_id
    FROM entries e
    """)

        entries = []

        dataset = db_cursor.fetchall()

        for row in dataset:
            entry = Entry(row['id'], row['concept'], row['entry'], row['date'],
                          row['mood_id'])

            entries.append(entry.__dict__)

    return json.dumps(entries)
Example #20
 def get(self, slug):
     app = Entry.all().filter('slug =', slug).get()
     if app.packaged:
         self.render_template("install.html", url='/minifest/%s' % slug,
                              packaged=True)
     else:
         self.render_template("install.html", url=app.url, packaged=False)
def search_for_entry(search_term):
    with sqlite3.connect("./dailyjournal.db") as conn:

        conn.row_factory = sqlite3.Row
        db_cursor = conn.cursor()

        db_cursor.execute(
            """
        SELECT
            a.id,
            a.concept,
            a.entry,
            a.date,
            a.moodId
        FROM entries a
        WHERE a.entry LIKE ?
        """, ('%' + search_term + '%', ))

        entries = []

        dataset = db_cursor.fetchall()

        for row in dataset:

            entry = Entry(row['id'], row['concept'], row['entry'], row['date'],
                          row['moodId'])

            entries.append(entry.__dict__)

        return json.dumps(entries)
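
Note that in both search examples the % wildcards are folded into the bound parameter rather than into the SQL text itself, so the user-supplied search term can never alter the structure of the query.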
    def update_site_entry(self):
        for rss_entry in self.entries:
        
            entry = Entry.find_by_link(rss_entry.link)
            if entry.empty:
            
                Entry.create(rss_entry, self.site_url)
            
            elif entry.entry_md5 != rss_entry.entry_md5:
                # print "rss_link : %s" % (rss_entry.link)
                # print "link : %s" % (entry['link'])
                # print "entry_md5     : %s" % (entry.entry_md5())
                # print "rss_entry_md5 : %s" % (rss_entry.entry_md5())
                entry['description'] = rss_entry.description
                entry['title'] = rss_entry.title
                # print "After entry_md5 : %s" % (entry.entry_md5())
                entry.save()
Example #23
    def get_entries(self):
        """Gets film entries as list comprehension

        Returns:
            list comprehension of film entries

        """
        return [Entry(item) for item in self.data["entries"]]
Example #24
    def _test_url(self, url):
        self.response.write('%s<br>' % url)
        resp = urlfetch.fetch('%s/media/revision.txt' % url)
        assert resp.status_code == 200
        rev = resp.content.strip()

        resp = urlfetch.fetch('%s?mobile=true' % url)
        if resp.status_code != 200:
            return

        size = len(resp.content)
        asset_size = 0
        css_size = 0
        js_size = 0

        for asset in (m.group(2) for m in
                      asset_url_pattern.finditer(resp.content)):

            # Handle relative URLs
            if '://' not in asset:
                asset = url + asset

            if ('.js?' in asset or '.css' in asset or
                asset.endswith('.js')):

                self.response.write('%s<br>' % asset)
                try:
                    data = urlfetch.fetch(asset).content
                except Exception:
                    continue
                if data:
                    data_len = len(data)
                    if '.js?' in asset:
                        js_size += data_len
                    elif '.css' in asset:
                        css_size += data_len
                    asset_size += data_len

        entry = Entry(time=datetime.datetime.now(), size=size,
                      domain=url, with_assets=size + asset_size,
                      commit=rev, size_css=css_size, size_js=js_size)
        entry.put()
        self.response.write('Size: %d<br>' % size)
        self.response.write('Assets Size: %d ' % asset_size)
        self.response.write('(CSS: %d, JS: %d)<br>' % (css_size, js_size))
Example #25
 def get(self, tags=None):
     entries = Entry.all().filter('entrytype =', 'post').filter('published =', True).order('-date').fetch(20)
     # A default avoids a NameError in the render call when there are no entries.
     last_updated = None
     if entries and entries[0]:
         last_updated = entries[0].date.strftime('%Y-%m-%dT%H:%M:%SZ')
     for e in entries:
         e.formatted_date = e.date.strftime('%Y-%m-%dT%H:%M:%SZ')
     self.response.headers['Content-Type'] = 'application/atom+xml'
     self.render2('views/atom.xml', {'entries': entries, 'last_updated': last_updated})
Example #26
def wp_newPage(blogid, struct, publish):

    entry = Entry(title=struct['title'], content=struct['description'])
    if 'mt_text_more' in struct:
        entry.content = entry.content + '<!--more-->' + struct['mt_text_more']

    if 'wp_slug' in struct:
        entry.slug = struct['wp_slug']
    if 'wp_page_order' in struct:
        entry.menu_order = int(struct['wp_page_order'])
    entry.entrytype = 'page'
    if publish:
        entry.publish(True)
    else:
        entry.save()

    postid = entry.key().id()
    return str(postid)
Example #27
def insert_entries(base_url, db_path, event_ids_dict):
    current_file = os.path.basename(__file__)
    current_file_name = os.path.splitext(current_file)[0]

    for key in event_ids_dict:
        for event_id in event_ids_dict[key]:

            url = base_url + "/" + current_file_name + "/" + str(
                event_id) + "/"

            try:
                print(url)
                response = requests.get(url)
            except requests.exceptions.RequestException as e:
                print(e)
                sys.exit(1)

            if response.status_code == 200:

                doc = pq(response.text)
                connection = sqlite3.connect(db_path)

                try:

                    # Entries
                    startlist = doc("table.results")
                    startlist('td.entry-sct > span.text-danger').parents(
                        'tr').remove()  # Remove course cars

                    for tr in startlist('tr').items():
                        entry = Entry(event_id, tr)
                        if entry.driver_id:
                            connection.execute(
                                '''INSERT INTO entries
                                (event_id, car_number, driver_id, codriver_id, team, car, plate, tyres, category, startlist_m, championship, created_at, updated_at, deleted_at)
                                VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)''',
                                entry.get_tuple())

                    connection.commit()

                except Exception as e:
                    connection.rollback()
                    raise e
                finally:
                    connection.close()
Example #28
    def get(self, slug=None, postid=None):
        if postid:
            entries = Entry.all().filter("published =", True).filter("post_id =", postid).fetch(1)
        else:
            slug = urldecode(slug)
            entries = Entry.all().filter("published =", True).filter("link =", slug).fetch(1)
        if not entries or len(entries) == 0:
            return self.error(404)

        entry = entries[0]
        comments = Comment.all().filter("entry =", entry)

        commentuser = ["", "", ""]

        if entry.entrytype == "post":
            self.render(
                "single",
                {
                    "entry": entry,
                    "relateposts": entry.relateposts,
                    "comments": comments,
                    "user_name": commentuser[0],
                    "user_email": commentuser[1],
                    "user_url": commentuser[2],
                    "checknum1": random.randint(1, 10),
                    "checknum2": random.randint(1, 10),
                },
            )
        else:

            self.render(
                "page",
                {
                    "entry": entry,
                    "relateposts": entry.relateposts,
                    "comments": comments,
                    "user_name": commentuser[0],
                    "user_email": commentuser[1],
                    "user_url": commentuser[2],
                    "checknum1": random.randint(1, 10),
                    "checknum2": random.randint(1, 10),
                },
            )
Example #29
    def test_insert_entry__expect_correct_insert(self):
        connection = Connection('postgres', 'root', '127.0.0.1:5432', 'planting_manager_teste')
        session = connection.session()
        meeiro_id = create_meeiro(name='tadeu', cpf='55584447213', rg='50658045x')
        entry_type_id = create_entry_type('despesas', connection)
        session.expunge_all()
        session.close()

        Entry.insert(meeiro_id=meeiro_id, entry_date=datetime(2018, 10, 1),
                     entry_type_id=entry_type_id, entry_value=100.0,
                     description='veneno', db_session=connection.session())

        america_timezone = pytz.timezone('America/Sao_Paulo')
        entry = session.query(EntryMapping).one()
        self.assertEqual(entry.entry_type, entry_type_id)
        self.assertEqual(str(entry.entry_date), str(america_timezone.localize(datetime(2018, 10, 1))))
        self.assertEqual(entry.meeiro_id, meeiro_id)
        self.assertEqual(entry.description, 'veneno')
        self.assertEqual(entry.entry_value, 100.0)
Example #30
    def action_init_blog(self, slug=None):

        for com in Comment.all():
            com.delete()

        for entry in Entry.all():
            entry.delete()

        g_blog.entrycount = 0
        self.write('"Init has succeeded."')
Example #31
    def initialize(self, request, response):
        try:
            BaseRequestHandler.initialize(self, request, response)

            m_pages = (
                Entry.all()
                .filter("entrytype =", "page")
                .filter("published =", True)
                .filter("entry_parent =", 0)
                .order("menu_order")
            )
            blogroll = Link.all().filter("linktype =", "blogroll")

            query = Entry.gql("WHERE entrytype = 'post' AND published = TRUE ORDER BY date")
            entries = query.fetch(1)
            start_date = end_date = None
            if entries:
                start_date = entries[0].date

            query = Entry.gql("WHERE entrytype = 'post' AND published = TRUE ORDER BY date DESC")
            entries = query.fetch(1)
            if entries:
                end_date = entries[0].date

            end_year = 0
            if end_date:
                end_year = end_date.year

            self.template_vals.update(
                {
                    "dates": self.build_dates(start_date, end_date),
                    "end_year": end_year,
                    "menu_pages": m_pages,
                    "tags": Tag.all().order("tag"),
                    "blogroll": blogroll,
                    "recent_comments": Comment.all().order("-date").fetch(5),
                }
            )
            logging.info("base public page initialized")
        except:
            logging.error(traceback.format_exc())
            return self.error(404)
Example #32
 def parse_entry(self, entry):
     return Entry(
         id=entry.find('id').text,
         title=entry.find('title').text,
         link=entry.find('link')['href'],
         updated=entry.find('updated').text,
         summary=entry.find('summary').text,
         content=entry.find('content').text.replace('\r', '').replace('\n', ''),
         author=Author(name=entry.find('author').find('name').text,
                       uri=entry.find('author').find('uri').text,
                       email=entry.find('author').find('email').text))
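
A standalone sketch of the kind of element parse_entry consumes: one Atom <entry> parsed with BeautifulSoup (the Entry and Author models are assumed from the surrounding project, so this only demonstrates the lookups):

from bs4 import BeautifulSoup

atom_entry = """
<entry>
  <id>urn:uuid:1</id>
  <title>Sample post</title>
  <link href="http://example.com/posts/1"/>
  <updated>2020-01-01T00:00:00Z</updated>
  <summary>A short summary</summary>
  <content>First line.
Second line.</content>
  <author>
    <name>Jane</name>
    <uri>http://example.com/jane</uri>
    <email>jane@example.com</email>
  </author>
</entry>
"""

entry = BeautifulSoup(atom_entry, "html.parser").find("entry")
print(entry.find("title").text)                 # Sample post
print(entry.find("link")["href"])               # http://example.com/posts/1
print(entry.find("author").find("name").text)   # Jane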
Example #33
def get_recent_data(domain):
    key = '%s:recent' % domain
    data = memcache.get(key)
    if data is not None:
        return pickle.loads(data)
    else:
        # ~Two weeks of data.
        data = list(Entry.all().filter('domain =', urls[domain])
                               .order('-time')
                               .run(limit=336))
        memcache.add(key, pickle.dumps(data), time=3600)
        return data
Example #34
    def test_get_entries__filter_by_range_date__expected_correct_result(self):
        america_timezone = pytz.timezone('America/Sao_Paulo')

        connection = Connection('postgres', 'root', '127.0.0.1:5432', 'planting_manager_teste')
        session = connection.session()
        meeiro_id = create_meeiro(name='tadeu', cpf='55584447213', rg='50658045x')
        entry_type_id = create_entry_type('despesas', connection)
        session.expunge_all()
        session.close()

        execute_session = connection.session()
        Entry.insert(meeiro_id=meeiro_id, entry_date=america_timezone.localize(datetime(2018, 10, 2, 1, 1)),
                     entry_type_id=entry_type_id, entry_value=100.0,
                     description='veneno', db_session=execute_session)

        Entry.insert(meeiro_id=meeiro_id, entry_date=america_timezone.localize(datetime(2018, 10, 1, 23, 58)),
                     entry_type_id=entry_type_id, entry_value=1520.0,
                     description='combustivel', db_session=execute_session)

        filters_ = Entry.get_filters_to_query_entry(
            db_session=execute_session,
            date_filter=DateRange(
                min_date=america_timezone.localize(datetime(2018, 10, 1, 23, 59)),
                max_date=america_timezone.localize(datetime(2018, 10, 2, 1, 10)),
                use_equal=True))
        entries = Entry.list(execute_session, filters_)
        self.assertEqual(len(entries), 1)
        expected_entry = entries[0]

        self.assertEqual(expected_entry._entry_type_id, entry_type_id)
        self.assertEqual(str(expected_entry._entry_date), str(america_timezone.localize(datetime(2018, 10, 2, 1, 1))))
        self.assertEqual(expected_entry._meeiro_id, meeiro_id)
        self.assertEqual(expected_entry._description, 'veneno')
        self.assertEqual(expected_entry._entry_value, 100.0)
Example #35
    def action_update_tags(self, slug=None):
        for tag in Tag.all():
            tag.delete()
        for entry in Entry.all().filter('entrytype =', 'post'):
            if entry.tags:
                for t in entry.tags:
                    try:
                        logging.info('sss:' + t)
                        Tag.add(t)
                    except:
                        traceback.print_exc()

        self.write('"All tags for entry have been updated."')
Example #36
    def get(self, slug):
        self.response.headers.add_header(
            "Content-type", "application/x-web-app-manifest+json")

        try:
            app = Entry.all().filter('slug =', slug).get()
        except Exception:
            self.response.write('{"error":"Not found."}')
            return

        try:
            blob_reader = blobstore.BlobReader(app.url)
        except Exception:
            self.response.write('{"error":"Could not retrieve package."}')
            return

        try:
            package = ZipFile(StringIO(blob_reader.read()))
        except Exception:
            self.response.write('{"error":"Could not retrieve package."}')
            return

        try:
            manifest = package.read("manifest.webapp")
        except Exception:
            self.response.write('{"error":"Could not open manifest."}')
            return

        try:
            unjsoned = json.loads(manifest)
        except Exception:
            self.response.write('{"error":"Could not parse manifest."}')
            return

        try:
            name = unjsoned["name"]
        except Exception:
            self.response.write('{"error":"Could not read app name."}')
            return

        try:
            version = unjsoned["version"]
        except Exception:
            self.response.write('{"error":"Could not read app version."}')
            return

        self.response.write(json.dumps({
            "name": name,
            "package_path": "/serve/%s" % app.url,
            "version": version,
        }))
Example #37
    def get(self, slug=None):
        logging.info("browsing by tag %s" % slug)
        if not slug:
            self.error(404)
            return
        try:
            page_index = int(self.param('page'))
        except:
            page_index = 1
        slug = urldecode(slug)

        entries = Entry.all().filter('tags =', slug).order('-date')
        (entries, links) = Pager(query=entries).fetch(page_index)
        self.render('tag', {'entries': entries, 'tag': slug, 'pager': links})
Example #38
 def post(self, slug):
     key = self.request.get("key", None)
     entry = Entry.get(key)
     
     entry.title     = self.request.get('title')
     entry.slug      = self.request.get("slug")
     # entry.published = datetime.datetime.strptime( self.request.get("published"), "%Y-%m-%d %H:%M:%S" )
     entry.excerpt   = self.request.get("excerpt")
     entry.body      = self.request.get("body")
     entry.draft     = bool(self.request.get("draft"))
     
     entry.put()
     entry.index()
     entry.indexed_title_changed()
     self.redirect("/entry/" + entry.slug)
Example #39
    def get(self, number):
        # The route passes number in as a string; convert it once up front so
        # the arithmetic and comparisons below work on an int.
        number = int(number)
        offset = 10 * (number - 1)
        entries_count = Entry.all(keys_only=True).count()
        if entries_count // 10 >= number:
            number = number + 1
        else:
            number = False

        entries = db.Query(Entry).filter('draft = ', False).order('-published').fetch(limit=10, offset=offset)
        for i in range(len(entries)):
            body = markdown.markdown(entries[i].body)
            entries[i].body = body
            
        path = os.path.join(template_dir, 'blog.html')
        data = {'entries': entries, 'sitetitle': 'The Staydecent&trade; Web Design &amp; Development Blog', 'number':number}
        self.response.out.write(template.render(path, data))
Example #40
 def get(self, slug=None):
     if not slug:
         self.error(404)
         return
     try:
         page_index = int(self.param('page'))
     except:
         page_index = 1
     slug = urllib.unquote(slug).decode('utf8')
     cats = Category.all().filter('slug =', slug).fetch(1)
     if cats:
         entries = Entry.all().filter('categorie_keys =', cats[0].key()).order('-date')
         (entries, links) = Pager(query=entries).fetch(page_index)
         self.render('category', {'entries': entries, 'category': cats[0], 'pager': links})
     else:
         self.error(414, slug)
Example #41
    def __init__(self, name: str, body: dict):
        if body['type'] != TYPE_DIRECTORY:
            raise TypeError("can't parse a non-directory object")

        self.name = name

        self.created_at = body['created_at']
        self.updated_at = body['updated_at']

        self._content = {}
        for k, k_object in body['content'].items():
            if k_object['type'] == TYPE_DIRECTORY:
                self._content[k] = Directory(k, k_object)
            elif k_object['type'] == TYPE_ENTRY:
                self._content[k] = Entry(k, k_object)
Example #42
 def parse_entries(self, soup):
     results = []
     entries = soup.find_all(
         id=re.compile(r'MAQA_Search_gvResults_ctl00__\d+'))
     for entry in entries:
         heading = entry.find('a')
         body = entry.find(id=re.compile(
             r'MAQA_Search_gvResults_ctl00_ctl\d+_pnl(Motion|Question)(?<!Header)$'
         ))
         if body is None:
             continue
         # body = body.text
         link = self.entry_link.format(
             re.search(r'\w{3}-\w{5}', heading.text).group())
         entry_obj = Entry(heading, body, link)
         results.append(entry_obj)
     return results
    def get(self, year, month):
        last_day = self.last_day_of_month(year, month)
        gql = ("WHERE date > DATETIME(%s, %s, 1, 0, 0, 0) "
               "AND date < DATETIME(%s, %s, %d, 23, 59, 59) "
               "ORDER BY date DESC") % (year, month, year, month, last_day)
        query = Entry.gql(gql)
        entries = query.fetch(1000)

        show_prev = False
        show_next = False
                
        return self.render('index', {
            'entries':      entries,
            'show_prev':    show_prev,
            'show_next':    show_next,
            'pageindex':    0,
            'ishome':       True,
            'end_year':     int(year),
            })
def database_entry():
    path = request.args['path']
    entry_name = request.args['entry_name']

    if request.method == 'POST':
        try:
            e = Entry(
                entry_name,
                dict(type=TYPE_ENTRY,
                     created_at=get_current_date(),
                     updated_at=get_current_date(),
                     content=request.json))
            return created(session_global.database.decrypted.new_entry(
                path, e))
        except KeyError as k:
            return internal(k)

    elif request.method == 'GET':
        try:
            return retrieved(
                session_global.database.decrypted.get_entry(path, entry_name))
        except KeyError as k:
            return not_found(k)
        except TypeError as te:
            return internal(te)

    elif request.method == 'PUT':
        try:
            return edited(
                session_global.database.decrypted.update_entry(
                    path, entry_name, request.json))
        except KeyError as k:
            return not_found(k)

    elif request.method == 'DELETE':
        try:
            return deleted(
                session_global.database.decrypted.delete_entry(
                    path, entry_name))
        except KeyError as k:
            return not_found(k)
        except TypeError as te:
            return internal(te)
Example #45
    def test_get_canonical_from_database_by_url(self, use_database=False):
        amount_of_correct_retrievals = 0
        session = get_engine_session()

        # Use data from the database
        if use_database:
            old_entries = get_data(session=session,
                                   limit=100,
                                   offset=5000,
                                   order_descending=True,
                                   canonical_url=True)

        # Or use a single entry as specified below
        else:
            old_entries = [
                Entry(
                    original_url="https://www.mynbc5.com/amp/article/emily-ferlazzo-joseph-bolton-vermont-missing-update/38004866",
                    canonical_url="https://abc3340.com/news/inside-your-world/the-federal-government-spends-billions-each-year-maintaining-empty-buildings-nationwide"
                )
            ]

        for old_entry in old_entries:
            log.info("OLD")
            log.info(old_entry.entry_id)
            log.info(old_entry.canonical_url)
            found_entry = get_entry_by_original_url(old_entry.original_url,
                                                    session)

            if found_entry:
                log.info("NEW")
                log.info(found_entry.entry_id)
                log.info(found_entry.canonical_url)

                if old_entry.entry_id == found_entry.entry_id:
                    amount_of_correct_retrievals += 1

            else:
                log.warning("No entry found!")

        self.assertEqual(amount_of_correct_retrievals, len(old_entries))
Example #46
    def update_user_entry(self, user, callback=None):
        # The source scrape redacted part of these lines; presumably they
        # logged the user and fetched that user's feeds, e.g.:
        logging.info('user: %s' % user['id'])
        feeds = user.get_feeds()  # hypothetical reconstruction of the redacted call

        for feed in feeds:
            logging.info(feed)

            entries = Entry.get_new_entries(feed['site_url'], feed['updated_at'])
            user_entries = []
            for entry in entries:
                user_entries.append((entry['id'], user['id'], entry['site_url'] ))
            if len(user_entries) > 0:
                UserEntry.insert_entries(user_entries)
            user.update_feed_update_at(feed['site_url'])

        groups = user.get_groups()
        callback(groups)
Example #47
def metaWeblog_editPost(postid, struct, publish):
    cates = struct.get('categories', [])
    newcates = []
    for cate in cates:
        c = Category.all().filter('name =', cate).fetch(1)
        if c:
            newcates.append(c[0].key())
    entry = Entry.get_by_id(int(postid))

    if 'mt_keywords' in struct:
        entry.settags(struct['mt_keywords'])

    if 'wp_slug' in struct:
        entry.slug = struct['wp_slug']
    if 'mt_excerpt' in struct:
        entry.excerpt = struct['mt_excerpt']

    entry.title = struct['title']
    entry.content = struct['description']
    if 'mt_text_more' in struct:
        content = struct['mt_text_more']
        if content:
            entry.content = entry.content + '<!--more-->' + content
    entry.categorie_keys = newcates
    
    tags = struct.get('mt_tags', [])
    
    entry.settags(tags)
    
    if publish:
        entry.publish(True)
    else:
        entry.save()

    return True
Example #48
def get_all_entries():
    # Open a connection to the database
    with sqlite3.connect("./dailyjournal.db") as conn:

        # Just use these. It's a Black Box.
        conn.row_factory = sqlite3.Row
        db_cursor = conn.cursor()

        # Write the SQL query to get the information you want
        db_cursor.execute("""
        SELECT
            a.id,
            a.concept,
            a.entry,
            a.date,
            a.moods_id
        FROM JournalEntries a
        """)

        # Initialize an empty list to hold all entry representations
        entries = []

        # Convert rows of data into a Python list
        dataset = db_cursor.fetchall()

        # Iterate list of data returned from database
        for row in dataset:

            # Create an entry instance from the current row.
            # Note that the database fields are specified in
            # exact order of the parameters defined in the
            # entry class above.
            entry = Entry(row['id'], row['concept'], row['entry'], row['date'],
                          row['moods_id'])

            entries.append(entry.__dict__)

    # Use `json` package to properly serialize list as JSON
    return json.dumps(entries)
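
For local experimentation, the table this example queries can be seeded first; a minimal setup sketch (the column shapes are inferred from the query above, not taken from the project's actual schema):

import sqlite3

with sqlite3.connect("./dailyjournal.db") as conn:
    conn.execute("""
    CREATE TABLE IF NOT EXISTS JournalEntries (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        concept TEXT,
        entry TEXT,
        date TEXT,
        moods_id INTEGER
    )""")
    conn.execute(
        "INSERT INTO JournalEntries (concept, entry, date, moods_id) VALUES (?, ?, ?, ?)",
        ("sqlite", "Parameterized queries keep input out of the SQL text.", "2021-01-01", 1))

print(get_all_entries())  # -> JSON array containing the row above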
Example #49
    def action_getcomments(self):
        key = self.param('key')
        entry = Entry.get(key)
        comments = Comment.all().filter('entry =', key)

        commentuser = self.request.cookies.get('comment_user', '')
        if commentuser:
            commentuser = commentuser.split('#@#')
        else:
            commentuser = ['', '', '']

        vals = {
            'entry': entry,
            'comments': comments,
            'user_name': commentuser[0],
            'user_email': commentuser[1],
            'user_url': commentuser[2],
            'checknum1': random.randint(1, 10),
            'checknum2': random.randint(1, 10),
            }
        html = self.get_render('comments', vals)

        self.write(simplejson.dumps(html.decode('utf8')))
Example #50
def get_single_entry(id):
    with sqlite3.connect("./daily_journal.db") as conn:
        conn.row_factory = sqlite3.Row
        db_cursor = conn.cursor()

        db_cursor.execute(
            """
        SELECT
            e.id,
            e.concept,
            e.entry,
            e.date,
            e.mood_id
        FROM entries e
        WHERE e.id = ?
        """, (id, ))

        data = db_cursor.fetchone()

        entry = Entry(data['id'], data['concept'], data['entry'], data['date'],
                      data['mood_id'])

        return json.dumps(entry.__dict__)
Example #51
def get_entries_with_value(value):
    with sqlite3.connect("./dailyjournal.db") as conn:
        conn.row_factory = sqlite3.Row
        db_cursor = conn.cursor()

        queryString = f'%{value}%'
        # Use a ? parameter to inject a variable's value
        # into the SQL statement.
        entries = []

        db_cursor.execute(
            """
         SELECT
            a.id,
            a.concept,
            a.entry,
            a.date,
            a.moods_id
        FROM JournalEntries a
        WHERE a.entry LIKE ?
        """, (queryString, ))

        data = db_cursor.fetchall()

        for row in data:

            # Create an entry instance from the current row.
            # Note that the database fields are specified in
            # exact order of the parameters defined in the
            # entry class above.
            entry = Entry(row['id'], row['concept'], row['entry'], row['date'],
                          row['moods_id'])

            entries.append(entry.__dict__)

    return json.dumps(entries)
Example #52
def storeInDatabase():
    # Get json data from the endpoint
    try:
        response = requests.get("https://api.publicapis.org/entries").json()
    except Exception as e:
        error = f"Error! {e}"
        return render_template('index.html', err=error)
    else:
        limit = 0
        for entry in response["entries"]:
            entry = Entry(entry["API"], entry["Description"], entry["Auth"],
                          entry["HTTPS"], entry["Cors"], entry["Link"],
                          entry["Category"])
            try:
                db.session.add(entry)
            except Exception as e:
                error = f"Error! {e}"
                return render_template('index.html', err=error)

            # Limit to insert only 50 records
            limit += 1
            if limit == 50:
                break
        try:
            # Commit the new records to DB
            db.session.commit()
            message = "Data Successfully Stored in database"
        except SQLAlchemyError as e:
            error = e.__dict__['orig']
            return render_template('index.html', err=error)
        else:
            # Get all entries from database
            entries = Entry.query.all()
            return render_template('index.html',
                                   success=message,
                                   entries=entries)
Example #53
from models.entry import Entry

entry = Entry()

entry.test = 'hello'
print(entry.test)
#
# headers = [
#                {
#                    "name": "Host",
#                    "value": "www.cnn.com"
#                },
#                {
#                    "name": "User-Agent",
#                    "value": "curl/7.43.0"
#                },
#                {
#                    "name": "Accept",
#                    "value": "*/*"
#                },
#                {
#                    "name": "Proxy-Connection",
#                    "value": "Keep-Alive"
#                },
#                {
#                    "name": "accept-encoding",
#                    "value": "identity"
#                }
#            ]
#
#
Example #54
def response(context, flow):
    """
       Called when a server response has been received. At the time of this
       message both a request and a response are present and completely done.
    """
    # Values are converted from float seconds to int milliseconds later.
    ssl_time = -.001
    connect_time = -.001
    if flow.server_conn not in context.seen_server:
        # Calculate the connect_time for this server_conn. Afterwards add it to
        # seen list, in order to avoid the connect_time being present in entries
        # that use an existing connection.
        connect_time = flow.server_conn.timestamp_tcp_setup - \
                       flow.server_conn.timestamp_start
        context.seen_server.add(flow.server_conn)

        if flow.server_conn.timestamp_ssl_setup is not None:
            # Get the ssl_time for this server_conn as the difference between
            # the start of the successful tcp setup and the successful ssl
            # setup. If  no ssl setup has been made it is left as -1 since it
            # doesn't apply to this connection.
            ssl_time = flow.server_conn.timestamp_ssl_setup - \
                       flow.server_conn.timestamp_tcp_setup

    # Calculate the raw timings from the different timestamps present in the
    # request and response object. For lack of a way to measure it dns timings
    # can not be calculated. The same goes for HAR blocked: MITMProxy will open
    # a server connection as soon as it receives the host and port from the
    # client connection. So the time spent waiting is actually spent waiting
    # between request.timestamp_end and response.timestamp_start thus it
    # correlates to HAR wait instead.
    timings_raw = {
        'send': flow.request.timestamp_end - flow.request.timestamp_start,
        'wait': flow.response.timestamp_start - flow.request.timestamp_end,
        'receive': flow.response.timestamp_end - flow.response.timestamp_start,
        'connect': connect_time,
        'ssl': ssl_time
    }

    # HAR timings are integers in ms, so we have to re-encode the raw timings to
    # that format.
    timings = dict([(key, int(1000 * value))
                    for key, value in timings_raw.iteritems()])

    # The full_time is the sum of all timings. Timings set to -1 will be ignored
    # as per spec.
    full_time = 0
    for item in timings.values():
        if item > -1:
            full_time += item

    started_date_time = datetime.fromtimestamp(
        flow.request.timestamp_start, tz=pytz.timezone('UTC')).isoformat()

    request_query_string = [{
        "name": k,
        "value": v
    } for k, v in flow.request.get_query()]
    request_http_version = ".".join([str(v) for v in flow.request.httpversion])
    # Cookies are shaped as tuples by MITMProxy.
    request_cookies = [{
        "name": k.strip(),
        "value": v[0]
    } for k, v in (flow.request.get_cookies() or {}).iteritems()]
    request_headers = [{
        "name": k,
        "value": v
    } for k, v in flow.request.headers]
    request_headers_size = len(str(flow.request.headers))
    request_body_size = len(flow.request.content)

    response_http_version = ".".join(
        [str(v) for v in flow.response.httpversion])
    # Cookies are shaped as tuples by MITMProxy.
    response_cookies = [{
        "name": k.strip(),
        "value": v[0]
    } for k, v in (flow.response.get_cookies() or {}).iteritems()]
    response_headers = [{
        "name": k,
        "value": v
    } for k, v in flow.response.headers]
    response_headers_size = len(str(flow.response.headers))
    response_body_size = len(flow.response.content)
    response_body_decoded_size = len(flow.response.get_decoded_content())
    response_body_compression = response_body_decoded_size - response_body_size
    response_mime_type = flow.response.headers.get_first('Content-Type', '')
    response_redirect_url = flow.response.headers.get_first('Location', '')

    container_id = os.environ['HOSTNAME']

    entry = Entry()
    entry.startedDateTime = started_date_time
    entry.time = full_time
    entry.container_id = container_id
    entry.request = {
        "method": flow.request.method,
        "url": flow.request.url,
        "httpVersion": request_http_version,
        "cookies": request_cookies,
        "headers": request_headers,
        "queryString": request_query_string,
        "headersSize": request_headers_size,
        "bodySize": request_body_size,
    }
    entry.response = {
        "url": flow.request.url,
        "status": flow.response.code,
        "statusText": flow.response.msg,
        "httpVersion": response_http_version,
        "cookies": response_cookies,
        "headers": response_headers,
        "content": {
            "size": response_body_size,
            "compression": response_body_compression,
            "mimeType": response_mime_type
        },
        "redirectURL": response_redirect_url,
        "headersSize": response_headers_size,
        "bodySize": response_body_size,
    }
    entry.cache = {}
    entry.timings = timings

    #ws = create_connection("ws://localhost:9000/ws/{0}".format(entry.pageref))
    ws = create_connection(
        "ws://websocket.bandit.io:9000/ws/{0}".format(container_id),
        sslopt={"check_hostname": False})
    ws.send(
        json.dumps({
            "method": "Network.requestWillBeSent",
            "params": {
                "requestId": "7897.52",
                "frameId": "7897.1",
                "loaderId": "7897.3",
                "documentURL": entry.request['url'],
                "request": {
                    "url": entry.request['url'],
                    "method": entry.request['method'],
                    "headers": {
                        header['name']: header['value']
                        for header in entry.request['headers']
                    }
                },
                "timestamp": 88986.634829,
                "wallTime": 1440472453.19435,
                "initiator": {
                    "type": "other"
                },
                "type": "Document"
            }
        }))
    ws.send(
        json.dumps({
            "method": "Network.responseReceived",
            "params": {
                "requestId": "7897.52",
                "frameId": "7897.1",
                "loaderId": "7897.3",
                "timestamp": 88986.985021,
                "type": "Document",
                "response": {
                    "url": entry.response['url'],
                    "status": entry.response['status'],
                    "statusText": entry.response['statusText'],
                    "headers": {
                        header['name']: header['value']
                        for header in entry.response['headers']
                    },
                    "mimeType": entry.response['content']['mimeType'],
                    "connectionReused": False,
                    "fromDiskCache": False,
                    "fromServiceWorker": False,
                    "timing": {
                        "requestTime": 88986.636403,
                        "proxyStart": -1,
                        "proxyEnd": -1,
                        "dnsStart": 0,
                        "dnsEnd": 108.372000002419,
                        "connectStart": 108.372000002419,
                        "connectEnd": 113.420000008773,
                        "sslStart": -1,
                        "sslEnd": -1,
                        "serviceWorkerFetchStart": -1,
                        "serviceWorkerFetchReady": -1,
                        "serviceWorkerFetchEnd": -1,
                        "sendStart": 113.492999997106,
                        "sendEnd": 113.573000009637,
                        "receiveHeadersEnd": 347.90900000371
                    },
                    # "requestHeaders": {
                    #     "If-None-Match": "\"1440455137124|#public|0|en|||0\"",
                    #     "Accept-Encoding": "gzip, deflate, sdch",
                    #     "Host": "www.chromium.org",
                    #     "Accept-Language": "en-US,en;q=0.8",
                    #     "Upgrade-Insecure-Requests": "1",
                    #     "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36",
                    #     "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
                    #     "Cache-Control": "max-age=0",
                    #     "Cookie": "_ga=GA1.2.1062414394.1440468745; _gat_SitesTracker=1; __utmt=1; __utma=221884874.1062414394.1440468745.1440468745.1440471278.2; __utmb=221884874.2.10.1440471278; __utmc=221884874; __utmz=221884874.1440468745.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); aftzc=QW1lcmljYS9Mb3NfQW5nZWxlczp3eGRhd0FxcWxWZkNYdHRkVVJ2ZStlVEpOOVE9",
                    #     "Connection": "keep-alive",
                    #     "If-Modified-Since": "Mon, 24 Aug 2015 22:25:37 GMT"
                    # },
                    "remoteIPAddress": "216.239.32.27",
                    "remotePort": 80,
                    "protocol":
                    "http/{0}".format(entry.response['httpVersion'])
                }
            }
        }))
    ws.send(
        json.dumps({
            "method": "Network.dataReceived",
            "params": {
                "requestId": "7897.52",
                "timestamp": 88986.985513,
                "dataLength": entry.response['content']['size'],
                "encodedDataLength": entry.response['bodySize']
            }
        }))
    ws.send(
        json.dumps({
            "method": "Network.loadingFinished",
            "params": {
                "requestId": "7897.52",
                "timestamp": 88986.985401,
                "encodedDataLength": entry.response['bodySize']
            }
        }))

    #ws.send(json.dumps({"method":"Network.requestWillBeSent","params":{"requestId":"7897.52","frameId":"7897.1","loaderId":"7897.3","documentURL":"http://www.chromium.org/","request":{"url":"http://www.chromium.org/","method":"GET","headers":{"Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8","Upgrade-Insecure-Requests":"1","User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36"}},"timestamp":88986.634829,"wallTime":1440472453.19435,"initiator":{"type":"other"},"type":"Document"}}))
    #ws.send(json.dumps({"method":"Network.responseReceived","params":{"requestId":"7897.52","frameId":"7897.1","loaderId":"7897.3","timestamp":88986.985021,"type":"Document","response":{"url":"http://www.chromium.org/","status":304,"statusText":"Not Modified","headers":{"Date":"Tue, 25 Aug 2015 03:14:13 GMT","Last-Modified":"Mon, 24 Aug 2015 22:25:37 GMT","Server":"GSE","X-Robots-Tag":"noarchive","ETag":"\"1440455137124|#public|0|en|||0\""},"mimeType":"text/html","connectionReused":False,"connectionId":2554,"encodedDataLength":-1,"fromDiskCache":False,"fromServiceWorker":False,"timing":{"requestTime":88986.636403,"proxyStart":-1,"proxyEnd":-1,"dnsStart":0,"dnsEnd":108.372000002419,"connectStart":108.372000002419,"connectEnd":113.420000008773,"sslStart":-1,"sslEnd":-1,"serviceWorkerFetchStart":-1,"serviceWorkerFetchReady":-1,"serviceWorkerFetchEnd":-1,"sendStart":113.492999997106,"sendEnd":113.573000009637,"receiveHeadersEnd":347.90900000371},"headersText":"HTTP/1.1 304 Not Modified\r\nX-Robots-Tag: noarchive\r\nLast-Modified: Mon, 24 Aug 2015 22:25:37 GMT\r\nETag: \"1440455137124|#public|0|en|||0\"\r\nDate: Tue, 25 Aug 2015 03:14:13 GMT\r\nServer: GSE\r\n\r\n","requestHeaders":{"If-None-Match":"\"1440455137124|#public|0|en|||0\"","Accept-Encoding":"gzip, deflate, sdch","Host":"www.chromium.org","Accept-Language":"en-US,en;q=0.8","Upgrade-Insecure-Requests":"1","User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36","Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8","Cache-Control":"max-age=0","Cookie":"_ga=GA1.2.1062414394.1440468745; _gat_SitesTracker=1; __utmt=1; __utma=221884874.1062414394.1440468745.1440468745.1440471278.2; __utmb=221884874.2.10.1440471278; __utmc=221884874; __utmz=221884874.1440468745.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); aftzc=QW1lcmljYS9Mb3NfQW5nZWxlczp3eGRhd0FxcWxWZkNYdHRkVVJ2ZStlVEpOOVE9","Connection":"keep-alive","If-Modified-Since":"Mon, 24 Aug 2015 22:25:37 GMT"},"requestHeadersText":"GET / HTTP/1.1\r\nHost: www.chromium.org\r\nConnection: keep-alive\r\nCache-Control: max-age=0\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\r\nUpgrade-Insecure-Requests: 1\r\nUser-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36\r\nAccept-Encoding: gzip, deflate, sdch\r\nAccept-Language: en-US,en;q=0.8\r\nCookie: _ga=GA1.2.1062414394.1440468745; _gat_SitesTracker=1; __utmt=1; __utma=221884874.1062414394.1440468745.1440468745.1440471278.2; __utmb=221884874.2.10.1440471278; __utmc=221884874; __utmz=221884874.1440468745.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); aftzc=QW1lcmljYS9Mb3NfQW5nZWxlczp3eGRhd0FxcWxWZkNYdHRkVVJ2ZStlVEpOOVE9\r\nIf-None-Match: \"1440455137124|#public|0|en|||0\"\r\nIf-Modified-Since: Mon, 24 Aug 2015 22:25:37 GMT\r\n\r\n","remoteIPAddress":"216.239.32.27","remotePort":80,"protocol":"http/1.1"}}}))
    #ws.send(json.dumps({"method":"Network.dataReceived","params":{"requestId":"7897.52","timestamp":88986.985513,"dataLength":23423,"encodedDataLength":190}}))
    ##WebSocketHandler.ws_send('test', {"method":"Page.frameNavigated","params":{"frame":{"id":"7897.1","loaderId":"7897.3","url":"http://www.chromium.org/","mimeType":"text/html","securityOrigin":"http://www.chromium.org"}}})
    #ws.send(json.dumps({"method":"Network.loadingFinished","params":{"requestId":"7897.52","timestamp":88986.985401,"encodedDataLength":190}}))

    ws.close()


# def done(context):
#     """
#         Called once on script shutdown, after any other events.
#     """
#     import pprint
#     import json
#
#     json_dump = context.HARLog.json()
#     compressed_json_dump = context.HARLog.compress()
#
#     if context.dump_file == '-':
#         context.log(pprint.pformat(json.loads(json_dump)))
#     elif context.dump_file.endswith('.zhar'):
#         open(context.dump_file, "w").write(compressed_json_dump)
#     else:
#         open(context.dump_file, "w").write(json_dump)
#     context.log(
#         "HAR log finished with %s bytes (%s bytes compressed)" % (
#             len(json_dump), len(compressed_json_dump)
#         )
#     )
#     context.log(
#         "Compression rate is %s%%" % str(
#             100. * len(compressed_json_dump) / len(json_dump)
#         )
#     )
#
#
# def print_attributes(obj, filter_string=None, hide_privates=False):
#     """
#         Useful helper method to quickly get all attributes of an object and its
#         values.
#     """
#     for attr in dir(obj):
#         if hide_privates and "__" in attr:
#             continue
#         if filter_string is not None and filter_string not in attr:
#             continue
#         value = getattr(obj, attr)
#         print("%s.%s" % ('obj', attr), value, type(value))
    def new_entry(self, path: str, entry: Entry):
        # Resolve the directory at `path`, attach the entry there, and
        # hand back its dict representation.
        d = self.access_path(path)
        d.new_entry(entry)

        return entry.to_dict()
Example #56
    def receive(self, message):
        # Keep the decoded text/plain body of the message
        bodies = message.bodies("text/plain")

        for content_type, body in bodies:
            decoded_body = body.decode()

        # Split optional YAML front matter from the body
        body_list = decoded_body.split("---\n\n", 1)
        if len(body_list) == 2:
            fields, body = body_list
        else:
            fields, body = "", body_list[0]

        # Grab the first paragraph as the excerpt
        body_parts = body.split("\n\n", 1)
        excerpt = body_parts[0]
        body = body_parts[1] if len(body_parts) == 2 else ""

        # Grab any data from fields
        slug = message.subject.replace(" ", "-").lower()
        cat_name = "Bit"
        draft = False
        published = None

        if fields:
            logging.info("We have YAML front matter.")
            for config in yaml.safe_load_all(fields):
                logging.info("Checking each YFM.")
                logging.debug(type(config))
                # hard-coded data extraction
                # Probably should do this a better way
                if "slug" in config:
                    slug = config["slug"]

                if "published" in config:
                    published = config["published"]

                if "category" in config:
                    cat_name = config["category"]

                if "draft" in config:
                    draft = True
        else:
            logging.info("No YAML front matter found.")

        if published is None:
            date = message.date.split(" -")
            published = datetime.datetime.strptime(date[0], "%a, %d %b %Y %H:%M:%S")
        else:
            published = datetime.datetime.strptime(published, "%a, %d %b %Y %H:%M:%S")

        # get the category object
        category = db.Query(Category).filter("name =", cat_name).get()
        logging.info("Entry in cat: " + category.name + " from: " + cat_name)

        # Process attachments!!!!!!
        images = []
        thumbnail = None

        if cat_name == "Work":
            if message.attachments:
                # Check file extension of attachments
                re_filetype = re.compile(r"\.(gif|jpg|jpeg|png)$", re.IGNORECASE)

                for att in message.attachments:
                    # att[0] = name, att[1] = payload
                    is_image = re_filetype.search(att[0])

                    filename, encoded_data = att
                    data = encoded_data.payload
                    if encoded_data.encoding:
                        data = data.decode(encoded_data.encoding)

                    if is_image:
                        # upload image
                        if thumbnail is None:
                            thumb = image.resize(data, 250)
                            thumbnail = s3.upload_image(filename, thumb, message.subject)

                        new_image = s3.upload_image(filename, data, message.subject)
                        images.append(new_image)
                    else:
                        logging.info(att)
                        logging.info("Attachment was not an image.")

        # save
        # Okay, just hardcode email
        # author property is now string
        # check user.email() to author when in admin
        if message.sender:
            entry = Entry(
                author=message.sender,
                title=message.subject,
                slug=slug,
                category=category.key(),
                body=body,
                excerpt=excerpt,
                published=published,
                draft=draft,
                images=images,
                thumbnail=thumbnail,
            )
            entry.put()
            entry.index()
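
# A hypothetical email body that receive() above can parse: everything
# before the "---\n\n" separator is treated as YAML front matter, and
# the first paragraph after it becomes the excerpt.
SAMPLE_EMAIL_BODY = (
    "slug: my-first-post\n"
    "category: Work\n"
    "published: Mon, 29 Jul 2019 09:30:00\n"
    "---\n\n"
    "This first paragraph becomes the excerpt.\n\n"
    "The rest of the body is stored as the entry body.\n"
)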
Example #57
    sheema, errors = user_schema.load({
        'username': '******',
        'email': 'sheema@email',
        'password': '******',
        'password_confirmation': 'pass',
        # 'entries': [breakfast],
        # 'tasks': [project],
        # 'events': [friday_drinks]
    })

    if errors:
        raise Exception(errors)

    entry_one = Entry(
        date='2019-07-29',
        what=
        'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.',
        image='image-url',
        user=sheema)

    entry_two = Entry(
        date='2019-07-30',
        what=
        'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.',
        image='image-url',
        user=sheema)

    entry_three = Entry(
        date='2019-07-20',
        what=
        'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.',
        image='image-url',
        user=sheema)
Example #58
from typing import List


def get_data(session,
             limit,
             offset,
             order_descending,
             entry_id=None,
             entry_type=None,
             handled_utc=None,
             original_url=None,
             canonical_url=None,
             canonical_type=None,
             note=None) -> List[Entry]:
    log.info(f"Getting data of type {entry_type} from {Table.__tablename__},"
             f" limit = {limit}, order_descending = {order_descending}")

    # Store the values in a dict
    filter_options = {
        Table.entry_id: entry_id,
        Table.entry_type: entry_type,
        Table.handled_utc: handled_utc,
        Table.original_url: original_url,
        Table.canonical_url: canonical_url,
        Table.canonical_type: canonical_type,
        Table.note: note
    }

    # Create a new query
    q = session.query(Table)

    for attr, value in filter_options.items():
        log.debug(f"attr= {attr}, value={value}")

        # If true, select not NULL
        if value is True:
            q = q.filter(attr.isnot(None))

        # If false, select NULL
        elif value is False:
            q = q.filter(attr.is_(None))

        # If anything else, select X
        elif value is not None:
            q = q.filter(attr == value)

    # Sort descending (returns most recent rows)
    if order_descending:
        q = q.order_by(Table.entry_id.desc())

    if offset:
        q = q.offset(offset)

    # Set a limit
    q = q.limit(limit)
    log.info(q)
    log.info(f"Received data, rows: {q.count()}")

    # Generate entry instance for each returned row, add these to a list
    entries = []
    for entry in q:
        entries.append(
            Entry(entry_id=entry.entry_id,
                  entry_type=entry.entry_type,
                  handled_utc=entry.handled_utc,
                  original_url=entry.original_url,
                  canonical_url=entry.canonical_url,
                  canonical_type=entry.canonical_type,
                  note=entry.note))

    log.info("Entry instance(s) generated")

    return entries
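
# A brief usage sketch of the filter semantics above (hypothetical
# values), assuming the get_engine_session() helper used elsewhere in
# this codebase: True selects rows where a column is not NULL, False
# selects NULL rows, and any other non-None value is matched exactly.
recent_missing = get_data(session=get_engine_session(),
                          limit=50,
                          offset=0,
                          order_descending=True,
                          canonical_url=False)  # rows without a canonical URL

typed_entries = get_data(session=get_engine_session(),
                         limit=10,
                         offset=0,
                         order_descending=False,
                         entry_type="link")  # hypothetical exact-match value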
Example #59
    def test_canonical(self, use_database=True):
        amount_of_canonicals = 0
        old_amount_of_canonicals = 0

        # Use data from the database
        if use_database:
            old_entries = get_data(session=get_engine_session(),
                                   limit=500,
                                   offset=5000,
                                   order_descending=True,
                                   canonical_url=False)

        # Or use a single entry as specified below
        else:
            old_entries = [
                Entry(
                    original_url=
                    "https://www.google.com/amp/s/abc3340.com/amp/news/inside-your-world/the-federal-government-spends-billions-each-year-maintaining-empty-buildings-nationwide",
                    canonical_url=
                    "https://abc3340.com/news/inside-your-world/the-federal-government-spends-billions-each-year-maintaining-empty-buildings-nationwide"
                )
            ]

        # Loop through every old entry and try to find the canonicals, compare the results
        for old_entry in old_entries:
            if old_entry.canonical_url:
                old_amount_of_canonicals += 1

            urls = get_urls(old_entry.original_url)
            urls_info = get_urls_info(urls)
            if urls_info:
                for link in urls_info:
                    log.info(link.canonical_alt)

                    if link.amp_canonical:
                        log.info(link.amp_canonical)
                    if link.canonical:
                        amount_of_canonicals += 1

                    log.info(f"BODY   : {old_entry.original_url}")
                    log.info(f"OLD    : {old_entry.canonical_url}")
                    log.info(f"NEW    : {link.canonical}")

                    if link.canonical == old_entry.canonical_url:
                        log.info("It's the same!")
                    else:
                        log.info("It's not the same!")
                    """if link.canonical:
                        similarity = get_article_similarity(old_entry.original_url, link.canonical, log_articles=False)
                        log.info(f"Article similarity= {similarity}")"""

            else:
                log.warning(f"No canonicals found")

        log.info(
            f"\nCanonicals found: Old: {old_amount_of_canonicals}, New: {amount_of_canonicals}"
        )

        # The new pass should find at least as many canonicals as before;
        # fewer than before fails the test.
        self.assertGreaterEqual(amount_of_canonicals, old_amount_of_canonicals)
Example #60
    def posts(self):
        return Entry.all().filter('entrytype =', 'post').filter('categorie_keys =', self)
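
# A minimal sketch of the Entry model that posts() above assumes
# (hypothetical; only the two filtered property names come from the
# query itself):
from google.appengine.ext import db

class Entry(db.Model):
    entrytype = db.StringProperty()           # e.g. 'post'
    categorie_keys = db.ListProperty(db.Key)  # categories the entry belongs to

# Filtering a list property with '=' tests membership, so posts() returns
# every post whose categorie_keys contains this category's key.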