Exemplo n.º 1
0
 def post(self):
     """Persist every known setting from the posted form.

     One Setting entity per key in Setting.KEY_LIST is built from the
     request parameter of the same name, then saved in a single batch.
     """
     settings = [
         Setting(key_name=name, value=self.request.get(name))
         for name in Setting.KEY_LIST.keys()
     ]
     Setting.save(settings)
     self.redirect('../?status=settings_saved')
Exemplo n.º 2
0
    def get(self):
        """Render the single-entry admin page.

        The path segment after "/admin/entry/" is the entry id; an empty
        segment means the "new entry" (draft) page.
        """
        # Cut off the initial "/admin/entry/" prefix.
        path = self.request.path.split('/')[3:]
        settings = Setting.get_in_dict()
        result = {}

        entry_id = path[0]
        if entry_id == '':
            # New entry page: starts life as a draft.
            result['is_draft'] = True
        else:
            query = {
                'id': int(entry_id),
                'time_offset': settings['time_offset'],
            }
            result['entry'] = Entry.get_by_query(query)
            # BUG FIX: the original tested `if result:`, which is always
            # truthy once result['entry'] has been assigned (even when the
            # lookup returned nothing) — test the fetched entry instead.
            if result['entry']:
                # public = None marks a draft post elsewhere in this file.
                if result['entry'].public is None:
                    result['is_draft'] = True
            else:
                # The id is not valid: go to the new entry page and stop
                # before rendering the template below with a missing entry.
                self.redirect('../entry/?status=bad_id')
                return

        template_path = os.path.join(
            os.path.dirname(os.path.dirname(__file__)), 'template',
            'admin_entry_single.html')  # two os.path.dirname = "../"
        self.response.out.write(template.render(template_path, result))
Exemplo n.º 3
0
 def get(self):
     """Render the batched entry list for the admin side.

     Supports /archive/YYYYMM, /tag/<tags>, /draft, /private views and a
     trailing /page/<n> segment; builds a query dict for
     Entry.get_by_query and a numeric pagination strip (0 = ellipsis).
     """
     path = self.request.path.split('/')[3:] # [3:] cuts off the initial "/admin/entries/"
     settings = Setting.get_in_dict()
     result = {}
     query = {}
     query['time_offset'] = settings['time_offset']
     query['limit'] = 25 # entry per page limit
     
     if path[0] == 'archive':
         result['is_archive'] = True
         # path[1] is a YYYYMM month key; the archive window runs from the
         # first of that month up to (exclusive) the first of the next.
         year = int(path[1][0:4])
         month = int(path[1][4:6])
         query['start_time'] = lp.time.str2datetime(str(year) + '-' + str(month) + '-01 00:00:00', settings['time_offset'])
         if month == 12:
             month = 1
             year +=1
         else:
             month += 1
         query['end_time'] = lp.time.str2datetime(str(year) + '-' + str(month) + '-01 00:00:00', settings['time_offset'])
     
     elif path[0] == 'tag':
         result['is_tag'] = True
         query['tags'] = urllib.unquote(path[1]).split(' ') #TODO: need to clean the result to avoid injection
         
     elif path[0] == 'draft':
         result['is_draft'] = True
         # public = None marks a draft post (see the entry-saving handler).
         query['public'] = None
     
     elif path[0] == 'private':
         result['is_private'] = True
         query['public'] = False
     
     # Page number comes from a ".../page/<n>" path pair; default page 1.
     if 'page' in path:
         query['page'] = int(path[path.index('page')+1])
     else:
         query['page'] = 1
     
     
     result['query'] = query
     result['entries'], num_entries = Entry.get_by_query(query)
     
     # Setup pagination links
     # NOTE(review): Python 2 integer division is relied on below
     # (max_page_links / 2 == 3); range() is a list here too.
     max_page_links = 7
     num_pages = max(int(math.ceil(num_entries * 1.0 / query['limit'])), 1)
     start_page = max(query['page'] - max_page_links / 2, 2)
     end_page = min(start_page + max_page_links, num_pages)
     
     # page_list always starts with page 1; a 0 entry is rendered as an
     # ellipsis gap by the template; the last page is appended separately.
     result['page_list'] = [1, ]
     if start_page > 2:
         result['page_list'] += [0, ]
     result['page_list'] += range(start_page, end_page)
     if end_page < num_pages:
         result['page_list'] += [0, ]
     if num_pages != 1:
         result['page_list'] += [num_pages, ]
     
     # Base URL for page links: current path with any "page/..." suffix cut.
     result['base_url'] = self.request.path.split('page')[0].rstrip('/')
     
     template_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'template', 'admin_entry_batch.html') #two os.path.dirname to get the root app dir
     self.response.out.write(template.render(template_path, result))
Exemplo n.º 4
0
 def get(self):
     """Render the single-entry admin page.

     The path segment after "/admin/entry/" is the entry id; an empty
     segment means the "new entry" (draft) page.
     """
     path = self.request.path.split('/')[3:] # cut off the initial "/admin/entry/"
     settings = Setting.get_in_dict()
     result = {}
     
     entry_id = path[0]
     if entry_id == '':
         # New entry page: starts life as a draft.
         result['is_draft'] = True
     else:
         query = {'id': int(entry_id), 'time_offset': settings['time_offset']}
         result['entry'] = Entry.get_by_query(query)
         # BUG FIX: the original tested `if result:`, which is always truthy
         # once result['entry'] has been assigned — test the entry instead.
         if result['entry']:
             # public = None marks a draft post elsewhere in this file.
             if result['entry'].public is None:
                 result['is_draft'] = True
         else:
             # The id is not valid: go to the new entry page and stop
             # before rendering the template below with a missing entry.
             self.redirect('../entry/?status=bad_id')
             return
     
     template_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'template', 'admin_entry_single.html') #two os.path.dirname = "../"
     self.response.out.write(template.render(template_path, result))
Exemplo n.º 5
0
    def post(self):
        """Pull entries from a Blogger Atom feed and publish JSON to S3.

        Two actions, chosen by which submit button was posted:
        'update_latest' writes latest.json; 'update_collection' writes one
        anthology/<slug>.json per configured collection plus an
        anthology.json index. Anything else redirects home.
        """

        if self.request.get('update_latest'):
            action = 'update_latest'
        elif self.request.get('update_collection'):
            action = 'update_collection'
        else:
            self.redirect('/')
            return

        settings = Setting.get_in_dict()
        # Atom XML namespace prefix used for every element lookup below.
        ns = '{http://www.w3.org/2005/Atom}'

        def save_to_s3(key, content):
            # Upload `content` under `key`, publicly readable, served as
            # JavaScript (the JSON is consumed cross-domain via JSONP-style
            # script loading — TODO confirm).
            conn = S3Connection(settings['aws3_access_key'],
                                settings['aws3_secret_key'])
            bucket = conn.get_bucket(settings['aws3_bucket_name'])
            f = S3Key(bucket)
            f.key = key
            f.set_contents_from_string(content,
                                       headers={
                                           'Content-Type':
                                           'application/javascript',
                                           'x-amz-storage-class':
                                           'REDUCED_REDUNDANCY'
                                       },
                                       policy='public-read')

        if action == 'update_latest':
            url = 'https://www.blogger.com/feeds/' + settings[
                'blogger_id'] + '/posts/default?max-results=' + settings[
                    'max_latest_result']

            xmlfile = urllib2.urlopen(url)
            entryxml = et.parse(xmlfile).findall(ns + 'entry')
            entries = []
            for e in entryxml:
                # Reduce each Atom <entry> to a plain dict; the Blogger id
                # is the numeric tail after "post-", and the per-post
                # footer div plus raw newlines are stripped from content.
                entries.append({
                    'id':
                    re.sub(r'.*post-', '',
                           e.find(ns + 'id').text),
                    'title':
                    e.find(ns + 'title').text,
                    'published':
                    e.find(ns + 'published').text,
                    'content':
                    re.sub(
                        r'\r\n|\r|\n|<div class="blogger-post-footer">.*</div>',
                        '',
                        e.find(ns + 'content').text),
                    'tags':
                    [cat.attrib['term'] for cat in e.findall(ns + 'category')]
                })
            #self.response.out.write(lp.json.encodep(entries).encode('utf-8'))

            save_to_s3('latest.json', lp.json.encodep(entries).encode('utf-8'))

            self.redirect('/?status=updated_latest')

        elif action == 'update_collection':
            if settings['collection_list'].strip() == '':
                self.redirect('/?status=error_collection_empty')
                return

            # collection_list setting: one "name slug" pair per line.
            collection_list = [
                line.strip().split(' ')
                for line in settings['collection_list'].strip().split('\n')
            ]
            collection_list = [{
                'name': item[0],
                'slug': item[1]
            } for item in collection_list]
            for item in collection_list:
                # The collection name is a Blogger category/label filter.
                url = 'https://www.blogger.com/feeds/' + settings[
                    'blogger_id'] + '/posts/default/-/' + urllib.quote(
                        item['name'].encode('utf-8')
                    ) + '?max-results=' + settings['max_collection_result']

                xmlfile = urllib2.urlopen(url)
                entryxml = et.parse(xmlfile).findall(ns + 'entry')
                entries = []
                for e in entryxml:
                    # Same Atom-entry → dict reduction as update_latest.
                    entries.append({
                        'id':
                        re.sub(r'.*post-', '',
                               e.find(ns + 'id').text),
                        'title':
                        e.find(ns + 'title').text,
                        'published':
                        e.find(ns + 'published').text,
                        'content':
                        re.sub(
                            r'\r\n|\r|\n|<div class="blogger-post-footer">.*</div>',
                            '',
                            e.find(ns + 'content').text),
                        'tags': [
                            cat.attrib['term']
                            for cat in e.findall(ns + 'category')
                        ]
                    })

                save_to_s3('anthology/' + item['slug'] + '.json',
                           lp.json.encodep(entries).encode('utf-8'))

                #logging.info(url)
            #Now update the index file
            save_to_s3('anthology.json',
                       lp.json.encodep(collection_list).encode('utf-8'))

            self.redirect('/?status=updated_collection')
Exemplo n.º 6
0
    def post(self):
        """Create or update a blog entry from the posted editor form.

        The form carries an optional id (empty = new entry), content
        fields, and exactly one action button: publish, saveasdraft, or
        update. Archive/tag counters are adjusted to match the change.
        """
        settings = Setting.get_in_dict()

        # Getting the entry status and saving action.
        is_newentry = (self.request.get('id') == '')
        is_draft = (self.request.get('public') == '')
        # BUG FIX: default the action so a POST without any of the three
        # action buttons no longer raises UnboundLocalError below; such a
        # request simply saves content without changing publicity.
        action = None
        if self.request.get('publish'):
            action = 'publish'
        elif self.request.get('saveasdraft'):
            action = 'saveasdraft'
        elif self.request.get('update'):
            action = 'update'

        if is_newentry:
            entry = Entry()
            entry_original = None  # no stored state to diff against
        else:
            entry = Entry.get_by_id(int(self.request.get('id')))
            # Snapshot the stored state so counters can be adjusted by the
            # published-month / tag delta in the 'update' branch.
            entry_original = copy.deepcopy(entry)

        if entry:
            if self.request.get('title'):
                entry.title = self.request.get('title')
            entry.content = self.request.get('content')

            # Setup publish time.
            if is_draft:
                # Always update publish time when saving a draft so the
                # draft is bumped to the top of the list.
                entry.published = datetime.datetime.utcnow()
            else:
                # Allow manual edit of published time only on non-draft
                # non-new entries, interpreted with the site time offset.
                entry.published = lp.time.str2datetime(
                    self.request.get('published'), settings['time_offset'])

            # Setup tags: split on spaces, drop empty pieces produced by
            # consecutive spaces, and de-duplicate via set().
            if self.request.get('tags'):
                entry.tags = list(
                    set(
                        filter(lambda a: a != '',
                               self.request.get('tags').strip().split(' '))))
            else:
                entry.tags = []

            # Setup entry status and update archive and tag counters.
            if action == 'publish':
                entry.public = True
                lp.stat.update_count("archive",
                                     add=[
                                         entry.published.strftime("%Y%m"),
                                     ])
                if entry.tags:
                    lp.stat.update_count("tag", add=entry.tags)
            elif action == 'saveasdraft':
                # public = None to indicate a draft post.
                entry.public = None
            elif action == 'update':
                # We don't set a publicity until the entry is published.
                # NOTE(review): 'update' is only expected for existing
                # entries — entry_original is None when is_newentry.
                entry.public = (self.request.get('public') == 'True')
                if entry.published.strftime(
                        "%Y%m") != entry_original.published.strftime("%Y%m"):
                    lp.stat.update_count(
                        "archive",
                        add=[
                            entry.published.strftime("%Y%m"),
                        ],
                        subtract=[
                            entry_original.published.strftime("%Y%m"),
                        ])
                if set(entry.tags) != set(entry_original.tags):
                    lp.stat.update_count("tag",
                                         add=entry.tags,
                                         subtract=entry_original.tags)

            Entry.save(entry)

            self.redirect('../entries/?status=item_saved')

        else:
            # The posted id did not resolve to an entry.
            self.redirect('../entry/?status=bad_id')
Exemplo n.º 7
0
 def post(self):
     """Create or update a blog entry from the posted editor form.

     The form carries an optional id (empty = new entry), content fields,
     and exactly one action button: publish, saveasdraft, or update.
     Archive/tag counters are adjusted to match the change.
     """
     settings = Setting.get_in_dict()
     
     # getting the entry status and saving action
     is_newentry = (self.request.get('id') == '')
     is_draft = (self.request.get('public') == '')
     # BUG FIX: default the action so a POST without any of the three
     # action buttons no longer raises UnboundLocalError below; such a
     # request simply saves content without changing publicity.
     action = None
     if self.request.get('publish'):
         action = 'publish'
     elif self.request.get('saveasdraft'):
         action = 'saveasdraft'
     elif self.request.get('update'):
         action = 'update'
     
     if is_newentry:
         entry = Entry()
         entry_original = None  # no stored state to diff against
     else:
         entry = Entry.get_by_id(int(self.request.get('id')))
         # Snapshot stored state so counters can be adjusted by the delta.
         entry_original = copy.deepcopy(entry)
     
     if entry:
         if self.request.get('title'):
             entry.title = self.request.get('title')
         entry.content = self.request.get('content')
         
         # setup publish time
         if is_draft:
             # Always update publish time when save as draft so the draft
             # will be bumped to the top.
             entry.published = datetime.datetime.utcnow()
         else:
             # Allow manual edit of published time only on non-draft
             # non-new entries.
             entry.published = lp.time.str2datetime(self.request.get('published'), settings['time_offset'])
         
         # setup tags: list(set(...)) de-duplicates; filter drops empty
         # pieces produced by 2+ consecutive spaces
         if self.request.get('tags'):
             entry.tags = list(set(filter(lambda a: a != '', self.request.get('tags').strip().split(' '))))
         else:
             entry.tags = []
         
         # setup entry status and update archives and tags
         if action == 'publish':
             entry.public = True
             lp.stat.update_count("archive", add = [entry.published.strftime("%Y%m"), ] )
             if entry.tags:
                 lp.stat.update_count("tag", add = entry.tags)
         elif action == 'saveasdraft':
             # public = None to indicate a draft post
             entry.public = None
         elif action == 'update':
             # We don't set a publicity until the entry is published.
             # NOTE(review): 'update' is only expected for existing
             # entries — entry_original is None when is_newentry.
             entry.public = (self.request.get('public') == 'True')
             if entry.published.strftime("%Y%m") != entry_original.published.strftime("%Y%m"):
                 lp.stat.update_count("archive", add = [entry.published.strftime("%Y%m"), ], subtract = [entry_original.published.strftime("%Y%m"), ])
             if set(entry.tags) != set(entry_original.tags):
                 lp.stat.update_count("tag", add = entry.tags, subtract = entry_original.tags)
         
         Entry.save(entry)
         
         self.redirect('../entries/?status=item_saved')
         
     else:
         # The posted id did not resolve to an entry.
         self.redirect('../entry/?status=bad_id')
Exemplo n.º 8
0
 def get(self):
     """Render the settings page populated with the current settings."""
     current = Setting.get_in_dict()
     # Two os.path.dirname calls climb from this module to the app root.
     root_dir = os.path.dirname(os.path.dirname(__file__))
     template_path = os.path.join(root_dir, 'template', 'settings.html')
     self.response.out.write(template.render(template_path, current))
Exemplo n.º 9
0
 def post(self):
     """Pull entries from a Blogger Atom feed and publish JSON to S3.

     Two actions, chosen by which submit button was posted:
     'update_latest' writes latest.json; 'update_collection' writes one
     anthology/<slug>.json per configured collection plus an
     anthology.json index. Anything else redirects home.
     """
     if self.request.get('update_latest'):
         action = 'update_latest'
     elif self.request.get('update_collection'):
         action = 'update_collection'
     else:
         self.redirect('/')
         return
     
     settings = Setting.get_in_dict()
     # Atom XML namespace prefix used for every element lookup below.
     ns = '{http://www.w3.org/2005/Atom}'
     
     def save_to_s3(key, content):
             # Upload `content` under `key`, publicly readable, served as
             # JavaScript (the JSON is consumed client-side via script
             # loading — TODO confirm).
             conn = S3Connection(settings['aws3_access_key'], settings['aws3_secret_key'])
             bucket = conn.get_bucket(settings['aws3_bucket_name'])
             f = S3Key(bucket)
             f.key = key
             f.set_contents_from_string(content, headers = {'Content-Type': 'application/javascript', 'x-amz-storage-class':'REDUCED_REDUNDANCY'}, policy = 'public-read')
     
     if action == 'update_latest':
         url = 'https://www.blogger.com/feeds/' + settings['blogger_id'] + '/posts/default?max-results=' + settings['max_latest_result']
         
         xmlfile = urllib2.urlopen(url)
         entryxml = et.parse(xmlfile).findall(ns+'entry')
         entries = []
         for e in entryxml:
             # Reduce each Atom <entry> to a plain dict; the Blogger id is
             # the numeric tail after "post-", and the per-post footer div
             # plus raw newlines are stripped from content.
             entries.append({
                             'id': re.sub(r'.*post-', '', e.find(ns+'id').text),
                             'title': e.find(ns+'title').text,
                             'published': e.find(ns+'published').text,
                             'content': re.sub(r'\r\n|\r|\n|<div class="blogger-post-footer">.*</div>', '', e.find(ns+'content').text),
                             'tags': [cat.attrib['term'] for cat in e.findall(ns+'category')]
                             })
         #self.response.out.write(lp.json.encodep(entries).encode('utf-8'))
         
         save_to_s3('latest.json', lp.json.encodep(entries).encode('utf-8'))
         
         self.redirect('/?status=updated_latest')
         
     elif action == 'update_collection':
         if settings['collection_list'].strip() == '':
             self.redirect('/?status=error_collection_empty')
             return
         
         # collection_list setting: one "name slug" pair per line.
         collection_list = [ line.strip().split(' ') for line in settings['collection_list'].strip().split('\n')]
         collection_list = [ {'name':item[0], 'slug': item[1]} for item in collection_list]
         for item in collection_list:
             # The collection name is a Blogger category/label filter.
             url = 'https://www.blogger.com/feeds/' + settings['blogger_id'] + '/posts/default/-/' + urllib.quote(item['name'].encode('utf-8')) + '?max-results=' + settings['max_collection_result']
             
             xmlfile = urllib2.urlopen(url)
             entryxml = et.parse(xmlfile).findall(ns+'entry')
             entries = []
             for e in entryxml:
                 # Same Atom-entry → dict reduction as update_latest.
                 entries.append({
                                 'id': re.sub(r'.*post-', '', e.find(ns+'id').text),
                                 'title': e.find(ns+'title').text,
                                 'published': e.find(ns+'published').text,
                                 'content': re.sub(r'\r\n|\r|\n|<div class="blogger-post-footer">.*</div>', '', e.find(ns+'content').text),
                                 'tags': [cat.attrib['term'] for cat in e.findall(ns+'category')]
                                 })
             
             save_to_s3('anthology/' + item['slug'] + '.json', lp.json.encodep(entries).encode('utf-8'))
             
             #logging.info(url)
         #Now update the index file
         save_to_s3('anthology.json', lp.json.encodep(collection_list).encode('utf-8'))
         
         self.redirect('/?status=updated_collection')