def summarize(self, meme, page, related):
    sbapi = ScrapboxAPI(parent=self.nb_app)
    has_page = 1 if page is not None else 0
    # Candidate count: the meme's own page (if any) plus all 1-hop and
    # 2-hop related pages.
    count = len(related['1']) + len(related['2']) + has_page
    # Prefer the meme's own page, then the closest related page.
    if page is not None:
        p = page
    elif len(related['1']) > 0:
        p = related['1'][0]
    elif len(related['2']) > 0:
        p = related['2'][0]
    else:
        return None
    if 'lines' in p:
        has_code = any(self._has_code(desc['text']) for desc in p['lines'])
    else:
        # Related-page entries arrive without their lines, so fetch the
        # full page before looking for code.
        self.log.info('No lines (maybe relatedPages): {}'.format(p['title']))
        details = sbapi.get(p['title'])
        has_code = any(self._has_code(desc['text'])
                       for desc in details['lines'])
    return {'description': self.summarized_desc(meme, p),
            'page_url': sbapi.get_view_url(p['title']),
            'title': p['title'],
            'has_code': has_code,
            'count': '{}'.format(count)}
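_has_code itself does not appear in these examples. A minimal sketch, assuming it only needs to recognize the Scrapbox 'code:' block notation that _get_content (shown further below) emits:

def _has_code(self, text):
    # Hypothetical helper: a line opening a Scrapbox code block
    # ('code:<filename>') marks the page as containing code.
    return text.lstrip().startswith('code:')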
Example #2
async def test_get_from_public(http_client):
    sbapi = ScrapboxAPI()
    sbapi.project_id = 'some_project'
    await sbapi.get('some-meme')
    assert http_client.call_args is not None
    req = http_client.call_args[0][0]
    assert req.url == 'https://scrapbox.io/api/pages/some_project/some-meme'
    # Without a connect.sid cookie, the request carries no headers.
    assert str(req.headers) == ''
Example #3
async def test_get(http_client):
    sbapi = ScrapboxAPI()
    sbapi.cookie_connect_sid = 'secret'
    sbapi.project_id = 'some_project'
    await sbapi.get('some-meme')
    assert http_client.call_args is not None
    req = http_client.call_args[0][0]
    assert req.url == 'https://scrapbox.io/api/pages/some_project/some-meme'
    # The configured connect.sid cookie is forwarded as a Cookie header.
    assert str(req.headers) == "{'Cookie': 'connect.sid=secret'}"
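Taken together, the two async tests pin down get(): requests go through tornado's AsyncHTTPClient, and the connect.sid cookie, when configured, travels as a Cookie header. A minimal sketch under those assumptions (the class scaffolding here is assumed, not taken from the source):

import json

from tornado.httpclient import AsyncHTTPClient, HTTPRequest

class ScrapboxAPI:
    cookie_connect_sid = None  # session cookie for private projects
    project_id = None          # Scrapbox project name

    async def get(self, page):
        url = 'https://scrapbox.io/api/pages/{}/{}'.format(
            self.project_id, page)
        if self.cookie_connect_sid is not None:
            request = HTTPRequest(url, headers={
                'Cookie': 'connect.sid={}'.format(self.cookie_connect_sid)})
        else:
            request = HTTPRequest(url)
        response = await AsyncHTTPClient().fetch(request)
        return json.loads(response.body)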
def get_tag_info(self, meme):
    sbapi = ScrapboxAPI(parent=self.nb_app)
    links = sbapi.get(meme)
    page_content = None
    related = {'1': [], '2': []}
    if links is not None:
        # Only persistent (actually saved) pages have content to collect.
        if links['persistent']:
            page_content = self.collect_content({'user': links['user']},
                                                links)
        related['1'] = self.get_relates(links['relatedPages']['links1hop'])
        related['2'] = self.get_relates(links['relatedPages']['links2hop'])
    return self.summarize(meme, page_content, related), page_content, related
def get(self):
    notebook = json.loads(self.get_query_argument('notebook'))
    self.log.info('Notebook: {}'.format(notebook))
    meme = notebook['meme']['current']
    # Tag the new page with the notebook meme unless an existing page
    # is being edited.
    if self.get_query_argument('mode', default='new') != 'edit':
        tag = '\n\n#{}\n'.format(meme)
    else:
        tag = ''

    sbapi = ScrapboxAPI(parent=self.nb_app)
    title = self.get_query_argument('title', default=meme)
    url = sbapi.get_create_url(title,
                               tag + self._get_content(notebook['toc']))

    self.redirect(url)
def get(self):
    cell = json.loads(self.get_query_argument('cell'))
    self.log.info('Cell: {}'.format(cell))
    meme = cell['metadata']['lc_cell_meme']['current']
    uuid_meme, _ = parse_cell_id(meme)
    # Tag with the UUID part of the cell meme, and with the full meme as
    # well when it differs, unless an existing page is being edited.
    if self.get_query_argument('mode', default='new') != 'edit':
        tag = '\n\n#{}\n'.format(uuid_meme)
        if meme != uuid_meme:
            tag += '#{}\n'.format(meme)
    else:
        tag = ''

    sbapi = ScrapboxAPI(parent=self.nb_app)
    title = self.get_query_argument('title', default=meme)
    url = sbapi.get_create_url(title, tag + self._get_content(cell))

    self.redirect(url)
def _get_content(self, cell):
    sbapi = ScrapboxAPI(parent=self.nb_app)
    if cell['cell_type'] == 'code':
        lines = cell['source'].split('\n')
        # 'code:<filename>' followed by two-space-indented lines is
        # Scrapbox's code-block notation.
        code = 'code:' + sbapi.cell_filename + '\n' + \
               '\n'.join(['  ' + l for l in lines])
        return code + '\n'
    elif cell['cell_type'] == 'markdown':
        lines = cell['source'].split('\n')
        code = 'code:cell.md\n' + '\n'.join(['  ' + l for l in lines])
        return code + '\n'
    else:
        return ''
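For instance, a code cell serializes to a Scrapbox code block (the cell_filename value below is only an assumption):

cell = {'cell_type': 'code', 'source': 'x = 1\nprint(x)'}
# _get_content(cell) then returns, assuming cell_filename == 'cell.py':
# 'code:cell.py\n  x = 1\n  print(x)\n'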
def test_get(requests_get):
    sbapi = ScrapboxAPI()
    sbapi.cookie_connect_sid = 'secret'
    sbapi.project_id = 'some_project'
    sbapi.get('some-meme')
    requests_get.assert_called_with(
        'https://scrapbox.io/api/pages/'
        'some_project/some-meme',
        cookies={'connect.sid': 'secret'})
Example #9
def test_get_create_url():
    sbapi = ScrapboxAPI()
    sbapi.project_id = 'some_project'
    base_url = 'https://scrapbox.io/some_project/'
    body = 'Body'
    assert sbapi.get_create_url('Foo', body) == base_url + 'Foo?body=' + body
    body = 'Body 2'
    abody = 'Body%202'
    assert sbapi.get_create_url('Foo', body) == base_url + 'Foo?body=' + abody
    body = ''.join(['{}'.format(i % 10) for i in range(0, 1025)])
    abody = body[:1024]
    assert sbapi.get_create_url('Foo Bar', body) == base_url + \
           'Foo%20Bar?body=' + abody
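These assertions fully constrain get_create_url: the title and body are percent-encoded, and the raw body is capped at 1024 characters first. A minimal sketch satisfying them:

from urllib.parse import quote

def get_create_url(self, title, body):
    # Scrapbox pre-fills a new page from the 'body' query parameter;
    # truncate the raw body to 1024 characters, then percent-encode.
    return 'https://scrapbox.io/{}/{}?body={}'.format(
        self.project_id, quote(title), quote(body[:1024]))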
Example #10
def test_get_view_url():
    sbapi = ScrapboxAPI()
    sbapi.project_id = 'some_project'
    base_url = 'https://scrapbox.io/some_project/'
    assert sbapi.get_view_url('Foo') == base_url + 'Foo'
    assert sbapi.get_view_url('Foo Bar') == base_url + 'Foo%20Bar'
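And the matching sketch for get_view_url, assuming the same quote import as above:

def get_view_url(self, title):
    # The view URL is just the project base plus the percent-encoded title.
    return 'https://scrapbox.io/{}/{}'.format(self.project_id, quote(title))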
def test_get_from_public(requests_get):
    sbapi = ScrapboxAPI()
    sbapi.project_id = 'some_project'
    sbapi.get('some-meme')
    requests_get.assert_called_with('https://scrapbox.io/api/pages/'
                                    'some_project/some-meme')
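The two synchronous tests pin down the same contract for a requests-based get(): a plain GET for public projects, plus a connect.sid cookie for private ones. A minimal sketch under those assumptions (the class scaffolding is again assumed):

import requests

class ScrapboxAPI:
    cookie_connect_sid = None
    project_id = None

    def get(self, page):
        url = 'https://scrapbox.io/api/pages/{}/{}'.format(
            self.project_id, page)
        if self.cookie_connect_sid is not None:
            # Private projects authenticate with the connect.sid cookie.
            return requests.get(
                url, cookies={'connect.sid': self.cookie_connect_sid}).json()
        return requests.get(url).json()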