def publish(site):
    type = request.form.get('h')
    new_post_url = API_NEW_POST_URL.format(site.site_id)
    data = {
        'title': request.form.get('name'),
        'content': util.get_complex_content(request.form),
        'excerpt': request.form.get('summary'),
        'slug': request.form.get('slug'),
    }

    files = None
    photo_file = request.files.get('photo')
    if photo_file:
        # TODO support multiple files
        data['format'] = 'image'
        files = {
            'media[0]': (os.path.basename(photo_file.filename), photo_file),
        }

    req = requests.Request('POST', new_post_url,
                           data=util.trim_nulls(data), files=files,
                           headers={
                               'Authorization': 'Bearer ' + site.token,
                           })
    req = req.prepare()
    s = requests.Session()
    r = s.send(req)

    if r.status_code // 100 != 2:
        return util.wrap_silo_error_response(r)

    r_data = r.json()
    return util.make_publish_success_response(r_data.get('URL'), data=r_data)
def interpret_response(result):
    if result.status_code // 100 != 2:
        return util.wrap_silo_error_response(result)

    result_json = result.json()
    twitter_url = 'https://twitter.com/{}/status/{}'.format(
        result_json.get('user', {}).get('screen_name'),
        result_json.get('id_str'))
    return util.make_publish_success_response(twitter_url, result_json)
def publish(site):
    new_post_url = API_NEW_POST_URL.format(site.site_id)
    data = {
        'title': request.form.get('name'),
        'content': util.get_complex_content(request.form),
        'excerpt': request.form.get('summary'),
        'slug': request.form.get('slug'),
    }

    files = None
    photo_files = util.get_possible_array_value(request.files, 'photo')
    photo_urls = util.get_possible_array_value(request.form, 'photo')

    if photo_files or photo_urls:
        data['format'] = 'image'

    if photo_files:
        files = {
            'media[]': [(os.path.basename(photo_file.filename), photo_file)
                        for photo_file in photo_files],
        }

    if photo_urls:
        data['media_urls[]'] = photo_urls

    req = requests.Request('POST', new_post_url,
                           data=util.trim_nulls(data), files=files,
                           headers={'Authorization': 'Bearer ' + site.token})
    req = req.prepare()
    s = requests.Session()
    r = s.send(req)

    if r.status_code // 100 != 2:
        return util.wrap_silo_error_response(r)

    r_data = r.json()
    return util.make_publish_success_response(r_data.get('URL'), data=r_data)
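# A minimal, hypothetical client-side sketch (not part of the handlers above)
# showing the kind of form post the multi-photo WordPress handler expects.
# The endpoint URL and bearer token are placeholders; the field names
# ('name', 'content', 'slug', 'photo') mirror what the handler reads from
# request.form and request.files.
import requests

MICROPUB_ENDPOINT = 'https://example.com/micropub'  # hypothetical endpoint
ACCESS_TOKEN = 'XXXX'                               # hypothetical token

resp = requests.post(
    MICROPUB_ENDPOINT,
    headers={'Authorization': 'Bearer ' + ACCESS_TOKEN},
    data={
        'name': 'Vacation photos',
        'content': 'A few pictures from the trip.',
        'slug': 'vacation-photos',
    },
    # repeated 'photo' parts arrive server-side as the photo_files list
    files=[
        ('photo', ('beach.jpg', open('beach.jpg', 'rb'), 'image/jpeg')),
        ('photo', ('sunset.jpg', open('sunset.jpg', 'rb'), 'image/jpeg')),
    ])
print(resp.status_code, resp.text)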
def publish(site):
    def get_photo_id(original):
        """Based on an original URL, find the Flickr syndicated URL and
        extract the photo ID.

        Returns a tuple with (photo_id, photo_url)
        """
        flickr_url = util.posse_post_discovery(original, FLICKR_PHOTO_RE)
        if flickr_url:
            m = FLICKR_PHOTO_RE.match(flickr_url)
            if m:
                return m.group(2), flickr_url
        return None, None

    def get_path_alias():
        return (site.account.user_info.get('person', {}).get('path_alias')
                or site.account.user_id)

    in_reply_to = request.form.get('in-reply-to')
    like_of = request.form.get('like-of')

    title = request.form.get('name')
    desc = request.form.get('content[value]') or request.form.get('content')

    # try to comment on a photo
    if in_reply_to:
        photo_id, flickr_url = get_photo_id(in_reply_to)
        if not photo_id:
            return util.make_publish_error_response(
                'Could not find Flickr photo to comment on based on URL {}'
                .format(in_reply_to))

        r = call_api_method('POST', 'flickr.photos.comments.addComment', {
            'photo_id': photo_id,
            'comment_text': desc or title,
        }, site=site)
        result = r.json()
        if result.get('stat') == 'fail':
            return util.wrap_silo_error_response(r)
        return util.make_publish_success_response(
            result.get('comment', {}).get('permalink'), result)

    # try to like a photo
    if like_of:
        photo_id, flickr_url = get_photo_id(like_of)
        if not photo_id:
            return util.make_publish_error_response(
                'Could not find Flickr photo to like based on original URL {}'
                .format(like_of))

        r = call_api_method('POST', 'flickr.favorites.add', {
            'photo_id': photo_id,
        }, site=site)
        result = r.json()
        if result.get('stat') == 'fail':
            return util.wrap_silo_error_response(r)
        return util.make_publish_success_response(
            flickr_url + '#liked-by-' + get_path_alias(), result)

    # otherwise we're uploading a photo
    photo_file = util.get_first(util.get_files_or_urls_as_file_storage(
        request.files, request.form, 'photo'))
    if not photo_file:
        photo_file = util.get_first(util.get_files_or_urls_as_file_storage(
            request.files, request.form, 'video'))
    if not photo_file:
        return util.make_publish_error_response('Missing "photo" attachment')

    r = upload({'title': title, 'description': desc}, photo_file, site=site)

    if r.status_code // 100 != 2:
        return util.wrap_silo_error_response(r)

    photo_id, error = interpret_upload_response(r)
    if error:
        return util.make_publish_error_response(error)

    # maybe add some tags or people
    cats = util.get_possible_array_value(request.form, 'category')
    tags = []
    user_ids = []
    for cat in cats:
        if util.looks_like_a_url(cat):
            resp = call_api_method(
                'GET', 'flickr.urls.lookupUser', {'url': cat}, site=site)
            if resp.status_code // 100 != 2:
                current_app.logger.error(
                    'Error looking up user by url %s. Response: %r, %s',
                    cat, resp, resp.text)
            result = resp.json()
            if result.get('stat') == 'fail':
                current_app.logger.debug('User not found for url %s', cat)
            else:
                user_id = result.get('user', {}).get('id')
                if user_id:
                    user_ids.append(user_id)
        else:
            tags.append('"' + cat + '"')

    if tags:
        current_app.logger.debug('Adding tags: %s', ','.join(tags))
        resp = call_api_method('POST', 'flickr.photos.addTags', {
            'photo_id': photo_id,
            'tags': ','.join(tags),
        }, site=site)
        current_app.logger.debug('Added tags: %r, %s', resp, resp.text)

    for user_id in user_ids:
        current_app.logger.debug('Tagging user id: %s', user_id)
        resp = call_api_method('POST', 'flickr.photos.people.add', {
            'photo_id': photo_id,
            'user_id': user_id,
        }, site=site)
        current_app.logger.debug('Tagged person: %r, %s', resp, resp.text)

    lat, lng = util.parse_geo_uri(request.form.get('location'))
    if lat and lng:
        current_app.logger.debug('setting location: %s, %s', lat, lng)
        resp = call_api_method('POST', 'flickr.photos.geo.setLocation', {
            'photo_id': photo_id,
            'lat': lat,
            'lon': lng,
        }, site=site)
        current_app.logger.debug('set location: %r, %s', resp, resp.text)

    return util.make_publish_success_response(
        'https://www.flickr.com/photos/{}/{}/'.format(
            get_path_alias(), photo_id))
def publish(site):
    auth = OAuth1(
        client_key=current_app.config['TWITTER_CLIENT_KEY'],
        client_secret=current_app.config['TWITTER_CLIENT_SECRET'],
        resource_owner_key=site.account.token,
        resource_owner_secret=site.account.token_secret)

    def interpret_response(result):
        if result.status_code // 100 != 2:
            return util.wrap_silo_error_response(result)

        result_json = result.json()
        twitter_url = 'https://twitter.com/{}/status/{}'.format(
            result_json.get('user', {}).get('screen_name'),
            result_json.get('id_str'))
        return util.make_publish_success_response(twitter_url, result_json)

    def get_tweet_id(original):
        tweet_url = util.posse_post_discovery(original, TWEET_RE)
        if tweet_url:
            m = TWEET_RE.match(tweet_url)
            if m:
                return m.group(1), m.group(2)
        return None, None

    def upload_photo(photo):
        current_app.logger.debug('uploading photo, name=%s, type=%s',
                                 photo.filename, photo.content_type)
        result = requests.post(UPLOAD_MEDIA_URL, files={
            'media': (photo.filename, photo.stream, photo.content_type),
        }, auth=auth)
        if result.status_code // 100 != 2:
            return None, result
        result_data = result.json()
        current_app.logger.debug('upload result: %s', result_data)
        return result_data.get('media_id_string'), None

    def upload_video(video, default_content_type='video/mp4'):
        # chunked video upload
        chunk_files = []

        def cleanup():
            for f in chunk_files:
                os.unlink(f)

        chunk_size = 1 << 20
        total_size = 0
        while True:
            chunk = video.read(chunk_size)
            if not chunk:
                break
            total_size += len(chunk)
            tempfd, tempfn = tempfile.mkstemp(
                '-%03d-%s' % (len(chunk_files), video.filename))
            with open(tempfn, 'wb') as f:
                f.write(chunk)
            chunk_files.append(tempfn)

        current_app.logger.debug('init upload. type=%s, length=%s',
                                 video.content_type, video.content_length)
        result = requests.post(UPLOAD_MEDIA_URL, data={
            'command': 'INIT',
            'media_type': video.content_type or default_content_type,
            'total_bytes': total_size,
        }, auth=auth)
        current_app.logger.debug('init result: %s %s', result, result.text)
        if result.status_code // 100 != 2:
            cleanup()
            return None, result

        result_data = result.json()
        media_id = result_data.get('media_id_string')
        segment_idx = 0
        for chunk_file in chunk_files:
            current_app.logger.debug('appending file: %s', chunk_file)
            result = requests.post(UPLOAD_MEDIA_URL, data={
                'command': 'APPEND',
                'media_id': media_id,
                'segment_index': segment_idx,
            }, files={
                'media': open(chunk_file, 'rb'),
            }, auth=auth)
            current_app.logger.debug('append result: %s %s',
                                     result, result.text)
            if result.status_code // 100 != 2:
                cleanup()
                return None, result
            segment_idx += 1

        current_app.logger.debug('finalize uploading video: %s', media_id)
        result = requests.post(UPLOAD_MEDIA_URL, data={
            'command': 'FINALIZE',
            'media_id': media_id,
        }, auth=auth)
        current_app.logger.debug('finalize result: %s %s',
                                 result, result.text)
        if result.status_code // 100 != 2:
            cleanup()
            return None, result

        cleanup()
        return media_id, None

    data = {}
    format = brevity.FORMAT_NOTE
    content = request.form.get('content[value]') or request.form.get('content')

    if 'name' in request.form:
        format = brevity.FORMAT_ARTICLE
        content = request.form.get('name')

    repost_ofs = util.get_possible_array_value(request.form, 'repost-of')
    for repost_of in repost_ofs:
        _, tweet_id = get_tweet_id(repost_of)
        if tweet_id:
            return interpret_response(
                requests.post(RETWEET_STATUS_URL.format(tweet_id), auth=auth))
    else:
        if repost_ofs:
            content = 'Reposted: {}'.format(repost_ofs[0])

    like_ofs = util.get_possible_array_value(request.form, 'like-of')
    for like_of in like_ofs:
        _, tweet_id = get_tweet_id(like_of)
        if tweet_id:
            return interpret_response(
                requests.post(FAVE_STATUS_URL, data={'id': tweet_id},
                              auth=auth))
    else:
        if like_ofs:
            content = 'Liked: {}'.format(like_ofs[0])

    media_ids = []
    for photo in util.get_files_or_urls_as_file_storage(
            request.files, request.form, 'photo'):
        media_id, err = upload_photo(photo)
        if err:
            return util.wrap_silo_error_response(err)
        media_ids.append(media_id)

    for video in util.get_files_or_urls_as_file_storage(
            request.files, request.form, 'video'):
        media_id, err = upload_video(video)
        if err:
            return util.wrap_silo_error_response(err)
        media_ids.append(media_id)

    in_reply_tos = util.get_possible_array_value(request.form, 'in-reply-to')
    for in_reply_to in in_reply_tos:
        twitterer, tweet_id = get_tweet_id(in_reply_to)
        if tweet_id:
            data['in_reply_to_status_id'] = tweet_id
            break
    else:
        if in_reply_tos:
            content = 'Re: {}, {}'.format(in_reply_tos[0], content)

    location = request.form.get('location')
    current_app.logger.debug('received location param: %s', location)
    data['lat'], data['long'] = util.parse_geo_uri(location)

    permalink_url = request.form.get('url')

    if media_ids:
        data['media_ids'] = ','.join(media_ids)

    if content:
        data['status'] = brevity.shorten(content, permalink=permalink_url,
                                         format=format, target_length=280)

    # for in-reply-to tweets, leading @mentions will be looked up from the
    # original tweet and added to the new tweet from there.
    # https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/post-statuses-update
    data['auto_populate_reply_metadata'] = 'true'

    data = util.trim_nulls(data)

    current_app.logger.debug('publishing with params %s', data)
    return interpret_response(
        requests.post(CREATE_STATUS_URL, data=data, auth=auth))
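# A minimal, hypothetical client-side sketch of the 'like-of' shortcut the
# Twitter handler above takes: it resolves the URL to a tweet ID and favorites
# it instead of creating a new status. Endpoint, token, and tweet URL are
# placeholders.
import requests

MICROPUB_ENDPOINT = 'https://example.com/micropub'  # hypothetical endpoint
ACCESS_TOKEN = 'XXXX'                               # hypothetical token

resp = requests.post(
    MICROPUB_ENDPOINT,
    headers={'Authorization': 'Bearer ' + ACCESS_TOKEN},
    data={
        # the handler calls get_tweet_id() on this URL and, on a match,
        # POSTs the extracted id to FAVE_STATUS_URL
        'like-of': 'https://twitter.com/example_user/status/1234567890',
    })
print(resp.status_code, resp.text)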
def publish(site):
    auth_headers = {'Authorization': 'token ' + site.account.token}

    in_reply_to = request.form.get('in-reply-to')
    if in_reply_to:
        in_reply_to = util.posse_post_discovery(in_reply_to, BASE_PATTERN)
        repo_match = (re.match(REPO_PATTERN, in_reply_to)
                      or re.match(ISSUES_PATTERN, in_reply_to))
        issue_match = (re.match(ISSUE_PATTERN, in_reply_to)
                       or re.match(PULL_PATTERN, in_reply_to))

        # reply to a repository -- post a new issue
        if repo_match:
            endpoint = 'https://api.github.com/repos/{}/{}/issues'.format(
                repo_match.group(1), repo_match.group(2))
            title = request.form.get('name')
            body = (request.form.get('content[markdown]')
                    or request.form.get('content[value]')
                    or request.form.get('content') or '')
            if not title and body:
                title = body[:49] + '\u2026'
            data = {
                'title': title,
                'body': body,
                'labels': util.get_possible_array_value(request.form,
                                                        'category'),
            }
        # reply to an issue -- post a new comment
        elif issue_match:
            endpoint = ('https://api.github.com/repos/{}/{}/issues/{}/comments'
                        .format(issue_match.group(1), issue_match.group(2),
                                issue_match.group(3)))
            body = (request.form.get('content[markdown]')
                    or request.form.get('content[value]')
                    or request.form.get('content') or '')
            data = {'body': body}
        else:
            return util.make_publish_error_response(
                'Reply URL does not look like a repo or issue: ' + in_reply_to)

        current_app.logger.debug('sending POST to %s with data %s',
                                 endpoint, data)
        r = requests.post(endpoint, json=data, headers=auth_headers)
        if r.status_code // 100 != 2:
            return util.wrap_silo_error_response(r)
        resp_json = r.json()
        return util.make_publish_success_response(
            resp_json.get('html_url'), resp_json)

    # like a repository -- star the repository
    like_of = request.form.get('like-of')
    if like_of:
        like_of = util.posse_post_discovery(like_of, BASE_PATTERN)
        repo_match = re.match(REPO_PATTERN, like_of)
        if repo_match:
            endpoint = 'https://api.github.com/user/starred/{}/{}'.format(
                repo_match.group(1), repo_match.group(2))
            current_app.logger.debug('sending PUT to %s', endpoint)
            r = requests.put(endpoint, headers=auth_headers)
            if r.status_code // 100 != 2:
                return util.wrap_silo_error_response(r)
            return util.make_publish_success_response(
                like_of + '#starred-by-' + site.account.username)
        else:
            return util.make_publish_error_response(
                'Like-of URL must be a repo: ' + like_of)

    return util.make_publish_error_response(
        'See {} for details on publishing to GitHub.'.format(
            url_for('views.developers', _external=True)))
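# A minimal, hypothetical client-side sketch of a request the GitHub handler
# above turns into an issue comment: 'in-reply-to' points at an issue URL and
# the markdown content becomes the comment body. Endpoint, token, and the
# issue URL are placeholders; the field names mirror what the handler reads.
import requests

MICROPUB_ENDPOINT = 'https://example.com/micropub'  # hypothetical endpoint
ACCESS_TOKEN = 'XXXX'                               # hypothetical token

resp = requests.post(
    MICROPUB_ENDPOINT,
    headers={'Authorization': 'Bearer ' + ACCESS_TOKEN},
    data={
        'in-reply-to': 'https://github.com/someuser/somerepo/issues/42',
        'content[markdown]': 'Thanks, this fixes the problem for me!',
    })
print(resp.status_code, resp.text)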
def publish(site):
    title = request.form.get('name')
    content = request.form.get('content[value]') or request.form.get('content')
    permalink = request.form.get('url')
    photo_file = util.get_first(
        util.get_possible_array_value(request.files, 'photo'))
    photo_url = util.get_first(
        util.get_possible_array_value(request.form, 'photo'))
    video_file = util.get_first(
        util.get_possible_array_value(request.files, 'video'))
    video_url = util.get_first(
        util.get_possible_array_value(request.form, 'video'))
    location = request.form.get('location')

    post_data = {'access_token': site.account.token}
    post_files = None
    api_endpoint = 'https://graph.facebook.com/v2.5/me/feed'
    fburl_separator = 'posts'

    message = (content if not permalink
               else '({})'.format(permalink) if not content
               else '{} ({})'.format(content, permalink))

    if video_file or video_url:
        if video_file:
            post_files = {
                'source': (video_file.filename, video_file.stream,
                           video_file.content_type or 'video/mp4')
            }
        elif video_url:
            post_data['url'] = video_url
        post_data['title'] = title
        post_data['description'] = message
        api_endpoint = 'https://graph-video.facebook.com/v2.5/me/videos'
        fburl_separator = 'videos'

    elif photo_file or photo_url:
        if photo_file:
            post_files = {'source': photo_file}
        elif photo_url:
            post_data['url'] = photo_url
        post_data['caption'] = message
        # TODO support album id as alternative to 'me'
        # TODO upload to "Timeline photos" album by default
        api_endpoint = 'https://graph.facebook.com/v2.5/me/photos'
        fburl_separator = 'photos'

    elif title and content:
        # looks like an article -- include the permalink as a 'link'
        # instead of inline
        post_data['message'] = '{}\n\n{}'.format(title, content)
        post_data['link'] = permalink
        post_data['name'] = title

    elif content:
        post_data['message'] = message
        tokens = brevity.tokenize(content)
        # linkify the first url in the message
        linktok = next((tok for tok in tokens if tok.tag == 'link'), None)
        if linktok:
            post_data['link'] = linktok.content

    else:
        return util.make_publish_error_response(
            'Request must contain a photo, video, or content')

    # posting Location to Facebook is disabled for now -- just
    # searching lat/long does not get us close enough to assume we
    # have the correct place.
    if False and location:
        if location.isnumeric():
            post_data['place'] = location
        else:
            place_name = (request.form.get('place-name')
                          or request.form.get('place_name'))
            lat, lng = util.parse_geo_uri(location)
            if lat and lng:
                current_app.logger.debug('Search FB for a place, %s at %s, %s',
                                         place_name, lat, lng)
                r = requests.get('https://graph.facebook.com/v2.5/search',
                                 params=util.trim_nulls({
                                     'type': 'place',
                                     'center': '%s,%s' % (lat, lng),
                                     'distance': '500',
                                     'q': place_name,
                                     'access_token': site.account.token,
                                 }))
                if r.status_code != 200:
                    current_app.logger.warning(
                        'FB place search failed with response %r: %r',
                        r, r.text)
                else:
                    places = r.json().get('data', [])
                    if not places:
                        # TODO consider searching without a place name?
                        current_app.logger.warning(
                            'FB no results for place %s at %s, %s',
                            place_name, lat, lng)
                    else:
                        current_app.logger.debug(
                            'Found FB place: %s (%s)',
                            places[0].get('name'), places[0].get('id'))
                        post_data['place'] = places[0].get('id')

    post_data = util.trim_nulls(post_data)

    current_app.logger.debug('Publishing to facebook %s: data=%s, files=%s',
                             api_endpoint, post_data, post_files)
    r = requests.post(api_endpoint, data=post_data, files=post_files)

    # need Web Canvas permissions to do this, which I am too lazy to apply for
    # if r.status_code == 400:
    #     error_data = r.json().get('error', {})
    #     code = error_data.get('code')
    #     subcode = error_data.get('subcode')
    #     # token is expired or otherwise invalid
    #     if code == 190:
    #         send_token_expired_notification(
    #             site.account.user_id,
    #             "silo.pub's Facebook access token has expired. Click the "
    #             "Facebook button on silo.pub's homepage to renew.",
    #             'https://silo.pub/')

    if r.status_code // 100 != 2:
        return util.wrap_silo_error_response(r)

    resp_data = r.json()
    userid = ''
    fbid = resp_data.get('id') or resp_data.get('post_id')
    split = fbid.split('_')
    if len(split) == 2:
        userid, fbid = split
    return util.make_publish_success_response(
        'https://www.facebook.com/{}/{}/{}'.format(
            site.account.username or userid, fburl_separator, fbid),
        data=resp_data)
def publish(site):
    auth = OAuth1(
        client_key=current_app.config['TUMBLR_CLIENT_KEY'],
        client_secret=current_app.config['TUMBLR_CLIENT_SECRET'],
        resource_owner_key=site.account.token,
        resource_owner_secret=site.account.token_secret)

    create_post_url = CREATE_POST_URL.format(site.domain)

    photo_url = util.get_first(
        util.get_possible_array_value(request.form, 'photo'))
    photo_file = util.get_first(
        util.get_possible_array_value(request.files, 'photo'))

    if photo_url:
        data = util.trim_nulls({
            'type': 'photo',
            'slug': request.form.get('slug'),
            'caption': request.form.get('content[html]')
            or request.form.get('content')
            or request.form.get('name')
            or request.form.get('summary'),
            'source': photo_url,
        })
        r = requests.post(create_post_url, data=data, auth=auth)

    elif photo_file:
        # tumblr signs multipart in a weird way. first sign the request as if
        # it's application/x-www-form-urlencoded, then recreate the request as
        # multipart but use the signed headers from before. Mostly cribbed from
        # https://github.com/tumblr/pytumblr/blob/\
        # 20e7e38ba6f0734335deee64d4cae45fa8a2ce90/pytumblr/request.py#L101
        # The API documentation and some of the code samples gave me the
        # impression that you could also send files just as part of the
        # form-encoded data, but I couldn't make it work.
        # https://www.tumblr.com/docs/en/api/v2#pphoto-posts
        # https://gist.github.com/codingjester/1649885#file-upload-php-L56
        data = util.trim_nulls({
            'type': 'photo',
            'slug': request.form.get('slug'),
            'caption': request.form.get('content[html]')
            or request.form.get('content')
            or request.form.get('name')
            or request.form.get('summary'),
        })
        fake_req = requests.Request('POST', create_post_url, data=data)
        fake_req = fake_req.prepare()
        auth(fake_req)
        real_headers = dict(fake_req.headers)

        # manually strip these, requests will recalculate them for us
        del real_headers['Content-Type']
        del real_headers['Content-Length']

        current_app.logger.info(
            'uploading photo to tumblr %s, headers=%r',
            create_post_url, real_headers)
        r = requests.post(create_post_url, data=data, files={
            'data': photo_file,
        }, headers=real_headers)

    else:
        data = util.trim_nulls({
            # one of: text, photo, quote, link, chat, audio, video
            'type': 'text',
            'slug': request.form.get('slug'),
            'title': request.form.get('name'),
            'body': util.get_complex_content(request.form),
        })
        current_app.logger.info(
            'posting to tumblr %s, data=%r', create_post_url, data)
        r = requests.post(create_post_url, data=data, auth=auth)

    current_app.logger.info(
        'response from tumblr %r, data=%r, headers=%r',
        r, r.content, r.headers)

    if r.status_code // 100 != 2:
        current_app.logger.warn(
            'Tumblr publish failed with response %s', r.text)
        return util.wrap_silo_error_response(r)

    location = None
    if 'Location' in r.headers:
        location = r.headers['Location']
    else:
        # we only get back the id; look up the url
        post_id = r.json().get('response').get('id')
        r = requests.get(FETCH_POST_URL.format(site.domain), params={
            'api_key': current_app.config['TUMBLR_CLIENT_KEY'],
            'id': post_id,
        })
        if r.status_code // 100 == 2:
            posts = r.json().get('response', {}).get('posts', [])
            if posts:
                location = posts[0].get('post_url')

    return util.make_publish_success_response(location)
def publish(site):
    auth = OAuth1(
        client_key=current_app.config['GOODREADS_CLIENT_KEY'],
        client_secret=current_app.config['GOODREADS_CLIENT_SECRET'],
        resource_owner_key=site.account.token,
        resource_owner_secret=site.account.token_secret)

    # publish a review
    # book_id (goodreads internal id)
    # review[review] (text of the review)
    # review[rating] (0-5) ... 0 = not given, 1-5 maps directly to
    #   h-review p-rating
    # review[read_at] dt-reviewed in YYYY-MM-DD
    # shelf -- check p-category for any that match existing goodreads shelves
    # TODO consider creating shelves for categories?

    # item might be an ISBN, a Goodreads URL, or just a title
    item = request.form.get('item')
    if not item:
        item_name = request.form.get('item[name]')
        item_author = request.form.get('item[author]')
        if item_name and item_author:
            item = item_name + ' ' + item_author

    rating = request.form.get('rating')
    review = next((request.form.get(key) for key in (
        'description[value]', 'description', 'content[value]', 'content',
        'summary', 'name')), None)
    categories = util.get_possible_array_value(request.form, 'category')

    if not item:
        return util.make_publish_error_response(
            'Expected "item": a URL, ISBN, or book title to review')

    m = item and BOOK_URL_RE.match(item)
    if m:
        book_id = m.group(1)
    else:
        # try searching for item
        r = requests.get(SEARCH_URL, params={
            'q': item,
            'key': current_app.config['GOODREADS_CLIENT_KEY'],
        })
        if r.status_code // 100 != 2:
            return util.wrap_silo_error_response(r)

        root = ETree.fromstring(r.content)
        book = root.find('search/results/work/best_book')
        if not book:
            return {
                'error': 'Goodreads found no results for query: ' + item,
                'upstream-data': r.text,
            }
        book_id = book.findtext('id')

    # add book to shelves
    all_shelves = set()
    if categories:
        r = requests.get(SHELVES_LIST_URL, params={
            'key': current_app.config['GOODREADS_CLIENT_KEY'],
            'user_id': site.account.user_id,
        })
        if r.status_code // 100 != 2:
            return util.wrap_silo_error_response(r)
        root = ETree.fromstring(r.content)
        for shelf in root.find('shelves'):
            all_shelves.add(shelf.findtext('name'))

    matched_categories = [c for c in categories if c in all_shelves]

    permalink = 'https://www.goodreads.com/book/show/' + book_id
    resp_data = {}

    # publish a review of the book
    if rating or review:
        current_app.logger.debug(
            'creating a review: book=%s, rating=%s, review=%s',
            book_id, rating, review)
        r = requests.post(REVIEW_CREATE_URL, data=util.trim_nulls({
            'book_id': book_id,
            'review[rating]': rating,
            'review[review]': review,
            # first shelf that matches
            'shelf': matched_categories.pop(0) if matched_categories else None,
        }), auth=auth)
        if r.status_code // 100 != 2:
            return util.wrap_silo_error_response(r)

        # example response
        """<?xml version="1.0" encoding="UTF-8"?>
        <review>
          <id type="integer">1484927007</id>
          <user-id type="integer">4544167</user-id>
          <book-id type="integer">9361589</book-id>
          <rating type="integer">2</rating>
          <read-status>read</read-status>
          <sell-flag type="boolean">false</sell-flag>
          <review></review>
          <recommendation nil="true"/>
          <read-at type="datetime" nil="true"/>
          <updated-at type="datetime">2015-12-29T21:25:34+00:00</updated-at>
          <created-at type="datetime">2015-12-29T21:25:34+00:00</created-at>
          <comments-count type="integer">0</comments-count>
          <weight type="integer">0</weight>
          <ratings-sum type="integer">0</ratings-sum>
          <ratings-count type="integer">0</ratings-count>
          <notes nil="true"/>
          <spoiler-flag type="boolean">false</spoiler-flag>
          <recommender-user-id1 type="integer">0</recommender-user-id1>
          <recommender-user-name1 nil="true"/>
          <work-id type="integer">14245059</work-id>
          <read-count nil="true"/>
          <last-comment-at type="datetime" nil="true"/>
          <started-at type="datetime" nil="true"/>
          <hidden-flag type="boolean">false</hidden-flag>
          <language-code type="integer" nil="true"/>
          <last-revision-at type="datetime">2015-12-29T21:25:34+00:00</last-revision-at>
          <non-friends-rating-count type="integer">0</non-friends-rating-count>
        </review>"""

        root = ETree.fromstring(r.content)
        review_id = root.findtext('id')
        permalink = 'https://www.goodreads.com/review/show/' + review_id
        resp_data['review-response'] = r.text

    if matched_categories:
        r = requests.post(ADD_BOOKS_TO_SHELVES_URL, data={
            'bookids': book_id,
            'shelves': ','.join(matched_categories),
        }, auth=auth)
        if r.status_code // 100 != 2:
            current_app.logger.error(
                'Failed to add book %s to additional shelves %r. '
                'Status: %s, Response: %r',
                book_id, matched_categories, r.status_code, r.text)
        resp_data['shelves-response'] = r.text

    return util.make_publish_success_response(permalink, data=resp_data)
def publish(site):
    auth = OAuth1(
        client_key=current_app.config["TUMBLR_CLIENT_KEY"],
        client_secret=current_app.config["TUMBLR_CLIENT_SECRET"],
        resource_owner_key=site.account.token,
        resource_owner_secret=site.account.token_secret,
    )

    type = request.form.get("h")
    create_post_url = CREATE_POST_URL.format(site.domain)

    photo_file = request.files.get("photo")
    if photo_file:
        # tumblr signs multipart in a weird way. first sign the request as if
        # it's application/x-www-form-urlencoded, then recreate the request as
        # multipart but use the signed headers from before. Mostly cribbed from
        # https://github.com/tumblr/pytumblr/blob/\
        # 20e7e38ba6f0734335deee64d4cae45fa8a2ce90/pytumblr/request.py#L101
        # The API documentation and some of the code samples gave me the
        # impression that you could also send files just as part of the
        # form-encoded data, but I couldn't make it work.
        # https://www.tumblr.com/docs/en/api/v2#pphoto-posts
        # https://gist.github.com/codingjester/1649885#file-upload-php-L56
        data = util.trim_nulls(
            {
                "type": "photo",
                "slug": request.form.get("slug"),
                "caption": request.form.get("content[html]")
                or request.form.get("content")
                or request.form.get("name")
                or request.form.get("summary"),
            }
        )
        fake_req = requests.Request("POST", create_post_url, data=data)
        fake_req = fake_req.prepare()
        auth(fake_req)
        real_headers = dict(fake_req.headers)

        # manually strip these, requests will recalculate them for us
        del real_headers["Content-Type"]
        del real_headers["Content-Length"]

        current_app.logger.info("uploading photo to tumblr %s, headers=%r",
                                create_post_url, real_headers)
        r = requests.post(create_post_url, data=data,
                          files={"data": photo_file}, headers=real_headers)
    else:
        data = util.trim_nulls(
            {
                # one of: text, photo, quote, link, chat, audio, video
                "type": "text",
                "slug": request.form.get("slug"),
                "title": request.form.get("name"),
                "body": util.get_complex_content(request.form),
            }
        )
        current_app.logger.info("posting to tumblr %s, data=%r",
                                create_post_url, data)
        r = requests.post(create_post_url, data=data, auth=auth)

    current_app.logger.info("response from tumblr %r, data=%r, headers=%r",
                            r, r.content, r.headers)

    if r.status_code // 100 != 2:
        current_app.logger.warn("Tumblr publish failed with response %s",
                                r.text)
        return util.wrap_silo_error_response(r)

    location = None
    if "Location" in r.headers:
        location = r.headers["Location"]
    else:
        # we only get back the id; look up the url
        post_id = r.json().get("response").get("id")
        r = requests.get(
            FETCH_POST_URL.format(site.domain),
            params={"api_key": current_app.config["TUMBLR_CLIENT_KEY"],
                    "id": post_id},
        )
        if r.status_code // 100 == 2:
            posts = r.json().get("response", {}).get("posts", [])
            if posts:
                location = posts[0].get("post_url")

    return util.make_publish_success_response(location)
def publish(site): """ Request: POST https://www.googleapis.com/blogger/v3/blogs/6561492933847572094/posts { "title": "This is a test, beautiful friend", "content": "This is some content with <i>html</i>!" } Response: 200 OK { "kind": "blogger#post", "id": "8225907794810815386", "blog": { "id": "6561492933847572094" }, "published": "2015-04-14T20:00:00-07:00", "updated": "2015-04-14T20:00:19-07:00", "etag": "\"Fgc6PVMaOxmEtPvQq0K7b_sZrRM/dGltZXN0YW1wOiAxNDI5MDY2ODE5MTYwCm9mZnNldDogLTI1MjAwMDAwCg\"", "url": "http://nofeathersnofur.blogspot.com/2015/04/this-is-test-beautiful-friend.html", "selfLink": "https://www.googleapis.com/blogger/v3/blogs/6561492933847572094/posts/8225907794810815386", "title": "This is a test, beautiful friend", "content": "This is some content with <i>html</i>!", "author": { "id": "01975554238474627641", "displayName": "Kyle", "url": "http://www.blogger.com/profile/01975554238474627641", "image": { "url": "http://img2.blogblog.com/img/b16-rounded.gif" } }, "replies": { "totalItems": "0", "selfLink": "https://www.googleapis.com/blogger/v3/blogs/6561492933847572094/posts/8225907794810815386/comments" }, "status": "LIVE", "readerComments": "ALLOW" } """ maybe_refresh_access_token(site.account) type = request.form.get('h') create_post_url = API_CREATE_POST_URL.format(site.site_id) current_app.logger.info('posting to blogger %s', create_post_url) post_data = util.trim_nulls({ 'title': request.form.get('name'), 'content': util.get_complex_content(request.form), }) r = requests.post(create_post_url, headers={ 'Authorization': 'Bearer ' + site.account.token, 'Content-Type': 'application/json', }, data=json.dumps(post_data)) current_app.logger.info( 'response from blogger %r, data=%r, headers=%r', r, r.content, r.headers) if r.status_code // 100 != 2: return util.wrap_silo_error_response(r) success_data = r.json() return util.make_publish_success_response( success_data.get('url'), data=success_data)