Exemplo n.º 1
0
def get_cached_entries():
    """Fetch every post from the hard-coded blog feed, 500 at a time.

    Returns a tuple of entries transformed by format_entry().  Pagination
    continues while full pages (500 entries) keep coming back.
    """
    blogger_service = service.GDataService()
    blogger_service.service = 'blogger'
    blogger_service.server = 'www.blogger.com'
    blogger_service.ssl = False
    query = service.Query()
    query.feed = '/feeds/6752139154038265086/posts/default'
    query.max_results = 500

    # Fix: dropped the unused `bri_entries` local from the original.
    entries = []
    i = 0
    while True:
        # start_index is 1-based; each page holds up to 500 entries.
        query.start_index = i * 500 + 1
        feed = blogger_service.Get(query.ToUri())
        logging.info('%d entries fetched, fetch number %d' %
                     (len(feed.entry), i + 1))
        entries.extend(feed.entry)

        # A short page means we have reached the end of the feed.
        if len(feed.entry) == 500:
            i += 1
        else:
            break

    logging.info('retrieved %d entries total' % len(entries))

    cachedentries = tuple(format_entry(e) for e in entries)

    return cachedentries
Exemplo n.º 2
0
def get_hrefs():
    """Return the href of the last link of every post in the blog feed."""
    blogger_service = service.GDataService()
    blogger_service.service = 'blogger'
    blogger_service.server = 'www.blogger.com'
    blogger_service.ssl = False
    query = service.Query()
    query.feed = '/feeds/6752139154038265086/posts/default'
    query.max_results = 500

    allhrefs = []
    page = 0
    while True:
        # Pages are 1-based and hold at most 500 entries each.
        query.start_index = 500 * page + 1
        feed = blogger_service.Get(query.ToUri())
        logging.info('%d urls fetched, fetch number %d' %
                     (len(feed.entry), page + 1))
        for post in feed.entry:
            allhrefs.append(post.link[-1].href)

        # Anything short of a full page means the feed is exhausted.
        if len(feed.entry) != 500:
            break
        page += 1

    logging.info('retrieved %d urls total' % len(allhrefs))
    return allhrefs
Exemplo n.º 3
0
 def _export_task(self, cr, uid, data, context):
     """Publish the selected project tasks as posts on the user's first blog.

     Reads the Blogger email/password stored on the res.users record for
     *uid* and raises an osv warning when either is missing.  Any failure
     inside the gdata calls is re-raised as an osv error.
     """
     obj_user = pooler.get_pool(cr.dbname).get('res.users')
     blog_auth_details = obj_user.read(cr, uid, uid, [])
     if not blog_auth_details['blogger_email'] or not blog_auth_details[
             'blogger_password']:
         raise osv.except_osv(
             'Warning !',
             'Please  blogger Enter email id and password in users')
     try:
         self.blog_service = service.GDataService(
             blog_auth_details['blogger_email'],
             blog_auth_details['blogger_password'])
         self.blog_service.source = 'Tiny'
         self.blog_service.service = 'blogger'
         self.blog_service.server = 'www.blogger.com'
         self.blog_service.ProgrammaticLogin()
         # The blog ID is the last path segment of the first blog's self link.
         feed = self.blog_service.Get('/feeds/default/blogs')
         self_link = feed.entry[0].GetSelfLink()
         if self_link:
             self.blog_id = self_link.href.split('/')[-1]
         obj_task = pooler.get_pool(cr.dbname).get('project.task')
         # data['form']['blog_id'][0][2] -- presumably the id list from a
         # many2many wizard field; verify against the wizard definition.
         data_task = obj_task.read(cr, uid, data['form']['blog_id'][0][2],
                                   [])
         for task in data_task:
             entry = gdata.GDataEntry()
             # NOTE(review): the author name is the literal string 'uid',
             # not the user's name -- confirm whether that is intended.
             entry.author.append(atom.Author(atom.Name(text='uid')))
             entry.title = atom.Title(title_type='xhtml', text=task['name'])
             entry.content = atom.Content(content_type='html',
                                          text=task['description'])
             self.blog_service.Post(
                 entry, '/feeds/' + self.blog_id + '/posts/default')
         return {'summary': 'Succefully sent tasks to blogger'}
     except Exception, e:
         raise osv.except_osv('Error !', e)
Exemplo n.º 4
0
    def handle_import(self, options):
        """
        Gets posts from Forumger.

        Requires a ``forum_id`` option.  Raises CommandError when the id is
        missing, when the gdata library is unavailable, or when the remote
        service reports an error.
        """

        forum_id = options.get("forum_id")
        if forum_id is None:
            raise CommandError("Usage is import_forumger %s" % self.args)

        try:
            from gdata import service
        except ImportError:
            raise CommandError("Could not import the gdata library.")

        forumger = service.GDataService()
        forumger.service = "forumger"
        forumger.server = "www.forumger.com"
        query = service.Query()
        query.feed = "/feeds/%s/posts/full" % forum_id
        query.max_results = 500
        try:
            feed = forumger.Get(query.ToUri())
        # Fix: use 'as err' (not ', err') to match the other importer in
        # this file and remain valid on modern Python.
        except service.RequestError as err:
            message = "There was a service error. The response was: " \
                "%(status)s %(reason)s - %(body)s" % err.message
            raise CommandError(message, forumger.server + query.feed,
                               err.message["status"])
Exemplo n.º 5
0
    def blogerLogin(self, user, password):
        """Authenticate *user* against Blogger via ClientLogin and keep the handle."""
        self.gdService = service.GDataService(user, password)
        self.gdService.service = 'blogger'
        self.gdService.server = 'www.blogger.com'
        self.gdService.source = 'Blogger_Python_Sample-1.0'
        self.gdService.ProgrammaticLogin()
Exemplo n.º 6
0
    def __init__(self, email, password):
        """Create a Blogger GDataService and log in with ClientLogin credentials."""
        svc = service.GDataService(email, password)
        svc.source = 'BloGTK-2.0'
        svc.service = 'blogger'
        svc.server = 'www.blogger.com'
        self.service = svc
        self.service.ProgrammaticLogin()
Exemplo n.º 7
0
def get_service(login, password):
    """Return the module-level Blogger service, creating it on first use."""
    global __service
    if __service:
        return __service
    # First call: build and memoize the service handle.
    __service = gdataservice.GDataService(login, password)
    __service.source = 'zgoda-JPA-%s' % const.VERSION_STRING
    __service.service = 'blogger'
    __service.server = 'www.blogger.com'
    return __service
Exemplo n.º 8
0
def get_blogger_service():
	"""Build a Blogger GDataService authenticated with a stored ClientLogin token."""
	token = get_token()
	svc = service.GDataService()
	svc.source = 'duck-bloggerintegration-1.0'
	svc.service = 'blogger'
	svc.account_type = 'GOOGLE'
	svc.server = 'www.blogger.com'
	svc.SetClientLoginToken(token)
	return svc
Exemplo n.º 9
0
    def __init__(self, username, password):
        """Log in to Blogger with ClientLogin and cache the user's blog list."""
        self.gdService = service.GDataService(username, password)
        self.gdService.service = 'blogger'
        self.gdService.server = 'www.blogger.com'
        self.gdService.source = 'Blogger_Python_Sample-1.0'
        self.gdService.ProgrammaticLogin()

        # Populate the blog list up front so later calls can use it.
        self.listBlogs()
Exemplo n.º 10
0
 def __init__(self, user, password):
     self.blogger_service = service.GDataService(user, password)
     self.blogger_service.source = 'gv-cl-blogger-updater-1.0'  # Client software name
     self.blogger_service.service = 'blogger'
     self.blogger_service.account_type = 'GOOGLE'
     self.blogger_service.server = 'www.blogger.com'
     self.blogger_service.ssl = True  # Let's protect connection
     print "[I] Logging into " + self.blogger_service.server + " ..."
     self.blogger_service.ProgrammaticLogin()
     if self.blogger_service.current_token is None:
         print "[E] Unable to login into \"" + self.blogger_service.server + "\""
Exemplo n.º 11
0
    def open(self):
        """
        Log in to Blogger and return the connected GDataService.
        """
        from gdata import service

        super(gblog_connector, self).open()
        svc = service.GDataService(self.email, self.password)
        svc.source = 'Tiny'
        svc.service = 'blogger'
        svc.server = 'www.blogger.com'
        svc.ProgrammaticLogin()
        self.gblog_service = svc
        return svc
Exemplo n.º 12
0
def _login():
    """ClientLogin to Blogger using the GOOGLE_USERNAME/GOOGLE_PASSWORD env vars."""
    svc = service.GDataService(
        os.environ.get('GOOGLE_USERNAME'),
        os.environ.get('GOOGLE_PASSWORD'))
    svc.source = 'import from bitkickers'
    svc.service = 'blogger'
    svc.account_type = 'GOOGLE'
    svc.server = 'www.blogger.com'
    svc.ProgrammaticLogin()
    return svc
Exemplo n.º 13
0
    def __init__(self, email, password):
        """Authenticate with ClientLogin and look up the ID of the first blog.

        *email* and *password* are the required ClientLogin credentials.
        'source' is an arbitrary string identifying this client application
        (org-appname-version by convention).
        """

        # Authenticate using ClientLogin.
        svc = service.GDataService(email, password)
        svc.source = 'Blogger_Python_Sample-1.0'
        svc.service = 'blogger'
        svc.server = 'www.blogger.com'
        self.service = svc
        self.service.ProgrammaticLogin()

        # The blog ID is the last path segment of the first blog's self link.
        feed = self.service.Get('/feeds/default/blogs')
        self_link = feed.entry[0].GetSelfLink()
        if self_link:
            self.blog_id = self_link.href.split('/')[-1]
Exemplo n.º 14
0
def blogPoster():
    """Read a post file (title line, tags line, body) and publish it to Blogger.

    Expects the post file path as the first CLI argument; prompts for the
    account password interactively.
    """
    # Bug fix: the original only printed the error and fell through, then
    # crashed with a NameError because no file name was ever bound.  Also
    # renamed the local so it no longer shadows the `file` builtin.
    if len(sys.argv) == 1:
        print 'Error: argument required.'
        return
    post_path = sys.argv[1]

    post = open(post_path)
    user = '******'
    #password = sys.argv[2]
    password = getpass.getpass()

    # login
    blogger_service = service.GDataService(user, password)
    blogger_service.source = 'wjmBlogger-0.01'
    blogger_service.service = 'blogger'
    blogger_service.account_type = 'GOOGLE'
    blogger_service.server = 'www.blogger.com'
    blogger_service.ProgrammaticLogin()

    # The blog ID is the last path segment of the first blog's self link.
    feed = blogger_service.Get('/feeds/default/blogs')
    blog_id = feed.entry[0].GetSelfLink().href.split("/")[-1]

    # First line of the file is the title; second holds comma-separated tags.
    entry = gdata.GDataEntry()
    entry.title = atom.Title('xhtml', text=post.readline())
    tags = post.readline().split(',')
    if tags == ['\n']:
        print 'no tags'
    else:
        for tag in tags:
            category = atom.Category(term=tag, scheme="http://www.blogger.com/atom/ns#")
            entry.category.append(category)
    # Everything after the tags line is the HTML body.
    entry.content = atom.Content(content_type='html', text=post.read())

    blogger_service.Post(entry, '/feeds/%s/posts/default' % blog_id)

    print('posted')
    post.close()
Exemplo n.º 15
0
    def createPost(self, title, content, author_name, is_draft):
        """Create a new post on the first blog of a hard-coded account.

        NOTE(review): despite the original docstring's claim that a
        GDataService is "given", this method builds its own service with
        the literal placeholder credentials 'user'/'password' -- they must
        be replaced for the login to succeed.

        A GDataEntry is built from *title*, *content* and *author_name*.
        With is_draft True the post is saved as a draft; False publishes
        it.  Returns the inserted post (GDataEntry) on success.
        """

        # Authenticate using ClientLogin.
        self.service = service.GDataService('user', 'password')
        self.service.source = 'Blogger_Python_Sample-1.0'
        self.service.service = 'blogger'
        self.service.server = 'www.blogger.com'
        self.service.ProgrammaticLogin()

        # Get the blog ID for the first blog (last segment of its self link).
        feed = self.service.Get('/feeds/default/blogs')
        self_link = feed.entry[0].GetSelfLink()
        if self_link:
            self.blog_id = self_link.href.split('/')[-1]

        # Create the entry to insert.
        entry = gdata.GDataEntry()
        entry.author.append(atom.Author(atom.Name(text=author_name)))
        entry.title = atom.Title(title_type='xhtml', text=title)
        entry.content = atom.Content(content_type='html', text=content)
        # A draft control element keeps the post unpublished.
        if is_draft:
            control = atom.Control()
            control.draft = atom.Draft(text='yes')
            entry.control = control

        # Ask the service to insert the new entry.
        return self.service.Post(entry,
                                 '/feeds/' + self.blog_id + '/posts/default')
Exemplo n.º 16
0
    def __init__(self, email, password):
        # Authenticate using ClientLogin.
        self.service = service.GDataService(email, password)
        self.service.source = 'Blogger_Python_Sample-1.0'
        self.service.service = 'blogger'
        self.service.server = 'www.blogger.com'
        self.service.ProgrammaticLogin()
        self.blog_id = 0

        # Get the blog ID for http://pythonjobs.blogspot.com
        query = service.Query()
        query.feed = '/feeds/default/blogs'
        feed = self.service.Get(query.ToUri())

        for entry in feed.entry:
            print "\t" + entry.title.text
            print entry.link[0].href

            # if entry.link[0].href=='http://pythonjobs.blogspot.com/':
            if entry.link[
                    0].href == 'http://www.blogger.com/feeds/18362312542208032325/blogs/5503040385101187323':
                self_link = entry.GetSelfLink()
                self.blog_id = self_link.href.split('/')[-1]
                break
Exemplo n.º 17
0
''' Program by Robert Washbourne http://devpy.me [email protected]'''

from BeautifulSoup import BeautifulSoup
from gdata import service
import os.path
import urllib
import shutil
import gdata
import atom
import os

print "Logging in"

# NOTE(review): credentials are blank here -- ProgrammaticLogin will fail
# unless real values are filled in before running.  TODO confirm how the
# credentials are meant to be supplied.
blogger_service = service.GDataService('', '')
blogger_service.service = 'blogger'
blogger_service.account_type = 'GOOGLE'
blogger_service.server = 'www.blogger.com'
blogger_service.ProgrammaticLogin()

# Script configuration: whether to add attribution, and which blog to read.
attribution = True
blogurl = "washbourne.blogspot.com"

print "Viewing post feed"

def DownloadImages(feed):
    ''' Download images from blogger given feed'''

    print "Downloading images"

    if os.path.exists(os.getcwd() + "/" + feed.title.text + " Backup/"):
Exemplo n.º 18
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from gdata import service
import gdata
import atom

# ClientLogin as the (scrubbed) user; the password is prompted interactively.
user = "******"

passwd = raw_input("Password for %s: " % user)

blogger_service = service.GDataService(user, passwd)
blogger_service.source = 'duck-bloggerintegration-1.0'
blogger_service.service = 'blogger'
blogger_service.account_type = 'GOOGLE'
blogger_service.server = 'www.blogger.com'
blogger_service.ProgrammaticLogin()

# Persist the ClientLogin token so later runs can reuse it without a password.
#token = blogger_service.current_token.get_token_string()
token = blogger_service.GetClientLoginToken()

# Fix: use a context manager so the token file is closed even if write() raises.
with open('token', 'wb') as f:
    f.write(token)
Exemplo n.º 19
0
    Blogger Automatic Content Generation and Publication
    Coded by Ricky L. Wilson
    Post entries from RSS feeds to a blogger blog using Google's Blogger API.
    Don't go too crazy; Blogger only allows 50 posts a day
"""
print banner

# Target blog and ClientLogin credentials -- fill these in before running.
blog_id = ''
email = ''
password = ''
source = 'My blog'
# RSS feeds to republish, and how many entries to take from each.
feeds = ["http://stackoverflow.com/feeds/"]
limit = 2

# Programmatically log in to blogger
blogger_service = service.GDataService(email, password)
blogger_service.source = source
blogger_service.service = 'blogger'
blogger_service.account_type = 'GOOGLE'
blogger_service.server = 'www.blogger.com'
blogger_service.ProgrammaticLogin()


def CreatePublicPost(blogger_service, blog_id, title, content):
    """Publish one HTML post with *title*/*content* to the blog *blog_id*."""
    post = gdata.GDataEntry()
    post.title = atom.Title('xhtml', title)
    post.content = atom.Content(content_type='html', text=content)
    return blogger_service.Post(post, '/feeds/%s/posts/default' % blog_id)


def publishFeed(url, limit=0):
Exemplo n.º 20
0
from BeautifulSoup import BeautifulSoup
from gdata import service
import urllib
import shutil
import gdata
import atom
import os

print "Logging in"

# SECURITY(review): real-looking credentials are hard-coded below.  They
# should be rotated immediately and loaded from the environment or an
# interactive prompt instead of living in source control.
blogger_service = service.GDataService('*****@*****.**', 'idog021)')
blogger_service.source = 'exampleCo-exampleApp-1.0'
blogger_service.service = 'blogger'
blogger_service.account_type = 'GOOGLE'
blogger_service.server = 'www.blogger.com'
blogger_service.ProgrammaticLogin()

print "Viewing post feed"


def PrintAllPosts(blogger_service, blog_id, max_results='99999'):
    query = service.Query()
    query.feed = '/feeds/' + blog_id + '/posts/default'
    query.max_results = max_results
    feed = blogger_service.Get(query.ToUri())

    if os.path.exists(os.getcwd() + "/" + feed.title.text + " Backup"):
        shutil.rmtree(os.getcwd() + "/" + feed.title.text + " Backup")
    os.mkdir(os.getcwd() + "/" + feed.title.text + " Backup")

    os.makedirs(os.getcwd() + "/" + feed.title.text + " Backup" + "/images/")
Exemplo n.º 21
0
# Build an HTML fragment per receiver showing its key0/key1 values.
# Each keys[...] item appears to be a "value index" string pair -- the
# split gives key0[0] = value and key0[1] = index.  TODO confirm format.
key0 = keys['DISH0106'][0].split(' ')
key1 = keys['DISH0106'][1].split(' ')
dish0106 = '<strong>DISH 0106</strong><br/><em>key0 </em><strong>%s</strong><br/>%s<br/><em>key1 </em><strong>%s</strong><br/>%s<br/>' % (key0[1], key0[0], key1[1], key1[0])

key0 = keys['BELL0901'][0].split(' ')
key1 = keys['BELL0901'][1].split(' ')
bell0901 = '<strong>BELL 0901</strong><br/><em>key0 </em><strong>%s</strong><br/>%s<br/><em>key1 </em><strong>%s</strong><br/>%s<br/>' % (key0[1], key0[0], key1[1], key1[0])

key0 = keys['BELL0907'][0].split(' ')
key1 = keys['BELL0907'][1].split(' ')
bell0907 = '<strong>BELL 0907</strong><br/><em>key0 </em><strong>%s</strong><br/>%s<br/><em>key1 </em><strong>%s</strong><br/>%s<br/>' % (key0[1], key0[0], key1[1], key1[0])

# NOTE(review): dish0101 is defined earlier in the file (not shown here).
new_content = dish0101 + dish0106 + bell0901 + bell0907

# login...
# NOTE(review): 'user'/'password' are placeholders and must be replaced.
selfservice = service.GDataService('user', 'password')
selfservice.source = 'Blogger_Python_Sample-1.0'
selfservice.service = 'blogger'
selfservice.server = 'www.blogger.com'
selfservice.ProgrammaticLogin()

# Get the blog ID for the first blog.
feed = selfservice.Get('/feeds/default/blogs')
self_link = feed.entry[0].GetSelfLink()
if self_link:
    selfblog_id = self_link.href.split('/')[-1]

# find the update entry.
query = service.Query()
query.feed = '/feeds/' + selfblog_id + '/posts/default'
query.published_min = "2008-02-17T14:00:00-08:00"
Exemplo n.º 22
0
    def handle_import(self, options):
        """
        Gets posts from Blogger.

        Requires a ``blog_id`` option.  Imports each post in the feed along
        with its comments.  NOTE(review): query.max_results is 500 and there
        is no pagination loop here, so blogs with more than 500 posts are
        only partially imported.
        """

        blog_id = options.get("blog_id")
        if blog_id is None:
            raise CommandError("Usage is import_blogger %s" % self.args)

        try:
            from gdata import service
        except ImportError:
            raise CommandError("Could not import the gdata library.")

        blogger = service.GDataService()
        blogger.service = "blogger"
        blogger.server = "www.blogger.com"
        query = service.Query()
        query.feed = "/feeds/%s/posts/full" % blog_id
        query.max_results = 500
        try:
            feed = blogger.Get(query.ToUri())
        except service.RequestError as err:
            message = "There was a service error. The response was: " \
                "%(status)s %(reason)s - %(body)s" % err.message
            raise CommandError(message, blogger.server + query.feed,
                               err.message["status"])

        for (i, entry) in enumerate(feed.entry):
            # this basically gets the unique post ID from the URL to itself
            # and pulls the ID off the end.
            post_id = entry.GetSelfLink().href.split("/")[-1]
            title = entry.title.text
            content = entry.content.text
            #this strips off the time zone info off the end as we want UTC
            published_date = datetime.strptime(
                entry.published.text[:-6],
                "%Y-%m-%dT%H:%M:%S.%f") - timedelta(seconds=timezone)

            #TODO - issues with content not generating correct <P> tags

            tags = [tag.term for tag in entry.category]
            post = self.add_post(title=title,
                                 content=content,
                                 pub_date=published_date,
                                 tags=tags)

            # get the comments from the post feed and then add them to
            # the post details
            ids = (blog_id, post_id)
            comment_url = "/feeds/%s/%s/comments/full?max-results=1000" % ids
            comments = blogger.Get(comment_url)

            for comment in comments.entry:
                email = comment.author[0].email.text
                author_name = comment.author[0].name.text
                #this strips off the time zone info off the end as we want UTC
                comment_date = datetime.strptime(
                    comment.published.text[:-6],
                    "%Y-%m-%dT%H:%M:%S.%f") - timedelta(seconds=timezone)
                website = ""
                # The author's URI (their site) is optional on a comment.
                if comment.author[0].uri:
                    website = comment.author[0].uri.text
                body = comment.content.text

                # add the comment as a dict to the end of the comments list
                self.add_comment(post=post,
                                 name=author_name,
                                 email=email,
                                 body=body,
                                 website=website,
                                 pub_date=comment_date)
Exemplo n.º 23
0
Arquivo: bg.py Projeto: wimac/home
    elif o == "--p":
      password = a

  if password =="inline":
	password = getpass.getpass()   

  if file == '' or password == '' or user=='':
    print ('python blog.py --f [file] --u [username]  --p [password] | inline ')
    sys.exit(2)
    
  fileHandle = open (file)
  
  #sample = BloggerExample(user, password)
  #sample.CreatePost (fileHandle.readline() ,fileHandle.read() , "bloger", False)
   
  servic = service.GDataService(user, password)
  servic.source = 'Blogger_Python_Sample-1.0'
  servic.service = 'blogger'
  servic.server = 'www.blogger.com'
  servic.ProgrammaticLogin() 


  feed = servic.Get('/feeds/default/blogs')
  self_link = feed.entry[0].GetSelfLink()
  if self_link:
	blog_id = self_link.href.split('/')[-1]

  
  entry = gdata.GDataEntry()
  entry.author.append(atom.Author(atom.Name(text='author')))
  entry.title = atom.Title(title_type='xhtml', text=fileHandle.readline() )
Exemplo n.º 24
0
 def __init__(self, username, password):
     """Open an authenticated Blogger connection for *username*."""
     self.service = gdata_service.GDataService(username, password)
     self.service.service = 'blogger'
     self.service.server = 'www.blogger.com'
     self.service.ProgrammaticLogin()
    def handle_import(self, options):
        """
        Gets posts from Blogger.

        Requires a ``blog_id`` option.  Pages through the post feed 500
        entries at a time, importing each post and its comments, and stops
        once a page yields no posts that have not already been processed.
        """

        blog_id = options.get("blog_id")
        if blog_id is None:
            raise CommandError("Usage is import_blogger %s" % self.args)

        try:
            from gdata import service
        except ImportError:
            raise CommandError("Could not import the gdata library.")

        blogger = service.GDataService()
        blogger.service = "blogger"
        blogger.server = "www.blogger.com"

        # Pagination state: 1-based feed index, ids already imported, and a
        # per-page counter that doubles as the loop condition.
        start_index = 1
        processed_posts = []
        new_posts = 1

        while new_posts:
            new_posts = 0

            query = service.Query()
            query.feed = "/feeds/%s/posts/full" % blog_id
            query.max_results = 500
            query.start_index = start_index

            try:
                feed = blogger.Get(query.ToUri())
            except service.RequestError as err:
                message = ("There was a service error. The response was: "
                           "%(status)s %(reason)s - %(body)s" % err.message)
                raise CommandError(message, blogger.server + query.feed,
                                   err.message["status"])

            for (i, entry) in enumerate(feed.entry):
                # this basically gets the unique post ID from the URL to itself
                # and pulls the ID off the end.
                post_id = entry.GetSelfLink().href.split("/")[-1]

                # Skip duplicate posts. Important for the last query.
                if post_id in processed_posts:
                    continue

                title = entry.title.text
                content = entry.content.text
                # this strips off the time zone info off the end as we want UTC
                clean_date = entry.published.text[:re.search(
                    r"\.\d{3}", entry.published.text).end()]

                published_date = self.parse_datetime(clean_date)

                # TODO - issues with content not generating correct <P> tags

                tags = [tag.term for tag in entry.category]
                post = self.add_post(title=title,
                                     content=content,
                                     pub_date=published_date,
                                     tags=tags)

                # get the comments from the post feed and then add them to
                # the post details
                comment_url = "/feeds/%s/%s/comments/full?max-results=1000"
                comments = blogger.Get(comment_url % (blog_id, post_id))

                for comment in comments.entry:
                    email = comment.author[0].email.text
                    author_name = comment.author[0].name.text
                    # Strip off the time zone info off the end as we want UTC
                    clean_date = comment.published.text[:re.search(
                        r"\.\d{3}", comment.published.text).end()]

                    comment_date = self.parse_datetime(clean_date)

                    # The author's URI (their site) is optional on a comment.
                    website = ""
                    if comment.author[0].uri:
                        website = comment.author[0].uri.text
                    body = comment.content.text

                    # add the comment as a dict to the end of the comments list
                    self.add_comment(
                        post=post,
                        name=author_name,
                        email=email,
                        body=body,
                        website=website,
                        pub_date=comment_date,
                    )

                processed_posts.append(post_id)
                new_posts += 1

            start_index += 500
Exemplo n.º 26
0
 def __init__(self, blog_id):
     self.blog_id = blog_id
     self.blogger_service = service.GDataService()