# Example #1 (score: 0)
def CallKdapi(SearchUrl):
    itemDict =  kdapi.check(SearchUrl)
    itemList = itemDict['output']
    itemTime = itemDict['time']
    length = len(itemList)
    print "finished"
    if length == 0:
        return {'ID':'Empty'}
    if length == 1:
        if itemList[0].similarity == None:
            return {'ID':'Empty'}
    score = 0
    url = None
    for item in itemList:
        if item.similarity != None:
            #print item.title
            if item.similarity > 98:
                if score < item.score:
                    print item.score
                    score = item.score
                    url = item.link
    if url == None:
        return {'ID':'Empty'}
    if score < 30:
        return {'ID':'Empty'}
    print url
    FirstSplitUrl = url.split("/comments/")
    SecondSplitUrl = FirstSplitUrl[1].split("/")
    SubmissionID = SecondSplitUrl[0]
    return {'ID':SubmissionID}
# Example #2 (score: 0)
def CallKdapi(SearchUrl):
    itemDict = kdapi.check(SearchUrl)
    itemList = itemDict['output']
    itemTime = itemDict['time']
    length = len(itemList)
    print "finished"
    if length == 0:
        return {'ID': 'Empty'}
    if length == 1:
        if itemList[0].similarity == None:
            return {'ID': 'Empty'}
    score = 0
    url = None
    for item in itemList:
        if item.similarity != None:
            #print item.title
            if item.similarity > 98:
                if score < item.score:
                    print item.score
                    score = item.score
                    url = item.link
    if url == None:
        return {'ID': 'Empty'}
    if score < 30:
        return {'ID': 'Empty'}
    print url
    FirstSplitUrl = url.split("/comments/")
    SecondSplitUrl = FirstSplitUrl[1].split("/")
    SubmissionID = SecondSplitUrl[0]
    return {'ID': SubmissionID}
# Example #3 (score: 0)
def karmacomment(link):
    """Return the body of the highest-karma usable root comment found on a
    KarmaDecay match for *link*, or None when no usable comment exists.

    link -- permalink string of the source reddit submission.
    """
    assert isinstance(link, str), "The karmaLink needs to be a string"
    # Source submission, identified by the ID segment of the permalink
    # (.../comments/<id>/<slug> -> index 6 when split on "/").
    sourcesubmission = r.get_submission(submission_id=link.split("/")[6])
    karmalist = []  # collected [comment body, comment score] pairs
    for item in kdapi.check(link):
        if item.score is None:
            print "Score is none!"
            continue
        elif item.score == "None":
            # kdapi apparently can report the score as the literal string
            # "None" as well -- TODO confirm against kdapi output.
            print "Score is None(type)"
            continue
        elif sourcesubmission.short_link.split('/')[3] == item.link.split("/")[6]:  # grabs ID's and compares them
            # The match is the source submission itself -- skip it.
            print "ID is same!"
            continue
        else:
            karmaid = item.link.split("/")[6]  # KarmaBLANK is just the stuff from karmadecay
            karmasubmission = r.get_submission(submission_id=karmaid, comment_limit=0)  # Submits OBJ for KD
            karmasubmission.replace_more_comments(limit=0, threshold=0)  # Messing with morecomments is dumb, this works
            # TODO : Figure out why the hell this ^^^ works

            for karmaComment in karmasubmission.comments:  # goes through comments and adds score and text body
                if karmaComment.body == "None" or karmaComment.body is None:
                    print "Body has nothing!"
                    continue
                elif "(/" in karmaComment.body:
                    # "(/" presumably marks markdown links to reddit-relative
                    # paths (bot boilerplate?) -- verify before changing.
                    print "Body has weird text"
                    continue
                elif karmaComment.body == "[deleted]":
                    print "Deleted comment :("
                    continue
                elif not karmaComment.is_root:
                    # Only top-level comments are candidates.
                    print "Not root!"
                    continue
                else:
                    commenttuple = [karmaComment.body, karmaComment.score]
                    karmalist.append(commenttuple)  # adds them to karmalist to be sorted later

    if not karmalist:
        return None
    else:
        return sorted(karmalist, key=itemgetter(1))[-1][0] # Sorts and then returns comment with most karma
# Example #4 (score: 0)
def getReposts(url):
  """Look up *url* on KarmaDecay and return its reposts.

  Each repost is a dict with keys 'link', 'score', 'similarity' and
  'subreddit' (leading "/r/" prefix stripped; None when absent).
  Results are ordered by descending score.
  """
  matches = kdapi.check(url)
  matches.sort(key=operator.attrgetter('score'), reverse=True)
  results = []
  for match in matches:
    results.append({
        'link': match.link,
        'score': match.score,
        'similarity': match.similarity,
        'subreddit': match.subreddit[3:] if match.subreddit else None,
    })
  return results
def getReposts(url):
    """Given a reddit url, use kdapi to get any reposts.

    Returns a list of dicts ('link', 'score', 'similarity', 'subreddit'),
    sorted by descending score; 'subreddit' has its 3-char prefix removed
    and is None when the result carries no subreddit.
    """
    reposts = sorted(kdapi.check(url),
                     key=operator.attrgetter('score'),
                     reverse=True)
    return [
        {
            'link': item.link,
            'score': item.score,
            'similarity': item.similarity,
            'subreddit': item.subreddit[3:] if item.subreddit else None,
        }
        for item in reposts
    ]
# Example #6 (score: 0)
import kdapi
import praw

score = 0
url = None
itemList = kdapi.check("http://i.imgur.com/nn5UneG.gif")
print len(itemList)
for item in itemList:
    print item.title
    if score < item.score:
        print item.score
        score = item.score
        url = item.link
    if item.similarity > 98:
        print "yes"
print url
FirstSplitUrl = url.split("/comments/")
SecondSplitUrl = FirstSplitUrl[1].split("/")
SubmissionID = SecondSplitUrl[0]
print SubmissionID
r = praw.Reddit('Comment Scraper 1.0 by u/_Daimon_ see '
                'https://praw.readthedocs.org/en/latest/'
                'pages/comment_parsing.html')
submission = r.get_submission(submission_id=SubmissionID)
CommentList = submission.comments
TopComment = CommentList[0].body
print TopComment
# Example #7 (score: 0)
# NOTE(review): this snippet is truncated -- the try blocks opened below are
# never closed with handler bodies, so it is not valid Python as captured.
# Kept byte-identical; comments only.
import kdapi, praw
# Credentials are redacted placeholders from the original paste.
reddit = praw.Reddit(client_id='asdasd',
                     client_secret='asdasd',
                     user_agent='Does.this.matter.lol',
                     password="******",
                     username= "******")
# Scan the hour's top /r/all posts that host images (reddit or imgur) and
# look each one up on KarmaDecay.
for submission in reddit.subreddit('all').top('hour',limit=1000):
    if ("redd.it" in submission.url or "imgur" in submission.url):
        print("ID: "+submission.id)
        prevhigh = 0   # best score seen among the matches
        highpost = 0   # submission object holding that score (0 = none yet)
        try:
            for item in kdapi.check(submission.url):
                sub2 = reddit.submission(url=item.link)
                if (sub2 == submission):
                    # The match is the post itself -- skip.
                    continue
                sub2score = 0
                try:
                    sub2score = sub2.score
                    if (sub2.score > prevhigh):
                        highpost = sub2
                        prevhigh = sub2.score
                except Exception:
                    pass
                    print(" - ERROR GETTING VOTES OF "+sub2.id)
            if (highpost != 0):
                print(" - HIGH POST: "+highpost.id)
                print("    - SCORE: "+str(highpost.score))
                try:
                    print("    - COMMENT: "+highpost.comments[0].body)
                except Exception:
# Example #8 (score: 0)
import kdapi
import praw

score = 0
url = None
itemList =  kdapi.check("http://i.imgur.com/nn5UneG.gif")
print len(itemList)
for item in itemList:
    print item.title
    if score < item.score:
        print item.score
        score = item.score
        url = item.link
    if item.similarity > 98:
        print "yes"
print url
FirstSplitUrl = url.split("/comments/")
SecondSplitUrl = FirstSplitUrl[1].split("/")
SubmissionID = SecondSplitUrl[0]
print SubmissionID
r = praw.Reddit('Comment Scraper 1.0 by u/_Daimon_ see '
                'https://praw.readthedocs.org/en/latest/'
                'pages/comment_parsing.html')
submission = r.get_submission(submission_id=SubmissionID)
CommentList = submission.comments
TopComment = CommentList[0].body
print TopComment