Exemplo n.º 1
0
def search_for_tweets(search_term):
    """Search Twitter for *search_term* and render the first 10 results.

    Returns the concatenated HTML produced by make_tweet() for each
    status in the search response.
    """
    endpoint = 'https://api.twitter.com/1.1/search/tweets.json'
    signed_url = twurl.augment(endpoint, {'q': search_term, 'count': 10})
    payload = json.loads(urllib2.urlopen(signed_url).read())

    # Render each status; the archive dropdown is keyed by the tweet id.
    fragments = [
        make_tweet(status, 'myTimeline', make_archive_dropdown(status['id']))
        for status in payload['statuses']
    ]
    return ''.join(fragments)
Exemplo n.º 2
0
def searchForTweets(searchTerm):
    """Search Twitter for *searchTerm* and render the first 15 results.

    Uses the module-level ``session`` to sign the request; returns the
    concatenated HTML produced by makeTweet() for each status.
    """
    global session
    endpoint = 'https://api.twitter.com/1.1/search/tweets.json'
    signed_url = twurl.augment(endpoint, {'q': searchTerm, 'count': 15}, session)
    payload = json.loads(urllib2.urlopen(signed_url).read())

    rendered = [
        makeTweet(status, 'myTimeline', makeArchiveDropdown(status))
        for status in payload['statuses']
    ]
    return ''.join(rendered)
Exemplo n.º 3
0
def spiderTwitter():
  TWITTER_URL = 'https://api.twitter.com/1.1/friends/list.json'
  conn = sqlite3.connect('spider.sqlite3')
  cur = conn.cursor()
  cur.execute('''
  CREATE TABLE IF NOT EXISTS Twitter (name TEXT, retrieved INTEGER, friends INTEGER)''')
  while True:
    acct = raw_input('Enter a Twitter account, or quit: ')
    if ( acct == 'quit' ) : break
    if ( len(acct) < 1 ) :
      cur.execute('SELECT name FROM Twitter WHERE retrieved = 0 LIMIT 1')
      try:
        acct = cur.fetchone()[0]
      except:
        print 'No unretrieved Twitter accounts found'
        continue
  
  url = twurl.augment(TWITTER_URL, {'screen_name': acct, 'count': '20'} )
  print 'Retrieving', url
  connection = urllib.urlopen(url)
  data = connection.read()
  headers = connection.info().dict
  # print 'Remaining', headers['x-rate-limit-remaining']
  js = json.loads(data)
  # print json.dumps(js, indent=4)

  cur.execute('UPDATE Twitter SET retrieved=1 WHERE name = ?', (acct, ) )
  countnew = 0
  countold = 0
  for u in js['users'] :
    friend = u['screen_name']
    print friend
    cur.execute('SELECT friends FROM Twitter WHERE name = ? LIMIT 1', (friend, ) )
    try:
      count = cur.fetchone()[0]
      cur.execute('UPDATE Twitter SET friends = ? WHERE name = ?', (count+1, friend) )
      countold = countold + 1
    except:
      cur.execute('''INSERT INTO Twitter (name, retrieved, friends) VALUES ( ?, 0, 1 )''', ( friend, ) )
      countnew = countnew + 1
    print 'New accounts=',countnew,' revisited=',countold
    conn.commit()
  cur.close()
Exemplo n.º 4
0
import urllib.request, urllib.parse, urllib.error
import twurl

TWITTER_URL = "https://api.twitter.com/1.1/statuses/user_timeline.json"

# Repeatedly prompt for an account and dump the start of its timeline;
# an empty account name ends the session.
while True:
    print("")
    acct = input("Enter Twitter Account:")
    if not acct:
        break
    signed_url = twurl.augment(TWITTER_URL, {"screen_name": acct, "count": "2"})
    print("Retrieving", signed_url)
    response = urllib.request.urlopen(signed_url)
    body = response.read().decode()
    print(body[:250])  # just a preview of the JSON body
    header_map = dict(response.getheaders())
    # Twitter's per-window request allowance comes back in the headers.
    print("Remaining", header_map["x-rate-limit-remaining"])
Exemplo n.º 5
0
def call_api(twitter_url, parameters):
    """Sign a Twitter API request with twurl and return the raw body."""
    signed = twurl.augment(twitter_url, parameters)
    return urllib2.urlopen(signed).read()
Exemplo n.º 6
0
import os

# NOTE(review): os.path.exists/remove do NOT expand '~', so this check is
# always False and the removal never happens -- TODO: wrap both paths in
# os.path.expanduser, or reuse the absolute path opened below.
if os.path.exists("~/Desktop/majorProject/Data/search_result.txt"):
    os.remove("~/Desktop/majorProject/Data/search_result.txt")
TWITTER_URL = 'https://api.twitter.com/1.1/search/tweets.json?'
word = raw_input('Enter word:')
word_no_retweet = word + ' -RT'  #filter out no retweet
# NOTE(review): 'f' is never closed in this fragment; presumably used and
# closed in the (missing) rest of the script -- verify.
f = open("/users/karnageknight/Desktop/majorProject/Data/search_result.txt",
         "a+")
count = 0
for x in range(0, 3):  #range defines number of pages displayed
    if x == 0:
        url = twurl.augment(
            TWITTER_URL, {
                'q': word_no_retweet,
                'count': '100',
                'lang': 'en',
                'include_entities': 'false',
                'return_type': 'popular'
            })  #for first page
    else:
        # NOTE(review): 'next_max_id' is never assigned in this excerpt; the
        # code that fetches each page and extracts it appears truncated, so
        # iterations after the first would raise NameError as shown here.
        url = twurl.augment(
            TWITTER_URL, {
                'q': word_no_retweet,
                'count': '100',
                'lang': 'en',
                'include_entities': 'false',
                'max_id': next_max_id,
                'return_type': 'popular'
            })  #for subsequent tweets using their max_id
    #print 'Retrieving', url
    #print word_no_retweet
Exemplo n.º 7
0
import urllib.request, urllib.parse, urllib.error
from twurl import augment

print('* Calling Twitter...')
# Build an OAuth-signed request URL for drchuck's two latest tweets.
signed_url = augment('https://api.twitter.com/1.1/statuses/user_timeline.json',
                     {'screen_name': 'drchuck', 'count': '2'})
print(signed_url)
response = urllib.request.urlopen(signed_url)
body = response.read()
print(body)

# Response headers (includes the rate-limit counters).
print(dict(response.getheaders()))
Exemplo n.º 8
0
from pprint import pprint

# https://apps.twitter.com/
# Create App and get the four strings, put them in hidden.py

TWITTER_URL = 'https://api.twitter.com/1.1/friends/list.json'

# Ignore SSL certificate errors
ssl_ctx = ssl.create_default_context()
ssl_ctx.check_hostname = False
ssl_ctx.verify_mode = ssl.CERT_NONE

print("This program will give you a possibility to search something in json")
print('')
acct = input('Enter Twitter Account: ')
signed_url = twurl.augment(TWITTER_URL, {'screen_name': acct, 'count': '5'})
# print('Retrieving', signed_url)
response = urllib.request.urlopen(signed_url, context=ssl_ctx)
payload = response.read().decode()

# Pretty-print the friends/list response.
parsed = json.loads(payload)
print(json.dumps(parsed, indent=2))


def search_json(json):
    """Search a JSON-derived dict for values by key.

    NOTE(review): the parameter name shadows the ``json`` module, and the
    body is truncated in this snippet -- only the two accumulators below
    are set up; the actual search/return logic is missing. Confirm
    against the full original source before relying on it.
    """
    nick_pair = []
    nicknames = []
Exemplo n.º 9
0
import re
import objectpath
import pytz
import sys

# Translation table mapping astral-plane code points to U+FFFD so tweets
# still print on consoles that cannot encode them.
# FIX: the lower bound was 0x1000, which would also clobber a large part
# of the Basic Multilingual Plane (CJK etc.); non-BMP starts at 0x10000.
non_bmp = dict.fromkeys(range(0x10000, sys.maxunicode + 1), 0xfffd)
TWITTER_URL = 'https://api.twitter.com/1.1/search/tweets.json?'

# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

while True:
    print('')
    query = input('Enter the query:')
    if len(query) < 1:
        break
    url = twurl.augment(TWITTER_URL,
                        {'q': query, 'count': '100'})
    con = ur.urlopen(url, context=ctx)
    jas = json.loads(con.read().decode())
    # Pull every 'text' field anywhere in the response tree.
    tree_obj = objectpath.Tree(jas)
    tweets = list(tree_obj.execute('$..text'))
    for x in tweets:
        try:
            print(x)
        except UnicodeEncodeError:
            # Console encoding choked; retry with astral chars replaced.
            print(x.translate(non_bmp))
    # FIX: was dict(connection.getheaders()) -- 'connection' is never
    # defined in this script; the response object is 'con'.
    headers = dict(con.getheaders())
    print('Remaining', headers['x-rate-limit-remaining'])
Exemplo n.º 10
0
# https://apps.twitter.com/
# Create App and get the four strings, put them in hidden.py

# NOTE(review): this snippet is truncated -- the final `if 'status' not in u:`
# has no body, so it will not run as-is; presumably the original printed a
# "no status" message there. Relies on ssl/twurl/urllib/json imported above.
TWITTER_URL = 'https://api.twitter.com/1.1/friends/list.json'

# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

while True:
    print('')
    acct = input('Enter Twitter Account:')
    if (len(acct) < 1): break
    url = twurl.augment(TWITTER_URL,
    #Count is number of friends IE.. First five friends
                        {'screen_name': acct, 'count': '5'})
    print('Retrieving', url)
    connection = urllib.request.urlopen(url, context=ctx)
    data = connection.read().decode()
#add try except
    js = json.loads(data)
    #Pretty Prints the data
    print(json.dumps(js, indent=2))

    headers = dict(connection.getheaders())
    print('Remaining', headers['x-rate-limit-remaining'])

    # Walk each followed user; users with no tweets lack a 'status' key.
    for u in js['users']:
        print(u['screen_name'])
        if 'status' not in u:
Exemplo n.º 11
0
#this script is authored by Georges BODIONG
import sys
import urllib
import twurl
import json

# NOTE(review): Python 2, tab-indented. The loop never breaks (the length
# check is commented out), and the last two lines mix spaces and tabs --
# likely mangled by the paste; verify indentation against the original.
# Also, the comment says "followers.json" but the file written is
# 'dBootcamp.json'.
TWITTER_URL = 'https://api.twitter.com/1.1/search/tweets.json'

while True:
	print '-----------------------------------'
	q = raw_input('Enter your search query: ')
#	if (len(acct) < 1) : break

	url = twurl.augment(TWITTER_URL,
	 {'q' : q})
	print 'Retrieving', url
	connection = urllib.urlopen(url)
	data = connection.read()
	headers = connection.info().dict
	print '-----------------------------------'
	print 'Retrieved', len(data), 'characters'

	js = json.loads(data)

#	print json.dumps(js, indent=4)

	#persist data in the file system
	#in a file named followers.json
	with open('dBootcamp.json', 'w') as f:
    		json.dump(js, f)
    	print 'The operation was successful...'		
Exemplo n.º 12
0
#!/usr/bin/env python

import urllib
from twurl import augment

print "Connecting..."
request_url = r""
params = {}
url = augment(request_url, params)
print "\n{}\n".format(url)

connection = urllib.urlopen(url)
data = connection.read()
print data

headers = connection.info.dict
print headers
Exemplo n.º 13
0
import urllib
import twurl
import json

# Retrieves friend list data from twitter

TWITTER_URL = 'https://api.twitter.com/1.1/friends/list.json'

# Python 2: prompt for an account, fetch its 5 newest friends, and print
# each friend's name with a preview of their latest tweet.
while True:
    print ''
    acct = raw_input('Enter Twitter Account:')
    if (len(acct) < 1): break
    url = twurl.augment(TWITTER_URL, {
        'screen_name': acct,
        'count': '5'
    })  # we only grab the latest 5 users
    print 'Retrieving', url
    connection = urllib.urlopen(url)
    data = connection.read()  # Body JSON Data
    headers = connection.info(
    ).dict  # JSON Header data. Includes the x rate limit remaining
    print 'Remaining', headers['x-rate-limit-remaining']

    js = json.loads(data)  # json  load the body data

    print json.dumps(js, indent=4)  # Pretty print with indent 4

    for u in js['users']:
        print u['screen_name']
        # NOTE(review): raises KeyError if a friend has never tweeted
        # ('status' absent) -- guard as other examples in this file do.
        s = u['status']['text']
        print '  ', s[:50]  # Print the first 50 characters of each tweet
import urllib.request, urllib.parse, urllib.error
from twurl import augment
import ssl

# https://apps.twitter.com/
# Create App and get the four strings, put them in hidden.py

print('* Calling Twitter...')
# Sign a request for satvikboorela's two most recent tweets.
signed_url = augment('https://api.twitter.com/1.1/statuses/user_timeline.json', {
    'screen_name': 'satvikboorela',
    'count': '2'
})
print(signed_url)

# Ignore SSL certificate errors
ssl_ctx = ssl.create_default_context()
ssl_ctx.check_hostname = False
ssl_ctx.verify_mode = ssl.CERT_NONE

response = urllib.request.urlopen(signed_url, context=ssl_ctx)
body = response.read()
print(body)

print('======================================')
print(dict(response.getheaders()))
Exemplo n.º 15
0
import urllib.request, urllib.parse, urllib.error
import json
from twurl import augment
import ssl

# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

# Prompt for usernames until an empty line; for each, fetch up to 80
# friends and dump the raw response JSON to output.json.
while True:
    accn = input("Enter Username:- ")
    if len(accn) < 1: break

    url = augment('https://api.twitter.com/1.1/friends/list.json', {
        'screen_name': accn,
        'count': '80'
    })
    print('Retrieving: ', url)
    uh = urllib.request.urlopen(url, context=ctx)

    data = uh.read().decode()
    headers = dict(uh.getheaders())
    print('Remaining Request: ', headers['x-rate-limit-remaining'])

    js = json.loads(data)
    # FIX: the file handle was opened and never closed; a context manager
    # guarantees the dump is flushed to disk on every iteration.
    with open('output.json', 'w') as x:
        x.write(json.dumps(js, indent=4))
    #print(json.dumps(js, indent = 2))
    users = js['users']
    for user in users:
        print('-------------------------------')
Exemplo n.º 16
0
# NOTE(review): this fragment depends on TWITTER_URL, identity, ctx,
# twurl, urllib and json being defined/imported earlier in the original
# file -- none are visible here. The tail is also truncated (the inner
# loop stops mid-parse at `del b[0:4]`).
while True:
    print('')
    keyword = input('Enter Your Search Keyword:')
    if (len(keyword) < 1): break

    #Represents the 2min timeframe
    minutetime = 2

    #Describes the number of timeframes you wish to retrieve
    for x in range(0, 3):

        #max_id is used to set an ending marker of each timeframe
        url = twurl.augment(
            TWITTER_URL, {
                'q': keyword,
                'count': '100',
                'max_id': identity,
                'result_type': 'recent'
            })
        connection = urllib.request.urlopen(url, context=ctx)
        data = connection.read().decode()
        jsondata = json.loads(data)

        #Count for each timeframe
        counter = 0

        # NOTE(review): loop variable 'id' shadows the builtin id().
        for id in jsondata['statuses']:
            a = id['created_at']
            b = list(a)
            del b[0:4]
Exemplo n.º 17
0
# Create App and get the four strings, put them in hidden.py

TWITTER_URL = 'https://api.twitter.com/1.1/friends/list.json' # friends/list endpoint of the Twitter API

# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

# Needs the OAuth token/secret in hidden.py (via twurl) to sign requests.
while True: # loop until the user enters an empty account
    print('')  # blank line between iterations
    acct = input('Enter Twitter Account:') # prompt for a Twitter account
    if (len(acct) < 1): break # empty input exits the loop
    url = twurl.augment(TWITTER_URL,
                        {'screen_name': acct, 'count': '5'}) # sign the URL; 'count' caps the reply at 5 friends
                        # twurl adds the OAuth parameters to the query string
    print('Retrieving', url) # show the signed URL being fetched
    connection = urllib.request.urlopen(url, context=ctx) # open the URL; context=ctx skips SSL certificate checks
    data = connection.read().decode() # read the body and decode UTF-8 bytes to str

    js = json.loads(data) # parse the JSON text into Python structures
    print(json.dumps(js, indent=2)) # pretty-print the response with 2-space indent

    headers = dict(connection.getheaders())  # response headers as a dict (urllib's getheaders())
    print('Remaining', headers['x-rate-limit-remaining']) # Twitter's per-window allowance is small, about 15

    for u in js['users']: # each friend record under the 'users' key
        print(u['screen_name']) # the friend's handle
        if 'status' not in u: # friend has never tweeted
            print('   * No status found') # report the missing status
def augment(url, parameters):
    """Return *url* signed as an OAuth1 GET request with *parameters*.

    Credentials come from hidden.oauth(); the result is the full URL
    including the oauth_* query parameters.
    """
    secrets = hidden.oauth()
    consumer = oauth.OAuthConsumer(secrets['consumer_key'],
                                   secrets['consumer_secret'])
    token = oauth.OAuthToken(secrets['token_key'], secrets['token_secret'])
    request = oauth.OAuthRequest.from_consumer_and_token(
        consumer, token=token, http_method='GET', http_url=url,
        parameters=parameters)
    request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(),
                         consumer, token)
    return request.to_url()

-- twitter.py

import urllib
from twurl import augment

# Python 2 demo: fetch and dump drchuck's two latest tweets.
print '* Calling Twitter... *'
url = augment('https://api.twitter.com/1.1/statuses/user_timeline.json', {'screen_name' : 'drchuck', 'count': 2} ) # show me the user's timeline, for user (screen_name) drchuck and show me the first 2
print url
connection = urllib.urlopen(url)
data = connection.read() # gets the body 
print data # will be JSON data
headers = connection.info().dict # gets a dictionary with only the headers
print headers

# python -mjson.tool < json_ugly_file

-- twitter2.py

import urllib
import twurl
import json
Exemplo n.º 19
0
import urllib.request, urllib.parse, urllib.error
from twurl import augment

print('* Calling Twitter...')
# Build the signed URL for drchuck's two latest tweets, then fetch it.
timeline_url = augment('https://api.twitter.com/1.1/statuses/user_timeline.json',
                       {'screen_name': 'drchuck', 'count': '2'})
print(timeline_url)
resp = urllib.request.urlopen(timeline_url)
raw = resp.read()
print(raw)

# Dump all response headers, rate-limit counters included.
print(dict(resp.getheaders()))
Exemplo n.º 20
0
import urllib
from twurl import augment
import json

print "reads a Twitter account's timeline"

# Keep asking until the user supplies a valid integer tweet count.
while True:
        try:
            cout = int(raw_input('Enter tweet count:'))
            break
        except ValueError:
            print "Oops!  That was no valid number.  Try again..."

print '* Calling Twitter...'
# Fetch 'cout' tweets from the hard-coded account's timeline.
url = augment('https://api.twitter.com/1.1/statuses/user_timeline.json',
        {'screen_name': 'Theidden_one', 'count': cout} )
print url[:100]
connection = urllib.urlopen(url)
data = connection.read()
#print data
headers = connection.info().dict
#print headers
print '\nRemaining', headers['x-rate-limit-remaining']
js = json.loads(data)
#print json.dumps(js, indent=4)

# The timeline endpoint returns a list of tweet objects.
for u in js:
    print u['text']
def gettwfriends(acct, count):
    """Crawl up to *count* Twitter accounts' friend lists into friends.sqlite.

    Starting from *acct* (or, when acct is empty, the next unretrieved row
    in People), fetches each account's friends via the Twitter 1.1 API,
    stores user JSON blobs in People.data, and records edges in Follows.

    NOTE(review): `id` below shadows the builtin; the DB path uses a
    Windows-style '\\' separator (not portable); several bare `except:`
    clauses double as "no row found" checks -- all left as-is.
    """

    APP_ROOT = os.path.dirname(os.path.abspath(__file__))

    count = int(count)

    TWITTER_URL = 'https://api.twitter.com/1.1/friends/list.json'
    USER_URL = 'https://api.twitter.com/1.1/users/lookup.json'

    # Ignore SSL certificate errors
    ctx = ssl.create_default_context()
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE

    # NOTE(review): backslash join is Windows-specific.
    conn = sqlite3.connect(APP_ROOT + '\\friends.sqlite')
    cur = conn.cursor()

    cur.execute('''CREATE TABLE IF NOT EXISTS People
                                (id                 INTEGER PRIMARY KEY AUTOINCREMENT,
                                name           TEXT UNIQUE,
                                data             TEXT DEFAULT NULL,
                                retrieved    INTEGER  DEFAULT (0) CHECK (retrieved in (0,1) ) )'''
                )

    cur.execute('''CREATE TABLE IF NOT EXISTS Follows
                                (from_id       INTEGER,
                                to_id              INTEGER,
                                UNIQUE        (from_id, to_id))''')

    conn.commit()

    # Each pass handles one account; count acts as the crawl budget.
    while True:
        # acct = input('Enter a Twitter account, or quit: ')
        # if (acct == 'quit'):
        #     break
        count = count - 1
        if count < 0:
            break
        if (len(acct) < 1):
            # No explicit account: pick the next unretrieved one.
            cur.execute(
                'SELECT id, name FROM People WHERE retrieved=0 LIMIT 1')
            try:
                (id, acct) = cur.fetchone()
            except:
                print('No unretrieved Twitter accounts found')
                # continue
        else:
            cur.execute('SELECT id FROM People WHERE name = ? LIMIT 1',
                        (acct, ))
            try:
                id = cur.fetchone()[0]
            except:
                # Unknown account: insert it, then fetch its profile JSON
                # via users/lookup and store it in People.data as a BLOB.
                cur.execute(
                    '''INSERT OR IGNORE INTO People
                            (name, retrieved) VALUES (?, 0)''', (acct, ))
                conn.commit()
                if cur.rowcount != 1:
                    print('Error inserting account:', acct)
                    continue
                id = cur.lastrowid
                try:
                    url2 = twurl.augment(USER_URL, {'screen_name': acct})
                    try:
                        connection2 = urllib.request.urlopen(url2, context=ctx)
                    except Exception as err2:
                        print('Failed to Retrieve #USER', err2)
                        break
                    data2 = connection2.read().decode()
                    headers2 = dict(connection2.getheaders())
                    print('Remaining user_lookup',
                          headers2['x-rate-limit-remaining'])
                    try:
                        js2 = json.loads(data2)[0]
                        save = json.dumps(js2).encode()
                    except:
                        print('Unable to parse json')
                        print(data2)
                        break
                    # memoryview() makes sqlite store the bytes as a BLOB.
                    cur.execute('UPDATE People SET data=? WHERE name=?', (
                        memoryview(save),
                        acct,
                    ))
                    conn.commit()
                except:
                    print('Error retrieving User data')
                    continue
        # Request either the remaining budget or a 200-friend page.
        if count > 5:
            racct = count
        else:
            racct = 200
        url = twurl.augment(TWITTER_URL, {'screen_name': acct, 'count': racct})
        print('Retrieving account', acct)

        try:
            connection = urllib.request.urlopen(url, context=ctx)
        except Exception as err:
            print('Failed to Retrieve', err)
            break

        data = connection.read().decode()
        headers = dict(connection.getheaders())
        print('Remaining', headers['x-rate-limit-remaining'])

        try:
            js = json.loads(data)
        except:
            print('Unable to parse json')
            print(data)
            break

        if 'users' not in js:
            print('Incorrect JSON received')
            print(json.dumps(js, indent=4))
            continue

        cur.execute('UPDATE People SET retrieved=1 WHERE name = ?', (acct, ))
        countnew = 0
        countold = 0
        for u in js['users']:
            friend = u['screen_name']
            print(friend)
            cur.execute('SELECT id FROM People WHERE name = ? LIMIT 1',
                        (friend, ))
            try:
                friend_id = cur.fetchone()[0]
                countold = countold + 1
            except:
                # New friend: insert with its JSON blob, unretrieved.
                cur.execute(
                    '''INSERT OR IGNORE INTO People (name, data,retrieved)
                            VALUES (?,?, 0)''', (
                        friend,
                        memoryview(json.dumps(u).encode()),
                    ))
                conn.commit()
                if cur.rowcount != 1:
                    print('Error inserting account:', friend)
                    continue
                friend_id = cur.lastrowid
                countnew = countnew + 1
            cur.execute(
                '''INSERT OR IGNORE INTO Follows (from_id, to_id)
                        VALUES (?, ?)''', (id, friend_id))
        # Self-edge so the account shows up in its own Follows graph.
        cur.execute(
            '''INSERT OR IGNORE INTO Follows (from_id, to_id)
                    VALUES (?, ?)''', (id, id))
        print('New accounts=', countnew, ' revisited=', countold)
        print('Remaining', headers['x-rate-limit-remaining'])
        conn.commit()
        # Blank acct so the next pass pulls from the unretrieved queue.
        acct = ""
    cur.close()
Exemplo n.º 22
0
def callAPI(twitter_url, parameters, session):
    """Sign the request with twurl/session and return the raw body."""
    signed_url = twurl.augment(twitter_url, parameters, session)
    return urllib2.urlopen(signed_url).read()
Exemplo n.º 23
0
def callApi(twitter_url, parameters):
    """Sign a Twitter API request and return the raw response body."""
    signed = twurl.augment(twitter_url, parameters)
    response = urllib2.urlopen(signed)
    return response.read()
            
#enter twitter ID
while True:
    twitterID = raw_input("ID:")
    if twitterID == "quit": break
    if len(twitterID)<1:
        twitterID = cur.execute('''SELECT name FROM Twitter WHERE
            retrieved = 0 LIMIT 1''')
        try:
            twitterID = cur.fetchone()[0]
        except:
            print "No unretrieved twitter ID"
            continue #NO NEW Twitter ID

#read the twitter data using API 
url = twurl.augment(twitter_url, {'screen_name':twitterID, 'count':'20'})
print "retrieving url", url
connection = urllib.urlopen(url)
data = connection.read()
header = connection.info().dict
print header
js = json.loads(data)
print js.dumps(js,indent = 4)

cur.execute("UPDATE Twitter SET retrieved = 1 WHERE name = ?", (twitterID,))
#find all the friends
countnew = 0
countold = 0
for u in js["users"]:
    friendname = u["screen_name"]
    cur.execute('''SELECT friends FROM Twitter WHERE name = ?''', friendname)
Exemplo n.º 25
0
#-------------------------------------------------------------------------------
# Name:        module1
# Purpose:
#
# Author:      Trenton J. McKinney
#
# Created:     24/11/2015
# Copyright:   (c) Trenton J. McKinney 2015
# Licence:     <your licence>
#-------------------------------------------------------------------------------

import urllib

import twurl

# Python 2 demo: fetch Trenton_EE's two latest tweets and dump body + headers.
print 'Calling Twitter'

url = twurl.augment('https://api.twitter.com/1.1/statuses/user_timeline.json',
        {'screen_name': 'Trenton_EE', 'count': '2'})

print url
connection = urllib.urlopen(url)
data = connection.read()
print data
# info() returns the response headers; .dict exposes them as a plain dict.
headers = connection.info().dict
print headers
Exemplo n.º 26
0
import urllib.request, urllib.parse, urllib.error
import twurl
import json

TWITTER_URL = 'https://api.twitter.com/1.1/friends/list.json'

# Prompt for accounts until an empty line; for each, list its 5 newest
# friends and a 50-character preview of each friend's latest tweet.
while True:
    print('')
    acct = input('Enter Twitter Account:')
    if ( len(acct) < 1 ) : break
    url = twurl.augment(TWITTER_URL,
        {'screen_name': acct, 'count': '5'} )
    print('Retrieving', url)
    connection = urllib.request.urlopen(url)
    data = connection.read().decode()
    headers = dict(connection.getheaders())
    print('Remaining', headers['x-rate-limit-remaining'])
    js = json.loads(data)
    print(json.dumps(js, indent=4))

    for u in js['users'] :
        print(u['screen_name'])
        # FIX: friends who have never tweeted lack a 'status' key, which
        # made the next line raise KeyError; skip them (same guard as the
        # other friends/list examples in this file).
        if 'status' not in u:
            print('   * No status found')
            continue
        s = u['status']['text']
        print('  ',s[:50])
#! python3

import urllib.request, urllib.parse, urllib.error
from twurl import augment
import ssl

# https://apps.twitter.com/
# Create App and get the four strings, put them in hidden.py

print('* Calling Twitter...')
# Sign a request for the two most recent tweets on this account.
signed_url = augment('https://api.twitter.com/1.1/statuses/user_timeline.json', {
    'screen_name': 'Vaibhav_Fubuki',
    'count': '2'
})
print(signed_url)

# Ignore SSL certificate errors
ssl_ctx = ssl.create_default_context()
ssl_ctx.check_hostname = False
ssl_ctx.verify_mode = ssl.CERT_NONE

response = urllib.request.urlopen(signed_url, context=ssl_ctx)
raw_body = response.read()
print(raw_body)  # Encoded data
# print(raw_body.decode())   							# Decoded  data

print('======================================')
print(dict(response.getheaders()))