Example No. 1
def update(status):
    # Report the server's current state to the master-list endpoint.
    # `get` (an HTTP helper), `envars`, and `users` are defined elsewhere
    # in the surrounding module; see the sketch below.
    try:
        get("http://starfury.eu5.org/alter.php?name=" + envars["Name"] +
            "&gamemode=" + envars["Gamemode"] + "&map=" + envars["Map"] +
            "&players=" + str(len(users)) + "/" + envars["Player Limit"] +
            "&status=" + status + "&ip=" + envars["IPv4"] + "&port=" +
            envars["Port"])
    except Exception:
        # The status ping is best-effort; ignore network failures.
        pass
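The names this snippet leans on are not shown. A minimal sketch of plausible surroundings, assuming `get` is `urllib.request.urlretrieve` and `envars`/`users` are module-level state (all values below are hypothetical):

from urllib.request import urlretrieve as get  # assumed alias

envars = {"Name": "MyServer", "Gamemode": "ctf", "Map": "arena",
          "Player Limit": "16", "IPv4": "127.0.0.1", "Port": "7777"}  # hypothetical
users = []  # connected players, managed elsewhere

update("online")  # best-effort ping to the master list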
Example No. 2
def cari_link(page):
    # Collect article links from one listing page of mainbasket.com; results
    # are appended to the module-level list `kumpulan_link`.
    from urllib.request import urlopen
    from bs4 import BeautifulSoup
    import re
    print(page)
    html_page = urlopen("https://www.mainbasket.com/c/4/berita/abl?page=" + str(page)).read()
    soup = BeautifulSoup(html_page, "html.parser")

    for a in soup.findAll('div', 'post-title'):
        #print(a)
        for link in a.findAll('a', attrs={'href': re.compile("^https://")}):
            print(link.get('href'))
            kumpulan_link.append(link.get('href'))
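A hedged usage sketch, assuming `kumpulan_link` is the module-level accumulator the function appends to:

kumpulan_link = []  # collected article URLs

for page in range(1, 4):  # scrape listing pages 1-3
    cari_link(page)

print(len(kumpulan_link), "links collected")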
Example No. 3
def download(self, follow_par_dir=False, verbose=True, ext=''):
    # Method of a scraper class: walk each queued URL with the mechanize
    # browser in self.__br, mirror its directory locally, and download files
    # whose extension appears in `ext` (expected to be a collection of
    # extension strings). `get` is assumed to take (url, filename).
    if verbose: print("[*] Starting WebScraper")
    for url in self.urls:
        self.__br.open(url)
        fol = self.__folder_encode(url)
        if not os.path.exists(fol): os.mkdir(fol)
        if verbose: print("[*] Scraping:", self.__br.title())

        for link in self.__br.links():
            url_ = link.absolute_url
            try:
                if not self.__is_dir(url_) and link.url[0:3] != '?C=' and url_.split('.')[-1] in ext:
                    if verbose: print("[!] Downloading:", self.__file_encode(url_.split('/')[-1]))
                    get(url_, fol + self.__file_encode(url_.split('/')[-1]))
                else:
                    if url_ not in self.urls and follow_par_dir and link.url[0:3] != '?C=' and self.__is_dir(url_):
                        if verbose: print("[*] Adding parent directory", url_, "to the URL Queue")
                        self.urls.append(url_)
                    elif url_ not in self.urls and not follow_par_dir and url_ == '/'.join(link.base_url.split('/')[0:-2]) + '/':
                        continue
            except Exception as err:
                print("[!!]", err)
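The private helpers are not shown; this is a minimal sketch of the assumed class shell (helper bodies are guesses, names taken from the method above), with mechanize supplying the browser:

import os
import mechanize
from urllib.request import urlretrieve as get  # assumed download helper

class WebScraper:
    # download() above is defined inside this class, next to these helpers.
    def __init__(self, urls):
        self.urls = list(urls)
        self.__br = mechanize.Browser()

    def __is_dir(self, url):
        return url.endswith('/')  # assumption: directory listings end in '/'

    def __folder_encode(self, url):
        # assumption: flatten the URL into a local folder name
        return url.replace('://', '_').replace('/', '_') + os.sep

    def __file_encode(self, name):
        return name  # assumption: filenames used as-is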
Example No. 4
def cari_link(page):
    # Same pattern against surabaya.tribunnews.com; links are appended to
    # the module-level list `kumpulan_link`.
    from urllib.request import urlopen
    from bs4 import BeautifulSoup
    import re
    print(page)
    html_page = urlopen(
        "http://surabaya.tribunnews.com/topic/berita-gresik?&page=" +
        str(page)).read()
    soup = BeautifulSoup(html_page, "lxml")

    for a in soup.findAll('h3', 'f20 ln24 fbo'):
        for link in a.findAll('a', attrs={'href': re.compile("^http://")}):
            #print(link.get('href'))
            kumpulan_link.append(link.get('href'))
Example No. 5
def find_emails(url):
    print("Finding Emails on: {}".format(url))

    # `get` must return the page body as text (e.g. a thin wrapper around
    # requests.get(url).text) and ReEmailAddress is an email-matching regex;
    # both are defined elsewhere in the module (see the sketch below).
    try:
        html_body = get(url)
        parsed = BeautifulSoup(html_body, 'html.parser')
    except Exception as e:
        print(e)
        return None

    title = parsed.title.string if parsed.title else url
    emails = re.findall(ReEmailAddress, parsed.getText())

    return (title, emails)
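A sketch of the module-level pieces the function assumes; the regex is a deliberately simplified stand-in:

import re
import requests
from bs4 import BeautifulSoup

ReEmailAddress = re.compile(r"[\w.+-]+@[\w-]+\.[\w.-]+")  # simplified email pattern

def get(url):
    # find_emails() expects the page body as text
    return requests.get(url, timeout=10).text

title, emails = find_emails("https://example.com/contact") or ("", [])
print(title, emails)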
Example No. 6
import requests  # assumed: urllib has no get(); the headers kwarg and .json() match the requests API

def getActualFollow():
    # Poll the Twitch Kraken API for the live viewer count; `ac` is the
    # Assetto Corsa console module provided by the game.
    global viewers, CHAN, PASS, fetching
    fetching = True
    try:
        headers = {'Accept': 'application/vnd.twitchtv.v3+json'}
        r = requests.get('https://api.twitch.tv/kraken/streams/' + CHAN[1:] +
                         '?oauth_token=' + PASS[6:], headers=headers)
        jsonData = r.json()
        ac.console('json {}'.format(jsonData))
        if jsonData['stream'] is not None:
            viewers = str(jsonData['stream']['viewers'])
        else:
            viewers = 'offline'
    except Exception:
        # best-effort: leave the previous viewer count on any failure
        pass
    fetching = False
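The `fetching` flag hints that this runs off the main thread. A minimal sketch of how it might be driven, with hypothetical credentials:

import threading

viewers, fetching = 'offline', False
CHAN = '#somechannel'        # IRC-style channel name; CHAN[1:] strips the '#'
PASS = 'oauth:abcdef123456'  # PASS[6:] strips the 'oauth:' prefix

threading.Thread(target=getActualFollow, daemon=True).start()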
Example No. 7
import json
import requests  # assumed: urllib has no get(), and .json() below matches requests

API_params = {'token': 'c5213a1102b8422c80378944e1246d10', 'qc': 'all',
              'complete': '1', 'network': '1,2', 'status': 'active',
              'recent': '360'}
# inciweb_url = 'http://inciweb.nwcg.gov/feeds/rss/incidents/'
# inciweb = feedparser.parse(inciweb_url)
fire_lat = []
fire_lon = []
file_in = 'C:\\FireWeatherNow\\storage\\fire_data\\active_fires.json'
with open(file_in, 'r') as file1:
    fire_data = json.load(file1)  # keep the parsed JSON rather than discarding it
print(fire_data)


nearest_stids = []
for k in range(len(fire_lat)):  # put polygon path here
    # fire_lat/fire_lon are presumably filled from fire_data above, and
    # `baseURL` is defined elsewhere in the script. Pass the query as a
    # params dict: the original concatenated a dict onto a string (TypeError).
    r = requests.get(baseURL, params=dict(API_params,
                                          radius=str(fire_lat[k]) + ',' +
                                                 str(fire_lon[k]) + ',50'))
    nearest_stids.append(r.json())
all_stations = []
stid_dict = {}
for l in range(len(nearest_stids)):
    all_stations = nearest_stids[l]['STATION']
    # `fire_title` (one name per fire) is assumed to be defined elsewhere
    stid_dict[fire_title[l]] = []
    for m in range(len(all_stations)):
        stid_dict[fire_title[l]].append(all_stations[m]['STID'])



# API query gets latest 12 hours of data from RAWS and AWOS/ASOS/FAA stations:
# API_request = requests.get(baseURL + 'timeseries?' + token + parameters)
# API_data = API_request.json()
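For reference, a hedged sketch of assembling the station query with urllib.parse.urlencode instead of hand-concatenation; the endpoint is a hypothetical Synoptic/MesoWest-style URL:

from urllib.parse import urlencode
import requests

baseURL = 'https://api.synopticdata.com/v2/stations/'  # hypothetical endpoint
query = urlencode(dict(API_params, radius='39.5,-105.0,50'))
r = requests.get(baseURL + 'metadata?' + query)
print(r.url)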
Example No. 8
# get an elevation profile from a bunch of trail points
import json
from urllib.request import urlretrieve as get
from math import ceil

with open("sample-route.json") as infile:
    route = json.load(infile)["route"]
    route = json.loads(route)  # the "route" field is itself a JSON string

print(route[0])

locs_per_query = 500
route_length = len(route)
if route_length > locs_per_query:
    # Python 3 true division keeps the fraction, so ceil() counts chunks correctly
    num_queries = ceil(route_length / locs_per_query)
    for i in range(num_queries):
        start = i * locs_per_query
        end = (i + 1) * locs_per_query
        locations = "|".join(",".join(str(c) for c in coords)
                             for coords in route[start:end])
        print(locations)
        url = "http://maps.googleapis.com/maps/api/elevation/json?sensor=false&path=%s" % locations
        get(url, filename="elevation-profile%d.json" % i)
        print(i, start, end)
else:
    locations = "|".join(",".join(str(c) for c in coords) for coords in route)
    url = "http://maps.googleapis.com/maps/api/elevation/json?sensor=false&path=%s" % locations

#get(url, filename="elevation-profile.json")
#print("got elevation profile")

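A quick check of the chunking arithmetic: with Python 3's true division, ceil() yields the right number of requests:

from math import ceil

assert ceil(1200 / 500) == 3  # 1200 points need three 500-point queries
assert ceil(501 / 500) == 2   # any remainder adds one more query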
Example No. 9
import csv
import json
import keys  # module holding the Google API key, as in the original project
from urllib.request import urlretrieve as get  # assumed: same alias as Example No. 8

url = "http://maps.googleapis.com/maps/api/distancematrix/json?units=imperial&sensor=false&key=" + keys.google + "&%s"
params = {"origins": [], "destinations": []}

# test:
params["origins"] = [
    "+".join("16th and mission st san francisco ca".split(" "))
]

with open("data/locations.csv") as infile:
    parks = []
    for row in csv.DictReader(infile):
        if row["type"] in ["Park", "Trailhead"]:
            location = ",".join([row["Y"], row["X"]])
            params["destinations"].append(location)

# params["destinations"] = params["destinations"][:-1]    # so it's 100 exactly

# print(len(params["destinations"]))
# import sys
# sys.exit()

# construct the query
origins = "origins=" + "|".join(params["origins"])
destinations = "destinations=" + "|".join(params["destinations"])
query = "&".join([origins, destinations])
distances = url % query

# test:
get(distances, filename="json/test-more.json")
with open("json/test-destinations-more.json", "w") as outfile:
    outfile.write(json.dumps(params["destinations"]))
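A hedged variant of the query assembly that percent-encodes the pipe separators via urllib.parse.quote; '+' is kept literal since the origins use it as the space encoding:

from urllib.parse import quote

origins = "origins=" + quote("|".join(params["origins"]), safe="+,")
destinations = "destinations=" + quote("|".join(params["destinations"]), safe=",")
query = "&".join([origins, destinations])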
Example No. 11
import urllib.request
from tqdm import tqdm

class DownloadProgressBar(tqdm):
    # reporthook in the style of the tqdm documentation; reconstructed,
    # since the snippet was cut off mid-definition
    def update_to(self, b=1, bsize=1, tsize=None):
        if tsize is not None:
            self.total = tsize
        self.update(b * bsize - self.n)

url = "https://s3.amazonaws.com/nyc-tlc/trip+data/green_tripdata_2018-01.csv"
local_filename = url.split('/')[-1]  # assumed output name, derived from the URL

with DownloadProgressBar(unit='B', unit_scale=True, miniters=1,
                         desc=local_filename) as t:
    urllib.request.urlretrieve(url, filename=local_filename,
                               reporthook=t.update_to)