Example #1
 def __init__(self):
     self.ERR_PHRASE = "Затрудняюсь определить погоду"
     self.CONDITIONS = {'clear': "ясно",
                        'partly-cloudy': 'малооблачно',
                        'cloudy': 'облачно с прояснениями',
                        'overcast': 'пасмурно',
                        'partly-cloudy-and-light-rain': 'небольшой дождь',
                        'partly-cloudy-and-rain': 'дождь',
                        'overcast-and-rain': 'сильный дождь',
                        'overcast-thunderstorms-with-rain': 'сильный дождь, гроза',
                        'cloudy-and-light-rain': 'небольшой дождь',
                        'overcast-and-light-rain': 'небольшой дождь',
                        'cloudy-and-rain': 'дождь',
                        'overcast-and-wet-snow': 'дождь со снегом',
                        'partly-cloudy-and-light-snow': 'небольшой снег',
                        'partly-cloudy-and-snow': 'снег',
                        'overcast-and-snow': 'снегопад',
                        'cloudy-and-light-snow': 'небольшой снег',
                        'overcast-and-light-snow': 'небольшой снег',
                        'cloudy-and-snow': 'снег'}
     key = get_key("weather")
     if key[0]:
         self.WEATHER_KEY = key[1]
     else:
         print("WEATHER: KEY ERR!!")
         # __init__ must return None; returning "ERR" here would raise a TypeError
         return
Example #2
def send_email():

    # connect to alarm program database
    conn = sqlite3.connect('alarm.db')
    c = conn.cursor()

    # get username and password for email
    login = c.execute("SELECT * FROM keys").fetchone()

    # log in to email server
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.starttls()

    key = keys.get_key()
    email = keys.decrypt(login[0], key)
    password = keys.decrypt(login[1], key)
    server.login(email, password)
 
    # send an email
    msg = "Test"
    server.sendmail(email, email, msg)

    # disconnect
    server.quit()
    conn.close()
Example #3
def search_tw_info(cursor, db):
    print('Search tweets')
    try:
        count = 0
        key_count = 0
        for i in range(0, cursor.count(), 100):
            while 1:
                try:
                    group = []
                    print(i)
                    for j in range(i, i + 100):
                        if j < cursor.count():
                            group.append(str(cursor[j]['retweeted_status']))
                    str_gr = ','.join(group)
                    key = get_key(key_count)
                    key_count += 1
                    api = TwitterAPI(key[0], key[1], key[2], key[3])
                    r = api.request('statuses/lookup', {'id': str_gr})
                    tws = r.json()
                    for data in tws:
                        save_tweet_to_db(db, data)
                        count += 1
                        print(data['id'])
                    break
                except Exception as e:
                    print('error: %s' % e)

        print(str(count) + ' - ' + str(cursor.count()))

    except Exception as e:
        print(e)
Example #4
    def start(self):
        img_seq = []
        img_seq_length = (utils.image_seq_size - 1) * utils.pred_skip_frame + 1

        while True:
            img, _ = capture(region=self.region, dump=False)
            image, map = self.__split_and_save(img)

            image = self.__process_image(image)
            map = self.__process_map(map)

            img_seq.append(image)

            if len(img_seq) < img_seq_length:
                continue

            if len(img_seq) > img_seq_length:
                img_seq.pop(0)

            img_seq_input = np.array([img_seq[::utils.pred_skip_frame]])
            map_input = np.array([map])

            start_time = time.time()

            """
            Computation is done in batches. This method is designed for performance in
            large scale inputs. For small amount of inputs that fit in one batch,
            directly using `__call__` is recommended for faster execution, e.g.,
            `model(x)`, or `model(x, training=False)` if you have layers such as
            `tf.keras.layers.BatchNormalization` that behaves differently during
            inference. Also, note the fact that test loss is not affected by
            regularization layers like noise and dropout.
            """
            # pred = self.model.predict([img_seq_input, map_input])[0]

            # [(batch, seq, img_height, img_width, channel) (batch, map_height, map_width, channel)]
            pred = self.model([img_seq_input, map_input], training=False)[self.pred_ret_idx].numpy()[0]
            max_index = np.argmax(pred)
            # print(pred)
            # print("predict speed {} s".format(time.time() - start_time))

            key = get_key(max_index)
            key, limit = self.__speed_controller(key)

            if limit:
                for _ in range(5):
                    press(key)

            press(key)
Example #5
def get_timeline(db, id, limit_tweets, since_id=0):

    count_tweets = db.users_timelines.find({'user': id}).count()
    count = 0
    while count <= 1000 and db.users_timelines.find({
            'user': id
    }).count() <= limit_tweets:
        key = get_key(count)
        res = search_tweets(key[0], key[1], key[2], key[3], limit_tweets, db,
                            id, since_id)
        if not res[0]:
            time.sleep(20)
            count = count + 1
            count_tweets = count_tweets + res[1]
        else:
            break
    print(count_tweets)
Example #6
def get_events(zipcode):
    global APP_KEY
    if APP_KEY == "":
        APP_KEY = keys.get_key("eventbrite")[0]
    events_url = "https://www.eventbriteapi.com/v3/events/search/?token=" + APP_KEY + "&sort_by=best&location.address=<zipcode>&location.within=50mi"
    retList = []
    events_url = events_url.replace("<zipcode>", str(zipcode))
    print events_url
    '''
    response = urllib2.urlopen(events_url)
    url = response.geturl()
    info = response.read()
    info = json.loads(info)
    '''
    info = requests.get(events_url).json()
    print info
    return info
Example #7
def get_access_token():
    """Gets Access Token"""
    global SPOTIFY_CLIENT_ID
    global SPOTIFY_CLIENT_SECRET
    global ACCESS_TOKEN

    if SPOTIFY_CLIENT_ID == "" or SPOTIFY_CLIENT_SECRET == "":
        spotkeys = keys.get_key("spotify")
        SPOTIFY_CLIENT_ID = spotkeys[0]
        SPOTIFY_CLIENT_SECRET = spotkeys[1]

    print("Getting Spotify Token...")
    url = "https://accounts.spotify.com/api/token"
    body_params = {"grant_type": "client_credentials"}
    response = json.loads(
        requests.post(url,
                      data=body_params,
                      auth=(SPOTIFY_CLIENT_ID, SPOTIFY_CLIENT_SECRET)).text)

    ACCESS_TOKEN = response["access_token"]
Example #8
def decrypt_user_credentials(token):
    """Decrypt saved credentials"""
    try:
        key = get_key()
        f_1 = Fernet(key)
        token = bytes(token, 'utf-8')
        password = f_1.decrypt(token)
        password = password.decode()
        return password

    except FileNotFoundError:
        print(Fore.LIGHTRED_EX + 'No encryption key was found')
        while True:
            user_choice = input('Enter encryption key?(y/n)')
            if user_choice.lower() == 'y':
                key = enter_key()
                f_1 = Fernet(key)
                token = bytes(token, 'utf-8')
                password = f_1.decrypt(token)
                password = password.decode()
                return password
Example #9
def get_food_options(food):
    global EDAMAM_APP_KEY
    global EDAMAM_APP_ID
    if (EDAMAM_APP_ID == "" or EDAMAM_APP_KEY == ""):
        edkeys = keys.get_key("edamam")
        EDAMAM_APP_ID = edkeys[0]
        EDAMAM_APP_KEY = edkeys[1]
    url = "https://api.edamam.com/api/food-database/parser?ingr=" + food + "&app_id=" + EDAMAM_APP_ID + "&app_key=" + EDAMAM_APP_KEY + "&page=0"
    food_opt = requests.get(url).json()
    # print(food_opt['parsed'][0]['food']['label'])  # same as num 0 in hints below
    # print(food_opt['parsed'][0]['food']['uri'])

    # food_json = {"yield": 1, 'ingredients': [None, None, None]}   # too many ingredients...
    # food_size = min(len(food_opt['hints']), 3)
    # for i in range(0, food_size):
    #     # print(food_opt['hints'][i]['food']['label'])
    #     food_json['ingredients'][i] = {"quantity": 1,
    #                                    "measureURI": food_opt['hints'][i]['measures'][1]['uri'],
    #                                    "foodURI": food_opt['hints'][i]['food']['uri'],
    #                                    "label": food_opt['hints'][i]['food']['label']
    #                                    }

    food_size = min(len(food_opt['hints']), 3)
    food_json = {
        "yield": 1,
        'ingredients': [{
            "quantity": 1,
            "measureURI": food_opt['hints'][0]['measures'][1]['uri'],
            "foodURI": food_opt['hints'][0]['food']['uri'],
            "label": food_opt['hints'][0]['food']['label']
        }]
    }
    return food_json
Example #10
def encrypt_credentials(password):
    """Encrypt credentials using the provided key"""
    try:
        key = get_key()
        f_1 = Fernet(key)
        token = f_1.encrypt(password.encode())
        token = token.decode()
        return token

    except FileNotFoundError:
        print(Fore.LIGHTRED_EX + 'No encryption key was found!\n' + cr)

        while True:
            user_choice = input('(Generate key)(Input key)(cancel)?(g)(i)(c)')
            if user_choice.lower() == 'c':
                print(Fore.LIGHTRED_EX + 'You need an encryption key to '
                      'encrypt your login credentials!\nBack to the main '
                      'menu!\n' + cr)
                break

            elif user_choice.lower() == 'g':
                key = generate_key()
                f_1 = Fernet(key)
                token = f_1.encrypt(password.encode())
                token = token.decode()
                return token

            elif user_choice.lower() == 'i':
                key = enter_key()
                f_1 = Fernet(key)
                token = f_1.encrypt(password.encode())
                token = token.decode()
                return token
            else:
                print(Fore.LIGHTRED_EX + 'Please enter a valid option!' + cr)
                continue
Example #11
import json
import tweepy
import keys

keys = keys.get_key("twitter")

auth = tweepy.OAuthHandler(keys["api-key"], keys["api-key-secret"])
auth.set_access_token(keys["accsess-token"], keys["accsess-token-secret"])
api = tweepy.API(auth, wait_on_rate_limit=True)

t = api.search(q="Digitalisierung", tweet_mode="extended", count=2, rpp=1)

for tweet in t:
    print("When:", tweet.created_at)
    print("Who:", tweet.user.screen_name)
    print("Where:", tweet.user.location)
    print(tweet.full_text)

    #print()
    #print(tweet._json)
Example #12
import botometer, keys
mashape_key = keys.get_key()
twitter_app_auth = keys.get_twitter_auth()

bom = botometer.Botometer(wait_on_ratelimit=True,
                          mashape_key=mashape_key,
                          **twitter_app_auth)


def test_user(user):
    return bom.check_account(user)
Example #13
import sys

sys.path.append("./")
import keys
import requests
from bs4 import BeautifulSoup
from database import Database
from logger import Logger

search_term = "digitale+transformation"
logger = Logger(site="zeit", search_term=search_term).getLogger()
db = Database(logger)

# Fill in your details here to be posted to the login form.
user = keys.get_key("zeituser")
pw = keys.get_key("zeitpw")

for i in range(277):
    payload = {
        'email': user,
        'pass': pw,
        "return_url": "https://www.zeit.de/suche/index?q=" + search_term +
                      "&sort=aktuell&p=" + str(i)
    }
    # create Session to login and send login information with every further request
    with requests.Session() as s:
        try:
            p = s.post('https://meine.zeit.de/anmelden', data=payload)
Example #14
    updater.start_polling()
    updater.idle()
    pass


if __name__ == '__main__':
    MASTERS_IDS = [403054226, 265801498]
    FEATURES = [
        "1)Переводить фразы c русского на английский и наоборот! (/translate)",
        "2)Считать за тебя! (/count)", "3)Работать с цитатами!",
        "4)Выдавать краткую информацию о погоде (/get_weather [place] [limit]. Ну, или просто пришли мне свою геопозицию!)"
    ]

    reply_keyboard = [['/start', '/close'], ['/count', '/translate'],
                      ["/random_quote", "/get_last_quotes"],
                      ["/get_weather", "/change_last_quotes"]]  # Buttons
    MARKUP = ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=False)
    MIN_MARKUP = ReplyKeyboardMarkup([["/start"]], one_time_keyboard=False)
    STOP_MARKUP = ReplyKeyboardMarkup([["/stop"]], one_time_keyboard=False)

    response, key = get_key("telegram-bot")
    if response:
        translator = YandexDictionary()
        print("My name: @SupremeSmartBot")
        main(key)
    else:
        print("ERR:", key)
    pass
Example #15
import urllib2, json, os, urllib, datetime, keys

key = keys.get_key("TMDB")


def get_popmovies():
    url = "https://api.themoviedb.org/3/movie/now_playing?api_key=" + key + "&language=en-US&page=1"
    request = urllib2.urlopen(url)
    result = request.read()
    d = json.loads(result)
    retList = []
    for movie in d["results"]:
        retList.append(movie["original_title"])
    return retList


#print get_popmovies()
Example #16
def get_json_from_url(url):
    """Returns content from ProPublica url"""
    api_key = keys.get_key()
    headers = {'X-API-Key': api_key}
    r_val = requests.get(url=url, headers=headers)
    return r_val.content
Example #17
import urllib2, json, urllib, keys
key = keys.get_key("DarkSky")


#https://api.darksky.net/forecast/[key]/[latitude],[longitude] (proper format)
def setup(lon, lat):
    # use the key fetched above; the API expects [latitude],[longitude] order
    url = "https://api.darksky.net/forecast/" + key + "/" + str(lat) + "," + str(lon)
    request = urllib2.urlopen(url)
    result = request.read()
    d = json.loads(result)
    return d


'''
for key in d:
    if (key == "hourly"):
        for k2 in d[key]:
            print k2
            print "corresponds to"
            print d[key][k2]
            print
#use icon to create a getBg fxn

'''


def backgrounder(lon, lat):
    d = setup(lon, lat)
    for key in d:
        if (key == "hourly"):
Example #18
File: movies.py  Project: vliok/Familia
import urllib2, json, os, urllib, datetime, keys
import maps

key = keys.get_key("Foursquare")
"""
Args:
lat - latitude of original location
lon - longitude of original location
query - either food, shops, coffee, movie
lim - number of items to return

Return:
info from foursquare api in format of:
[
    [venue name, venue lat, venue lon, venue id, venue category, venue rating, reviews]
]
"""


def get_movies(lat, lon, query, lim):
    url = "https://api.foursquare.com/v2/venues/explore?ll=" + str(
        lat) + "," + str(lon) + "&query=" + query + "&limit=" + str(
            lim) + "&oauth_token=" + key
    request = urllib2.urlopen(url)
    result = request.read()
    d = json.loads(result)
    d = d["response"]["groups"][0]["items"]
    retList = []
    for venue in d:
        #print venue["venue"]["id"]
        try:
Example #19
 def __init__(self):
     self.DICT_KEY = get_key("dictionary")[1]
     self.TRANSLATOR_KEY = get_key("translator")[1]
     self.LANGUAGES = {"ru": "ru-en", "en": "en-ru"}
     self.ERR_PHRASE = "Затрудняюсь перевести"
Example #20
import urllib2, json, os, urllib, keys
import time

key = keys.get_key("Geolocation")
keym = keys.get_key("Maps")
keyd = keys.get_key("Destination")


#get map query
def get_map_query(loc):
    googleurl = "https://www.google.com/maps/embed/v1/place?key=%s&q=%s" % (
        keym, loc)
    return googleurl


#given a dictionary with lng and lat to find approximate address
def reverse_geo(ldic):
    googleurl = "https://maps.googleapis.com/maps/api/geocode/json?latlng=%s,%s&key=%s" % (
        ldic['lat'], ldic['lng'], key)
    request = urllib2.urlopen(googleurl)
    result = request.read()
    d = json.loads(result)
    rdic = d['results'][0]
    address = rdic['formatted_address']
    address = urllib.quote_plus(address)
    return address


def geo_loc(location):
    #finds the longitude and latitude of a given location parameter using Google's Geocode API
    #return format is a dictionary with longitude and latitude as keys
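    # The body of geo_loc is cut off in this excerpt. A minimal sketch of what it
    # likely does, assuming the same Geocoding endpoint and key used in reverse_geo
    # above (illustrative, not the original implementation):
    location = urllib.quote_plus(location)
    googleurl = "https://maps.googleapis.com/maps/api/geocode/json?address=%s&key=%s" % (
        location, key)
    request = urllib2.urlopen(googleurl)
    result = request.read()
    d = json.loads(result)
    # geometry['location'] is a dict with 'lat' and 'lng' keys
    return d['results'][0]['geometry']['location']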
Example #21
import sys
sys.path.append("./")
from logger import Logger
import requests as r
import json
import keys
from bs4 import BeautifulSoup
from database import Database
import time
from useragent import get_random_useraget

search_term = "digitalization"
logger = Logger(site="NYT", search_term=search_term).getLogger()
db = Database(logger)
key = keys.get_key("newyorktimes")

response = r.get(
    "https://api.nytimes.com/svc/search/v2/articlesearch.json?q=" +
    search_term + "&sort=newest&page=1&api-key=" + key).content

result = json.loads(response)["response"]

hits = result["meta"]["hits"]
pages = int(hits / 10)

for i in range(1, pages):
    response = r.get(
        "https://api.nytimes.com/svc/search/v2/articlesearch.json?q=" +
        search_term + "&sort=newest&page=" + str(i) + "&api-key=" +
        key).content
    try:
Example #22

if (__name__ == '__main__'):
    '''
    key = keys.get_key('C#','minor')
    
    part1 = [
        key[7 * 4 + 4], key[7 * 5 + 0], key[7 * 5 + 2],
        key[7 * 4 + 4], key[7 * 5 + 0], key[7 * 5 + 2],
        key[7 * 4 + 4], key[7 * 5 + 0], key[7 * 5 + 2],
        key[7 * 4 + 4], key[7 * 5 + 0], key[7 * 5 + 2],
        
        key[7 * 4 + 5], key[7 * 5 + 0], key[7 * 5 + 2],
        key[7 * 4 + 5], key[7 * 5 + 0], key[7 * 5 + 2],
        
        key[7 * 4 + 5], natural(key[7 * 5 + 1]), key[7 * 5 + 3],
        key[7 * 4 + 5], natural(key[7 * 5 + 1]), key[7 * 5 + 3],
        
        key[7 * 4 + 4], sharp(key[7 * 4 + 6]), key[7 * 5 + 3],
        key[7 * 4 + 4], key[7 * 5 + 0], key[7 * 5 + 2],
        key[7 * 4 + 4], key[7 * 5 + 0], key[7 * 5 + 1],
        key[7 * 4 + 3], sharp(key[7 * 4 + 6]), key[7 * 5 + 1],
        ]
    
    write_music(part1)
    '''
    key = keys.get_key('C', 'major')

    notes = list(fibonacci_mod_iter(100, 25))[5:]
    audio.write_music([key[20 + n] for n in notes])
Example #23
    key = keys.get_key('C#','minor')
    
    part1 = [
        key[7 * 4 + 4], key[7 * 5 + 0], key[7 * 5 + 2],
        key[7 * 4 + 4], key[7 * 5 + 0], key[7 * 5 + 2],
        key[7 * 4 + 4], key[7 * 5 + 0], key[7 * 5 + 2],
        key[7 * 4 + 4], key[7 * 5 + 0], key[7 * 5 + 2],
        
        key[7 * 4 + 5], key[7 * 5 + 0], key[7 * 5 + 2],
        key[7 * 4 + 5], key[7 * 5 + 0], key[7 * 5 + 2],
        
        key[7 * 4 + 5], natural(key[7 * 5 + 1]), key[7 * 5 + 3],
        key[7 * 4 + 5], natural(key[7 * 5 + 1]), key[7 * 5 + 3],
        
        key[7 * 4 + 4], sharp(key[7 * 4 + 6]), key[7 * 5 + 3],
        key[7 * 4 + 4], key[7 * 5 + 0], key[7 * 5 + 2],
        key[7 * 4 + 4], key[7 * 5 + 0], key[7 * 5 + 1],
        key[7 * 4 + 3], sharp(key[7 * 4 + 6]), key[7 * 5 + 1],
        ]
    
    write_music(part1)
    '''
    key = keys.get_key('C','major')
    
    notes = list(fibonacci_mod_iter(100, 25))[5:]
    audio.write_music([key[20 + n] for n in notes])
    
    
    
 
Example #24
import time
import requests
import keys
import cookie_maker
from bs4 import BeautifulSoup
from database import Database
from logger import Logger
from useragent import get_random_useraget
from datetime import datetime

search_terms = ["digitisation", "digital+transformation"]  # + Digitalisation

for search_term in search_terms:

    logger = Logger(site="financialtimes", search_term=search_term).getLogger()
    db = Database(logger)

    cookies = keys.get_key("ft-cookies")

    header = {"Cookie": cookie_maker.create_cookie_string(cookies)}

    for i in range(1, 26):
        response = requests.get("https://www.ft.com/search?q=" + search_term +
                                "&page=" + str(i) + "&sort=date",
                                headers=header)

        base = BeautifulSoup(response.content, "html.parser")
        articles = base.find_all("li", {"class": "search-results__list-item"})

        for article in articles:
            try:
                if article.find(
                        "span",
Example #25
import sys
sys.path.append("./")
from datetime import datetime
from useragent import get_random_useraget
from logger import Logger
from database import Database
from bs4 import BeautifulSoup
import requests
import time
import keys
import cookie_maker

search_terms = ["digital+transformation", "digitization"]
cookies = keys.get_key("forbes-cookies")
header = {"Cookie": cookie_maker.create_cookie_string(cookies)}

for search_term in search_terms:
    logger = Logger(site="forbes", search_term=search_term).getLogger()
    db = Database(logger)

    for i in range(900, 10000, 20):

        response = requests.get(
            "https://www.forbes.com/simple-data/search/more/?start=" + str(i) +
            "&q=" + search_term,
            headers=header)
        base = BeautifulSoup(response.content, "html.parser")
        articles = base.find_all("article")

        for article in articles:
            try: