Example #1
    def test_teardown(self):
        try:
            lock = threading.Lock()
            https = https_reader.server_plugin(lock, PORT)
        except Exception as e:
            print e
            self.fail("Failed to startup server")

        try:
            conn = httplib.HTTPSConnection('localhost', 4443, timeout=5, context=ssl._create_unverified_context())
            conn.request("GET", "/")
            r1 = conn.getresponse()
            if r1.status == 404:
                connection = True
            else:
                connection = False
            conn.close()
        except Exception as e:
            print e
            self.fail("Client couldn't to server")
        try:
            https.tear_down()
            # a fresh connection must be created and used; the original discarded it
            conn = httplib.HTTPSConnection('localhost', 4443, timeout=5, context=ssl._create_unverified_context())
            conn.request("GET", "/")
            r2 = conn.getresponse()
            self.fail("Server failed to shutdown")
        except Exception as e:
            # server shut down as expected
            self.assertTrue(True)
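As a side note, the final shutdown check could also be expressed with assertRaises; a minimal sketch assuming the same httplib/ssl imports as above:

        # Sketch: assert that reconnecting after tear_down() fails.
        def _reconnect():
            c = httplib.HTTPSConnection('localhost', 4443, timeout=5,
                                        context=ssl._create_unverified_context())
            c.request("GET", "/")
            return c.getresponse()
        self.assertRaises(Exception, _reconnect)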
Example #2
def getServices(agsSiteAdminURL, token):
    # Setup the parameters
    parameters = urllib.urlencode({'token': token,
                  'f': 'json'})
    queryString = parameters.encode('utf-8')

    # Post request
    try:
        printMessage("Querying ArcGIS Server for a list of services - " + agsSiteAdminURL + "/admin/services" + "...","info")         
        context = ssl._create_unverified_context()
        request = urllib2.Request(agsSiteAdminURL + "/admin/services",queryString)
        responseJSON = json.loads(urllib2.urlopen(request, context=context).read())
        if "error" in str(responseJSON).lower():
            printMessage(responseJSON,"error")
            if (sendErrorEmail == "true"):
                message = "There is an issue with the ArcGIS Server site - " + agsSiteAdminURL + "..." + "<br/><br/>"
                message += str(responseJSON) + "<br/>"
                # Send email
                sendEmail(message,None)
            # Exit the python script
            sys.exit()
        else:
            # Iterate through services
            services = []
            for eachService in responseJSON['services']:                
                services.append(eachService['serviceName']+ "." + eachService['type'])

            # Iterate through folders
            for folder in responseJSON['folders']:
                # Ignore the system and utilities folders (the original "or" was always true)
                if ((folder.lower() != "system") and (folder.lower() != "utilities")):
                    context = ssl._create_unverified_context()
                    request = urllib2.Request(agsSiteAdminURL + "/admin/services/" + folder,queryString)
                    responseJSON = json.loads(urllib2.urlopen(request, context=context).read())
                    if "error" in str(responseJSON).lower():
                        printMessage(responseJSON,"error")
                        if (sendErrorEmail == "true"):
                            message = "There is an issue with the ArcGIS Server site - " + agsSiteAdminURL + "..." + "<br/><br/>"
                            message += str(responseJSON) + "<br/>"
                            # Send email
                            sendEmail(message,None)
                        # Exit the python script
                        sys.exit()
                    else:
                        # Iterate through services
                        for eachService in responseJSON['services']:                
                            services.append(folder + "/" + eachService['serviceName']+ "." + eachService['type'])
            # Return services list 
            return services           
    except urllib2.URLError, error:
        printMessage("There is an issue connecting to the ArcGIS Server site - " + agsSiteAdminURL + "...","error")
        printMessage(error,"error")
        if (sendErrorEmail == "true"):
            message = "There is an issue connecting to the ArcGIS Server site - " + agsSiteAdminURL + "..." + "<br/><br/>"
            message += str(error) + "<br/>"
            # Send email
            sendEmail(message,None)        
        # Exit the python script
        sys.exit()
Example #3
 def post_process_options(self):
     if self.options.disable_ssl_verification:
         try:
             import ssl
             ssl._create_unverified_context()
         except Exception:
             raise CommandError('The --disable-ssl-verification flag is '
                                'only available with Python 2.7.9+')
Example #4
    def add_spark(self,node):
        """Connect to SPARK on local host and dump information. 
        Uses requests. Note: disables HTTPS certificate warnings."""
        import os
        import json
        from   urllib.request import urlopen
        import ssl
        if "SPARK_ENV_LOADED" not in os.environ:
            return        # no spark

        spark = ET.SubElement(node, 'spark')
        try:
            import requests
            import urllib3
            urllib3.disable_warnings()
        except ImportError:
            ET.SubElement(spark,'error').text = "SPARK_ENV_LOADED present but requests module not available"
            return 

        host = 'localhost'
        p1 = 4040
        p2 = 4050
        import urllib.error
        for port in range(p1, p2+1):
            try:
                url = 'http://{}:{}/api/v1/applications/'.format(host, port)
                resp = urlopen(url, context=ssl._create_unverified_context())
                spark_data = resp.read()
                break
            except (ConnectionError, ConnectionRefusedError, urllib.error.URLError):
                continue
        else:
            # for/else: no port in the range answered (the original `port>=p2`
            # check also misreported success on the last port as a failure)
            ET.SubElement(spark,'error').text = f"SPARK_ENV_LOADED present but no listener on {host} ports {p1}-{p2}"
            return

        # Looks like we have spark!
        for app in json.loads(spark_data):
            app_id   = app['id']
            app_name = app['name']
            e = ET.SubElement(spark,'application',{'id':app_id,'name':app_name})

            attempt_count = 1
            for attempt in app['attempts']:
                e = ET.SubElement(spark,'attempt')
                json_to_xml(e,attempt)
            for param in ['jobs','allexecutors','storage/rdd']:
                url = f'http://{host}:{port}/api/v1/applications/{app_id}/{param}'
                resp = urlopen(url, context=ssl._create_unverified_context())
                data = resp.read()
                e = ET.SubElement(spark,param.replace("/","_"))
                json_to_xml(e,json.loads(data))
Example #5
 def download_hsjson(url):
     context = ssl._create_unverified_context()
     req = urllib.request.urlopen(url, context=context)
     f = req.read()
     with open('cards.json', 'wb') as file:
         file.write(f)
     return f
Example #6
 def urlopenWithPassword(self, target, username, password):
     context = ssl._create_unverified_context()
     urlopener = myURLOpener(proxies=self.__proxies, context=context)
     if username:
         print "Setting username/password: "******" " + password
         urlopener.setpasswd(username, password)
     return urlopener.open(target).read()
Example #7
def main():
    url = 'https://github.com/kaito834'
    proxys = {
        'http': 'http://127.0.0.1:8080/',
        'https': 'https://127.0.0.1:8080/'
    }

    #  Initialize urllib.request
    proxyhandler = urllib.request.ProxyHandler(proxys)
    # https://www.python.org/dev/peps/pep-0476/
    # This script disables certificate verification, but this is *NOT* recommended.
    # If you use a self-signed certificate, you should instead trust its root certificate
    # via SSLContext.load_verify_locations() (load_cert_chain() is for client certificates).
    # https://docs.python.org/3/library/ssl.html#ssl.SSLContext.load_verify_locations
    # https://docs.python.org/3/library/ssl.html#ssl-security
    context = ssl._create_unverified_context()
    httpshandler = urllib.request.HTTPSHandler(context=context)
    opener = urllib.request.build_opener(proxyhandler, httpshandler)
    urllib.request.install_opener(opener)

    try:
        res = urllib.request.urlopen(url)
        resBody = res.read().decode('utf-8')
    except urllib.error.HTTPError as e:
        print('[!] urllib.error.HTTPError: {0}'.format(e))
    except urllib.error.URLError as e:
        # The URLError below is raised when urlopen() cannot connect to the proxy server.
        # <urlopen error [WinError 10061] No connection could be made because the target machine actively refused it.>
        print('[!] urllib.error.URLError: {0}'.format(e))

        print('[!] Launch a local proxy because it\'s necessary for this script.')
        print('[!] Please retry this script!')
        launchLocalProxy()
    except Exception as e:
        print('[!] Exception: {0}'.format(e))
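For contrast, here is a minimal sketch of the verified alternative that the comments in Example #7 point to; the CA bundle path my_root_ca.pem is a hypothetical placeholder:

import ssl
import urllib.request

# Verify the server against a custom (e.g. self-signed) root CA instead of
# disabling certificate verification entirely.
context = ssl.create_default_context()
context.load_verify_locations('my_root_ca.pem')  # hypothetical CA bundle path
httpshandler = urllib.request.HTTPSHandler(context=context)
opener = urllib.request.build_opener(httpshandler)
urllib.request.install_opener(opener)
# Subsequent urllib.request.urlopen() calls now verify against my_root_ca.pem.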
Example #8
    def __send_xml_str(self, xml_str):
        logger.debug("Sending: %s" % xml_str)

        xml_data = urllib.parse.urlencode({'XML': xml_str}).encode('utf-8')
        request = urllib.request.Request(self.door_url(), xml_data)
        credentials = '%s:%s' % (self.door_user, self.door_pass)
        # Request data must be bytes in Python 3; base64.b64encode replaces the
        # deprecated base64.encodestring and emits no trailing newline.
        base64string = base64.b64encode(credentials.encode('utf-8')).decode('ascii')
        request.add_header("Authorization", "Basic %s" % base64string)
        context = ssl._create_unverified_context()
        context.set_ciphers('RC4-SHA')

        self.lock.acquire()
        try:
            result = urllib.request.urlopen(request, context=context)
            return_code = result.getcode()
            return_xml = result.read()
            result.close()
        finally:
            self.lock.release()

        logger.debug("Response code: %d" % return_code)
        logger.debug("Response: %s" % return_xml)
        if return_code != 200:
            raise Exception("Did not receive 200 return code")
        error = get_attribute(return_xml, "errorMessage")
        if error:
            raise Exception("Received an error: %s" % error)

        return return_xml
Example #9
def get_from_wiki(file_name):
    """We host some larger files used for the test suite separately on the TreeCorr wiki repo
    so people don't need to download them with the code when checking out the repo.
    Most people don't run the tests after all.
    """
    import os
    local_file_name = os.path.join('data',file_name)
    url = 'https://github.com/rmjarvis/TreeCorr/wiki/' + file_name
    if not os.path.isfile(local_file_name):
        try:
            from urllib.request import urlopen
        except ImportError:
            from urllib import urlopen
        import shutil

        print('downloading %s from %s...'%(local_file_name,url))
        # urllib.request.urlretrieve(url,local_file_name)
        # The above line doesn't work very well with the SSL certificate that github puts on it.
        # It works fine in a web browser, but on my laptop I get:
        # urllib.error.URLError: <urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:600)>
        # The solution is to open a context that doesn't do ssl verification.
        # But that can only be done with urlopen, not urlretrieve.  So, here is the solution.
        # cf. http://stackoverflow.com/questions/7243750/download-file-from-web-in-python-3
        #     http://stackoverflow.com/questions/27835619/ssl-certificate-verify-failed-error
        try:
            import ssl
            context = ssl._create_unverified_context()
            u = urlopen(url, context=context)
        except (AttributeError, TypeError):
            # Note: prior to 2.7.9, there is no such function or even the context keyword.
            u = urlopen(url)
        with open(local_file_name, 'wb') as out:
            shutil.copyfileobj(u, out)
        u.close()
        print('done.')
Example #10
    def _make_connect(self, https, host, port, proxy=None):
        if not https:
            if proxy:
                con = httplib.HTTPConnection(
                    proxy[0], proxy[1], timeout=self.timeout)
                con.set_tunnel(host, port)
            else:
                con = httplib.HTTPConnection(host, port, timeout=self.timeout)
            # con .set_debuglevel(2) #?
            con.connect()
            return con
        for p in self.protocol:
            context = ssl._create_unverified_context(p)
            try:
                if proxy:

                    con = httplib.HTTPSConnection(
                        proxy[0], proxy[1], context=context,
                        timeout=self.timeout)
                    con.set_tunnel(host, port)
                else:
                    con = httplib.HTTPSConnection(
                        host, port, context=context, timeout=self.timeout)
                con.connect()
                return con
            except ssl.SSLError, e:
                # print e,protocol
                pass
Example #11
    def test_app_using_ipv6_and_ssl(self):
        CONF.set_default("ssl_cert_file",
                         os.path.join(TEST_VAR_DIR, 'certificate.crt'))
        CONF.set_default("ssl_key_file",
                         os.path.join(TEST_VAR_DIR, 'privatekey.key'))

        greetings = 'Hello, World!!!'

        @webob.dec.wsgify
        def hello_world(req):
            return greetings

        server = manila.wsgi.Server("test_app",
                                    hello_world,
                                    host="::1",
                                    port=0)
        server.start()

        if hasattr(ssl, '_create_unverified_context'):
            response = urllib.request.urlopen(
                'https://[::1]:%d/' % server.port,
                context=ssl._create_unverified_context())
        else:
            response = urllib.request.urlopen(
                'https://[::1]:%d/' % server.port)

        self.assertEqual(greetings, response.read())

        server.stop()
Example #12
    def send(self, type, text, raw):
        message_time = time.time()
        message_timestamp = time.ctime(message_time)

        if check_time_restriction(self.starttime, self.endtime):
            if self.suppress_timestamp == False:
                self.msg_to_send = text[:10000].encode('utf8') + " Message Sent at: " + message_timestamp
            else:
                self.msg_to_send = text[:10000].encode('utf8')

            notify_data = {
                'apikey': self.api_key,
                'application': self.app_name,
                'event': self.event,
                'description': self.msg_to_send,
                'priority': self.priority
            }

            if sys.version_info >= (2,7,9):
                http_handler = HTTPSConnection(PROWL_URL, context=ssl._create_unverified_context())
            else:
                http_handler = HTTPSConnection(PROWL_URL)

            http_handler.request(PROWL_METHOD, PROWL_PATH, headers=self.headers,body=urlencode(notify_data))

            http_response = http_handler.getresponse()

            if http_response.status == 200:
                return True
            else:
                current_app.logger.info('Event Prowl Notification Failed: {0}'. format(http_response.reason))
                raise Exception('Prowl Notification Failed: {0}' . format(http_response.reason))
Example #13
def get_all_versions():
    url = _LIST_VERSIONS_URL
    url += '?_=' + str(int(time.time())) # prevents caching

    want_prereleases = subprocess.check_output('source /data/etc/os.conf && echo $os_prereleases', shell=True, stderr=subprocess.STDOUT).strip() == 'true'

    try:
        logging.debug('board is %s' % _BOARD)
        logging.debug('fetching %s...' % url)

        context = ssl._create_unverified_context()

        response = urllib2.urlopen(url, timeout=settings.REMOTE_REQUEST_TIMEOUT, context=context)
        releases = json.load(response)

        versions = []
        for release in releases:
            if release.get('prerelease') and not want_prereleases:
                continue
            
            for asset in release.get('assets', []):
                if not re.match(r'^motioneyeos-%s-\d{8}\.img\.gz$' % _BOARD, asset['name']):
                    continue

                # only keep releases that actually ship an image for this board
                versions.append(release['name'])
                break

        logging.debug('available versions: %(versions)s' % {'versions': ', '.join(versions)})

        return sorted(versions)

    except Exception as e:
        logging.error('could not get versions: %s' % e, exc_info=True)

    return []
Example #14
    def __init__(self, host=None, port=None, types=(),
                 timeout=8, verify_ssl=False):
        self.host = host
        if not Resolver.host_is_ip(self.host):
            raise ValueError('The host of proxy should be the IP address. '
                             'Try Proxy.create() if the host is a domain')

        self.port = int(port)
        if self.port > 65535:
            raise ValueError('The port of proxy cannot be greater than 65535')

        self.expected_types = set(types) & {'HTTP', 'HTTPS', 'CONNECT:80',
                                            'CONNECT:25', 'SOCKS4', 'SOCKS5'}
        self._timeout = timeout
        self._ssl_context = (True if verify_ssl else
                             _ssl._create_unverified_context())
        self._types = {}
        self._is_working = False
        self.stat = {'requests': 0, 'errors': Counter()}
        self._ngtr = None
        self._geo = Resolver.get_ip_info(self.host)
        self._log = []
        self._runtimes = []
        self._schemes = ()
        self._closed = True
        self._reader = {'conn': None, 'ssl': None}
        self._writer = {'conn': None, 'ssl': None}
Example #15
	def __init__(self):
		try:
			context = ssl._create_unverified_context()
		except AttributeError:
			context = None

		FancyURLopener.__init__(self, context=context)
Example #16
def get_page(url):
    """Return html code from cmsweb.cern.ch of the page with the given url"""
    print "HTTP access: ", url
    try:
        socket_obj = urllib2.urlopen(url)
    except Exception:
        # retry without certificate verification
        socket_obj = urllib2.urlopen(url, context=ssl._create_unverified_context())
    return socket_obj.read()
Example #17
    def get_video_data(self):
        """Gets the page and extracts out the video data."""
        # Reset the filename in case it was previously set.
        self.title = None

        context = ssl._create_unverified_context()
        response = urlopen(self.url,context=context)

        if not response:
            raise PytubeError("Unable to open url: {0}".format(self.url))

        html = response.read()
        if isinstance(html, str):
            restriction_pattern = "og:restrictions:age"
        else:
            restriction_pattern = bytes("og:restrictions:age", "utf-8")

        if restriction_pattern in html:
            raise AgeRestricted("Age restricted video. Unable to download "
                                "without being signed in.")

        # Extract out the json data from the html response body.
        json_object = self._get_json_data(html)

        # Here we decode the stream map and bundle it into the json object. We
        # do this just so we just can return one object for the video data.
        encoded_stream_map = json_object.get("args", {}).get(
            "url_encoded_fmt_stream_map")
        json_object["args"]["stream_map"] = self._parse_stream_map(
            encoded_stream_map)
        return json_object
Example #18
def install_pip():
    """Install pip"""
    try:
        import pip  # NOQA
    except ImportError:
        if PY3:
            from urllib.request import urlopen
        else:
            from urllib2 import urlopen

        if hasattr(ssl, '_create_unverified_context'):
            ctx = ssl._create_unverified_context()
        else:
            ctx = None
        kw = dict(context=ctx) if ctx else {}
        safe_print("downloading %s" % GET_PIP_URL)
        req = urlopen(GET_PIP_URL, **kw)
        data = req.read()

        tfile = os.path.join(tempfile.gettempdir(), 'get-pip.py')
        with open(tfile, 'wb') as f:
            f.write(data)

        try:
            sh('%s %s --user' % (PYTHON, tfile))
        finally:
            os.remove(tfile)
Example #19
def ConnectNoSSL(host='localhost', port=443, user='root', pwd='',
                 service="hostd", adapter="SOAP", namespace=None, path="/sdk",
                 version=None, keyFile=None, certFile=None, thumbprint=None,
                 b64token=None, mechanism='userpass'):
   """
   Provides a standard method for connecting to a specified server without SSL
   verification. Useful when connecting to servers with self-signed certificates
   or when you wish to ignore SSL altogether. Will attempt to create an unverified
   SSL context and then connect via the Connect method.
   """

   if hasattr(ssl, '_create_unverified_context'):
      sslContext = ssl._create_unverified_context()
   else:
      sslContext = None

   return Connect(host=host,
                  port=port,
                  user=user,
                  pwd=pwd,
                  service=service,
                  adapter=adapter,
                  namespace=namespace,
                  path=path,
                  version=version,
                  keyFile=keyFile,
                  certFile=certFile,
                  thumbprint=thumbprint,
                  sslContext=sslContext,
                  b64token=b64token,
                  mechanism=mechanism)
Example #20
def SmartConnectNoSSL(protocol='https', host='localhost', port=443, user='root', pwd='',
                      service="hostd", path="/sdk", connectionPoolTimeout=CONNECTION_POOL_IDLE_TIMEOUT_SEC,
                      preferredApiVersions=None, keyFile=None, certFile=None,
                      thumbprint=None, b64token=None, mechanism='userpass'):
   """
   Provides a standard method for connecting to a specified server without SSL
   verification. Useful when connecting to servers with self-signed certificates
   or when you wish to ignore SSL altogether. Will attempt to create an unverified
   SSL context and then connect via the SmartConnect method.
   """

   if hasattr(ssl, '_create_unverified_context'):
      sslContext = ssl._create_unverified_context()
   else:
      sslContext = None

   return SmartConnect(protocol=protocol,
                       host=host,
                       port=port,
                       user=user,
                       pwd=pwd,
                       service=service,
                       path=path,
                       connectionPoolTimeout=connectionPoolTimeout,
                       preferredApiVersions=preferredApiVersions,
                       keyFile=keyFile,
                       certFile=certFile,
                       thumbprint=thumbprint,
                       sslContext=sslContext,
                       b64token=b64token,
                       mechanism=mechanism)
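Examples #18 through #20 all repeat the same hasattr guard for Pythons older than 2.7.9; as a minimal sketch, the pattern can be factored into a helper (the name unverified_context_or_none is hypothetical, not taken from the sources above):

import ssl

def unverified_context_or_none():
    # Return an unverified SSLContext on Python 2.7.9+/3.4.3+, else None so
    # callers can fall back to the legacy no-context code path.
    if hasattr(ssl, '_create_unverified_context'):
        return ssl._create_unverified_context()
    return None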
Example #21
    def get_https_json_response(server_address, api, username, password):
        """
        Returns the response from the URL specified
        """
        try:
            # lib opener
            response = {}
            context = ssl._create_unverified_context()
            conn = HTTPSConnection(server_address, context=context)
            auth = str.encode("%s:%s" % (str(username), str(password)))
            user_and_pass = b64encode(auth).decode("ascii")
            headers = {'Authorization': 'Basic %s' % user_and_pass, "Accept": 'application/json'}
            conn.request('GET', str(api), headers=headers)
            res = conn.getresponse()
            bit_data = res.read()
            string_data = bit_data.decode(encoding='UTF-8')
            if len(string_data) > 0:
                response['data'] = json.loads(string_data)
            else:
                response['data'] = None
            response['status'] = 200
        except:
            print("--MseHelper get_json_response error:", sys.exc_info()[1])
            response['data'] = sys.exc_info()[1]
            response['status'] = 400

        return response
Example #22
def GetStatus(ip, port):
  status_xml = ''
  url = 'https://%s:%s/status/sessions' % (ip, port)
  context = ssl._create_unverified_context()
  request = urllib2.urlopen(url, context=context)
  response = request.read()
  return xmltodict.parse(response, dict_constructor=dict)['MediaContainer']
Example #23
def download_engine_certs(prefix):
    engine_ip = prefix.virt_env.engine_vm().ip()
    engine_base_url = '/ovirt-engine/services/pki-resource?resource=ca-certificate&format='
    engine_ca_url = engine_base_url + 'X509-PEM-CA'
    engine_ssh_url = engine_base_url + 'OPENSSH-PUBKEY'

    # We use an unverified connection, as L0 host cannot resolve '...engine.lago.local'
    conn = httplib.HTTPSConnection(engine_ip, context=ssl._create_unverified_context())

    def _download_file(url, path):
        conn.request("GET", url)
        resp = conn.getresponse()
        nt.assert_true(
            resp.status == 200
        )
        data = resp.read()
        with open(path, 'wb') as outfile:
            outfile.write(data)

    _download_file(engine_ca_url, 'engine-ca.pem')
    # TODO: verify certificate. Either use it, or run:
    # 'openssl x509 -in engine-ca.pem -text -noout'

    _download_file(engine_ssh_url, 'engine-rsa.pub')
    # TODO: verify public key. Either use it, or run:
    # 'ssh-keygen -l -f engine-rsa.pub'

    conn.close()
Example #24
	def readGithubCommitLogs(self):
		url = 'https://api.github.com/repos/openpli/%s/commits' % self.projects[self.project][0]
		commitlog = ""
		from datetime import datetime
		from json import loads
		from urllib2 import urlopen
		try:
			commitlog += 80 * '-' + '\n'
			commitlog += url.split('/')[-2] + '\n'
			commitlog += 80 * '-' + '\n'
			try:
				# OpenPli 5.0 uses python 2.7.11 and here we need to bypass the certificate check
				from ssl import _create_unverified_context
				log = loads(urlopen(url, timeout=5, context=_create_unverified_context()).read())
			except:
				log = loads(urlopen(url, timeout=5).read())
			for c in log:
				creator = c['commit']['author']['name']
				title = c['commit']['message']
				date = datetime.strptime(c['commit']['committer']['date'], '%Y-%m-%dT%H:%M:%SZ').strftime('%x %X')
				commitlog += date + ' ' + creator + '\n' + title + 2 * '\n'
			commitlog = commitlog.encode('utf-8')
			self.cachedProjects[self.projects[self.project][1]] = commitlog
		except:
			commitlog += _("Currently the commit log cannot be retrieved - please try later again")
		self["AboutScrollLabel"].setText(commitlog)
Example #25
  def checkURL(self, url):
    """Try to connect to a given url. Result is True if url returns HTTP code 200, in any other case
    (like unreachable server or wrong HTTP code) result will be False
    """
    logger.info("Connecting to the following url " + url);

    try:
      parsedurl = urlparse(url)

      if sys.version_info >= (2,7,9):
        import ssl
        ca_connection = httplib.HTTPSConnection(parsedurl[1], context=ssl._create_unverified_context())
      else:
        ca_connection = httplib.HTTPSConnection(parsedurl[1])

      ca_connection.request("GET", parsedurl[2])
      response = ca_connection.getresponse()  
      status = response.status    
      logger.info("Calling url received " + str(status))
      
      if status == 200: 
        return True
      else: 
        return False
    except SSLError as slerror:
        logger.error(str(slerror))
        logger.error("SSLError: Failed to connect. Please check openssl library versions. \n" +
                     "Refer to: https://bugzilla.redhat.com/show_bug.cgi?id=1022468 for more details.")
        return False
    except Exception, e:
      logger.info("Failed to connect to " + str(url) + " due to " + str(e))
      return False
Example #26
 def getServer(self):
     #return self.MyServer
     print "server by xmlrpc = ", self.server
     if sys.version_info<(2,7,9):
         return ServerProxy(self.server,allow_none = 1)
     else:
         return ServerProxy(self.server, allow_none = 1, context=ssl._create_unverified_context())
Example #27
File: pic.py Project: alexxa/pulp
def connect(verify_ssl=True):
    global _CONNECTION
    if verify_ssl:
        _CONNECTION = httplib.HTTPSConnection(HOST, PORT)
    else:
        insecure_context = ssl._create_unverified_context()
        _CONNECTION = httplib.HTTPSConnection(HOST, PORT, context=insecure_context)
Example #28
def get_actual_sid(sid, gamepath):
    version = ""
    with open(join_path(gamepath, "game/ffxivgame.ver"), "r") as f:
        version = f.readline()
    if version == "":
        raise Exception("Unable to read version information!")

    version_hash = (
        gen_hash(join_path(gamepath, "boot/ffxivboot.exe"))
        + ","
        + gen_hash(join_path(gamepath, "boot/ffxivlauncher.exe"))
        + ","
        + gen_hash(join_path(gamepath, "boot/ffxivupdater.exe"))
    )

    response = open_url(
        version_url.format(version=version, sid=sid),
        version_hash.encode("utf-8"),
        version_headers,
        ssl._create_unverified_context(),
    )
    response_data = response.read().decode("utf-8")
    actual_sid = response.headers.get("X-Patch-Unique-Id")

    if response.headers.get("X-Latest-Version") != version or response_data != "":
        print(response.headers.as_string())
        print(response_data)
        raise Exception("Game out of date.  Please run the official launcher to update it.")

    return (actual_sid, version)
Example #29
    def request(self, handler):
        if self.__is_secure:
            conn = http.client.HTTPSConnection(self.__server_address, context=ssl._create_unverified_context())
        else:
            conn = http.client.HTTPConnection(self.__server_address)
        try:
            # Send request to server
            (response, response_body, headers) = self.__do_request(conn, handler)
            if response is not None:
                # Print log message
                self.__log_message("""\trequest = {} {}
    response headers = {}
    response status = {} {}
    response data = {}""".format(handler.method, handler.request_url,
                                 str(response.headers),
                                 response.status, response.reason,
                                 ("(none)" if response_body == "" else response_body)))
                return response, response_body
            else:
                print("There was a problem making the request {0}...".format(handler.request_url))
        except http.client.HTTPException as err:
            print("Connection error: {}".format(err))
        except ConnectionRefusedError:
            print("Connection refused! Check that the gateway daemon is running.")
        except Exception as err:
            print("Unknown error: {0}".format(err))
        finally:
            conn.close()

        return None
Example #30
def ocr_barcode(token, image_str=None, url=None):
    _url = 'https://ais.cn-north-1.myhuaweicloud.com/v1.0/ocr/barcode'

    _data = {
        "image": image_str,
        "url": url
    }

    kreq = urllib2.Request(url=_url)
    kreq.add_header('Content-Type', 'application/json')
    kreq.add_header('X-Auth-Token', token)
    kreq.add_data(json.dumps(_data))

    resp = None
    status_code = None
    try:
        # 
    # Here we use an unverified SSL context because, in FunctionStage,
    # client CA validation has some problems, so we must do this.
        #
        _context = ssl._create_unverified_context()
        r = urllib2.urlopen(kreq, context=_context)

    #
    # We catch HTTPError and URLError because urllib2 can't handle 4XX and
    # 5XX errors within the single urlopen call.
    #
    # A modern, high-level HTTP client library (yes, I mean requests) does not
    # have this problem.
    #
    except HTTPError, e:
        resp = e.read()
        status_code = e.code
Example #31
if len(sys.argv) < 3:
    print("Usage: jetleak.py [url] [port]")
    sys.exit(1)

url = urlparse(sys.argv[1])
if url.scheme == '' and url.netloc == '':
    print("Error: Invalid URL Entered.")
    sys.exit(1)

port = sys.argv[2]

conn = None

if url.scheme == "https":
    conn = http.client.HTTPSConnection(
        url.netloc + ":" + port, context=ssl._create_unverified_context())
elif url.scheme == "http":
    conn = http.client.HTTPConnection(url.netloc + ":" + port)
else:
    print("Error: Only 'http' or 'https' URL Schemes Supported")
    sys.exit(1)

x = "\x00"
headers = {"Referer": x}
conn.request("POST", "/", "", headers)
r1 = conn.getresponse()
print("\n")
print(r1.reason)

if (r1.status == 400 and ("Illegal character 0x0 in state" in r1.reason)):
    print("\r\nThis version of Jetty is VULNERABLE to JetLeak!")
Example #32
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import ssl
import pyone

testSession = "oneadmin:onepass"
testEndpoint = 'https://192.168.121.93/RPC2'
one = pyone.OneServer(testEndpoint, session=testSession, context=ssl._create_unverified_context())

class IntegrationTests(unittest.TestCase):

    def test_pool_info(self):
        hostpool = one.hostpool.info()
        self.assertGreater(len(hostpool.HOST), 0)
        host0 = hostpool.HOST[0]
        self.assertEqual(host0.ID, 0)

    def test_auth_error(self):
        with self.assertRaises(pyone.OneAuthenticationException):
            xone = pyone.OneServer(testEndpoint, session="oneadmin:invalidpass", context=ssl._create_unverified_context())
            xone.hostpool.info()

    def test_market_info(self):
Example #33
def index():
    context = ssl._create_unverified_context()
    x = urllib.request.urlopen("https://api.nasa.gov/planetary/apod?api_key=2evCliypb1mU9AQ6tJvFH6503oS0e4lJMArEIq7p", context = context)
    stuff = x.read()
    data = json.loads(stuff)
    return render_template("index.html", stuff=data["url"], data=data["explanation"])
Example #34
from urllib.request import Request, urlopen
import ssl
import xml.dom.minidom

# https://github.com/CiscoDevNet/coding-skills-sample-code

# This restores the same behavior as before.
context = ssl._create_unverified_context()

req = Request(
    'https://msesandbox.cisco.com/api/contextaware/v1/maps/info/DevNetCampus/DevNetBuilding/DevNetZone'
)
req.add_header('Authorization', 'Basic bGVhcm5pbmc6bGVhcm5pbmc=')
response = urlopen(req, context=context)
responseString = response.read().decode("utf-8")
dom = xml.dom.minidom.parseString(responseString)
pretty_xml = dom.toprettyxml()  # avoid shadowing the imported xml module
print(pretty_xml)
access_points = dom.getElementsByTagName('AccessPoint')
for access_point in access_points:
    ap_name = access_point.getAttribute('name')
    ap_eth_addr = access_point.getAttribute('ethMacAddress')
    ap_ip_addr = access_point.getAttribute('ipAddress')
    print(access_point.tagName + ": " + ap_name + '\t eth: ' + ap_eth_addr +
          '\t ip: ' + ap_ip_addr)
response.close()
Example #35
def read_horizons(vis):
    import urllib2
    import ssl
    if not os.path.exists(vis):
        print 'Input ms data ' + vis + ' does not exist! '
        return -1
    try:
        # ms.open(vis)
        # summary = ms.summary()
        # ms.close()
        # btime = Time(summary['BeginTime'], format='mjd')
        # etime = Time(summary['EndTime'], format='mjd')
        ## alternative way to avoid conflicts with importeovsa, if needed -- more time consuming
        ms.open(vis)
        metadata = ms.metadata()
        if metadata.observatorynames()[0] == 'EVLA':
            observatory_code = '-5'
        elif metadata.observatorynames()[0] == 'EOVSA':
            observatory_code = '-81'
        elif metadata.observatorynames()[0] == 'ALMA':
            observatory_code = '-7'
        ms.close()
        tb.open(vis)
        btime = Time(tb.getcell('TIME', 0) / 24. / 3600., format='mjd')
        etime = Time(tb.getcell('TIME',
                                tb.nrows() - 1) / 24. / 3600.,
                     format='mjd')
        tb.close()
        print "Beginning time of this scan " + btime.iso
        print "End time of this scan " + etime.iso
        cmdstr = "http://ssd.jpl.nasa.gov/horizons_batch.cgi?batch=l&TABLE_TYPE='OBSERVER'&QUANTITIES='1,17,20'&CSV_FORMAT='YES'&ANG_FORMAT='DEG'&CAL_FORMAT='BOTH'&SOLAR_ELONG='0,180'&CENTER='{}@399'&COMMAND='10'&START_TIME='".format(
            observatory_code
        ) + btime.iso.replace(
            ' ', ','
        ) + "'&STOP_TIME='" + etime.iso[:-4].replace(
            ' ', ','
        ) + "'&STEP_SIZE='1 m'&SKIP_DAYLT='NO'&EXTRA_PREC='YES'&APPARENT='REFRACTED'"
        try:
            context = ssl._create_unverified_context()
            f = urllib2.urlopen(cmdstr, context=context)
        except:
            f = urllib2.urlopen(cmdstr)
    except:
        print 'error in reading ms file: ' + vis + ' to obtain the ephemeris!'
        return -1
    # inputs:
    #   ephemfile:
    #       OBSERVER output from JPL Horizons for topocentric coordinates with for example
    #       target=Sun, observer=VLA=-5
    #       extra precision, quantities 1,17,20, REFRACTION
    #       routine goes through file to find $$SOE which is start of ephemeris and ends with $$EOE
    # outputs: a Python dictionary containing the following:
    #   timestr: date and time as a string
    #   time: modified Julian date
    #   ra: right ascention, in rad
    #   dec: declination, in rad
    #   rastr: ra in string
    #   decstr: dec in string
    #   p0: solar p angle, CCW with respect to the celestial north pole
    #   delta: distance from the disk center to the observer, in AU
    #   delta_dot: time derivative of delta, in the light of sight direction. Negative means it is moving toward the observer
    #
    # initialize the return dictionary
    ephem0 = dict.fromkeys(['time', 'ra', 'dec', 'delta', 'p0'])
    lines = f.readlines()
    f.close()
    nline = len(lines)
    istart = 0
    for i in range(nline):
        line = lines[i]
        if line[0:5] == '$$SOE':  # start recording
            istart = i + 1
        if line[0:5] == '$$EOE':  # end recording
            iend = i
    newlines = lines[istart:iend]
    nrec = len(newlines)
    ephem_ = []
    t = []
    ra = []
    dec = []
    p0 = []
    delta = []
    for line in newlines:
        items = line.split(',')
        # t.append({'unit':'mjd','value':Time(float(items[1]),format='jd').mjd})
        # ra.append({'unit': 'rad', 'value': np.radians(float(items[4]))})
        # dec.append({'unit': 'rad', 'value': np.radians(float(items[5]))})
        # p0.append({'unit': 'deg', 'value': float(items[6])})
        # delta.append({'unit': 'au', 'value': float(items[8])})
        t.append(Time(float(items[1]), format='jd').mjd)
        ra.append(np.radians(float(items[4])))
        dec.append(np.radians(float(items[5])))
        p0.append(float(items[6]))
        delta.append(float(items[8]))
    # convert list of dictionary to a dictionary of arrays
    ephem = {'time': t, 'ra': ra, 'dec': dec, 'p0': p0, 'delta': delta}
    return ephem
Example #36
def main():

    si = None
    global inputs
    inputs = get_args()
    log.info("Trying to connect to VCENTER SERVER . . .")
    context = None
    operation = inputs['operation']
    log.info("Operation selected : " + operation)
    if operation == 'list_all':
        global final_obj
        final_obj = list()
        try:
            for vc in inputs['vcenter_ip']:

                if inputs['ignore_ssl'] and hasattr(
                        ssl, "_create_unverified_context"):
                    context = ssl._create_unverified_context()
                si = connect.Connect(vc,
                                     443,
                                     inputs['vcenter_user'],
                                     inputs['vcenter_password'],
                                     sslContext=context)
                atexit.register(Disconnect, si)
                log.info("Connected to VCENTER SERVER : " + vc)
                log.info("Searching for snapshot . . .")
                content = si.RetrieveContent()
                container = content.viewManager.CreateContainerView(
                    content.rootFolder, [vim.VirtualMachine], True)
                for c in container.view:

                    if c.snapshot != None:
                        obj = list_snapshots_recursively(
                            c.snapshot.rootSnapshotList)

                        for i in obj:
                            final_obj.append(vc + ";" + c.name + ";" + i)

                log.info("Snapshot data collected")
            print(
                "-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------"
            )
            print("| %-40s| %-25s| %-50s| %-40s| %-10s| %-40s" %
                  ('Vcenter Name', 'VM Name', 'SnapshotName', 'Created Time',
                   'Hours', 'Description'))
            print(
                "------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------"
            )
            for line in final_obj:
                val1 = line.split(';')[0].strip()
                val2 = line.split(';')[1].strip()
                val3 = line.split(';')[2].strip()
                val4 = line.split(';')[3].strip()
                val5 = line.split(';')[4].strip()
                val6 = line.split(';')[5].strip()

                print("| %-40s| %-25s| %-50s| %-40s| %-10s | %-40s" %
                      (val1, val2, val3, val4, val5, val6))
            print(
                "-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------"
            )
            hrd = inputs['hour']

            out = create_html(final_obj, hrd)
            if out == True:
                log.info("Html file created!!!")
            else:
                log.info("Error while Creating html :" + str(out))
        except Exception as e:
            log.error(e)

    elif operation == 'remove':

        try:

            for vc in inputs['vcenter_ip']:
                if inputs['ignore_ssl'] and hasattr(
                        ssl, "_create_unverified_context"):
                    context = ssl._create_unverified_context()
                si = connect.Connect(vc,
                                     443,
                                     inputs['vcenter_user'],
                                     inputs['vcenter_password'],
                                     sslContext=context)
                atexit.register(Disconnect, si)
                log.info("Connected to VCENTER SERVER : " + vc)
                log.info("Searching for snapshot . . .")
                content = si.RetrieveContent()
                container = content.viewManager.CreateContainerView(
                    content.rootFolder, [vim.VirtualMachine], True)
                for c in container.view:
                    if c.snapshot != None:
                        ob = get_snapshot(c.snapshot.rootSnapshotList)
                        for i in ob:
                            WaitForTask(i.RemoveSnapshot_Task(False))
                            log.info(
                                "Deleted snapshot for Vcenter: %s , VM name: %s"
                                % (vc, c.name))
        except Exception as e:
            log.error(e)
    else:

        log.error("Specify operation in " "remove/list_all")
Example #37
def predict_house(url=None):
    ssl._create_default_https_context = ssl._create_unverified_context
    context = ssl._create_unverified_context()
    if url is None:
        url = "https://s-media-cache-ak0.pinimg.com/originals/7f/67/4f/7f674f7d115c654ad2e610bae1ea7c27.jpg"

    urllib.request.urlretrieve(url, 'house.png')
    # r= urllib.request.urlopen(url)
    # imgdata = r.read()
    # resized_image = Image.open(io.BytesIO(r.data))

    #print(resized_image)

    # First, pass the path of the image
    dir_path = os.path.dirname(os.path.realpath(__file__))
    image_path = "house.png"
    filename = image_path
    image_size = 128
    num_channels = 3
    images = []
    # Reading the image using OpenCV
    #image = cv2.imread(filename)
    # Resizing the image to our desired size and preprocessing will be done exactly as done during training
    image = cv2.imread(filename)
    image = cv2.resize(image, (image_size, image_size), 0, 0, cv2.INTER_LINEAR)
    images.append(image)
    images = np.array(images, dtype=np.uint8)
    images = images.astype('float32')
    images = np.multiply(images, 1.0 / 255.0)
    #The input to the network is of shape [None image_size image_size num_channels]. Hence we reshape.
    x_batch = images.reshape(1, image_size, image_size, num_channels)
    model_directory = [
        dir_path + '/house_models/chimney_smoke_none_model',
        dir_path + '/house_models/house_door_model'
    ]
    transmitting_sentences = ''
    ind = [0, 0, 0]
    cnt = 0

    for i in model_directory:
        ## Let us restore the saved model
        sess = tf.Session()
        # saver = tf.train.Saver()
        # Step-1: Recreate the network graph. At this step only graph is created.
        saver = tf.train.import_meta_graph(i + '/model.meta')
        # Step-2: Now let's load the weights saved using the restore method.
        saver.restore(sess, tf.train.latest_checkpoint(i))

        # Accessing the default graph which we have restored
        graph = tf.get_default_graph()

        # Now, let's get hold of the op that we can be processed to get the output.
        # In the original network y_pred is the tensor that is the prediction of the network
        y_pred = graph.get_tensor_by_name("y_pred:0")

        ## Let's feed the images to the input placeholders
        x = graph.get_tensor_by_name("x:0")
        y_true = graph.get_tensor_by_name("y_true:0")
        if cnt == 0:
            print(cnt)
            y_test_images = np.zeros((1, 3))
        else:
            y_test_images = np.zeros((1, 2))
        ### Creating the feed_dict that is required to be fed to calculate y_pred
        feed_dict_testing = {x: x_batch, y_true: y_test_images}
        result = sess.run(y_pred, feed_dict=feed_dict_testing)
        # result is of this format [probabiliy_of_rose probability_of_sunflower]
        print(result)
        tf.reset_default_graph()

        ind[cnt] = np.argmax(result)
        cnt = cnt + 1

    file_add = ['chimney_', 'door_']
    transmitting_sentences = ""
    for idx, prefix in enumerate(file_add):
        # use each model's predicted class index to pick the matching text file
        file_read = dir_path + '/' + prefix + str(ind[idx]) + '.txt'
        print(file_read)
        f = io.open(file_read, 'rb')
        line = f.read()
        transmitting_sentences += line.decode('euc-kr') + '\n'
        print(transmitting_sentences)
        f.close()

    print(transmitting_sentences)
    return transmitting_sentences
Example #38
 def check_external(self, homepage, path, text):
     headers = {'User-Agent': 'Mozilla/5.0'}
     context = ssl._create_unverified_context()
     request = urllib2.Request(path, headers = headers)
     assert path in homepage
     assert text in urllib2.urlopen(request, context = context).read()
Example #39
import requests

header = {
    'User-Agent':
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
}
# requests has no ssl-context argument; certificate verification is disabled
# with verify=False instead.
response = requests.get('', headers=header, verify=False)
print(response.text)

#1. Find the category
#2. Find the ajax endpoint
#https://xueqiu.com/v4/statuses/public_timeline_by_category.json?since_id=-1&max_id=20289503&count=15&category=-1

#Data layer structure
data = {'list': [{}, {}, {}]}
data['list']
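When verification is disabled this way, requests emits an InsecureRequestWarning on every call; a minimal sketch of silencing it (optional, assuming urllib3 is installed alongside requests):

import urllib3

# Silence the InsecureRequestWarning emitted when verify=False is used.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)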
Example #40
def parse():
    '''
    Read command-line arguments, return a simple configuration for running tests.
    '''
    parser = argparse.ArgumentParser(
        description=
        'Connect to an available board, flash image(s), and run tests.',
        usage='bft [options...]',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=HELP_EPILOG)
    parser.add_argument('-a',
                        '--analysis',
                        metavar='',
                        type=str,
                        default=None,
                        help='Only run post processing analysis on logs')
    parser.add_argument('-b',
                        '--board_type',
                        metavar='',
                        type=str,
                        nargs='+',
                        default=None,
                        help='MODEL(s) of board to connect to')
    parser.add_argument('-c',
                        '--config_file',
                        metavar='',
                        type=str,
                        default=None,
                        help='JSON config file for boardfarm')
    parser.add_argument('-e',
                        '--extend',
                        metavar='',
                        type=str,
                        default=None,
                        action="append",
                        help='NAME of extra test to run')
    parser.add_argument('-f',
                        '--filter',
                        metavar='',
                        type=str,
                        default=None,
                        action="append",
                        help='Regex filter off arbitrary board parameters')
    parser.add_argument(
        '-g',
        '--golden',
        metavar='',
        type=str,
        default=[],
        nargs='+',
        help='Path to JSON results to compare against (golden master)')
    parser.add_argument('-i',
                        '--inventory',
                        action='store_true',
                        help='List available boards and exit')
    parser.add_argument('-k',
                        '--kernel',
                        metavar='',
                        type=str,
                        default=None,
                        help='URL or file PATH of Kernel image to flash')
    parser.add_argument('-l',
                        '--list_tests',
                        action='store_true',
                        help='List available tests and exit')
    parser.add_argument('-m',
                        '--meta_img_loc',
                        metavar='',
                        type=str,
                        default=None,
                        help='URL or file PATH to meta image to flash')
    parser.add_argument('-n',
                        '--board_names',
                        metavar='',
                        type=str,
                        nargs='+',
                        default=[],
                        help='NAME(s) of boards to run on')
    owrt_tests_dir = os.path.join(os.getcwd(), "results", '')
    parser.add_argument('-o',
                        '--output_dir',
                        metavar='',
                        type=str,
                        default=owrt_tests_dir,
                        help='Directory to output results files to')
    parser.add_argument('-p',
                        '--package',
                        metavar='',
                        type=str,
                        action="append",
                        default=None,
                        help='URL or file PATH of ipk install after boot')
    parser.add_argument('-q',
                        '--feature',
                        metavar='',
                        type=str,
                        default=[],
                        nargs='+',
                        help='Features required for this test run')
    parser.add_argument('-r',
                        '--rootfs',
                        metavar='',
                        type=str,
                        default=None,
                        help='URL or file PATH of Rootfs image to flash')
    parser.add_argument('-s',
                        '--sysupgrade',
                        metavar='',
                        type=str,
                        default=None,
                        help='URL or file PATH to Sysupgrade image')
    parser.add_argument('-t',
                        '--retry',
                        type=int,
                        default=0,
                        help='How many times to retry every test if it fails')
    parser.add_argument('-u',
                        '--uboot',
                        metavar='',
                        type=str,
                        default=None,
                        help=argparse.SUPPRESS)
    parser.add_argument('-v',
                        '--reboot-vms',
                        action="store_true",
                        help='Reboot VMs before starting tests')
    parser.add_argument('-w',
                        '--wan',
                        metavar='',
                        type=str,
                        default='dhcp',
                        help='WAN protocol, dhcp (default) or pppoe')
    parser.add_argument('-x',
                        '--testsuite',
                        metavar='',
                        type=str,
                        default=None,
                        help='NAME of test suite to run')
    parser.add_argument(
        '-y',
        '--batch',
        action='store_true',
        help='Run in unattended mode - do not spawn console on failed test')
    parser.add_argument('-z',
                        '--no-network',
                        action='store_true',
                        help='Skip basic network tests when booting')
    parser.add_argument(
        '--bootargs',
        metavar='',
        type=str,
        default=None,
        help='bootargs to set or append to default args (board dependent)')
    parser.add_argument('--nfsroot',
                        metavar='',
                        type=str,
                        default=None,
                        help='URL or file PATH of Rootfs image to flash')
    parser.add_argument('--version',
                        action='version',
                        version='%(prog)s {}'.format(library.version),
                        help='show version and exit')
    parser.add_argument(
        '--nostrict',
        action='store_true',
        help='ignores failure to import a test from a testsuite')

    args = parser.parse_args()

    if args.list_tests:
        import tests
        tests.init(config)
        # Print all classes that are a subclass of TestCase
        for e in dir(tests):
            thing = getattr(tests, e)
            if inspect.isclass(thing) and \
               issubclass(thing, unittest2.TestCase):
                try:
                    print("%20s - %s" % (e, thing.__doc__.split('\n')[0]))
                except:
                    print("%20s -" % e)
        sys.exit(0)

    try:
        if args.config_file is not None:
            config.boardfarm_config_location = args.config_file

        if config.boardfarm_config_location.startswith("http"):
            data = urlopen(config.boardfarm_config_location).read().decode()
        else:
            data = open(config.boardfarm_config_location, 'r').read()

        config.boardfarm_config = json.loads(data)

        if "_redirect" in config.boardfarm_config and args.config_file is None:
            print("Using boardfarm config file at %s" %
                  config.boardfarm_config['_redirect'])
            print("Please set your default config by doing:")
            print('    export BFT_CONFIG="%s"' %
                  config.boardfarm_config['_redirect'])
            print(
                "If you want to use local config, remove the _redirect line.")
            data = urlopen(
                config.boardfarm_config['_redirect']).read().decode()
            config.boardfarm_config_location = config.boardfarm_config[
                '_redirect']
            config.boardfarm_config = json.loads(data)

        config.boardfarm_config.pop('_redirect', None)

        if 'locations' in config.boardfarm_config:
            location = config.boardfarm_config['locations']
            del config.boardfarm_config['locations']

            for board in config.boardfarm_config:
                if 'location' in config.boardfarm_config[board]:
                    board_location = config.boardfarm_config[board]['location']
                    if board_location in location:
                        for key, value in location[board_location].items():
                            if isinstance(value, list):
                                config.boardfarm_config[board][key].extend(
                                    value)
                            else:
                                config.boardfarm_config[board][key] = value

    except Exception as e:
        print(e)
        print('Unable to access/read Board Farm configuration\n%s' %
              config.boardfarm_config_location)
        sys.exit(1)

    # Check if boardfarm configuration is empty
    if not config.boardfarm_config:
        print("ERROR! Boardfarm config at %s is empty, so" % args.config_file)
        print("either all stations are in use or disabled.")
        sys.exit(10)
    # Check if given board type(s) have any overlap with available board types from config
    if args.board_type:
        all_board_types = [
            config.boardfarm_config[key].get('board_type')
            for key in config.boardfarm_config
        ]
        if not (set(args.board_type) & set(all_board_types)):
            print("ERROR! You specified board types: %s " %
                  " ".join(args.board_type))
            print("but that is not an existing & available type of board.")
            print("Please choose a board type from:")
            print("\n".join([" * %s" % x for x in set(all_board_types)]))
            sys.exit(10)
    # Check if given board name(s) are present in available boards
    if args.board_names:
        all_board_names = [
            key for key in config.boardfarm_config if key != "locations"
        ]
        if not (set(args.board_names) & set(all_board_names)):
            print("ERROR! You specified board names: %s " %
                  " ".join(args.board_names))
            print("but that is not an existing & available board.")
            print("Please choose a board name from:")
            print("\n".join([" * %s" % x for x in sorted(all_board_names)]))
            sys.exit(10)

    config.batch = args.batch

    if args.inventory:
        print("%11s  %15s  %5s  %25s  %25s  %s" %
              ('Name', 'Model', 'Auto', 'LAN', 'WAN', 'Notes'))
        bf = config.boardfarm_config
        for i, b in enumerate(sorted(bf)):
            if args.board_type is None or bf[b].get(
                    'board_type') in args.board_type:
                if not args.board_names or b in args.board_names:
                    info = {
                        'name': b,
                        'type': bf[b].get('board_type'),
                        'wlan': bf[b].get('wlan_device') is not None,
                        'auto': bf[b].get('available_for_autotests', True),
                        'conn_cmd': bf[b].get('conn_cmd'),
                        'lan_device': bf[b].get('lan_device', ''),
                        'wan_device': bf[b].get('wan_device', ''),
                        'notes': bf[b].get('notes', "")
                    }
                    if not args.filter or filter_boards(bf[b], args.filter):
                        print(
                            "%(name)11s  %(type)15s  %(auto)5s  %(lan_device)25s  %(wan_device)25s  %(notes)s"
                            % info)
        print("To connect to a board by name:\n  ./bft -x connect -n NAME")
        print(
            "To connect to any board of a given model:\n  ./bft -x connect -b MODEL"
        )
        sys.exit(0)

    if not hasattr(config, 'INSTALL_PKGS'):
        config.INSTALL_PKGS = ""

    config.retry = args.retry

    if args.package:
        for pkg in args.package:
            config.INSTALL_PKGS += " %s" % pkg

    config.UBOOT = args.uboot
    config.KERNEL = args.kernel
    config.ROOTFS = args.rootfs
    config.NFSROOT = args.nfsroot
    config.META_BUILD = args.meta_img_loc
    # Quick check to make sure file url/path arguments are reasonable
    for x in (config.UBOOT, config.KERNEL, config.ROOTFS, config.META_BUILD):
        if x is None:
            continue
        if x.startswith('http://') or x.startswith('https://'):
            try:

                def add_basic_auth(login_str, request):
                    '''Adds Basic auth to http request, pass in login:password as string'''
                    import base64
                    encodeuser = base64.b64encode(
                        login_str.encode('utf-8')).decode("utf-8")
                    authheader = "Basic %s" % encodeuser
                    request.add_header("Authorization", authheader)

                import ssl
                context = ssl._create_unverified_context()

                try:
                    from urllib.request import Request  # Python 3
                except ImportError:
                    from urllib2 import Request  # Python 2
                req = Request(x)

                try:
                    import netrc
                    try:
                        from urllib.parse import urlparse  # Python 3
                    except ImportError:
                        from urlparse import urlparse  # Python 2
                    n = netrc.netrc()
                    login, unused, password = n.authenticators(
                        urlparse(x).hostname)
                    add_basic_auth("%s:%s" % (login, password), req)
                except (TypeError, ImportError, IOError,
                        netrc.NetrcParseError):
                    pass

                # If url returns 404 or similar, raise exception
                urlopen(req, timeout=20, context=context)
            except Exception as e:
                print(e)
                print('Error trying to access %s' % x)
                sys.exit(1)
        else:
            if not os.path.isfile(x):
                print("File not found: %s" % x)
                sys.exit(1)

    if args.sysupgrade:
        config.SYSUPGRADE_NEW = args.sysupgrade
    if args.testsuite:
        config.TEST_SUITE = args.testsuite
    else:
        if args.extend:
            # One or more test cases were specified on the command line; just boot first.
            config.TEST_SUITE = "flash"
        else:
            # No test suite or test cases specified, so just boot and interact.
            config.TEST_SUITE = "interact"
    if args.extend:
        config.EXTRA_TESTS = args.extend
        config.EXTRA_TESTS += ["Interact"]

    config.output_dir = os.path.abspath(args.output_dir) + os.sep
    try:
        os.mkdir(config.output_dir)
    except OSError:
        pass  # output directory already exists

    if args.analysis:
        import analysis
        for cstr in dir(analysis):
            c = getattr(analysis, cstr)
            if inspect.isclass(c) and issubclass(c, analysis.Analysis):
                sys.stdout.write("Running analysis class = %s... " % c)
                console_log = open(args.analysis, 'r').read()
                from analysis.analysis import prepare_log
                try:
                    c().analyze(prepare_log(console_log), config.output_dir)
                    print("DONE!")
                except Exception as e:
                    print("FAILED!")
                    traceback.print_exc(file=sys.stdout)
                    continue
        exit(0)

    if args.board_type:
        library.print_bold("Selecting board from board type = %s" %
                           args.board_type)
        config.BOARD_NAMES = []
        possible_names = config.boardfarm_config
        if args.board_names:
            print("Board names = %s" % args.board_names)
            # Allow selection only from given set of board names
            possible_names = set(config.boardfarm_config) & set(
                args.board_names)
        for b in possible_names:
            if len(args.board_names) != 1 and \
               config.boardfarm_config[b].get('available_for_autotests', True) is False:
                # Skip this board
                continue
            if args.feature != []:
                if 'feature' not in config.boardfarm_config[b]:
                    continue
                features = config.boardfarm_config[b]['feature']
                if 'devices' in config.boardfarm_config[b]:
                    seen_names = []
                    for d in config.boardfarm_config[b]['devices']:
                        if 'feature' in d:
                            # since we only connect to one type of device
                            # we need to ignore the features on the other ones
                            # even though they should be the same
                            if d['name'] in seen_names:
                                continue
                            seen_names.append(d['name'])

                            if isinstance(d['feature'], (str, unicode)):
                                d['feature'] = [d['feature']]
                            features.extend(x for x in d['feature']
                                            if x not in features)
                if isinstance(features, (str, unicode)):
                    features = [features]
                if set(args.feature) != set(args.feature) & set(features):
                    continue
            for t in args.board_type:
                if config.boardfarm_config[b]['board_type'].lower() == t.lower():
                    if args.filter:
                        if filter_boards(config.boardfarm_config[b],
                                         args.filter, b):
                            config.BOARD_NAMES.append(b)
                    else:
                        config.BOARD_NAMES.append(b)
        if not config.BOARD_NAMES:
            print(
                "ERROR! No boards meet selection requirements and have available_for_autotests = True."
            )
            sys.exit(10)
    else:
        if not args.board_names:
            print("ERROR")
            print("You must specify a board name with the '-n' argument:")
            print("./run-all.py -n 3000")
            print(
                "That same board name must be present in boardfarm configuration."
            )
            sys.exit(1)
        else:
            config.BOARD_NAMES = args.board_names

    config.WAN_PROTO = args.wan
    config.reboot_vms = args.reboot_vms
    config.setup_device_networking = not args.no_network
    config.bootargs = args.bootargs
    config.golden = args.golden
    config.features = args.feature
    config.TEST_SUITE_NOSTRICT = args.nostrict

    return config
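The URL sanity check above combines three techniques: an unverified SSL context, a ~/.netrc credential lookup, and a manually built basic-auth header. A minimal self-contained Python 3 sketch of the same check (the helper name is illustrative, not part of the original script):

import base64
import netrc
import ssl
from urllib.parse import urlparse
from urllib.request import Request, urlopen

def check_url(url, timeout=20):
    """Raise if url is unreachable; use ~/.netrc credentials when available."""
    req = Request(url)
    try:
        login, _, password = netrc.netrc().authenticators(urlparse(url).hostname)
        token = base64.b64encode(('%s:%s' % (login, password)).encode()).decode()
        req.add_header('Authorization', 'Basic %s' % token)
    except (TypeError, IOError, netrc.NetrcParseError):
        pass  # no usable netrc entry; try anonymously
    # A 404 or similar raises an exception, just like the check above
    urlopen(req, timeout=timeout, context=ssl._create_unverified_context())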
"""
import ssl
import urllib.request

global variables, resultMetadata

__file__ = variables.get("PA_TASK_NAME")
print("BEGIN " + __file__)

# -------------------------------------------------------------
# Import an external python script containing a collection of
# common utility Python functions and classes
PA_CATALOG_REST_URL = variables.get("PA_CATALOG_REST_URL")
PA_PYTHON_UTILS_URL = PA_CATALOG_REST_URL + "/buckets/machine-learning/resources/Utils_Script/raw"
if PA_PYTHON_UTILS_URL.startswith('https'):
    exec(urllib.request.urlopen(PA_PYTHON_UTILS_URL, context=ssl._create_unverified_context()).read(), globals())
else:
    exec(urllib.request.urlopen(PA_PYTHON_UTILS_URL).read(), globals())
global check_task_is_enabled, assert_not_none_not_empty
global get_input_variables, get_and_decompress_dataframe
global preview_dataframe_in_task_result

# -------------------------------------------------------------
# Check if the Python task is enabled or not
check_task_is_enabled()

# -------------------------------------------------------------
# Get data from the propagated variables
#
OUTPUT_TYPE = variables.get("OUTPUT_TYPE")
assert_not_none_not_empty(OUTPUT_TYPE, "OUTPUT_TYPE should be defined!")
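The script above fetches a shared utility module over HTTPS (skipping certificate verification) and exec()s it into the global namespace. As a reusable sketch, assuming nothing beyond the standard library (the helper name is illustrative, not part of the ProActive API):

import ssl
import urllib.request

def load_remote_script(url, namespace):
    """Fetch a Python script from url and execute it in the given namespace."""
    # Skip certificate verification only for HTTPS endpoints, as above
    context = ssl._create_unverified_context() if url.startswith('https') else None
    with urllib.request.urlopen(url, context=context) as resp:
        exec(resp.read(), namespace)

# e.g. load_remote_script(PA_PYTHON_UTILS_URL, globals())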
Exemplo n.º 42
0
def main():
    start_time = time.time()
    dataset = []
    sys.stderr.write(str(datetime.datetime.now()) + '\n')

    book_index = 0
    for i, s_url in enumerate(ProgressBar()(search_urls)):
        time.sleep(SLEEP_SEC)
        for try_count in range(MAX_OPEN_COUNT):
            try:
                hdr = {
                    'User-Agent': 'Mozilla/5.0',
                    'referer': 'https://www.smashwords.com'
                }
                context = ssl._create_unverified_context()
                req = urllib.request.Request(s_url, headers=hdr)
                response = urllib.request.urlopen(req, context=context)
                #response = opener.open(s_url)
                if try_count >= 1:
                    sys.stderr.write('Succeeded in opening {}\n'.format(s_url))
                break  # success
            except Exception as e:
                sys.stderr.write('Failed to open {}\n'.format(s_url))
                sys.stderr.write('{}: {}\n'.format(type(e).__name__, str(e)))
                time.sleep(RETRY_SLEEP_SEC)
        else:
            sys.stderr.write('Gave up opening {}\n'.format(s_url))
            continue  # nothing was fetched; move on to the next search page
        body = response.read()
        soup = BeautifulSoup(body, 'lxml')

        book_links = soup.find_all(class_="library-title")

        for b_link in book_links:
            book_index += 1
            b_url = b_link.get('href')
            for try_count in range(MAX_OPEN_COUNT):
                try:
                    hdr = {
                        'User-Agent': 'Mozilla/5.0',
                        'referer': 'https://www.smashwords.com'
                    }
                    context = ssl._create_unverified_context()
                    req = urllib.request.Request(b_url, headers=hdr)
                    response = urllib.request.urlopen(req, context=context)
                    #response = opener.open(b_url)
                    if try_count >= 1:
                        sys.stderr.write(
                            'Succeeded in opening {}\n'.format(b_url))
                    break  # success
                except Exception as e:
                    sys.stderr.write('Failed to open {}\n'.format(b_url))
                    sys.stderr.write('{}: {}\n'.format(
                        type(e).__name__, str(e)))
                    time.sleep(RETRY_SLEEP_SEC)
            else:
                sys.stderr.write('Gave up opening {}\n'.format(b_url))
                continue  # nothing was fetched; skip this book

            body = response.read()
            soup = BeautifulSoup(body, 'lxml')

            # get meta
            meta_infos = soup.find_all(class_="col-md-3")
            if not meta_infos:
                sys.stderr.write('Failed: meta_info {}\n'.format(b_url))
                continue
            meta_txts = [
                m.text for m in meta_infos if 'Language: English' in m.text
            ]

            # check lang
            is_english = len(meta_txts) >= 1
            if not is_english:
                continue

            # get num words
            meta_txt = meta_txts[0].replace(',', '')
            match = num_words_pt.search(meta_txt)
            if match:
                num_words = int(match.group(1))
            elif 'num_words' in REQUIRED:
                sys.stderr.write('Failed: num_words {}\n'.format(b_url))
                continue
            else:
                num_words = 0

            # get publish date
            meta_txt = meta_txts[0]
            match = pub_date_pt.search(meta_txt)
            if match:
                pub_date = match.group(1)
            elif 'publish' in REQUIRED:
                sys.stderr.write('Failed: publish {}\n'.format(b_url))
                continue
            else:
                pub_date = ''

            # get genres
            genre_txts = soup.find_all(class_="category")
            if genre_txts:
                genres = [
                    g.text.replace('\u00a0\u00bb\u00a0', '\t').strip()
                    for g in genre_txts
                ]
            elif 'genres' in REQUIRED:
                sys.stderr.write('Failed: genre {}\n'.format(b_url))
                continue
            else:
                genres = []

            # get title
            title = soup.find("h1")
            if title:
                title = title.text
            elif 'title' in REQUIRED:
                sys.stderr.write('Failed: title {}\n'.format(b_url))
                continue
            else:
                title = ''

            # get author
            author = soup.find(itemprop="author")
            if author:
                author = author.text
            elif 'author' in REQUIRED:
                sys.stderr.write('Failed: author {}\n'.format(b_url))
                continue
            else:
                author = ''

            # TODO: Using <div class="card" id="download"> and "epub"/"txt"
            # get epub
            epub_links = soup.find_all(
                title=
                "Supported by many apps and devices (e.g., Apple Books, Barnes and Noble Nook, Kobo, Google Play, etc.)"
            )
            if epub_links:
                epub_url = epub_links[0].get('href')
                if epub_url:
                    epub_url = 'https://www.smashwords.com' + epub_url
                elif 'epub' in REQUIRED:
                    sys.stderr.write('Failed: epub2 {}\n'.format(b_url))
                    continue
                else:
                    epub_url = ''
            elif 'epub' in REQUIRED:
                sys.stderr.write('Failed: epub1 {}\n'.format(b_url))
                continue
            else:
                epub_url = ''

            # get txt if possible
            txt_links = soup.find_all(
                title="Plain text; contains no formatting")
            if not txt_links:
                txt_url = ''
            else:
                txt_url = txt_links[0].get('href')
                if not txt_url:
                    txt_url = ''
                else:
                    txt_url = 'https://www.smashwords.com' + txt_url

            if not epub_url and not txt_url:
                sys.stderr.write('Failed: epub and txt {}\n'.format(b_url))
                continue

            data = {
                'page': b_url,
                'epub': epub_url,
                'txt': txt_url,
                'title': title,
                'author': author,
                'genres': genres,
                'publish': pub_date,
                'num_words': num_words,
                'b_idx': book_index
            }
            print(json.dumps(data))
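Both download loops above rely on Python's for/else: the else branch runs only when the loop finished without a break, i.e. when every attempt failed. The idiom in isolation (names and retry counts are illustrative):

import sys
import time
import ssl
import urllib.request

def open_with_retries(url, max_tries=3, retry_sleep=5):
    """Return an HTTP response for url, or None if every attempt fails."""
    context = ssl._create_unverified_context()
    hdr = {'User-Agent': 'Mozilla/5.0'}
    for _ in range(max_tries):
        try:
            req = urllib.request.Request(url, headers=hdr)
            response = urllib.request.urlopen(req, context=context)
            break  # success
        except Exception as e:
            sys.stderr.write('Failed to open {}: {}\n'.format(url, e))
            time.sleep(retry_sleep)
    else:
        return None  # no break was reached: every try failed
    return response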
Exemplo n.º 43
0
    def do_GET(self):
        if self.path == 'http://proxy2.test/':
            self.send_cacert()
            return

        req = self
        content_length = int(req.headers.get('Content-Length', 0))
        req_body = self.rfile.read(content_length) if content_length else None

        if req.path[0] == '/':
            if isinstance(self.connection, ssl.SSLSocket):
                req.path = "https://%s%s" % (req.headers['Host'], req.path)
            else:
                req.path = "http://%s%s" % (req.headers['Host'], req.path)

        req_body_modified = self.request_handler(req, req_body)
        if req_body_modified is False:
            self.send_error(403)
            return
        elif req_body_modified is not None:
            req_body = req_body_modified
            req.headers['Content-length'] = str(len(req_body))

        u = urllib.parse.urlsplit(req.path)
        scheme, netloc, path = u.scheme, u.netloc, (u.path + '?' + u.query
                                                    if u.query else u.path)
        assert scheme in ('http', 'https')
        if netloc:
            req.headers['Host'] = netloc
        setattr(req, 'headers', self.filter_headers(req.headers))

        try:
            origin = (scheme, netloc)
            if origin not in self.tls.conns:
                if scheme == 'https':
                    self.tls.conns[origin] = http.client.HTTPSConnection(
                        netloc,
                        timeout=self.timeout,
                        context=ssl._create_unverified_context())
                else:
                    self.tls.conns[origin] = http.client.HTTPConnection(
                        netloc, timeout=self.timeout)
            conn = self.tls.conns[origin]
            conn.request(self.command, path, req_body, dict(req.headers))
            res = conn.getresponse()

            version_table = {10: 'HTTP/1.0', 11: 'HTTP/1.1'}
            setattr(res, 'headers', res.msg)
            setattr(res, 'response_version', version_table[res.version])

            # support streaming
            if 'Content-Length' not in res.headers and 'no-store' in res.headers.get(
                    'Cache-Control', ''):
                self.response_handler(req, req_body, res, '')
                setattr(res, 'headers', self.filter_headers(res.headers))
                self.relay_streaming(res)
                with self.lock:
                    self.save_handler(req, req_body, res, '')
                return

            res_body = res.read()
        except Exception as e:
            if origin in self.tls.conns:
                del self.tls.conns[origin]
            self.send_error(502)
            return

        content_encoding = res.headers.get('Content-Encoding', 'identity')
        res_body = self.decode_content_body(res_body, content_encoding)

        charset = 'utf-8'  # fallback when no charset is declared
        try:
            charset = re.search(r'charset=(\S+)',
                                res.headers.get('Content-type', ''))[1]
            print('decoding', charset)
            res_body_plain = res_body.decode(charset)
        except (KeyError, TypeError):
            print('no charset found')
            res_body_plain = ''

        res_body_modified = self.response_handler(req, req_body, res,
                                                  res_body_plain)
        if res_body_modified is False:
            self.send_error(403)
            return
        elif res_body_modified is not None:
            res_body_plain = res_body_modified
            res_body = self.encode_content_body(res_body_plain.encode(charset),
                                                content_encoding)
            res.headers['Content-Length'] = str(len(res_body))

        setattr(res, 'headers', self.filter_headers(res.headers))

        print('sending response', res.status, res.reason)
        self.send_response(res.status, res.reason)
        for k, v in res.headers.items():
            self.send_header(k, v)
        self.end_headers()
        self.wfile.write(res_body)
        self.wfile.flush()

        print('saving handler')
        with self.lock:
            self.save_handler(req, req_body, res, res_body_plain)
        print('done')
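The handler caches one upstream connection per (scheme, netloc) origin in self.tls.conns so consecutive requests to the same host reuse the socket. The caching step as a standalone sketch (here the cache dict is passed in; in the handler above it lives in thread-local storage):

import http.client
import ssl

def get_connection(conns, scheme, netloc, timeout=10):
    """Return a cached HTTP(S) connection for the origin, creating it on first use."""
    origin = (scheme, netloc)
    if origin not in conns:
        if scheme == 'https':
            conns[origin] = http.client.HTTPSConnection(
                netloc, timeout=timeout,
                context=ssl._create_unverified_context())
        else:
            conns[origin] = http.client.HTTPConnection(netloc, timeout=timeout)
    return conns[origin]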
Exemplo n.º 44
0
    def _rest_call(self, action, resource, data, extra_headers=None):
        if self.retry >= MAX_RETRIES:
            LOG.error('RESTProxy: Max retries exceeded')
            # Get ready for the next set of operation
            self.retry = 0
            return 0, None, None, None
        uri = self.base_uri + resource
        body = json.dumps(data)
        headers = {}
        headers['Content-type'] = 'application/json'
        headers['X-Nuage-Organization'] = self.organization
        if self.auth:
            headers['Authorization'] = self.auth
        conn = None
        if extra_headers:
            headers.update(extra_headers)

        LOG.debug('Request uri: %s', uri)
        LOG.debug('Request headers: %s', headers)
        LOG.debug('Request body: %s', body)

        if self.serverssl:
            conn = httplib.HTTPSConnection(
                # original implementation below
                #                self.server, self.port, timeout=self.timeout)
                # workaround implementation below
                self.server,
                self.port,
                timeout=self.timeout,
                context=ssl._create_unverified_context())
            if conn is None:
                LOG.error('RESTProxy: Could not establish HTTPS connection')
                return 0, None, None, None
        else:
            conn = httplib.HTTPConnection(self.server,
                                          self.port,
                                          timeout=self.timeout)
            if conn is None:
                LOG.error('RESTProxy: Could not establish HTTP connection')
                return 0, None, None, None

        try:
            conn.request(action, uri, body, headers)
            response = conn.getresponse()
            respstr = response.read()
            respdata = respstr
            LOG.debug('Response status is %(st)s and reason is %(res)s', {
                'st': response.status,
                'res': response.reason
            })
            LOG.debug('Response data is %s', respstr)
            if response.status in self.success_codes:
                try:
                    respdata = json.loads(respstr)
                except ValueError:
                    # response was not JSON, ignore the exception
                    pass
            ret = (response.status, response.reason, respstr, respdata)
        except (socket.timeout, socket.error) as e:
            LOG.error('ServerProxy: %(action)s failure, %(e)r', locals())
            # retry
            self.retry += 1
            return self._rest_call(action, resource, data, extra_headers)
        conn.close()
        if response.status == 503:
            if self.retry_503 < MAX_RETRIES_503:
                time.sleep(1)
                self.retry_503 += 1
                LOG.debug('VSD unavailable. Retrying')
                return self._rest_call(action,
                                       resource,
                                       data,
                                       extra_headers=extra_headers)
            else:
                LOG.debug('After 5 retries VSD is unavailable. Bailing out')
        self.retry = 0
        self.retry_503 = 0
        return ret
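_rest_call layers two retry budgets: transport errors count against MAX_RETRIES, and HTTP 503 responses get their own MAX_RETRIES_503 loop with a one-second pause. The 503 loop reduced to a sketch (do_request is a stand-in callable returning (status, body); it is not part of the original class):

import time

def call_with_503_retries(do_request, max_retries=5, delay=1):
    """Retry do_request while it returns HTTP 503, at most max_retries extra times."""
    status, body = do_request()
    for _ in range(max_retries):
        if status != 503:
            break
        time.sleep(delay)  # service unavailable; give the server a moment
        status, body = do_request()
    return status, body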
Exemplo n.º 45
0
File: web.py Project: wangvsa/spack
def read_from_url(url, accept_content_type=None):
    url = url_util.parse(url)
    context = None

    verify_ssl = spack.config.get('config:verify_ssl')

    # Don't even bother with a context unless the URL scheme is one that uses
    # SSL certs.
    if uses_ssl(url):
        if verify_ssl:
            if __UNABLE_TO_VERIFY_SSL:
                # User wants SSL verification, but it cannot be provided.
                warn_no_ssl_cert_checking()
            else:
                # User wants SSL verification, and it *can* be provided.
                context = ssl.create_default_context()  # novm
        else:
            # User has explicitly indicated that they do not want SSL
            # verification.
            if not __UNABLE_TO_VERIFY_SSL:
                context = ssl._create_unverified_context()

    req = Request(url_util.format(url))
    content_type = None
    is_web_url = url.scheme in ('http', 'https')
    if accept_content_type and is_web_url:
        # Make a HEAD request first to check the content type.  This lets
        # us ignore tarballs and gigantic files.
        # It would be nice to do this with the HTTP Accept header to avoid
        # one round-trip.  However, most servers seem to ignore the header
        # if you ask for a tarball with Accept: text/html.
        req.get_method = lambda: "HEAD"
        resp = _urlopen(req, timeout=_timeout, context=context)

        content_type = get_header(resp.headers, 'Content-type')

    # Do the real GET request when we know it's just HTML.
    req.get_method = lambda: "GET"

    try:
        response = _urlopen(req, timeout=_timeout, context=context)
    except URLError as err:
        raise SpackWebError('Download failed: {ERROR}'.format(
            ERROR=str(err)))

    if accept_content_type and not is_web_url:
        content_type = get_header(response.headers, 'Content-type')

    reject_content_type = (
        accept_content_type and (
            content_type is None or
            not content_type.startswith(accept_content_type)))

    if reject_content_type:
        tty.debug("ignoring page {0}{1}{2}".format(
            url_util.format(url),
            " with content type " if content_type is not None else "",
            content_type or ""))

        return None, None, None

    return response.geturl(), response.headers, response
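The context selection above has three outcomes: verified HTTPS, verification requested but unavailable (warn, no context), or verification explicitly disabled. Just that decision, as a sketch rather than the Spack API (can_verify stands in for the module's __UNABLE_TO_VERIFY_SSL flag):

import ssl

def make_context(verify_ssl, can_verify=True):
    """Choose an SSL context the way read_from_url does."""
    if verify_ssl:
        if not can_verify:
            return None  # caller should warn that verification cannot be provided
        return ssl.create_default_context()   # verified HTTPS
    return ssl._create_unverified_context()   # verification disabled by config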
Exemplo n.º 46
0
# -------------------------------------------------------------
# Get schedulerapi access and acquire session id
schedulerapi.connect()
sessionid = schedulerapi.getSession()

# -------------------------------------------------------------
# Import an external python script containing a collection of
# common utility Python functions and classes
PA_CATALOG_REST_URL = variables.get("PA_CATALOG_REST_URL")
PA_PYTHON_UTILS_URL = PA_CATALOG_REST_URL + "/buckets/ai-machine-learning/resources/Utils_Script/raw"
req = urllib.request.Request(PA_PYTHON_UTILS_URL)
req.add_header('sessionid', sessionid)
if PA_PYTHON_UTILS_URL.startswith('https'):
    content = urllib.request.urlopen(
        req, context=ssl._create_unverified_context()).read()
else:
    content = urllib.request.urlopen(req).read()
exec(content, globals())
global check_task_is_enabled, preview_dataframe_in_task_result
global get_and_decompress_dataframe, compress_and_transfer_dataframe
global assert_not_none_not_empty, get_input_variables

# -------------------------------------------------------------
# Check if the Python task is enabled or not
check_task_is_enabled()

# -------------------------------------------------------------
# Get data from the propagated variables
#
COLUMNS_NAME = variables.get("COLUMNS_NAME")
    def run(self, item, filename, filename_format='basename', no_clobber=True):
        """run downloader.

        If the filename format is not `basename`, then after the URL is downloaded to
        `filename`, the file is renamed according to the chosen `filename_format`.
        If a file with the new `filename_format` name already exists and `no_clobber` is `True`,
        the downloaded file is deleted and the download is counted as skipped.
        If a file with the new `filename_format` name already exists and `no_clobber` is `False`,
        the downloaded file replaces the existing file and the download is counted as a success.

        Args:
            item: Url to be downloaded.
            filename: Filename of the url.
            filename_format: Filename format of the url.
        """
        try:
            req = Request(item, headers={"User-Agent": self.ua.firefox})
            try:
                with urlopen(req) as response, \
                        open(filename, 'wb') as output_file:
                    data = response.read()
                    output_file.write(data)
            except ssl.CertificateError as e:
                logging.debug('Certificate error raised, retrying with an unverified context: %s', e)
                with urlopen(req, context=ssl._create_unverified_context()) as response, \
                        open(filename, 'wb') as output_file:
                    data = response.read()
                    output_file.write(data)

            # assume the file does not exist when another filename_format is chosen
            file_already_exist = False
            new_filename = None
            if filename_format == 'sha256':
                new_basename = sha256_checksum(filename=filename)  # without extension
                new_filename = rename_basename(old_filename=filename, new_basename=new_basename)

                new_filename_exist = os.path.isfile(new_filename)
                if new_filename_exist:
                    logging.debug('Exist: {}'.format(new_filename))

                if new_filename_exist and no_clobber:
                    file_already_exist = True
                    send2trash(filename)  # remove downloaded file
                else:
                    # this will rename or move based on the condition.
                    shutil.move(filename, new_filename)
            elif filename_format != 'basename':
                logging.debug('Unknown filename format: {}'.format(filename_format))

            if file_already_exist:
                print('Skipped\t\t====> {}'.format(new_filename))
                self.dl_counter += 1
            else:
                print("completed\t====> {}".format(filename))
                self.dl_counter += 1

        except HTTPError:  # most specific first: HTTPError subclasses URLError
            self.error_count += 1
            print("HTTPError {}".format(filename))

        except URLError:
            self.error_count += 1
            print("URLError {}".format(filename))

        except IOError:  # URLError subclasses OSError/IOError, so this must come last
            self.error_count += 1
            print("IOError on image {}".format(filename))
class TelegramBot:

    messageId = 0
    #Todo: Remove SSL Bypass
    context = ssl._create_unverified_context()
    botToken = ""

    # setup Bot
    def __init__(self, bottoken):
        self.botToken = bottoken

        print("TelegramBot initialized \n")

    # download new messages and return messagestack
    def getNewMessages(self):
        print("getNewMessages...\n")
        messageStack = []

        f = urllib.request.urlopen("https://api.telegram.org/bot"+self.botToken+"/getUpdates", context=self.context)

        data = json.load(f)

        for i in data['result']:
            if i['message']['message_id'] > self.messageId:
                self.messageId = i['message']['message_id']
                messageStack.append(i['message'])

        # print new messages
        print("new Messages:")
        for n in messageStack:
            print(n)
        print()

        return messageStack

    # download picture by fileId to darknet folder
    def getPicture(self,fileId):
        # Get path
        print("fileId:")
        print(fileId+"\n")
        print("Getting Path...\n")

        f = urllib.request.urlopen("https://api.telegram.org/bot" + self.botToken + "/getFile?file_id=" + fileId,
                                   context=self.context)
        data = json.load(f)
        filepath = data['result']['file_path']
        url = "https://api.telegram.org/file/bot" + self.botToken + "/" + filepath
        print(url +"\n")

        # download file
        print("Downloading file...\n")
        file = urllib.request.urlopen(url, context=self.context)

        # save to disk
        title = "userPicture"
        new_path ="darknet/"+ title + ".jpg"
        new_days = open(new_path, 'wb')

        new_days.write(file.read())
        new_days.close()

        print("file downloaded!\n")

    # send text to chatid
    def sendMessage(self, text, chatid):

        print("sending message...\n")
        # URL-encode the text so spaces and special characters survive the query string
        urllib.request.urlopen("https://api.telegram.org/bot" + self.botToken + "/sendmessage?chat_id=" + str(chatid) + "&text=" + urllib.parse.quote(str(text)), context=self.context)
        print("message sent!")
Exemplo n.º 49
0
def request(url,
            close=True,
            redirect=True,
            error=False,
            verify=True,
            proxy=None,
            post=None,
            headers=None,
            mobile=False,
            XHR=False,
            limit=None,
            referer=None,
            cookie=None,
            compression=False,
            output='',
            timeout='30',
            username=None,
            password=None,
            as_bytes=False):
    """
    Re-adapted from Twilight0's tulip module => https://github.com/Twilight0/script.module.tulip
    """

    try:
        url = six.ensure_text(url, errors='ignore')
    except Exception:
        pass

    if isinstance(post, dict):
        post = bytes(urlencode(post), encoding='utf-8')
    elif isinstance(post, str) and six.PY3:
        post = bytes(post, encoding='utf-8')

    try:
        handlers = []

        if username is not None and password is not None and not proxy:

            passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
            passmgr.add_password(None, uri=url, user=username, passwd=password)
            handlers += [urllib2.HTTPBasicAuthHandler(passmgr)]
            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)

        if proxy is not None:

            if username is not None and password is not None:

                if six.PY2:
                    passmgr = urllib2.ProxyBasicAuthHandler()
                else:
                    passmgr = urllib2.HTTPPasswordMgr()

                passmgr.add_password(None,
                                     uri=url,
                                     user=username,
                                     passwd=password)

                handlers += [
                    urllib2.ProxyHandler({'http': '{0}'.format(proxy)}),
                    urllib2.HTTPHandler,
                    urllib2.ProxyBasicAuthHandler(passmgr)
                ]
            else:
                handlers += [
                    urllib2.ProxyHandler({'http': '{0}'.format(proxy)}),
                    urllib2.HTTPHandler
                ]
            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)

        if output == 'cookie' or output == 'extended' or close is not True:

            cookies = cookielib.LWPCookieJar()
            handlers += [
                urllib2.HTTPHandler(),
                urllib2.HTTPSHandler(),
                urllib2.HTTPCookieProcessor(cookies)
            ]

            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)

        try:
            import platform
            is_XBOX = platform.uname()[1] == 'XboxOne'
        except Exception:
            is_XBOX = False

        if not verify and sys.version_info >= (2, 7, 12):

            try:

                import ssl
                ssl_context = ssl._create_unverified_context()
                handlers += [urllib2.HTTPSHandler(context=ssl_context)]
                opener = urllib2.build_opener(*handlers)
                urllib2.install_opener(opener)

            except Exception:

                pass

        elif verify and ((2, 7, 8) < sys.version_info < (2, 7, 12) or is_XBOX):

            try:

                import ssl
                try:
                    import _ssl
                    CERT_NONE = _ssl.CERT_NONE
                except Exception:
                    CERT_NONE = ssl.CERT_NONE
                ssl_context = ssl.create_default_context()
                ssl_context.check_hostname = False
                ssl_context.verify_mode = CERT_NONE
                handlers += [urllib2.HTTPSHandler(context=ssl_context)]
                opener = urllib2.build_opener(*handlers)
                urllib2.install_opener(opener)

            except Exception:

                pass

        # Default to an empty dict when no headers were supplied
        if not isinstance(headers, dict):
            headers = {}

        if 'User-Agent' in headers:
            pass
        elif mobile is not True:
            #headers['User-Agent'] = agent()
            headers['User-Agent'] = cache.get(randomagent, 12)
        else:
            headers['User-Agent'] = cache.get(randommobileagent, 12)

        if 'Referer' in headers:
            pass
        elif referer is None:
            headers['Referer'] = '%s://%s/' % (urlparse(url).scheme,
                                               urlparse(url).netloc)
        else:
            headers['Referer'] = referer

        if 'Accept-Language' not in headers:
            headers['Accept-Language'] = 'en-US'

        if 'X-Requested-With' in headers:
            pass
        elif XHR is True:
            headers['X-Requested-With'] = 'XMLHttpRequest'

        if 'Cookie' in headers:
            pass
        elif cookie is not None:
            headers['Cookie'] = cookie

        if 'Accept-Encoding' in headers:
            pass
        elif compression and limit is None:
            headers['Accept-Encoding'] = 'gzip'

        if redirect is False:

            class NoRedirectHandler(urllib2.HTTPRedirectHandler):
                def http_error_302(self, reqst, fp, code, msg, head):

                    infourl = addinfourl(fp, head, reqst.get_full_url())
                    infourl.status = code
                    infourl.code = code

                    return infourl

                http_error_300 = http_error_302
                http_error_301 = http_error_302
                http_error_303 = http_error_302
                http_error_307 = http_error_302

            opener = urllib2.build_opener(NoRedirectHandler())
            urllib2.install_opener(opener)

            try:
                del headers['Referer']
            except Exception:
                pass

        req = urllib2.Request(url, data=post, headers=headers)

        try:

            response = urllib2.urlopen(req, timeout=int(timeout))

        except HTTPError as http_error:
            # Python 3 clears the "as" name when the except block ends,
            # so rebind the error before using it as the response object.
            response = http_error

            if response.code == 503:

                if 'cf-browser-verification' in response.read(5242880):
                    from resources.lib.modules import cfscrape

                    netloc = '{0}://{1}'.format(
                        urlparse(url).scheme,
                        urlparse(url).netloc)

                    ua = headers['User-Agent']

                    #cf = cache.get(Cfcookie.get, 168, netloc, ua, timeout)
                    try:
                        cf = cache.get(cfscrape.get_cookie_string, 1, netloc,
                                       ua)[0]
                    except BaseException:
                        try:
                            cf = cfscrape.get_cookie_string(url, ua)[0]
                        except BaseException:
                            cf = None
                    finally:
                        headers['Cookie'] = cf

                    req = urllib2.Request(url, data=post, headers=headers)

                    response = urllib2.urlopen(req, timeout=int(timeout))

                elif error is False:
                    return

            elif error is False:
                return

        if output == 'cookie':

            try:
                result = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except Exception:
                pass

            try:
                result = cf
            except Exception:
                pass

        elif output == 'response':

            if limit == '0':
                result = (str(response.code), response.read(224 * 1024))
            elif limit is not None:
                result = (str(response.code), response.read(int(limit) * 1024))
            else:
                result = (str(response.code), response.read(5242880))

        elif output == 'chunk':

            try:
                content = int(response.headers['Content-Length'])
            except Exception:
                content = (2049 * 1024)

            if content < (2048 * 1024):
                return
            result = response.read(16 * 1024)

        elif output == 'extended':

            try:
                cookie = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except Exception:
                pass

            try:
                cookie = cf
            except Exception:
                pass

            content = response.headers
            result = response.read(5242880)

            if not as_bytes:

                result = six.ensure_text(result, errors='ignore')

            return result, headers, content, cookie

        elif output == 'geturl':

            result = response.geturl()

        elif output == 'headers':

            content = response.headers

            if close:
                response.close()

            return content

        elif output == 'file_size':

            try:
                content = int(response.headers['Content-Length'])
            except Exception:
                content = '0'

            response.close()

            return content

        elif output == 'json':

            content = json.loads(response.read(5242880))

            response.close()

            return content

        else:

            if limit == '0':
                result = response.read(224 * 1024)
            elif limit is not None:
                if isinstance(limit, int):
                    result = response.read(limit * 1024)
                else:
                    result = response.read(int(limit) * 1024)
            else:
                result = response.read(5242880)

        if close is True:
            response.close()

        if not as_bytes:

            result = six.ensure_text(result, errors='ignore')

        return result

    except Exception:

        log_utils.log('Client request failed on url: ' + url + ' | Reason', 1)

        return
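Illustrative calls showing how the output parameter selects the return value (URLs are placeholders):

html = request('https://example.com/page')                          # body as text
data = request('https://example.com/api', output='json')            # parsed JSON
size = request('https://example.com/file.bin', output='file_size')  # Content-Length
body, hdrs, content, cookie = request('https://example.com', output='extended')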
Exemplo n.º 50
0
def handle_message(event):

    profile = line_bot_api.get_profile(event.source.user_id)

    input_str = event.message.text

    print(profile.display_name)
    ##
    fields = input_str.split()
    arg_ss = fields[0]
    arg_checkin_year = fields[1]
    arg_checkin_month = fields[2]
    arg_checkin_monthday = fields[3]
    arg_checkout_year = fields[4]
    arg_checkout_month = fields[5]
    arg_ckeckout_monthday = fields[6]
    arg_adults = fields[7]
    arg_group_children = fields[8]
    url = 'https://www.booking.com/searchresults.zh-tw.html?ss=\"' + quote(str(arg_ss)) + '\"&checkin_year=' + quote(str(arg_checkin_year)) + '&checkin_month=' + quote(str(arg_checkin_month)) + '&checkin_monthday=' + quote(str(arg_checkin_monthday)) + '&checkout_year=' + quote(str(arg_checkout_year)) +  '&checkout_month=' + quote(str(arg_checkout_month)) + '&ckeckout_monthday=' + quote(str(arg_ckeckout_monthday)) + '&group_adults=' + quote(str(arg_adults)) + '&group_children=' + quote(str(arg_group_children))
    ##
    print("===================================")
    print(url)
    print("===================================")
    ####
    context = ssl._create_unverified_context()

    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}
    #headers = {"User-Agent": "curl/7.19.7 (universal-apple-darwin10.0) libcurl/7.19.7 OpenSSL/0.9.8r zlib/1.2.3"}

    request = urllib.request.Request(url, headers = headers)

    response = urllib.request.urlopen(request, context = context)

    soup = BeautifulSoup(response, 'html.parser')

    ##
    ### Hotel Title ###
    name = soup.select('.sr-hotel__name')
    name_list = []

    for i in range(len(name)):
        a = name[i].contents
        name_list.append(a[0].split("\n")[1])

    ### Hotel Price ###
    #price availprice no_rack_rate
    price_list = soup.find_all("strong", {"class":"price"})
    prices_list = []

    for price in price_list:
        # Price text looks like "NT$ 1,234"; keep the number and drop the commas
        p = price.text.replace(u'\xa0', u' ').replace("\n", "").split()[1]
        prices_list.append(int(p.replace(',', '')))

    ### Hotel Score ###
    star = soup.find_all("span",{"class":"review-score-badge"})
    score_list = []


    for i in range(len(star)):
    #     print(star[i].text.split()[0])
        score_list.append(star[i].text.split()[0])

    ### Hotel URL ###
    link = soup.find_all("a",{"class":"hotel_name_link url"})
    url1 = "https://www.booking.com/"
    url_list = []

    for i in range(len(link)):
        url2 = link[i].get('href').split("\n")[1]
        url3 = link[i].get('href').split("\n")[2]
        url = url1 + url2 + url3
        url_list.append(url)

    ### Hotel Image URL ###
    img_url = soup.find_all("img",{"class":"hotel_image"})
    img_list = []

    for i in range(len(img_url)):
        img_list.append(img_url[i].get('src'))


    try:
        # length = min(len(name_list),len(prices_list),len(star),len(link),len(img_url))
        length = min(len(name_list),len(star),len(link),len(img_url))


        print("===================================")
        print(length)
        print(len(name_list))
        print(len(star))
        print(len(link))
        print(len(img_url))
        print("===================================")

        end = length - 2
        name_list = name_list[0:end]
        # prices_list = prices_list[0:end]
        url_list = url_list[0:end]
        score_list = score_list[0:end]
        img_list = img_list[0:end]

        hotel_df = pd.DataFrame({"Hotel_name":name_list,"Hotel_url":url_list,"Hotel_score":score_list,"Hotel_pic":img_list}) #"Hotel_price":prices_list,
        hotel_df = hotel_df.sort_values(by = ['Hotel_score'],ascending=False).reset_index(drop=True)

        hotel_df = hotel_df[:6]
        hotel_df = hotel_df[["Hotel_name","Hotel_url","Hotel_score","Hotel_pic"]]
        print(hotel_df.iloc[0,0])
        print(hotel_df.iloc[0,1])
        print(hotel_df.iloc[0,2])
        print(hotel_df.iloc[0,3])

        carousel_template = CarouselTemplate(

            columns=[
                CarouselColumn(
                    thumbnail_image_url=hotel_df.iloc[0,3],
                    title = hotel_df.iloc[0,0][:20],
                    text = '(⊙0⊙)首推訂房(⊙0⊙)',
                    actions=[
                        URITemplateAction(
                            label='評分為:'+str(hotel_df.iloc[0,2]),
                            uri=hotel_df.iloc[0,1]
                        )
                    ]
                ),
                CarouselColumn(
                    thumbnail_image_url=hotel_df.iloc[1,3],
                    title = hotel_df.iloc[1,0][:20],
                    text = '(⊙ˍ⊙)激推訂房(⊙ˍ⊙)',
                    actions=[
                        URITemplateAction(
                            label='評分為:'+str(hotel_df.iloc[1,2]),
                            uri=hotel_df.iloc[1,1]
                        )
                    ]
                ),
                CarouselColumn(
                    thumbnail_image_url=hotel_df.iloc[2,3],
                    title = hotel_df.iloc[2,0][:20],
                    text = '(ˊ● ω ●ˋ)再推訂房(ˊ● ω ●ˋ)',
                    actions=[
                        URITemplateAction(
                            label='評分為:'+str(hotel_df.iloc[2,2]),
                            uri=hotel_df.iloc[2,1]
                        )
                    ]
                ),
                CarouselColumn(
                    thumbnail_image_url=hotel_df.iloc[3,3],
                    title = hotel_df.iloc[3,0][:20],
                    text = '(♡∀♡)再推一個(♡∀♡)',
                    actions=[
                        URITemplateAction(
                            label='評分為:'+str(hotel_df.iloc[3,2]),
                            uri=hotel_df.iloc[3,1]
                        )
                    ]
                ),
                CarouselColumn(
                    thumbnail_image_url=hotel_df.iloc[4,3],
                    title = hotel_df.iloc[4,0][:20],
                    text = '(´・ω・`)選我選我(´・ω・`)',
                    actions=[
                        URITemplateAction(
                            label='評分為:'+str(hotel_df.iloc[4,2]),
                            uri=hotel_df.iloc[4,1]
                        )
                    ]
                )
            ]     
        )

    except:
        error_message = "天啊!出錯了!\n您可能是遇到了以下問題。\n1.你的指令打錯了。\n指令是:訂房地點(ex:桃園) 訂房日期(ex:2018 6 30) 退房日期(ex:2018 7 2) 訂房人數,分別為大人個數與小孩個數(ex:4 2)\n每個之間都有空格噢\n範例:桃園 2018 6 30 2018 7 2 4 2\n2.您所蒐尋的日期還未提供訂房資訊,要再等一等噢!\n3.沒有相關訂房資訊,建議您換個關鍵字。\n"
        message = TextSendMessage(text=error_message)
        line_bot_api.reply_message(event.reply_token, message)

        msg = Message(api_key="bf6041f2-134c-4c38-a42e-6540684311d0",
              type="user",
              platform="Line",
              version="1.0",
              user_id=profile.display_name,
              message=error_message,
              intent="LinebotSearchPostion",  
              not_handled=True,           
              )            
        resp = msg.send()


    ##################
    # output = ""
    # for i in range(6):
    #     tmp = hotel_df.iloc[i,0] + " " + hotel_df.iloc[i,1] + " " + hotel_df.iloc[i,2]
    #     output = output + tmp
    #     output = output + "\n"
    #     tmp = ""
    # output = hotel_df.iloc[0,1]

    ####
    # Carousel_template = hotel_df.iloc[0,3]
    # message = TextSendMessage(text=output)
    template_message = TemplateSendMessage(
        alt_text = '距離最近的五間旅館', 
        template = carousel_template
    )
    
    line_bot_api.reply_message(event.reply_token, template_message)

    msg = Message(api_key="bf6041f2-134c-4c38-a42e-6540684311d0",
              type="user",
              platform="Line",
              version="1.0",
              user_id=profile.display_name,
              message=event.message.text,
              intent="LinebotSearchPostion",  
              not_handled=False,           
              )            
    resp = msg.send()
Exemplo n.º 51
0
import json
import urllib2
import ssl
from graph_db import graph

CONTEXT = ssl._create_unverified_context()

URL = 'http://grds.paypalinc.com/evebuilder/api/metadata/raw_variables?searchType=\
by_definition&searchData={event},{key},,,,,&searchSimilar=false'

MAP_EVENT_VAR = "match(e:EventKey{{name: '{eventkey}' }}) \
                 match(v:Var{{ name: '{var}'}})           \
                 merge (v)-[:ATTRIBUTE_OF] ->(e) "

SET_EVE_PROPERTY = "match(v:Var{{ name: '{var}'}}) set v.filter = '{filter}', v.eve_type= '{type}', v.eve_key='{key}', v.target='{target}', v.function='{function}' "
def parse_var_event(event, key, table_name):
    print URL.format(event=event, key=key)
    opener = urllib2.build_opener()
    opener.addheaders.append(('Cookie', 'edge_builder=s%3A_duW0dyMu00pX0bA2vRtFxLZEOLp2Xlx.31O43pfFQJn8m0PdmI9swn9l5IfvSs2e%2FZFgYImGZp8'))
    html = opener.open(URL.format(event=event, key=key))
    print html
    json_file = json.loads(html.read())
    var_list = json_file['data']
    for var in var_list:
        json_keys = var.keys()
        for updator in var['updators']:
            if updator['rollupMessages'] == event:
                #print var['name'], var['type'], updator['filter'], updator['key'], updator['target'],updator['func']
                #print var['type']
                graph.cypher.execute(SET_EVE_PROPERTY.format(var=var['name'], filter= updator['filter'] , \
Exemplo n.º 52
0
def getTypicalNoticeLastid(deptCode: str):
    deptType = getDeptType(deptCode)
    query = "list_id=" + deptCode

    # Campus-wide (general) notices
    if deptType == DepartmentType.General:
        url = "https://www.uos.ac.kr/korNotice/list.do?" + query

        try:
            context = ssl._create_unverified_context()
            req = Request(url)
            res = urlopen(req, context=context)
            html = res.read()

            soup = BeautifulSoup(html, "html.parser")

            noticeListSoup = soup.find("ul", class_="listType")
            lastNoticeSoup = noticeListSoup.find("a", {"href": "#"})
            # Extract only the function parameters from onclick = fnView('1', '22529');
            matched = re.match(r"[^(]*\(([^)]*)\)", lastNoticeSoup["onclick"])
            paramList = matched[1].split(",")
            # The second parameter is the listId, so return only that
            lastId = paramList[1].replace("'", "")
            return int(lastId)
        except Exception:
            print("Failed to fetch the last notice listId -> " + deptCode)

    # Colleges of Engineering / Politics & Economics / Humanities / Natural Science
    elif deptType == DepartmentType.Engineering or deptType == DepartmentType.Humanities or deptType == DepartmentType.NaturalScience:
        url = "https://www.uos.ac.kr/korNotice/list.do?" + query

        try:
            context = ssl._create_unverified_context()
            req = Request(url)
            res = urlopen(req, context=context)
            html = res.read()

            soup = BeautifulSoup(html, "html.parser")

            noticeListContainerSoup = soup.find("div", class_="table-style")
            for noticeItemSoup in noticeListContainerSoup.find_all(
                    "div", class_="tb-body"):
                if noticeItemSoup.find(lambda tag: tag.name == "ul"
                                       and tag.get("class") == ['clearfix']):
                    noticeListSoup = noticeItemSoup.find(
                        lambda tag: tag.name == "ul"
                        and tag.get("class") == ['clearfix'])
                    break
            lastNoticeSoup = noticeListSoup.find("a", {"href": "#a"})
            # Extract only the function parameters from onclick = fnView('1', '22529');
            matched = re.match(r"[^(]*\(([^)]*)\)", lastNoticeSoup["onclick"])
            paramList = matched[1].split(",")
            # The second parameter is the listId, so return only that
            lastId = paramList[1].replace("'", "")
            return int(lastId)
        except:
            print("공지사항 마지막 listId를 불러오는데 실패 -> " + deptCode)

    # College of Business
    elif deptType == DepartmentType.Business:
        url = "https://biz.uos.ac.kr/korNotice/list.do?" + query
        selector = "#container > div > ul > li:nth-child(6) > a"
        attributeType = AttributeType.Href

        return getLastNoticeListId(deptCode, url, selector, attributeType)

    # International exchange office
    elif deptType == DepartmentType.InterChange:
        url = "https://kiice.uos.ac.kr/korNotice/list.do?" + query
        selector = "#subContents > table > tbody > tr:nth-child(3) > td.title > a"
        attributeType = AttributeType.Onclick

        return getLastNoticeListId(deptCode, url, selector, attributeType)

    # Dormitory
    elif deptType == DepartmentType.Dormitory:
        url = "https://dormitory.uos.ac.kr/korNotice/list.do?" + query
        selector = "#container > div.subCont > div.contents > ul > li:nth-child(1) > a"
        attributeType = AttributeType.Href

        return getLastNoticeListId(deptCode, url, selector, attributeType)

    # College of Political Science and Economics
    elif deptType == DepartmentType.Economics:
        subQuery = "&cate_id2=000010005"
        url = "https://www.uos.ac.kr/social/korNotice/list.do?" + query + subQuery
        selector = "#content02 > div.sc-right > div.table-style > div:nth-child(2) > ul > li.tb-wid02.txl > a"
        attributeType = AttributeType.Onclick

        return getLastNoticeListId(deptCode, url, selector, attributeType)
Exemplo n.º 53
0
def request(url, close=True, redirect=True, error=False, verify=True, proxy=None, post=None, headers=None, mobile=False,
            XHR=False, limit=None, referer=None, cookie=None, compression=True, output='', timeout='30'):
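    # Catch-all urllib2 fetch helper: optional proxy and cookie handling,
    # unverified-SSL fallbacks for old Python 2.7 builds, Cloudflare and Sucuri
    # challenge retries, and several 'output' modes (cookie, geturl, headers,
    # chunk, file_size, extended).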
    try:
        if not url:
            return
        handlers = []
        if proxy is not None:
            handlers += [urllib2.ProxyHandler({'http': '%s' % (proxy)}), urllib2.HTTPHandler]
            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)
        if output in ('cookie', 'extended') or close is not True:
            cookies = cookielib.LWPCookieJar()
            handlers += [urllib2.HTTPHandler(), urllib2.HTTPSHandler(), urllib2.HTTPCookieProcessor(cookies)]
            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)
        try:
            import platform
            node = platform.node().lower()
            is_XBOX = platform.uname()[1] == 'XboxOne'
        except Exception:
            node = ''
            is_XBOX = False
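        # Python >= 2.7.12 with verify=False: opt out of certificate checks entirely.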
        if verify is False and sys.version_info >= (2, 7, 12):
            try:
                import ssl
                ssl_context = ssl._create_unverified_context()
                handlers += [urllib2.HTTPSHandler(context=ssl_context)]
                opener = urllib2.build_opener(*handlers)
                urllib2.install_opener(opener)
            except:
                pass
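        # Python 2.7.9-2.7.11 (or XboxOne) enabled verification by default but
        # commonly fails hostname checks, so relax it even when verify=True.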
        if verify is True and ((2, 7, 8) < sys.version_info < (2, 7, 12) or is_XBOX):
            try:
                import ssl
                ssl_context = ssl.create_default_context()
                ssl_context.check_hostname = False
                ssl_context.verify_mode = ssl.CERT_NONE
                handlers += [urllib2.HTTPSHandler(context=ssl_context)]
                opener = urllib2.build_opener(*handlers)
                urllib2.install_opener(opener)
            except:
                pass
        if url.startswith('//'): url = 'http:' + url
        _headers = {}
        try:
            _headers.update(headers)
        except:
            pass
        if 'User-Agent' in _headers:
            pass
        elif mobile is True:
            _headers['User-Agent'] = cache.get(randommobileagent, 1)
        else:
            _headers['User-Agent'] = cache.get(randomagent, 1)
        if 'Referer' in _headers:
            pass
        elif referer is not None:
            _headers['Referer'] = referer
        if 'Accept-Language' not in _headers:
            _headers['Accept-Language'] = 'en-US'
        if 'X-Requested-With' in _headers:
            pass
        elif XHR is True:
            _headers['X-Requested-With'] = 'XMLHttpRequest'
        if 'Cookie' in _headers:
            pass
        elif cookie is not None:
            _headers['Cookie'] = cookie
        if 'Accept-Encoding' in _headers:
            pass
        elif compression and limit is None:
            _headers['Accept-Encoding'] = 'gzip'
        if redirect is False:
            class NoRedirectHandler(urllib2.HTTPRedirectHandler):
                def http_error_302(self, req, fp, code, msg, headers):
                    infourl = urllib.addinfourl(fp, headers, req.get_full_url())
                    infourl.status = code
                    infourl.code = code
                    return infourl

                http_error_300 = http_error_302
                http_error_301 = http_error_302
                http_error_303 = http_error_302
                http_error_307 = http_error_302

            opener = urllib2.build_opener(NoRedirectHandler())
            urllib2.install_opener(opener)
            try:
                del _headers['Referer']
            except:
                pass
        if isinstance(post, dict):
            post = utils.byteify(post)
            post = urllib.urlencode(post)
        url = utils.byteify(url)
        request = urllib2.Request(url, data=post)
        _add_request_header(request, _headers)
        try:
            response = urllib2.urlopen(request, timeout=int(timeout))
        except urllib2.HTTPError as response:
            if response.code == 503:
                cf_result = response.read(5242880)
                try:
                    encoding = response.info().getheader('Content-Encoding')
                except:
                    encoding = None
                if encoding == 'gzip':
                    cf_result = gzip.GzipFile(fileobj=StringIO.StringIO(cf_result)).read()
                if 'cf-browser-verification' in cf_result:
                    while 'cf-browser-verification' in cf_result:
                        netloc = '%s://%s/' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
                        ua = _headers['User-Agent']
                        cf = cache.get(cfcookie().get, 1, netloc, ua, timeout)
                        _headers['Cookie'] = cf
                        request = urllib2.Request(url, data=post)
                        _add_request_header(request, _headers)
                        try:
                            response = urllib2.urlopen(request, timeout=int(timeout))
                            cf_result = 'Success'
                        except urllib2.HTTPError as response:
                            cache.remove(cfcookie().get, netloc, ua, timeout)
                            cf_result = response.read()
                else:
                    log_utils.log('Request-Error (%s): %s' % (str(response.code), url), log_utils.LOGDEBUG)
                    if error is False:
                        return
            else:
                log_utils.log('Request-Error (%s): %s' % (str(response.code), url), log_utils.LOGDEBUG)
                if error is False:
                    return
        if output == 'cookie':
            try:
                result = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])
            except:
                pass
            try:
                result = cf
            except:
                pass
            if close is True:
                response.close()
            return result
        elif output == 'geturl':
            result = response.geturl()
            if close is True: response.close()
            return result
        elif output == 'headers':
            result = response.headers
            if close is True: response.close()
            return result
        elif output == 'chunk':
            try:
                content = int(response.headers['Content-Length'])
            except:
                content = (2049 * 1024)
            if content < (2048 * 1024): return
            result = response.read(16 * 1024)
            if close is True: response.close()
            return result
        elif output == 'file_size':
            try:
                content = int(response.headers['Content-Length'])
            except:
                content = '0'
            response.close()
            return content
        if limit == '0':
            result = response.read(224 * 1024)
        elif limit is not None:
            result = response.read(int(limit) * 1024)
        else:
            result = response.read(5242880)
        try:
            encoding = response.info().getheader('Content-Encoding')
        except:
            encoding = None
        if encoding == 'gzip':
            result = gzip.GzipFile(fileobj=StringIO.StringIO(result)).read()
        if 'sucuri_cloudproxy_js' in result:
            su = sucuri().get(result)
            _headers['Cookie'] = su
            request = urllib2.Request(url, data=post)
            _add_request_header(request, _headers)
            response = urllib2.urlopen(request, timeout=int(timeout))
            if limit == '0':
                result = response.read(224 * 1024)
            elif limit is not None:
                result = response.read(int(limit) * 1024)
            else:
                result = response.read(5242880)
            try:
                encoding = response.info().getheader('Content-Encoding')
            except:
                encoding = None
            if encoding == 'gzip':
                result = gzip.GzipFile(fileobj=StringIO.StringIO(result)).read()
        if 'Blazingfast.io' in result and 'xhr.open' in result:
            netloc = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
            ua = _headers['User-Agent']
            _headers['Cookie'] = cache.get(bfcookie().get, 168, netloc, ua, timeout)
            result = _basic_request(url, headers=_headers, post=post, timeout=timeout, limit=limit)
        if output == 'extended':
            try:
                response_headers = dict([(item[0].title(), item[1]) for item in response.info().items()])
            except:
                response_headers = response.headers
            response_code = str(response.code)
            try:
                cookie = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])
            except:
                pass
            try:
                cookie = cf
            except:
                pass
            if close is True: response.close()
            return (result, response_code, response_headers, _headers, cookie)
        else:
            if close is True: response.close()
            return result
    except Exception as e:
        log_utils.log('Request-Error: (%s) => %s' % (str(e), url), log_utils.LOGDEBUG)
        return
Exemplo n.º 54
0
def request(url,
            close=True,
            redirect=True,
            error=False,
            proxy=None,
            post=None,
            headers=None,
            mobile=False,
            XHR=False,
            limit=None,
            referer=None,
            cookie=None,
            compression=True,
            output='',
            timeout='30',
            verifySsl=True,
            flare=True,
            ignoreErrors=None,
            as_bytes=False):
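    # Py2/py3-compatible fetch helper (via py_tools): proxies, cookie capture,
    # unverified-SSL handling for old interpreters, Cloudflare (cfscrape),
    # Sucuri and Blazingfast challenges, plus the usual 'output' modes.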
    try:
        if not url: return None
        if url.startswith('//'): url = 'http:' + url
        try:
            url = py_tools.ensure_text(url, errors='ignore')
        except:
            pass

        if isinstance(post, dict):
            post = bytes(urlencode(post), encoding='utf-8')
        elif isinstance(post, str) and py_tools.isPY3:
            post = bytes(post, encoding='utf-8')

        handlers = []
        if proxy is not None:
            handlers += [
                urllib2.ProxyHandler({'http': '%s' % (proxy)}),
                urllib2.HTTPHandler
            ]
            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)

        if output == 'cookie' or output == 'extended' or close is not True:
            cookies = cookielib.LWPCookieJar()
            handlers += [
                urllib2.HTTPHandler(),
                urllib2.HTTPSHandler(),
                urllib2.HTTPCookieProcessor(cookies)
            ]
            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)

        if not verifySsl and version_info >= (2, 7, 12):
            try:
                import ssl
                ssl_context = ssl._create_unverified_context()
                handlers += [urllib2.HTTPSHandler(context=ssl_context)]
                opener = urllib2.build_opener(*handlers)
                urllib2.install_opener(opener)
            except:
                from fenomscrapers.modules import log_utils
                log_utils.error()

        if verifySsl and ((2, 7, 8) < version_info < (2, 7, 12)):
            try:
                import ssl
                try:
                    import _ssl
                    CERT_NONE = _ssl.CERT_NONE
                except Exception:
                    CERT_NONE = ssl.CERT_NONE
                ssl_context = ssl.create_default_context()
                ssl_context.check_hostname = False
                ssl_context.verify_mode = CERT_NONE
                handlers += [urllib2.HTTPSHandler(context=ssl_context)]
                opener = urllib2.build_opener(*handlers)
                urllib2.install_opener(opener)
            except:
                from fenomscrapers.modules import log_utils
                log_utils.error()

        if not isinstance(headers, dict):
            headers = {}

        if 'User-Agent' in headers: pass
        elif mobile is not True:
            headers['User-Agent'] = cache.get(randomagent, 12)
        else:
            headers['User-Agent'] = 'Apple-iPhone/701.341'
        if 'Referer' in headers: pass
        elif referer is not None: headers['Referer'] = referer
        if 'Accept-Language' not in headers:
            headers['Accept-Language'] = 'en-US'
        if 'X-Requested-With' in headers: pass
        elif XHR: headers['X-Requested-With'] = 'XMLHttpRequest'
        if 'Cookie' in headers: pass
        elif cookie: headers['Cookie'] = cookie
        if 'Accept-Encoding' in headers: pass
        elif compression and limit is None: headers['Accept-Encoding'] = 'gzip'

        if redirect is False:

            class NoRedirectHandler(urllib2.HTTPRedirectHandler):
                def http_error_302(self, reqst, fp, code, msg, head):
                    infourl = addinfourl(fp, head, reqst.get_full_url())
                    infourl.status = code
                    infourl.code = code
                    return infourl

                http_error_300 = http_error_302
                http_error_301 = http_error_302
                http_error_303 = http_error_302
                http_error_307 = http_error_302

            opener = urllib2.build_opener(NoRedirectHandler())
            urllib2.install_opener(opener)
            try:
                del headers['Referer']
            except:
                pass

        req = urllib2.Request(url, data=post)
        _add_request_header(req, headers)
        try:
            response = urllib2.urlopen(req, timeout=int(timeout))
        except HTTPError as error_response:  # if HTTPError, using "as response" will be reset after entire Exception code runs and throws error around line 247 as "local variable 'response' referenced before assignment", re-assign it
            response = error_response
            try:
                ignore = ignoreErrors and (int(response.code) == ignoreErrors
                                           or int(
                                               response.code) in ignoreErrors)
            except:
                ignore = False

            if not ignore:
                if response.code in [
                        301, 307, 308, 503, 403
                ]:  # 403:Forbidden added 3/3/21 for cloudflare, fails on bad User-Agent
                    cf_result = response.read(5242880)
                    try:
                        encoding = response.headers["Content-Encoding"]
                    except:
                        encoding = None
                    if encoding == 'gzip':
                        cf_result = gzip.GzipFile(
                            fileobj=StringIO(cf_result)).read()

                    if flare and 'cloudflare' in str(response.info()).lower():
                        from fenomscrapers.modules import log_utils
                        log_utils.log(
                            'client module calling cfscrape: url=%s' % url,
                            level=log_utils.LOGDEBUG)
                        try:
                            from fenomscrapers.modules import cfscrape
                            if isinstance(post, dict): data = post
                            else:
                                try:
                                    data = parse_qs(post)
                                except:
                                    data = None
                            scraper = cfscrape.CloudScraper()
                            if response.code == 403:  # possible bad User-Agent in headers, let cfscrape assign
                                response = scraper.request(
                                    method='GET' if post is None else 'POST',
                                    url=url,
                                    data=data,
                                    timeout=int(timeout))
                            else:
                                response = scraper.request(
                                    method='GET' if post is None else 'POST',
                                    url=url,
                                    headers=headers,
                                    data=data,
                                    timeout=int(timeout))
                            result = response.content
                            flare = 'cloudflare'  # Used below
                            try:
                                cookies = response.request._cookies
                            except:
                                log_utils.error()
                            if response.status_code == 403:  # if cfscrape server still responds with 403
                                log_utils.log(
                                    'cfscrape-Error url=(%s): %s' %
                                    (url, 'HTTP Error 403: Forbidden'),
                                    __name__,
                                    level=log_utils.LOGDEBUG)
                                return None
                        except:
                            log_utils.error()
                    elif 'cf-browser-verification' in cf_result:
                        netloc = '%s://%s' % (urlparse(url).scheme,
                                              urlparse(url).netloc)
                        ua = headers['User-Agent']
                        cf = cache.get(cfcookie().get, 168, netloc, ua,
                                       timeout)
                        headers['Cookie'] = cf
                        req = urllib2.Request(url, data=post)
                        _add_request_header(req, headers)
                        response = urllib2.urlopen(req, timeout=int(timeout))
                    else:
                        if error is False:
                            from fenomscrapers.modules import log_utils
                            log_utils.error('Request-Error url=(%s)' % url)
                            return None
                else:
                    if error is False:
                        from fenomscrapers.modules import log_utils
                        log_utils.error('Request-Error url=(%s)' % url)
                        return None
                    elif error is True and response.code in [
                            401, 404, 405
                    ]:  # no point in continuing after this exception runs with these response.code's
                        try:
                            response_headers = dict(
                                [(item[0].title(), item[1])
                                 for item in list(response.info().items())]
                            )  # behaves differently 18 to 19. 18 I had 3 "Set-Cookie:" it combined all 3 values into 1 key. In 19 only the last keys value was present.
                        except:
                            from fenomscrapers.modules import log_utils
                            log_utils.error()
                            response_headers = response.headers
                        return (str(response), str(response.code),
                                response_headers)

        if output == 'cookie':
            try:
                result = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except:
                pass
            try:
                result = cf
            except:
                pass
            if close is True: response.close()
            return result
        elif output == 'geturl':
            result = response.geturl()
            if close is True: response.close()
            return result
        elif output == 'headers':
            result = response.headers
            if close is True: response.close()
            return result
        elif output == 'chunk':
            try:
                content = int(response.headers['Content-Length'])
            except:
                content = (2049 * 1024)
            if content < (2048 * 1024): return
            try:
                result = response.read(16 * 1024)
            except:
                result = response  # testing
            if close is True: response.close()
            return result
        elif output == 'file_size':
            try:
                content = int(response.headers['Content-Length'])
            except:
                content = '0'
            if close is True: response.close()
            return content
        if flare != 'cloudflare':
            if limit == '0': result = response.read(224 * 1024)
            elif limit is not None: result = response.read(int(limit) * 1024)
            else: result = response.read(5242880)

        try:
            encoding = response.headers["Content-Encoding"]
        except:
            encoding = None

        if encoding == 'gzip':
            result = gzip.GzipFile(fileobj=StringIO(result)).read()
        if not as_bytes:
            result = py_tools.ensure_text(result, errors='ignore')

        if 'sucuri_cloudproxy_js' in result:
            su = sucuri().get(result)
            headers['Cookie'] = su
            req = urllib2.Request(url, data=post)
            _add_request_header(req, headers)
            response = urllib2.urlopen(req, timeout=int(timeout))
            if limit == '0': result = response.read(224 * 1024)
            elif limit is not None: result = response.read(int(limit) * 1024)
            else: result = response.read(5242880)
            try:
                encoding = response.headers["Content-Encoding"]
            except:
                encoding = None
            if encoding == 'gzip':
                result = gzip.GzipFile(fileobj=StringIO(result)).read()
        if 'Blazingfast.io' in result and 'xhr.open' in result:
            netloc = '%s://%s' % (urlparse(url).scheme, urlparse(url).netloc)
            ua = headers['User-Agent']
            headers['Cookie'] = cache.get(bfcookie().get, 168, netloc, ua,
                                          timeout)
            result = _basic_request(url,
                                    headers=headers,
                                    post=post,
                                    timeout=timeout,
                                    limit=limit)

        if output == 'extended':
            try:
                response_headers = dict(
                    [(item[0].title(), item[1])
                     for item in list(response.info().items())]
                )  # behaves differently 18 to 19. 18 I had 3 "Set-Cookie:" it combined all 3 values into 1 key. In 19 only the last keys value was present.
            except:
                from fenomscrapers.modules import log_utils
                log_utils.error()
                response_headers = response.headers
            try:
                response_code = str(response.code)
            except:
                response_code = str(response.status_code
                                    )  # object from CFScrape Requests object.
            try:
                cookie = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except:
                pass
            try:
                cookie = cf
            except:
                pass
            if close is True: response.close()
            return (result, response_code, response_headers, headers, cookie)
        else:
            if close is True: response.close()
            return result
    except:
        from fenomscrapers.modules import log_utils
        log_utils.error('Request-Error url=(%s)' % url)
        return None
Exemplo n.º 55
0
def startApp():
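    # Polling loop re-armed via app.after(1000, ...): watches path_to_watch for
    # newly added earthquake-alert files and, every 100 ticks, pulls the NCDR
    # CAP feed for Kaohsiung, stores new alerts in the database, and pushes
    # them to users.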
    global run, before
    if run:
        # Declare timeCount as global so the assignments below update the
        # module-level counter instead of creating a local variable.
        global timeCount
        # Earthquake alerts ====================================================================================
        ## Watch the directory for newly added earthquake-alert files
        after = dict([(f, None) for f in os.listdir(path_to_watch + '.')])
        added = [f for f in after if f not in before]
        removed = [f for f in before if f not in after]
        if added:
            for ear in added:
                print("Added: ", ", ".join(added))
                difEarth(path_to_watch + ear)
        if removed: print("Removed: ", ", ".join(removed))
        before = after
        timeCount = timeCount + 1
        if timeCount == 100:
            try:
                info.insert(
                    '1.0', '{}: start fetching CAP data\n'.format(
                        datetime.now().strftime("%Y-%m-%dT%H:%M:%S")))
                # CAP ====================================================================================
                ## Make sure the CAP tables exist; create them if they do not
                for tableName in tableList:
                    db_Check(tableName, tableList[tableName])
                for relateTable in DB_Dic:
                    #print (relateTable)
                    db_Check(
                        relateTable, "id serial primary key, %s varchar(50)" %
                        (DB_Dic[relateTable]))

                ## Poll the CAP feed
                context = ssl._create_unverified_context()
                capList_url = urlopen(
                    'https://alerts.ncdr.nat.gov.tw/JSONAtomFeed.ashx?County=%E9%AB%98%E9%9B%84%E5%B8%82',
                    context=context)
                capList_info = json.loads(capList_url.read().decode('utf-8'))
                cap_date = datetime.strptime(
                    capList_info['entry'][0]['updated'].split('+')[0],
                    "%Y-%m-%dT%H:%M:%S")
                api_key = 'apikey'

                ## Fetch the CAP content for each new id
                idList = []
                for cap in capList_info["entry"]:
                    cur.execute("SELECT * FROM cap_kaohsiung")
                    raw = cur.fetchall()
                    cap_id = [f for f in raw if f[1] == cap['id']]
                    conn.commit()
                    if cap_id:
                        # This CAP record already exists; skip it
                        pass
                    else:
                        ## Process the content of each CAP id ================================================
                        capURL = urlopen(
                            'https://alerts.ncdr.nat.gov.tw/api/dump/datastore?apikey=%s&capid=%s&format=json'
                            % (api_key, cap['id']))
                        cap_content = json.loads(capURL.read().decode('utf-8'))
                        #cap_status = cap_content['status']
                        cap_effective = cap_content['info'][0]['effective']
                        cap_expires = cap_content['info'][0]['expires']
                        cap_headline = cap_content['info'][0]['headline']
                        cap_description = cap_content['info'][0]['description']
                        cap_instruction = cap_content['info'][0]['instruction']
                        cap_web = cap_content['info'][0]['web']
                        ## The following are resolved against the relate tables
                        ## (standard CAP field names assumed)
                        cap_event = cap_content['info'][0]['event']
                        cap_responseType = cap_content['info'][0]['responseType']
                        cap_msgType = cap_content['msgType']
                        cap_sender = cap_content['info'][0]['senderName']
                        cap_category = cap_content['info'][0]['category']
                        cap_urgency = cap_content['info'][0]['urgency']
                        cap_severity = cap_content['info'][0]['severity']
                        cap_certainty = cap_content['info'][0]['certainty']
                        ## Some CAP messages omit alert_color / alert_title,
                        ## so set defaults before scanning the parameters
                        cap_alert_Color = 'NA'
                        cap_alert_title = None
                        for para in cap_content['info'][0]['parameter']:
                            ## alert_color
                            if para['valueName'] == 'alert_color':
                                cap_alert_Color = para['value']
                            ## alert_title (no relate table)
                            elif para['valueName'] == 'alert_title':
                                cap_alert_title = para['value']
                        ## Build the relate-table check dictionary
                        relateCheckList = {
                            "cap_category": cap_category,
                            "cap_sender": cap_sender,
                            "cap_event": cap_event,
                            "cap_alert_Color": cap_alert_Color,
                            "cap_certainty": cap_certainty,
                            "cap_urgency": cap_urgency,
                            "cap_serverity": cap_severity,
                            "cap_responseType": cap_responseType,
                            "cap_msgType": cap_msgType
                        }

                        for relateitem in DB_Dic:
                            ## Create a dynamic variable, e.g. cap_category_id = checkRelateTable('cap_category', relateCheckList['cap_category'])
                            locals()['%s_id' % relateitem] = int(
                                checkRelateTable(relateitem,
                                                 DB_Dic[relateitem],
                                                 relateCheckList[relateitem]))
                            idList.append(locals()['%s_id' % relateitem])
                        ids = ', '.join(str(e) for e in idList)
                        print(ids)
                        ## Store the record in the database
                        cur.execute(
                            """INSERT INTO cap_kaohsiung (cap_id, alert_title, effective, expires, headline, description, instruction, web, category, senderName, event, alert_color, certainty, urgency, severity, responseType, msgType) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', %s)"""
                            % (cap['id'], cap_alert_title, cap_effective,
                               cap_expires, cap_headline, cap_description,
                               cap_instruction, cap_web, ids))
                        conn.commit()
                        idList.clear()
                        ## Push the CAP message to users
                        capMsgText = "========%s========\n訊息種類:%s\n警報類型:%s\n%s by%s\n%s:%s\n生效時間:%s\n失效時間:%s" % (
                            cap_headline, cap_msgType, cap_category,
                            cap_description, cap_sender, cap_instruction,
                            cap_web, cap_effective, cap_expires)
                        push_message(capMsgText)
            except:
                msgtext = '{time}: unexpected error while processing {capid}!'.format(
                    time=datetime.now().strftime("%Y-%m-%dT%H:%M:%S"),
                    capid=cap['id'])
                # print (msgtext)
                info.insert('1.0', msgtext + '\n')
                pass
            timeCount = 0
        app.after(1000, startApp)
Exemplo n.º 56
0
    def test_auth_error(self):
        with self.assertRaises(pyone.OneAuthenticationException):
            xone = pyone.OneServer(testEndpoint, session="oneadmin:invalidpass", context=ssl._create_unverified_context())
            xone.hostpool.info()
Exemplo n.º 57
0
def parse_dg(ip, key, dg):
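    # Pull the shared and device-group pre/post rulebases from Panorama over an
    # unverified HTTPS connection, then tally decrypt and security rules for
    # the given device-group.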

    conn = httplib.HTTPSConnection(ip,
                                   context=ssl._create_unverified_context())

    print ""
    print "Grabbing copy of shared pre-rulebase..."
    conn.request(
        "GET",
        "/api/?type=config&action=get&xpath=/config/shared/pre-rulebase&key=" +
        key)
    r1 = conn.getresponse()
    data1 = r1.read()
    p_shared_pre_rulebase = etree.fromstring(data1)
    conn.close()
    r1.close()

    print "Grabbing copy of shared post-rulebase..."
    print ""
    conn.request(
        "GET",
        "/api/?type=config&action=get&xpath=/config/shared/post-rulebase&key="
        + key)
    r2 = conn.getresponse()
    data2 = r2.read()
    p_shared_post_rulebase = etree.fromstring(data2)
    conn.close()
    r2.close()

    dgname_encode = dg.replace(" ", "%20")
    print "Grabbing copy of pre-rulebase for device-group '" + dg + "'."
    conn.request(
        "GET",
        "/api/?type=config&action=get&xpath=/config/devices/entry[@name='localhost.localdomain']"
        "/device-group/entry[@name='" + dgname_encode + "']/pre-rulebase" +
        "&key=" + key)
    r1 = conn.getresponse()
    data1 = r1.read()
    dg_rulebase_dict[dg + "-pre"] = etree.fromstring(data1)
    conn.close()
    r1.close()

    print "Grabbing copy of post-rulebase for device-group '" + dg + "'."
    conn.request(
        "GET",
        "/api/?type=config&action=get&xpath=/config/devices/entry[@name='localhost.localdomain']"
        "/device-group/entry[@name='" + dgname_encode + "']/post-rulebase" +
        "&key=" + key)
    r2 = conn.getresponse()
    data2 = r2.read()
    dg_rulebase_dict[dg + "-post"] = etree.fromstring(data2)
    conn.close()
    r2.close()

    print "Counting '" + dg + "' decrypt pre-rules"
    for rule in dg_rulebase_dict.get(dg + "-pre").findall(
            'result/pre-rulebase/decryption/rules/'):
        if not rule.findall('disabled'):
            for profile in rule.findall('action'):
                if profile.text == "decrypt":
                    dg_dict[dg]['decrypt'] = 'yes'
                    rule_dict[dg]['dcrypt_rule_count'] += 1
                else:
                    rule_dict[dg]['no_dcrypt_rule_count'] += 1
        else:
            for profile in rule.findall('disabled'):
                if profile.text != 'yes':
                    for profile in rule.findall('action'):
                        if profile.text == "decrypt":
                            dg_dict[dg]['decrypt'] = 'yes'
                            rule_dict[dg]['dcrypt_rule_count'] += 1
                        else:
                            rule_dict[dg]['no_dcrypt_rule_count'] += 1

    print "Counting '" + dg + "' decrypt post-rules"
    for rule in dg_rulebase_dict.get(dg + "-post").findall(
            'result/post-rulebase/decryption/rules/'):
        if not rule.findall('disabled'):
            for profile in rule.findall('action'):
                if profile.text == "decrypt":
                    dg_dict[dg]['decrypt'] = 'yes'
                    rule_dict[dg]['dcrypt_rule_count'] += 1
                else:
                    rule_dict[dg]['no_dcrypt_rule_count'] += 1
        else:
            for profile in rule.findall('disabled'):
                if profile.text != 'yes':
                    for profile in rule.findall('action'):
                        if profile.text == "decrypt":
                            dg_dict[dg]['decrypt'] = 'yes'
                            rule_dict[dg]['dcrypt_rule_count'] += 1
                        else:
                            rule_dict[dg]['no_dcrypt_rule_count'] += 1

    print "Counting shared decrypt pre-rules"
    for rule in p_shared_pre_rulebase.findall(
            'result/pre-rulebase/decryption/rules/'):
        if not rule.findall('disabled'):
            for profile in rule.findall('action'):
                if profile.text == "decrypt":
                    dg_dict[dg]['decrypt'] = 'yes'
                    rule_dict[dg]['dcrypt_rule_count'] += 1
                else:
                    rule_dict[dg]['no_dcrypt_rule_count'] += 1
        else:
            for profile in rule.findall('disabled'):
                if profile.text != 'yes':
                    for profile in rule.findall('action'):
                        if profile.text == "decrypt":
                            dg_dict[dg]['decrypt'] = 'yes'
                            rule_dict[dg]['dcrypt_rule_count'] += 1
                        else:
                            rule_dict[dg]['no_dcrypt_rule_count'] += 1

    print "Counting shared decrypt post-rules"
    for rule in p_shared_post_rulebase.findall(
            'result/post-rulebase/decryption/rules/'):
        if not rule.findall('disabled'):
            for profile in rule.findall('action'):
                if profile.text == "decrypt":
                    dg_dict[dg]['decrypt'] = 'yes'
                    rule_dict[dg]['dcrypt_rule_count'] += 1
                else:
                    rule_dict[dg]['no_dcrypt_rule_count'] += 1
        else:
            for profile in rule.findall('disabled'):
                if profile.text != 'yes':
                    for profile in rule.findall('action'):
                        if profile.text == "decrypt":
                            dg_dict[dg]['decrypt'] = 'yes'
                            rule_dict[dg]['dcrypt_rule_count'] += 1
                        else:
                            rule_dict[dg]['no_dcrypt_rule_count'] += 1

    print "Counting '" + dg + "' security pre-rules"
    for rule in dg_rulebase_dict.get(dg + "-pre").findall(
            'result/pre-rulebase/security/rules/'):
        if not rule.findall('disabled'):
            rule_dict[dg]['sec_rule_count'] += 1
        else:
            for profile in rule.findall('disabled'):
                if profile.text != 'yes':
                    rule_dict[dg]['sec_rule_count'] += 1

    print "Counting '" + dg + "' security post-rules"
    for rule in dg_rulebase_dict.get(dg + "-post").findall(
            'result/post-rulebase/security/rules/'):
        if not rule.findall('disabled'):
            rule_dict[dg]['sec_rule_count'] += 1
        else:
            for profile in rule.findall('disabled'):
                if profile.text != 'yes':
                    rule_dict[dg]['sec_rule_count'] += 1

    print "Counting shared security pre-rules"
    for rule in p_shared_pre_rulebase.findall(
            'result/pre-rulebase/security/rules/'):
        if not rule.findall('disabled'):
            rule_dict[dg]['sec_rule_count'] += 1
        else:
            for profile in rule.findall('disabled'):
                if profile.text != 'yes':
                    rule_dict[dg]['sec_rule_count'] += 1

    print "Counting shared security post-rules"
    print ""
    for rule in p_shared_post_rulebase.findall(
            'result/post-rulebase/security/rules/'):
        if not rule.findall('disabled'):
            rule_dict[dg]['sec_rule_count'] += 1
        else:
            for profile in rule.findall('disabled'):
                if profile.text != 'yes':
                    rule_dict[dg]['sec_rule_count'] += 1

    xmlpath = "result/pre-rulebase/security/rules/"
    loop_rules(dg, xmlpath, p_shared_pre_rulebase)

    xmlpath = "result/post-rulebase/security/rules/"
    loop_rules(dg, xmlpath, p_shared_post_rulebase)

    xmlpath = "result/pre-rulebase/security/rules/"
    dg_pre_rulebase = dg_rulebase_dict.get(dg + "-pre")
    loop_rules(dg, xmlpath, dg_pre_rulebase)

    xmlpath = "result/post-rulebase/security/rules/"
    dg_post_rulebase = dg_rulebase_dict.get(dg + "-post")
    loop_rules(dg, xmlpath, dg_post_rulebase)

    print_output(dg)
Exemplo n.º 58
0
def get_dg(ip, key, dg_specific):
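    # Ask Panorama for its device-groups (optionally a single named one),
    # record each group's member devices, initialise the per-group counters,
    # then parse the rulebases of every group with a connected device.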

    conn = httplib.HTTPSConnection(ip,
                                   context=ssl._create_unverified_context())
    print dg_specific
    if not dg_specific:
        """Panorama: Get device-groups."""
        request_str = "/api/?type=op&cmd=<show><devicegroups><%2Fdevicegroups><%2Fshow>&key="
        conn.request("GET", request_str + key)
        r = conn.getresponse()
        data = r.read()
        p_dg = etree.fromstring(data)
        conn.close()
        r.close()

    else:
        """Panorama: Get specific device-group."""
        dg_specific_encode = dg_specific.replace(" ", "%20")
        request_str = "/api/?type=op&cmd=<show><devicegroups><name>" + dg_specific_encode + "<%2Fname><%2Fdevicegroups><%2Fshow>&key="
        conn.request("GET", request_str + key)
        r = conn.getresponse()
        data = r.read()
        p_dg = etree.fromstring(data)
        conn.close()
        r.close()

    for x in p_dg.findall("result/devicegroups/entry"):
        dgname_encode = x.attrib['name'].replace(" ", "%20")
        dgname = x.attrib['name']
        dg_dict[dgname] = {}
        rule_dict[dgname] = {}
        for y in x.findall("devices/entry"):
            dg_dict[dgname][y.attrib['name']] = {}
            dg_dict[dgname][y.attrib['name']]['name_encoded'] = dgname_encode
            dg_dict[dgname]['decrypt'] = {}
            rule_dict[dgname]['dcrypt_rule_count'] = 0
            rule_dict[dgname]['no_dcrypt_rule_count'] = 0
            rule_dict[dgname]['sec_rule_count'] = 0
            rule_dict[dgname]['features'] = {
                'virus': 0,
                'spyware': 0,
                'vuln': 0,
                'url-filter': 0,
                'data-filter': 0,
                'file-block': 0,
                'wildfire': 0,
                'app-id': 0,
                'profiles': 0,
                'user-id': 0
            }

            for z in y:
                if z.tag in ("serial", "connected", "hostname", "ip-address",
                             "model", "sw-version"):
                    dg_dict[dgname][y.attrib['name']][z.tag] = z.text

    for dg in dg_dict:
        for device in dg_dict[dg]:
            if dg_dict[dg][device].get('connected') == 'yes':
                if dg not in active_dg:
                    active_dg.append(dg)

    for dg in active_dg:
        parse_dg(ip, key, dg)
Exemplo n.º 59
0
    def do_GET(self):
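        # Proxy handler: forward the client's request to the origin server
        # (reusing per-thread connections, unverified SSL for https targets),
        # with request_handler / response_handler hooks that may rewrite the
        # request and response bodies.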
        if self.path == 'http://webspector.crt/':
            self.send_cacert()
            return

        req = self
        content_length = int(req.headers.get('Content-Length', 0))
        req_body = self.rfile.read(content_length) if content_length else None

        if req.path[0] == '/':
            if isinstance(self.connection, ssl.SSLSocket):
                req.path = "https://%s%s" % (req.headers['Host'], req.path)
            else:
                req.path = "http://%s%s" % (req.headers['Host'], req.path)

        req_body_modified = self.request_handler(req, req_body)
        if req_body_modified is False:
            self.send_error(403)
            return
        elif req_body_modified is not None:
            req_body = req_body_modified
            req.headers['Content-length'] = str(len(req_body))

        u = urlparse.urlsplit(req.path)
        scheme, netloc, path = u.scheme, u.netloc, (u.path + '?' + u.query
                                                    if u.query else u.path)
        assert scheme in ('http', 'https')
        if netloc:
            req.headers['Host'] = netloc
        setattr(req, 'headers', self.filter_headers(req.headers))

        try:
            origin = (scheme, netloc)
            if origin not in self.tls.conns:
                if scheme == 'https':
                    self.tls.conns[origin] = httplib.HTTPSConnection(
                        netloc,
                        timeout=self.timeout,
                        context=ssl._create_unverified_context())
                else:
                    self.tls.conns[origin] = httplib.HTTPConnection(
                        netloc, timeout=self.timeout)
            conn = self.tls.conns[origin]
            conn.request(self.command, path, req_body, dict(req.headers))
            res = conn.getresponse()

            version_table = {10: 'HTTP/1.0', 11: 'HTTP/1.1'}
            setattr(res, 'headers', res.msg)
            setattr(res, 'response_version', version_table[res.version])

            # support streaming
            if 'Content-Length' not in res.headers and 'no-store' in res.headers.get(
                    'Cache-Control', ''):
                self.response_handler(req, req_body, res, '')
                setattr(res, 'headers', self.filter_headers(res.headers))
                self.relay_streaming(res)
                with self.lock:
                    self.save_handler(req, req_body, res, '')
                return

            res_body = res.read()
        except Exception as e:
            print(e)
            if origin in self.tls.conns:
                del self.tls.conns[origin]
            self.send_error(502)
            return

        content_encoding = res.headers.get('Content-Encoding', 'identity')
        res_body_plain = self.decode_content_body(res_body, content_encoding)

        res_body_modified = self.response_handler(req, req_body, res,
                                                  res_body_plain)
        if res_body_modified is False:
            self.send_error(403)
            return
        elif res_body_modified is not None:
            res_body_plain = res_body_modified
            res_body = self.encode_content_body(res_body_plain,
                                                content_encoding)
            res.headers['Content-Length'] = str(len(res_body))

        setattr(res, 'headers', self.filter_headers(res.headers))

        self.wfile.write("%s %d %s\r\n" %
                         (self.protocol_version, res.status, res.reason))
        for line in res.headers.headers:
            self.wfile.write(line)
        self.end_headers()
        self.wfile.write(res_body)
        self.wfile.flush()

        with self.lock:
            self.save_handler(req, req_body, res, res_body_plain)
Exemplo n.º 60
0
def get_f_info(ip, key):
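    # Query a single firewall's system info and its vsys1 rulebase over an
    # unverified HTTPS connection, then count decrypt and security rules.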

    conn = httplib.HTTPSConnection(ip,
                                   context=ssl._create_unverified_context())

    request_str = "/api/?type=op&cmd=<show><system><info></info></system></show>&key="

    conn.request("GET", request_str + key)
    r = conn.getresponse()
    data = r.read()
    fw_info = etree.fromstring(data)

    print "Getting Firewall Rulebase..."
    print ""
    request_str = "/api/?type=config&action=get&xpath=/config/devices/entry[@name='localhost.localdomain']/vsys/entry" \
                  "[@name='vsys1']/rulebase&key="

    conn.request("GET", request_str + key)
    r = conn.getresponse()
    data = r.read()
    fw_rulebase = etree.fromstring(data)
    conn.close()

    for x in fw_info.findall("result/system/"):
        if x.tag == 'serial':
            fw_dict[x.text] = {}
            fw_dict[x.text][x.tag] = x.text
            fw_dict[x.text]['decrypt'] = {}

    for x in fw_dict:
        for y in fw_info.findall("result/system/"):
            if y.tag == 'hostname':
                fw_dict[x][y.tag] = y.text
            if y.tag == 'ip-address':
                fw_dict[x][y.tag] = y.text
            if y.tag == 'model':
                fw_dict[x][y.tag] = y.text
            if y.tag == 'sw-version':
                fw_dict[x][y.tag] = y.text
        rule_dict[x] = {}
        rule_dict[x]['dcrypt_rule_count'] = 0
        rule_dict[x]['no_dcrypt_rule_count'] = 0
        rule_dict[x]['sec_rule_count'] = 0
        rule_dict[x]['features'] = {
            'virus': 0,
            'spyware': 0,
            'vuln': 0,
            'url-filter': 0,
            'data-filter': 0,
            'file-block': 0,
            'wildfire': 0,
            'app-id': 0,
            'profiles': 0,
            'user-id': 0
        }

    for x in fw_dict:
        print "Counting " + x + " decrypt rules"
        for rule in fw_rulebase.findall('result/rulebase/decryption/rules/'):
            if not rule.findall('disabled'):
                for profile in rule.findall('action'):
                    if profile.text == "decrypt":
                        fw_dict[x]['decrypt'] = 'yes'
                        rule_dict[x]['dcrypt_rule_count'] += 1
                    else:
                        rule_dict[x]['no_dcrypt_rule_count'] += 1
            else:
                for profile in rule.findall('disabled'):
                    if profile.text != 'yes':
                        for profile in rule.findall('action'):
                            if profile.text == "decrypt":
                                fw_dict[x]['decrypt'] = 'yes'
                                rule_dict[x]['dcrypt_rule_count'] += 1
                            else:
                                rule_dict[x]['no_dcrypt_rule_count'] += 1

        print "Counting " + x + " security rules"
        print ""
        for rule in fw_rulebase.findall('result/rulebase/security/rules/'):
            # Count every security rule that is not disabled
            disabled = rule.find('disabled')
            if disabled is not None and disabled.text == 'yes':
                continue
            rule_dict[x]['sec_rule_count'] += 1

        xmlpath = "result/rulebase/security/rules/"
        loop_rules(x, xmlpath, fw_rulebase)

        print_output(x)