Example #1
    def test_load_swagger_resource_aggregate_raises_url_error(
            self, mock_perform_request, mock_utils, mock_app):
        # create extra service
        crm_lm = factories.LogicModule(
            name='Contacts and Appointments Service',
            endpoint='http://crm.example.com',
            endpoint_name='crm',
            relationships={})

        # mock schema urls and app
        product_schema_url = {self.lm.endpoint_name: self.lm.endpoint}
        crm_schema_url = {'crm': crm_lm.endpoint}
        mock_utils.get_swagger_urls.side_effect = [
            product_schema_url, crm_schema_url
        ]
        msg = f'Make sure that {crm_lm.endpoint} is accessible.'
        exception_obj = error.URLError(msg)
        mock_app.side_effect = exception_obj

        # mock service response
        service_response = Mock(PySwaggerResponse)
        service_response.status = 200
        service_response.data = self.response_data
        mock_perform_request.return_value = service_response

        # make api request and validate error
        path = '/old/{}/{}/'.format(self.lm.endpoint_name, 'products')
        with self.assertRaises(error.URLError) as context:
            self.client.get(path, {'aggregate': 'true'})
        self.assertIn(msg, str(context.exception))
Example #2
    def _get_fake_responses(
            self, url: str
    ) -> Tuple[Optional[Dict[str, Any]], Optional[Dict[str, str]]]:
        """Return response and headers if faked for this URL in uaclient.conf.

        :return: A tuple of response and header dicts if the URL has an overlay
            response defined. Return (None, {}) otherwise.

        :raises exceptions.UrlError: When the faked response "code" is != 200.
            The UrlError reason will be the "response" value and any optional
            "headers" provided.
        """
        responses = self._get_response_overlay(url)
        if not responses:
            return None, {}
        if len(responses) == 1:
            # When only one response is defined, repeat it for all calls.
            response = responses[0]
        else:
            # When multiple responses are defined, pop the first one off the list.
            response = responses.pop(0)
        if response["code"] == 200:
            return response["response"], response.get("headers", {})
        # Must be an error
        e = error.URLError(response["response"])
        raise exceptions.UrlError(
            e,
            code=response["code"],
            headers=response.get("headers", {}),
            url=url,
        )
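For reference, a minimal sketch of the overlay data this helper consumes; the field names ("code", "response", "headers") come from the code above, while the exact file layout is an assumption:

    # Hypothetical overlay entries for one URL; _get_response_overlay would
    # return a list like this, parsed from uaclient.conf.
    [
        {"code": 200, "response": {"token": "fake-token"}, "headers": {}},
        {"code": 404, "response": "not found"}
    ]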
Example #3
    def fetch_url(self, url: str) -> HTTPResponse:
        req = request.Request(url, data=None, headers={'User-Agent': self.UA})
        res = request.urlopen(req)
        if isinstance(res, response.addinfourl):
            raise error.URLError("Got FTP handle instead of HTTP response")

        return res  # type: ignore
Example #4
 def test_articles_skips_unreachable_articles(self):
     from urllib import error
     self.create_patch('argos.core.membrane.extractor.extract_entry_data',
                       side_effect=[error.URLError('unreachable')])
     articles = []
     collector.get_articles(self.feed, lambda a: articles.append(a))
     self.assertEqual(len(articles), 0)
Example #5
 def test_monitor_call_for_failure(self, mock_urlopen):
     mock_urlopen.side_effect = urlerr.URLError("MOCK Error")
     test_vnf = {}
     test_kwargs = {'mgmt_ip': 'a.b.c.d'}
     monitor_return = self.monitor_http_ping.monitor_call(
         test_vnf, test_kwargs)
     self.assertEqual('failure', monitor_return)
Example #6
 def open(self, url, conn_timeout=None):
   if conn_timeout == 0:
     raise urllib_error.URLError('Could not reach %s within deadline.' % url)
   if url.startswith('http'):
     self.opened.set()
   if self.error:
     raise urllib_error.HTTPError(url, self.error, None, None, Compatibility.BytesIO(b'glhglhg'))
   return urllib_request.addinfourl(Compatibility.BytesIO(self.rv), url, None, self.code)
Example #7
File: http.py Project: xianxu/pants
 def open(self, url, conn_timeout=None, **kw):
   """
     Wrapper in front of urlopen that more gracefully handles odd network environments.
   """
   url = self.maybe_local_url(url)
   with TRACER.timed('Fetching', V=1):
     if not self.reachable(url, conn_timeout=conn_timeout):
       raise urllib_error.URLError('Could not reach %s within deadline.' % url)
     return urllib_request.urlopen(url, **kw)
Example #8
def get_api_token(email: str, password: str) -> str:
    try:
        api_token_response = hit_api('/API/GetAPIToken', {
            'Email': email,
            'Password': password
        })
        api_token: str = loads(api_token_response)['APIToken']
        return api_token
    except error.URLError as e:
        raise error.URLError('Error getting API Token') from e
Example #9
 def encode_url(self, url, conn_timeout=None):
   target, target_tmp, headers, headers_tmp = self.translate_all(url)
   with contextlib.closing(self.really_open(url, conn_timeout=conn_timeout)) as http_fp:
     if http_fp.getcode() != 200:
       raise urllib_error.URLError('Non-200 response code from %s' % url)
     with TRACER.timed('Caching', V=2):
       with open(target_tmp, 'wb') as disk_fp:
         disk_fp.write(http_fp.read())
       with open(headers_tmp, 'wb') as headers_fp:
         headers_fp.write(struct.pack('>h', http_fp.code or 0))
         headers_fp.write(str(http_fp.headers).encode('utf8'))
       os.rename(target_tmp, target)
       os.rename(headers_tmp, headers)
Example #10
    def do_open(self, http_class, req, **http_conn_args):
        host = req.host
        if not host:
            raise error.URLError('no host given')
        con = self.get_connection(host, client.HTTPConnection, req)
        headers = dict(req.headers)
        headers.update(req.unredirected_hdrs)
        headers['Connection'] = 'keep-alive'
        headers = dict((name.title(), val) for name, val in headers.items())

        if req._tunnel_host:
            tunnel_headers = {}
            proxy_auth_hdr = "Proxy-Authorization"
            if proxy_auth_hdr in headers:
                tunnel_headers[proxy_auth_hdr] = headers[proxy_auth_hdr]
                # Proxy-Authorization should not be sent to origin
                # server.
                del headers[proxy_auth_hdr]
            con.set_tunnel(req._tunnel_host, headers=tunnel_headers)

        try:
            try:
                con.request(req.get_method(), req.selector, req.data, headers)
            except OSError as err:  # timeout error
                raise error.URLError(err)
            r = con.getresponse()
        except:
            con.close()
            raise

        if con.sock:
            self.cache_connection(host, con)

        r.url = req.get_full_url()
        r.msg = r.reason
        return r
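For context, urllib reaches a do_open like the one above through a handler's http_open method. A minimal, hypothetical sketch of that wiring (the KeepAliveHTTPHandler name is an assumption; the keep-alive do_open and its get_connection/cache_connection helpers would live on this class):

    import urllib.request
    from http import client

    class KeepAliveHTTPHandler(urllib.request.HTTPHandler):
        # urllib calls http_open for http:// URLs, which delegates to do_open.
        def http_open(self, req):
            return self.do_open(client.HTTPConnection, req)

    opener = urllib.request.build_opener(KeepAliveHTTPHandler())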
Example #11
 def init_page(self, host_name, innerURL, url):
     if self.hosts.get(host_name) is None:
         self.hosts[host_name] = {
             'host_count': 0,
             'host_links_count': 0,
             'innerURL': {innerURL},
             'externalURL': set(),
             'robot': RobotFileParser('http://%s/robots.txt' % host_name)
         }
         try:
             info = request.urlopen(
                 'http://%s/robots.txt' % host_name,
                 timeout=self.robot_wait).info()['Content-Type']
             if info is None or 'text/plain' not in info:
                 raise error.URLError('robots.txt is invalid')
             self.hosts[host_name]['robot'].read()  # may raise if the robots.txt URL is invalid
         except Exception:
             self.hosts[host_name]['robot'].allow_all = True
     self.hosts[host_name]['host_count'] += 1
Example #12
 def make_request(self, url, http_method):
     """
     make HTTP or HTTPS requests
     @param url: site URL address
     @param http_method: HTTP protocol type
     @return dictionary with headers, content and encoding
     """
     request = Request(url, method=http_method)
     try:
         with urlopen(request, timeout=self.timeout) as conn:
             encoding = conn.headers.get_content_charset() or 'UTF-8'
             return {"url": url,
                     "code": conn.code,
                     "type": conn.headers['Content-Type'],
                     "headers": conn.headers,
                     "content": conn.read(),
                     "encoding": encoding}
     except error.HTTPError as e:
         # Surface the real status code instead of a hard-coded 500.
         return {"url": url, "code": e.code}
     except error.URLError as e:
         raise error.URLError(f'bad connection {e}') from e
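A hypothetical call site (the owning class and its timeout attribute are assumptions):

    # page = self.make_request('https://example.com', 'GET')
    # if 'content' in page:
    #     text = page['content'].decode(page['encoding'])
    # else:
    #     ...  # HTTPError path: only "url" and "code" are returned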
Example #13
 def init_page(self, host_name, innerURL, url):
     if self.hosts.get(host_name) is None:
         self.hosts[host_name] = {
             'host_count': 0,
             'host_links_count': 0,
             'innerURL': {innerURL},
             'externalURL': set(),
             'robot': RobotFileParser('http://%s/robots.txt' % host_name)
         }
         try:
             info = request.urlopen(
                 'http://%s/robots.txt' % host_name,
                 timeout=self.robot_wait).info()['Content-Type']
             if info is None or 'text/plain' not in info:
                 raise error.URLError('robots.txt is invalid')
             self.hosts[host_name]['robot'].read()  # read robots.txt
         except Exception:
             # Known failure modes include error.URLError, error.HTTPError,
             # UnicodeDecodeError, RemoteDisconnected, ConnectionResetError
             # and socket.timeout; fall back to allowing everything.
             self.hosts[host_name]['robot'].allow_all = True
     self.hosts[host_name]['innerURL'].add(innerURL)
     self.hosts[host_name]['host_count'] += 1
Example #14
    def test_download_image_handles_os_and_image_provider_errors(self):
        aircraft = Aircraft(
            [None, 'bar', 'bar.png', self.seen_datetime, self.seen_datetime])

        image.request.urlretrieve = mock.MagicMock(side_effect=OSError)
        filename = self.image_service.download_image('http://foo.com/bar.png',
                                                     aircraft)
        image.request.urlretrieve.assert_called_once_with(
            'http://foo.com/bar.png',
            '/acars-server/acarsserver/service/../app/assets/img/aircrafts/large/bar.png'
        )
        self.assertIsNone(filename)

        image.request.urlretrieve = mock.MagicMock(
            side_effect=error.URLError('Not Found'))
        filename = self.image_service.download_image('http://foo.com/bar.png',
                                                     aircraft)
        image.request.urlretrieve.assert_called_once_with(
            'http://foo.com/bar.png',
            '/acars-server/acarsserver/service/../app/assets/img/aircrafts/large/bar.png'
        )
        self.assertIsNone(filename)
Example #15
    def urlopen(*args, **kwargs):
        if raise_url_error:
            raise error.URLError('FAKE ERROR')

        if raise_http_error:
            raise error.HTTPError(url='',
                                  code=raise_http_error,
                                  msg='FAKE ERROR',
                                  hdrs={},
                                  fp=None)

        class Response:
            @staticmethod
            def read():
                return json.dumps({
                    'transId': returned_token,
                    'cardNumber': '1111-2222-3333-4444',
                    'status': returned_status,
                    'amount': returned_amount,
                    'errorMessage': error_message,
                    'errorCode': error_code
                }).encode()

        return Response()
Example #16
    def __init__(self, tr_server, user_name, password):
        """
        This method will initialize the TestRail server using the user name and password provided
        :param tr_server: Name of the Test Rail server
        :param user_name: TestRail user id
        :param password:  TestRail password
        """
        file_dir = os.path.split(os.path.realpath(__file__))[0]
        logging.config.fileConfig(os.path.join(file_dir, "trlogger.ini"))
        # Configure the logger
        self._log = logging.getLogger('testrail')
        self._log.info("Starting TestRail application ")

        # TestRail Connection status
        # Note: This variable is used to ensure we have a valid TestRail Instance
        self._connection_status = False
        try:
            # Check if the URL is valid
            self._client = APIClient(tr_server)
            self._client.password = password
            self._client.user = user_name
            self._connection_status = True
            # Check if the URL, user name and password are set correctly by accessing an API
            self._client.send_get(Defines.TR_API_GET_PROJ + "/" +
                                  str(Defines.TR_PROJ_ID_OTG))
            self._log.info(
                "Connected to TestRail server {} with user-id {}".format(
                    tr_server, user_name))
        except err.URLError:
            self._log.exception("Url: {} is not valid".format(tr_server))
            raise err.URLError("Error: Please check the URL")
        except APIError:
            self._log.critical(
                "User-id or Password is not correct. Failed to connect with TestRail url: {}"
                .format(tr_server))
            raise APIError("Error: Please check user id and password")
Example #17
 def test_articles_skips_unreachable_articles(self):
     from urllib import error
     self.create_patch('argos.core.membrane.feed.extract_entry_data',
                       side_effect=error.URLError('unreachable'))
     articles = feed.articles(self.source)
     self.assertEqual(len(articles), 0)
Example #18
 def __init__(self, url):
     if not is_url(url):
         raise error.URLError(url)
     else:
         self._parsed = urllib.parse.urlparse(url)
Example #19
 def test_returns_false_when_cra_not_running(self, mock_urlopen):
     mock_urlopen.side_effect = url_error.URLError('CRA not running')
     self.assertFalse(server_check.hosted_by_liveserver(CRA_URL))
Example #20
 def mock_urlopen(url, timeout=None):
     raise urlerror.URLError("Mock network error")
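A fake like this is usually wired in with unittest.mock; a self-contained sketch (the test class and method names are illustrative):

    import unittest
    from unittest import mock
    from urllib import error as urlerror, request

    class UrlopenFailureTest(unittest.TestCase):
        @mock.patch('urllib.request.urlopen',
                    side_effect=urlerror.URLError('Mock network error'))
        def test_urlopen_raises(self, mock_urlopen):
            with self.assertRaises(urlerror.URLError):
                request.urlopen('http://example.com')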
Example #21
def hotel_url_content(url):
    ntries = 10
    for _ in range(ntries):
        try:
            response = urlopen(url, timeout=20)
            break
        except error.URLError:
            print('ReTry: %s' % url)
            time.sleep(30)
    else:
        # Loop finished without a successful break: give up with a clear error.
        raise error.URLError('failed to fetch %s after %d tries' % (url, ntries))
    soup = BeautifulSoup(response, "html5lib")

    ## extract the general information
    general_information = json.loads(
        soup.find('script', type="application/ld+json").text)
    #return general_information
    try:
        review_date = general_information['datePublished']
    except Exception:
        try:
            review_date = soup.find('div', {
                'class': "ui_column is-10-desktop is-12-tablet is-12-mobile"
            }).find('span', {'class': "ratingDate"})['title']
        except Exception:
            review_date = 'unknown'
    try:
        title = general_information['name']
    except Exception:
        title = 'unknown'
    try:
        content = general_information['reviewBody']
    except Exception:
        content = 'unknown'
    try:
        overall_rating = int(
            general_information['reviewRating']['ratingValue'])
    except Exception:
        overall_rating = 'unknown'

    ##return review_date, title, content, overall_rating

    ## extract the traveling type and date
    stay_info = soup.find('div', {'class': "recommend-titleInline"}).text
    stay_hardcode = 'Stayed: '
    comma_hardcode = ', '
    if 'travelled' in stay_info:
        stay_date = stay_info[len(stay_hardcode):stay_info.find(',')]
    else:
        stay_date = stay_info[len(stay_hardcode):]
    traveling_type = ''
    traveling_type_table = ['family', 'couple', 'solo', 'business', 'friends']
    for type_ in traveling_type_table:
        if type_ in stay_info:
            traveling_type = type_
            break
    if traveling_type == '':  ## fall back if the traveling type cannot be detected
        traveling_type = 'unknown'
        #try:
        #    print ('type cannot be detected')
        #    traveling_type = stay_info[len('travelled as a '):]
        #except:
        #    traveling_type = stay_info
    ##return stay_date, traveling_type

    ## ranking part
    star_info = soup.findAll(
        'div', {'class': "ui_column is-10-desktop is-12-tablet is-12-mobile"})
    star_info = star_info[0].findAll('li', {'class': "recommend-answer"})

    description = 'ui_bubble_rating bubble_'
    ranking_dict = dict()

    if len(star_info) > 0:  # only when the reviewer provides a rating for each area
        for rate in star_info:
            rate_area = rate.find('div', {
                'class': "recommend-description"
            }).text

            pointer = str(rate).find(description) + len(description)
            try:
                rate_mark = int(str(rate)[pointer:pointer + 2]) / 10
            except Exception:
                rate_mark = str(rate)

            ranking_dict[rate_area] = rate_mark

    ##return ranking_dict

    ## review information
    try:
        reviewer_name = soup.find(
            'div', {
                'class': "prw_rup prw_reviews_member_info_resp_sur"
            }).find('div', {
                'class': "info_text"
            }).find('div').get_text()
    except Exception:
        reviewer_name = 'unknown'
    try:
        reviewer_contributions = int(
            soup.find('div', {
                'class': "prw_rup prw_reviews_member_info_resp_sur"
            }).find('span', {
                'class': 'badgetext'
            }).get_text())
    except Exception:
        reviewer_contributions = 'unknown'

    try:
        reviewer_location = soup.find(
            'div', {
                'class': "prw_rup prw_reviews_member_info_resp_sur"
            }).find('div', {
                'class': "userLoc"
            }).get_text()
    except Exception:
        reviewer_location = 'unknown'

    #return reviewer_name, reviewer_contributions, reviewer_location

    return review_date, title, content, overall_rating, stay_date, traveling_type, ranking_dict, reviewer_name, reviewer_contributions, reviewer_location
Example #22
def rental_url_content(reviewer_url, review_code):
	review_code = 'review_' + review_code

	ntries = 10
	for _ in range(ntries):
		try:
			response = urlopen(reviewer_url, timeout=20)
			break
		except error.URLError:
			print('ReTry: %s' % reviewer_url)
			time.sleep(30)
	else:
		# Loop finished without a successful break: give up with a clear error.
		raise error.URLError('failed to fetch %s after %d tries' % (reviewer_url, ntries))

	soup = BeautifulSoup(response)

	# general information from json
	json_data = json.loads(str(soup.find('script', {'type': 'application/ld+json'}).text))
	title = json_data['name']
	content = json_data['reviewBody']
	overall_rating = json_data['reviewRating']['ratingValue']
	###

	### extract the table for the rest data
	table = soup.find('div', {'id': str(review_code)})
	###

	# review date and stay date
	date = table.find('span', {'class': 'ratingDate'}).get_text().replace('\n', '')

	review_date_message = 'Reviewed '
	stay_date_message = 'for a stay in '

	# str.find does not raise, so guard with explicit checks instead of
	# try/except; this also guarantees stay_date is always defined.
	stay_date = ''
	pointer = date.find(stay_date_message)
	if pointer >= 1:
		stay_date = date[pointer + len(stay_date_message):]

	pointer = date.find(review_date_message)
	if len(stay_date) == 0:
		end_pointer = len(date)
	else:
		end_pointer = date.find(stay_date_message)
	review_date = date[pointer + len(review_date_message):end_pointer]
	###

	# rank table
	ranking_dict = dict()
	try:
		ranking_table = table.find('table', {'class': "vrReviewRatings"}).findAll('td')
		for detail in ranking_table:
			label = detail.find('span', {'class': 'vrReviewRatingLabel'}).get_text()
			rank = ' '.join(detail.select('span[class*="ui_bubble"]')[0].get('class'))
			rank = int((rank[rank.find('ui_bubble_rating bubble_') + len('ui_bubble_rating bubble_'):])) / 10

			ranking_dict[label] = rank
	except Exception:
		pass
	###

	# traveling type
	try:
		traveling_type = 'unknown'
		all_div = table.select('div')
		for div in all_div:
			label = div.find('span', {'class': 'vrReviewItem itemLabel'})
			if label and label.text == "Traveling group:":
				if div.div:
					traveling_type = str(div.div.select('span[class="vrReviewItem"]')[0].text)
					break

	except Exception:
		traveling_type = 'unknown'
	###

	# reviewer data
	try:
		reviewer_name = soup.find('div', {'class': "username mo"}).text.replace('\n', '')
	except Exception:
		reviewer_name = 'unknown'

	try:
		reviewer_location = soup.find('div', {'class': "location"}).text.replace('\n', '')
		if reviewer_location == '':
			reviewer_location = 'unknown'
	except Exception:
		reviewer_location = 'unknown'

	try:
		reviewer_contributions = int(re.sub("[^0-9]", "", soup.find('div', {'class': "reviewerBadge badge"}).span.text))
	except Exception:
		reviewer_contributions = 'unknown'
	###

	return title, content, overall_rating, review_date, stay_date, ranking_dict, traveling_type, reviewer_name, reviewer_contributions, reviewer_location
Example #23
def airline_url_content(url):
    ntries = 10
    for _ in range(ntries):
        try:
            response = urlopen(url, timeout=20)
            break
        except error.URLError:
            print('ReTry: %s' % url)
            time.sleep(30)
    else:
        # Loop finished without a successful break: give up with a clear error.
        raise error.URLError('failed to fetch %s after %d tries' % (url, ntries))
    soup = BeautifulSoup(response, "lxml")

    review_id_pointer_start = url.find('-r') + len('-r')
    review_id_pointer_end = url.find('-', review_id_pointer_start)
    review_id = 'review_' + url[review_id_pointer_start:review_id_pointer_end]

    ## extract the general information
    general_information = json.loads(
        soup.find('script', type="application/ld+json").text)
    # return general_information
    try:
        review_date = general_information['datePublished']
    except Exception:
        try:
            review_date = soup.find('div', {
                'class': "ui_column is-10-desktop is-12-tablet is-12-mobile"
            }).find('span', {'class': "ratingDate"})['title']
        except Exception:
            review_date = 'unknown'

    try:
        title = general_information['name']
    except Exception:
        title = 'unknown'

    try:
        content = general_information['reviewBody']
    except Exception:
        content = 'unknown'

    try:
        overall_rating = int(
            general_information['reviewRating']['ratingValue'])
    except Exception:
        overall_rating = 'unknown'

    #return review_date, title, content, overall_rating

    all_data = soup.find('div', {'id': review_id})
    try:
        stay_date = all_data.find('div', {
            'class': "prw_rup prw_reviews_stay_date_hsx"
        }).text
        # str.replace removes every occurrence at once, and lstrip drops the
        # leading blanks, so the two while-loops are unnecessary.
        stay_date = stay_date.replace('Date of travel: ', '').lstrip(' ')
    except Exception:
        stay_date = ''

    #return review_date, title, content, overall_rating, stay_date

    ## ranking part
    ranking_dict = dict()
    try:
        table = all_data.find_all('li', {'class': "recommend-answer"})
        marker = '<div class="ui_bubble_rating bubble_'
        for rating in table:
            rate = str(rating.select('div[class*="ui_bubble_rating bubble_"]'))
            rate_pointer_start = rate.find(marker) + len(marker)
            rate_pointer_end = rate.find('"></div>', rate_pointer_start)
            rate = rate[rate_pointer_start:rate_pointer_end]
            try:
                rate_mark = int(rate) / 10
            except Exception:
                rate_mark = rate
            label = rating.find('div', {'class': "recommend-description"}).text
            ranking_dict[label] = rate_mark
    except Exception:
        pass

    #return review_date, title, content, overall_rating, stay_date, ranking_dict

    ## review information
    try:
        reviewer_name = all_data.find('div', {'class': "username mo"}).text
    except Exception:
        reviewer_name = 'unknown'

    try:
        reviewer_contributions = all_data.find('span', {
            'class': "badgetext"
        }).text
        try:
            reviewer_contributions = int(reviewer_contributions)
        except Exception:
            pass
    except Exception:
        reviewer_contributions = 'unknown'

    try:
        reviewer_location = soup.find('div', {'class': "location"}).text
    except Exception:
        reviewer_location = 'unknown'

    return review_date, title, content, overall_rating, stay_date, ranking_dict, reviewer_name, reviewer_contributions, reviewer_location
Example #24
def errormessage(n):
    if int(n) == 10:
        return 100, 200
    else:
        raise error.URLError('unexpected value: %s' % n)