Example #1
    def get_form(self, payment, data=None):
        if payment.status == 'waiting':
            payment.change_status('input')
        form = DummyForm(data=data,
                         hidden_inputs=False,
                         provider=self,
                         payment=payment)
        if form.is_valid():
            new_status = form.cleaned_data['status']
            payment.change_status(new_status)
            new_fraud_status = form.cleaned_data['fraud_status']
            payment.change_fraud_status(new_fraud_status)

            gateway_response = form.cleaned_data.get('gateway_response')
            verification_result = form.cleaned_data.get('verification_result')
            if gateway_response or verification_result:
                if gateway_response == '3ds-disabled':
                    # Standard request without 3DSecure
                    pass
                elif gateway_response == '3ds-redirect':
                    # Simulate redirect to 3DS and get back to normal
                    # payment processing
                    process_url = payment.get_process_url()
                    params = urlencode(
                        {'verification_result': verification_result})
                    redirect_url = '%s?%s' % (process_url, params)
                    raise RedirectNeeded(redirect_url)
                elif gateway_response == 'failure':
                    # Gateway raises error (HTTP 500 for example)
                    raise URLError('Oops')
                elif gateway_response == 'payment-error':
                    raise PaymentError('Unsupported operation')

            if new_status in ['preauth', 'confirmed']:
                raise RedirectNeeded(payment.get_success_url())
            raise RedirectNeeded(payment.get_failure_url())
        return form
Example #2
    def test_installs(self, _extract, _mkdir, _check_hash):
        self.fh.download = MagicMock()

        for url in self.valid_urls:
            filename = urlparse(url).path
            dest = os.path.join('foo', 'fetched', os.path.basename(filename))
            _extract.return_value = dest
            with patch.dict('os.environ', {'CHARM_DIR': 'foo'}):
                where = self.fh.install(url, checksum='deadbeef')
            self.fh.download.assert_called_with(url, dest)
            _extract.assert_called_with(dest, None)
            _check_hash.assert_called_with(dest, 'deadbeef', 'sha1')
            self.assertEqual(where, dest)
            _check_hash.reset_mock()

        url = "http://www.example.com/archive.tar.gz"

        self.fh.download.side_effect = URLError('fail')
        with patch.dict('os.environ', {'CHARM_DIR': 'foo'}):
            self.assertRaises(UnhandledSource, self.fh.install, url)

        self.fh.download.side_effect = OSError('fail')
        with patch.dict('os.environ', {'CHARM_DIR': 'foo'}):
            self.assertRaises(UnhandledSource, self.fh.install, url)
Example #3
    def test_request_http_error_retry(self):
        mocked_response = mock.MagicMock(status=200, spec=HTTPResponse)
        mocked_response.read.return_value = b'{ "foo": "bar" }'
        mocked_response.headers = mock.Mock()
        mocked_response.headers.get_charset.return_value = 'utf-8'
        # raise error on first call, then a valid response
        mock_side_effect = [URLError('test error'), mocked_response]

        test_servers = deque(TEST_SERVERS)
        http_client = ElasticsearchRequestController(
            test_servers, TEST_TIMEOUT, None, False, self._mocked_logger)
        # pre-flight check, basically cannot fail but won't hurt
        self.assertEqual(http_client._servers[0], TEST_SERVER_1)

        # test
        with mock.patch.object(http_client, '_url_opener') as mock_url_opener:
            mock_url_opener.open.side_effect = mock_side_effect
            result = http_client.request('/', data=None)

            # http_client._servers is a deque that is rotated after the first error,
            # so we now expect the second server to sit at index 0
            self.assertEqual(http_client._servers[0], TEST_SERVER_2)
            # the retried response should still be parsed correctly, so check it
            self.assertEqual(result, dict(foo='bar'))
Example #4
 def get_all_comments(self):
     """使用多线程抓取评论页"""
     # 当当网要重写这个函数
     self.build_queue()
     if self.urlQueue.qsize() > 10:
         self.use_proxy = True
     if not self.urlQueue.empty():
         threads = []
         # The number of threads can be tuned here to control the crawl speed
         thread_num = self.urlQueue.qsize() // 3 + 1
         if thread_num >= 15:
             thread_num = 15
         if thread_num <= 3:
             thread_num = 3
         for i in range(thread_num):
             t = Thread(target=self.get_comments)
             threads.append(t)
         for t in threads:
             t.start()
         for t in threads:
             # With multiple threads, calling join() on each in turn ensures the main thread exits last and the threads do not block one another
             t.join()
     else:
         raise URLError('The product URL could not be recognized')
Example #5
    def test_open_auklet_url(self):
        url = self.client.base_url + "private/devices/config/"

        with patch('auklet.utils.urlopen') as url_open:
            self.assertIsNotNone(open_auklet_url(url, self.client.apikey))

            url_open.side_effect = HTTPError(url=None,
                                             code=401,
                                             msg=None,
                                             hdrs=None,
                                             fp=None)
            self.assertRaises(AukletConfigurationError,
                              lambda: open_auklet_url(url, self.client.apikey))

            url_open.side_effect = HTTPError(url=None,
                                             code=None,
                                             msg=None,
                                             hdrs=None,
                                             fp=None)
            self.assertRaises(HTTPError,
                              lambda: open_auklet_url(url, self.client.apikey))

            url_open.side_effect = URLError("")
            self.assertIsNone(open_auklet_url(url, self.client.apikey))
Example #6
def parse_redis_url(url):
    from urllib.parse import urlparse, parse_qs
    from urllib.error import URLError

    parsed = urlparse(url)

    if parsed.scheme != 'redis':
        raise URLError('Redis URL does not have the redis scheme')

    path = parsed.path[1:] or ''
    query = parse_qs(parsed.query or '')
    if path:
        db = int(path)
    elif 'db' in query:
        db = int(query['db'][0])
    else:
        db = 0

    options = {'host': parsed.hostname, 'port': parsed.port, 'db': db}

    if parsed.password:
        options['password'] = parsed.password

    return options
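A quick sketch of what parse_redis_url returns for a couple of hypothetical URLs (the values below are illustrative, not part of the original source):

    options = parse_redis_url('redis://:secret@localhost:6379/2')
    # -> {'host': 'localhost', 'port': 6379, 'db': 2, 'password': 'secret'}

    options = parse_redis_url('redis://localhost:6379?db=5')
    # -> {'host': 'localhost', 'port': 6379, 'db': 5}

    # any other scheme raises URLError('Redis URL does not have the redis scheme')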
Example #7
    def _catch_error_request(self, url='', data=None, retrieve=False):
        url = url or self.url

        try:
            response = self.session.open(url, data, 5)
            # checking that the tracker isn't blocked
            if not response.geturl().startswith((self.url, self.url_dl)):
                raise URLError(f"{self.url} is blocked. Try another proxy.")
        except (socket.error, socket.timeout) as err:
            if not retrieve:
                return self._catch_error_request(url, data, True)
            logger.error(err)
            self.error = f"{self.url} is not response! Maybe it is blocked."
            if "no host given" in err.args:
                self.error = "Proxy is bad, try another!"
        except (URLError, HTTPError) as err:
            logger.error(err.reason)
            self.error = err.reason
            if hasattr(err, 'code'):
                self.error = f"Request to {url} failed with status: {err.code}"
        else:
            return response

        return None
Example #8
def download_url_resource(path, output=None):
    """
    Télécharger un fichier à une URL et renvoyer le chemin du fichier local téléchargé

    :param output: Chemin de sortie, fichier dans le répertoire temporaire si None
    :type output: str | None
    """
    if output and os.path.exists(output):
        logging.warning(
            _("The download destination file at {path} already exists. Skipped."
              ).format(path=output))
        return output
    try:
        resource = get_url_resource(path, False, stream=True)
    except (IOError, OSError):
        raise URLError(
            "The resource at {path} cannot be downloaded.".format(path=path),
            path)
    resource_file = NamedTemporaryFile(
        delete=False) if output is None else open(output, 'wb')
    for chunk in resource.iter_content(16384):
        resource_file.write(chunk)
    resource_file.close()
    return resource_file.name
Example #9
def load(url,
         data: bytes = None,
         timeout: float = socket._GLOBAL_DEFAULT_TIMEOUT,
         cafile: str = None,
         capath: str = None,
         cadefault: bool = False,
         context: ssl.SSLContext = None):
    if isinstance(url, urllib.request.Request):
        request = url
    elif isinstance(url, str):
        request = Request(url)
    else:
        raise URLError("Unvalid URL")
    request.add_header("Accept-encoding", "gzip")
    http_cli = __HTTP_CLIENT_CLASS__(request=request,
                                     data=data,
                                     timeout=timeout,
                                     cafile=cafile,
                                     capath=capath,
                                     cadefault=cadefault,
                                     context=context)
    res = http_cli.urlopen()
    txt = http_cli.read_response_body(res)
    return txt, res
Example #10
def buildBingoHintList(boardURL):
    try:
        if len(boardURL) > 256:
            raise URLError(f"URL too large {len(boardURL)}")
        with urllib.request.urlopen(boardURL + "/board") as board:
            if board.length and 0 < board.length < 4096:
                goalList = board.read()
            else:
                raise HTTPError(f"Board of invalid size {board.length}")
    except (URLError, HTTPError) as e:
        logger = logging.getLogger('')
        logger.info(f"Could not retrieve board info. Using default bingo hints instead: {e}")
        genericBingo = read_json(data_path('Bingo/generic_bingo_hints.json'))
        return genericBingo['settings']['item_hints']

    # Goal list returned from Bingosync is a sequential list of all of the goals on the bingo board, starting at top-left and moving to the right.
    # Each goal is a dictionary with attributes for name, slot, and colours. The only one we use is the name
    goalList = [goal['name'] for goal in json.loads(goalList)]
    goalHintRequirements = read_json(data_path('Bingo/bingo_goals.json'))

    hintsToAdd = {}
    for goal in goalList:
        # Using 'get' here ensures some level of forward compatibility, where new goals added to randomiser bingo won't
        # cause the generator to crash (though those hints won't have item hints for them)
        requirements = goalHintRequirements.get(goal,{})
        if len(requirements) != 0:
            for item in requirements:
                hintsToAdd[item] = max(hintsToAdd.get(item, 0), requirements[item]['count'])

    # Items to be hinted need to be included in the item_hints list once for each instance you want hinted
    # (e.g. if you want all three strength upgrades to be hinted it needs to be in the list three times)
    hints = []
    for key, value in hintsToAdd.items():
        for _ in range(value):
            hints.append(key)
    return hints
Example #11
    def _cache(self, path):
        """Cache the file specified by path.

        Creates a copy of the file in the datasource cache.

        """
        # We import these here because importing urllib2 is slow and
        # a significant fraction of numpy's total import time.
        if sys.version_info[0] >= 3:
            from urllib.request import urlopen
            from urllib.error import URLError
        else:
            from urllib2 import urlopen
            from urllib2 import URLError

        upath = self.abspath(path)

        # ensure directory exists
        if not os.path.exists(os.path.dirname(upath)):
            os.makedirs(os.path.dirname(upath))

        # TODO: Doesn't handle compressed files!
        if self._isurl(path):
            try:
                openedurl = urlopen(path)
                f = _open(upath, 'wb')
                try:
                    shutil.copyfileobj(openedurl, f)
                finally:
                    f.close()
                    openedurl.close()
            except URLError:
                raise URLError("URL not found: %s" % path)
        else:
            shutil.copyfile(path, upath)
        return upath
Example #12
    def _get_swagger_spec(self, endpoint_name: str) -> Spec:
        """Get Swagger spec of specified service."""
        logic_module = self._get_logic_module(endpoint_name)
        schema_url = utils.get_swagger_url_by_logic_module(logic_module)

        if schema_url not in self._specs:
            try:
                # Use stored specification of the module
                spec_dict = logic_module.api_specification

                # Pull specification of the module from its service and store it
                if spec_dict is None:
                    response = utils.get_swagger_from_url(schema_url)
                    spec_dict = response.json()
                    logic_module.api_specification = spec_dict
                    logic_module.save()

            except URLError:
                raise URLError(f'Make sure that {schema_url} is accessible.')

            swagger_spec = Spec.from_dict(spec_dict, config=self.SWAGGER_CONFIG)
            self._specs[schema_url] = swagger_spec

        return self._specs[schema_url]
Example #13
    def test_network_loss(self):
        """ Updates should be sent when the network is up again. """
        filename = "networkless file.txt"
        doc_id = self.remote.make_file_with_blob("/", filename,
                                                 b"Initial content.")

        # Download file
        local_path = f"/{doc_id}_file-content/{filename}"

        with patch.object(self.manager_1,
                          "open_local_file",
                          new=open_local_file):
            self.direct_edit._prepare_edit(self.nuxeo_url, doc_id)
            self.wait_sync(timeout=2, fail_if_timeout=False)

            # Simulate server error
            bad_remote = self.get_bad_remote()
            error = URLError(
                "[Errno 10051] (Mock) socket operation was attempted to an unreachable network"
            )
            bad_remote.make_upload_raise(error)

            with patch.object(self.engine_1, "remote", new=bad_remote):
                # Update file content
                self.local.update_content(local_path, b"Updated")
                time.sleep(5)
                self.local.update_content(local_path, b"Updated twice")
                time.sleep(5)
                # The file should not be updated on the server
                assert (self.remote.get_blob(
                    self.remote.get_info(doc_id)) == b"Initial content.")

            # Check the file is reuploaded when the network comes back
            self.wait_sync(timeout=12, fail_if_timeout=False)
            assert (self.remote.get_blob(
                self.remote.get_info(doc_id)) == b"Updated twice")
Example #14
def test_url(url, retry=2, retry_period=1, timeout=10):
    request = Request(url)
    request.get_method = lambda: 'HEAD'

    try:
        response = urlopen(request, timeout=timeout)
    except HTTPError as e:
        if e.code == 503 and retry:
            time.sleep(retry_period)
            return test_url(url,
                            retry=retry - 1,
                            retry_period=retry_period,
                            timeout=timeout)
        e.msg += ' (%s)' % url
        raise
    except URLError as e:
        if isinstance(e.reason, socket.timeout) and retry:
            time.sleep(retry_period)
            return test_url(url,
                            retry=retry - 1,
                            retry_period=retry_period,
                            timeout=timeout)
        raise URLError(str(e) + ' (%s)' % url)
    return response
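A minimal usage sketch (the URL is only an assumption): test_url issues a HEAD request, retrying on HTTP 503 and socket timeouts, and any final failure surfaces as an HTTPError or URLError that names the offending URL.

    try:
        response = test_url('https://example.com/resource', retry=2, retry_period=1)
        print(response.getcode())
    except (HTTPError, URLError) as err:
        print('URL check failed:', err)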
Example #15
def main(args=None):
    ARGS = parse_args(args if args else sys.argv[1:])

    logging.basicConfig(level=logging.INFO)

    # value at first index is of current subreddit, second index is total
    TOTAL, DOWNLOADED, ERRORS, SKIPPED, FAILED = (
        [0, 0], [0, 0], [0, 0], [0, 0], [0, 0])
    PROG_REPORT = [TOTAL, DOWNLOADED, ERRORS, SKIPPED, FAILED]

    # Create the specified directory if it doesn't already exist.
    if not pathexists(ARGS.dir):
        mkdir(ARGS.dir)

    # If a regex has been specified, compile the rule (once)
    RE_RULE = None
    if ARGS.regex:
        RE_RULE = re.compile(ARGS.regex)

    # compile reddit comment url to check if url is one of them
    reddit_comment_regex = re.compile(r'.*reddit\.com\/r\/(.*?)\/comments')

    LAST = ARGS.last

    start_time = None
    ITEM = None

    sort_type = ARGS.sort_type
    if sort_type:
        sort_type = sort_type.lower()

    # check to see if ARGS.subreddit is subreddit or subreddit-list
    if os.path.isfile(
            ARGS.subreddit) and os.path.splitext(ARGS.subreddit)[1] != '':
        ARGS.subreddit_list = ARGS.subreddit

    if ARGS.subreddit_list:
        # ARGS.subreddit_list = ARGS.subreddit_list[0] # can't remember why I did this -jtara1
        subreddit_file = ARGS.subreddit_list
        subreddit_list = parse_subreddit_list(subreddit_file, ARGS.dir)
        if ARGS.verbose:
            print('subreddit_list = %s' % subreddit_list)
    elif not ARGS.subreddit_list:
        subreddit_list = [(ARGS.subreddit, ARGS.dir)]

    # file used to store last reddit id
    log_file = '._history.txt'

    # iterate through subreddit(s)
    for index, section in enumerate(subreddit_list):
        (ARGS.subreddit, ARGS.dir) = section
        FINISHED = False

        if ARGS.verbose:
            print('index: %s, %s, %s' % (index, ARGS.subreddit, ARGS.dir))

        # load last_id or create new entry for last_id in log_data
        log_data, last_id = process_subreddit_last_id(ARGS.subreddit,
                                                      ARGS.sort_type, ARGS.dir,
                                                      log_file, ARGS.dir)

        if ARGS.restart:
            last_id = ''

        TOTAL[0], DOWNLOADED[0], ERRORS[0], SKIPPED[0], FAILED[0] = 0, 0, 0, 0, 0
        FILECOUNT = 0
        # ITEMS loop - begin the loop to get reddit submissions & download media from them
        while not FINISHED:
            if ARGS.verbose:
                print()
            ITEMS = getitems(ARGS.subreddit,
                             multireddit=ARGS.multireddit,
                             previd=last_id,
                             reddit_sort=sort_type)
            # debug ITEMS variable value
            # if ARGS.verbose:
            #    history_log(os.getcwd(), 'ITEMS.txt', 'write', ITEMS)

            # measure time and set the program to wait 4 second between request
            # as per reddit api guidelines
            end_time = time.process_time()

            if start_time is not None:
                elapsed_time = end_time - start_time

                if elapsed_time <= 4:  # throttling
                    time.sleep(4 - elapsed_time)

            start_time = time.process_time()

            # No more items to process
            if not ITEMS:
                if ARGS.verbose:
                    print('No more ITEMS for %s %s' %
                          (ARGS.subreddit, ARGS.sort_type))
                break

            for ITEM in ITEMS:
                TOTAL[0] += 1

                if ('reddit.com/r/' + ARGS.subreddit + '/comments/'
                        in ITEM['url'] or re.match(reddit_comment_regex,
                                                   ITEM['url']) is not None):
                    # hotfix for when last item is comment submission which caused infinite looping
                    last_id = ITEM['id'] if ITEM is not None else None
                    if last_id:
                        log_data[ARGS.subreddit][
                            ARGS.sort_type]['last-id'] = last_id
                        history_log(ARGS.dir,
                                    log_file,
                                    mode='write',
                                    write_data=log_data)
                    continue

                # don't download if url is reddit metrics url
                if 'redditmetrics.com' in ITEM['url']:
                    if ARGS.verbose:
                        print('\t%s was skipped.' % ITEM['url'])

                    SKIPPED[0] += 1
                    continue

                if ITEM['score'] < ARGS.score:
                    if ARGS.verbose:
                        print('    SCORE: {} has score of {}, which is lower'
                              ' than the required score of {}.'.format(
                                  ITEM['id'], ITEM['score'], ARGS.score))

                    SKIPPED[0] += 1
                    continue
                elif ARGS.sfw and ITEM['over_18']:
                    if ARGS.verbose:
                        print('    NSFW: %s is marked as NSFW.' % (ITEM['id']))

                    SKIPPED[0] += 1
                    continue
                elif ARGS.nsfw and not ITEM['over_18']:
                    if ARGS.verbose:
                        print('    Not NSFW, skipping %s' % (ITEM['id']))

                    SKIPPED[0] += 1
                    continue
                elif ARGS.regex and not re.match(RE_RULE, ITEM['title']):
                    if ARGS.verbose:
                        print('    Regex match failed')

                    SKIPPED[0] += 1
                    continue
                elif ARGS.skipAlbums and 'imgur.com/a/' in ITEM['url']:
                    if ARGS.verbose:
                        print('    Album found, skipping %s' % (ITEM['id']))

                    SKIPPED[0] += 1
                    continue

                if ARGS.title_contain and ARGS.title_contain.lower(
                ) not in ITEM['title'].lower():
                    if ARGS.verbose:
                        print('    Title does not contain "{}",'
                              ' skipping {}'.format(ARGS.title_contain,
                                                    ITEM['id']))

                    SKIPPED[0] += 1
                    continue

                try:
                    URLS = extract_urls(ITEM['url'])
                except URLError as e:
                    print('URLError %s' % e)
                    continue
                except Exception as e:
                    _log.exception("%s", e)
                    continue
                for URL in URLS:
                    try:
                        # Find gfycat if requested
                        if URL.endswith('gif') and ARGS.mirror_gfycat:
                            check = gfycat().check(URL)
                            if check.get("urlKnown"):
                                URL = check.get('webmUrl')

                        # Trim any http query off end of file extension.
                        FILEEXT = pathsplitext(URL)[1]
                        if '?' in FILEEXT:
                            FILEEXT = FILEEXT[:FILEEXT.index('?')]

                        # Only append numbers if more than one file
                        FILENUM = ('_%d' % FILECOUNT if len(URLS) > 1 else '')

                        # create filename based on given input from user
                        if ARGS.filename_format == 'url':
                            FILENAME = '%s%s%s' % (pathsplitext(
                                pathbasename(URL))[0], '', FILEEXT)
                        elif ARGS.filename_format == 'title':
                            FILENAME = '%s%s%s' % (slugify(
                                ITEM['title']), FILENUM, FILEEXT)

                            if len(FILENAME) >= 256:
                                shortened_item_title = slugify(
                                    ITEM['title'])[:256 - len(FILENAME)]
                                FILENAME = '%s%s%s' % (shortened_item_title,
                                                       FILENUM, FILEEXT)
                        else:
                            FILENAME = '%s%s%s' % (ITEM['id'], FILENUM,
                                                   FILEEXT)

                        # join file with directory
                        FILEPATH = pathjoin(ARGS.dir, FILENAME)

                        # Improve debuggability list URL before download too.
                        # url may be wrong so skip that
                        if URL in ('', 'http://'):
                            raise URLError('Url is empty')

                        # Download the image
                        try:
                            dl = skp = 0
                            if 'imgur.com' in URL:
                                fname = os.path.splitext(FILENAME)[0]
                                save_path = os.path.join(os.getcwd(), ARGS.dir)
                                downloader = ImgurDownloader(URL,
                                                             save_path,
                                                             fname,
                                                             delete_dne=True,
                                                             debug=False)
                                (dl, skp) = downloader.save_images()
                            else:
                                download_from_url(URL, FILEPATH)
                                dl = 1
                            # Image downloaded successfully!
                            if ARGS.verbose:
                                print('Saved %s as %s' % (URL, FILENAME))
                            DOWNLOADED[0] += 1
                            SKIPPED[0] += skp
                            FILECOUNT += 1
                        except URLError:
                            print('We do not support reddituploads links yet,'
                                  ' skipping....')
                        except FileExistsException as ERROR:
                            ERRORS[0] += 1
                            if ARGS.verbose:
                                print(ERROR.message)
                            if ARGS.update:
                                print('    Update complete, exiting.')
                                FINISHED = True
                                break
                        except ImgurException as e:
                            ERRORS[0] += 1
                        except Exception as e:
                            print(e)
                            ERRORS[0] += 1

                        if ARGS.num and (DOWNLOADED[0]) >= ARGS.num:
                            print('    Download num limit reached, exiting.')
                            FINISHED = True
                            break

                    except WrongFileTypeException as ERROR:
                        _log_wrongtype(url=URL,
                                       target_dir=ARGS.dir,
                                       filecount=FILECOUNT,
                                       _downloaded=DOWNLOADED[0],
                                       filename=FILENAME)
                        SKIPPED[0] += 1
                    except HTTPError as ERROR:
                        FAILED[0] += 1
                    except URLError as ERROR:
                        FAILED[0] += 1
                    except InvalidURL as ERROR:
                        FAILED[0] += 1
                    except Exception as exc:
                        FAILED[0] += 1

                # keep track of last_id id downloaded
                last_id = ITEM['id'] if ITEM is not None else None
                if last_id:
                    log_data[ARGS.subreddit][
                        ARGS.sort_type]['last-id'] = last_id
                    history_log(ARGS.dir,
                                log_file,
                                mode='write',
                                write_data=log_data)

                # break out of URL loop to end of ITEMS loop
                if FINISHED:
                    break

            # update variables in PROG_REPORT in SUBREDDIT loop
            for var in PROG_REPORT:
                var[1] += var[0]

    print('Downloaded from %i reddit submissions' % (DOWNLOADED[1]))
    print('(Processed %i, Skipped %i, Errors %i)' %
          (TOTAL[1], SKIPPED[1], ERRORS[1]))

    return DOWNLOADED[1]
Example #16
 def get(self, name, context=None, **kws):
     """See `ITemplateLoader`."""
     # Gather some additional information based on the context.
     substitutions = {}
     if IMailingList.providedBy(context):
         mlist = context
         domain = context.domain
         lookup_contexts = [
             mlist.list_id,
             mlist.mail_host,
             None,
         ]
         substitutions.update(
             dict(
                 list_id=mlist.list_id,
                 # For backward compatibility, we call this $listname.
                 listname=mlist.fqdn_listname,
                 domain_name=domain.mail_host,
                 language=mlist.preferred_language.code,
             ))
     elif IDomain.providedBy(context):
         mlist = None
         domain = context
         lookup_contexts = [
             domain.mail_host,
             None,
         ]
         substitutions['domain_name'] = domain.mail_host
     elif context is None:
         mlist = domain = None
         lookup_contexts = [None]
     else:
         raise ValueError('Bad context type: {!r}'.format(context))
     # The passed in keyword arguments take precedence.
     substitutions.update(kws)
     # See if there's a cached template registered for this name and
     # context, passing in the url substitutions.  This handles http:,
     # https:, and file: urls.
     for lookup_context in lookup_contexts:
         try:
             contents = getUtility(ITemplateManager).get(
                 name, lookup_context, **substitutions)
         except (HTTPError, URLError):
             pass
         else:
             if contents is not None:
                 return contents
     # Fallback to searching within the source code.
     code = substitutions.get('language', config.mailman.default_language)
     # Find the template, mutating any missing template exception.
     missing = object()
     default_uri = ALL_TEMPLATES.get(name, missing)
     if default_uri is None:
         return ''
     elif default_uri is missing:
         raise URLError('No such file')
     path, fp = find(default_uri, mlist, code)
     try:
         return fp.read()
     finally:
         fp.close()
Example #17
def get(url, **kws):
    parsed = urlparse(url)
    if parsed.scheme in ('http', 'https'):
        response = requests.get(url, **kws)
        response.raise_for_status()
        return response.text
    if parsed.scheme == 'file':
        mode = kws.pop('mode', 'r')
        arguments = dict(mode=mode)
        if 'encoding' in kws or 'b' not in mode:
            arguments['encoding'] = kws.pop('encoding', 'utf-8')
        if len(kws) > 0:
            raise ValueError('Unexpected arguments: {}'.format(
                COMMASPACE.join(sorted(kws))))
        with open(parsed.path, **arguments) as fp:
            return fp.read()
    if parsed.scheme == 'mailman':
        mlist = code = None
        if len(kws) > 0:
            raise ValueError('Unexpected arguments: {}'.format(
                COMMASPACE.join(sorted(kws))))
        # The path can contain one, two, or three components.  Since no empty
        # path components are legal, filter them out.
        parts = [p for p in parsed.path.split('/') if p]
        if len(parts) == 0:
            raise URLError('No template specified')
        elif len(parts) == 1:
            template = parts[0]
        elif len(parts) == 2:
            part0, template = parts
            # Is part0 a language code or a mailing list?  This is rather
            # tricky because if it's a mailing list, it could be a list-id and
            # that will contain dots, as could the language code.
            language = getUtility(ILanguageManager).get(part0)
            if language is None:
                list_manager = getUtility(IListManager)
                # part0 must be a fqdn-listname or list-id.
                mlist = (list_manager.get(part0) if '@' in part0 else
                         list_manager.get_by_list_id(part0))
                if mlist is None:
                    raise URLError('Bad language or list name')
            else:
                code = language.code
        elif len(parts) == 3:
            part0, code, template = parts
            # part0 could be an fqdn-listname or a list-id.
            mlist = (getUtility(IListManager).get(part0) if '@' in part0 else
                     getUtility(IListManager).get_by_list_id(part0))
            if mlist is None:
                raise URLError('Missing list')
            language = getUtility(ILanguageManager).get(code)
            if language is None:
                raise URLError('No such language')
            code = language.code
        else:
            raise URLError('No such file')
        # Find the template, mutating any missing template exception.
        try:
            path, fp = find(template, mlist, code)
        except TemplateNotFoundError:
            raise URLError('No such file')
        try:
            return fp.read()
        finally:
            fp.close()
    raise URLError(url)
Example #18
def _get_slice_data(slc: Slice, delivery_type: EmailDeliveryType,
                    session: Session) -> ReportContent:
    slice_url = _get_url_path("Superset.explore_json",
                              csv="true",
                              form_data=json.dumps({"slice_id": slc.id}))

    # URL to include in the email
    slice_url_user_friendly = _get_url_path("Superset.slice",
                                            slice_id=slc.id,
                                            user_friendly=True)

    # Login on behalf of the "reports" user in order to get cookies to deal with auth
    auth_cookies = machine_auth_provider_factory.instance.get_auth_cookies(
        get_reports_user(session))
    # Build something like "session=cool_sess.val;other-cookie=awesome_other_cookie"
    cookie_str = ";".join(
        [f"{key}={val}" for key, val in auth_cookies.items()])

    opener = urllib.request.build_opener()
    opener.addheaders.append(("Cookie", cookie_str))
    response = opener.open(slice_url)
    if response.getcode() != 200:
        raise URLError(response.getcode())

    # TODO: Move to the csv module
    content = response.read()
    rows = [r.split(b",") for r in content.splitlines()]

    if delivery_type == EmailDeliveryType.inline:
        data = None

        # Parse the csv file and generate HTML
        columns = rows.pop(0)
        with app.app_context():
            body = render_template(
                "superset/reports/slice_data.html",
                columns=columns,
                rows=rows,
                name=slc.slice_name,
                link=slice_url_user_friendly,
            )

    elif delivery_type == EmailDeliveryType.attachment:
        data = {__("%(name)s.csv", name=slc.slice_name): content}
        body = __(
            '<b><a href="%(url)s">Explore in Superset</a></b><p></p>',
            name=slc.slice_name,
            url=slice_url_user_friendly,
        )

    # how to: https://api.slack.com/reference/surfaces/formatting
    slack_message = __(
        """
        *%(slice_name)s*\n
        <%(slice_url_user_friendly)s|Explore in Superset>
        """,
        slice_name=slc.slice_name,
        slice_url_user_friendly=slice_url_user_friendly,
    )

    return ReportContent(body, data, None, slack_message, content)
Example #19
 def test_facebook_failed_request(self):
     config = {'side_effect': URLError(reason="Testing error handling")}
     with patch.object(urllib.request, 'urlopen', **config):
         self.assertRaises(EmbedNotFoundException,
                           FacebookOEmbedFinder().find_embed,
                           "https://fb.watch/ABC123eew/")
Example #20
 def mocked_get_addr_info_failed(self, request):
     raise URLError('Getaddrinfo failed')
Example #21
 def mocked_connection_time_out(self, request):
     raise URLError('Connection timed out')
Example #22
 def mock_urlopen(*args, **kwargs):
     raise URLError("http://foo")
Example #23
            },
            "files": [
                {
                    "path": "src/main.c",
                    "status": "modified",
                    "additions": 1,
                    "deletions": 1
                }
            ]
        }
    }''')),  # valid response
    ('CVE-1970-1000', 200, b'', None, io.BytesIO(b'')),  # empty response
    ('CVE-1970-1000', 404, b'vcs not found',
     HTTPError('/', 404, 'vcs not found', {},
               io.StringIO('vcs not found')), None),  # not found
    ('CVE-1970-1000', 400, b'something wrong', URLError('something wrong'),
     None),  # url error
    ('CVE-1970-1500', 404, b'not found', None, 'None'),  # no commit
    ('CVE-1970-9000', 404, b'not found', None, None),  # cve not found
    ('9999', 404, b'not found', None, None),  # id not found
    ('Invalid ID', 404, b'not found', None, None),  # invalid id
]


@pytest.mark.integration
@pytest.mark.parametrize("input_id,expected_status,expected_content",
                         VULN_VIEW_VARIANTS)
def test_view_vulnerability(client, input_id, expected_status,
                            expected_content):
    resp = client.get('/' + input_id)
    assert resp.status_code == expected_status
Example #24

# Test `rhessi.get_base_url()`


@mock.patch('sunpy.net.dataretriever.sources.rhessi.urlopen',
            return_value=None)
def test_get_base_url(mock_urlopen):
    """
    Success case, can successfully 'ping' first data_server
    """
    assert rhessi.get_base_url() == rhessi.data_servers[0]


@mock.patch('sunpy.net.dataretriever.sources.rhessi.urlopen',
            side_effect=URLError(''))
def test_get_base_url_on_urlerror(mock_urlopen):
    """
    If all tested URLs raise `URLError`, then raise an `IOError`
    """
    with pytest.raises(IOError):
        rhessi.get_base_url()


@mock.patch('sunpy.net.dataretriever.sources.rhessi.urlopen',
            side_effect=socket.timeout)
def test_get_base_url_on_timeout(mock_urlopen):
    """
    If all tested data servers timeout, then raise an `IOError`
    """
    with pytest.raises(IOError):
        rhessi.get_base_url()
Example #25
 def test_oembed_invalid_request(self):
     config = {'side_effect': URLError('foo')}
     with patch.object(urllib.request, 'urlopen', **config):
         self.assertRaises(EmbedNotFoundException,
                           OEmbedFinder().find_embed,
                           "http://www.youtube.com/watch/")
Example #26
 def dummy_urlretrieve(url, local_path):
     local_path.touch()
     raise URLError("Test")
Example #27
 def test_instagram_failed_request(self):
     config = {'side_effect': URLError(reason="Testing error handling")}
     with patch.object(urllib.request, 'urlopen', **config):
         self.assertRaises(EmbedNotFoundException,
                           InstagramOEmbedFinder().find_embed,
                           "https://instagr.am/p/CHeRxmnDSYe/")
Example #28
class TestImageLoader(unittest.TestCase):
    def setUp(self) -> None:
        self.image_loader = ImageLoader()

        self.im_paths = [image_name_to_path(f) for f in TEST_IMAGES]
        self.im_url = "http://file.biolab.si/images/bone-healing/D14/D14-" \
                      "0401-11-L1-inj-1-0016-m1.jpg"

    def test_load_images(self) -> None:
        image = self.image_loader.load_image_or_none(self.im_paths[0])
        self.assertTrue(isinstance(image, Image))

        image = self.image_loader.load_image_or_none(self.im_paths[0],
                                                     target_size=(255, 255))
        self.assertTrue(isinstance(image, Image))
        self.assertTupleEqual((255, 255), image.size)

    def test_load_images_url(self) -> None:
        """
        Handle loading images from http, https type urls
        """
        image = self.image_loader.load_image_or_none(self.im_url)
        self.assertTrue(isinstance(image, Image))

        image = self.image_loader.load_image_or_none(self.im_paths[0],
                                                     target_size=(255, 255))
        self.assertTrue(isinstance(image, Image))
        self.assertTupleEqual((255, 255), image.size)

        # invalid urls are handled gracefully by returning None
        image = self.image_loader.load_image_or_none(self.im_url + "a")
        self.assertIsNone(image)

    @patch("requests.sessions.Session.get", side_effect=RequestException)
    def test_load_images_url_request_exception(self, _) -> None:
        """
        Handle loading images from http, https type urls
        """
        image = self.image_loader.load_image_or_none(self.im_url)
        self.assertIsNone(image)

    @patch("orangecontrib.imageanalytics.utils.embedder_utils.urlopen",
           return_value=image_name_to_path(TEST_IMAGES[0]))
    def test_load_images_ftp(self, _) -> None:
        """
        Handle loading images from ftp, data type urls. Since we do not have
        a ftp source we just change path to local path.
        """
        image = self.image_loader.load_image_or_none("ftp://abcd")
        self.assertTrue(isinstance(image, Image))

        image = self.image_loader.load_image_or_none(self.im_paths[0],
                                                     target_size=(255, 255))
        self.assertTrue(isinstance(image, Image))
        self.assertTupleEqual((255, 255), image.size)

    @patch("orangecontrib.imageanalytics.utils.embedder_utils.urlopen",
           side_effect=URLError("wrong url"))
    def test_load_images_ftp_error(self, _) -> None:
        """
        Handle loading images from ftp, data type urls. Since we do not have
        a ftp source we just change path to local path.
        """
        image = self.image_loader.load_image_or_none("ftp://abcd")
        self.assertIsNone(image)

    def test_load_image_bytes(self) -> None:
        for image in self.im_paths:
            image_bytes = self.image_loader.load_image_bytes(image)
            self.assertTrue(isinstance(image_bytes, bytes))

        # one with wrong path to get none
        image_bytes = self.image_loader.load_image_bytes(self.im_paths[0] +
                                                         "a")
        self.assertIsNone(image_bytes)

    @patch("PIL.Image.Image.convert", side_effect=ValueError())
    def test_unsuccessful_convert_to_RGB(self, _) -> None:
        image = self.image_loader.load_image_or_none(self.im_paths[2])
        self.assertIsNone(image)
Example #29
        soup = BeautifulSoup(html, 'html.parser')
        # Column 6, odd total
        total_odd = soup.find(id='mainidofrow6idofcell3').getText().replace(',', '.')
        # Column 6, even total
        total_even = soup.find(id='mainidofrow12idofcell3').getText().replace(',', '.')
        # Column 4, TOTAL
        total_4 = soup.find(id='mainidofrow18idofcell1').getText().replace(',', '.')
        # Column 5, TOTAL
        total_5 = soup.find(id='mainidofrow18idofcell2').getText().replace(',', '.')
        # Column 7, TOTAL
        total_7 = soup.find(id='mainidofrow18idofcell4').getText().replace(',', '.')
        # Column 9, TOTAL
        total_9 = soup.find(id='mainidofrow18idofcell6').getText().replace(',', '.')
except URLError as e:
    print(f"URLErorr e={e}")
    raise URLError(e)
    # todo: продумать логирование ошибки, мониторинг и реакцию на нее (если страницу не удалось загрузить)

# write the data to excel
try:
    wb = load_workbook(os.path.join(CURRENT_DIR, XLSX_TEMPLATE))
    sheet = wb['trains']
    row = 1
    # find the first empty row
    while row < XLSX_MAX_ROW:
        value = sheet.cell(row=row, column=1).value
        if not value:
            break
        row += 1
    if row < XLSX_MAX_ROW:
        sheet.cell(row=row, column=1).value = NOW
Example #30
 def test_client_had_exception(self):
     self.assertSendsEmail(
         {'body': self.test_client_had_exception.__name__},
         success=False,
         exception=URLError('sendgrid error'))