Example #1
def ConfigureHostnames(config, external_hostname = None):
  """This configures the hostnames stored in the config."""
  if not external_hostname:
    try:
      external_hostname = socket.gethostname()
    except (OSError, IOError):
      print("Sorry, we couldn't guess your hostname.\n")

    external_hostname = RetryQuestion(
        "Please enter your hostname e.g. "
        "grr.example.com", "^[\\.A-Za-z0-9-]+$", external_hostname)

  print("""\n\n-=Server URL=-
The Server URL specifies the URL that the clients will connect to
communicate with the server. For best results this should be publicly
accessible. By default this will be port 8080 with the URL ending in /control.
""")
  frontend_url = RetryQuestion("Frontend URL", "^http://.*/$",
                               "http://%s:8080/" % external_hostname)
  config.Set("Client.server_urls", [frontend_url])

  frontend_port = urlparse.urlparse(frontend_url).port or grr_config.CONFIG.Get(
      "Frontend.bind_port")
  config.Set("Frontend.bind_port", frontend_port)

  print("""\n\n-=AdminUI URL=-:
The UI URL specifies where the Administrative Web Interface can be found.
""")
  ui_url = RetryQuestion("AdminUI URL", "^http[s]*://.*$",
                         "http://%s:8000" % external_hostname)
  config.Set("AdminUI.url", ui_url)
  ui_port = urlparse.urlparse(ui_url).port or grr_config.CONFIG.Get(
      "AdminUI.port")
  config.Set("AdminUI.port", ui_port)
Example #2
    def __init__(self, uri, transport=None, encoding=None,
                 verbose=False, allow_none=False, use_datetime=False):
        parsed_url = urlparse(uri)
        self.__host = uri if parsed_url.scheme else None
        self.__handler = parsed_url.path
        if not self.__handler:
            self.__handler = '/'

        if not transport:
            transport = SCGITransport(use_datetime=use_datetime)
        self.__transport = transport
        self.__encoding = encoding or 'utf-8'
        self.__verbose = verbose
        self.__allow_none = allow_none
Example #3
def get_udp_seeds(url, info_hash):
    parsed_url = urlparse(url)
    try:
        port = parsed_url.port
    except ValueError:
        log.error('UDP Port Error, url was %s' % url)
        return 0

    log.debug('Checking for seeds from %s' % url)

    connection_id = 0x41727101980  # connection id is always this
    transaction_id = randrange(1, 65535)  # Random Transaction ID creation

    if port is None:
        log.error('UDP Port Error, port was None')
        return 0

    if port < 0 or port > 65535:
        log.error('UDP Port Error, port was %s' % port)
        return 0

    # Create the socket
    try:
        clisocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        clisocket.settimeout(5.0)
        clisocket.connect((parsed_url.hostname, port))

        # build packet with connection_ID, using 0 value for action, giving our transaction ID for this packet
        packet = struct.pack(b">QLL", connection_id, 0, transaction_id)
        clisocket.send(packet)

        # read 16 bytes (">LLQ" = 16 bytes) to match the unpack format below
        res = clisocket.recv(16)
        # check received packet for response
        action, transaction_id, connection_id = struct.unpack(b">LLQ", res)

        # build packet hash out of decoded info_hash
        packet_hash = binascii.unhexlify(info_hash)

        # construct scrape packet from the decoded info_hash, setting the action field to 2 (scrape)
        packet = struct.pack(b">QLL", connection_id, 2, transaction_id) + packet_hash

        clisocket.send(packet)
        # set receive size of 8 + 12 bytes
        res = clisocket.recv(20)

    except IOError as e:
        log.warning('Socket Error: %s', e)
        return 0
    # Check for UDP error packet
    (action,) = struct.unpack(b">L", res[:4])
    if action == 3:
        log.error('There was a UDP Packet Error 3')
        return 0

    # first 8 bytes are followed by seeders, completed and leechers for requested torrent
    seeders, completed, leechers = struct.unpack(b">LLL", res[8:20])
    log.debug('get_udp_seeds is returning: %s', seeders)
    clisocket.close()
    return seeders
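The packet sizes above follow directly from the struct format strings; a quick standalone sanity check (standard library only):

import struct

# connect request ">QLL" is 8 + 4 + 4 bytes; the connect response ">LLQ"
# is 4 + 4 + 8 bytes; the seeders/completed/leechers triple ">LLL" is 12 bytes
assert struct.calcsize(b">QLL") == 16
assert struct.calcsize(b">LLQ") == 16
assert struct.calcsize(b">LLL") == 12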
Example #4
    def test_verification_key_is_valid_with_redirect_url_set(self):
        data = _profile_data()
        self._create_user_using_profiles_endpoint(data)

        view = UserProfileViewSet.as_view({'get': 'verify_email'})
        rp = RegistrationProfile.objects.get(
            user__username=data.get('username')
        )
        _data = {
            'verification_key': rp.activation_key,
            'redirect_url': 'http://red.ir.ect'
        }
        request = self.factory.get('/', data=_data)
        response = view(request)

        self.assertEqual(response.status_code, 302)
        self.assertIn('is_email_verified', response.url)
        self.assertIn('username', response.url)

        string_query_params = urlparse(response.url).query
        dict_query_params = parse_qs(string_query_params)
        self.assertEqual(dict_query_params.get(
            'is_email_verified'), ['True'])
        self.assertEqual(
            dict_query_params.get('username'),
            [data.get('username')]
        )

        up = UserProfile.objects.get(user__username=data.get('username'))
        self.assertIn('is_email_verified', up.metadata)
        self.assertTrue(up.metadata.get('is_email_verified'))
Example #5
    def single_request(self, host, handler, request_body, verbose=0):
        # Add SCGI headers to the request.
        headers = [('CONTENT_LENGTH', native_str(len(request_body))), ('SCGI', '1')]
        header = '\x00'.join(['%s\x00%s' % (key, value) for key, value in headers]) + '\x00'
        header = '%d:%s' % (len(header), header)
        request_body = '%s,%s' % (header, request_body)

        sock = None

        try:
            if host:
                parsed_host = urlparse(host)
                host = parsed_host.hostname
                port = parsed_host.port

                addr_info = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM)
                sock = socket.socket(*addr_info[0][:3])
                sock.connect(addr_info[0][4])
            else:
                sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                sock.connect(handler)

            self.verbose = verbose

            sock.sendall(request_body.encode())

            return self.parse_response(sock.makefile())
        finally:
            if sock:
                sock.close()
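The header block built at the top of this method is a netstring; its framing is easy to verify in isolation. A minimal sketch, assuming a hypothetical 5-byte body 'hello':

headers = [('CONTENT_LENGTH', '5'), ('SCGI', '1')]
header = '\x00'.join('%s\x00%s' % (key, value) for key, value in headers) + '\x00'
# 24 bytes of NUL-separated headers, framed as "<length>:<headers>," plus the body
assert len(header) == 24
assert '%d:%s' % (len(header), header) + ',hello' == '24:CONTENT_LENGTH\x005\x00SCGI\x001\x00,hello'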
Example #6
  def testEmailCronJobApprovalRequestLinkLeadsToACorrectPage(self):
    job_name = self._CreateOSBreakDownCronJobApproval()

    self.RequestCronJobApproval(
        job_name,
        reason=self.APPROVAL_REASON,
        approver=self.GRANTOR_USERNAME,
        requestor=self.token.username)

    self.assertEqual(len(self.messages_sent), 1)
    message = self.messages_sent[0]

    self.assertIn(self.APPROVAL_REASON, message)
    self.assertIn(self.token.username, message)
    self.assertIn("OSBreakDown", message)

    # Extract link from the message text and open it.
    m = re.search(r"href='(.+?)'", message, re.MULTILINE)
    link = urlparse.urlparse(m.group(1))
    self.Open(link.path + "?" + link.query + "#" + link.fragment)

    # Check that requestor's username and reason are correctly displayed.
    self.WaitUntil(self.IsTextPresent, self.token.username)
    self.WaitUntil(self.IsTextPresent, self.APPROVAL_REASON)
    # Check that host information is displayed.
    self.WaitUntil(self.IsTextPresent, cron_system.OSBreakDown.__name__)
    self.WaitUntil(self.IsTextPresent, "Frequency")
Example #7
    def url_rewrite(self, task, entry):
        log.debug('Requesting %s' % entry['url'])
        page = requests.get(entry['url'])
        soup = get_soup(page.text)

        for link in soup.findAll('a', attrs={'href': re.compile(r'^/url')}):
            # Extract correct url from google internal link
            href = 'http://google.com' + link['href']
            args = parse_qs(urlparse(href).query)
            href = args['q'][0]

            # Test if entry with this url would be recognized by some urlrewriter
            log.trace('Checking if %s is known by some rewriter' % href)
            fake_entry = {'title': entry['title'], 'url': href}
            urlrewriting = plugin.get_plugin_by_name('urlrewriting')
            if urlrewriting['instance'].url_rewritable(task, fake_entry):
                log.debug('--> rewriting %s (known url pattern)' % href)
                entry['url'] = href
                return
            else:
                log.debug('<-- ignoring %s (unknown url pattern)' % href)
        raise UrlRewritingError('Unable to resolve')
Example #8
    def url_rewrite(self, task, entry):
        url = entry['url']
        page = None
        for (scheme, netloc) in EZTV_MIRRORS:
            try:
                _, _, path, params, query, fragment = urlparse(url)
                url = urlunparse((scheme, netloc, path, params, query, fragment))
                page = task.requests.get(url).content
            except RequestException:
                log.debug('Eztv mirror `%s` seems to be down', url)
                continue
            break

        if not page:
            raise UrlRewritingError('No mirrors found for url %s' % entry['url'])

        log.debug('Eztv mirror `%s` chosen', url)
        try:
            soup = get_soup(page)
            mirrors = soup.find_all('a', attrs={'class': re.compile(r'download_\d')})
        except Exception as e:
            raise UrlRewritingError(e)

        log.debug('%d torrent mirrors found', len(mirrors))

        if not mirrors:
            raise UrlRewritingError('Unable to locate download link from url %s' % url)

        entry['urls'] = [m.get('href') for m in mirrors]
        entry['url'] = mirrors[0].get('href')
Example #9
 def _parse_urls(self, urls):
   parsed_urls = [urlparse(url) for url in urls]
   for parsed_url in parsed_urls:
     if parsed_url.scheme not in self.SUPPORTED_PROTOCOLS:
       raise InvalidRESTfulCacheProtoError(
         'RESTfulArtifactCache only supports HTTP(S). Found: {0}'.format(parsed_url.scheme))
   return parsed_urls
Example #10
 def program_id_from_url(self, url):
     parsed = urlparse(url)
     query_dict = parse_qs(parsed.query)
     if query_dict.get('_c'):
         return query_dict.get('_c')[0]
     else:
         return parsed.path.split('/')[-1]
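A short sketch of the two branches, using made-up URLs:

from urllib.parse import urlparse, parse_qs

# a _c query parameter takes precedence...
parsed = urlparse('https://example.com/watch?_c=1234')
assert parse_qs(parsed.query)['_c'][0] == '1234'
# ...otherwise the last path segment is used
assert urlparse('https://example.com/ohjelmat/abc-123').path.split('/')[-1] == 'abc-123'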
Example #11
def _domains_base_url(url):
    url_parts = urlparse(url)

    return "{scheme}://{domain}/".format(
        scheme=url_parts.scheme,
        domain=url_parts.netloc
    )
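A hedged usage example (hypothetical URL, assuming urlparse comes from urllib.parse); note that netloc keeps any explicit port:

assert _domains_base_url('https://example.com:8443/some/page?q=1') == 'https://example.com:8443/'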
Example #12
    def process_invalid_content(self, task, data, url):
        """If feedparser reports error, save the received data and log error."""

        if data is None:
            log.critical('Received empty page - no content')
            return
        else:
            data = tobytes(data)

        ext = 'xml'
        if b'<html>' in data.lower():
            log.critical('Received content is HTML page, not an RSS feed')
            ext = 'html'
        if b'login' in data.lower() or b'username' in data.lower():
            log.critical('Received content looks a bit like login page')
        if b'error' in data.lower():
            log.critical('Received content looks a bit like error page')
        received = os.path.join(task.manager.config_base, 'received')
        if not os.path.isdir(received):
            os.mkdir(received)
        filename = task.name
        sourcename = urlparse(url).netloc
        if sourcename:
            filename += '-' + sourcename
        filename = pathscrub(filename, filename=True)
        filepath = os.path.join(received, '%s.%s' % (filename, ext))
        with open(filepath, 'wb') as f:
            f.write(data)
        log.critical('I have saved the invalid content to %s for you to view', filepath)
Example #13
 def test_enter_data_redir(self):
     if not self._running_enketo():
         raise SkipTest
     with HTTMock(enketo_mock):
         factory = RequestFactory()
         request = factory.get("/")
         request.user = self.user
         response = enter_data(
             request, self.user.username, self.xform.id_string
         )
         # make sure response redirect to an enketo site
         enketo_base_url = urlparse(settings.ENKETO_URL).netloc
         redirected_base_url = urlparse(response["Location"]).netloc
         # TODO: checking if the form is valid on enketo side
         self.assertIn(enketo_base_url, redirected_base_url)
         self.assertEqual(response.status_code, 302)
Example #14
def get_hosts(urls):
    """Get hosts extracted from URLs.

    :param urls: URL addresses from which the function extracts hosts
    :returns: a list of hosts extracted from the URLs
    """
    return [urlparse(u).hostname for u in urls]
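A quick usage sketch with made-up addresses (assuming urlparse comes from urllib.parse); .hostname lowercases the host and strips credentials and the port:

assert get_hosts(['http://Example.COM/x', 'https://user:pw@sub.example.org:8080/']) == ['example.com', 'sub.example.org']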
Example #15
def show_terms_if_not_agreed(context, field=TERMS_HTTP_PATH_FIELD):
    """Displays a modal on a current page if a user has not yet agreed to the
    given terms. If terms are not specified, the default slug is used.

    How it works? A small snippet is included into your template if a user
    who requested the view has not yet agreed the terms. The snippet takes
    care of displaying a respective modal.
    """
    request = context["request"]
    all_agreed = True
    not_agreed_terms = []

    for terms in TermsAndConditions.get_active_list(as_dict=False):
        if not TermsAndConditions.agreed_to_terms(request.user, terms):
            all_agreed = False
            not_agreed_terms.append(terms)

    # stop here, if all terms have been agreed
    if all_agreed:
        return {}

    # handle excluded URLs
    url = urlparse(request.META[field])
    protected = is_path_protected(url.path)

    if (not all_agreed) and not_agreed_terms and protected:
        return {"not_agreed_terms": not_agreed_terms, "returnTo": url.path}

    return {}
Example #16
    def get_owner_and_repo(repourl):
        """
        Takes a git repository URL from Bitbucket and tries to determine the owner and repository name
        :param repourl: Bitbucket git repo in the form of
                    git@bitbucket.com:OWNER/REPONAME.git
                    https://bitbucket.com/OWNER/REPONAME.git
                    ssh://git@bitbucket.com/OWNER/REPONAME.git
        :return: owner, repo: The owner of the repository and the repository name
        """
        parsed = urlparse(repourl)

        if parsed.scheme:
            path = parsed.path[1:]
        else:
            # we assume git@host:owner/repo.git here
            path = parsed.path.split(':', 1)[-1]

        if path.endswith('.git'):
            path = path[:-4]
        while path.endswith('/'):
            path = path[:-1]

        parts = path.split('/')

        assert len(parts) == 2, 'OWNER/REPONAME is expected'

        return parts
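All three documented forms resolve to the same pair; a hedged check, assuming urlparse comes from urllib.parse and the method is called as a plain function:

assert get_owner_and_repo('https://bitbucket.com/OWNER/REPONAME.git') == ['OWNER', 'REPONAME']
assert get_owner_and_repo('git@bitbucket.com:OWNER/REPONAME.git') == ['OWNER', 'REPONAME']
assert get_owner_and_repo('ssh://git@bitbucket.com/OWNER/REPONAME.git') == ['OWNER', 'REPONAME']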
Example #17
 def _predicted_urls(url):
     domain = urlparse(url).netloc
     return {
         'http://{}/rss/'.format(domain),
         'http://{}/feeds/'.format(domain),
         'http://{}/feed/'.format(domain)
     }
Example #18
def get_password_reset_email(user, reset_url,
                             subject_template_name='registration/password_reset_subject.txt',  # noqa
                             email_template_name='api_password_reset_email.html',  # noqa
                             token_generator=default_token_generator,
                             email_subject=None):
    """Creates the subject and email body for password reset email."""
    result = urlparse(reset_url)
    site_name = domain = result.hostname
    encoded_username = urlsafe_base64_encode(
        b(user.username.encode('utf-8')))
    c = {
        'email': user.email,
        'domain': domain,
        'path': result.path,
        'site_name': site_name,
        'uid': urlsafe_base64_encode(force_bytes(user.pk)),
        'username': user.username,
        'encoded_username': encoded_username,
        'token': token_generator.make_token(user),
        'protocol': result.scheme if result.scheme != '' else 'http',
    }
    # if subject email provided don't load the subject template
    subject = email_subject or loader.render_to_string(subject_template_name,
                                                       c)
    # Email subject *must not* contain newlines
    subject = ''.join(subject.splitlines())
    email = loader.render_to_string(email_template_name, c)

    return subject, email
Example #19
    def to_internal_value(self, data):
        try:
            http_prefix = data.startswith(('http:', 'https:'))
        except AttributeError:
            self.fail('incorrect_type', data_type=type(data).__name__)
        input_data = data
        if http_prefix:
            # If needed convert absolute URLs to relative path
            data = urlparse(data).path
            prefix = get_script_prefix()
            if data.startswith(prefix):
                data = '/' + data[len(prefix):]

        try:
            match = self.resolve(data)
        except Resolver404:
            self.fail('no_match')

        if match.view_name not in self.view_names:
            self.fail('incorrect_match', input=input_data)

        self._setup_field(match.view_name)

        try:
            return self.get_object(match.view_name, match.args, match.kwargs)
        except (ObjectDoesNotExist, TypeError, ValueError):
            self.fail('does_not_exist')

        return data
Example #20
    def on_task_download(self, task, config):
        config = self.prepare_config(config, task)
        for entry in task.accepted:
            ftp_url = urlparse(entry.get('url'))
            ftp_url = ftp_url._replace(path=unquote(ftp_url.path))
            current_path = os.path.dirname(ftp_url.path)
            try:
                ftp = self.ftp_connect(config, ftp_url, current_path)
            except ftplib.all_errors as e:
                entry.fail("Unable to connect to server : %s" % (e))
                break

            if not os.path.isdir(config['ftp_tmp_path']):
                log.debug('creating base path: %s' % config['ftp_tmp_path'])
                os.mkdir(config['ftp_tmp_path'])

            file_name = os.path.basename(ftp_url.path)

            try:
                # Directory
                ftp = self.check_connection(ftp, config, ftp_url, current_path)
                ftp.cwd(file_name)
                self.ftp_walk(ftp, os.path.join(config['ftp_tmp_path'], file_name), config, ftp_url, ftp_url.path)
                ftp = self.check_connection(ftp, config, ftp_url, current_path)
                ftp.cwd('..')
                if config['delete_origin']:
                    ftp.rmd(file_name)
            except ftplib.error_perm:
                # File
                self.ftp_down(ftp, file_name, config['ftp_tmp_path'], config, ftp_url, current_path)

            ftp.close()
Example #21
  def GetCurrentUrlPath(self):
    url = urlparse.urlparse(self.driver.current_url)

    result = url.path
    if url.fragment:
      result += "#" + url.fragment

    return result
Example #22
 def set_urls(self, url):
     url = url.rstrip('/')
     if self.url != url:
         self.url = url
         parsed_url = urlparse(url)
         self.url_match = re.compile(r'^%s://(?:torrents\.)?(%s)/.*$' % (re.escape(parsed_url.scheme),
                                                                         re.escape(parsed_url.netloc)))
         self.url_search = re.compile('^%s/search/.*$' % (re.escape(url)))
Example #23
 def program_id_from_url(self, url):
     parsed = urlparse(url)
     query_dict = parse_qs(parsed.query)
     play = query_dict.get('play')
     if parsed.path.startswith('/tv/ohjelmat/') and play:
         return play[0]
     else:
         return parsed.path.split('/')[-1]
Example #24
def is_valid_url(value):
    """Check if given value is a valid URL string.

    :param value: a value to test
    :returns: True if the value is valid
    """
    match = URL_REGEX.match(value)
    host_str = urlparse(value).hostname
    return bool(match) and is_valid_host(host_str)
Example #25
  def call_url(self, expected_url, with_error=False):
    try:
      with self.best_url_selector.select_best_url() as url:
        self.assertEqual(urlparse(expected_url), url)

        if with_error:
          raise RequestException('error connecting to {}'.format(url))
    except RequestException:
      pass
Example #26
    def on_task_exit(self, task, config):
        config = self.prepare_config(config)
        if not config['enabled'] or task.options.learn:
            return
        if not self.client:
            self.client = self.create_rpc_client(config)
        tracker_re = re.compile(config['tracker'], re.IGNORECASE) if 'tracker' in config else None
        preserve_tracker_re = (
            re.compile(config['preserve_tracker'], re.IGNORECASE)
            if 'preserve_tracker' in config
            else None
        )

        session = self.client.get_session()

        remove_ids = []
        for torrent in self.client.get_torrents():
            log.verbose(
                'Torrent "%s": status: "%s" - ratio: %s -  date added: %s'
                % (torrent.name, torrent.status, torrent.ratio, torrent.date_added)
            )
            downloaded, dummy = self.torrent_info(torrent, config)
            if not downloaded:
                continue
            if config.get('transmission_seed_limits'):
                seed_ratio_ok, idle_limit_ok = self.check_seed_limits(torrent, session)
                if not seed_ratio_ok or not idle_limit_ok:
                    continue
            if 'min_ratio' in config:
                if torrent.ratio < config['min_ratio']:
                    continue
            if 'finished_for' in config:
                # done date might be invalid if this torrent was added to transmission when already completed
                started_seeding = datetime.fromtimestamp(max(torrent.addedDate, torrent.doneDate))
                if started_seeding + parse_timedelta(config['finished_for']) > datetime.now():
                    continue
            tracker_hosts = (
                urlparse(tracker['announce']).hostname for tracker in torrent.trackers
            )
            if 'tracker' in config:
                if not any(tracker_re.search(tracker) for tracker in tracker_hosts):
                    continue
            if 'preserve_tracker' in config:
                if any(preserve_tracker_re.search(tracker) for tracker in tracker_hosts):
                    continue
            if config.get('directories'):
                if not any(
                    re.search(d, torrent.downloadDir, re.IGNORECASE) for d in config['directories']
                ):
                    continue
            if task.options.test:
                log.info('Would remove finished torrent `%s` from transmission', torrent.name)
                continue
            log.info('Removing finished torrent `%s` from transmission', torrent.name)
            remove_ids.append(torrent.id)
        if remove_ids:
            self.client.remove_torrent(remove_ids, config.get('delete_files'))
Example #27
    def any_match(self, urls):
        """Check if any of the given URLs has a matching host.

        :param urls: an iterable containing URLs
        :returns: True if any host has a listed match
        :raises InvalidURLError: if there are any invalid URLs in
        the sequence
        """
        return any(urlparse(u).hostname in self for u in urls)
Example #28
def redirect_to_terms_accept(current_path='/', slug='default'):
    """Redirect the user to the terms and conditions accept page."""
    redirect_url_parts = list(urlparse(ACCEPT_TERMS_PATH))
    if slug != 'default':
        redirect_url_parts[2] += slug
    querystring = QueryDict(redirect_url_parts[4], mutable=True)
    querystring[TERMS_RETURNTO_PARAM] = current_path
    redirect_url_parts[4] = querystring.urlencode(safe='/')
    return HttpResponseRedirect(urlunparse(redirect_url_parts))
Example #29
def is_unresponsive(url):
    """
    Checks if host of given url has timed out within WAIT_TIME

    :param url: The url to check
    :return: True if the host has timed out within WAIT_TIME
    :rtype: bool
    """
    host = urlparse(url).hostname
    return host in unresponsive_hosts
Example #30
def is_safe_url(target):
    ref_url = urlparse(request.host_url)
    test_url = urlparse(urljoin(request.host_url, target))
    return test_url.scheme in ('http',
                               'https') and ref_url.netloc == test_url.netloc
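The urljoin step is what makes the check robust: a relative target stays on the request's host, while an absolute target replaces it and then fails the netloc comparison. A standalone illustration with hypothetical hosts:

from urllib.parse import urljoin, urlparse

assert urlparse(urljoin('http://example.com/', '/next')).netloc == 'example.com'
assert urlparse(urljoin('http://example.com/', 'https://evil.example/x')).netloc == 'evil.example'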
Example #31
    def on_task_exit(self, task, config):
        config = self.prepare_config(config)
        if not config['enabled'] or task.options.learn:
            return
        if not self.client:
            self.client = self.create_rpc_client(config)
        nrat = float(config['min_ratio']) if 'min_ratio' in config else None
        nfor = parse_timedelta(
            config['finished_for']) if 'finished_for' in config else None
        delete_files = bool(
            config['delete_files']) if 'delete_files' in config else False
        trans_checks = bool(
            config['transmission_seed_limits']
        ) if 'transmission_seed_limits' in config else False
        tracker_re = re.compile(config['tracker'],
                                re.IGNORECASE) if 'tracker' in config else None
        directories_re = config.get('directories')

        session = self.client.get_session()

        remove_ids = []
        for torrent in self.client.get_torrents():
            log.verbose(
                'Torrent "%s": status: "%s" - ratio: %s -  date added: %s - date done: %s'
                % (torrent.name, torrent.status, torrent.ratio,
                   torrent.date_added, torrent.date_done))
            downloaded, dummy = self.torrent_info(torrent, config)
            seed_ratio_ok, idle_limit_ok = self.check_seed_limits(
                torrent, session)
            tracker_hosts = (urlparse(tracker['announce']).hostname
                             for tracker in torrent.trackers)
            is_clean_all = nrat is None and nfor is None and not trans_checks
            is_minratio_reached = nrat and (nrat <= torrent.ratio)
            is_transmission_seedlimit_unset = trans_checks and seed_ratio_ok is None and idle_limit_ok is None
            is_transmission_seedlimit_reached = trans_checks and seed_ratio_ok is True
            is_transmission_idlelimit_reached = trans_checks and idle_limit_ok is True
            is_torrent_seed_only = torrent.date_done <= torrent.date_added
            is_torrent_idlelimit_since_added_reached = nfor and (
                torrent.date_added + nfor) <= datetime.now()
            is_torrent_idlelimit_since_finished_reached = nfor and (
                torrent.date_done + nfor) <= datetime.now()
            is_tracker_matching = not tracker_re or any(
                tracker_re.search(host) for host in tracker_hosts)
            is_directories_matching = not directories_re or any(
                re.compile(directory, re.IGNORECASE).search(
                    torrent.downloadDir) for directory in directories_re)
            if (downloaded and
                (is_clean_all or is_transmission_seedlimit_unset
                 or is_transmission_seedlimit_reached
                 or is_transmission_idlelimit_reached or is_minratio_reached or
                 (is_torrent_seed_only
                  and is_torrent_idlelimit_since_added_reached) or
                 (not is_torrent_seed_only
                  and is_torrent_idlelimit_since_finished_reached))
                    and is_tracker_matching and is_directories_matching):
                if task.options.test:
                    log.info(
                        'Would remove finished torrent `%s` from transmission'
                        % torrent.name)
                    continue
                log.info('Removing finished torrent `%s` from transmission' %
                         torrent.name)
                remove_ids.append(torrent.id)
        if remove_ids:
            self.client.remove_torrent(remove_ids, delete_files)
Example #32
 def program_id_from_url(self, url):
     parsed = urlparse(url)
     return parsed.path.split('/')[-1]
Example #33
 def download_flavors(self, download_url, media_type):
     path = urlparse(download_url)[2]
     ext = os.path.splitext(path)[1] or None
     backend = WgetBackend(download_url, ext)
     return [StreamFlavor(media_type=media_type, streams=[backend])]
Example #34
def get_udp_seeds(url, info_hash):
    parsed_url = urlparse(url)
    try:
        port = parsed_url.port
    except ValueError:
        log.error('UDP Port Error, url was %s', url)
        return 0

    log.debug('Checking for seeds from %s', url)

    connection_id = 0x41727101980  # connection id is always this
    transaction_id = randrange(1, 65535)  # Random Transaction ID creation

    if port is None:
        log.error('UDP Port Error, port was None')
        return 0

    if port < 0 or port > 65535:
        log.error('UDP Port Error, port was %s', port)
        return 0

    # Create the socket
    try:
        clisocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        clisocket.settimeout(5.0)
        clisocket.connect((parsed_url.hostname, port))

        # build packet with connection_ID, using 0 value for action, giving our transaction ID for this packet
        packet = struct.pack(b">QLL", connection_id, 0, transaction_id)
        clisocket.send(packet)

        # read 16 bytes (">LLQ" = 16 bytes) to match the unpack format below
        res = clisocket.recv(16)
        # check received packet for response
        action, transaction_id, connection_id = struct.unpack(b">LLQ", res)

        # build packet hash out of decoded info_hash
        packet_hash = binascii.unhexlify(info_hash)

        # construct scrape packet from the decoded info_hash, setting the action field to 2 (scrape)
        packet = struct.pack(b">QLL", connection_id, 2,
                             transaction_id) + packet_hash

        clisocket.send(packet)
        # set receive size of 8 + 12 bytes
        res = clisocket.recv(20)

    except IOError as e:
        log.warning('Socket Error: %s', e)
        return 0
    # Check for UDP error packet
    (action, ) = struct.unpack(b">L", res[:4])
    if action == 3:
        log.error('There was a UDP Packet Error 3')
        return 0

    # first 8 bytes are followed by seeders, completed and leechers for requested torrent
    seeders, _, _ = struct.unpack(b">LLL", res[8:20])
    log.debug('get_udp_seeds is returning: %s', seeders)
    clisocket.close()
    return seeders
Example #35
def parse_url(url, proto='http'):
    # urlparse won't parse properly without a protocol
    if '://' not in url:
        url = proto + '://' + url
    return urlparse(url)
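A brief usage sketch with hypothetical hosts (assuming urlparse comes from urllib.parse); the prefix is only added when no scheme is present:

assert parse_url('example.com/x').geturl() == 'http://example.com/x'
assert parse_url('ftp://example.com/x').scheme == 'ftp'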
Example #36
 def __init__(self, url):
     parsed_url = urlparse(url)
     self.url = url
     self.server = parsed_url.hostname.lower()
     self.has_ssl = parsed_url.scheme == 'https'
     super(RedirectError, self).__init__(text_type(self))
Example #37
    def _create_beam_sdk(sdk_remote_location, temp_dir):
        # type: (...) -> List[beam_runner_api_pb2.ArtifactInformation]
        """Creates a Beam SDK file with the appropriate version.

      Args:
        sdk_remote_location: A URL from which the file can be downloaded or a
          remote file location. The SDK file can be a tarball or a wheel. Set
          to 'pypi' to download and stage a wheel and source SDK from PyPi.
        temp_dir: path to temporary location where the file should be
          downloaded.

      Returns:
        A list of ArtifactInformation of local files path and SDK files that
        will be staged to the staging location.

      Raises:
        RuntimeError: if staging was not successful.
      """
        if sdk_remote_location == 'pypi':
            sdk_local_file = Stager._download_pypi_sdk_package(temp_dir)
            sdk_sources_staged_name = Stager.\
                _desired_sdk_filename_in_staging_location(sdk_local_file)
            _LOGGER.info('Staging SDK sources from PyPI: %s',
                         sdk_sources_staged_name)
            staged_sdk_files = [
                Stager._create_file_stage_to_artifact(sdk_local_file,
                                                      sdk_sources_staged_name)
            ]
            try:
                abi_suffix = 'm' if sys.version_info < (3, 8) else ''
                # Stage binary distribution of the SDK, for now on a best-effort basis.
                sdk_local_file = Stager._download_pypi_sdk_package(
                    temp_dir,
                    fetch_binary=True,
                    language_version_tag='%d%d' %
                    (sys.version_info[0], sys.version_info[1]),
                    abi_tag='cp%d%d%s' %
                    (sys.version_info[0], sys.version_info[1], abi_suffix))
                sdk_binary_staged_name = Stager.\
                    _desired_sdk_filename_in_staging_location(sdk_local_file)
                _LOGGER.info(
                    'Staging binary distribution of the SDK from PyPI: %s',
                    sdk_binary_staged_name)
                staged_sdk_files.append(
                    Stager._create_file_stage_to_artifact(
                        sdk_local_file, sdk_binary_staged_name))
            except RuntimeError as e:
                _LOGGER.warning(
                    'Failed to download requested binary distribution '
                    'of the SDK: %s', repr(e))

            return staged_sdk_files
        elif Stager._is_remote_path(sdk_remote_location):
            sdk_remote_parsed = urlparse(sdk_remote_location)
            sdk_remote_filename = os.path.basename(sdk_remote_parsed.path)
            local_download_file = os.path.join(temp_dir, sdk_remote_filename)
            Stager._download_file(sdk_remote_location, local_download_file)
            staged_name = Stager._desired_sdk_filename_in_staging_location(
                local_download_file)
            _LOGGER.info('Staging Beam SDK from %s', sdk_remote_location)
            return [
                Stager._create_file_stage_to_artifact(local_download_file,
                                                      staged_name)
            ]
        else:
            raise RuntimeError(
                'The --sdk_location option was used with an unsupported '
                'type of location: %s' % sdk_remote_location)
Example #38
    def publish(self, user, id_string=None, created_by=None):
        """
        Publish XLSForm.
        """
        if self.is_valid():
            # If a text (csv) representation of the xlsform is present,
            # this will save the file and pass it instead of the 'xls_file'
            # field.
            cleaned_xls_file = None
            if 'text_xls_form' in self.cleaned_data\
               and self.cleaned_data['text_xls_form'].strip():
                csv_data = self.cleaned_data['text_xls_form']

                # assigning the filename to a random string (quick fix)
                import random
                rand_name = "uploaded_form_%s.csv" % ''.join(
                    random.sample("abcdefghijklmnopqrstuvwxyz0123456789", 6))

                cleaned_xls_file = \
                    default_storage.save(
                        upload_to(None, rand_name, user.username),
                        ContentFile(csv_data))
            if 'xls_file' in self.cleaned_data and\
                    self.cleaned_data['xls_file']:
                cleaned_xls_file = self.cleaned_data['xls_file']
            if 'floip_file' in self.cleaned_data and\
                    self.cleaned_data['floip_file']:
                cleaned_xls_file = self.cleaned_data['floip_file']

            cleaned_url = (
                self.cleaned_data['xls_url'].strip() or
                self.cleaned_data['dropbox_xls_url'] or
                self.cleaned_data['csv_url'])

            if cleaned_url:
                cleaned_xls_file = urlparse(cleaned_url)
                cleaned_xls_file = \
                    '_'.join(cleaned_xls_file.path.split('/')[-2:])
                name, extension = os.path.splitext(cleaned_xls_file)

                if extension not in VALID_FILE_EXTENSIONS and name:
                    response = requests.get(cleaned_url)
                    if response.headers.get('content-type') in \
                            VALID_XLSFORM_CONTENT_TYPES and \
                            response.status_code < 400:
                        cleaned_xls_file = get_filename(response)

                cleaned_xls_file = \
                    upload_to(None, cleaned_xls_file, user.username)
                self.validate(cleaned_url)
                xls_data = ContentFile(urlopen(cleaned_url).read())
                cleaned_xls_file = \
                    default_storage.save(cleaned_xls_file, xls_data)

            project = self.cleaned_data['project']

            if project is None:
                project = get_user_default_project(user)
            else:
                project = self._project

            cleaned_xml_file = self.cleaned_data['xml_file']
            if cleaned_xml_file:
                return publish_xml_form(cleaned_xml_file, user, project,
                                        id_string, created_by or user)

            if cleaned_xls_file is None:
                raise forms.ValidationError(
                    _(u"XLSForm not provided, expecting either of these"
                      " params: 'xml_file', 'xls_file', 'xls_url', 'csv_url',"
                      " 'dropbox_xls_url', 'text_xls_form', 'floip_file'"))
            # publish the xls
            return publish_xls_form(cleaned_xls_file, user, project,
                                    id_string, created_by or user)
Example #39
 def movie_delete_request(base_url, port, api_key):
     parsedurl = urlparse(base_url)
     log.debug('Received movie delete request')
     return '%s://%s:%s%s/api/%s/movie.delete?delete_from=wanted' % (
         parsedurl.scheme, parsedurl.netloc, port, parsedurl.path, api_key)
Example #40
 def movie_add_request(base_url, port, api_key):
     parsedurl = urlparse(base_url)
     log.debug('Received movie add request')
     return '%s://%s:%s%s/api/%s/movie.add' % (
         parsedurl.scheme, parsedurl.netloc, port, parsedurl.path, api_key)
Example #41
    def file_extension(self):
        """The file extension of :attr:`~.Media.url`. Read-only.

        :type: :obj:`str`
        """
        return '.' + urlparse(self.url).path.split('.')[-1]
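Since the query string is not part of .path it never leaks into the extension, though a dotless path would fall back to '.' plus the whole last segment. A standalone check with a made-up URL:

from urllib.parse import urlparse

assert '.' + urlparse('http://example.com/img/photo.jpeg?size=large').path.split('.')[-1] == '.jpeg'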
Example #42
 def profile_list_request(base_url, port, api_key):
     parsedurl = urlparse(base_url)
     log.debug('Received profile list request')
     return '%s://%s:%s%s/api/%s/profile.list' % (
         parsedurl.scheme, parsedurl.netloc, port, parsedurl.path, api_key)
Example #43
    def install_frontend(self,
                         source,
                         oauth_key,
                         oauth_secret,
                         backend_url,
                         settings_file=None,
                         network='public',
                         hub_id=None):
        """ Install connector-frontend in Odin Automation Hub, --source can be http(s):// or
        filepath"""

        with TemporaryDirectory() as tdir:
            is_http_source = source.startswith(('http://', 'https://'))

            if is_http_source:
                package_name = _download_file(source, target=tdir)
            else:
                package_name = os.path.basename(source)
                copyfile(os.path.expanduser(source),
                         os.path.join(tdir, package_name))

            package_path = os.path.join(tdir, package_name)
            with zipfile.ZipFile(package_path, 'r') as zip_ref:
                meta_path = zip_ref.extract('APP-META.xml', path=tdir)
                tenant_schema_path = zip_ref.extract('schemas/tenant.schema',
                                                     tdir)
                app_schema_path = zip_ref.extract('schemas/app.schema', tdir)

                try:
                    zip_ref.extract('schemas/user.schema', tdir)
                    user_service = True
                except KeyError:
                    user_service = False

            tree = xml_et.ElementTree(file=meta_path)
            namespace = '{http://aps-standard.org/ns/2}'
            connector_id = tree.find('{}id'.format(namespace)).text
            version = tree.find('{}version'.format(namespace)).text
            release = tree.find('{}release'.format(namespace)).text

            # Get connector name from id as <name> field may not be unique
            url_path = urlparse(connector_id).path
            connector_name = os.path.split(url_path)[-1]

            if not settings_file:
                settings_file = {}
            else:
                settings_file = json.load(open(settings_file))

            if backend_url.startswith('http://'):
                print(
                    "WARN: Make sure that the APS development mode enabled for http backend. "
                    "Run `apsconnect aps_devel_mode` command.")
            elif backend_url.startswith('https://'):
                pass
            else:
                print("Backend url must be URL http(s)://, got {}".format(
                    backend_url))
                sys.exit(1)

            cfg, hub = _get_cfg(), _get_hub()

            with open(package_path, 'rb') as package_binary:
                print("Importing connector {} {}-{}".format(
                    connector_id, version, release))
                import_kwargs = {'package_url': source} if is_http_source \
                    else {'package_body': xmlrpclib.Binary(package_binary.read())}
                response = hub.APS.importPackage(**import_kwargs)
                _osaapi_raise_for_status(response)

                application_id = str(response['result']['application_id'])

                print("Connector {} imported with id={} [ok]".format(
                    connector_id, application_id))

            payload = {
                'aps': {
                    'package': {
                        'type': connector_id,
                        'version': version,
                        'release': release,
                    },
                    'endpoint': backend_url,
                    'network': network,
                    'auth': {
                        'oauth': {
                            'key': oauth_key,
                            'secret': oauth_secret,
                        },
                    },
                },
            }

            # Get Unique OA id for using as hubId parameter while endpoint deploying
            base_aps_url = _get_aps_url(
                **{k: _get_cfg()[k]
                   for k in APS_CONNECT_PARAMS})

            app_properties = _get_properties(app_schema_path)

            if 'hubId' in app_properties:
                url = '{}/{}'.format(
                    base_aps_url,
                    'aps/2/resources?implementing(http://parallels.com/aps/types/pa/poa/1.0)',
                )

                response = request(method='GET',
                                   url=url,
                                   headers=_get_user_token(hub, cfg['user']),
                                   verify=False)
                response.raise_for_status()

                try:
                    data = json.loads(response.content.decode('utf-8'))
                except ValueError:
                    print("APSController provided non-json format")
                    sys.exit(1)

                if not data and not hub_id:
                    raise Exception(
                        "Core OA resource is not found\n"
                        "Use --hub-id={value} argument to specify the ID "
                        "manually or --hub-id=auto to generate it automatically"
                    )
                elif data:
                    hub_id = data[0]['aps']['id']
                elif hub_id == 'auto':
                    hub_id = str(uuid.uuid4())

                payload.update({'app': {'hubId': hub_id}})

            payload.update(settings_file)

            response = request(method='POST',
                               url='{}/{}'.format(base_aps_url,
                                                  'aps/2/applications/'),
                               headers=_get_user_token(hub, cfg['user']),
                               verify=False,
                               json=payload)
            try:
                response.raise_for_status()
            except Exception as e:
                if 'error' in response.json():
                    err = "{} {}".format(response.json()['error'],
                                         response.json()['message'])
                else:
                    err = str(e)
                print("Installation of connector {} FAILED.\n"
                      "Hub APS API response {} code.\n"
                      "Error: {}".format(connector_id, response.status_code,
                                         err))

            # Create app, tenant, users resource types
            resource_uid = json.loads(
                response.content.decode('utf-8'))['app']['aps']['id']

            core_resource_types_payload = [
                {
                    'resclass_name':
                    'rc.saas.service.link',
                    'name':
                    connector_name,
                    'act_params': [
                        {
                            'var_name': 'app_id',
                            'var_value': application_id
                        },
                        {
                            'var_name': 'resource_uid',
                            'var_value': resource_uid
                        },
                    ]
                },
                {
                    'resclass_name':
                    'rc.saas.service',
                    'name':
                    '{} tenant'.format(connector_name),
                    'act_params': [
                        {
                            'var_name': 'app_id',
                            'var_value': application_id
                        },
                        {
                            'var_name': 'service_id',
                            'var_value': 'tenant'
                        },
                        {
                            'var_name': 'autoprovide_service',
                            'var_value': '1'
                        },
                    ]
                },
            ]

            # Collect ids for service template creation
            resource_types_ids = []
            limited_resources = {}

            for type_payload in core_resource_types_payload:
                response = hub.addResourceType(**type_payload)
                _osaapi_raise_for_status(response)

                resource_types_ids.append(
                    response['result']['resource_type_id'])

            for type_id in list(resource_types_ids):
                limited_resources[type_id] = 1

            if user_service:
                user_resource_type_payload = {
                    'resclass_name':
                    'rc.saas.service',
                    'name':
                    '{} users'.format(connector_name),
                    'act_params': [
                        {
                            'var_name': 'app_id',
                            'var_value': application_id
                        },
                        {
                            'var_name': 'service_id',
                            'var_value': 'user'
                        },
                        {
                            'var_name': 'autoprovide_service',
                            'var_value': '0'
                        },
                    ]
                }

                response = hub.addResourceType(**user_resource_type_payload)
                _osaapi_raise_for_status(response)

                resource_types_ids.append(
                    response['result']['resource_type_id'])

            # Create counters resource types
            counters = _get_counters(tenant_schema_path)

            for counter in counters:
                payload = {
                    'resclass_name':
                    "rc.saas.resource.unit",
                    'name':
                    '{} {}'.format(connector_name, counter),
                    'act_params': [
                        {
                            'var_name': 'app_id',
                            'var_value': application_id
                        },
                        {
                            'var_name': 'service_id',
                            'var_value': "tenant"
                        },
                        {
                            'var_name': 'resource_id',
                            'var_value': counter
                        },
                    ]
                }

                response = hub.addResourceType(**payload)
                _osaapi_raise_for_status(response)
                resource_types_ids.append(
                    response['result']['resource_type_id'])

            # Create parameters resource types
            parameters = _get_parameters(tenant_schema_path)

            for parameter in parameters:
                payload = {
                    'resclass_name':
                    "rc.saas.resource.unit",
                    'name':
                    '{} {}'.format(connector_name, parameter),
                    'act_params': [
                        {
                            'var_name': 'app_id',
                            'var_value': application_id
                        },
                        {
                            'var_name': 'service_id',
                            'var_value': "tenant"
                        },
                        {
                            'var_name': 'resource_id',
                            'var_value': parameter
                        },
                    ]
                }

                response = hub.addResourceType(**payload)
                _osaapi_raise_for_status(response)

                resource_types_ids.append(
                    response['result']['resource_type_id'])
                limited_resources[response['result']['resource_type_id']] = 0

            print("Resource types creation [ok]")

        # Create service template
        payload = {
            'name': connector_name,
            'owner_id': 1,
            'resources': [],
        }

        for type_id in resource_types_ids:
            payload['resources'].append({'resource_type_id': type_id})

        response = hub.addServiceTemplate(**payload)
        _osaapi_raise_for_status(response)
        service_template_id = response['result']['st_id']
        print("Service template \"{}\" created with id={} [ok]".format(
            connector_name, service_template_id))

        # Set up limits
        payload = {
            'st_id': service_template_id,
            'limits': [],
        }

        for type_id, limit in limited_resources.items():
            payload['limits'].append({
                'resource_id': type_id,
                'resource_limit64': str(limit)
            })

        response = hub.setSTRTLimits(**payload)
        _osaapi_raise_for_status(response)
        print("Limits for Service template \"{}\" are applied [ok]".format(
            service_template_id))
Example #44
 def _ExtractLinkFromMessage(self, message):
     m = re.search(r"href='(.+?)'", message, re.MULTILINE)
     link = urlparse.urlparse(m.group(1))
     return link.path + "/" + "#" + link.fragment
Example #45
 def _baseurl(self, url):
   parsed_url = urlparse(url)
   return '{scheme}://{netloc}'.format(scheme=parsed_url.scheme, netloc=parsed_url.netloc)
Example #46
    def on_task_input(self, task, config):
        """
        This plugin returns ALL of the shows monitored by Sickbeard.
        This includes both ongoing and ended.
        Syntax:

        sickbeard:
          base_url=<value>
          port=<value>
          api_key=<value>

        Options base_url and api_key are required.

        Use with input plugin like discover and/or configure_series.
        Example:

        download-tv-task:
          configure_series:
            settings:
              quality:
                - 720p
            from:
              sickbeard:
                base_url: http://localhost
                port: 8531
                api_key: MYAPIKEY1123
          discover:
            what:
              - next_series_episodes: yes
            from:
              torrentz: any
          download:
            /download/tv

        Note that when using the configure_series plugin with Sickbeard
        you are basically synced to it, so removing a show in Sickbeard will
        remove it in flexget as well, which could be positive or negative,
        depending on your usage.
        """
        parsedurl = urlparse(config.get('base_url'))
        url = '%s://%s:%s%s/api/%s/?cmd=shows' % (
            parsedurl.scheme, parsedurl.netloc, config.get('port'),
            parsedurl.path, config.get('api_key'))
        try:
            json = task.requests.get(url).json()
        except RequestException as e:
            raise plugin.PluginError(
                'Unable to connect to Sickbeard at %s://%s:%s%s. Error: %s' %
                (parsedurl.scheme, parsedurl.netloc, config.get('port'),
                 parsedurl.path, e))
        entries = []
        for _, show in list(json['data'].items()):
            log.debug('processing show: {}'.format(show))
            fg_qualities = ''  # Initializes the quality parameter
            if show['paused'] and config.get('only_monitored'):
                continue
            if show['status'] == 'Ended' and not config.get('include_ended'):
                continue
            if config.get('include_data'):
                show_url = '%s:%s/api/%s/?cmd=show&tvdbid=%s' % (
                    config['base_url'], config['port'], config['api_key'],
                    show['tvdbid'])
                show_json = task.requests.get(show_url).json()
                log.debug('processing show data: {}'.format(show_json['data']))
                fg_qualities = self.quality_requirement_builder(
                    show_json['data']['quality_details']['initial'])
            entry = Entry(title=show['show_name'],
                          url='',
                          series_name=show['show_name'],
                          tvdb_id=show.get('tvdbid'),
                          tvrage_id=show.get('tvrage_id'))
            if len(fg_qualities) > 1:
                entry['configure_series_qualities'] = fg_qualities
            elif len(fg_qualities) == 1:
                entry['configure_series_quality'] = fg_qualities[0]
            else:
                entry['configure_series_quality'] = fg_qualities
            if entry.isvalid():
                entries.append(entry)
            else:
                log.error('Invalid entry created? %s' % entry)
                continue
            # Test mode logging
            if task.options.test:
                log.info("Test mode. Entry includes:")
                for key, value in list(entry.items()):
                    log.info('     {}: {}'.format(key.capitalize(), value))

        return entries
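For reference, with the config values from the docstring example above, the shows URL comes out as follows (the path of 'http://localhost' is empty):

>>> from urllib.parse import urlparse
>>> parsedurl = urlparse('http://localhost')
>>> '%s://%s:%s%s/api/%s/?cmd=shows' % (
...     parsedurl.scheme, parsedurl.netloc, 8531, parsedurl.path, 'MYAPIKEY1123')
'http://localhost:8531/api/MYAPIKEY1123/?cmd=shows'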
Ejemplo n.º 48
0
def urlparse_drop_netloc(url):
    # Keep only the path (index 2) and query (index 4) of the parse result;
    # scheme, netloc, params and fragment are dropped.
    url = urlparse(url)
    if url[4]:
        return url[2] + '?' + url[4]
    return url[2]
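With the function above in scope, a made-up URL shows that the fragment is dropped along with scheme and netloc:

>>> urlparse_drop_netloc('http://host/path/page?a=1#frag')
'/path/page?a=1'
>>> urlparse_drop_netloc('http://host/path/page')
'/path/page'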
Ejemplo n.º 49
0
    def is_tv_ohjelmat_url(self, url):
        return urlparse(url).path.startswith('/tv/ohjelmat/')
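The same check, sketched standalone against a hypothetical Yle-style URL:

>>> from urllib.parse import urlparse
>>> urlparse('https://areena.yle.fi/tv/ohjelmat/30-123').path.startswith('/tv/ohjelmat/')
True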
Ejemplo n.º 50
0
    def prepare_request(self,
                        method,
                        uri,
                        params=None,
                        headers=None,
                        data=None,
                        json=None,
                        access_token=None):
        params = {} if params is None else params
        if not isinstance(params, dict):
            raise TypeError('params should be dict')

        if uri == '':
            uri = '/'

        method = method.upper()
        params = utf8_encoded_dict(params)
        url = '/'.join([self.api_server, self.endpoint, self.version
                        ]) + uri.strip()
        logger.debug(url)
        url_parsed = urlparse(url)
        enc_params = urlencode(params)
        logger.debug(enc_params)
        if url_parsed.query == '' or url_parsed.query is None:
            query = enc_params
        elif enc_params == '' or enc_params is None:
            query = url_parsed.query
        else:
            query = '%s&%s' % (url_parsed.query, enc_params)

        real_uri = urlunparse(('', '', url_parsed.path, url_parsed.params,
                               query, url_parsed.fragment))

        real_url = urlunparse(
            (url_parsed.scheme, url_parsed.netloc, url_parsed.path,
             url_parsed.params, query, url_parsed.fragment))

        self.request_data.uri = real_uri
        self.request_data.method = method
        self.request_data.headers = {
            # 'Accept': 'application/json; charset=utf-8',
            'Host': url_parsed.netloc
        }

        if headers is not None and isinstance(headers, dict):
            # headers is a dict
            self.request_data.headers.update(headers)

        if access_token is not None:
            self.request_data.headers[HEADER_X_ACCESS_TOKEN] = access_token

        self.request_data.body = ''
        if method in ['POST', 'PUT']:
            if json is not None:
                self.request_data.headers[
                    'Content-Type'] = 'application/json; charset=utf-8'
                self.request_data.body = json_util.dumps(json)
            else:
                self.request_data.body = '' if data is None else data

        return real_url
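The query-merging step can be sketched in isolation; the host and parameters here are invented:

>>> from urllib.parse import urlparse, urlencode, urlunparse
>>> u = urlparse('https://api.example.com/v1/items?page=1')
>>> query = '%s&%s' % (u.query, urlencode({'size': 10}))
>>> urlunparse((u.scheme, u.netloc, u.path, u.params, query, u.fragment))
'https://api.example.com/v1/items?page=1&size=10'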
Ejemplo n.º 51
0
    def _parse_uri(self, client):
        """
        解析请求的 uri
        :type client: Client
        :return:
        """
        handler = self.handler
        endpoint = handler.client.request.endpoint

        if handler.client.encrypt_type == 'aes':
            # If the uri was encrypted, it has already been decrypted by this
            # point; re-parse it to recover the real uri of the request
            try:
                _, _, _, uri = handler.request.uri.split('/', 3)
                if not uri.startswith('/'):
                    uri = '/' + uri

                handler.client.request.uri = uri
            except ValueError:
                raise ClientErrorException(ResultCode.BAD_REQUEST,
                                           PromptMessage.INVALID_REQUEST_URI)
        else:
            uri = handler.client.request.uri

        if endpoint.get('is_builtin', False):
            # Built-in endpoints have no forward_url
            forward_url = None

            # Look for a matching built-in Endpoint Handler
            key = '%s/%s' % (endpoint['name'], endpoint['version'])
            builtin_handlers = handler.builtin_endpoints.get(key, [])
            # Match only the Path part of the uri, so the regular expressions
            # on the Handlers side can use $ to mark the end
            uri_parsed = urlparse(uri)
            uri_path = uri_parsed.path
            for t in builtin_handlers:
                re_uri, _handler = t
                match = re.match(re_uri, uri_path)
                if match:
                    handler.real_api_handler = _handler
                    break
            else:
                handler.real_api_handler = None
        else:
            # A backend API, which must be accessed through the proxy
            handler.real_api_handler = BackendAPIHandler
            # Resolve the address to forward to
            endpoint_url = endpoint.get('url')
            if endpoint_url is None:
                raise ServerErrorException(
                    ResultCode.BAD_CLIENT_CONFIG,
                    PromptMessage.NO_ENDPOINT_URL_CONFIG)

            endpoint_netloc = endpoint.get('netloc')
            if endpoint_netloc is None:
                url_parsed = urlparse(endpoint_url)
                endpoint['netloc'] = url_parsed.netloc

            if endpoint.get('skip_uri', False):
                # Decide whether to discard the uri supplied by the user
                uri = ''
            elif endpoint_url.find('?') > 0 and uri == '/':
                # Some backend sites point straight at a url with query
                # parameters, e.g. /?a=xx, so no uri can follow; the default
                # trailing / would produce /?a=xx/, so strip the / here
                uri = ''

            # TODO: validate and normalize the uri
            if endpoint_url.endswith('/'):
                forward_url = endpoint_url + uri[1:]
            else:
                forward_url = endpoint_url + uri

        handler.client.request.forward_url = forward_url
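The split-and-reattach step for encrypted uris can be sketched as follows (the leading '/gw/aes' segments are purely illustrative):

>>> _, _, _, uri = '/gw/aes/api/v1/users?id=1'.split('/', 3)
>>> uri
'api/v1/users?id=1'
>>> '/' + uri
'/api/v1/users?id=1'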
Ejemplo n.º 52
0
def split_url(url):
    parsed_url = urlparse(url)
    # Use netloc instead of hostname since hostname is None if URL is relative
    return parsed_url.scheme == 'https', parsed_url.netloc.lower(), parsed_url.path
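For example (made-up URL; note the lower-cased netloc):

>>> split_url('https://Example.COM/path')
(True, 'example.com', '/path')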
Ejemplo n.º 53
0
        'user': '******',
        'non-cookie-auth': '100/hour',
        'add-contributor': '10/second',
        'create-guid': '1000/hour',
        'root-anon-throttle': '1000/hour',
        'test-user': '******',
        'test-anon': '1/hour',
        'send-email': '2/minute',
    },
}

# Settings related to CORS Headers addon: allow API to receive authenticated requests from OSF
# CORS plugin only matches based on "netloc" part of URL, so as workaround we add that to the list
CORS_ORIGIN_ALLOW_ALL = False
CORS_ORIGIN_WHITELIST = (
    urlparse(osf_settings.DOMAIN).netloc,
    osf_settings.DOMAIN,
)
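For instance, if osf_settings.DOMAIN were 'https://osf.io/' (an assumption for illustration), the whitelist would contain both the netloc and the full domain:

>>> from urllib.parse import urlparse
>>> urlparse('https://osf.io/').netloc
'osf.io'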
# This needs to remain True to allow cross origin requests that are in CORS_ORIGIN_WHITELIST to
# use cookies.
CORS_ALLOW_CREDENTIALS = True
# Set dynamically on app init
ORIGINS_WHITELIST = ()

MIDDLEWARE = (
    'api.base.middleware.DjangoGlobalMiddleware',
    'api.base.middleware.CeleryTaskMiddleware',
    'api.base.middleware.PostcommitTaskMiddleware',
    # A profiling middleware. ONLY FOR DEV USE
    # Uncomment and add "prof" to url params to receive a profile for that url
    # 'api.base.middleware.ProfileMiddleware',
Ejemplo n.º 54
0
def generate_external_export(export_type, username, id_string, export_id=None,
                             options=None, xform=None):
    """
    Generates external export using ONA data through an external service.

    param: export_type
    param: username: logged in username
    param: id_string: xform id_string
    param: export_id: ID of export object associated with the request
    param: options: additional parameters required for the lookup.
        data_id: instance id
        query: filter_query for custom queries
        meta: metadata associated with external export
        token: authentication key required by external service
    """
    data_id = options.get("data_id")
    filter_query = options.get("query")
    meta = options.get("meta")
    token = options.get("token")

    if xform is None:
        xform = XForm.objects.get(
            user__username__iexact=username, id_string__iexact=id_string)
    user = User.objects.get(username=username)

    server, name = _get_server_from_metadata(xform, meta, token)

    # dissect the url
    parsed_url = urlparse(server)

    # the token is everything after the first five characters of the path
    # (presumably a path of the form '/xls/<token>')
    token = parsed_url.path[5:]

    # base address of the external service
    ser = parsed_url.scheme + '://' + parsed_url.netloc

    # Get single submission data
    if data_id:
        inst = Instance.objects.filter(xform__user=user,
                                       xform__id_string=id_string,
                                       deleted_at=None,
                                       pk=data_id)

        instances = [inst[0].json if inst else {}]
    else:
        instances = query_data(xform, query=filter_query)

    records = _get_records(instances)

    status_code = 0

    if records and server:
        try:
            client = Client(ser)
            response = client.xls.create(token, json.dumps(records))

            if hasattr(client.xls.conn, 'last_response'):
                status_code = client.xls.conn.last_response.status_code
        except Exception as e:
            raise J2XException(
                u"J2X client could not generate report. Server -> {0},"
                u" Error-> {1}".format(server, e)
            )
    else:
        if not server:
            raise J2XException(u"External server not set")
        elif not records:
            raise J2XException(
                u"No record to export. Form -> {0}".format(id_string)
            )

    # get or create export object
    if export_id:
        export = Export.objects.get(id=export_id)
    else:
        export_options = get_export_options(options)
        export = Export.objects.create(xform=xform,
                                       export_type=export_type,
                                       options=export_options)

    export.export_url = response
    if status_code == 201:
        export.internal_status = Export.SUCCESSFUL
        export.filename = name + '-' + response[5:] if name else response[5:]
        export.export_url = ser + response
    else:
        export.internal_status = Export.FAILED

    export.save()

    return export
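The url dissection above can be sketched on its own; the server address and token are invented, assuming a path of the form '/xls/<token>':

>>> from urllib.parse import urlparse
>>> p = urlparse('http://j2x.example.com/xls/abc123')
>>> p.path[5:]
'abc123'
>>> p.scheme + '://' + p.netloc
'http://j2x.example.com'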
Ejemplo n.º 55
0
    def sendComment(self, pr_url, text):
        path = urlparse(unicode2bytes(pr_url)).path
        payload = {'text': text}
        return self._http.post(COMMENT_API_URL.format(
            path=bytes2NativeString(path)), json=payload)
Ejemplo n.º 56
0
def shardwriter(url, encode=True, pack=True):
    parsed = urlparse(url)  # parse result is currently unused
    stream = gopen(url, "wb")
    # note: the 'pack' flag is accepted but not forwarded to TarWriter
    return tarrecords.TarWriter(stream, encode=encode)
Ejemplo n.º 57
0
    def __init__(self,
                 host=None,
                 port=None,
                 username='******',
                 password=None,
                 verify_ssl=None,
                 config_file=None,
                 use_proxy=False):
        """Dynamically determine host, port and credential information from current environment

        :param host: hostname or IP of API server
        :param port: port of API server
        :param username: username for basic http authentication (required if token unavailable)
        :param password: password for basic http authentication (required if token unavailable)
        :param verify_ssl: whether we trust self-signed certificate
        """
        self.in_pod = bool(os.getenv('KUBERNETES_SERVICE_HOST'))
        self.session = requests.Session()
        self.cert = None
        if use_proxy:
            self.host = host or "127.0.0.1"
            self.port = port or "8001"
            self.url = "http://{ip}:{port}".format(ip=self.host,
                                                   port=self.port)
            self.token = None
        else:
            session_verify = verify_ssl
            if self.in_pod and not config_file:
                self.host = host or 'kubernetes.default'
                self.port = port or int(
                    os.getenv('KUBERNETES_SERVICE_PORT', 443))
                with open(KUBE_TOKEN_PATH) as f:
                    self.token = 'Bearer ' + f.read().strip()
                swagger_client.configuration.ssl_ca_cert = KUBE_CACRT_PATH
                if verify_ssl is None:
                    verify_ssl = bool(self.host == KUBE_CACRT_HOST)
                    session_verify = KUBE_CACRT_PATH
            else:
                self.port = port or 443
                if verify_ssl is None:
                    verify_ssl = False
                    session_verify = False
                if host and password:
                    self.host = host
                    self.token = urllib3.util.make_headers(
                        basic_auth=username + ':' +
                        password).get('authorization')
                else:
                    config_path = os.path.expanduser(
                        config_file if config_file else DEFAULT_KUBECONFIG)
                    if not os.path.isfile(config_path):
                        raise ValueError(
                            "Unable to dynamically determine host/credentials from ~/.kube/config"
                        )
                    with open(config_path) as f:
                        # safe_load avoids constructing arbitrary Python objects
                        kube_config = yaml.safe_load(f)
                    cred_info = self._parse_config(kube_config)
                    self.host = cred_info['host']
                    self.token = cred_info.get('token')
                    if 'client-certificate' in cred_info:
                        self.cert = (cred_info['client-certificate'],
                                     cred_info['client-key'])

            p = urlparse('https://{}'.format(self.host))
            if p.port:
                self.port = p.port
                self.host = p.hostname

            self.url = 'https://{}:{}'.format(self.host, self.port)
            # NOTE: options set on the configuration singleton (e.g. verify_ssl),
            # must be set *before* instantiation of ApiClient for it to take effect.
            swagger_client.configuration.verify_ssl = verify_ssl
            self.session.verify = session_verify

        if self.cert:
            swagger_client.configuration.cert_file = self.cert[0]
            swagger_client.configuration.key_file = self.cert[1]
            self.session.cert = self.cert

        self.swag_client = swagger_client.ApiClient(self.url)
        if self.token:
            self.swag_client.set_default_header('Authorization', self.token)
            self.session.headers['Authorization'] = self.token
        # Add modules as attributes of this client
        self.api = swagger_client.Apiv1Api(self.swag_client)
        self.version = swagger_client.VersionApi(self.swag_client)
        self.batchv = swagger_client.Apisbatchv1Api(self.swag_client)
        self.apisappsv1beta1_api = swagger_client.Apisappsv1beta1Api(
            self.swag_client)
        self.extensionsv1beta1 = swagger_client.Apisextensionsv1beta1Api(
            self.swag_client)
        self.apisextensionsv1beta1_api = swagger_client.Apisextensionsv1beta1Api(
            self.swag_client)

        # Add additional (apps, autoscaling, batch, etc...)
        for attr in dir(swagger_client):
            match = re.match(r"^Apis(\w+)Api$", attr)
            if match:
                api_class = getattr(swagger_client, attr)
                api_name = match.group(1)
                api_instance = api_class(self.swag_client)
                setattr(self, api_name, api_instance)
Ejemplo n.º 58
0
def parse_url(url):
    up = urlparse(url)
    # fall back to the standard DNS port (53) when the url does not specify one
    port = up.port if up.port else 53
    return up.scheme, up.hostname, port
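For example, a scheme-qualified address without an explicit port falls back to 53:

>>> from urllib.parse import urlparse
>>> up = urlparse('udp://8.8.8.8')
>>> (up.scheme, up.hostname, up.port if up.port else 53)
('udp', '8.8.8.8', 53)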
Ejemplo n.º 59
0
def ParseRegistryURI(uri):
    url = urlparse.urlparse(uri, scheme="file")
    # netloc carries the hive name; the path becomes a backslash-separated key path
    return RegistryKeySpec(hive=url.netloc,
                           winreg_hive=getattr(winreg, url.netloc),
                           path=url.path.replace("/", "\\").lstrip("\\"))
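A sketch of the parsing step, assuming a registry URI of the form '//HIVE/key/path' (the winreg hive lookup itself requires Windows):

>>> import urlparse  # Python 2 style, as the snippet assumes
>>> url = urlparse.urlparse('//HKEY_LOCAL_MACHINE/Software/Foo', scheme='file')
>>> url.netloc
'HKEY_LOCAL_MACHINE'
>>> url.path.replace('/', '\\').lstrip('\\')
'Software\\Foo'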
Ejemplo n.º 60
0
def movie_list_request(base_url, port, api_key):
    parsedurl = urlparse(base_url)
    log.debug('Received movie list request')
    return '%s://%s:%s%s/api/%s/movie.list?status=active' % (
        parsedurl.scheme, parsedurl.netloc, port, parsedurl.path, api_key)