示例#1
1
def authenticate(name, password):
    """
    Return True if the name/password combination authenticates, else False.

    PAM is tried first; when PAM does not succeed, the password hash is
    verified manually against /etc/shadow (preferred when it exists) or
    the passwd database.
    """
    try:
        # First attempt: delegate to PAM.
        success = pam.pam().authenticate(name, password)
        if success is True:
            return success
    except Exception as e:
        logging.warning(e)
        return False

    if path.exists("/etc/shadow"):

        try:
            if six.PY3:
                shadow = spwd.getspnam(name).sp_pwdp # https://docs.python.org/3.4/library/spwd.html#module-spwd
            else:
                shadow = spwd.getspnam(name).sp_pwd
        except KeyError as e:
            # Unknown user.
            return False
    else:
        shadow = pwd.getpwnam(name).pw_passwd

    # crypt hashes carry a $id$salt$ prefix; capture it to re-hash the input.
    salt_pattern = compile_regex(r"\$.*\$.*\$")

    try:
        salt = salt_pattern.match(shadow).group()
    except AttributeError as a:
        # match() returned None: stored hash has no $id$salt$ prefix
        # (e.g. locked account).
        logging.warning(a)
        return False
    return crypt(password, salt) == shadow
示例#2
0
    def _clean_trailing_date(self)-> None:
        pattern = compile_regex(r'\d{2}\.\d{2}\.\d{2}')

        try:
            if pattern.match(self.parts[-1]):
                del self.parts[-1]

                pattern = compile_regex(r'.+:')

                if pattern.match(self.parts[-1]):
                    del self.parts[-1]
        except IndexError:
            pass
示例#3
0
def build_regex_url(url: str, prefix: str = ':') -> object:
    r"""
    Builds a regex Pattern object with re.compile.
    url should not be a regular expression, but instead a simple path string.
    prefix determines the prefix of a path variable.

    url format:
        /path/to/some/page      =>      ^/path/to/some/page$
        /countries/:country     =>      ^/countries/(?P<country>\w+)$

    etc.
    """

    pattern: str = '^'

    if len(url) == 1:
        pattern += url

    else:
        for part in url.split('/'):
            if not part:
                continue
            if part.startswith(prefix):
                # Path variable: turn ":country" into a named capture group.
                # (raw f-string: the original's plain '\w' was an invalid
                # escape sequence.)
                pattern += rf'/(?P<{part[1:]}>\w+)'
            else:
                pattern += '/' + part

    return compile_regex(pattern + '$')
示例#4
0
def test_secret_identifiers(repo, ignore_patterns):
    # create a new object to make sure we don't double-count any calls
    # from previous tests
    pristine_repo = Repository(repo.path)
    pristine_repo.hash()  # shortest way to create all configuration
    # Compile the comma-separated ignore patterns, skipping empty chunks.
    patterns = set()
    for raw_pattern in ignore_patterns.split(","):
        if raw_pattern:
            patterns.add(compile_regex(raw_pattern))
    found_something = False
    # A vault identifier requested exactly once is flagged: per the final
    # message, every identifier is expected to be used at least twice.
    for identifier, call_count in pristine_repo.vault._call_log.items():
        if call_count == 1:
            ignore = False
            for pattern in patterns:
                if pattern.search(identifier):
                    ignore = True
                    break
            if not ignore:
                io.stderr(
                    _("{x} identifier passed only once to repo.vault.[human_]password_for(): {i}"
                      ).format(
                          i=bold(identifier),
                          x=red("✘"),
                      ))
                found_something = True
    if found_something:
        exit(1)
    else:
        io.stdout(
            _("{x} all arguments to repo.vault.[human_]password_for() used at least twice"
              ).format(x=green("✓")))
示例#5
0
 def pattern(cls):
     """Compile the command-matching regex for this class.

     Bug fix: ``re.compile`` accepts a single ``flags`` argument, so the
     original tuple splat ``*(VERBOSE, MULTILINE, DOTALL)`` raised
     TypeError whenever ``cls.multiline`` was true; the flags are now
     OR-ed together into one value.
     """
     flags = VERBOSE
     if cls.multiline:
         flags |= MULTILINE | DOTALL
     return compile_regex(
         r"^(?P<text>{}\s*{})$".format(cls.command_pattern,
                                       cls.options_pattern), flags)
示例#6
0
def check_auth(user, password):
    """Perform authentication against the local system.

    This function will perform authentication against the local system's
    /etc/shadow or /etc/passwd database for a given user and password.

    :param user: The username to perform authentication with
    :type user: str

    :param password: The password (plain text) for the given user
    :type password: str

    :returns: True if successful, False on a wrong password, None when the
              user does not exist or the stored hash has no $id$salt$ prefix.
    :rtype: bool or None
    """

    salt_pattern = compile_regex(r"\$.*\$.*\$")
    passwd = "/etc/shadow" if path.exists("/etc/shadow") else "/etc/passwd"

    with open(passwd, "r") as f:
        rows = (line.strip().split(":") for line in f)
        records = [row for row in rows if row[0] == user]

    # Unknown user: the original called .match() on a non-string here and
    # crashed; report failure as the docstring promises instead.
    if not records:
        return None

    hashed = records[0][1]  # renamed from ``hash`` (shadowed the builtin)
    salt_match = salt_pattern.match(hashed)
    if salt_match is None:
        # No $id$salt$ prefix (e.g. locked account "!" / "*"): cannot verify.
        return None

    return crypt(password, salt_match.group()) == hashed
示例#7
0
def check_auth():
    """Flask endpoint: authenticate the credentials in the request's JSON
    ``LoginData`` against the local shadow/passwd database.

    On success stores the user name in the session and returns 'OK';
    otherwise clears the session user and returns 'FALSE'.
    """
    try:
        print("check_auth called")
        jsonData = request.json
        # print (jsonData)
        user = jsonData["LoginData"]["uname"]
        password = jsonData["LoginData"]["pwd"]
        # crypt hashes carry a $id$salt$ prefix; capture it for re-hashing.
        salt_pattern = compile_regex(r"\$.*\$.*\$")
        passwd = "/etc/shadow" if path.exists("/etc/shadow") else "/etc/passwd"
        with open(passwd, "r") as f:
            rows = (line.strip().split(":") for line in f)
            records = [row for row in rows if row[0] == user]
            print(records)
        if records == []:
            return 'FALSE'
        hash = records and records[0][1]
        # NOTE(review): if the stored hash lacks the $id$salt$ prefix,
        # match() returns None and this raises AttributeError — confirm
        # whether that can happen for locked accounts.
        salt = salt_pattern.match(hash).group()

        result = (crypt(password, salt) == hash)
        if result == True:
            session['user'] = user
            # print(session['user'])
            return 'OK'
        session['user'] = None
        return 'FALSE'
    except IOError:
        print("Error in check_auth", file=sys.stderr)
示例#8
0
def test_secret_identifiers(repo, ignore_patterns):
    # create a new object to make sure we don't double-count any calls
    # from previous tests
    pristine_repo = Repository(repo.path)
    pristine_repo.hash()  # shortest way to create all configuration
    # Compile the comma-separated ignore patterns, skipping empty chunks.
    patterns = set()
    for raw_pattern in ignore_patterns.split(","):
        if raw_pattern:
            patterns.add(compile_regex(raw_pattern))
    found_something = False
    # A vault identifier requested exactly once is flagged: per the final
    # message, every identifier is expected to be used at least twice.
    for identifier, call_count in pristine_repo.vault._call_log.items():
        if call_count == 1:
            ignore = False
            for pattern in patterns:
                if pattern.search(identifier):
                    ignore = True
                    break
            if not ignore:
                io.stderr(_(
                    "{x} identifier passed only once to repo.vault.[human_]password_for(): {i}"
                ).format(
                    i=bold(identifier),
                    x=red("✘"),
                ))
                found_something = True
    if found_something:
        exit(1)
    else:
        io.stdout(_(
            "{x} all arguments to repo.vault.[human_]password_for() used at least twice"
        ).format(x=green("✓")))
示例#9
0
文件: auth.py 项目: hydratk/hydratk
def check_auth(user, password):
    """Perform authentication against the local systme.

    This function will perform authentication against the local system's
    /etc/shadow or /etc/passwd database for a given user and password.

    Args:
       user (str): The username to perform authentication with
       password (str): The password (plain text) for the given user

    Returns:
       bool: result

    """

    # crypt hashes carry a $id$salt$ prefix; capture it for re-hashing.
    salt_pattern = compile_regex(r"\$.*\$.*\$")
    passwd = "/etc/shadow" if path.exists("/etc/shadow") else "/etc/passwd"
    result = False

    with open(passwd, "r") as f:
        rows = (line.strip().split(":") for line in f)
        records = [row for row in rows if row[0] == user]
    '''check if user exists'''
    if (isinstance(records, list) and len(records) > 0 and records[0][0] == user):
        # records is non-empty here, so ``records and ...`` yields the hash.
        hashv = records and records[0][1]
        salt = salt_pattern.match(hashv).group()
        result = crypt.crypt(password, salt) == hashv
    return result
 def _select_resume(self) -> None:
     """Click the resume-selection element on the current page, if any.

     Looks for a span whose text contains 'Last' (falling back to
     'resume'), walks up to the enclosing div whose id contains 'resume',
     and clicks the corresponding browser element.
     """
     resume_div = BeautifulSoup(self._browser.page_source, 'lxml') \
         .find('span', text=compile_regex('Last'))
     if not resume_div:
         resume_div = BeautifulSoup(self._browser.page_source, 'lxml') \
             .find('span', text=compile_regex('resume'))
     if resume_div:
         # The clickable container is an ancestor div with 'resume' in its id.
         resume_div = resume_div.find_parent(
             'div', {'id': compile_regex('resume')})
         if resume_div:
             try:
                 self._browser.find_element_by_xpath(
                     '//div[@id="{}"]'.format(
                         resume_div.get('id'))).click()
             except ElementNotInteractableException:
                 # Element exists but cannot be clicked; best-effort, move on.
                 pass
     return None
示例#11
0
    def _clean_leading_card_number(self)-> None:
        pattern = compile_regex(r'\*\d{4}')

        try:
            if pattern.match(self.parts[0]):
                del self.parts[0]
        except IndexError:
            pass
示例#12
0
    def _clean_leading_date(self)-> None:
        pattern = compile_regex(r'\d{2}\.\d{2}')

        try:
            if pattern.match(self.parts[0]):
                del self.parts[0]
        except IndexError:
            pass
示例#13
0
    def _clean_trailing_exchange_rate(self)-> None:
        pattern = compile_regex(r'\d{1}\.\d{4}')

        try:
            if pattern.match(self.parts[-1]):
                del self.parts[-1]
                del self.parts[-1]
        except IndexError:
            pass
 def _handle_screening_questions(self,
                                 answer_questions: bool,
                                 collect_q_and_a: bool,
                                 wait=10) -> None:
     """Walk up to 10 screening-question pages, optionally answering each
     question and/or collecting question/answer-option sets into
     ``self._q_and_a``.

     :param answer_questions: submit an answer for each question found
     :param collect_q_and_a: record every question with its answer options
     :param wait: value passed through to ``self._select_continue``
     """
     for _ in range(10):
         try:
             self._select_resume()
             if collect_q_and_a or answer_questions:
                 questions = BeautifulSoup(
                     self._browser.page_source,
                     'lxml').findAll(class_=compile_regex('Questions'))
                 if questions:
                     # First 'Questions' match is discarded — presumably a
                     # container/header element, not a question; confirm.
                     questions.pop(0)
                     for div in questions:
                         labels = div.findAll('label')
                         if not labels:
                             self._select_continue(wait)
                             continue
                         # The first label holds the question text.
                         question_found = labels.pop(0).get_text()\
                             .replace('(optional)', '').strip()
                         if not question_found:
                             self._select_continue(wait)
                             continue
                         select = div.findAll('select')
                         if select:
                             # Drop-down question: its options are answers.
                             for element in select:
                                 labels = element.findAll('option')
                                 answers_found = self._get_answers_set(
                                     labels)
                                 if not answers_found:
                                     self._select_continue(wait)
                                     break
                                 if answer_questions:
                                     self._answer_question(
                                         div, question_found, answers_found)
                         else:
                             # Otherwise the remaining labels are answers.
                             answers_found = self._get_answers_set(labels)
                             if not answers_found:
                                 self._select_continue(wait)
                             if answer_questions:
                                 self._answer_question(
                                     div, question_found, answers_found)
                         if collect_q_and_a:
                             if question_found in self._q_and_a:
                                 self._q_and_a[question_found].update(
                                     answers_found)
                             else:
                                 self._q_and_a[
                                     question_found] = answers_found
             self._select_continue(wait)
         except TimeoutException:
             # No further page appeared; done.
             break
         except NoSuchElementException:
             print('NoSuchElementException encountered!')
             break
     return None
示例#15
0
    def _clean_trailing_amount_and_currency(self)-> None:
        """Drop a trailing amount token (e.g. '123,45') and, when the new
        trailing token is a known ISO-4217 alpha-3 code, the currency too."""
        pattern = compile_regex(r'\d+,\d{2}')

        try:
            if pattern.match(self.parts[-1]):
                del self.parts[-1]

                # by_alpha3 presumably returns None for unknown codes — the
                # isinstance guard covers any non-Currency result.
                currency: Any = iso4217parse.by_alpha3(self.parts[-1])
                if isinstance(currency, iso4217parse.Currency):
                    del self.parts[-1]
        except IndexError:
            pass
示例#16
0
def check_auth(user, password):
    """Authenticate ``user``/``password`` against /etc/shadow (preferred)
    or /etc/passwd.

    :returns: True on success, False on a wrong password, None when the
              user is missing or the stored hash has no $id$salt$ prefix.
    """
    salt_pattern = compile_regex(r"\$.*\$.*\$")
    passwd = "/etc/shadow" if path.exists("/etc/shadow") else "/etc/passwd"

    with open(passwd, "r") as f:
        rows = (line.strip().split(":") for line in f)
        records = [row for row in rows if row[0] == user]

    # Unknown user: the original called .match() on a non-string and crashed.
    if not records:
        return None

    hashed = records[0][1]  # renamed from ``hash`` (shadowed the builtin)
    salt_match = salt_pattern.match(hashed)
    if salt_match is None:
        # No $id$salt$ prefix (e.g. locked account): cannot verify.
        return None

    return crypt(password, salt_match.group()) == hashed
示例#17
0
def check_auth(user, password):
    """Authenticate ``user``/``password`` against /etc/shadow (preferred)
    or /etc/passwd.

    :returns: True on success, False on a wrong password, None when the
              user is missing or the stored hash has no $id$salt$ prefix.
    """
    salt_pattern = compile_regex(r"\$.*\$.*\$")
    passwd = "/etc/shadow" if path.exists("/etc/shadow") else "/etc/passwd"

    with open(passwd, "r") as f:
        rows = (line.strip().split(":") for line in f)
        records = [row for row in rows if row[0] == user]

    # Unknown user: the original called .match() on a non-string and crashed.
    if not records:
        return None

    hashed = records[0][1]  # renamed from ``hash`` (shadowed the builtin)
    salt_match = salt_pattern.match(hashed)
    if salt_match is None:
        # No $id$salt$ prefix (e.g. locked account): cannot verify.
        return None

    return crypt(password, salt_match.group()) == hashed
示例#18
0
def get_all_project_links(username):
    """Scrape every project URL from a Devpost user's (paginated) profile.

    :param username: Devpost profile name
    :returns: list of project URLs (https://devpost.com/software/...)
    """
    page_number = 1
    project_links = []

    while True:
        click.echo(
            f"Scraping projects from page {page_number} of {username}'s profile...",
            nl=False)

        # find all links that match the project URL format
        # (https://devpost.com/software/example-project-id)
        # Raw string fixes the original's invalid '\/' and '\.' escape
        # sequences; '/' needs no escaping in a regex.
        profile_soup = get_soup(
            f'https://devpost.com/{username}?page={page_number}')
        project_blobs = profile_soup.find_all(
            'a',
            href=compile_regex(r'^https://devpost\.com/software/(.+)$'))

        tmp_links = [
            project_blob['href'] for project_blob in project_blobs
            if 'built-with' not in project_blob['href']
        ]
        project_links += tmp_links

        click.echo(f'found {len(tmp_links)} projects!')

        # check if there's more projects to scrape their links
        more_projects_button = profile_soup.find(class_='next_page')
        if more_projects_button is None:  # user has <= 24 projects
            break
        else:
            next_link_suburl = more_projects_button.a['href']
            if next_link_suburl == '#':  # we are at the end of the pagination
                break
            else:
                page_number += 1

    click.echo(f'Found a total of {len(project_links)} projects!')
    return project_links
示例#19
0
def is_http_request(data: bytes) -> bool:
    """Checks whether a received message is an HTTP request.

    Only the first header line is inspected. Returns False for payloads
    that are not decodable text (the original raised UnicodeDecodeError
    on binary input, which can never be a valid request line).
    """
    try:
        header_row = data.split(b'\r\n', 1)[0].decode()
    except UnicodeDecodeError:
        return False
    regex = compile_regex(
        r'(GET|POST|PUT|DELETE|HEAD)\s([\/\w\S]+)\s(HTTP\/\d\.\d)')
    return bool(regex.match(header_row))
示例#20
0
文件: models.py 项目: tazjel/turses
from bisect import insort
from calendar import timegm
from functools import partial, total_ordering
from htmlentitydefs import entitydefs

from turses.meta import ActiveList, UnsortedActiveList, Updatable
from turses.utils import is_url, matches_word


TWEET_MAXIMUM_CHARACTERS = 140
STATUS_URL_TEMPLATE = 'https://twitter.com/#!/{user}/status/{id}'

# -- Helpers ------------------------------------------------------------------

# username
username_regex = compile_regex(r'[A-Za-z0-9_]+')
is_username = partial(matches_word, username_regex)
sanitize_username = partial(filter, is_username)
prepend_at = lambda username: '******' % username

# hashtag
hashtag_regex = compile_regex(r'#.+')
is_hashtag = partial(matches_word, hashtag_regex)


def is_DM(status):
    # True only for exact DirectMessage instances; the deliberate
    # __class__ comparison (rather than isinstance) excludes subclasses.
    return status.__class__ == DirectMessage


def is_valid_status_text(text):
    """Checks the validity of a status text."""
示例#21
0
from circuits.net.events import connect
from circuits.net.sockets import TCPClient

from circuits.protocols.irc import ERR_NICKNAMEINUSE
from circuits.protocols.irc import IRC, USER, NICK, JOIN
from circuits.protocols.irc import RPL_ENDOFMOTD, ERR_NOMOTD


from . import __name__, __version__


USAGE = "%prog [options] <host> [<port>]"
VERSION = "%prog v" + __version__

# Matches log filenames of the form "<name>.<channel>.log".
# Raw string fixes the original's invalid '\.' escape sequences.
LOGFILE_REGEX = compile_regex(r"^(.*)\.(.*)\.log$")
# PID file lives next to this module, named after the package.
PIDFILE = path.join(path.dirname(__file__), "{0:s}.pid".format(__name__))


def parse_options():
    parser = OptionParser(usage=USAGE, version=VERSION)

    parser.add_option(
        "-d", "--daemon",
        action="store_true", default=False, dest="daemon",
        help="Enable daemon mode"
    )

    parser.add_option(
        "-c", "--channel",
        action="append", default=None, dest="channels",
示例#22
0
def crawl(root_url, allowed_urls=None, blacklist=None,
          content_types=CONTENT_TYPES, max_depth=0,
          patterns=None, verbose=False, whitelist=None):
    """Crawl a given url recursively for urls.

    :param root_url:      Root URL to start crawling from.
    :type  root_url:      str

    :param allowed_urls:  A list of allowed urls (matched by regex) to
                          traverse.  By default a regex is compiled for
                          the ``root_url`` and used.
    :type  allowed_urls:  list or None

    :param blacklist:     A list of blacklisted urls (matched by regex)
                          to not traverse.
    :type  blacklist:     list or None

    :param content_types: A list of allowable content types to follow.
    :type  content_types: list or CONTENT_TYPES

    :param max_depth:     Maximum depth to follow, 0 for unlimited depth.
    :param max_depth:     int

    :param patterns:      A list of regex patterns to match urls against.
                          If evaluates to ``False``, matches all urls.
    :type  patterns:      list or None or False

    :param verbose:       If ``True`` will print verbose logging
    :param verbose:       bool

    :param whitelist:     A list of whitelisted urls (matched by regex)
                          to traverse.
    :type  whitelist:     list or None

    :returns:             A dict in the form:
                          {"error": set(...), "urls": set(...)}
                          The errors set contains 2-item tuples
                          of (status, url)
                          The urls set contains 2-item tuples of
                          (rel_url,abs_url)
    :rtype:               dict

    In verbose mode the following single-character letters are used
    to denonate meaning for URLs being processed:
     - (I) (I)nvalid URL
     - (C) Did not match allowed (C)ontent Type(s).
     - (F) (F)ound a valid URL
     - (S) (S)een this URL before
     - (E) (E)rror fetching URL
     - (P) Did not match supplied (P)attern(s).
     - (V) URL already (V)isitied
     - (B) URL blacklisted
     - (W) URL whitelisted

    Also in verbose mode each followed URL is printed in the form:
    <status> <reason> <type> <length> <link> <url>
    """

    # Pre-compile the user-supplied regex lists (empty when not given).
    blacklist = [
        compile_regex(regex)
        for regex in blacklist] if blacklist else [
    ]

    patterns = [
        compile_regex(regex)
        for regex in patterns] if patterns else [
    ]

    root_url = parse_url(root_url)
    queue = deque([root_url])
    visited = []
    errors = []
    urls = []
    n = 0  # pages fetched so far (used as the depth counter)
    l = 0  # count of URLs recorded in ``urls``

    # NOTE(review): when ``whitelist`` is given but ``allowed_urls`` is
    # None (its default) this raises TypeError — callers must pass both.
    if whitelist:
        whitelist.extend(allowed_urls)

    whitelist = [
        compile_regex(regex)
        for regex in whitelist] if whitelist else [
    ]

    while queue:
        try:
            if max_depth and n >= max_depth:
                break

            n += 1
            current_url = queue.popleft()
            _current_url = current_url.utf8()
            visited.append(_current_url)

            response, content = fetch_url(_current_url)

            if not response.status == 200:
                errors.append((response.status, _current_url))
                links = []
            else:
                links = list(get_links(content))

            verbose and log(
                " {0:d} {1:s} {2:s} {3:s} {4:d} {5:s}",
                response.status, response.reason,
                response["content-type"], response.get("content-length", ""),
                len(links), current_url.utf8()
            )

            for link in links:
                # Resolve against the current page and canonicalize.
                url = current_url.relative(link).defrag().canonical()
                _url = url.utf8()

                if _url in urls:
                    verbose and log("  (S): {0}", _url)
                    continue

                if url._scheme not in ("http", "https"):
                    verbose and log("  (I): {0}", _url)
                    continue

                if _url in visited:
                    verbose and log("  (V): {0}", _url)
                    continue

                # Record the URL only when it matches ``patterns``; either
                # way the link is still considered for further crawling.
                if patterns and not any(
                        (regex.match(_url) is not None)
                        for regex in patterns
                        ):
                    verbose and log("  (P): {0}", _url)
                else:
                    verbose and log("  (F): {0}", _url)
                    urls.append(_url)
                    l += 1

                # HEAD probe for the content type; note this rebinds
                # ``response`` (the page response above is done with).
                response = head(_url)
                try:
                    response.raise_for_status()
                    content_type = response.headers.get("Content-Type", None)
                except:
                    # Deliberate best-effort: any failure here just means
                    # the content type is unknown.
                    content_type = None

                if blacklist and any((
                        regex.match(_url) is not None)
                        for regex in blacklist
                        ):
                    # Blacklisted URLs are followed only if whitelisted too.
                    if whitelist and any(
                            (regex.match(_url) is not None)
                            for regex in whitelist
                            ):
                        if content_type is None or (
                                content_type and content_type in content_types
                                ):
                            queue.append(url)
                            verbose and log("  (W): {0}", _url)
                        else:
                            verbose and log("  (C): {0}", _url)
                    else:
                        visited.append(_url)
                        verbose and log("  (B): {0}", _url)
                else:
                    if content_type is None or (
                            content_type and content_type in content_types
                            ):
                        queue.append(url)
                    else:
                        verbose and log("  (C): {0}", _url)

            not verbose and status(
                "Q: {0:d} F: {1:d} V: {2:d} L: {3:d}",
                len(queue), n, len(visited), l
            )
        # KeyboardInterrupt derives from BaseException, so the clause
        # below is still reachable despite following ``except Exception``.
        except Exception as e:  # pragma: no cover
            error(e)
        except KeyboardInterrupt:  # pragma: no cover
            break

    return {
        "urls": urls,
        "errors": errors
    }
示例#23
0
 def __init__(self, subset = False):
     """Initialise corpus paths, the token-cleaning regex and stop words.

     :param subset: when True use the smaller "subset" corpus directory
                    instead of the full "alle" corpus.
     """
     self.root = dirname(__file__) + "/data/"
     self.data_source = self.root + "subset" if subset else self.root + "alle"
     # Matches every character that is NOT a lowercase letter or apostrophe.
     self.pattern = compile_regex("[^a-z']")
     self.stop_words = self.loadStopWords("stop_words.txt")
示例#24
0
# -*- coding: utf-8 -*-
"""
This module contains functions used across different modules.
"""
import sys
from re import findall
from re import compile as compile_regex
from sys import stdout
from functools import partial

# Matches http/https URLs. Raw strings fix the original's invalid '\('
# and '\)' escape sequences (same regex, no DeprecationWarning).
URL_REGEX = compile_regex(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|'
                          r'(?:%[0-9a-fA-F][0-9a-fA-F]))+')


def matches_word(regex, word):
    """
    Return `True` if the whole `word` is matched by `regex`, `False`
    otherwise.
    """
    found = regex.match(word)
    if found is None:
        return False
    # match() anchors at position 0, so the span covers the whole word
    # exactly when it ends at len(word).
    return found.span() == (0, len(word))


# username
username_regex = compile_regex(r'[A-Za-z0-9_]+')
# Predicate: True when the entire word is a valid username.
is_username = partial(matches_word, username_regex)


def sanitize_username(username):
示例#25
0
"""Internet Relay Chat Utilities"""

from re import compile as compile_regex

from circuits.six import u

# nick!user@host prefix of an IRC message (three capture groups).
PREFIX = compile_regex("([^!].*)!(.*)@(.*)")
# Optional mIRC color parameters: foreground and optional ",background".
COLOR_CODE = compile_regex(r'(?:(\d\d?)(?:(,)(\d\d?))?)?')
# A ^C (\x03) color control sequence with optional fg/bg numbers.
COLOR = compile_regex(r"\x03(?:(\d\d?)(?:,(\d\d?))?)?")


class Error(Exception):
    """Base exception raised by the IRC utilities in this module."""


def strip(s, color=False):
    """strip(s, color=False) -> str

    Strips the : from the start of a string
    and optionally also strips all colors if
    color is True.

    :param s str: string to process
    :param color bool: whether to strip colors

    :returns str: returns processes string
    """

    if len(s) > 0:
        if s[0] == u(":"):
            s = s[1:]
示例#26
0
    'NestedResource',
    'parent_field_lookup',
    )


# Record type describing one relational (nested) route — presumably from a
# records/namedtuple-style library; confirm the exact Record semantics.
_RelationalRoute = Record.create_type(
    'RelationalRoute',
    'name',
    'collection_name',
    'viewset',
    'ancestor_lookup_by_resource_name',
    'ancestor_collection_name_by_resource_name'
    )


# A valid Python identifier: letter/underscore followed by word characters.
_VALID_PYTHON_IDENTIFIER_RE = compile_regex(r"^[a-z_]\w*$", IGNORECASE)


def make_urlpatterns_from_resources(resources, router_class=None):
    router_class = router_class or DefaultRouter
    nested_router_class = _create_nested_route_router(router_class, resources)
    router = nested_router_class()

    relationships_by_resource_name = defaultdict(dict)
    _populate_resource_relationships(resources, relationships_by_resource_name)

    flattened_resources = _flatten_nested_resources(resources)

    for flattened_resource in flattened_resources:
        url_path = _create_url_path_from_flattened_resource(flattened_resource)
        nested_viewset = _create_nested_viewset(
示例#27
0
class BinaryAsset(Asset):
    """Generator for Binary Assets."""

    def generate(self):
        # Concatenate every source file verbatim and emit at the target path.
        self.emit(
            self.path, ''.join(read(source) for source in self.sources)
            )

register_handler('binary', BinaryAsset)

# ------------------------------------------------------------------------------
# CSS Assets
# ------------------------------------------------------------------------------

# Matches embed("...") directives in CSS and captures the embedded path.
embed_regex = compile_regex(r'embed\("([^\)]*)"\)')
find_embeds = embed_regex.findall
substitute_embeds = embed_regex.sub

class CSSAsset(Asset):
    """Generator for CSS Assets."""

    def __init__(self, *args):
        super(CSSAsset, self).__init__(*args)
        get_spec = self.spec.get
        self.cache = {}
        self.embed_path_root = get_spec('embed.path.root')
        self.embed_url_base = get_spec('embed.url.base')
        self.embed_url_template = get_spec('embed.url.template')
        self.todo = (
            get_spec('bidi') and ('', get_spec('bidi.extension'))  or ('',)
示例#28
0
文件: main.py 项目: tav/open-map
# -----------------------------------------------------------------------------
# Globals
# -----------------------------------------------------------------------------

# Public JSON feed URLs for the data/types spreadsheets; the spreadsheet
# keys are defined elsewhere in this module.
DATA_URL = (
    'https://spreadsheets.google.com/feeds/list/%s/od6/public/values?alt=json'
    % DATA_SPREADSHEET
    )

TYPES_URL = (
    'https://spreadsheets.google.com/feeds/list/%s/od6/public/values?alt=json'
    % TYPES_SPREADSHEET
    )

# Datastore key factory (presumably Google App Engine ``db`` — confirm).
create_key = db.Key.from_path
# Extracts the word following each '#', case-insensitively.
find_hashtags = compile_regex(r'(?i)(?<=\#)\w+').findall

# Day-of-month ordinal suffixes, indexed by day number (index 0 unused).
ORDINALS = [
    '', '1st', '2nd', '3rd', '4th', '5th', '6th', '7th',
    '8th', '9th', '10th', '11th', '12th', '13th', '14th',
    '15th', '16th', '17th', '18th', '19th', '20th', '21st',
    '22nd', '23rd', '24th', '25th', '26th', '27th', '28th',
    '29th', '30th', '31st'
    ]

MONTHS = {
    'Jan': 'January',
    'Feb': 'February',
    'Mar': 'March',
    'Apr': 'April',
    'May': 'May',
示例#29
0
# -*- coding: utf-8 -*-
from re import compile as compile_regex

# Two or more consecutive slashes.
_MULTIPLE_PATHS = compile_regex(r"/{2,}")


def normalize_path(path):
    """Return ``path`` with exactly one leading slash, one trailing slash
    and every run of consecutive slashes collapsed to a single one.

    An empty ``path`` normalizes to ``"/"``.
    """
    if not path:
        return "/"
    candidate = "/%s/" % path.strip("/")
    return _MULTIPLE_PATHS.sub("/", candidate)


# ------ UNIT TESTS ------
from nose.tools import eq_
示例#30
0
from re import compile as compile_regex, IGNORECASE

from flask import request, g
from flask_restful import abort
from flask.views import MethodViewType
from itsdangerous import URLSafeTimedSerializer, BadSignature
from werkzeug.datastructures import MultiDict

from app import app
from data import models as db_models

__SECRET_KEY = app.config.get('SECRET_KEY')

# Signed, timestamped auth tokens, salted with the app's TOKEN_SALT.
token_serializer = URLSafeTimedSerializer(__SECRET_KEY,
                                          app.config.get('TOKEN_SALT'))
# CTF flag format is configurable via FLAG_REGEXP; default ^\w+ctf\w+$.
flag_re = compile_regex(app.config.get('FLAG_REGEXP', r'^\w+ctf\w+$'))
# Usernames: alphanumeric runs separated by single '.' or '_' characters.
username_re = compile_regex(r'^[a-z0-9]+(?:[._][a-z0-9]+)*$', IGNORECASE)
# Email validation. Bug fix: the domain part used "(?:.\w+)*" where "."
# matched ANY character; it must be an escaped literal dot "\.".
email_re = compile_regex(r'^[a-z0-9\-_.+=]+@\w+(?:\.\w+)*$', IGNORECASE)


def generate_code(length=6) -> str:
    """Return a random code consisting of ``length`` decimal digits."""
    code_chars = []
    for _ in range(length):
        code_chars.append(choice(digits))
    return ''.join(code_chars)


def current_user(token):
    if token is None:
        return
    try:
        t = token_serializer.loads(token, max_age=12 * 3600)  # 12 hours
    except BadSignature:
        return
示例#31
0
"""Internet Relay Chat Utilities"""


from re import compile as compile_regex

from circuits.six import u

# nick!user@host prefix of an IRC message (three capture groups).
PREFIX = compile_regex("([^!].*)!(.*)@(.*)")
# Optional mIRC color parameters: foreground and optional ",background".
# Raw strings fix the original's invalid '\d' escape sequences and match
# the sibling copy of this module that already uses raw literals.
COLOR_CODE = compile_regex(r'(?:(\d\d?)(?:(,)(\d\d?))?)?')
# A ^C (\x03) color control sequence with optional fg/bg numbers.
COLOR = compile_regex(r"\x03(?:(\d\d?)(?:,(\d\d?))?)?")


class Error(Exception):
    """Base exception raised by the IRC utilities in this module."""


def strip(s, color=False):
    """strip(s, color=False) -> str

    Strips the : from the start of a string
    and optionally also strips all colors if
    color is True.

    :param s str: string to process
    :param color bool: whether to strip colors

    :returns str: returns processes string
    """

    if len(s) > 0:
        if s[0] == u(":"):
示例#32
0
"""Internet Relay Chat Utilities"""

from re import compile as compile_regex

from circuits.six import u

# nick!user@host prefix of an IRC message (three capture groups).
PREFIX = compile_regex("([^!].*)!(.*)@(.*)")


class Error(Exception):
    """Base exception raised by the IRC utilities in this module."""


def strip(s, color=False):
    """strip(s, color=False) -> str

    Strips the : from the start of a string
    and optionally also strips all colors if
    color is True.

    :param s str: string to process
    :param color bool: whether to strip colors

    :returns str: returns processes string
    """

    if len(s) > 0:
        if s[0] == u(":"):
            s = s[1:]
    if color:
        s = s.replace(u("\x01"), u(""))
示例#33
0
    used_char_args = {'h'}

    # Arange the params so that single-character arguments are first. This
    # esnures they don't have to get --long versions. sorted is stable, so the
    # parameters will otherwise still be in relative order.
    params = sorted(func_sig.parameters.values(),
                    key=lambda param: len(param.name) > 1)

    for param in params:
        _add_arguments(param, parser, used_char_args, add_nos)

    return parser


# Splits a docstring on a separator line of four-or-more hyphens
# (description above, epilog below).
_DOCSTRING_SPLIT = compile_regex(r'\n\s*-{4,}\s*\n')


def parse_docstring(docstring):
    '''
    Given a docstring, parse it into a description and epilog part
    '''
    if docstring is None:
        return '', ''

    parts = _DOCSTRING_SPLIT.split(docstring)

    if len(parts) == 1:
        return docstring, ''
    elif len(parts) == 2:
        return parts[0], parts[1]
示例#34
0
 def displayed_tasks(self, all_tasks):
     """Filter ``all_tasks`` down to those matching ``self.task_regex``.

     When no regex is configured, ``all_tasks`` is returned unchanged.
     """
     if not self.task_regex:
         return all_tasks
     task_re = compile_regex(self.task_regex)
     # Pattern.match is already a (task -> match-or-None) predicate.
     return filter(task_re.match, all_tasks)
示例#35
0
文件: utils.py 项目: 1reza/turses
# -*- coding: utf-8 -*-

"""
This module contains functions used across different modules.
"""

from re import findall
from re import compile as compile_regex
from subprocess import call
from sys import stdout
from os import devnull
from functools import partial


# Matches http/https URLs. Raw strings fix the original's invalid '\('
# and '\)' escape sequences (same regex, no DeprecationWarning).
URL_REGEX = compile_regex(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|'
                          r'(?:%[0-9a-fA-F][0-9a-fA-F]))+')


def matches_word(regex, word):
    """
    Return `True` if the whole `word` is matched by `regex`, `False`
    otherwise.
    """
    found = regex.match(word)
    # `regex.match` anchors at position 0, so only the end needs checking.
    return bool(found) and found.end() == len(word)

is_url = partial(matches_word, URL_REGEX)  # predicate: the whole word is a URL

示例#36
0
# ------------------------------------------------------------------------------


class BinaryAsset(Asset):
    """Generator for Binary Assets."""

    def generate(self):
        """Concatenate every source file and emit the result at ``self.path``."""
        merged = ''.join(read(src) for src in self.sources)
        self.emit(self.path, merged)


register_handler('binary', BinaryAsset)

# ------------------------------------------------------------------------------
# CSS Assets
# ------------------------------------------------------------------------------

# Matches embed("path") directives inside CSS sources; group 1 is the path.
embed_regex = compile_regex(r'embed\("([^\)]*)"\)')
find_embeds = embed_regex.findall
substitute_embeds = embed_regex.sub


class CSSAsset(Asset):
    """Generator for CSS Assets."""
    def __init__(self, *args):
        super(CSSAsset, self).__init__(*args)
        get_spec = self.spec.get
        self.cache = {}
        self.embed_path_root = get_spec('embed.path.root')
        self.embed_url_base = get_spec('embed.url.base')
        self.embed_url_template = get_spec('embed.url.template')
        self.todo = (get_spec('bidi') and ('', get_spec('bidi.extension'))
                     or ('', ))
示例#37
0
"""Utilities for byte conversion."""
from binascii import hexlify
from re import compile as compile_regex
import array
import struct

from .const import ScannerMode

# compiled regex to match lowercase MAC-addresses coming from
# bt_addr_to_string
RE_MAC_ADDR = compile_regex('(?:[0-9a-f]{2}:){5}(?:[0-9a-f]{2})')


def is_valid_mac(mac):
    """Return True if `mac` is a complete lowercase MAC address, else False.

    Uses fullmatch so strings with trailing garbage (e.g.
    "aa:bb:cc:dd:ee:ff:00") are rejected; the previous unanchored match()
    accepted any string that merely *started* with a valid MAC.
    """
    return RE_MAC_ADDR.fullmatch(mac) is not None


def data_to_hexstring(data):
    """Convert an array of binary data to the hex representation as a string."""
    binstring = data_to_binstring(data)
    return hexlify(binstring).decode('ascii')


def data_to_uuid(data):
    """Convert an array of binary data to the iBeacon uuid format."""
    hexed = data_to_hexstring(data)
    # 8-4-4-4-12 hex-digit grouping.
    chunks = (hexed[0:8], hexed[8:12], hexed[12:16], hexed[16:20], hexed[20:32])
    return '-'.join(chunks)


def data_to_binstring(data):
示例#38
0
文件: root.py 项目: egilchri/tweetapp
    write = start_response('%d %s' % tuple(response.status), raw_headers)

    if http_method != 'HEAD':
        write(content)

    response.stream.close()

    return [''] # @/@ why do we have this instead of None ??

# ------------------------------------------------------------------------------
# http request objekt
# ------------------------------------------------------------------------------

VALID_CHARSETS = frozenset(['utf-8'])
# Case-insensitive search for a ";charset=..." parameter in a header value.
find_charset = compile_regex(r'(?i);\s*charset=([^;]*)').search

class RequestAPI(object):
    """HTTP Request."""

    def __init__(
        self, environ, response, parse_query_string=parse_query_string,
        find_charset=find_charset, urlunquote=urlunquote
        ):

        self.service_name = ''
        self.request_method = environ['REQUEST_METHOD']

        self.environ = environ
        self.response = response
示例#39
0
from zope.interface import implements
from paste.httpexceptions import HTTPUnauthorized
from paste.deploy.converters import asbool
from paste.response import remove_header

from repoze.who.middleware import PluggableAuthenticationMiddleware
from repoze.who.config import WhoConfig, \
                              make_middleware_with_config as mk_mw_cfg
from repoze.who.interfaces import IIdentifier, IAuthenticator, IChallenger

__all__ = ['AuthenticationForgerPlugin', 'AuthenticationForgerMiddleware',
           'make_middleware', 'make_middleware_with_config']


# Parses an HTTP status string like "401 Unauthorized" into 'code' and 'reason'.
_HTTP_STATUS_PATTERN = compile_regex(r'^(?P<code>[0-9]{3}) (?P<reason>.*)$')


class AuthenticationForgerPlugin(object):
    """
    :mod:`repoze.who` plugin to forge authentication easily and bypass
    :mod:`repoze.who` challenges.

    This plugin enables you to write identifier and challenger-independent
    tests. As a result, your protected areas will be easier to test:

    #. To forge authentication, without bypassing identification (i.e., running
       MD providers), you can use the following WebTest-powered test::

           def test_authorization_granted(self):
               '''The right subject must get what she requested'''
示例#40
0
文件: utils.py 项目: Erik-k/turses
# -*- coding: utf-8 -*-

"""
This module contains functions used across different modules.
"""

from re import findall
from re import compile as compile_regex
from subprocess import call
from sys import stdout
from os import devnull
from functools import partial


# Matches http/https URLs: the scheme followed by one or more allowed URL
# characters (alphanumerics, common punctuation, or %-escaped octets).
URL_REGEX = compile_regex('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|'
                          '(?:%[0-9a-fA-F][0-9a-fA-F]))+')


def matches_word(regex, word):
    """
    Return `True` if the whole `word` is matched by `regex`, `False`
    otherwise.
    """
    m = regex.match(word)
    if m is None:
        return False
    # match() anchors at 0, so start() == 0 always holds; kept for clarity.
    return m.start() == 0 and m.end() == len(word)

# username
# One or more ASCII word characters; via `matches_word` the *entire* word
# must match for `is_username` to return True.
username_regex = compile_regex(r'[A-Za-z0-9_]+')
is_username = partial(matches_word, username_regex)
示例#41
0
from re import compile as compile_regex
from sys import exit as exit_program
from math import sqrt
from json import loads

f = open("tests.json")
tests = loads(f.read())
f.close()

regex = compile_regex(r"([-+]?\d*)\w?\^?\d?")
more = False

print("""Valid Inputs:
    expression following the format ax^2+bx+c
    exit
    run tests
    more : toggles info such as a b c delta On or Off : default = Off""")


def num_after_point(x):
    """Return the number of digits after the decimal point in `x`.

    :param x: a number (or anything with a numeric str() form)
    :returns: count of characters after the '.', or 0 when there is none
    """
    s = str(x)
    # Idiomatic membership test ('not in' instead of 'not ... in').
    if '.' not in s:
        return 0
    return len(s) - s.index('.') - 1


def solve(expression):
    matches = regex.finditer(expression)

    a, b, c = (int((match.group(1) if match.group(1) != "-" else -1) or 1)
               for matchNum, match in enumerate(matches, start=1)
示例#42
0
from argparse import ArgumentParser

from os.path import expanduser, join as pjoin, isfile, isdir, dirname
from os import makedirs

from re import match, compile as compile_regex

from datetime import datetime, timedelta

import string

import traceback


TODO_FILE = pjoin( expanduser("~"), "Documents", "todo.txt" )
TODO_PATTERN = compile_regex( r"(?:(?P<later>\;\ )?(?:\((?P<priority>[A-Z])\)\ )?(?:\.(?P<due_date>\d{4}\-\d{2}\-\d{2})\ )?(?:(?P<creation_date>\d{4}\-\d{2}\-\d{2})\ )?(?P<todo>[^\:\+\n]*[^\:\+\ ])(?:\ \+(?P<project_name>[^\s\#]+)(?:\#(?P<project_seq>\d+))?)?(?P<tags>(?:\ \:[A-Z\_\d]+)*)?)|^(?P<comment>\;\;).*" )
URGENT_TIME = timedelta(days = 7)


class TodoTask:
	def __init__(self, line, comment = False, prioritize = True):
		line = line.strip()
		if comment or line.startswith(";; "):
			self.later = None
			self.priority = None
			self.due_date = None
			self.creation_date = None
			self.todo = None
			self.project_name = None
			self.project_seq = None
			self.tags = set()
示例#43
0
from urwid.raw_display import Screen
from urwid import AttrWrap, Edit, Frame, ListBox, Pile, SimpleListWalker, Text


USAGE = "%prog [options] host [port]"
VERSION = "%prog v" + systemVersion

MAIN_TITLE = "cIRC - {0:s}".format(systemVersion)

HELP_STRINGS = {
    "main": "For help, type: /help"
}

# Parses "/command args..." input lines (case-insensitive command).
# The global inline flags were originally written as "(?iu)" at the END of
# the pattern, which is a DeprecationWarning since 3.6 and an error on
# Python 3.11+; they must appear at the start. "\/" was also an invalid
# string escape — '/' needs no escaping in a regex.
CMD_REGEX = compile_regex(
    r"(?iu)/(?P<command>[a-z]+) ?"
    r"(?P<args>.*)"
)


def back_merge(l, n, t=" "):
    """Return `l` with its last `n` elements merged into one `t`-joined string.

    The original `l[:-n].extend([...])` always returned None: list.extend
    returns None and mutated only a throwaway slice copy. Fixed to build and
    return the merged list.
    """
    return l[:-n] + [t.join(l[-n:])]


def parse_options():
    parser = OptionParser(usage=USAGE, version=VERSION)

    parser.add_option(
        "-c", "--channel",
        action="store", default="#circuits", dest="channel",
        help="Channel to join"
    )
示例#44
0
"""Internet Relay Chat Utilities"""


from re import compile as compile_regex


from circuits.six import u


# IRC prefix of the form "nick!user@host"; the nick must not start with '!'.
PREFIX = compile_regex("([^!].*)!(.*)@(.*)")


class Error(Exception):
    """Base exception type raised by these IRC utilities."""


def strip(s, color=False):
    """strip(s, color=False) -> str

    Strips the : from the start of a string
    and optionally also strips all colors if
    color is True.

    :param s str: string to process
    :param color bool: whether to strip colors

    :returns str: returns processes string
    """

    if len(s) > 0:
        if s[0] == u(":"):
示例#45
0
#!/usr/bin/python3
"""Define several functions SiteInformations."""

from re import compile as compile_regex
from urllib.parse import urlparse

from swiftea_bot.data import BAD_EXTENTIONS, DIR_STATS

# Word/number tokenizer; NOTE(review): the \d+ alternative is redundant
# since \w already covers digits.
regex = compile_regex(r'(\w+|\d+)')


def clean_text(text):
    """Collapse all whitespace runs (tabs, newlines, blanks) to single spaces.

	:param text: text to clean_text
	:type text: str
	:return: cleaned text

	"""
    tokens = text.split()
    return ' '.join(tokens)


def get_base_url(url):
    """Get base url using urlparse.

	:param url: url
	:type url: str
	:return: base url of given url

	"""
    infos_url = urlparse(url)
示例#46
0
from os import kill
from re import compile as compile_regex
from signal import SIGKILL, SIGCHLD

from Constants import FOLLOW_NEW_PROCS, COLOR_NORMAL, COLOR_CURRENT_PROCESS, COLOR_TERMINATED_PROCESS, USE_ASCII
from ProcessWrapper import ProcessWrapper, LaunchArguments, ProcessSignal
from logging2 import debug
from ptrace.debugger import PtraceDebugger, PtraceProcess
from ptrace.debugger.process import ProcessError
from ptrace.debugger.process_event import ProcessEvent, ProcessExit, NewProcessEvent, ProcessExecution
from ptrace.func_call import FunctionCallOptions
from utilsFolder.Parsing import parseInteger
from utilsFolder.PaulaPoll import PaulaPoll, BiDict
from utilsFolder.tree import format_tree, format_ascii_tree

TRACE_SYSCALL_ARGS = compile_regex(r"(not )?([\w]+|\*)")


class ProcessManager:
    def __init__(self, args: LaunchArguments, pollobj: PaulaPoll):
        self.pollobj = pollobj  # PollObj used by the input monitor, needed to register new processes
        self.syscalls_to_trace = []

        self.processList = []
        self.debugger = self.startDebugger(args)
        self.currentProcess = self.processList[0]

        self.syscall_options = FunctionCallOptions(
            write_types=True,
            write_argname=True,
            write_address=True,
示例#47
0
文件: circ.py 项目: spaceone/circuits
from circuits.protocols.irc import (
    ERR_NICKNAMEINUSE, ERR_NOMOTD, IRC, JOIN, NICK, PART, PRIVMSG, QUIT,
    RPL_ENDOFMOTD, USER, Message, request,
)

USAGE = "%prog [options] host [port]"
VERSION = "%prog v" + systemVersion

MAIN_TITLE = "cIRC - {0:s}".format(systemVersion)

HELP_STRINGS = {
    "main": "For help, type: /help"
}

# Parses "/command args..." input lines (case-insensitive command).
# Global inline flags "(?iu)" must be at the START of the pattern — placing
# them at the end is an error on Python 3.11+. "\/" was also an invalid
# string escape; '/' needs no escaping in a regex.
CMD_REGEX = compile_regex(
    r"(?iu)/(?P<command>[a-z]+) ?"
    r"(?P<args>.*)"
)


def back_merge(l, n, t=" "):
    """Return `l` with its last `n` elements merged into one `t`-joined string.

    Fixes the original, which always returned None: list.extend returns None
    and was applied to a temporary slice copy, so nothing was merged at all.
    """
    return l[:-n] + [t.join(l[-n:])]


def parse_options():
    parser = OptionParser(usage=USAGE, version=VERSION)

    parser.add_option(
        "-c", "--channel",
        action="store", default="#circuits", dest="channel",
        help="Channel to join"
    )
示例#48
0
    used_char_args = {'h'}

    # Arange the params so that single-character arguments are first. This
    # esnures they don't have to get --long versions. sorted is stable, so the
    # parameters will otherwise still be in relative order.
    params = sorted(
        func_sig.parameters.values(),
        key=lambda param: len(param.name) > 1)

    for param in params:
        _add_arguments(param, parser, used_char_args, add_nos)

    return parser


# Horizontal rule separating a docstring's description from its epilog:
# a line consisting of four or more dashes.
_DOCSTRING_SPLIT = compile_regex(r'\n\s*-{4,}\s*\n')


def parse_docstring(docstring):
    '''
    Given a docstring, parse it into a description and epilog part.

    :param docstring: docstring to parse, or None
    :returns: (description, epilog) tuple of strings
    '''
    if docstring is None:
        return '', ''

    # Split only on the FIRST separator. The original split on all of them
    # and then returned None whenever there were more than two parts.
    parts = _DOCSTRING_SPLIT.split(docstring, 1)

    if len(parts) == 1:
        return docstring, ''
    return parts[0], parts[1]
示例#49
0
from re import compile as compile_regex


from circuits import Component

from circuits.net.events import write
from circuits.protocols.line import Line


from .commands import PONG
from .utils import parsemsg
from .events import response


NUMERIC = compile_regex("[0-9]+")  # one or more ASCII digits (unanchored)


class IRC(Component):

    """IRC Protocol Component

    Creates a new IRC Component instance that implements the IRC Protocol.
    Incoming messages are handled by the "read" Event Handler, parsed and
    processed with appropriate Events created and exposed to the rest of
    the system to listen to and handle.
    """

    def __init__(self, *args, **kwargs):
        super(IRC, self).__init__(*args, **kwargs)
示例#50
0
def send_debug(message):
    """Print each line of `message` via weechat, prefixed with the script
    name, but only when the plugin's debug_mode option is enabled."""
    if weechat.config_get_plugin('debug_mode') not in ['on', 'true']:
        return
    for chunk in message.split('\n'):
        weechat.prnt('', '%s: %s' % (SCRIPT_NAME, chunk))

def send_traceback():
    """Send the formatted traceback of the current exception to the debug log."""
    send_debug(format_exc())

# ============================== [ URL regex ] =============================== #

url_octet = r'(?:2(?:[0-4]\d|5[0-5])|1\d\d|\d{1,2})'
url_ip_addr = r'%s(?:\.%s){3}' % (url_octet, url_octet)
url_label = r'[0-9a-z][-0-9a-z]*[0-9a-z]?'
url_domain = r'%s(?:\.%s)*\.[a-z][-0-9a-z]*[a-z]?' % (url_label, url_label)
url_regex = compile_regex(
    r'(http[s]?://(?:%s|%s)(?::\d+)?(?:/[^\])>\s]*)?)' % (url_domain, url_ip_addr),
    IGNORECASE
)

# ========================== [ images comparison ] =========================== #

def fast_normalize (im1, im2):
    # Makes sure that the two images have the same size. This function
    # does not care about proportions.

    (x1, y1) = im1.size
    (x2, y2) = im2.size

    if x1 * y1 > x2 * y2:
        return fast_normalize (im2, im1)

    else:
示例#51
0
# -*- coding: utf-8 -*-
from re import compile as compile_regex

# Runs of two or more consecutive slashes, to be collapsed into one.
_MULTIPLE_PATHS = compile_regex(r"/{2,}")


def normalize_path(path):
    """
    Normalize ``path``.
    
    It returns ``path`` with leading and trailing slashes, and no multiple
    continuous slashes.
    
    """
    if not path:
        return "/"

    if not path.startswith("/"):
        path = "/" + path
    if not path.endswith("/"):
        path += "/"

    return _MULTIPLE_PATHS.sub("/", path)


# ------ UNIT TESTS ------
from nose.tools import eq_
示例#52
0
                            "-s",
                            help="Do not gather commute info",
                            action="store_true")

    cli_args = cli_parser.parse_args()

    # sadly, https://ochdatabase.umd.edu/, doesn't have an API, but there is a degree of consistency to search queries and their matching URLs
    # the simplest way forward is to build a search manually and then copy/paste the URL below, as we have done
    url = "https://ochdatabase.umd.edu/housing/price-under+2100"
    page = get_request(url)
    soup = BeautifulSoup(page.content, "html.parser")

    search_results = soup.find(id="expo")
    postings = search_results.find_all(
        "article",
        class_=compile_regex(r"^ocp-property-search property-\d?.*"))

    parsed_posts = []
    for post in postings:
        prop = collect_info(post, cli_args.test)

        if prop is not None:
            parsed_posts.append(prop)

    if not parsed_posts:
        final_message = "No new postings found"
    else:
        if not cli_args.simple:
            gather_commutes(parsed_posts)

        final_message = ""
示例#53
0
class BinaryAsset(Asset):
    """Asset generator that concatenates raw source files verbatim."""

    def generate(self):
        # Join every source file's contents and emit a single artifact.
        merged = ''.join(read(src) for src in self.sources)
        self.emit(self.path, merged)

register_handler('binary', BinaryAsset)

# ------------------------------------------------------------------------------
# CSS Assets
# ------------------------------------------------------------------------------

embed_regex = compile_regex(r'embed\("?([^\)]*)"?\)')
substitute_embeds = embed_regex.sub

class CSSAsset(Asset):
    """Generator for CSS Assets."""

    def __init__(self, *args):
        super(CSSAsset, self).__init__(*args)
        get_spec = self.spec.get
        self.cache = {}
        self.embed_only = get_spec('embed.only')
        self.embed_path_root = get_spec('embed.path.root')
        self.embed_url_base = get_spec('embed.url.base')
        self.embed_url_template = get_spec('embed.url.template')
        self.todo = (
            get_spec('bidi') and ('', get_spec('bidi.extension'))  or ('',)
示例#54
0
文件: main.py 项目: micrypt/assetgen
class BinaryAsset(Asset):
    """Generator for Binary Assets: emits the concatenation of its sources."""

    def generate(self):
        contents = [read(source) for source in self.sources]
        self.emit(self.path, ''.join(contents))

register_handler('binary', BinaryAsset)

# ------------------------------------------------------------------------------
# CSS Assets
# ------------------------------------------------------------------------------

embed_regex = compile_regex(r'embed\("?([^\)]*)"?\)')
# find_embeds = embed_regex.findall
substitute_embeds = embed_regex.sub

class CSSAsset(Asset):
    """Generator for CSS Assets."""

    def __init__(self, *args):
        super(CSSAsset, self).__init__(*args)
        get_spec = self.spec.get
        self.cache = {}
        self.embed_only = get_spec('embed.only')
        self.embed_path_root = get_spec('embed.path.root')
        self.embed_url_base = get_spec('embed.url.base')
        self.embed_url_template = get_spec('embed.url.template')
        self.todo = (