Example #1
 def errorxss_check(self, xss_list, error_string=None) -> str:
     for xssy in xss_list:
         xssz = urldecode(xssy)
         if urldecode(error_string) not in xssz:
             return xssy
         # error_string was reflected in the response, so the WAF likely blocked the payload
         if urldecode(error_string) in xssz:
             return 'WAF Triggered'
     return 'WAF Triggered'
Example #2
 def return_xsscolor(self, xss_payload, joinable) -> str:
     xssz = urldecode(urldecode(xss_payload)).rstrip(' ')
     if len(joinable) > 1:
         if "".join(joinable[1:]) not in xssz:
             xss_payload = joinable[0] + colored(
                 xssz, color='red') + "".join(joinable[1:])
     elif len(joinable) == 1:
         if xssz not in joinable[0]:
             xss_payload = joinable[0] + colored(xssz, color='red')
         else:
             xss_payload = joinable[0].split(xssz)[0] + colored(xssz,
                                                                color='red')
     return xss_payload
Example #3
def evligen(title,start_datetime,end_datetime,location,discription):
    '''
    Takes event data and returns a URL for publishing the event and an ics Calendar object.

    Parameters
    ----------
    title : str
        Title of the event.
    start_datetime : str
        Start date and time of the event in the Jalali calendar.
        Accepted format: {Year}/{Month}/{Day}-{Hour}:{Minute}, e.g. 1400/01/15-10:00.
    end_datetime : str
        End date and time of the event in the Jalali calendar, same format as start_datetime.
    location : str
        Location of the event.
    discription : str
        Details of the event, such as the address or other info.

    Returns
    -------
    tuple
        URL for publishing the event, and the ics Calendar object.

    '''
    c=Calendar()
    e=Event()
    e.name=title
    e.description=discription
    e.location=location
    
    main_url='https://calendar.google.com/calendar/render?action=TEMPLATE&dates={}%2F{}&details={}&location={}&text={}'  # Google Calendar share links must follow this template
    title=urldecode(title)
    location=urldecode(location)
    discription=urldecode(discription)
    
    start_datetime=jdt.strptime(start_datetime,"%Y/%m/%d-%H:%M").togregorian()
    start_datetime=dt(start_datetime.year,start_datetime.month,start_datetime.day,start_datetime.hour,start_datetime.minute) # Convert to a plain datetime object because JalaliDateTime's timezone handling doesn't work properly here.
    start_datetime=start_datetime.replace(tzinfo=tz.gettz('Iran')).astimezone(tz=tz.gettz('UTC'))
    e.begin=start_datetime.strftime("%Y-%m-%d %H:%M:%S")
    
    start_datetime=start_datetime.strftime("%Y%m%dT%H%M%SZ") # Google Calendar only accepts this format.
    
    end_datetime=jdt.strptime(end_datetime,"%Y/%m/%d-%H:%M").togregorian()
    end_datetime=dt(end_datetime.year,end_datetime.month,end_datetime.day,end_datetime.hour,end_datetime.minute)
    end_datetime=end_datetime.replace(tzinfo=tz.gettz('Iran')).astimezone(tz=tz.gettz('UTC'))
    e.end=end_datetime.strftime("%Y-%m-%d %H:%M:%S")
    
    end_datetime=end_datetime.strftime("%Y%m%dT%H%M%SZ")
    
    c.events.add(e)
    
    return main_url.format(start_datetime,end_datetime,discription,location,title),c
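A hypothetical call to the evligen() example above, assuming the ics, jdatetime and dateutil imports the snippet relies on are in place; the event details are made up, and the Jalali datetimes follow the "%Y/%m/%d-%H:%M" format the code actually parses.

url, cal = evligen(
    'Team sync',             # title
    '1400/01/15-10:00',      # Jalali start datetime, "%Y/%m/%d-%H:%M"
    '1400/01/15-11:00',      # Jalali end datetime
    'Tehran',                # location
    'Weekly planning call',  # description
)
print(url)  # shareable Google Calendar "render?action=TEMPLATE" link
print(cal)  # ics Calendar object containing the same event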
Example #4
def handle_admin_commands(bot_id, command):
    cmd, *args = command.split()
    if cmd in ('上线', '下线'):
        wechat_client = 'default'
        if args:
            wechat_client = args[0]
        wechat_client_encoded = urlencode(wechat_client)

        if cmd == '上线':
            resp = wechat.start_client(client=wechat_client_encoded)
            if not resp.ok or resp.json()['code'] != 0:
                # failed
                reply_or_initiate(user_id=bot_id,
                                  body='%s 上线失败' % wechat_client)
            elif resp.json()['status'] == 'client already exists':
                reply_or_initiate(user_id=bot_id,
                                  body='%s 已在线上(有可能正在等待扫码登录)' % wechat_client)
        elif cmd == '下线':
            resp = wechat.stop_client(client=wechat_client_encoded)
            if not resp.ok or resp.json()['code'] != 0:
                reply_or_initiate(user_id=bot_id,
                                  body='%s 下线失败(可能当前不在线上)' % wechat_client)
            elif resp.json()['status'] == 'success':
                reply_or_initiate(user_id=bot_id,
                                  body='%s 已下线' % wechat_client)
    elif cmd == '查看':
        resp = wechat.check_client()
        if resp.ok and resp.json()['code'] == 0:
            reply_or_initiate(user_id=bot_id,
                              body='\n'.join([
                                  urldecode(x['account']) + ': ' + x['state']
                                  for x in resp.json()['client']
                              ]))
Example #5
async def list_leeches(client, message):
    user_id = message.from_user.id
    text = ''
    quote = None
    parser = pyrogram_html.HTML(client)
    for i in await aria2_tell_active(session):
        if i.get('bittorrent'):
            info = i['bittorrent'].get('info')
            if not info:
                continue
            tor_name = info['name']
        else:
            tor_name = os.path.basename(i['files'][0]['path'])
            if not tor_name:
                tor_name = urldecode(
                    os.path.basename(
                        urlparse(i['files'][0]['uris'][0]['uri']).path))
        a = f'''<b>{html.escape(tor_name)}</b>
<code>{i['gid']}</code>\n\n'''
        futtext = text + a
        if len((await parser.parse(futtext))['message']) > 4096:
            await message.reply_text(text, quote=quote)
            quote = False
            futtext = a
        text = futtext
    if not text:
        text = 'No leeches found.'
    await message.reply_text(text, quote=quote)
Example #6
    def worker_request(
        self,
        request_method,
        request_url,
        request_params=None,
        request_data=None,
        request_retry=None,
        request_headers=None,
        request_label=None
    ):
        """
        :param request_method:
        :param request_url:
        :param request_params:
        :param request_data:
        :param request_retry:
        :param request_headers:
        :param request_label:
        :return:
        """
        request_data_decoded = None
        if request_data:
            request_data_decoded = urldecode(request_data)

        self.logger.debug(
            "Request",
            extra={
                'request_method': request_method,
                'request_url': request_url,
                'request_params': request_params,
                'request_data_decoded': request_data_decoded})

        response = None
        delay_secs = 2
        tries = 0

        while True:
            tries += 1
            if tries > 1:
                _request_label = f'{request_label}: Attempt {tries}'
            else:
                _request_label = request_label

            try:
                response = self.base_request.request(
                    request_method=request_method,
                    request_url=request_url,
                    request_params=request_params,
                    request_data=request_data,
                    request_retry=request_retry,
                    request_retry_excps_func=None,
                    request_headers=request_headers,
                    request_label=_request_label
                )
            except Exception as ex:
                print_traceback(ex)
                raise

            return response
Example #7
def evaluate():
    code = urldecode(request.query_string.decode('utf-8'))

    result = evaluator.run(code)
    if not result.strip():
        result = '(no output)'

    return make_response(result, 200, {'Content-Type': 'text/plain'})
Example #8
def register():
    if 'redirect_to' in flask.request.args:
        flask.session['redirect_to'] = urldecode(flask.request.args['redirect_to'])

    if flask.session.user_id is not None:
        return flask.redirect('/me')

    return flask.render_template('register_form.html')
Example #10
 def xss_reflection_check(self, xss_list, xss_strings) -> dict:
     match_string = error_string = ""
     if xss_strings['match_string']:
         match_string = urldecode(urldecode(xss_strings['match_string']))
     if xss_strings['error_string']:
         error_string = urldecode(urldecode(xss_strings['error_string']))
     xss_payload = urldecode(urldecode(self.xss_payload))
     reflections = {
         'xss_matches': [],
         'waf_matches': [],
     }
     for xss_line in xss_list.split('\n'):
         xss_line = urldecode(xss_line)
         if match_string:
             if match_string in xss_line:
                 for xssp in re.findall(match_string, xss_line):
                     reflections['xss_matches'].append(
                         (match_string, xss_line))
         elif error_string:
             if error_string in xss_line:
                 reflections['waf_matches'].append("WAF Triggered")
         else:
             if xss_payload in xss_line:
                 for xssp in re.findall(xss_payload, xss_line):
                     reflections['xss_matches'].append((xssp, xss_line))
     return reflections
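The nested urldecode(urldecode(...)) calls above normalise payloads that may arrive double percent-encoded. A minimal standalone illustration of why one pass is not enough, with urllib.parse.unquote standing in for the urldecode helper used in these examples:

from urllib.parse import unquote as urldecode  # assumed stand-in for the helper above

payload = '<script>alert(1)</script>'
double_encoded = '%253Cscript%253Ealert(1)%253C%252Fscript%253E'  # payload percent-encoded twice

assert urldecode(double_encoded) != payload             # one pass only strips the outer layer
assert urldecode(urldecode(double_encoded)) == payload  # two passes recover the raw payload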
Example #11
def sign_in():
    if 'redirect_to' in flask.request.args:
        flask.session['redirect_to'] = urldecode(
            flask.request.args['redirect_to'])

    if flask.session.user_id is not None:
        return flask.redirect('/me')

    return flask.render_template('profile/authorize_form.html')
Example #12
 def decode(self, value, encode_types):
     """Decode value according to the detected encode_types."""
     for encode_type in encode_types:
         if encode_type == 'urlencode':
             value = urldecode(value)
         if encode_type == 'base64encode':
             value = str(base64decode(value.encode('utf-8')),
                         encoding='utf-8')
     return value
Example #13
class ExfiltrateHandler(MyRequestHandler):
    """Exfiltrates data from GET / POST requests. Supports multiple popular decoders"""

    decoders = {
        'plain': lambda x: x,
        'b64': b64decode,
        'b64u': lambda x: b64decode(urldecode(x.decode())),
        'url': lambda x: urldecode(x.decode())
    }

    def write_decoded(self, decoder, payload):
        decode = self.decoders.get(
            decoder,
            self.decoders['url']
        )
        decoded_payload = decode(payload)

        assert isinstance(decoded_payload, (str, bytes))

        if isinstance(decoded_payload, bytes):
            sys.stdout.buffer.write(decoded_payload)
        else:
            sys.stdout.write(decoded_payload)

    def get(self, oob_type):
        decoder = self.get_query_argument('decoder', 'url')

        sys.stdout.write("=" * 10 + '\n')

        data = None
        if oob_type == "query":
            data = self.get_query_argument('data', None, strip=False)
            if data is not None:
                data = data.encode()
        elif oob_type == "post":
            data = self.request.body

        if data is not None:
            self.write_decoded(decoder, data)
        else:
            sys.stdout.write(
                """Warning: No `data` query parameter"""
            )

        sys.stdout.write('\n' + "=" * 10 + '\n')

        self.write("OK")
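For reference, this is what the 'b64u' decoder chain in the table above does, sketched with standard-library stand-ins for the b64decode/urldecode helpers:

from base64 import b64decode
from urllib.parse import unquote as urldecode  # assumed stand-in

raw = b'aGVsbG8gd29ybGQ%3D'                # base64 payload that was also percent-encoded
print(b64decode(urldecode(raw.decode())))  # b'hello world' -- what the 'b64u' decoder yields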
Example #14
 def stringxss_check(self, xss_list, match_string=None) -> str:
     for xssy in xss_list:
         if match_string:
             if urldecode(urldecode(match_string)) in urldecode(xssy):
                 return xssy
         else:
             if urldecode(urldecode(self.xss_payload)) in urldecode(xssy):
                 return xssy
     return 'WAF Triggered'
Example #15
def register():
    if 'redirect_to' in flask.request.args:
        flask.session['redirect_to'] = urldecode(
            flask.request.args['redirect_to'])

    if flask.session.user_id is not None:
        return flask.redirect('/me')

    # get all tutors
    try:
        tutors = get_tutors()
    except requests.exceptions.RequestException:
        return flask.render_template(
            'error.html', reason='Сервис пользователей недоступен'), 503

    return flask.render_template('profile/register_form.html', tutors=tutors)
Example #16
 def color_xss(self, xssy):
     match, line = xssy
     xss_payload = urldecode(match)
     if xss_payload == line:
         final = colored(xss_payload, color='red', attrs=['bold'])
     elif match in line:
         first, last = line.split(xss_payload, 1)
         middle = colored(xss_payload, color='red', attrs=['bold'])
         if match in last:
             first = first + middle
             last_first, last_last = last.split(xss_payload, 1)
             final = first + last_first + colored(
                 xss_payload, color='red', attrs=['bold']) + last_last
         else:
             final = first + xss_payload + last
     else:
         final = line  # payload not reflected as-is; leave the line unchanged
     return final
Example #17
def translate(txt):
    print("-> translate: " + txt)
    if MODE == 2:
        if (os.path.exists('./2.mp3')):
            playsound('./2.mp3')
    elif MODE == 3:
        if (os.path.exists('./3.mp3')):
            playsound('./3.mp3')
    salt = str(random.randint(1000000000, 9999999999))
    md5 = hashlib.md5()
    s = TSLTAPI_APPID + txt + salt + TSLTAPI_KEY
    md5.update(s.encode(encoding='UTF-8'))
    sign = md5.hexdigest()
    if MODE == 2:
        t_from = 'en'
        t_to = 'zh'
    else:
        t_from = 'zh'
        t_to = 'en'
    data = {
        'q': txt,
        'from': t_from,
        'to': t_to,
        'appid': TSLTAPI_APPID,
        "salt": salt,
        'sign': sign
    }
    data = HTTP.request(
        'GET',
        TSLTAPI + "?" + urlencode(data),
        headers={"Content-Type": "application/x-www-form-urlencoded"})
    if (data):
        if (os.path.exists('./1.mp3')):
            playsound('./1.mp3')
        data = json.loads(data.data.decode('utf-8'))
        # assemble the translated result
        dist = ''
        for d in data['trans_result']:
            dist += d['dst'] + "\r\n"
        dist = urldecode(dist)[0:-2]
        print('-> 翻译结果:' + dist)
        return dist
    else:
        if (os.path.exists('./0.mp3')):
            playsound('./0.mp3')
        print('-> 翻译失败')
        return False
Example #18
    def dig(parts, context):
        """
        Search a context object for something matching a $ref (recursive)
        """
        key = parts[0].replace("~1", "/").replace("~0", "~")  # unescape JSON-pointer tokens
        key = urldecode(key)
        try:
            key = int(key)
        except ValueError:
            pass
        if key not in context.keys():
            raise IndexError(key)

        if len(parts) == 1:
            return context[key]
        else:
            return ApiDef.dig(parts[1:], context[key])
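For illustration, a standalone walk through the same escaping and lookup that dig() performs on a JSON-pointer-style $ref; the spec fragment and the unquote alias are assumptions, not part of the original project:

from urllib.parse import unquote as urldecode  # assumed stand-in

spec = {'components': {'schemas': {'a/b': {'type': 'string'}}}}  # hypothetical spec fragment
ref = '#/components/schemas/a~1b'                                # "~1" escapes "/" inside a key
parts = ref.lstrip('#/').split('/')

node = spec
for part in parts:  # what dig() does recursively
    key = urldecode(part.replace('~1', '/').replace('~0', '~'))
    node = node[key]
print(node)  # {'type': 'string'}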
Example #19
    def get_encode_types(self, value):
        value = str(value)
        encode_types = []
        if value == '':
            return encode_types
        # Check whether the value is URL-encoded
        try:
            url_decode_value = urldecode(value)
        except:
            return False
        if url_decode_value != value:
            encode_types.append('urlencode')
        # Skip plain ints/floats so the base64 check below doesn't misfire
        try:
            json_load_value = json.loads(url_decode_value)
            if isinstance(json_load_value, int) or isinstance(
                    json_load_value, float):
                return encode_types
        except:
            pass
        # base64-decode and check that the result is printable ASCII
        try:
            base64decode_value = base64decode(url_decode_value.encode('utf-8'))
            string_basedecode = base64decode_value.decode()
            for _ in string_basedecode:
                if 32 <= ord(_) <= 126:
                    continue
                else:
                    return encode_types
        except:
            return encode_types
        # Confirm the value round-trips as base64
        try:
            if str(base64encode(base64decode_value),
                   encoding='utf-8') == url_decode_value:
                encode_types.append("base64encode")
        except:
            return encode_types

        return encode_types
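A minimal round-trip sketch of what the decode()/get_encode_types() pair above is meant to detect and undo, assuming urldecode/base64decode/base64encode map to the usual urllib.parse and base64 helpers:

from base64 import b64encode
from urllib.parse import quote

# Build a value that was base64-encoded and then URL-encoded before transmission.
original = 'hello world'
wire_value = quote(b64encode(original.encode()).decode())  # 'aGVsbG8gd29ybGQ%3D'

# Following the logic above, get_encode_types(wire_value) should return
# ['urlencode', 'base64encode'], and decode(wire_value, ['urlencode', 'base64encode'])
# should yield 'hello world' again.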
Example #20
def wechat_entry():
    ctx = request.json

    wechat_client = 'default'
    if 'client' in request.args:
        wechat_client = urldecode(request.args['client'])
    g.wechat_client = wechat_client
    g.wechat_client_encoded = urlencode(wechat_client)

    if ctx['post_type'] == 'receive_message' \
            and ctx['type'] == 'friend_message':
        handle_friend_message(ctx)
    elif ctx['post_type'] == 'event' \
            and 'INTERCOM_BOT_USER_ID' in app.config:
        if ctx['event'] == 'input_qrcode':
            qrcode_url = ctx['params'][-1]
            reply_or_initiate(user_id=app.config['INTERCOM_BOT_USER_ID'],
                              body='%s 登录二维码:%s' %
                              (g.wechat_client, qrcode_url))
        elif ctx['event'] == 'login':
            reply_or_initiate(user_id=app.config['INTERCOM_BOT_USER_ID'],
                              body='%s 登录成功,开始等待客人了~' % g.wechat_client)
    return '', 204
Example #21
    def getHostGuestMapping(self):
        mapping = {'hypervisors': []}
        for host_id, host in list(self.hosts.items()):
            if self.skip_for_parent(host_id, host):
                continue
            guests = []

            try:
                if self.config['hypervisor_id'] == 'uuid':
                    uuid = host['hardware.systemInfo.uuid']
                elif self.config['hypervisor_id'] == 'hwuuid':
                    uuid = host_id
                elif self.config['hypervisor_id'] == 'hostname':
                    uuid = host['config.network.dnsConfig.hostName']
                    domain_name = host['config.network.dnsConfig.domainName']
                    if domain_name:
                        uuid = self._format_hostname(uuid, domain_name)
            except KeyError:
                self.logger.debug("Host '%s' doesn't have hypervisor_id property", host_id)
                continue

            if host['vm']:
                for vm_id in host['vm'].ManagedObjectReference:
                    if vm_id.value not in self.vms:
                        self.logger.debug("Host '%s' references non-existing guest '%s'", host_id, vm_id.value)
                        continue
                    vm = self.vms[vm_id.value]
                    if 'config.uuid' not in vm:
                        self.logger.debug("Guest '%s' doesn't have 'config.uuid' property", vm_id.value)
                        continue
                    if not vm['config.uuid'].strip():
                        self.logger.debug("Guest '%s' has empty 'config.uuid' property", vm_id.value)
                        continue
                    state = virt.Guest.STATE_UNKNOWN
                    try:
                        if vm['runtime.powerState'] == 'poweredOn':
                            state = virt.Guest.STATE_RUNNING
                        elif vm['runtime.powerState'] == 'suspended':
                            state = virt.Guest.STATE_PAUSED
                        elif vm['runtime.powerState'] == 'poweredOff':
                            state = virt.Guest.STATE_SHUTOFF
                    except KeyError:
                        self.logger.debug("Guest '%s' doesn't have 'runtime.powerState' property", vm_id.value)
                    guests.append(virt.Guest(self.getVmUuid(vm), self.CONFIG_TYPE, state))
            try:
                name = host['config.network.dnsConfig.hostName']
                domain_name = host['config.network.dnsConfig.domainName']
                if domain_name:
                    name = self._format_hostname(name, domain_name)
            except KeyError:
                self.logger.debug("Unable to determine hostname for host '%s'. Omitting from report", uuid)
                continue

            facts = {
                virt.Hypervisor.CPU_SOCKET_FACT: str(host['hardware.cpuInfo.numCpuPackages']),
                virt.Hypervisor.HYPERVISOR_TYPE_FACT: host.get('config.product.name', 'vmware'),
                virt.Hypervisor.SYSTEM_UUID_FACT: host['hardware.systemInfo.uuid']
            }

            if host['parent'] and host['parent']._type == 'ClusterComputeResource':
                cluster_id = host['parent'].value
                # print('', self.clusters, cluster_id)
                cluster = self.clusters[cluster_id]
                facts[virt.Hypervisor.HYPERVISOR_CLUSTER] = urldecode(cluster['name'])

            version = host.get('config.product.version', None)
            if version:
                facts[virt.Hypervisor.HYPERVISOR_VERSION_FACT] = version

            mapping['hypervisors'].append(virt.Hypervisor(hypervisorId=uuid, guestIds=guests, name=name, facts=facts))
        return mapping
Example #22
async def handle_leech(client, message, gid, reply, user_id, flags):
    prevtext = None
    torrent_info = await aria2_tell_status(session, gid)
    last_edit = 0
    start_time = time.time()
    message_identifier = (reply.chat.id, reply.message_id)
    leech_statuses[message_identifier] = gid
    download_speed = None
    while torrent_info['status'] in ('active', 'waiting', 'paused'):
        if torrent_info.get('seeder') == 'true':
            break
        status = torrent_info['status'].capitalize()
        total_length = int(torrent_info['totalLength'])
        completed_length = int(torrent_info['completedLength'])
        download_speed = format_bytes(torrent_info['downloadSpeed']) + '/s'
        if total_length:
            formatted_total_length = format_bytes(total_length)
        else:
            formatted_total_length = 'Unknown'
        formatted_completed_length = format_bytes(completed_length)
        seeders = torrent_info.get('numSeeders')
        peers = torrent_info.get('connections')
        if torrent_info.get('bittorrent'):
            tor_name = torrent_info['bittorrent']['info']['name']
        else:
            tor_name = os.path.basename(torrent_info['files'][0]['path'])
            if not tor_name:
                tor_name = urldecode(
                    os.path.basename(
                        urlparse(
                            torrent_info['files'][0]['uris'][0]['uri']).path))
        text = f'''{html.escape(tor_name)}
<code>{html.escape(return_progress_string(completed_length, total_length))}</code>

<b>GID:</b> <code>{gid}</code>
<b>Status:</b> {status}
<b>Total Size:</b> {formatted_total_length}
<b>Downloaded Size:</b> {formatted_completed_length}
<b>Download Speed:</b> {download_speed}
<b>ETA:</b> {calculate_eta(completed_length, total_length, start_time)}'''
        if seeders is not None:
            text += f'\n<b>Seeders:</b> {seeders}'
        if peers is not None:
            text += f'\n<b>{"Peers" if seeders is not None else "Connections"}:</b> {peers}'
        if (time.time() -
                last_edit) > PROGRESS_UPDATE_DELAY and text != prevtext:
            await reply.edit_text(text)
            prevtext = text
            last_edit = time.time()
        torrent_info = await aria2_tell_status(session, gid)
    if torrent_info['status'] == 'error':
        error_code = torrent_info['errorCode']
        error_message = torrent_info['errorMessage']
        text = f'Aria2 Error Occurred!\n{error_code}: {html.escape(error_message)}'
        if error_code == '7' and not error_message and torrent_info[
                'downloadSpeed'] == '0':
            text += '\n\nThis error may have been caused by the torrent being too slow'
        await asyncio.gather(message.reply_text(text), reply.delete())
    elif torrent_info['status'] == 'removed':
        await asyncio.gather(
            message.reply_text('Your download has been manually cancelled.'),
            reply.delete())
    else:
        leech_statuses.pop(message_identifier)
        task = None
        if upload_queue._unfinished_tasks:
            task = asyncio.create_task(
                reply.edit_text('Download successful, waiting for queue...'))
        upload_queue.put_nowait(
            (client, message, reply, torrent_info, user_id, flags))
        try:
            await aria2_remove(session, gid)
        except Aria2Error as ex:
            if not (ex.error_code == 1 and ex.error_message
                    == f'Active Download not found for GID#{gid}'):
                raise
        finally:
            if task:
                await task
Example #23
def step_ai():
    level = level_str_to_num(urldecode(request.POST['level']))
    field = json.loads(request.POST['field'])
    return json.dumps(step(level, field))
Example #24
def uri_to_title(url):
    url = urldecode(url.strip('/'))
    words = split_re.split(camel_re.sub(' \\1', url))
    title = ' '.join(words).title()
    return title
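The split_re and camel_re patterns are not shown in this snippet; a sketch with assumed definitions that reproduce the intended behaviour, followed by a sample call:

import re
from urllib.parse import unquote as urldecode  # assumed stand-in

split_re = re.compile(r'[\s/_-]+')       # assumed: split on separators and whitespace
camel_re = re.compile(r'([A-Z][a-z]+)')  # assumed: break camelCase words apart

print(uri_to_title('/blog/gettingStarted-with_python/'))  # 'Blog Getting Started With Python'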
Example #25
def parse_url(string,
              tags=TAGS,
              attrs=ATTRS,
              eventHandlersAttrs=EVENTHANDLERSATTRS,
              keywords_param=KEYWORDS_PARAM,
              keywords_evil=KEYWORDS_EVIL):
    """
    Parses a URL as str and returns a dict of features for future model uses
    """
    string = urldecode(string)
    data = {}
    data['url_length'] = len(string)
    data['url_duplicated_characters'] = ('<<' in string) or ('>>' in string)
    #data['url_special_characters'] = any(i in string for i in '"\'>')
    # ex: ", ">, "/>
    # idea to bypass: using `
    for tag in tags:
        data['url_tag_' + tag] = bool(
            re.search(r'<\s*' + tag + r'.*>|<\s*/\s*' + tag + r'\s*>',
                      string,
                      flags=re.IGNORECASE))
        # TODO: handle HTML entities?
        # check for whitespace and ignore case
        # checked on https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet
    for attr in attrs:
        data['url_attr_' + attr] = bool(
            re.search(attr + r'\s*=', string, flags=re.IGNORECASE))
    for event in eventHandlersAttrs:
        data['url_event_' + event] = bool(
            re.search(event + r'\s*=', string, flags=re.IGNORECASE))
    data['url_cookie'] = ('document.cookie' in string)
    data['url_redirection'] = any(i in string for i in [
        'window.location', 'window.history', 'window.navigate', 'document.URL',
        'document.documentURI', 'document.URLUnencoded', 'document.baseURI',
        'location', 'window.open', 'self.location', 'top.location'
    ])
    # From paper:
    # window.location, window.history, window.navigate
    # From: https://code.google.com/archive/p/domxsswiki/wikis/LocationSources.wiki
    # document.URL, document.documentURI, document.URLUnencoded,
    # document.baseURI, location, location.href, location.search,
    # location.hash, location.pathname
    # window.open
    # https://stackoverflow.com/a/21396837
    # self.location, top.location
    # jQuery: $(location).attr('href','http://www.example.com')
    #         $(window).attr('location','http://www.example.com')
    #         $(location).prop('href', 'http://www.example.com')
    # https://stackoverflow.com/a/4745012
    # document.location
    data['url_number_keywords_param'] = sum(i in string.lower()
                                            for i in keywords_param)
    data['url_number_keywords_evil'] = sum(i in string.lower()
                                           for i in keywords_evil)
    data['url_number_domain'] = len(
        re.findall(r'(?:(?!-)[A-Za-z0-9-]{1,63}(?!-)\.)+[A-Za-z]{2,6}',
                   string))
    # adapted from: http://www.mkyong.com/regular-expressions/domain-name-regular-expression-example/
    # idea to bypass: IDN domain names: https://stackoverflow.com/a/26987741
    # be careful to decode the URL first
    data['url_number_ip'] = len(
        re.findall(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}(\.\d{1,3}\.\d{1,3})?',
                   string))
    # add number of IP addresses v4 or v6
    # https://stackoverflow.com/a/44891763
    return data
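A hypothetical call to the parse_url() feature extractor above; the module-level TAGS/ATTRS/EVENTHANDLERSATTRS/KEYWORDS_* constants are not shown here and are assumed to resemble the lists in Example #28:

features = parse_url('http://evil.example.com/?q=%3Cscript%3Edocument.cookie%3C%2Fscript%3E')
print(features['url_cookie'])                 # True -- "document.cookie" appears once decoded
print(features['url_duplicated_characters'])  # False -- no "<<" or ">>" in the decoded URL
print(features['url_number_keywords_param'])  # how many KEYWORDS_PARAM keywords appear in the URL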
Example #26
def search(s, verbose=False):
    param = urlencode({'q': 'site:youtube.com %s' % s})
    url = 'https://html.duckduckgo.com/html/?%s' % param
    body = subprocess.check_output('curl -k -H "user-agent: Mozilla/5.0" %s' %
                                   url,
                                   shell=True,
                                   stderr=subprocess.DEVNULL).decode()
    if verbose:
        print(body)
    artist = ''
    songs = []
    urls = set()
    names = set()
    hits = []
    for pagelink in pages.finditer(body):
        url = urldecode(pagelink.group(1).partition('%26')[0])
        name = html.unescape(pagelink.group(2))
        name = name.encode().partition(b'\xe2')[0].decode()
        name = tags.sub(' ', name)
        name = name.replace('~', ' - ').replace(':', ' - ').replace(
            '"', ' ').replace('`', "'")
        name = ''.join((ch if ord(ch) < 10000 else ' ') for ch in name)
        if verbose:
            print(name, url)
        if _match_words(url, bad_urls) or _match_words(name, bad_names):
            continue
        assert len(url) <= 60, 'url too long: ' + url
        words = [
            w for w in name.split()
            if not [b for b in drop_words if b in w.lower()]
        ]
        name = ' '.join(words).strip()
        name = clean_ends(name)
        if not name:
            continue
        exists = (url in urls or name in names)
        urls.add(url)
        names.add(name)
        if exists:
            continue
        hits.append([name, url])
    # sort by length
    score = {h: len(h) * 2 for h, u in hits}
    if verbose:
        print(score)
    # prefer names without parentheses
    score = {h: score[h] + h.find('(') + h.find('[') for h, u in hits}
    if verbose:
        print(score)
    # place those with dashes (-) first, assumed to be better named
    score = {
        h: score[h] + (h.index('-') if '-' in h else +50)
        for h, u in hits
    }
    if verbose:
        print(score)
    # place those with spaced dashes ( - ) first, assumed to be better named
    score = {
        h: score[h] + (h.index(' - ') if ' - ' in h else +50)
        for h, u in hits
    }
    if verbose:
        print(score)
    # sort by liked words
    score = {
        h: score[h] - 75 * len([1 for w in like_words if w in h.lower()])
        for h, u in hits
    }
    if verbose:
        print(score)
    # sort by exact match, phrase by phrase + additional points for the correct order
    sl = s.lower()
    search_phrases = [p.strip() for p in sl.split('-')]

    def matchlen_pos(s1, s2, i1):
        i2 = s2.find(s1)
        score = 100
        if i2 >= 0:
            score = abs(i1 - i2)
            # if verbose:
            # print(s1, s2, score)
        for w in dislike_words:
            if w in s2:
                score += 50
        return score

    score_phrase = lambda h: sum(
        [matchlen_pos(p, h.lower(), sl.index(p)) for p in search_phrases])
    score = {h: score[h] + score_phrase(h) for h, u in hits}
    if verbose:
        print(score)
    hits = sorted(hits, key=lambda nu: score[nu[0]])
    if verbose:
        print('post score sort')
        print('\n'.join(str(h) for h in hits))
        print()
    # cleanup names
    for i, (name, url) in enumerate(hits):
        r = parenths.match(name)
        if r:
            words = [r.group(1), r.group(3)]
            inparenths = r.group(2).strip()
            if inparenths.lower() in sl:
                words += [inparenths]
            name = clean_ends(' '.join(w.strip() for w in words))
            hits[i][0] = name
    if verbose:
        print('post cleanup')
        print('\n'.join(str(h) for h in hits))
    # 1st pick artist if present in any hit
    artists = []  # ordered; don't use set()
    for name, url in hits:
        words = [w.strip() for w in name.split('-')]
        if len(words) >= 2:
            artist = words[0].strip()
            if artist not in artists:
                artists.append(artist)
    # ok, lets add up the songs (using the artist stated by any of the songs previously)
    known_songname = s.partition('-')[2].strip()
    for name, url in hits:
        words = [w.strip() for w in name.split('-')]
        if len(words) >= 2:
            artist = words[0]
            song = words[1]
            for i in range(2, len(words)):
                if known_songname and words[i].lower().startswith(
                        known_songname.lower()):
                    song = words[i]
                    break
            songs += [ABSong(song, artist, url)]
        elif len(words) == 1:
            song = words[0]
            for artist in artists:
                artist_index = song.lower().find(artist.lower())
                if artist_index >= 0:
                    song = (song[:artist_index].strip() + ' ' +
                            song[artist_index + len(artist):].strip()).strip()
                    if song:
                        songs += [ABSong(song, artist, url)]
                    break
            else:
                songs += [ABSong(song, s, url)]
    if verbose:
        print('done')
        print(songs)
    # os._exit(1)
    return songs
Example #27
    def worker_request(self,
                       request_method,
                       request_url,
                       request_params=None,
                       request_data=None,
                       request_retry=None,
                       request_headers=None,
                       request_label=None):
        """Wrapper around the requests handler created in requests-fortified.
        :param request_method:
        :param request_url:
        :param request_params:
        :param request_data:
        :param request_retry:
        :param request_headers:
        :param request_label:
        :return:
        """
        request_data_decoded = None
        if request_data:
            request_data_decoded = urldecode(request_data)

        self.logger.debug("Request",
                          extra={
                              'request_method': request_method,
                              'request_url': request_url,
                              'request_params': request_params,
                              'request_data_decoded': request_data_decoded
                          })

        response = None
        tries = 0

        while True:
            tries += 1
            if tries > 1:
                _request_label = "{0}: Attempt {1}".format(
                    request_label, tries)
            else:
                _request_label = request_label

            try:
                response = self.base_request.request(
                    request_method=request_method,
                    request_url=request_url,
                    request_params=request_params,
                    request_data=request_data,
                    request_retry={'timeout': 10},
                    request_retry_excps=[requests.exceptions.RetryError],
                    request_retry_http_status_codes=[
                        HttpStatusCode.TOO_MANY_REQUESTS
                    ],
                    request_retry_excps_func=None,
                    request_headers=request_headers,
                    request_label=_request_label)
            except requests.exceptions.RetryError as ex:
                self.logger.warning("Request Retry",
                                    extra={
                                        "request_url": request_url,
                                        "error": get_exception_message(ex)
                                    })
                time.sleep(1)  # Sleep 1 second before retry.
                continue

            except Exception as ex:
                self.logger.error("Request Error",
                                  extra={
                                      "request_url": request_url,
                                      "error": get_exception_message(ex)
                                  })
                raise

            return response
Example #28
def parse_url(string):
    url_encod_characters = [
        '%08', '%09', '%0A', '%0D', '%20', '%21', '%22', '%23', '%24', '%25',
        '%26', '%27', '%28', '%29', '%2A', '%2B', '%2C', '%2D', '%2E', '%2F',
        '%30', '%31', '%32', '%33', '%34', '%35', '%36', '%37', '%38', '%39',
        '%3A', '%3B', '%3C', '%3D', '%3E', '%3F', '%40', '%41', '%42', '%43',
        '%44', '%45', '%46', '%47', '%48', '%49', '%4A', '%4B', '%4C', '%4D',
        '%4E', '%4F', '%50', '%51', '%52', '%53', '%54', '%55', '%56', '%57',
        '%58', '%59', '%5A', '%5B', '%5C', '%5D', '%5E', '%5F', '%60', '%61',
        '%62', '%63', '%64', '%65', '%66', '%67', '%68', '%69', '%6A', '%6B',
        '%6C', '%6D', '%6E', '%6F', '%70', '%71', '%72', '%73', '%74', '%75',
        '%76', '%77', '%78', '%79', '%7A', '%7B', '%7C', '%7D', '%7E', '%A2',
        '%A3', '%A5', '%A6', '%A7', '%AB', '%AC', '%AD', '%B0', '%B1', '%B2',
        '%B4', '%B5', '%BB', '%BC', '%BD', '%BF', '%C0', '%C1', '%C2', '%C3',
        '%C4', '%C5', '%C6', '%C7', '%C8', '%C9', '%CA', '%CB', '%CC', '%CD',
        '%CE', '%CF', '%D0', '%D1', '%D2', '%D3', '%D4', '%D5', '%D6', '%D8',
        '%D9', '%DA', '%DB', '%DC', '%DD', '%DE', '%DF', '%E0', '%E1', '%E2',
        '%E3', '%E4', '%E5', '%E6', '%E7', '%E8', '%E9', '%EA', '%EB', '%EC',
        '%ED', '%EE', '%EF', '%F0', '%F1', '%F2', '%F3', '%F4', '%F5', '%F6',
        '%F7', '%F8', '%F9', '%FA', '%FB', '%FC', '%FD', '%FE', '%FF'
    ]
    ptr = [
        'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
        'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b',
        'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p',
        'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3',
        '4', '5', '6', '7', '8', '9', '-', '.', '_', '~', ':', '/', '?', '#',
        '[', ']', '@', '!', '$', '&', '(', ')', '*', '+', ',', ';', '='
    ]
    global attr
    methods = ('getElementsByTagName', 'write', 'getElementById', 'alert',
               'prompt', 'eval', 'fromCharCode', 'fetch', 'confirm')

    tags = [
        'frame', 'form', 'div', 'style', 'video', 'img', 'input', 'textarea',
        'iframe', 'script', 'meta', 'applet', 'object', 'embed', 'link', 'svg'
    ]
    attrs = [
        'href',
        'archive',
        'http-equiv',
        'lowsrc',
        'content',
        'data',
        'data-*',
        'dir',
        'download',
        'form',
        'formaction',
        'method',
        'dir',
        'accept',
        'onsubmit',
        'poster',
        'src',
        'background',
        'bgcolor',
        'content',
        'min',
        'action',
        'autofocus',
        'id',
        'class',
        'codebase',
        'novalidate',
        'srcset',
        'required',
        'target',
        'pattern',
        'cite',
        'classid',
        'profile',
        'charset',
        'style',
        'list',
        'manifest',
    ]
    eventHandlersAttrs = [
        'onbeforedeactivate', 'onbeforeeditfocus', 'onbeforepaste',
        'onbeforeprint', 'onabort', 'onactivate', 'onafterprint',
        'onafterupdate', 'onbeforeactivate', 'onbeforecopy', 'onbeforecut',
        'onchange', 'onclick', 'oncontextmenu', 'oncontrolselect', 'oncopy',
        'onbeforeunload', 'onbeforeupdate', 'onblur', 'onbounce',
        'oncellchange', 'ondragleave', 'ondragover', 'ondragstart', 'ondrop',
        'onerror', 'oncut', 'ondataavailable', 'ondatasetchanged',
        'ondatasetcomplete', 'ondblclick', 'ondeactivate', 'ondrag',
        'ondragend', 'ondragenter', 'onmousedown', 'onmouseenter',
        'onmouseleave', 'onmousemove', 'onmouseout', 'onerrorupdate',
        'onfilterchange', 'onfinish', 'onfocus', 'onfocusin',
        'onreadystatechange', 'onreset', 'onresize', 'onresizeend',
        'onresizestart', 'onfocusout', 'onhashchange', 'onhelp', 'oninput',
        'onkeydown', 'onkeypress', 'onkeyup', 'onload', 'onlosecapture',
        'onmessage', 'onsearch', 'onselect', 'onselectionchange',
        'onselectstart', 'onstart', 'onmovestart', 'onoffline', 'ononline',
        'onpaste', 'onpropertychange', 'onmouseover', 'onmouseup',
        'onmousewheel', 'onmove', 'onmoveend', 'onstop', 'onsubmit',
        'onunload', 'onrowenter', 'onrowexit', 'onrowsdelete',
        'onrowsinserted', 'onscroll'
    ]

    keywords_param = [
        'redirect', 'url', 'search', 'query', 'login', 'signup', 'contact'
    ]

    keywords_evil = [
        'pwd', 'pown', 'anonymous', 'control by', 'XSS', 'evil', 'hack',
        'controled by', 'in control', 'under the control', 'h4ck', 'h@ck'
    ]

    data = {}
    data['url_ecode_length'] = len(string)
    data['url_ecode'] = string
    data['url_ecode_character2'] = 0
    for i in string:
        if i not in ptr:
            data['url_ecode_character2'] += 1
        else:
            continue

    string = urldecode(string)
    data['url_urldecode'] = string
    # data = {}
    # print(string)
    data['url_length'] = len(string)
    if ('<<' in string) or ('>>' in string):
        data['url_duplicated_characters'] = 1
    else:
        data['url_duplicated_characters'] = 0
    if any(i in string for i in '"\'>'):
        data['url_special_characters'] = 1
    else:
        data['url_special_characters'] = 0
    for tag in tags:
        if re.search(r'<\s*' + tag + r'.*>|<\s*/\s*' + tag + r'\s*>',
                     string,
                     flags=re.IGNORECASE):
            data['url_tag_' + tag] = 1
        else:
            data['url_tag_' + tag] = 0
    for attr in attrs:
        if re.search(attr + r'\s*=', string, flags=re.IGNORECASE):
            data['url_attr_' + attr] = 1
        else:
            data['url_attr_' + attr] = 0
    for event in eventHandlersAttrs:
        if re.search(event + r'\s*=', string, flags=re.IGNORECASE):
            data['url_event_' + event] = 1
        else:
            data['url_event_' + event] = 0

    if ('document.cookie' in string):
        data['url_cookie'] = 1
    else:
        data['url_cookie'] = 0
    # ------------------------------------------------------------
    data['url_redirection'] = any(i in string for i in [
        'document.documentURI', 'document.URLUnencoded', 'document.baseURI',
        'window.history', 'window.location', 'window.navigate', 'document.URL',
        'location', 'top.location', 'self.location', 'window.open'
    ])
    # data['url_number_domain_2'] =0
    data['url_number_keywords_evil'] = sum(i in string.lower()
                                           for i in keywords_evil)
    data['url_number_keywords_param'] = sum(i in string.lower()
                                            for i in keywords_param)
    data['url_number_ip'] = len(
        re.findall(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}(\.\d{1,3}\.\d{1,3})?',
                   string))
    data['url_number_domain'] = len(
        re.findall(
            r'(([\w]+:)?//)?(([\d\w]|%[a-fA-f\d]{2,2})+(:([\d\w]|%[a-fA-f\d]{2,2})+)?@)?([\d\w][-\d\w]{0,253}[\d\w]\.)+[\w]{2,63}(:[\d]+)?(/([-+_~.\d\w]|%[a-fA-f\d]{2,2})*)*(\?(&?([-+_~.\d\w]|%[a-fA-f\d]{2,2})=?)*)?(#([-+_~.\d\w]|%[a-fA-f\d]{2,2})*)?',
            string))
    return data
Example #29
def success(resp):
    if 'message' in resp.url:
        logger.warning(urldecode(resp.url)['message'])
        return False
    return True
Example #30
		day_notice=i[index].split(' de ')[0]
		month_notice=get_month_by_name(i[index].split(' de ')[1],True)
		year_notice=i[index].split(' de ')[2]

		full_date_notice='{0}/{1}/{2}'.format(day_notice,month_notice,year_notice).strip().replace(' ','')
		full_date_today=today.strftime('%d/%m/%Y')

		if full_date_notice==full_date_today:

			notice_url=a[index]
			soup_notice=bs4(get_content(notice_url),'html.parser')

			img_src=soup_notice.find_all('div',class_='texto')[0].find_all('img')[0].get('src')
			img_link='http://www.arcoverde.pe.gov.br{0}'.format(img_src)
			
			image_name=urldecode(img_src.split('/')[len(img_src.split('/'))-1])
			image_name=image_name.replace('"','').replace('\\', '')

			if 'corona' in image_name.lower():

				if dev:
					dev='({0})'.format(image_name)
				else:
					dev=''

				if check_cache(image_name)==False:

					if config['beep']:
						beep(config['beep_times'])
					
					print('[+] UPDATE {0} - {1} {dev}'.format(full_date_today,hours,dev=dev))