예제 #1
0
파일: check_ssrf.py 프로젝트: Arryboom/x386
    def _request_check_location(r, *args, **kwargs):
        """
        Response-hook callback that blocks SSRF via HTTP redirects.

        Re-resolves the redirect target the same way requests itself does
        (relative 'Location' per RFC 7231, percent-encoding per RFC 3986)
        and validates it with check_ssrf before the redirect is followed.

        :param r: the requests.Response being inspected
        :param args: unused hook positional args
        :param kwargs: unused hook keyword args
        :return: None; raises requests.exceptions.InvalidURL on an SSRF hit
        """
        if not r.is_redirect:
            return

        url = r.headers['location']

        # The scheme should be lower case...
        parsed = urlparse.urlparse(url)
        url = parsed.geturl()
        # Facilitate relative 'location' headers, as allowed by RFC 7231.
        # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
        # Compliant with RFC3986, we percent encode the url.
        if not parsed.netloc:
            url = urlparse.urljoin(r.url, requote_uri(url))
        else:
            url = requote_uri(url)
        succ, errstr = check_ssrf(url)
        if not succ:
            # print() call form works on both Python 2 and 3; the original
            # `print "..."` statement is a SyntaxError under Python 3.
            print("SSRF Attack: %s" % (errstr,))
            raise requests.exceptions.InvalidURL("SSRF Attack: %s" % (errstr,))
예제 #2
0
파일: bases.py 프로젝트: dphans/pidown
 def search(self, keyword):
     """Run a search for *keyword* and return the parsed result list.

     Raises Exception when the keyword is not a str or when no search
     URL can be built; returns [] when parsing yields a non-list.
     """
     if type(keyword) is not str:
         raise Exception(
             'Error while processing search keyword, keyword must be str type!'
         )
     query_url = self.search_url_datasource()
     if not query_url:
         raise Exception(
             'Error while building search query, unknown URL returned!')
     page_html = self.helper_get_contents(
         query_url.format(requote_uri(keyword)))
     results = self.search_results_datasource(page_html)
     if type(results) is list:
         return results
     return []
예제 #3
0
def recursive_folder(path):
    """Recursively walk the filesystem API starting at *path*.

    Appends the path of every file with more than one version, uploaded
    by a restricted user and last modified on the restricted date, to
    'files_to_delete.txt'; errors are appended to 'error.txt'.

    Relies on module-level globals: fs_api, client, user_restrict,
    restrict_date.
    """
    try:
        uri = fs_api + path

        # requote_uri percent-encodes the path portion per RFC 3986.
        folder = client.GET(requote_uri(uri))
        folder_attributes = folder.json()

        if "files" in folder_attributes:
            print(type(folder_attributes['files']))
            print("\n ****************** FILES FOUND ******************\n")
            print(folder_attributes['files'])
            print(
                "\n****************** FILES FOUND ENDED ******************\n")

            for f in folder_attributes['files']:
                versions = f['num_versions']
                file_path = f['path']
                uploaded_by = f['uploaded_by']
                # last_modified looks like 'Day, DD Mon YYYY ...'; chars
                # 5:16 hold the 'DD Mon YYYY' portion — TODO confirm format.
                modified_date = datetime.strptime(f['last_modified'][5:16],
                                                  '%d %b %Y')

                if versions > 1 and uploaded_by in user_restrict and modified_date == restrict_date:
                    with codecs.open('files_to_delete.txt', 'ab',
                                     'utf8') as fout:
                        fout.write('%s\n' % file_path)

        if 'folders' in folder_attributes:

            print("\n****************** FOLDERS FOUND ******************\n")
            print(folder_attributes['folders'])
            print(
                "\n****************** FOLDERS FOUND ENDED ******************\n"
            )
            for dirs in folder_attributes['folders']:
                print('Walking through: ' + dirs['path'])
                recursive_folder(dirs['path'])
    except Exception as e:
        print('Error on path: %s' % path)
        # BUGFIX: the file was opened in binary mode ('ab') while a str was
        # written, which raises TypeError on Python 3. Open through codecs
        # (as done for files_to_delete.txt) so text is encoded properly.
        with codecs.open('error.txt', 'ab', 'utf8') as error:
            error.write('%s\t %r\n' % (path, e))

    print(
        "\nPlease look at 'files_to_delete.txt' for the files found. You may also look at error.txt for any errors"
    )
예제 #4
0
async def upload_to_gdrive(file_upload, message):
    """Upload *file_upload* (file or directory) to Google Drive via rclone.

    Writes a temporary rclone config, runs `rclone copy`, edits *message*
    with the resulting index link and finally deletes the local copy.
    Relies on module-level RCLONE_CONFIG, DESTINATION_FOLDER, INDEX_LINK.
    """
    # BUGFIX: the config was previously created with `touch` and opened in
    # append mode, so every call added another duplicate [DRIVE] section.
    # Opening in 'w' both creates the file and keeps exactly one section.
    with open('rclone.conf', 'w', newline="\n") as fole:
        fole.write("[DRIVE]\n")
        fole.write(f"{RCLONE_CONFIG}")
    destination = f'{DESTINATION_FOLDER}'
    if os.path.isfile(file_upload):
        tmp = subprocess.Popen([
            'rclone', 'copy', '--config=rclone.conf', f'{file_upload}',
            'DRIVE:'
            f'{destination}', '-v'
        ],
                               stdout=subprocess.PIPE)
        out = tmp.communicate()
        print(out)
        indexurl = f"{INDEX_LINK}/{file_upload}"
        g_link = requote_uri(indexurl)
        await message.edit_text(
            f'{file_upload} has been Uploaded successfully to your cloud 🤒\n\n Direct Downlaod link: <a href="{g_link}">here</a>'
        )
        os.remove(file_upload)
    else:
        # Directories are copied into a destination subfolder.
        tt = os.path.join(destination, file_upload)
        print(tt)
        tmp = subprocess.Popen([
            'rclone', 'copy', '--config=rclone.conf', f'{file_upload}',
            'DRIVE:'
            f'{tt}', '-v'
        ],
                               stdout=subprocess.PIPE)
        out = tmp.communicate()
        print(out)
        indexurl = f"{INDEX_LINK}/{file_upload}/"
        g_link = requote_uri(indexurl)
        await message.edit_text(
            f'Folder has been Uploaded successfully to {tt} in your cloud 🤒\n\n Direct Downlaod link: <a href="{g_link}">here</a>'
        )
        shutil.rmtree(file_upload)
예제 #5
0
 def getCountryNameFromPincode(self, pincode):
     """Look up country details for *pincode* via the internal API.

     Accepts codes shaped like 'AB12' or 5-6 digits; returns the API's
     response text, or an explanatory message on failure.
     """
     # Raw string avoids invalid-escape warnings for \w and \d.
     if (re.fullmatch(r"\w{2}\d{2}|\d{5,6}", pincode)):
         url = self.baseUrl + self.pincodeUrl
         # '#pincode' is a placeholder in the configured URL template.
         url = url.replace("#pincode", pincode)
         url = requote_uri(url)
         print(url)
         response = requests.get(url)
         if (response.status_code == 200):
             print(response.text)
             return response.text
         else:
             return "Sorry..!! I could not reach my internal api to get the details by country"
     else:
         return "Pincode is not on the appropriate format..!! Please give the picode as 110001 "
예제 #6
0
def profile():
    """Handle Elevation profile requests.

    Query params: url (data source), step (sample spacing, default 0.30),
    x and y (comma-separated coordinate lists of equal length >= 2).
    Returns JSON with the sampled values along the polyline.
    """
    url = request.args.get('url', default='', type=str)
    step = request.args.get('step', default=0.30, type=float)

    # Validate presence before percent-encoding the URL (the original
    # re-quoted an empty string before checking it).
    if not url:
        raise TilerError("Missing 'url' parameter")
    url = requote_uri(url)

    # Comma-separated coordinate strings.
    x = request.args.get('x')
    y = request.args.get('y')
    if x is None or y is None:
        # Clear error instead of an AttributeError on .split below.
        raise TilerError("Missing 'x' or 'y' parameter")

    x = np.array(x.split(','), dtype=float)
    y = np.array(y.split(','), dtype=float)

    print('data:', x)
    print('length:', len(x))

    coord_x = []
    coord_y = []

    # Checking length of x and y
    if len(x) != len(y):
        raise TilerError('Error: Length of X and Y parameters are not equal')

    elif len(x) < 2:
        raise TilerError(
            'Error: Length of parameters should be greater than 1')

    # Densify each consecutive point pair into sample coordinates.
    for i in range(len(x) - 1):
        print('Generating line from points')
        temp_x, temp_y = get_profile.get_point2line(
            x[i], y[i], x[i + 1], y[i + 1], step=step)

        if temp_x is None or temp_y is None:
            raise TilerError(
                'Error: Distance between points should be less than 10KM')

        coord_x.extend(temp_x)
        coord_y.extend(temp_y)

    print('Generated %d number of points' % (len(coord_x)))
    info = get_value.get_value(
        address=url, coord_x=coord_x, coord_y=coord_y)
    return jsonify(info)
예제 #7
0
    def sendEmailToRest(self, entity):
        """POST the mail id *entity* to the mail REST endpoint.

        Returns the response body text regardless of status code (the
        caller inspects the text itself).
        """
        data = {}
        data['name'] = ""
        data['mailid'] = entity
        data['mobile'] = ""

        url = self.baseUrl + self.mailURL
        url = requote_uri(url)
        # BUGFIX: the dict was json.dumps()-ed *and* passed via json=,
        # which double-encodes the payload (the server receives a JSON
        # string, not a JSON object). Let requests serialize it once.
        response = requests.post(url, json=data)
        # Both status branches returned response.text; collapse them.
        return response.text
예제 #8
0
def new_search(request):
    """Handle a Craigslist search request.

    Persists the search text, scrapes the Craigslist results page and
    renders (title, url, price, image_url) tuples for the frontend.
    """
    # store the user input into a variable
    search = request.POST.get('search')
    # insert the search text into the search table in the database
    models.Search.objects.create(search_text=search)
    final_url = BASE_CRAIGLIST_URl.format(requote_uri(search))
    response_from_craiglist_website = requests.get(final_url)
    data = response_from_craiglist_website.text
    # Parsing the beautiful soup to create a BeautifulSoup object for it.
    soup = BeautifulSoup(data, features='html.parser')
    # Extracting all <li> tags whose class name is 'result-row' into a list
    post_listings = soup.find_all('li', class_='result-row')

    # Hoisted out of the loop: these never change per posting.
    CRAIGLIST_IMAGES_URL = 'https://images.craigslist.org/{}'
    DEFAULT_IMAGE_URL = 'https://london.craigslist.org/images/peace.jpg'

    final_postings = []
    for post in post_listings:
        post_title = post.find('a', class_='result-title').text
        post_url = post.find('a', class_='result-title').get('href')

        if post.find(class_='result-price'):
            post_price = post.find(class_='result-price').text
        else:
            post_price = 'N/A'

        # check if data-ids is present in the result-image; also guard
        # against listings with no result-image element at all (the
        # original raised AttributeError on those).
        result_image = post.find(class_='result-image')
        if result_image is not None and result_image.get('data-ids'):
            # 'data-ids' looks like '1:<id>,...'; take the first id.
            post_image_id = result_image.get(
                'data-ids').split(',')[0].split(':')[1]
            post_image_url = CRAIGLIST_IMAGES_URL.format(
                post_image_id) + '_600x450.jpg'
        else:
            post_image_url = DEFAULT_IMAGE_URL

        final_postings.append(
            (post_title, post_url, post_price, post_image_url))

    frontend_context = {'search': search, 'final_postings': final_postings}
    return render(request, 'my_app/new_search.html', frontend_context)
예제 #9
0
    async def encode_url(self, ctx, *, input: commands.clean_content = None):
        """Percent-encode the given text and reply with an embed."""
        if not input:
            error_embed = discord.Embed(
                description=":no_entry_sign: You must give an input string",
                colour=0xE74C3C)
            await ctx.send(embed=error_embed)
            return

        encoded = requote_uri(str(input))

        embed = discord.Embed(title="Result", colour=0x2ECC71)
        embed.add_field(name="Input", value=f"`{input}`")
        embed.add_field(name="Output", value=f"`{encoded}`")
        embed.set_footer(text="Made with ❤️ by Roxiun")

        await ctx.send(embed=embed)
예제 #10
0
 def start_request(self):
     """Fetch idiom search results from 51bc.net and return them as a list."""
     query_url = requote_uri(
         'http://www.51bc.net/cy/serach.php?f_key='
         f'{self.encode_key()}&f_type=chengyu&f_type2='
     )
     request_headers = {
         'user-agent':
         'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36',
         'Referer': 'http://www.51bc.net/cy/serach.php'
     }
     resp = requests.get(query_url, headers=request_headers)
     # The site serves GBK-encoded HTML.
     page = pq(resp.content.decode('gbk'))
     return page("u").text().split(" ")
예제 #11
0
    async def npmSearch(self, ctx, *, args):
        """Search the npms.io API for *args* and reply with package info."""
        parsed = requote_uri(args)

        req = requests.get(f"https://api.npms.io/v2/search?q={parsed}")

        # `!= 200` / `== 200` cover every case, so the original trailing
        # `else: print(req.status_code)` branch was unreachable; a plain
        # if/else expresses the same logic.
        if req.status_code != 200:
            embed = discord.Embed(
                title="Error API STATUS",
                description=f"Código de la API: {req.status_code}",
                color=discord.Color.red(),
            )
            embed.set_thumbnail(
                url="https://media1.tenor.com/images/46ce1235c5697ce170c6e84f4b4fb4e7/tenor.gif"
            )
            await ctx.send(embed=embed)
        else:
            try:
                resp = req.json()
                # Take the first search result's package metadata.
                results_info = resp["results"]
                resultsGet_info = results_info[0]
                pkgInfo = resultsGet_info["package"]
                linksPkg = pkgInfo["links"]
                embed = discord.Embed(
                    title=f"La información sobre el paquete **{args}** es: ",
                    description=f"""
                    Nombre: {pkgInfo["name"]}
                    Versión: {pkgInfo["version"]}
                    Descripción: {pkgInfo["description"]}
                    Fecha: {pkgInfo["date"]}
                    Link: {linksPkg["npm"]}
                  """,
                    color=discord.Color.random(),
                )
                embed.set_thumbnail(
                    url="https://authy.com/wp-content/uploads/npm-logo.png"
                )
                await ctx.send(embed=embed)

            except Exception as err:
                # Covers missing keys and empty result lists alike.
                embed = discord.Embed(
                    title="Error API STATUS",
                    description=f"Código de la API: {err}",
                    color=discord.Color.red(),
                )
                embed.set_thumbnail(
                    url="https://media1.tenor.com/images/46ce1235c5697ce170c6e84f4b4fb4e7/tenor.gif"
                )
                await ctx.send(embed=embed)
예제 #12
0
def create_time_schedule_url(campus, quarter, department):
    """
        Returns a URL for the UW Time Schedule
        for the given quarter and campus, percent-encoded.
        Raises ValueError when the arguments yield an invalid URL.
    """
    quarter = validate_quarter_name(quarter)
    campus = validate_campus_name(campus)
    department = department.lower()
    url = f'{TIME_SCHEDULE_URL}/{campus}{quarter}/{department}.html'
    if is_url(url):
        return requote_uri(url)
    raise ValueError(
        f'Generated URL is an invalid URL. '
        f'Given arguments are invalid: {campus}, {quarter}, {department}')
예제 #13
0
def getCompanyIDByName(company):
    """Resolve company.company_id by querying the companies endpoint.

    Sets company_id to -1 when the name is unknown, otherwise to the id
    of the first match, and returns the (mutated) company object.
    """
    pool = urllib3.PoolManager()
    lookup_url = requote_uri(
        companies_url + "?format=json&name=" + quote(company.name))
    reply = pool.request('GET', lookup_url)
    matches = json.loads(reply.data.decode('utf-8'))

    if not matches:
        # Company name not found.
        company.company_id = -1
    else:
        # Use the first match (several companies may share the name).
        company.company_id = matches[0]['id']

    return company
예제 #14
0
파일: views.py 프로젝트: saurya3d/Cragslist
def new_search(request):
    """Handle a Craigslist search: scrape results and render them.

    Stores the raw search text, fetches the listing page for the query
    and extracts (title, url, price, image_url) tuples for the template.
    """
    search = request.POST.get('search')
    models.Search.objects.create(search=search)
    # print(requote_uri(search))

    # requote_uri percent-encodes the user's query for the URL template.
    final_url = BASE_CRAGSLIST_URL.format(requote_uri(search))
    # print(final_url)
    response = requests.get(final_url)
    data = response.text
    # print(data)
    soup = BeautifulSoup(data, features='html.parser')

    # Each result is an <li class="result-row"> element.
    post_listing = soup.find_all('li', {'class': 'result-row'})
    # post_titles = post_listing[0].find(class_= 'result-title').text
    # post_url = post_listing[0].find('a').get('href')
    # post_price = post_listing[0].find(class_= 'result-price').text

    # print(post_titles)
    # print(post_url)
    # print(post_price)

    final_postings = []
    for post in post_listing:
        post_titles = post.find(class_='result-title').text
        post_url = post.find('a').get('href')
        if post.find(class_='result-price'):
            post_price = post.find(class_='result-price').text
        else:
            # Listings without a price tag are shown as N/A.
            post_price = 'N/A'

        if post.find(class_='result-image').get('data-ids'):
            # 'data-ids' appears to look like '1:<id>,3:...'; the split
            # takes the first id and the ",3" suffix is stripped —
            # TODO confirm against live markup.
            post_image_id = post.find(
                class_='result-image').get('data-ids').split(':')[1].replace(
                    ",3", "")
            print(post_image_id)
            post_image_url = BASE_IMAGE_URL.format(post_image_id)
            print(post_image_url)
        else:
            # Fallback placeholder when a listing has no photos.
            post_image_url = 'https://craigslist.org/images/peace.jpg'

        final_postings.append(
            (post_titles, post_url, post_price, post_image_url))

    stuff_for_frontend = {
        'search': search,
        'final_postings': final_postings,
    }

    return render(request, 'my_app/new_search.html', stuff_for_frontend)
예제 #15
0
    def post(self):
        """Take a screenshot of the URL in the JSON request body.

        Requires a valid API token (?token=...). Returns the image file
        on success or an {'error', 'error-description'} dict on failure.
        """
        token = request.args.get('token')
        verify = VerifyKey(KEYS_PATH, token)
        # Guard clause: bail out early on bad tokens.
        if not verify.isAuthorized():
            return {'error': Errors.UNAUTHORIZED.first, 'error-description': Errors.UNAUTHORIZED.second}

        url = request.json.get('url')
        url = requote_uri(url)
        full = request.json['options'].get('fullPage')
        formatType = request.json['options'].get('type')
        quality = request.json['options'].get('quality')
        tor = request.json['options'].get('tor')
        timeout = request.json['options'].get('timeout')
        browser = request.json['options'].get('browser')
        height = request.json['options'].get('height')
        width = request.json['options'].get('width')

        # Set default values ('is None' is the idiomatic comparison; the
        # original used '== None').
        if quality is None:
            quality = 100
        if tor is None:
            tor = False
        if height is None:
            height = 600
        if width is None:
            width = 800

        checker = Checker(url, full, formatType, quality, tor, timeout, browser, height, width)
        checkerAnswer = checker.verifyAll()

        if checkerAnswer != 0:
            return {'error': checkerAnswer.first, 'error-description': checkerAnswer.second}

        # Build a filesystem-safe filename from the host and a timestamp.
        netloc = urlparse(url).netloc
        netloc = netloc.replace('.', '_')
        netloc = netloc.replace(':', '_')
        ts = calendar.timegm(time.gmtime())
        filename = 'mps_{}_{}'.format(ts, netloc)

        screenshot = Screenshot(SCREENSHOT_PATH, FIREFOX_PATH, CHROME_PATH, TOR_PROFILE, TOR_URL)
        answer = screenshot.getImage(full, filename, url, formatType, tor, timeout, browser, height=height, width=width)

        if answer == 0:
            mimeType = 'image/{}'.format(formatType)
            filename = '{}/{}.{}'.format(SCREENSHOT_PATH, filename, formatType)
            return send_file(filename, mimetype=mimeType)
        return {'error': answer.first, 'error-description': answer.second}
예제 #16
0
def creating_file_list(app, exception):
	''' Creates a files containing the path to every html file that was compiled. This files are `.doclist` and the sitemap. '''
	if app.builder.name != 'html':
		return
	build_path = app.outdir
	separator = '\n'
	# One compiled page path per line.
	with open(build_path + '/.doclist', 'w') as doclist_file:
		doclist_file.write(separator.join(list_compiled_html))
	# Assemble the sitemap as a list of lines joined with the separator —
	# byte-identical to the original string concatenation.
	base_doc_url = html_theme_options.get('wazuh_doc_url')
	xml_lines = ['<?xml version=\'1.0\' encoding=\'utf-8\'?>']
	xml_lines.append('<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">')
	for compiled_html in list_compiled_html:
		xml_lines.append('\t<url><loc>' + requote_uri(base_doc_url + '/' + version + '/' + compiled_html) + '</loc></url>')
	xml_lines.append('</urlset>')
	with open(build_path + '/' + version + '-sitemap.xml', 'w') as sitemap_file:
		sitemap_file.write(separator.join(xml_lines))
예제 #17
0
def expandUrl(url: str):
    """Expand a bitly short link via the v4 API; fall back to *url*."""
    request_headers = {
        "Authorization": settings["bitlyAuth"],
        "Content-Type": "application/json"
    }
    payload = {"bitlink_id": requote_uri(url)}
    try:
        reply = post("https://api-ssl.bitly.com/v4/expand",
                     json=payload,
                     headers=request_headers)
        long = reply.json()["long_url"]
    except Exception:
        # Any failure (network, JSON, missing key) leaves the URL as-is.
        long = url
    return long
예제 #18
0
    def data_get_entry_history(self, entryTypeId, modifiedSince):
        """
        Get Record Ids for entries that were modified or deleted.

        :param entryTypeId: entryListID or entry apiName. Required.

        :param modifiedSince: Date since modified; percent-encoded before
            being placed in the query string.
        :type modifiedSince: datetime.

        :returns: parsed JSON body of the history endpoint response
        """
        quoted_since = requote_uri(modifiedSince)
        endpoint = (
            f"/api/rest/v4/data/entrydata/{entryTypeId}"
            f"/entries/history?modifiedSince={quoted_since}"
        )
        reply = self._get_response(method='get', endpoint=endpoint)
        return reply.json()
예제 #19
0
    def translate(self, word, return_all=False, **kwargs):
        """
        function that uses PONS to translate a word
        @param word: word to translate
        @type word: str
        @param return_all: set to True to return all synonym of the translated word
        @type return_all: bool
        @return: str: translated word (list when return_all); returns None
            implicitly when the payload fails validation
        """
        if self._validate_payload(word, max_chars=50):
            url = "{}{}-{}/{}".format(self.__base_url, self._source,
                                      self._target, word)
            url = requote_uri(url)
            response = requests.get(url)

            if response.status_code == 429:
                raise TooManyRequests()

            if response.status_code != 200:
                raise RequestError()

            soup = BeautifulSoup(response.text, 'html.parser')
            elements = soup.findAll(self._element_tag, self._element_query)

            if not elements:
                raise ElementNotFoundInGetRequest(word)

            filtered_elements = []
            for el in elements:
                temp = ''
                for e in el.findAll('a'):
                    if e.parent.name == 'div':
                        # Keep only reverse-direction translation links.
                        if e and "/translate/{}-{}/".format(
                                self._target, self._source) in e.get('href'):
                            temp += e.get_text() + ' '
                filtered_elements.append(temp)

            if not filtered_elements:
                raise ElementNotFoundInGetRequest(word)

            # Cleanup: the comprehension previously reused the name `word`,
            # shadowing the parameter; use a distinct name for clarity.
            word_list = [
                candidate for candidate in filtered_elements
                if candidate and len(candidate) > 1
            ]

            if not word_list:
                raise TranslationNotFound(word)

            return word_list if return_all else word_list[0]
예제 #20
0
    def __init__(self,
                 record_to_request: str,
                 image_as_base64: bool,
                 input_type="name",
                 temp_file="image"):
        """Fetch a compound's PNG image from the PubChem REST API.

        :param record_to_request: name/identifier to search for
        :param image_as_base64: keep the image as base64 bytes instead of
            saving it to <temp_file>.png
        :param input_type: PubChem search namespace ("name", "cid", ...)
        :param temp_file: basename for the saved PNG when not base64
        """
        if not image_as_base64:
            self.filename = temp_file + ".png"
        if search_validate(input_type):
            greenprint("searching for an image : " + record_to_request)
            # fixes local code/context to work with url/remote context:
            # "iupac_name" is a local alias for PubChem's "name" namespace.
            if input_type == "iupac_name":
                self.input_type = "name"
            else:
                self.input_type = input_type
            self.request_url        = requote_uri("{}/compound/{}/{}/PNG".format(\
                                            API_BASE_URL,self.input_type,record_to_request))
            blueprint("[+] Requesting: " + makered(self.request_url))
            self.rest_request = requests.get(self.request_url)
            # True means no error
            if self.was_there_was_an_error():
                # Request succeeded; decode the PNG payload.
                self.image_storage = Image.open(
                    BytesIO(self.rest_request.content))
                if not image_as_base64:
                    try:
                        greenprint("[+] Saving image as {}".format(
                            self.filename))
                        self.image_storage.save(self.filename, format="png")
                    except Exception as blorp:
                        redprint(
                            "[-] Exception when opening or writing image file")
                        print(blorp)
                else:
                    print(self.rest_request.raw)
                    greenprint("[+] Encoding Image as Base64")
                    # BUGFIX: b64encode() requires bytes; passing the PIL
                    # Image raised TypeError. Encode the raw PNG payload
                    # instead. (The original's unreachable third branch for
                    # non-bool flags was removed.)
                    self.image_storage = base64.b64encode(
                        self.rest_request.content)
        else:
            redprint("[-] Input type was wrong for Image Search")
            return None
예제 #21
0
    def _lookup_ips(self, param):
        """Action handler: look up a comma-separated list of IPs.

        Validates the API key, normalizes the 'ips' parameter (trims
        entries, drops empties, percent-encodes the joined list), queries
        the lookup endpoint and annotates each result with a
        human-readable code meaning and a visualization URL.

        :param param: action parameter dict; expects key 'ips'
        :return: phantom status (APP_SUCCESS / APP_ERROR)
        """
        action_result = self.add_action_result(ActionResult(dict(param)))
        ret_val = self._check_apikey(action_result)
        if phantom.is_fail(ret_val):
            return ret_val

        try:
            # Normalize: split on commas, strip whitespace, drop empties.
            ips = [x.strip() for x in param["ips"].split(",")]
            ips = list(filter(None, ips))
            if not ips:
                return action_result.set_status(
                    phantom.APP_ERROR,
                    INVALID_COMMA_SEPARATED_VALUE_ERR_MSG.format(key='ips'))
            ips = ",".join(ips)
            # Percent-encode so the list is safe to embed in the URL.
            ips_string = requote_uri(ips)
        except Exception as e:
            err = self._get_error_message_from_exception(e)
            err_msg = "Error occurred while processing 'ips' action parameter. {0}".format(
                err)
            return action_result.set_status(phantom.APP_ERROR, err_msg)

        ret_val, response_json, status_code = self._make_rest_call(
            action_result,
            "get",
            LOOKUP_IPS_URL.format(ips=ips_string),
            headers=self._headers)
        if phantom.is_fail(ret_val):
            return ret_val

        # The list is registered first; items appended below appear in it.
        result_data = []
        action_result.add_data(result_data)
        try:
            for result in response_json:
                # Map the numeric code to its meaning where known.
                if result["code"] in CODES:
                    result["code_meaning"] = CODES[result["code"]]
                else:
                    result["code_meaning"] = "This code is unmapped"

                result["visualization"] = VISUALIZATION_URL.format(
                    ip=result["ip"])
                result_data.append(result)

            return action_result.set_status(phantom.APP_SUCCESS)
        except Exception as e:
            err = self._get_error_message_from_exception(e)
            err_msg = "Error occurred while processing results: {0}".format(
                err)
            return action_result.set_status(phantom.APP_ERROR, err_msg)
예제 #22
0
def getopenedtickets(timespan, config):
    '''Collect tickets opened before the end of the search day.

    Pages through the requests API (100 rows per page) for every
    configured status code, translating priorities and site names, and
    appends one CSV line per ticket to config["results"].

    :param timespan: value used as the 'opened before' search criterion
    :param config: dict with url, technicianKey, statusCodes,
        priorityTranslations, siteTranslations and a results list
    '''
    with open('queryOpened.json', 'r') as fileopened:
        queryjson = json.load(fileopened)
    # here we search for all tickets opened before the end of the search day
    queryjson["list_info"]["search_criteria"][0]["value"] = timespan
    baseurl = config["url"] + '/api/v3/requests?TECHNICIAN_KEY=' + config[
        "technicianKey"]
    ticketcount = 0
    for statuscode in config["statusCodes"]:
        queryjson["list_info"]["search_criteria"][1]["value"][
            "id"] = statuscode
        more = True
        resultsindex = 1
        while more:
            queryjson["list_info"]["start_index"] = resultsindex
            # The whole JSON query is passed URL-encoded as input_data.
            encodeduri = requote_uri(json.dumps(queryjson))
            queryurl = baseurl + '&input_data=' + encodeduri
            response = fetchdata(queryurl)
            jsonresults = response.json()
            for request in jsonresults["requests"]:
                reqid = request["id"]
                if request["priority"]:
                    # Translate the priority name when a mapping exists.
                    if request["priority"]["name"] in config[
                            "priorityTranslations"]:
                        reqpri = config["priorityTranslations"][
                            request["priority"]["name"]]["Priority"]
                    else:
                        reqpri = request["priority"]["name"]
                else:
                    reqpri = ""
                if request["site"]:
                    # Translate the site name when a mapping exists.
                    if request["site"]["name"] in config["siteTranslations"]:
                        reqsite = config["siteTranslations"][request["site"]
                                                             ["name"]]["Name"]
                    else:
                        reqsite = request["site"]["name"]
                else:
                    reqsite = "NONE"
                ticketcount = ticketcount + 1
                # created_time.value is epoch milliseconds.
                reqcreated = datetime.datetime.fromtimestamp(
                    int(int(request["created_time"]["value"]) /
                        1000)).strftime("%Y-%m-%d %H:%M:%S")
                resultstring = reqid + "," + reqpri + "," + reqsite + "," + reqcreated + "\n"
                config["results"].append(resultstring)
            more = jsonresults["list_info"]["has_more_rows"]
            if more:
                # Pages are 100 rows; advance to the next page.
                resultsindex = resultsindex + 100
예제 #23
0
def get_attribute_value(selected_html, find_all_tabs, attribute, mode):
    """Collect *attribute* values from tags nested in *selected_html*.

    :param selected_html: iterable of parsed elements (e.g. soup tags)
    :param find_all_tabs: tag name to search inside each element (e.g. 'a', 'input')
    :param attribute: attribute to read from each found tag (e.g. 'href')
    :param mode: "url" percent-encodes each value via requote_uri;
        any other mode appends the raw value
    :return: list of attribute values ([] when nothing is found)
    """
    values_list = []
    try:
        for link in selected_html:
            for a in link.find_all(find_all_tabs):
                if mode == "url":
                    values_list.append(requote_uri(a.get(attribute)))
                else:
                    values_list.append(a.get(attribute))
    except Exception as e:
        # Best-effort: report and fall through with whatever was collected.
        print("get_attribute_value " + str(e))
    # BUGFIX: the original returned None for empty input and after an
    # exception; always return a list so callers can iterate safely.
    return values_list
예제 #24
0
    async def define(self, context):
        """Look up a term in the dictionary and reply with definitions.

        With no argument, replies with usage help. Shows up to three
        entries; when the lookup returns suggestions instead of entries,
        lists them as "Did you mean...".
        """
        args = context.message.clean_content.split(maxsplit=1)
        if 2 > len(args):
            await utils.say(
                context.channel,
                content=
                f"Type `{context.prefix}{context.command.name} {context.command.usage}` to look up a term in the dictionary."
            )
            return

        # Collapse internal whitespace runs to single spaces.
        search_term = re.sub(r"\s+", " ", args[1].strip())
        search_results = utils.dictionary.regular_lookup(search_term)

        if search_results:
            base_url = Settings.command_settings("define")["base_url"]
            # Percent-encode so multi-word terms form a valid URL.
            search_url = requote_uri(f"{base_url}{search_term}")
            reply = discord.Embed(title=f'Define "{search_term}"',
                                  url=search_url)
            reply.set_footer(text=f"Source: {search_url}")
            try:
                num_entries = len(search_results)
                definitions = []
                for i, entry in enumerate(search_results):
                    # Cap the reply at the first three entries.
                    if i > 2:
                        break

                    is_offensive = " *(offensive)*" if entry.is_offensive else ""
                    term_type = entry.term_type
                    definitions.append(
                        f"**{search_term}** {i + 1}/{num_entries} *({term_type})*{is_offensive}"
                    )
                    definitions.append("".join(
                        ["*", "\n\n".join(entry.short_definitions), "*"]))
                    definitions.append("\n")

                reply.description = "\n".join(definitions)
            except AttributeError:
                # Suggested search terms (lookup returned plain strings,
                # which lack the entry attributes accessed above).
                reply.url = ""
                suggestions = "\n".join(search_results)
                reply.description = f"**Did you mean...**\n*{suggestions}*"

            await utils.say(context.channel, embed=reply)
        else:
            await utils.say(
                context.channel,
                content=f"Couldn't find a definition for `{search_term}`.")
예제 #25
0
    def post(self, request):
        """Kick off an async Shopee search crawl for the posted keyword.

        Returns the Celery task id so the client can poll for results.
        """
        keyword = request.POST.get('keyword')
        url = requote_uri("https://shopee.vn/search?keyword=" + keyword)

        crawl_kwargs = {
            'url': url,
            'required_class': 'shopee-search-item-result__item',
            'scroll_to_bottom': True,
            'label': 'shopee_crawling_search_result',
        }
        task = crawl_shopee_url.apply_async(
            kwargs=crawl_kwargs, link=get_shopee_products_url.s())

        return Response({"task_id": task.id})
예제 #26
0
def getclosedtickets(timespan, config):
    """Tally tickets closed within *timespan* per site into *config*.

    Loads the base query from ``queryClosed.json``, patches in the start
    and end of *timespan*, then pages through the request API counting
    each closed ticket into ``config["results"][site]["TotalClosed"]``.
    Sites with no entry in ``config["siteTranslations"]`` are appended to
    ``config["UnknownSites"]`` and counted under the "UNKNOWN" bucket;
    tickets with no site at all count under "NONE".

    Args:
        timespan: two-element sequence (start, end) substituted into the
            query's first two search criteria.
        config: settings dict with "url", "technicianKey",
            "siteTranslations", "results" and "UnknownSites" keys;
            mutated in place with the tallies.

    Returns:
        int: total number of closed tickets counted.
    """
    with open('queryClosed.json', 'r') as fileclosed:
        queryjson = json.load(fileclosed)
    queryjson["list_info"]["search_criteria"][0]["value"] = timespan[0]
    queryjson["list_info"]["search_criteria"][1]["value"] = timespan[1]
    baseurl = config["url"] + '/api/v3/requests?TECHNICIAN_KEY=' + config[
        "technicianKey"]
    more = True
    resultsindex = 1
    ticketcount = 0
    while more:
        queryjson["list_info"]["start_index"] = resultsindex
        # The API expects the whole query as a URL-encoded JSON parameter.
        encodeduri = requote_uri(json.dumps(queryjson))
        queryurl = baseurl + '&input_data=' + encodeduri
        response = fetchdata(queryurl)
        jsonresults = response.json()
        for request in jsonresults["requests"]:
            # Map the raw site name through the configured translations.
            if request["site"]:
                if request["site"]["name"] in config["siteTranslations"]:
                    reqsite = config["siteTranslations"][request["site"]
                                                         ["name"]]["Name"]
                else:
                    reqsite = request["site"]["name"]
            else:
                reqsite = "NONE"
            # Counters are stored as strings; round-trip through int.
            if reqsite in config["results"]:
                config["results"][reqsite]["TotalClosed"] = str(
                    int(config["results"][reqsite]["TotalClosed"]) + 1)
            else:
                config["UnknownSites"].append(reqsite)
                reqsite = "UNKNOWN"
                config["results"][reqsite]["TotalClosed"] = str(
                    int(config["results"][reqsite]["TotalClosed"]) + 1)
            ticketcount += 1
        more = jsonresults["list_info"]["has_more_rows"]
        if more:
            # NOTE(review): assumes the API serves pages of 100 rows —
            # confirm against the row_count configured in queryClosed.json.
            resultsindex += 100
    return ticketcount
예제 #27
0
    def post(self, request):
        """Queue an asynchronous crawl of Tiki search results.

        Builds a percent-encoded Tiki search URL from the posted
        'keyword' field and dispatches the Celery crawl task, chaining
        product-URL extraction onto its result.

        Returns:
            Response: JSON body containing the Celery task id.
        """
        keyword = request.POST.get('keyword')
        search_url = requote_uri("https://tiki.vn/search?q=" + keyword)

        # get_tiki_products_url runs on the crawl task's output.
        task = craw_tiki_url.apply_async(
            kwargs={
                'url': search_url,
                'required_class': 'product-box-list',
                'scroll_to_bottom': False,
                'label': 'tiki_crawling_search_result',
            },
            link=get_tiki_products_url.s(),
        )

        return Response({"task_id": task.id})
예제 #28
0
def getURL():
    """Pop and return the first URL from the queue file at URLS_PATH.

    Takes the first line of the file as the URL and writes the remaining
    lines back; when the file held only one URL, the file is deleted.

    Returns:
        str: the first queued URL, percent-encoded via requote_uri.

    Raises:
        ValueError: if the queue file contains no URLs.
    """
    url = None
    with open(URLS_PATH, 'r') as file:
        urls = ''
        for i, line in enumerate(file):
            if i == 0:
                url = line
            else:
                urls += line

    # Guard against an empty queue file: the original code would hit an
    # UnboundLocalError on `url` here.
    if url is None:
        raise ValueError('URL queue file is empty: ' + URLS_PATH)

    if not urls:
        # Last URL consumed — remove the now-empty queue file.
        os.remove(URLS_PATH)
    else:
        with open(URLS_PATH, 'w') as file:
            file.write(urls)

    # Strip the trailing newline *before* quoting. The original quoted the
    # newline into '%0A' and then called rstrip('%0A'), which strips the
    # character set {'%', '0', 'A'} and could eat legitimate trailing
    # characters from the URL itself.
    return requote_uri(url.rstrip('\n'))
예제 #29
0
    def getTopCountyCases(self, entity=1, sortType="DESC", type=""):
        """Fetch top-county case details from the configured endpoint.

        Args:
            entity: entity id substituted for the "#entity" placeholder;
                "" or None falls back to 1.
            sortType: sort direction substituted for "#sortType".
            type: metric type substituted for "#type". (Shadows the
                builtin ``type``; name kept for caller compatibility.)

        Returns:
            str: the response body on HTTP 200, otherwise ``self.errMsg``.
        """
        # Note: 0 is deliberately NOT treated as missing, only ""/None.
        if entity == "" or entity is None:
            entity = 1

        # Fill the placeholder tokens in the URL template, then
        # percent-encode the result.
        url = self.baseUrl + self.topCountyDetailsUrl
        url = url.replace("#entity", str(entity))
        url = url.replace("#sortType", sortType)
        url = url.replace("#type", type)
        url = requote_uri(url)
        print(url)

        response = requests.get(url)
        if response.status_code == 200:
            return response.text
        # Any non-200 status maps to the instance's canned error message.
        return self.errMsg
예제 #30
0
def getListingIDBySymbol(listing):
    """Resolve listing.listing_id via the ticker API (-1 when not found).

    Queries the JSON ticker endpoint for the listing's symbol, stores the
    first match's id on the listing (or -1 on no match), and returns the
    mutated listing.
    """
    pool = urllib3.PoolManager()
    lookup_url = requote_uri(
        ticker_url + "?format=json&ticker=" + quote(listing.ticker))
    response = pool.request('GET', lookup_url)
    matches = json.loads(response.data.decode('utf-8'))
    print("getListingIDBySymbol: retrieving: " + lookup_url)

    if not matches:
        # No listing matched the ticker symbol.
        print("getListingIDBySymbol: Did not find ticker: " + listing.ticker)
        listing.listing_id = -1
    else:
        # Several listings may share the symbol; take the first match.
        listing.listing_id = matches[0]['id']
        print("getListingIDBySymbol: Found ticker: " + listing.ticker +
              " with ID: " + str(listing.listing_id))

    return listing
예제 #31
0
파일: livedns.py 프로젝트: asplunden/giu
    def _send(self,
              endpoint: str,
              method: str = 'GET',
              data: Optional[Dict] = None) -> Optional[Dict]:
        """Issue one HTTP request against the Gandi Live DNS API.

        Args:
            endpoint (str): Live DNS API endpoint path.
            method (str): HTTP verb to use.
            data: optional payload sent as the JSON request body.

        Returns:
            The decoded JSON response on success, a stub dict for
            HTTP 204, or None on timeout / non-2xx status.

        Raises:
            None."""
        request_headers = self._headers.copy()

        # Guarantee exactly one slash between base URL and endpoint.
        if not (self._url.endswith('/') or endpoint.startswith('/')):
            endpoint = f'/{endpoint}'
        target = requote_uri(f'{self._url}{endpoint}')

        if data:
            request_headers['Content-type'] = 'application/json'

        try:
            response = requests.request(method=method,
                                        url=target,
                                        headers=request_headers,
                                        json=data,
                                        timeout=60.0)
        except Timeout:
            return None

        if not response.ok:
            return None

        # HTTP/204 carries no body to decode; report success explicitly.
        if response.status_code == 204:
            return {'code': 204, 'message': 'ok'}

        return response.json()
예제 #32
0
def test_requote_uri_with_unquoted_percents(uri, expected):
    """Regression test — https://github.com/requests/requests/issues/2356"""
    actual = requote_uri(uri)
    assert actual == expected