def iter_recipes(self, pattern):
    """Search recipes matching *pattern* and iterate over the results."""
    # An API token is required before querying; fetch one lazily.
    if not self.TOKEN:
        self.fill_token()
    search_params = urlencode({
        'query': pattern,
        'page': 1,
        'pagesize': 20,
        'sort': 're',
    })
    return self.results.go(query=search_params).iter_recipes()
def __do_request(self, method, params):
    """Sign and send a request to the Allocine v3 REST API.

    :param method: API method name, appended to the base URL.
    :param params: dict of query parameters; mutated in place to add the
        date stamp (``sed``) and the signature (``sig``).
    """
    # 'sed' must be part of the signed payload, so add it before signing.
    params["sed"] = time.strftime('%Y%m%d', time.localtime())
    # Signature = base64(sha1(secret + urlencoded params)).
    # NOTE(review): assumes SECRET_KEY and urlencode() give the same string
    # type (Python 2 bytes); under Python 3 sha1() needs bytes — confirm.
    params["sig"] = base64.b64encode(
        hashlib.sha1(self.SECRET_KEY + urlencode(params)).digest())
    return self.request('http://api.allocine.fr/rest/v3/{}'.format(method),
                        params=params)
def iter_history(self, subscription):
    """Iterate over the third-party payment history for *subscription*.

    The search form pre-fills the date range; those values are read back
    from the page and re-submitted unchanged.
    """
    self.searchp.stay_or_go()
    # Reuse the default date range provided by the search form itself.
    date_deb = self.page.doc.xpath(
        '//input[@name="vp_recherche_paiement_tiers_payant_portlet_1dateDebutRecherche"]'
    )[0].value
    date_fin = self.page.doc.xpath(
        '//input[@name="vp_recherche_paiement_tiers_payant_portlet_1dateFinRecherche"]'
    )[0].value
    data = {
        'vp_recherche_paiement_tiers_payant_portlet_1dateDebutRecherche': date_deb,
        'vp_recherche_paiement_tiers_payant_portlet_1dateFinRecherche': date_fin,
        'vp_recherche_paiement_tiers_payant_portlet_1codeOrganisme': 'null',
        'vp_recherche_paiement_tiers_payant_portlet_1actionEvt': 'rechercheParDate',
        'vp_recherche_paiement_tiers_payant_portlet_1codeRegime': '01',
    }
    # The endpoint expects a raw urlencoded body, hence the explicit header.
    self.session.headers.update(
        {'Content-Type': 'application/x-www-form-urlencoded'})
    self.historyp.go(data=urlencode(data))
    # Implicitly returns None when the expected page was not reached.
    if self.historyp.is_here():
        return self.page.iter_history()
def search_housings(self, type, cities, nb_rooms, area_min, area_max,
                    cost_min, cost_max, house_types, advert_types):
    """Run an advanced housing search and iterate over matching housings."""
    if type not in TYPES:
        raise TypeNotSupported()
    data = {
        'ci': ','.join(cities),
        'idtt': TYPES.get(type, 1),
        'org': 'advanced_search',
        'surfacemax': area_max or '',
        'surfacemin': area_min or '',
        'tri': 'd_dt_crea',
    }
    # Sale and rental searches use differently named price fields.
    if type == POSTS_TYPES.SALE:
        data['pxmax'] = cost_max or ''
        data['pxmin'] = cost_min or ''
    else:
        data['px_loyermax'] = cost_max or ''
        data['px_loyermin'] = cost_min or ''
    if nb_rooms:
        data['nb_pieces'] = nb_rooms
    selected = [RET[house_type] for house_type in house_types if house_type in RET]
    if selected:
        data['idtypebien'] = ','.join(selected)
    return self.search.go(request=urlencode(data)).iter_housings(
        query_type=type, advert_types=advert_types
    )
def search_housings(self, type, cities, nb_rooms, area_min, area_max,
                    cost_min, cost_max, house_types):
    """POST a housing search and iterate over the results."""
    if type not in self.TYPES:
        raise TypeNotSupported()
    self.session.headers.update({
        'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'
    })
    data = {
        'geo_objets_ids': ','.join(cities),
        'surface[min]': area_min or '',
        'surface[max]': area_max or '',
        'prix[min]': cost_min or '',
        'prix[max]': cost_max or '',
        'produit': self.TYPES.get(type, 'location'),
        'recherche': 1,
        'nb_resultats_par_page': 40,
    }
    if nb_rooms:
        # Only a range is supported; pin both bounds to the exact count.
        data['nb_pieces[min]'] = nb_rooms
        data['nb_pieces[max]'] = nb_rooms
    ret = []
    for house_type in house_types:
        if house_type in self.RET:
            ret.append(self.RET.get(house_type))
    # The repeated 'typesbien[]' parameter cannot be expressed with a plain
    # dict, so it is appended by hand to the urlencoded body
    # ('%5B%5D' is the urlencoded '[]').
    _data = '%s%s%s' % (urlencode(data),
                        '&typesbien%5B%5D=',
                        '&typesbien%5B%5D='.join(ret))
    return self.search_page.go(data=_data).iter_housings()
def obj_url(self): keys_to_copy = { 'idDocument': 'idDoc', 'dateDocument': 'dateDoc', 'idLocalisation': 'idLocalisation', 'viDocDocument': 'viDocDocument', } # Here we parse the json with ibancrypte in it, for most cases if 'ibanCrypte' in self.el: url = 'demat-wspl/rest/consultationDocumentDemat?' keys_to_copy.update({ 'typeCpt': 'typeCompte', 'familleDoc': 'famDoc', 'ibanCrypte': 'ibanCrypte', 'typeDoc': 'typeDoc', 'consulted': 'consulted', }) request_params = {'typeFamille': 'R001', 'ikpiPersonne': ''} # Here we parse the json with idcontrat in it. For the cases present # on privee.mabanque where sometimes the doc url is different else: url = 'demat-wspl/rest/consultationDocumentSpecialBpfDemat?' keys_to_copy.update({ 'heureDocument': 'heureDoc', 'numClient': 'numClient', 'typeReport': 'typeReport', }) request_params = {'ibanCrypte': ''} for k, v in keys_to_copy.items(): request_params[k] = Dict(v)(self) return Env('baseurl')(self) + url + urlencode(request_params)
def api0_request(self, command, action, parameter='', data=None, nologin=False):
    """Perform a legacy (v0) API call and return the decoded JSON.

    :param nologin: internal flag preventing infinite recursion when the
        re-login retry itself fails.
    :raises BrowserIncorrectPassword: when authentication keeps failing.
    :raises AuMException: on any other API error code.
    """
    if data is None:
        # Always do POST requests.
        data = ''
    elif isinstance(data, (list, tuple, dict)):
        data = urlencode(data)
    elif isinstance(data, unicode):
        data = data.encode('utf-8')
    url = self.buildurl('http://api.adopteunmec.com/api.php',
                        S=self.APIKEY, C=command, A=action, P=parameter,
                        O='json')
    buf = self.openurl(url, data).read()
    try:
        # The response may carry garbage before the JSON object; skip it.
        r = json.loads(buf[buf.find('{'):])
    except ValueError:
        raise ValueError(buf)
    if 'errors' in r and r['errors'] != '0' and len(r['errors']) > 0:
        code = r['errors'][0]
        # These codes mean the session expired: log in again, retry once.
        if code in (u'0.0.2', u'1.1.1', u'1.1.2'):
            if not nologin:
                self.login()
                return self.api0_request(command, action, parameter, data,
                                         nologin=True)
            else:
                raise BrowserIncorrectPassword(AuMException.ERRORS[code])
        else:
            raise AuMException(code)
    return r
def search_housings(self, type, cities, nb_rooms, area_min, area_max,
                    cost_min, cost_max, house_types, advert_types):
    """Query the advanced-search endpoint and yield matching housings."""
    if type not in TYPES:
        raise TypeNotSupported()
    data = {
        'ci': ','.join(cities),
        'idtt': TYPES.get(type, 1),
        'org': 'advanced_search',
        'surfacemax': area_max or '',
        'surfacemin': area_min or '',
        'tri': 'd_dt_crea',
    }
    # Price field names depend on whether this is a sale or a rental.
    if type == POSTS_TYPES.SALE:
        data['pxmax'] = cost_max or ''
        data['pxmin'] = cost_min or ''
    else:
        data['px_loyermax'] = cost_max or ''
        data['px_loyermin'] = cost_min or ''
    if nb_rooms:
        data['nb_pieces'] = nb_rooms
    wanted = [RET[h] for h in house_types if h in RET]
    if wanted:
        data['idtypebien'] = ','.join(wanted)
    page = self.search.go(request=urlencode(data))
    return page.iter_housings(query_type=type, advert_types=advert_types)
def search_housings(self, type, cities, nb_rooms, area_min, area_max,
                    cost_min, cost_max, house_types, advert_types):
    """Search housings on the site and iterate over the results."""
    if type not in self.TYPES:
        raise TypeNotSupported()
    ret = []
    if type == POSTS_TYPES.VIAGER:
        # Viager searches ignore house_types: a single fixed type is used.
        ret = ['Viager']
    else:
        for house_type in house_types:
            if house_type in self.RET:
                ret.append(self.RET.get(house_type))
    data = {  # NOTE(review): the site appears to expect Latin-1 city names.
            'location': ','.join(cities).encode('iso 8859-1'),
            'furnished': type == POSTS_TYPES.FURNISHED_RENT,
            'areaMin': area_min or '',
            'areaMax': area_max or '',
            'priceMin': cost_min or '',
            'priceMax': cost_max or '',
            'transaction': self.TYPES.get(type, 'location'),
            'recherche': '',
            'mode': '',
            'proximity': '0',
            'roomMin': nb_rooms or '',
            'page': '1'}
    # The repeated 'type' parameter cannot be expressed with a dict, so it
    # is appended by hand to the urlencoded query string.
    query = u'%s%s%s' % (urlencode(data), '&type=', '&type='.join(ret))
    return self.search.go(query=query).iter_housings(
        query_type=type, advert_types=advert_types
    )
def search_videos(self, pattern):
    """Search videos matching *pattern* and yield them."""
    search_form = urlencode({'search': pattern, 'submit': 'Rechercher'})
    self.location('/online/', search_form)
    assert self.is_on_page(VideoListPage)
    for video in self.page.iter_video(self.AVAILABLE_VIDEOS):
        yield video
def next_page(self):
    """Return the URL of the next result page, or None when exhausted."""
    params = self.env['params']
    params['max_position'] = self.page.min_position
    # Fall back to the position remembered in the environment when the
    # page did not provide one.
    if not params['max_position'] and 'min_position' in self.env:
        params['max_position'] = self.env['min_position']
    if self.page.has_next:
        base_url = self.page.url.split('?')[0]
        return u'%s?%s' % (base_url, urlencode(params))
def remove_issue(self, id):
    """Delete the tracker issue identified by *id*."""
    self.location('/issues/%s' % id)
    assert self.is_on_page(IssuePage)
    # The CSRF token must be scraped from the issue page itself.
    payload = (('authenticity_token', self.page.get_authenticity_token()),)
    self.openurl('/issues/%s/destroy' % id, urlencode(payload))
def search_videos(self, pattern):
    """Yield videos whose title matches *pattern*."""
    query = {'search': pattern, 'submit': 'Rechercher'}
    self.location('/online/', urlencode(query))
    assert self.is_on_page(VideoListPage)
    for vid in self.page.iter_video(self.AVAILABLE_VIDEOS):
        yield vid
def iter_transactions(self):
    """Yield the account's transactions, following the server-side
    pagination of the XML transaction feed."""
    url = self.get_part_url()
    if url is None:
        # There are no transactions in this kind of account
        return
    is_deferred_card = bool(self.doc.xpath(u'//div[contains(text(), "Différé")]'))
    has_summary = False
    if is_deferred_card:
        coming_debit_date = None
        # get coming debit date for deferred_card
        date_string = Regexp(CleanText(u'//option[contains(text(), "détail des factures à débiter le")]'),
                             r'(\d{2}/\d{2}/\d{4})',
                             default=NotAvailable)(self.doc)
        if date_string:
            coming_debit_date = parse_d(date_string)
    while True:
        d = XML(self.browser.open(url).content)
        el = d.xpath('//dataBody')
        if not el:
            return
        el = el[0]
        # The payload is a Latin-1 encoded HTML fragment inside the XML.
        s = unicode(el.text).encode('iso-8859-1')
        doc = fromstring(s)
        for tr in self._iter_transactions(doc):
            if tr.type == Transaction.TYPE_CARD_SUMMARY:
                has_summary = True
            # On deferred cards, card transactions seen before the summary
            # line are still pending debits.
            if is_deferred_card and tr.type is Transaction.TYPE_CARD:
                tr.type = Transaction.TYPE_DEFERRED_CARD
                if not has_summary:
                    if coming_debit_date:
                        tr.date = coming_debit_date
                    tr._coming = True
            yield tr
        # 'suite' == 1 signals that a further page exists.
        el = d.xpath('//dataHeader')[0]
        if int(el.find('suite').text) != 1:
            return
        # Build the next page URL from the pagination header, keeping the
        # signed parameters ('sign', 'src') of the current URL.
        url = urlparse(url)
        p = parse_qs(url.query)
        args = {}
        args['n10_nrowcolor'] = 0
        args['operationNumberPG'] = el.find('operationNumber').text
        args['operationTypePG'] = el.find('operationType').text
        args['pageNumberPG'] = el.find('pageNumber').text
        args['idecrit'] = el.find('idecrit').text or ''
        args['sign'] = p['sign'][0]
        args['src'] = p['src'][0]
        url = '%s?%s' % (url.path, urlencode(args))
def get_tracking_info(self, _id):
    """Fetch UPS tracking information for parcel *_id*."""
    form = urlencode({
        'HTMLVersion': '5.0',
        'USER_HISTORY_LIST': '',
        'loc': 'en_US',
        'track.x': 'Track',
        'trackNums': _id.encode('utf-8'),
    })
    self.location('https://wwwapps.ups.com/WebTracking/track', form)
    assert self.is_on_page(TrackPage)
    return self.page.get_info(_id)
def get_station_departures(self, station_id, arrival_id, date):
    """List departures from *station_id*, optionally filtered by an
    arrival station and a date."""
    query = {'fn': station_id}
    if arrival_id:
        query['tn'] = arrival_id
    if date:
        query['db'] = datetime.strftime(date, "%d/%m/%Y")
        query['hb'] = datetime.strftime(date, "%H")
        # Search window always ends at midnight.
        query['he'] = '24'
    return self.departures.open(qry=urlencode(query)).get_station_departures()
def __do_request(self, method, params):
    """Sign and send a request to the Allocine v3 REST API.

    :param method: API method name, appended to the base URL.
    :param params: dict of query parameters; mutated in place to add the
        date stamp (``sed``) and the signature (``sig``).
    """
    # 'sed' must be part of the signed payload, so add it before signing.
    params["sed"] = time.strftime('%Y%m%d', time.localtime())
    # Signature = base64(sha1(secret + urlencoded params)).
    # NOTE(review): assumes SECRET_KEY and urlencode() give the same string
    # type (Python 2 bytes); under Python 3 sha1() needs bytes — confirm.
    params["sig"] = base64.b64encode(
        hashlib.sha1(
            self.SECRET_KEY + urlencode(params)
        ).digest()
    )
    return self.request(
        'http://api.allocine.fr/rest/v3/{}'.format(method),
        params=params
    )
def iter_video(self, family):
    """Yield every video of *family*, walking pages until an empty one."""
    emission = 0
    while True:
        self.location('/do.php', urlencode({'a': 'ge',
                                            'famille': family,
                                            'emissions': emission}))
        assert self.is_on_page(VideoListPage)
        # An empty listing marks the end of the pagination.
        if self.page.is_list_empty():
            break
        for vid in self.page.iter_video(self.AVAILABLE_VIDEOS):
            yield vid
        emission += 1
def get_station_departures(self, station_id, arrival_id, date):
    """Return departures from *station_id*, optionally restricted to a
    destination station and a departure date."""
    params = {'fn': station_id}
    if arrival_id:
        params['tn'] = arrival_id
    if date:
        params.update({
            'db': datetime.strftime(date, "%d/%m/%Y"),
            'hb': datetime.strftime(date, "%H"),
            'he': '24',  # search window always ends at midnight
        })
    page = self.departures.open(qry=urlencode(params))
    return page.get_station_departures()
def login(self, username, password):
    """Authenticate on e-hentai.

    :raises BrowserIncorrectPassword: when the credentials are refused.
    """
    assert isinstance(username, basestring)
    assert isinstance(password, basestring)
    credentials = urlencode({'ipb_login_username': username,
                             'ipb_login_password': password})
    self.location('http://e-hentai.org/bounce_login.php', credentials,
                  no_login=True)
    assert self.is_on_page(LoginPage)
    if not self.page.is_logged():
        raise BrowserIncorrectPassword()
    # necessary in order to reach the fjords
    self.home()
def prepare_url(url, fields):
    """Return *url* with the query parameters in *fields* set, replacing
    any existing parameters with the same names."""
    parsed = urlparse(url)
    # Keep existing parameters that are not being overridden.
    pairs = [(name, value)
             for name, value in parse_qsl(parsed.query)
             if name not in fields]
    pairs.extend(fields.items())
    return urlunparse((parsed.scheme, parsed.netloc, parsed.path,
                       parsed.params, urlencode(pairs), parsed.fragment))
def iter_video(self, family):
    """Iterate over all videos of the given *family*, page by page."""
    data = {'a': 'ge', 'famille': family, 'emissions': 0}
    while True:
        self.location('/do.php', urlencode(data))
        assert self.is_on_page(VideoListPage)
        # An empty page means there is nothing left to fetch.
        if self.page.is_list_empty():
            return
        for video in self.page.iter_video(self.AVAILABLE_VIDEOS):
            yield video
        data['emissions'] += 1
def search_videos(self, pattern, sortby):
    """Search GDC Vault for free videos matching *pattern*.

    NOTE(review): *sortby* is accepted but not used by this request —
    confirm whether the endpoint supports sorting.
    """
    body = urlencode({
        "firstfocus": "",
        "category": "free",
        "keyword": pattern.encode('utf-8'),
        "conference_id": "",
    })
    # probably not required
    self.addheaders = [
        ('Referer', 'http://gdcvault.com/'),
        ("Content-Type", 'application/x-www-form-urlencoded'),
    ]
    # is_logged assumes html page
    self.location('http://gdcvault.com/search.php', data=body, no_login=True)
    assert self.is_on_page(SearchPage)
    return self.page.iter_videos()
def market_pagination(self, account_id):
    """Return the URL of the next market page, or None on the last page."""
    # Next page is handled by js. Need to build the right url by changing
    # params in current url
    several_pages = self.get_pages()
    if several_pages:
        current_page, total_pages = map(int, several_pages)
        if current_page < total_pages:
            params = {
                'action': 11,
                'idCptSelect': self.get_dropdown_menu(account_id),
                'numPage': current_page + 1,
            }
            # Keep scheme/netloc/path of the current URL, swap the query.
            url_to_keep = urlsplit(self.browser.url)[:3]
            url_to_complete = (
                urlencode(params),
                ''
            )  # '' is the urlsplit().fragment needed for urlunsplit
            next_page_url = urlunsplit(url_to_keep + url_to_complete)
            return next_page_url
def search_housings(self, cities, area_min, area_max, cost_min, cost_max):
    """Search housings; only the first entry of *cities* is used, as the
    backend accepts a single departement/ville pair per query."""
    # Each city is encoded as "<departement>;<ville>".
    departement, ville = cities[0].split(";")[:2]
    query = urlencode({
        "departement": departement[:2],
        "ville": ville.lower(),
        "prixMin": cost_min or '',
        "prixMax": cost_max or '',
        "surfMin": area_min or '',
        "surfMax": area_max or '',
    })
    return self.search.go(query=query).iter_housings()
def iter_history(self, subscription):
    """Iterate over the third-party payment history for *subscription*.

    The search form pre-fills the date range; those values are read back
    from the page and re-submitted unchanged.
    """
    self.searchp.stay_or_go()
    # Reuse the default date range supplied by the search form itself.
    date_deb = self.page.doc.xpath('//input[@name="vp_recherche_paiement_tiers_payant_portlet_1dateDebutRecherche"]')[0].value
    date_fin = self.page.doc.xpath('//input[@name="vp_recherche_paiement_tiers_payant_portlet_1dateFinRecherche"]')[0].value
    data = {'vp_recherche_paiement_tiers_payant_portlet_1dateDebutRecherche': date_deb,
            'vp_recherche_paiement_tiers_payant_portlet_1dateFinRecherche': date_fin,
            'vp_recherche_paiement_tiers_payant_portlet_1codeOrganisme': 'null',
            'vp_recherche_paiement_tiers_payant_portlet_1actionEvt': 'rechercheParDate',
            'vp_recherche_paiement_tiers_payant_portlet_1codeRegime': '01',
            }
    # The endpoint expects a raw urlencoded body, hence the explicit header.
    self.session.headers.update({'Content-Type': 'application/x-www-form-urlencoded'})
    self.historyp.go(data=urlencode(data))
    # Implicitly returns None when the expected page was not reached.
    if self.historyp.is_here():
        return self.page.iter_history()
def advanced_search_job(self, metier='', place=None, contrat=None,
                        salary=None, qualification=None, limit_date=None,
                        domain=None):
    """Run an advanced job search and iterate over the matching adverts.

    All filters are optional; None values are passed through as-is for
    the backend to ignore.

    Fix: the previous version called ``place.split('|')`` unconditionally,
    which raised AttributeError whenever *place* was left at its default
    of None. An empty location list is now sent in that case.
    """
    data = {
        'lieux': self.decode_place(place.split('|') if place else []),
        'offresPartenaires': True,
        'rayon': 10,
        'tri': 0,
        'typeContrat': contrat,
        'qualification': qualification,
        'salaireMin': salary,
        'uniteSalaire': 'A',
        'emission': limit_date,
        'domaine': domain,
        'motsCles': metier,
    }
    return self.search.go(param=urlencode(data)).iter_job_adverts()
def iter_transactions(self):
    """Yield the account's transactions, following the server-side
    pagination of the XML transaction feed."""
    url = self.get_part_url()
    if url is None:
        # There are no transactions in this kind of account
        return
    is_deferred_card = bool(
        self.doc.xpath(u'//div[contains(text(), "Différé")]'))
    has_summary = False
    while True:
        d = XML(self.browser.open(url).content)
        el = d.xpath('//dataBody')
        if not el:
            return
        el = el[0]
        # The payload is a Latin-1 encoded HTML fragment inside the XML.
        s = unicode(el.text).encode('iso-8859-1')
        doc = fromstring(s)
        for tr in self._iter_transactions(doc):
            if tr.type == Transaction.TYPE_CARD_SUMMARY:
                has_summary = True
            # On deferred cards, card transactions seen before the summary
            # line are still pending debits.
            if is_deferred_card and tr.type is Transaction.TYPE_CARD:
                tr.type = Transaction.TYPE_DEFERRED_CARD
                if not has_summary:
                    tr._coming = True
            yield tr
        # 'suite' == 1 signals that a further page exists.
        el = d.xpath('//dataHeader')[0]
        if int(el.find('suite').text) != 1:
            return
        # Build the next page URL from the pagination header, keeping the
        # signed parameters ('sign', 'src') of the current URL.
        url = urlparse(url)
        p = parse_qs(url.query)
        args = {}
        args['n10_nrowcolor'] = 0
        args['operationNumberPG'] = el.find('operationNumber').text
        args['operationTypePG'] = el.find('operationType').text
        args['pageNumberPG'] = el.find('pageNumber').text
        args['idecrit'] = el.find('idecrit').text or ''
        args['sign'] = p['sign'][0]
        args['src'] = p['src'][0]
        url = '%s?%s' % (url.path, urlencode(args))
def login(self, user, pwd):
    """Authenticate on the Orange portal with *user*/*pwd*."""
    form = {
        "credential": str(user),
        "password": str(pwd),
        "save_user": "******",
        "save_pwd": "false",
        "save_TC": "true",
        "action": "valider",
        "usertype": "",
        "service": "",
        "url": "http://www.orange.fr",
        "case": "",
        "origin": "",
    }
    self.browser.addheaders = [
        ('Referer',
         'http://id.orange.fr/auth_user/template/auth0user/htm/vide.html'),
        ("Content-Type", 'application/x-www-form-urlencoded'),
    ]
    # Re-post to the current URL with the credentials as form data.
    self.browser.open(self.browser.geturl(), data=urlencode(form))
def get_wiki_preview(self, project, page, data):
    """Render *data* through the wiki preview endpoint and return the
    preview HTML, stripped of its surrounding fieldset/legend chrome."""
    # Make sure we are on the edit page of the right wiki page, since the
    # CSRF authenticity token must be scraped from it.
    if (not self.is_on_page(WikiEditPage) or self.page.groups[0] != project
            or self.page.groups[1] != page):
        self.location(
            '%s/projects/%s/wiki/%s/edit' % (self.BASEPATH, project,
                                             quote(page.encode('utf-8'))))
    url = '%s/projects/%s/wiki/%s/preview' % (self.BASEPATH, project,
                                              quote(page.encode('utf-8')))
    params = {}
    params['content[text]'] = data.encode('utf-8')
    params['authenticity_token'] = "%s" % self.page.get_authenticity_token()
    # The endpoint returns an HTML fragment, not a full document.
    preview_html = lxml.html.fragment_fromstring(
        self.readurl(url, urlencode(params)), create_parent='div')
    # Keep the fieldset's children but drop its tag; remove the legend.
    preview_html.find("fieldset").drop_tag()
    preview_html.find("legend").drop_tree()
    return lxml.html.tostring(preview_html)
def api0_request(self, command, action, parameter='', data=None, nologin=False):
    """Perform a legacy (v0) API call and return the decoded JSON.

    :param nologin: internal flag preventing infinite recursion when the
        re-login retry itself fails.
    :raises BrowserIncorrectPassword: when authentication keeps failing.
    :raises AuMException: on any other API error code.
    """
    if data is None:
        # Always do POST requests.
        data = ''
    elif isinstance(data, (list, tuple, dict)):
        data = urlencode(data)
    elif isinstance(data, unicode):
        data = data.encode('utf-8')
    url = self.buildurl('http://api.adopteunmec.com/api.php',
                        S=self.APIKEY, C=command, A=action, P=parameter,
                        O='json')
    buf = self.openurl(url, data).read()
    try:
        # The response may carry garbage before the JSON object; skip it.
        r = json.loads(buf[buf.find('{'):])
    except ValueError:
        raise ValueError(buf)
    if 'errors' in r and r['errors'] != '0' and len(r['errors']) > 0:
        code = r['errors'][0]
        # These codes mean the session expired: log in again, retry once.
        if code in (u'0.0.2', u'1.1.1', u'1.1.2'):
            if not nologin:
                self.login()
                return self.api0_request(command, action, parameter, data,
                                         nologin=True)
            else:
                raise BrowserIncorrectPassword(AuMException.ERRORS[code])
        else:
            raise AuMException(code)
    return r
def prepare_url(url, fields):
    """Rebuild *url* so every key of *fields* appears exactly once in its
    query string, overriding any pre-existing value."""
    parts = urlparse(url)
    kept = [pair for pair in parse_qsl(parts.query) if pair[0] not in fields]
    new_query = urlencode(kept + list(fields.items()))
    return urlunparse(parts._replace(query=new_query))
def query_issues(self, project_name, **kwargs):
    """Query the issue list of *project_name* with the given filters and
    return a dict with the project object and an issue iterator.

    Handles both tracker variants: filters submitted via POST or via a
    GET query string, chosen from the form method detected on the page.
    The METHODS table maps logical field names to each variant's
    parameter names.
    """
    self.location('/projects/%s/issues' % project_name)
    token = self.page.get_authenticity_token()
    method = self.page.get_query_method()
    # Columns requested in the result listing.
    data = (
        (self.METHODS[method]['project_id'], project_name),
        (self.METHODS[method]['column'], 'tracker'),
        ('authenticity_token', token),
        (self.METHODS[method]['column'], 'status'),
        (self.METHODS[method]['column'], 'priority'),
        (self.METHODS[method]['column'], 'subject'),
        (self.METHODS[method]['column'], 'assigned_to'),
        (self.METHODS[method]['column'], 'updated_on'),
        (self.METHODS[method]['column'], 'category'),
        (self.METHODS[method]['column'], 'fixed_version'),
        (self.METHODS[method]['column'], 'done_ratio'),
        (self.METHODS[method]['column'], 'author'),
        (self.METHODS[method]['column'], 'start_date'),
        (self.METHODS[method]['column'], 'due_date'),
        (self.METHODS[method]['column'], 'estimated_hours'),
        (self.METHODS[method]['column'], 'created_on'),
    )
    for key, value in kwargs.iteritems():
        if value:
            # Convert the human-readable label into the form's value id,
            # and add the field/operator triplet the tracker expects.
            value = self.page.get_value_from_label(
                self.METHODS[method]['value'] % key, value)
            data += ((self.METHODS[method]['value'] % key, value), )
            data += ((self.METHODS[method]['field'], key), )
            data += ((self.METHODS[method]['operator'] % key, '~'), )
    if method == 'POST':
        self.location('/issues?set_filter=1&per_page=100', urlencode(data))
    else:
        data += (('set_filter', '1'), ('per_page', '100'))
        self.location(self.buildurl('/issues', *data))
    assert self.is_on_page(IssuesPage)
    return {
        'project': self.page.get_project(project_name),
        'iter': self.page.iter_issues(),
    }
def login(self):
    """Log in on gdcvault.com.

    :raises BrowserBanned: when the API refuses to open a session.
    :raises BrowserIncorrectPassword: when the credentials are refused.
    """
    if self.password is None:
        return
    credentials = urlencode({
        'remember_me': 0,
        'email': self.username,
        'password': self.password,
    })
    data = self.readurl('http://gdcvault.com/api/login.php', credentials)
    # some data returned as JSON, not sure yet if it's useful
    if data is None:
        self.openurl('/logout', '')
        raise BrowserBanned('Too many open sessions?')
    self.location('/', no_login=True)
    if not self.is_logged():
        raise BrowserIncorrectPassword()
def translate(self, lan_from, lan_to, text):
    """Translate *text* via joel.net's Ebonics translator.

    Only the single language pair advertised upstream is supported.

    :raises LanguageNotSupported: for any other language pair.
    :raises TranslationFail: when the result cannot be scraped.
    """
    if lan_from != 'English' or lan_to != 'Nigger!':
        raise LanguageNotSupported()
    with self.browser:
        data = {'English': text.encode('utf-8')}
        doc = self.browser.location('http://joel.net/EBONICS/Translator',
                                    urlencode(data))
        try:
            # The translation lives in a nested speech-bubble div.
            text = doc.getroot().cssselect(
                'div.translateform div.bubble1 div.bubblemid')[0].text
        except IndexError:
            raise TranslationFail()
    # An empty bubble also counts as a failed translation.
    if text is None:
        raise TranslationFail()
    translation = Translation(0)
    translation.lang_src = unicode(lan_from)
    translation.lang_dst = unicode(lan_to)
    translation.text = unicode(text).strip()
    return translation
def iter_history_old(self, account):
    """Yield the history of *account*, caching the transactions so that
    repeated calls do not hit the website again."""
    if self.cache.get(account.id, None) is None:
        self.cache[account.id] = {}
        self.cache[account.id]["history"] = []
        if not self.accounts.is_here() and not self.accounts2.is_here():
            self.go_on_accounts_list()
        url = account.url
        if not url:
            return
        while url is not None:
            if self.accounts.is_here() or self.accounts2.is_here():
                self.location(url)
            else:
                # From a transactions page, navigation goes through the
                # left-nav form instead of a plain GET.
                form = self.page.get_form(name='leftnav')
                form.url = url
                form.submit()
            assert self.transactions.is_here()
            trs = sorted_transactions(
                self.page.get_history(account.currency))
            for tr in trs:
                self.cache[account.id]["history"] += [tr]
                yield tr
            if self.page.is_last():
                url = None
            else:
                # Next page is the same URL with BPIndex incremented.
                v = urlsplit(url)
                args = dict(parse_qsl(v.query))
                args['BPIndex'] = int(args['BPIndex']) + 1
                url = '%s?%s' % (v.path, urlencode(args))
    else:
        # Already fetched once: replay from the cache.
        for tr in self.cache[account.id]["history"]:
            yield tr
def search_housings(self, type, cities, nb_rooms, area_min, area_max,
                    cost_min, cost_max, house_types):
    """POST a housing search and iterate over matching housings."""
    if type not in TYPES:
        raise TypeNotSupported()
    self.session.headers.update(
        {'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'})
    data = {'geo_objets_ids': ','.join(cities),
            'surface[min]': area_min or '',
            'surface[max]': area_max or '',
            'prix[min]': cost_min or '',
            'prix[max]': cost_max or '',
            'produit': TYPES.get(type, 'location'),
            'recherche': 1,
            'nb_resultats_par_page': 40,
            }
    if nb_rooms:
        # Only a range is supported; pin both bounds to the exact count.
        data['nb_pieces[min]'] = nb_rooms
        data['nb_pieces[max]'] = nb_rooms
    if type == POSTS_TYPES.FURNISHED_RENT:
        data['tags[]'] = 'meuble'
    ret = []
    if type == POSTS_TYPES.VIAGER:
        # Viager searches ignore house_types: a single fixed type is used.
        ret = ['viager']
    else:
        for house_type in house_types:
            if house_type in RET:
                ret.append(RET.get(house_type))
    # The repeated 'typesbien[]' parameter cannot be expressed with a plain
    # dict, so it is appended by hand to the urlencoded body
    # ('%5B%5D' is the urlencoded '[]').
    _data = '%s%s%s' % (urlencode(data),
                        '&typesbien%5B%5D=',
                        '&typesbien%5B%5D='.join(ret))
    return self.search_page.go(data=_data).iter_housings(
        query_type=type
    )
def add_qs(url, **kwargs):
    """Return *url* with the query parameters in *kwargs* added,
    overwriting existing parameters of the same name in place."""
    scheme, netloc, path, params, query, fragment = urlparse(url)
    merged = OrderedDict(parse_qsl(query))
    merged.update(kwargs)
    return urlunparse((scheme, netloc, path, params,
                       urlencode(merged), fragment))
def build_authorization_uri(self):
    """Return the OAuth authorization URL, merging our parameters into
    any query string already present in AUTHORIZATION_URI."""
    parsed = urlparse(self.AUTHORIZATION_URI)
    query = dict(parse_qsl(parsed.query))
    query.update(self.build_authorization_parameters())
    return parsed._replace(query=urlencode(query)).geturl()
def get_params(self):
    """Return the urlencoded bill identification parameters."""
    return urlencode({
        'billid': Dict('id')(self),
        'billDate': Dict('dueDate')(self),
    })
def obj_url(self):
    """Build the invoice download URL for this document."""
    date_qs = urlencode({'billDate': Dict('dueDate')(self)})
    return ('https://espaceclientpro.orange.fr/api/contract/%s/bill/%s/facture?billId=&%s'
            % (Env('subid')(self),
               Dict('mainDir')(self.el['documents'][0]),
               date_qs))
def advanced_search_job(self, job_name, place, contract, limit_date):
    """Search job adverts filtered by name, place, contract and date.

    Fix: *job_name* was passed through quote_plus() before being fed to
    urlencode(), which percent-encodes it a second time (a space became
    '+' and then '%2B', so the server received a literal plus sign).
    urlencode() alone performs the required escaping.
    """
    search = contract if contract else ''
    query = {'q': job_name, 'where': place, 'tm': limit_date}
    return self.adv_search.go(search='%s?%s' % (search, urlencode(query)),
                              page=1).iter_job_adverts()
def getOptions(self):
    """Fetch the search page with empty filters and return the available
    filter options."""
    blank_filters = {'talent': '', 'generations': '', 'tier': ''}
    self.pokeSearch.go(request=urlencode(blank_filters))
    return self.page.get_options()
def do_pokeSearch(self, request):
    """Run a Pokemon search with the criteria carried by *request*."""
    criteria = urlencode({
        'talent': request.ability,
        'generations': request.generations,
        'tier': request.tier,
    })
    self.pokeSearch.go(request=criteria)
    return self.page.get_pokemons()