def vk_auth(login=None, password=None, captcha_handler=captcha_saver):
    """Authenticate against the VK API and resolve the current user.

    Parameters:
        login: VK login; prompted for interactively when None.
        password: account password. When None, a token cached by vk_api in
            vk_config.v2.json is tried first; if absent the user is prompted.
        captcha_handler: callback invoked by vk_api on captcha challenges.

    Returns:
        Tuple (vk, vk_access_token, my_id, my_screen_id): the authenticated
        VkApi session, its access token, the numeric user id and the
        user's screen name.
    """
    vk_api.VkApi.too_many_rps_handler = too_many_rps_handler
    if login is None:
        login = input("Введите логин: ")

    # Token previously cached by vk_api for this login, if any.
    cached_token = _cached_vk_token(login)

    if password is not None:
        vk = vk_api.VkApi(login, password,
                          auth_handler=auth_handler,
                          captcha_handler=captcha_handler,
                          app_id=6479356, scope=140492255)
        # Fix: the original indexed the cached config unconditionally here
        # and raised KeyError when this login had no cached entry; fall back
        # to a freshly issued token instead (mirrors the no-password path).
        vk_access_token = cached_token if cached_token is not None else vk.token['access_token']
    elif cached_token is not None:
        # No password supplied but a usable cached token exists: reuse it,
        # so the session can be built without asking for a password.
        vk_access_token = cached_token
        vk = vk_api.VkApi(login,
                          auth_handler=auth_handler,
                          captcha_handler=captcha_handler,
                          app_id=6479356, scope=140492255)
    else:
        # No password and no cached token: prompt once and authenticate.
        password = getpass("Введите пароль: ")
        vk = vk_api.VkApi(login, password,
                          auth_handler=auth_handler,
                          captcha_handler=captcha_handler,
                          app_id=6479356, scope=140492255)
        vk_access_token = vk.token['access_token']

    vk.auth(token_only=True)
    try:
        with vk_api.VkRequestsPool(vk) as pool:
            my_id = pool.method('users.get', {'access_token': vk_access_token})
            my_screen_id = pool.method('account.getProfileInfo')
        my_id = my_id.result[0]['id']
        my_screen_id = my_screen_id.result['screen_name']
    except vk_api.exceptions.ApiError:
        # Token was rejected by the API: retry with a freshly typed password.
        vk, vk_access_token, my_id, my_screen_id = vk_auth(login, getpass("Введите пароль: "))
    return vk, vk_access_token, my_id, my_screen_id


def _cached_vk_token(login):
    """Return the access token cached in vk_config.v2.json for *login*, or None.

    None is returned when the cache file is absent, the login has no entry,
    or the entry lacks the expected app/scope keys.
    """
    if not fileexists("vk_config.v2.json"):
        return None
    with open("vk_config.v2.json") as vk_config_json:
        config = json.loads(vk_config_json.read())
    try:
        return config[f"{login}"]['token']["app6479356"]["scope_140492255"]['access_token']
    except KeyError:
        return None
def write_results(matched, alignset, targetset, resultfile):
    """Write the matched alignment pairs to *resultfile* as ';'-separated CSV.

    matched: dictionary mapping an alignset key to a list of
        (targetset key, distance) pairs.
    alignset / targetset: mappings/sequences whose first element per record
        is the identifier written to the file.
    resultfile: output path; the header line is written only when the file
        does not exist yet, otherwise rows are appended.
    """
    # Append when the file already exists so repeated runs accumulate rows.
    openmode = 'a' if fileexists(resultfile) else 'w'
    with open(resultfile, openmode) as fobj:
        if openmode == 'w':
            fobj.write('aligned;targetted;distance\n')
        for aligned in matched:
            for target, dist in matched[aligned]:
                alignid = alignset[aligned][0]
                targetid = targetset[target][0]
                # NOTE(review): `basestring` and .encode() before writing
                # imply Python 2 semantics — this function would need
                # porting before running under Python 3.
                fobj.write('%s;%s;%s\n' % (alignid.encode('utf-8') if isinstance(alignid, basestring) else alignid,
                                           targetid.encode('utf-8') if isinstance(targetid, basestring) else targetid,
                                           dist
                                           ))
' be shown initially', type=int) parser.add_argument("tdelta_days_showing", help='The number of days of data to show on the inital ' + 'page load. The user will still be able to zoom out or ' + ' pan back to tdelta_days', type=int) args = parser.parse_args() output = args.output jsonfile = args.jsonfile tdelta_days = args.tdelta_days tdelta_days_showing = args.tdelta_days_showing # SANITY CHECKS if not fileexists(jsonfile): raise RuntimeError('Could not find file %s' % jsonfile) if not fileexists(dirname(output)): raise RuntimeError('Invalid path for %s' % output) if tdelta_days < tdelta_days_showing: raise RuntimeError('The number of days of data is less than the number' + 'of days showing when loaded, did you switch them?') if tdelta_days < 1: raise RuntimeError('tdelta_days must be >= 1') if tdelta_days_showing < 1: raise RuntimeError('tdelta_days_showing must be >= 1') # if fileexists(output): # remove(output)
# NOTE(review): in the original file this `json` method belongs to a
# SQLAlchemy-style model class whose header is not visible in this chunk.
def json(self):
    """Return a plain-dict representation of this record; the related
    `filme` and `tipo_de_midia` objects are serialized via their own
    json() methods."""
    return {
        "id": self.id,
        "data_aquisicao": self.data_aquisicao,
        "filme_id": self.filme_id,
        "tipo_de_midia_id": self.tipo_de_midia_id,
        "filme": self.filme.json(),
        "tipo_de_midia": self.tipo_de_midia.json()
    }


if __name__ == "__main__":
    # Recreate the database from scratch on every run: remove the database
    # file if present, create all tables, then build sample Filme rows.
    if fileexists(arquivobd):
        removefile(arquivobd)
    db.create_all()
    titanic = Filme(titulo="Titanic", data_lancamento="05-09-1997", preco=10.99)
    homens_de_preto = Filme(titulo="MIB: Homens de Preto", data_lancamento="01-02-2005", preco=1.99)
    covid_19 = Filme(titulo="Fim dos Tempo", data_lancamento="05-09-2020", preco=100.99)
def get_html(url, name):
    """Return the cleaned, decoded HTML for *url*, cached as cache/<name>.html.

    When the module-level `cache` flag is falsy the page is downloaded and
    written to the cache file; otherwise the cached copy is read back.
    """
    cachefile = sep.join(["cache", "%s.html" % name])
    if not cache:
        h = download(url)
        with open(cachefile, 'w') as f:
            f.write(h)
    else:
        with open(cachefile, 'r') as f:
            h = f.read()
    h = clean_nonutf_title(h)
    # NOTE(review): calling .decode() here implies `h` is a byte string
    # (Python 2) — confirm before porting.
    h = h.decode('utf-8', 'replace')
    h = clean_html(h)
    return h


# Optional mapping of duplicate representative ids, keyed by int id;
# left empty when the file is absent.
ANdoublons = {}
if fileexists('divers_deprecie/doublonsAN.json'):
    with open('divers_deprecie/doublonsAN.json') as f:
        ANdoublons = dict((int(k), v) for k, v in json.load(f).items())

# Previously scraped registry entries, indexed by their id.
with open('data/registre-lobbying-AN-v2.json') as f:
    oldData = dict((dic[u'id'], dic) for dic in json.load(f))


# Extracts the information from a representative's detailed page.
# NOTE(review): this function is truncated in this chunk — its body
# continues beyond the visible text.
def extract_data(text):
    res = {}
    sublevel = False
    field, spefield = "", ""
    val = res
    for line in text.split('\n'):
        if line.startswith('<dt'):
            spefield = clean_text(line)
        elif line.startswith('</ul'):
def get_html(url, name):
    """Return the cleaned, decoded HTML for *url*, cached as cache/<name>.html.

    When the module-level `cache` flag is falsy the page is downloaded and
    written to the cache file; otherwise the cached copy is read back.
    """
    cachefile = sep.join(["cache", "%s.html" % name])
    if not cache:
        h = download(url)
        with open(cachefile, 'w') as f:
            f.write(h)
    else:
        with open(cachefile, 'r') as f:
            h = f.read()
    h = clean_nonutf_title(h)
    # NOTE(review): calling .decode() here implies `h` is a byte string
    # (Python 2) — confirm before porting.
    h = h.decode('utf-8', 'replace')
    h = clean_html(h)
    return h


# Optional mapping of duplicate representative ids, keyed by int id;
# left empty when the file is absent.
ANdoublons = {}
if fileexists('doublonsAN.json'):
    with open('doublonsAN.json') as f:
        ANdoublons = dict((int(k), v) for k, v in json.load(f).items())

# Previously scraped registry entries, indexed by their id.
with open('data/registre-lobbying-AN-v2.json') as f:
    oldData = dict((dic[u'id'], dic) for dic in json.load(f))


# Extracts the information from a representative's detailed page.
# NOTE(review): this function is truncated in this chunk — its body
# continues beyond the visible text.
def extract_data(text):
    res = {}
    sublevel = False
    field, spefield = "", ""
    val = res
    for line in text.split('\n'):
        if line.startswith('<dt'):
            spefield = clean_text(line)
        elif line.startswith('</ul'):