def _update_from_iter(self):
    if self.it is not None:
        self.file = self.tree.get_model().get_value(self.it, 1)
        if self.tree.get_model().get_value(self.it, 9):
            # FIXME: check whether the file already exists (i.e. we downloaded it previously)
            tmp = os.path.join(utils.UPDT_DIR, self.file)

            if os.path.exists(tmp):
                # Check that the file is intact
                bytes = os.path.getsize(tmp)
                md5 = generate.Generator.checksum(tmp)

                if md5 != self.tree.get_model().get_value(self.it, 4) or \
                        int(bytes) != self.tree.get_model().get_value(self.it, 3):
                    os.remove(tmp)
                    self._thread(self._update_file, utils.url_encode(BASE_DIR + self.file))
                else:
                    self._update_percentage()
                    self._go_with_next_iter()
            else:
                self._thread(self._update_file, utils.url_encode(BASE_DIR + self.file))
        else:
            self._update_percentage()
            self._go_with_next_iter()
    else:
        self.xml_util.dump_tree_to_file(self.diff_object,
                                        os.path.join(utils.UPDT_DIR, ".diff.xml"))
        utils.info(_("Restart to proceed with the PyAcqua update"))
        self.destroy()
def verify_file(self, filename):
    option_files = glob.glob(str(BASE_DIR) + '/storages/**/options.json', recursive=True)
    # loop through all available option files
    for option in option_files:
        json_option = utils.reading_json(option)

        stdout_path = json_option.get('WORKSPACES') + "/" + filename
        if utils.not_empty_file(stdout_path):
            return json_option.get('WORKSPACES'), os.path.normpath(filename)

        # get real path
        p = Path(filename)
        ws = p.parts[0]
        if ws != utils.url_encode(ws):
            # just replace the first one
            filename_encode = filename.replace(ws, utils.url_encode(ws), 1)
            stdout_path_encode = json_option.get('WORKSPACES') + filename_encode
            if utils.not_empty_file(stdout_path_encode):
                return json_option.get('WORKSPACES'), os.path.normpath(filename_encode)

    return False, False
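# Usage sketch for verify_file (hypothetical instance and filename: assumes the
# enclosing handler object is available as `handler` and that workspaces live
# under storages/<ws>/options.json, as the glob above implies):
#
#   ws_root, real_path = handler.verify_file('example.com/portscan/summary.txt')
#   if ws_root is not False:
#       print(os.path.join(ws_root, real_path))
#   else:
#       print('file not found in any workspace')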
def get_instrument_history(
    cls,
    in_instrument,          # <Instrument>
    granularity,            # string
    count,                  # optional - int - leave out if both start & end specified
    from_time,              # optional - datetime
    to,                     # optional - datetime
    price,                  # optional - string
    include_first,          # optional - bool - Oanda wants 'true'/'false'
    daily_alignment,        # 0 to 23 - optional
    alignment_timezone,     # timezone - optional
    weekly_alignment        # 'Monday' etc. - optional
):
    """Return type: dict or None"""
    if count is not None and from_time is not None and to is not None:
        raise Exception('Specify at most two of: count, from_time, to.')
    args = ''
    if granularity is not None:
        args = args + '&granularity=' + granularity
    if count is not None:
        args = args + '&count=' + str(count)
    if from_time is not None:
        args = args + '&from=' + utils.url_encode(util_date.date_to_string(from_time))
    if to is not None:
        args = args + '&to=' + utils.url_encode(util_date.date_to_string(to))
    if price is not None:
        args = args + '&price=' + price
    if include_first is not None:
        if include_first:
            args = args + '&includeFirst=' + 'true'
        else:
            # Bug fix: the original appended 'true' in both branches.
            args = args + '&includeFirst=' + 'false'
    if daily_alignment is not None:
        args = args + '&dailyAlignment=' + str(daily_alignment)
    if alignment_timezone is not None:
        args = args + '&alignmentTimezone=' + alignment_timezone
    if weekly_alignment is not None:
        args = args + '&weeklyAlignment=' + weekly_alignment

    result = cls.fetch(in_url='{}/v3/instruments/{}/candles?{}'.format(
        Config.oanda_url, in_instrument.get_name(), args))
    if result is None:
        DB.bug('"oanda.py" get_instrument_history(): Failed to fetch.')
        Log.write('"oanda.py" get_instrument_history(): Failed to fetch.')
        return None
    return result
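# Usage sketch (hypothetical wiring: assumes an Oanda-like class exposing the
# classmethod above and an Instrument with get_name(); values are examples):
#
#   candles = Oanda.get_instrument_history(
#       in_instrument=Instrument('EUR_USD'),
#       granularity='M1',
#       count=None,
#       from_time=datetime(2020, 1, 1),
#       to=datetime(2020, 1, 2),
#       price='M',
#       include_first=True,
#       daily_alignment=None,
#       alignment_timezone=None,
#       weekly_alignment=None
#   )
#   if candles is not None:
#       for c in candles.get('candles', []):
#           print(c['time'])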
def _get_search_result_url(query, start_date, end_date):
    url_prefix = ('https://search.naver.com/search.naver?where=news&query={0}'
                  '&sm=tab_opt&sort=0&photo=0&field=0&reporter_article=&pd=3'
                  '&ds={1}&de={2}')
    start_date_ = start_date.replace('-', '.')
    end_date_ = end_date.replace('-', '.')
    search_result_url = url_prefix.format(url_encode(query), start_date_, end_date_)
    return search_result_url
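# Illustrative use of _get_search_result_url (example values only; the helper
# URL-encodes the query and converts YYYY-MM-DD dates to Naver's dotted
# YYYY.MM.DD form):
#
#   url = _get_search_result_url('삼성전자', '2020-01-01', '2020-01-31')
#   # ...&ds=2020.01.01&de=2020.01.31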
def search(self, scope, query):
    q = url_encode({
        'q': query,
        '_': '%s' % int(time.time() * 1000)
    })
    # Use the encoded query string; the original interpolated the raw query
    # and left `q` unused, which breaks on spaces and non-ASCII queries.
    url = 'https://www.pinterest.com/search/{}/?{}'.format(scope, q)
    print(url)
    response = self.get(url=url)

    # The search results are embedded in the page as a JSON blob inside a
    # <script type="application/json"> tag; slice it out and parse it.
    html = response.text[response.text.find('application/json'):]
    html = html[html.find('{'):html.find('}</script>') + 1]
    search_result = json.loads(html)

    results = []
    try:
        if len(search_result['resources']['data']['BaseSearchResource']):
            search_resource = next(iter(
                search_result['resources']['data']['BaseSearchResource'].values()))
            results = search_resource['data']['results']
            self.next_book_marks[scope][query] = search_resource['nextBookmark']
    except KeyError:
        pass
    return results
def __search_next_page(self, scope, query):
    if not self.next_book_marks[scope].get(query):
        return self.search(scope, query)
    q = url_encode({
        'source_url': '/search/{}/?q={}'.format(scope, query),
        'data': json.dumps({
            'options': {
                'bookmarks': [self.next_book_marks[scope][query]],
                'query': query,
                'scope': scope
            },
            'context': {}
        }).replace(' ', ''),
        '_': '%s' % int(time.time() * 1000)
    })
    url = 'https://www.pinterest.com/resource/BaseSearchResource/get/?{}'.format(q)
    response = self.get(url=url, ajax=True).json()

    results = []
    try:
        if response['resource_response']['error'] is not None:
            error = response['resource_response']['error']
            raise Exception('[{}] {}'.format(error['http_status'], error['message']))
        results = response['resource_response']['data']['results']
        self.next_book_marks[scope][query] = response['resource']['options']['bookmarks']
    except KeyError:
        pass
    return results
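# Pagination sketch (hypothetical: assumes the enclosing class is named
# Pinterest and instantiated as `pinterest`; __search_next_page is
# name-mangled, so external callers would normally go through a public
# wrapper instead):
#
#   first_page = pinterest.search('pins', 'vintage posters')
#   # search() stores the 'nextBookmark' cursor, which the next call reuses:
#   second_page = pinterest._Pinterest__search_next_page('pins', 'vintage posters')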
def post(self, workspace):
    ws_name = utils.get_workspace(workspace=workspace)
    options_path = current_path + '/storages/{0}/options.json'.format(ws_name)
    self.options = utils.reading_json(options_path)

    module = request.args.get('module')
    ws_name = os.path.basename(os.path.normpath(workspace))
    ws_name_encode = utils.url_encode(ws_name)
    utils.print_debug(ws_name)

    raw_logs = None
    if ws_name in os.listdir(self.options['WORKSPACES']):
        ws_json = self.options['WORKSPACES'] + "/{0}/log.json".format(ws_name)
        raw_logs = utils.reading_json(ws_json)
    elif ws_name_encode in os.listdir(self.options['WORKSPACES']):
        ws_json = self.options['WORKSPACES'] + "/{0}/log.json".format(
            utils.url_encode(ws_name))
        raw_logs = utils.reading_json(ws_json)

    if raw_logs:
        all_commands = []
        for k in raw_logs.keys():
            for item in raw_logs[k]:
                cmd_item = item
                cmd_item["module"] = k
                cmd_item['std_path'] = utils.replace_argument(
                    self.options, item.get('std_path')).replace(
                        self.options['WORKSPACES'], '')
                cmd_item['output_path'] = utils.replace_argument(
                    self.options, item.get('output_path')).replace(
                        self.options['WORKSPACES'], '')
                all_commands.append(cmd_item)

        return {"commands": all_commands}
    else:
        return {
            "error": "No log file found for the {0} workspace".format(ws_name)
        }
def __checkFileToUpdate(self, schema):
    self.mirrors = schema.getList("mirrors", "url")
    self.program = schema.getProgramName()

    if self.program is None or self.mirrors is None:
        utils.error(_("Unable to proceed with the update. No mirror found or program name missing"))
        self.status.push(0, _("No mirror provided or program name missing"))
        return

    self._thread(self.__markFileForUpdate,
                 utils.url_encode("%s/%s-update.db" % (self.mirrors[0], self.program)))
def __markFileForUpdate(self, data, response):
    # Until the database is downloaded successfully, try the next mirror
    if not data or response.status != 200:
        # Drop the mirror that just failed before picking the next one
        # (the original retried the same mirror and, lacking a return,
        # fell through to __diffDatabase with invalid data).
        del self.mirrors[0]

        if len(self.mirrors) == 0:
            utils.error(_("Unable to download the revisions database"))
            self.status.push(0, _("Unable to download the revisions database"))
        else:
            self._thread(self.__markFileForUpdate,
                         utils.url_encode("%s/%s-update.db" % (self.mirrors[0], self.program)))
        return

    # OK, the database was downloaded correctly
    self.__diffDatabase(data)
def main():
    # should change these to use a dynamically loaded file
    drugs = utils.load_drug_file()
    creds = utils.load_credentials_file()
    api_client = twython.Twython(*creds)

    # Quote multi-word drug names so Twitter treats them as exact phrases
    drugs_to_search = ['"' + el + '"' if el.find(" ") >= 0 else el
                       for el in drugs["risperidone"]]
    query = utils.url_encode(" OR ".join(drugs_to_search))
    print(query)

    # additional parameters can control date range, guessed language, etc.
    results = api_client.search(q=query, count=100, lang="en")
    for result in results["statuses"]:
        print(result["text"])
        print("")

    tl = get_timeline(api_client, "1265683232")
    tlt = [t["text"] for t in tl]
    print(tlt)
def index():
    if not session.get('auth_header'):
        auth_url = url_encode(SPOTIFY_AUTH_URL, AUTH_QUERY_PARAMETERS)
        return redirect(auth_url)
    else:
        return redirect(url_for('home'))