def test_get_add():
    # GET /add should report the sum of its query parameters: -1 + 2 == 1.
    response = tester.get("/add?a=-1&b=2")
    logger.debug(response.status_code)
    assert response.status_code == 200
    logger.debug(response.json())
    assert response.json()["total"] == 1
def test_add():
    # POST /add with a JSON body; the payload must be passed via the
    # json= keyword so it is sent as JSON rather than form data.
    response = tester.post("/add", json={"items": [-1, 2, 3]})
    logger.debug(response.status_code)
    assert response.status_code == 200
    logger.debug(response.json())
    assert response.json()["total"] == 4
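# A minimal sketch (an assumption, not shown in the original) of the
# setup the two tests above rely on: the FastAPI app wrapped in a
# TestClient, plus a loguru logger to match the brace-style log calls
# used by the handlers further below. `main` is a hypothetical module.
from fastapi.testclient import TestClient
from loguru import logger

from main import app  # hypothetical module exposing the FastAPI app

tester = TestClient(app)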
def __request(self, symbol, stat):
    try:
        url = 'http://finance.yahoo.com/d/quotes.csv?s=%s%s&f=%s' % (
            symbol, self.__chinaSymbolPrefix(symbol), stat)
        logger.debug("querying finance.yahoo.com...")
        # urlopen() returns bytes under Python 3: decode before
        # stripping whitespace and the surrounding CSV quotes.
        return urllib.request.urlopen(url).read().decode('utf-8').strip().strip('"')
    except IOError:
        raise UfException(Errors.NETWORK_ERROR, "Can't connect to Yahoo server")
    except BaseException:
        raise UfException(Errors.UNKNOWN_ERROR,
                          "Unknown Error in YahooFinance.__request %s" % traceback.format_exc())
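def getLastPrice(self, symbol):
    # Hypothetical convenience wrapper, not part of the original,
    # assumed to live in the same class as __request(): the retired
    # quotes.csv API selected fields with single-letter tags, 'l1'
    # being the last trade price.
    return float(self.__request(symbol, 'l1'))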
def on_deleted(self, event):
    '''
    @summary: File-deletion event handler; removes the deleted path
              from the search index (directories are ignored).
    '''
    if not event.is_directory:
        self.watcher.index.remove([event.src_path])
        logger.debug("DELETE %s" % event.src_path)
    FileSystemEventHandler.on_deleted(self, event)
def unschedule(self, path):
    '''
    @summary: Remove the watch for the given file path from the watch
              list. No further cleanup is performed.
    @warning: On Linux the watch thread does not die; needs testing
              and a fix.
    '''
    path = abspath(path)
    Observer.unschedule(self, self.watch_path[path])
    del self.watch_path[path]
    logger.debug("unschedule %s" % path)
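def schedule(self, event_handler, path, recursive=False):
    # Hypothetical counterpart, not in the original, sketching how
    # watch_path is assumed to be populated: watchdog's
    # Observer.schedule() returns an ObservedWatch handle, which
    # unschedule() above later looks up by absolute path.
    path = abspath(path)
    watch = Observer.schedule(self, event_handler, path, recursive=recursive)
    self.watch_path[path] = watch
    logger.debug("schedule %s" % path)
    return watch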
def getQuotes(self, symbol, start, end):
    """
    Get historical prices for the given ticker symbol.
    Date format is 'YYYY-MM-DD'.

    Returns a list of Quote objects sorted by date.
    """
    try:
        _start = start.strftime('%Y%m%d')
        _end = end.strftime('%Y%m%d')
        # Yahoo's (retired) ichart CSV API encoded the range with
        # zero-based months (d/a), literal days (e/b) and years (f/c);
        # g=d requests daily quotes.
        url = 'http://ichart.yahoo.com/table.csv?s=%s%s&' % (symbol, self.__chinaSymbolPrefix(symbol)) + \
              'd=%s&' % str(int(_end[4:6]) - 1) + \
              'e=%s&' % str(int(_end[6:8])) + \
              'f=%s&' % str(int(_end[0:4])) + \
              'g=d&' + \
              'a=%s&' % str(int(_start[4:6]) - 1) + \
              'b=%s&' % str(int(_start[6:8])) + \
              'c=%s&' % str(int(_start[0:4])) + \
              'ignore=.csv'
        logger.debug("querying finance.yahoo.com for stock %s..." % symbol)
        resp = urllib.request.urlopen(url)
        if resp.getcode() == 404:
            raise UfException(Errors.NETWORK_404_ERROR, "data error, not found")
        days = resp.readlines()
        # rstrip() removes the trailing line ending whether it is '\n'
        # or '\r\n' (the original sliced off two characters, which
        # truncated the last field on LF-only responses).
        values = [day.decode('utf-8').rstrip().split(',') for day in days]
        # sample values:
        # [['Date', 'Open', 'High', 'Low', 'Close', 'Volume', 'Adj Close'],
        #  ['2009-12-31', '112.77', '112.80', '111.39', '111.44', '90637900', '109.7'], ...]
        data = []
        for value in values[1:]:
            if len(value) < 7:
                raise UfException(Errors.UNKNOWN_ERROR, "data error, value=%s" % value)
            day = datetime.datetime.strptime(value[0], '%Y-%m-%d')
            data.append(Quote(day, value[1], value[2], value[3], value[4], value[5], value[6]))
        return sorted(data, key=lambda q: q.time)
    except IOError:
        raise UfException(Errors.NETWORK_ERROR, "Can't connect to Yahoo server")
    except UfException:
        raise
    except BaseException as excp:
        raise UfException(Errors.UNKNOWN_ERROR, "Error in YahooFinance: %s" % excp)
    # sample output:
    # [Quote('2010-01-04', '112.37', '113.39', '111.51', '113.33',
    #        '118944600', '111.6'), ...]
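def _yahooIchartDates(start, end):
    # Illustrative helper, an assumption rather than original code,
    # spelling out the date encoding used above: months (a/d) are
    # zero-based, which is why getQuotes() subtracts 1, while days
    # (b/e) and years (c/f) are taken literally.
    return {
        'a': start.month - 1, 'b': start.day, 'c': start.year,
        'd': end.month - 1, 'e': end.day, 'f': end.year,
    }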
def getAllStockSymbols(self):
    """
    Get all China stock symbols.

    Returns a dict mapping symbol -> company name.
    """
    stocks = {}
    url = 'http://quote.eastmoney.com/stocklist.html'
    logger.debug("querying quote.eastmoney.com for full stock list...")
    html = urllib.request.urlopen(url).read()
    html = html.decode('gb2312', 'replace')
    soup = BeautifulSoup(html, 'html.parser')
    # Listing links look like:
    #   <a target="_blank" href="http://quote.eastmoney.com/sh600000.html">Name(600000)</a>
    for item in soup.find_all(
            "a",
            href=re.compile(r"^http://quote\.eastmoney\.com/(sh|sz)\d{6}\.html"),
            target="_blank"):
        symbol = item.string[-7:-1]  # six-digit code inside the parentheses
        name = item.string[0:-8]     # company name before the parentheses
        prefix = symbol[0:3]
        if prefix not in (chinaStock.SS_PREFIX | chinaStock.SZ_PREFIX):
            continue
        stocks[symbol] = name
    return stocks
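# Hypothetical usage of the scrapers above; YahooFinance is the class
# name suggested by the error messages, and both endpoints have long
# been retired, so this is illustrative only.
yf = YahooFinance()
for symbol, name in sorted(yf.getAllStockSymbols().items())[:5]:
    print(symbol, name)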
def unarchive(datdir, archdir=None, filepaths=None, remove_tiger_files=False):
    """ Unarchive all dat files in `archdir`, and put them in `datdir`. """
    if not (archdir or filepaths):
        print_err('No file paths or archive directory specified!')
        return 1
    try:
        # Known archived files.
        archive = Archive(archdir, datdir)
    except (OSError, ValueError) as ex:
        print_err(ex)
        return 1

    if filepaths:
        filepath_errs = 0
        for filepath in filepaths:
            if filepath not in archive:
                print_err('Not an archived file: {}'.format(filepath))
                filepath_errs += 1
        if filepath_errs:
            return filepath_errs

    errs = 0
    success = 0
    for archfile in archive.files:
        if filepaths and (archfile.filepath not in filepaths):
            debug('Archive file not selected: {}'.format(archfile.filepath))
            continue
        try:
            archfile.unarchive(remove_tiger_files=remove_tiger_files)
        except OSError as ex:
            print_err(ex)
            errs += 1
        else:
            status('Unarchived', archfile.dest_path)
            success += 1

    status(
        'Unarchived Files',
        '{} ({} {})'.format(
            success,
            errs,
            'Error' if errs == 1 else 'Errors',
        ))
    config_increment(unarchive_files=success, default=0)
    return errs
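# Hypothetical usage (the paths are placeholders): restore every
# archived .dat file into the dat directory, removing the generated
# .tiger files along the way.
errs = unarchive('./dats', archdir='./tigertamer_archive', remove_tiger_files=True)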
def analyze_file(self, path):
    '''
    @summary: Analyze a single file. If this is handed off to a
              thread, this method is the worker.
    '''
    try:
        index = ChildIndex()
        # libmagic appears to have a bug that shows up at GC time.
        mtype = self.mime.from_file(path)
        if mtype in EXCLUDE_MIME:
            # Excluded MIME type: index the file name only.
            index.add(path, os.path.basename(path))
            logger.debug("IGNORE: %s" % path)
        # Match each pattern in turn and read out the text to index.
        elif pattern['pdf'].search(mtype):
            # TODO: extract the text from the PDF and pass it as the
            # second argument to index.add().
            index.add(path, os.path.basename(path))
            logger.debug("PDF '%s'" % path)
        else:
            index.add(path)
            logger.debug("ADD Index '%s'" % path)
    except Exception as err:
        logger.error(err)
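# A sketch (assumed, not shown in the original) of the module-level
# pieces analyze_file() relies on: python-magic for MIME detection
# (stored on self.mime in the original), a set of MIME types to skip,
# and regexes keyed by file kind. The values are illustrative only.
import re
import magic

EXCLUDE_MIME = {"application/octet-stream"}
pattern = {'pdf': re.compile(r'application/pdf')}
mime = magic.Magic(mime=True)  # mime.from_file(path) -> MIME type string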
async def read_main():
    logger.debug("Hello")
    logger.info("Hello")
    return {"msg": "Hello World"}


def get_add(a: float, b: float):
    # Build an Item from the two query parameters and reuse the POST
    # handler to compute the total.
    logger.debug("input {} + {}", a, b)
    item = Item(items=[a, b])
    return add_item(item)


def add_item(item: Item):
    logger.debug("input {}", item)
    item.total = add(item.items)
    return item
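# A minimal sketch of the FastAPI wiring the three handlers above
# assume; the routes, model fields, and add() helper are inferred from
# usage (GET /add with query params, POST /add with a JSON items list)
# and are assumptions, not the original source.
from typing import List, Optional

from fastapi import FastAPI
from loguru import logger
from pydantic import BaseModel

app = FastAPI()


class Item(BaseModel):
    items: List[float]
    total: Optional[float] = None


def add(values: List[float]) -> float:
    return sum(values)

# Decorators assumed on the handlers above:
#   @app.get("/")     -> read_main
#   @app.get("/add")  -> get_add
#   @app.post("/add") -> add_item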
def main(argd):
    """ Main entry point, parses arguments and dispatches accordingly.
        Arguments:
            argd  : Docopt arg dict.
    """
    set_debug_mode(argd['--debug'])
    debug('Debugging enabled.')

    # Get input paths, with no blanks (mainly for testing error messages).
    argd['FILE'] = [s for s in argd['FILE'] if s.strip()]
    inpaths = argd['FILE'] or config_get('dat_dir', [])
    debug('Input paths for conversion: {}'.format(inpaths))
    if all(s.lower().endswith('.tiger') for s in inpaths):
        # If all input files are tiger files, --view is implicit.
        argd['--view'] = True

    outdir = (argd['--output'] or config_get('tiger_dir', './tigertamer_output'))
    archdir = (argd['--archive'] or config_get('archive_dir', './tigertamer_archive'))
    ignore_dirs = set(config_get('ignore_dirs', []))
    ignore_dirs.update(set(argd['--ignore']))
    ignore_strs = set(config_get('ignore_strs', []))
    ignore_strs.update(set(argd['--IGNORE']))
    if outdir and (outdir != '-'):
        ignore_dirs.add(outdir)
    if archdir and (archdir != '-'):
        ignore_dirs.add(archdir)

    # Handle config/arg flags.
    argd['--extra'] = config_get('extra_data', argd['--extra'])
    argd['--nosplit'] = config_get('no_part_split', argd['--nosplit'])

    if argd['--gui'] and argd['--ARCHIVE']:
        # Little hack to force calling `cmd_menu_unarchive` on load.
        argd['--func'] = 'cmd_menu_unarchive'

    if argd['--gui'] or argd['--func']:
        # The GUI handles arguments differently, send it the correct config.
        if argd['--view'] or argd['--preview']:
            # Supply input paths from config. They won't be used.
            # It keeps the GUI from overwriting the last known dat dir
            # when viewing/previewing files.
            inpaths = config_get('dat_dir', [])
            debug('Input paths reloaded/saved: {}'.format(inpaths))
        return load_gui(
            auto_exit=config_get('auto_exit', False),
            auto_run=argd['--run'],
            extra_data=argd['--extra'],
            no_part_split=argd['--nosplit'],
            geometry=config_get('geometry', ''),
            geometry_about=config_get('geometry_about', ''),
            geometry_labels=config_get('geometry_labels', ''),
            geometry_report=config_get('geometry_report', ''),
            geometry_unarchive=config_get('geometry_unarchive', ''),
            geometry_viewer=config_get('geometry_viewer', ''),
            theme=config_get('theme', ''),
            archive_dir='' if archdir in (None, '-') else archdir,
            dat_dir=inpaths[0] if inpaths else '',
            tiger_dir='' if outdir in (None, '-') else outdir,
            ignore_dirs=tuple(ignore_dirs),
            ignore_strs=tuple(ignore_strs),
            run_function=argd['--func'],
            tiger_files=argd['FILE'] if argd['--view'] else None,
            preview_files=argd['FILE'] if argd['--preview'] else None,
        )

    # Console mode, need a lock.
    try:
        lock_acquire()
    except ValueError:
        print_err('{} already running.'.format(NAME))
        return 3

    if argd['--ARCHIVE']:
        # List archive files.
        return list_archive(archdir, inpaths[0])
    if argd['--functions']:
        # List functions available for -f.
        return list_funcs()
    if argd['--labelconfig']:
        # List label config being used.
        return list_labelconfig()
    if argd['--masterfile'] or argd['--MASTERFILE']:
        return view_lines_files(argd['FILE'], separate_widths=argd['--MASTERFILE'])
    if argd['--preview']:
        # Preview a .dat file as a .tiger file.
        return preview_files(argd['FILE'])
    if argd['--tree'] or argd['--TREE']:
        return view_tree_files(argd['FILE'], separate_widths=argd['--TREE'])
    if argd['--view']:
        # View a tiger file.
        return view_tigerfiles(argd['FILE'])
    if argd['--unarchive'] or argd['--UNARCHIVE']:
        if not (argd['ARCHIVE_FILE'] or options_are_set(inpaths, archdir)):
            raise InvalidConfig('.dat dir and archive dir must be set in config.')
        errs = unarchive(
            inpaths[0],
            archdir,
            filepaths=argd['ARCHIVE_FILE'],
            remove_tiger_files=argd['--UNARCHIVE'],
        )
        return errs

    # Run in console mode.
    if not inpaths:
        raise InvalidArg('No input files/directories!')

    time_start = time()
    mozfiles = load_moz_files(
        inpaths,
        ignore_dirs=ignore_dirs,
        ignore_strs=ignore_strs,
        split_parts=not argd['--nosplit'],
    )

    parentfiles = set()
    errs = 0
    for mfile in mozfiles:
        parentfiles.add(mfile.parent_file)
        errs += handle_moz_file(
            mfile,
            outdir,
            names_only=argd['--namesonly'],
            archive_dir=archdir,
            extra_data=argd['--extra'],
        )

    parentlen = len(parentfiles)
    status(
        C(' ').join(
            C('Finished with', 'cyan'),
            C(parentlen, 'blue', style='bright'),
            C(' ').join(
                C('master', 'cyan'),
                C('file' if parentlen == 1 else 'files', 'cyan'),
            ),
            C(' ').join(
                C(errs, 'blue', style='bright'),
                C('error' if errs == 1 else 'errors', 'cyan'),
            ).join('(', ')', style='bright'),
        ))
    for pfile in sorted(parentfiles):
        debug('Parent file: {}'.format(pfile))
    config_increment(
        master_files=parentlen,
        tiger_files=len(mozfiles),
        runs=1,
        runtime_secs=time() - time_start,
        default=0,
    )
    return errs
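# Hypothetical entry point for main() above, following the usual
# docopt pattern; USAGESTRING and VERSION are assumed names that do
# not appear in the original.
import sys
from docopt import docopt

if __name__ == '__main__':
    try:
        mainret = main(docopt(USAGESTRING, version=VERSION))
    except (InvalidArg, InvalidConfig) as ex:
        print_err(ex)
        mainret = 1
    sys.exit(mainret)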