def collect(ip,arg): """ collect foo/a.txt rec:bar=*.py Copies foo/a.txt to ~/_ipython/collect/foo/a.txt and *.py from bar, likewise Without args, try to open ~/_ipython/collect dir (in win32 at least). """ from IPython.external.path import path basedir = path(ip.ipython_dir + '/collect') try: fs = mglob.expand(arg.split(None,1)[1]) except IndexError: os.startfile(basedir) return for f in fs: f = path(f) trg = basedir / f.splitdrive()[1].lstrip('/\\') if f.isdir(): print "mkdir",trg trg.makedirs() continue dname = trg.dirname() if not dname.isdir(): dname.makedirs() print f,"=>",trg shutil.copy2(f,trg)
def collect(ip, arg): """ collect foo/a.txt rec:bar=*.py Copies foo/a.txt to ~/_ipython/collect/foo/a.txt and *.py from bar, likewise Without args, try to open ~/_ipython/collect dir (in win32 at least). """ from IPython.external.path import path basedir = path(ip.options.IPYTHONDIR + '/collect') try: fs = mglob.expand(arg.split(None, 1)[1]) except IndexError: os.startfile(basedir) return for f in fs: f = path(f) trg = basedir / f.splitdrive()[1].lstrip('/\\') if f.isdir(): print "mkdir", trg trg.makedirs() continue dname = trg.dirname() if not dname.isdir(): dname.makedirs() print f, "=>", trg shutil.copy2(f, trg)
def sniff_repository_type(_path):
    """Return the version-control system type of the checkout at
    *_path*: 'hg', 'git' or 'svn'.

    Detection probes for the VCS metadata directory (.hg / .git / .svn)
    directly under *_path*.

    Raises NotImplementedError if no known marker directory is found.
    """
    # Probe order preserved from the original hg -> git -> svn chain.
    for marker, vcs in (('.hg', 'hg'), ('.git', 'git'), ('.svn', 'svn')):
        if (path(_path) / marker).exists():
            return vcs
    # Fixed typo in the original message ("unabled").
    raise NotImplementedError("unable to determine repository type")
def get_paths(self):
    """Return path objects for every line of this text that names an
    existing filesystem entry.  Computed once, then cached on the
    instance.
    """
    try:
        return self.__paths
    except AttributeError:
        pass
    # First call: build and memoize the list of existing paths.
    existing = []
    for line in self.split('\n'):
        if os.path.exists(line):
            existing.append(path(line))
    self.__paths = existing
    return existing
def get_paths(self):
    """Path objects for each line of this text naming an existing file
    or directory; built lazily on first access and cached after that.
    """
    try:
        cached = self.__paths
    except AttributeError:
        # Cache miss: filter the lines down to real filesystem entries.
        cached = self.__paths = [
            path(entry)
            for entry in self.split('\n')
            if os.path.exists(entry)
        ]
    return cached
def __getattr__(self, name):
    """Resolve *name* as a directory entry of this object.

    Subdirectories come back wrapped as PathObj, plain files as path
    objects; anything else raises AttributeError so the normal
    attribute protocol keeps working.
    """
    if name in self.ents:
        sep = '' if self.path.endswith('/') else '/'
        target = self.path + sep + pathobj_unmangle(name)
        if os.path.isdir(target):
            return PathObj(target)
        if os.path.isfile(target):
            return path(target)
    # Must raise AttributeError here for attribute lookup to work!
    raise AttributeError(name)
def __getattr__(self, name):
    """Look *name* up among this object's directory entries.

    Returns a PathObj for a subdirectory, a path for a regular file,
    and raises AttributeError for anything unknown (required for the
    attribute machinery to keep functioning).
    """
    if name not in self.ents:
        raise AttributeError(name)
    base = self.path
    if not base.endswith('/'):
        base += '/'
    candidate = base + pathobj_unmangle(name)
    if os.path.isdir(candidate):
        return PathObj(candidate)
    if os.path.isfile(candidate):
        return path(candidate)
    # Known entry but neither dir nor file: fall back to AttributeError.
    raise AttributeError(name)
def main(): all = path('.').files('test_*py') results = {} res_exc = [None] def exchook(self,*e): res_exc[0] = [e] ip.IP.set_custom_exc((Exception,), exchook) startdir = os.getcwd() for test in all: print test res_exc[0] = 'ok' os.chdir(startdir) ip.runlines(test.text()) results[str(test)] = res_exc[0] os.chdir(startdir) pprint.pprint(results)
def main(): all = path('.').files('test_*py') results = {} res_exc = [None] def exchook(self, *e): res_exc[0] = [e] ip.IP.set_custom_exc((Exception, ), exchook) startdir = os.getcwd() for test in all: print test res_exc[0] = 'ok' os.chdir(startdir) ip.runlines(test.text()) results[str(test)] = res_exc[0] os.chdir(startdir) pprint.pprint(results)
characters can be found in our source tree. """ # Config # If true, all lines that have tabs are printed, with line number full_report_tabs = True # If true, all lines that have tabs are printed, with line number full_report_rets = False # Code begins from IPython.external.path import path rets = [] tabs = [] for f in path("..").walkfiles("*.py"): errs = "" cont = f.bytes() if "\t" in cont: errs += "t" tabs.append(f) if "\r" in cont: errs += "r" rets.append(f) if errs: print("%3s" % errs, f) if "t" in errs and full_report_tabs: for ln, line in enumerate(f.lines()):
from IPython.external.path import path fs = path('..').walkfiles('*.py') for f in fs: errs = '' cont = f.bytes() if '\t' in cont: errs+='t' if '\r' in cont: errs+='r' if errs: print "%3s" % errs, f
from IPython.external.path import path fs = path('..').walkfiles('*.py') for f in fs: errs = '' cont = f.bytes() if '\t' in cont: errs += 't' if '\r' in cont: errs += 'r' if errs: print "%3s" % errs, f
characters can be found in our source tree. """ # Config # If true, all lines that have tabs are printed, with line number full_report_tabs = True # If true, all lines that have tabs are printed, with line number full_report_rets = False # Code begins from IPython.external.path import path rets = [] tabs = [] for f in path('..').walkfiles('*.py'): errs = '' cont = f.bytes() if '\t' in cont: errs += 't' tabs.append(f) if '\r' in cont: errs += 'r' rets.append(f) if errs: print "%3s" % errs, f if 't' in errs and full_report_tabs: for ln, line in enumerate(f.lines()):
def upgrade_dir(srcdir, tgtdir): """ Copy over all files in srcdir to tgtdir w/ native line endings Creates .upgrade_report in tgtdir that stores md5sums of all files to notice changed files b/w upgrades. """ def pr(s): print s junk = ['.svn','ipythonrc*','*.pyc', '*.pyo', '*~', '.hg'] def ignorable(p): for pat in junk: if p.startswith(pat) or p.fnmatch(pat): return True return False modded = [] files = [path(srcdir).relpathto(p) for p in path(srcdir).walkfiles()] #print files rep = tgtdir / '.upgrade_report' try: rpt = pickle.load(rep.open()) except: rpt = {} for f in files: if ignorable(f): continue src = srcdir / f tgt = tgtdir / f if not tgt.isfile(): pr("Creating %s" % str(tgt)) tgt.write_text(src.text()) rpt[str(tgt)] = md5.new(tgt.text()).hexdigest() else: cont = tgt.text() sum = rpt.get(str(tgt), None) #print sum if sum and md5.new(cont).hexdigest() == sum: pr("%s: Unedited, installing new version" % tgt) tgt.write_text(src.text()) rpt[str(tgt)] = md5.new(tgt.text()).hexdigest() else: pr(' == Modified, skipping %s, diffs below == ' % tgt) #rpt[str(tgt)] = md5.new(tgt.bytes()).hexdigest() real = showdiff(tgt,src) pr('') # empty line if not real: pr("(Ok, it was identical, only upgrading checksum)") rpt[str(tgt)] = md5.new(tgt.text()).hexdigest() else: modded.append(tgt) #print rpt pickle.dump(rpt, rep.open('w')) if modded: print "\n\nDelete the following files manually (and rerun %upgrade)\nif you need a full upgrade:" for m in modded: print m
else: cont = tgt.text() sum = rpt.get(str(tgt), None) #print sum if sum and md5.new(cont).hexdigest() == sum: pr("%s: Unedited, installing new version" % tgt) tgt.write_text(src.text()) rpt[str(tgt)] = md5.new(tgt.text()).hexdigest() else: pr(' == Modified, skipping %s, diffs below == ' % tgt) #rpt[str(tgt)] = md5.new(tgt.bytes()).hexdigest() real = showdiff(tgt,src) pr('') # empty line if not real: pr("(Ok, it was identical, only upgrading checksum)") rpt[str(tgt)] = md5.new(tgt.text()).hexdigest() else: modded.append(tgt) #print rpt pickle.dump(rpt, rep.open('w')) if modded: print "\n\nDelete the following files manually (and rerun %upgrade)\nif you need a full upgrade:" for m in modded: print m import sys if __name__ == "__main__": upgrade_dir(path(sys.argv[1]), path(sys.argv[2]))
''' exit(1) print "Search path: %s" % str(sys.argv[1]) remove = False link = False if len(sys.argv) == 3: if sys.argv[2] == 'remove': remove = True elif sys.argv[2] == 'symlink': link = True p = path.path(sys.argv[1]) d = {} c = {} dup = set() for f in p.walk(): if not f.isdir() and not f.islink(): s = io.open(f, 'rb') try: h = md5.md5(s.read()) h = h.hexdigest() #print '{}'.format(h) if h not in d: d[h] = [] d[h].append(f) c[h] = len(d[h]) if c[h] > 1:
characters can be found in our source tree. """ # Config # If true, all lines that have tabs are printed, with line number full_report_tabs = True # If true, all lines that have tabs are printed, with line number full_report_rets = False # Code begins from IPython.external.path import path rets = [] tabs = [] for f in path('..').walkfiles('*.py'): errs = '' cont = f.bytes() if '\t' in cont: errs+='t' tabs.append(f) if '\r' in cont: errs+='r' rets.append(f) if errs: print "%3s" % errs, f if 't' in errs and full_report_tabs: for ln,line in enumerate(f.lines()):
import pandas
import datetime
import calendar
from pandas import DataFrame, Index, Series
from IPython.external.path import path

# Base directory for the locally stored data files.
datapath = path('~/workspace/math/facts').expanduser()

def get_gas_prices():
    """Load monthly US gasoline prices from the locally saved EIA
    spreadsheet and return them resampled to month-end ('M') frequency.
    """
    # http://www.eia.gov/dnav/pet/hist/LeafHandler.ashx?n=PET&s=EMA_EPM0_PTA_NUS_DPG&f=M
    # http://www.eia.gov/dnav/pet/hist_xls/EMA_EPM0_PTA_NUS_DPGm.xls
    gas_cpg = 'EMA_EPM0_PTA_NUS_DPGm.xls'
    xls = pandas.ExcelFile(datapath / gas_cpg)
    # Second sheet holds the data; the first two rows are skipped
    # (presumably header cruft -- TODO confirm against the .xls).
    gasdata = xls.parse(xls.sheet_names[1], skiprows=(0, 1))
    # Copy the second column into 'prices' and drop the original...
    gasdata['prices'] = gasdata[gasdata.columns[1]]
    del gasdata[gasdata.columns[1]]
    # ...and rename 'Date' to lowercase 'date', which becomes the index.
    gasdata['date'] = gasdata['Date']
    del gasdata['Date']
    gasdata.index = gasdata['date']
    return gasdata.resample('M', convention='end')

def clean_cpiai(path):
    # NOTE(review): parameter shadows the module-level `path` import;
    # this definition appears truncated here and presumably continues
    # beyond this chunk -- reproduced as-is.
    def normalize(path):
        with open(path) as f:
            yield ('date', 'price')
            for i, l in enumerate(f):
                l = l.strip()
                if i > 17:
                    l = l.strip()
#!/usr/bin/env python # encoding: utf-8 from __future__ import print_function """ progname """ import BeautifulSoup from IPython.external.path import path import logging DATAPATH = path('~/workspace/math/facts/data').expanduser() def parse_tax_rates_table(): # https://en.wikipedia.org/wiki/Income_tax_in_the_United_States datapath = DATAPATH / 'taxes' htmlpath = datapath / 'Income tax in the United States - Wikipedia, the free encyclopedia.html' bs = BeautifulSoup.BeautifulSoup(open(htmlpath)) def find_unadjusted_table(bs): # TODO tables = bs.findAll('table') #print(len(tables)) return tables[-2] table = find_unadjusted_table(bs) row = table.findAll('tr')[1] headings = tuple(col.text for col in row.findAll('th')) print(headings)