def saveGist(soup):
    """Save the gist code listing found in *soup* to local file(s).

    The gist number is the last whitespace-separated token of the text
    of the div whose class is 'path'.  For every span with class 'code',
    a file named '<gistnum>_<first word of span text>' is written,
    containing the text of every div with class 'line', one per line.

    Returns None.
    """
    divs = soup.findAll('div')

    # Locate the gist number.  Bug fix: initialize so a missing 'path'
    # div fails at the join below rather than with a bare NameError.
    gistnum = None
    for d in divs:
        if d.has_key('class') and d['class'] == 'path':
            gistnum = getunicode(d).split()[-1]

    for span in soup.findAll('span'):
        if span.has_key('class') and span['class'] == 'code':
            filename = '_'.join([gistnum, getunicode(span).split()[0]])
            # Bug fix: 'with' guarantees the handle is closed even if
            # getPrintUnicode raises (the original leaked it).
            with open(filename, 'w') as f:
                for d in divs:
                    if d.has_key('class') and d['class'] == 'line':
                        # write() on the single string is equivalent to
                        # the original writelines() call.
                        f.write(getPrintUnicode(d) + '\n')
    return
def searchgists(soup, gist_dict=None, search_count=0):
    """Collect gist search results (title and url) from *soup*.

    Scans every div with class 'info'; each child tag carrying an
    'href' attribute contributes one entry.  Entries are stored in
    gist_dict keyed by an incrementing counter that starts at
    search_count + 1, so results can be appended across pages by
    passing the previous call's return values back in.

    Returns the tuple (gist_dict, search_count) where search_count is
    the key of the last entry added.
    """
    # Bug fix: the original used a mutable default argument ({}), so
    # results silently accumulated across unrelated calls that omitted
    # gist_dict.  Use the None-sentinel idiom instead.
    if gist_dict is None:
        gist_dict = {}

    for div in soup.findAll('div'):
        if not (div.has_key('class') and div['class'] == 'info'):
            continue
        for content in div.contents:
            # NavigableString children are unicode instances; only Tag
            # children can carry an href, so skip plain text nodes.
            if isinstance(content, unicode):
                continue
            if content.has_key('href'):
                search_count += 1
                gist_dict[search_count] = {
                    'title': getunicode(content),
                    'url': content['href'],
                }
    return (gist_dict, search_count)
def saverepo(soup):
    """Save a repository file's code lines to disk under the cwd.

    The relative location comes from the first div that has both a
    'class' and a 'data-path' attribute; the repo name is the
    third-from-last whitespace-separated token of the page <title>.
    The text of every div with class 'line' is written to the target
    file, one per line.

    Returns None.
    """
    divs = soup.findAll('div')

    # Bug fix: initialize so a page with no matching div fails at the
    # os.path.join below rather than with a bare NameError.
    data_path = None
    for d in divs:
        if d.has_key('class') and d.has_key('data-path'):
            data_path = d['data-path']
            break

    title = getunicode(soup.find('title')).split()[-3]
    data_path = os.path.join(os.getcwd(), title, data_path)

    # NOTE(review): splitting twice drops the final component of
    # data_path, so 'filename' is actually its parent directory name.
    # Behavior preserved as-is, but this looks like it may have been
    # meant as a single os.path.split(data_path) — verify with callers.
    path, filename = os.path.split(os.path.split(data_path)[0])
    makeDir(path)
    filepath = os.path.join(path, filename)

    # Bug fix: 'with' guarantees the handle is closed even if
    # getPrintUnicode raises (the original leaked it).
    with open(filepath, 'w') as f:
        for d in divs:
            if d.has_key('class') and d['class'] == 'line':
                # write() on the single string is equivalent to the
                # original writelines() call.
                f.write(getPrintUnicode(d) + '\n')
    return