def loaddir(directory, clear=False):
    """Load book-review files from *directory* into Book objects.

    Each file holds a YAML header and an RST body separated by a
    '\\n---\\n' line.  Hidden entries and the 'categories'/'template'
    entries are skipped; subdirectories are walked recursively.

    Parameters
    ----------
    directory : str
        Directory containing the review files.
    clear : bool, optional
        If True, delete all existing Book objects first.
    """
    if clear:
        Book.objects.all().delete()
    # Keep full paths in the queue.  The original pushed already-joined
    # subdirectory paths but re-joined them with `directory` after the
    # pop, so files inside subdirectories got a doubled path prefix.
    queue = [path.join(directory, f) for f in os.listdir(directory)]
    while queue:
        filename = queue.pop()
        base = path.basename(filename)
        # Skip hidden files and non-content entries at any depth.
        if base.startswith('.'):
            continue
        if base in ('categories', 'template'):
            continue
        if path.isdir(filename):
            queue.extend(path.join(filename, f) for f in os.listdir(filename))
            continue
        # `file()` is Python 2 only and leaked the handle; use open()
        # in a context manager instead.
        with open(filename) as review_file:
            filecontent = review_file.read()
        # maxsplit=1 so a literal '---' line inside the body does not
        # break the two-value unpacking.
        header, content = filecontent.split('\n---\n', 1)
        # safe_load: never execute arbitrary YAML tags from content files.
        header = yaml.safe_load(header)
        content = preprocess_rst_content(content)
        review_date = parsedate(header['review_date'])
        review_date = time.strftime('%Y-%m-%d', review_date)
        btags = [tag_for(c) for c in header.get('tags', '').split()]
        B = Book(slug=header['slug'],
                 title=header['title'],
                 booktitle=header['booktitle'],
                 author=header['author'],
                 content=content,
                 review_date=review_date)
        B.save()
        for t in btags:
            B.tags.add(t)
def loaddir(directory, clear=False):
    """Import every book-review file under *directory* as a Book.

    A review file is a YAML header and an RST body separated by a
    '\\n---\\n' line.  Entries starting with '.' and the
    'categories'/'template' entries are ignored; directories are
    descended into recursively.

    Parameters
    ----------
    directory : str
        Root directory holding the review files.
    clear : bool, optional
        If True, remove every existing Book before loading.
    """
    if clear:
        Book.objects.all().delete()
    # Store full paths in the work list.  The previous version pushed
    # joined subdirectory paths and then joined `directory` on again
    # when popping, producing paths with a doubled prefix.
    pending = [path.join(directory, name) for name in os.listdir(directory)]
    while pending:
        entry = pending.pop()
        name = path.basename(entry)
        # Hidden and non-content entries are skipped at every depth.
        if name.startswith('.'):
            continue
        if name in ('categories', 'template'):
            continue
        if path.isdir(entry):
            pending.extend(path.join(entry, f) for f in os.listdir(entry))
            continue
        # open() + context manager: `file()` is Python 2 only and the
        # handle was never closed before.
        with open(entry) as fobj:
            raw = fobj.read()
        # maxsplit=1: a '---' line inside the review body must not
        # break the two-value unpacking.
        header, content = raw.split('\n---\n', 1)
        # safe_load avoids executing arbitrary YAML tags.
        header = yaml.safe_load(header)
        content = preprocess_rst_content(content)
        review_date = parsedate(header['review_date'])
        review_date = time.strftime('%Y-%m-%d', review_date)
        btags = [tag_for(c) for c in header.get('tags', '').split()]
        book = Book(slug=header['slug'],
                    title=header['title'],
                    booktitle=header['booktitle'],
                    author=header['author'],
                    content=content,
                    review_date=review_date)
        book.save()
        for t in btags:
            book.tags.add(t)
def loaddir(directory, clear=False):
    """Load blog-post files from *directory* into BlogPost objects.

    Each file is a YAML field block and an RST body separated by a
    '\\n---\\n' line.  Hidden entries and the template files are
    skipped; subdirectories are walked recursively.

    Parameters
    ----------
    directory : str
        Directory to scan for post files.
    clear : bool, optional
        If True, delete every existing BlogPost first.

    Raises
    ------
    IOError
        If a file lacks the '---' separator.
    """
    if clear:
        BlogPost.objects.all().delete()
    # Queue holds full paths from the start.  The original code pushed
    # already-joined subdirectory paths but re-joined them with
    # `directory` on pop, yielding wrong (doubled-prefix) paths.
    queue = [path.join(directory, f) for f in listdir(directory)]
    while queue:
        filename = queue.pop()
        base = path.basename(filename)
        if base.startswith('.'):
            continue
        if base in ('template.rst', 'template'):
            continue
        if path.isdir(filename):
            queue.extend(path.join(filename, f) for f in listdir(filename))
            continue
        # Read with a context manager: `file()` is Python 2 only and
        # leaked the file handle.
        with open(filename) as post_file:
            filecontent = post_file.read()
        parts = filecontent.split('\n---\n', 1)
        if len(parts) != 2:
            raise IOError('gitcms.blog.load: expected "---" separator in file %s' % filename)
        fields, content = parts
        # safe_load: do not execute arbitrary YAML tags from content files.
        fields = yaml.safe_load(fields)
        fields['content'] = preprocess_rst_content(content)
        fields['timestamp'] = parsedatetime(fields['timestamp'])
        fields['timestamp'] = time.strftime('%Y-%m-%d %H:%M', fields['timestamp'])
        # `categories` is not a BlogPost field: remove it and turn the
        # space-separated names into tags.
        categories = fields.pop('categories', '')
        ptags = [tag_for(c) for c in categories.split()] if categories else []
        # Only create the post once parsing has fully succeeded.
        P = BlogPost(**fields)
        P.save()
        for t in ptags:
            P.tags.add(t)
def loaddir(directory, clear=False):
    """Import every blog-post file under *directory* as a BlogPost.

    A post file is a YAML field block and an RST body separated by a
    '\\n---\\n' line.  Entries starting with '.' and the template
    files are ignored; directories are descended into recursively.

    Parameters
    ----------
    directory : str
        Root directory holding the post files.
    clear : bool, optional
        If True, remove every existing BlogPost before loading.

    Raises
    ------
    IOError
        If a file has no '---' separator.
    """
    if clear:
        BlogPost.objects.all().delete()
    # Work list of full paths.  The previous version pushed joined
    # subdirectory paths and joined `directory` on again at pop time,
    # so files inside subdirectories got a doubled path prefix.
    pending = [path.join(directory, name) for name in listdir(directory)]
    while pending:
        entry = pending.pop()
        name = path.basename(entry)
        if name.startswith('.'):
            continue
        if name in ('template.rst', 'template'):
            continue
        if path.isdir(entry):
            pending.extend(path.join(entry, f) for f in listdir(entry))
            continue
        # Context manager replaces the Python 2 `file()` call, which
        # also leaked the handle.
        with open(entry) as fobj:
            raw = fobj.read()
        parts = raw.split('\n---\n', 1)
        if len(parts) != 2:
            raise IOError(
                'gitcms.blog.load: expected "---" separator in file %s' % entry)
        fields, content = parts
        # safe_load avoids executing arbitrary YAML tags.
        fields = yaml.safe_load(fields)
        fields['content'] = preprocess_rst_content(content)
        fields['timestamp'] = parsedatetime(fields['timestamp'])
        fields['timestamp'] = time.strftime('%Y-%m-%d %H:%M', fields['timestamp'])
        # `categories` is not a model field: pop it and map the names
        # to tag objects.
        categories = fields.pop('categories', '')
        post_tags = [tag_for(c) for c in categories.split()] if categories else []
        # Everything parsed cleanly, so it is safe to add the post now.
        post = BlogPost(**fields)
        post.save()
        for t in post_tags:
            post.tags.add(t)
def loaddir(directory, clear=False):
    """Load book-review files from *directory* into Book objects.

    Each file holds a YAML header and an RST body separated by a
    '\\n---\\n' line.  Hidden entries and the "categories"/"template"
    entries are skipped; subdirectories are walked recursively.

    Parameters
    ----------
    directory : str
        Directory containing the review files.
    clear : bool, optional
        If True, delete all existing Book objects first.
    """
    if clear:
        Book.objects.all().delete()
    # Keep full paths in the queue.  The original pushed already-joined
    # subdirectory paths but re-joined them with `directory` after the
    # pop, so files inside subdirectories got a doubled path prefix.
    queue = [path.join(directory, f) for f in os.listdir(directory)]
    while queue:
        filename = queue.pop()
        base = path.basename(filename)
        # Skip hidden files and non-content entries at any depth.
        if base.startswith("."):
            continue
        if base in ("categories", "template"):
            continue
        if path.isdir(filename):
            queue.extend(path.join(filename, f) for f in os.listdir(filename))
            continue
        # `file()` is Python 2 only and leaked the handle; use open()
        # in a context manager instead.
        with open(filename) as review_file:
            filecontent = review_file.read()
        # maxsplit=1 so a literal '---' line inside the body does not
        # break the two-value unpacking.
        header, content = filecontent.split("\n---\n", 1)
        # safe_load: never execute arbitrary YAML tags from content files.
        header = yaml.safe_load(header)
        content = preprocess_rst_content(content)
        review_date = parsedate(header["review_date"])
        review_date = time.strftime("%Y-%m-%d", review_date)
        btags = [tag_for(c) for c in header.get("tags", "").split()]
        B = Book(
            slug=header["slug"],
            title=header["title"],
            booktitle=header["booktitle"],
            author=header["author"],
            content=content,
            review_date=review_date,
        )
        B.save()
        for t in btags:
            B.tags.add(t)
def loaddir(directory, clear=False):
    """Load page files from *directory* into Article objects.

    Each file starts with 'key: value' header lines terminated by a
    '---' or '..' line followed by one blank line; the remainder is
    the RST content.  Hidden entries and the template files are
    skipped and subdirectories are walked recursively.  URLs must be
    unique; a leading or trailing '/' is stripped with a warning.

    Parameters
    ----------
    directory : str
        Directory containing the article files.
    clear : bool, optional
        If True, delete all existing Article objects first.

    Raises
    ------
    IOError
        On a malformed header line, a missing blank line after the
        header, or a repeated URL.
    """
    import warnings
    if clear:
        Article.objects.all().delete()
    # Keep full paths in the queue.  The original pushed already-joined
    # subdirectory paths but re-joined them with `directory` after the
    # pop, so files inside subdirectories got a doubled path prefix.
    queue = [path.join(directory, f) for f in os.listdir(directory)]
    urls = set()
    while queue:
        artfile = queue.pop()
        base = path.basename(artfile)
        if base.startswith('.'):
            continue
        if base in ('template', 'template.rst', 'template.txt'):
            continue
        if path.isdir(artfile):
            queue.extend(path.join(artfile, f) for f in os.listdir(artfile))
            continue
        header = {}
        # Context manager replaces the Python 2 `file()` call, which
        # also leaked the handle (and `input` shadowed a builtin).
        with open(artfile) as art_input:
            linenr = 0
            while True:
                line = art_input.readline().strip()
                linenr += 1
                if line in ('---', '..'):
                    break
                if line.find(':') < 0:
                    raise IOError('gitcms.pages.load: In file %s, line %s. No \':\' found!' % (artfile, linenr))
                tag, value = line.split(':', 1)
                header[tag] = value.strip()
            blank = art_input.readline()
            linenr += 1
            if blank.strip():
                # Call form: `raise IOError, msg` is a syntax error
                # under Python 3.
                raise IOError('Blank line expected while processing file (%s:%s)\nGot "%s"' % (artfile, linenr, blank))
            content = art_input.read()
        content = preprocess_rst_content(content)
        url = header['url']
        if url and url[-1] == '/':
            warnings.warn('''\
gitcms.pages.loaddir: Removing / at end of url (%s)
(Both versions will work for accessing the page.)
''' % url)
            url = url[:-1]
        if url and url[0] == '/':
            warnings.warn('gitcms.pages.loaddir: Removing / at start of url ({0})'
                .format(url))
            url = url[1:]
        if url in urls:
            raise IOError('gitcms.pages.loaddir: repeated URL detected (%s)' % url)
        taglist = [tag_for(c) for c in header.get('categories', '').split()]
        # Nothing raised, so the article is safe to store.
        urls.add(url)
        A = Article(title=header['title'],
                    url=url,
                    meta=header.get('meta', ''),
                    author=header.get('author', ''),
                    content=content)
        A.save()
        for c in taglist:
            A.tags.add(c)