def store_archive_info(url, timestamp, archive_folder, token):
    """Store details about an archived repository in the archive table.

    Creates an Archive row for the given GitHub repository unless a row
    already exists for the same (relative) archive folder.

    Args:
        url: GitHub repository URL (http(s)://github.com/owner/name[.git]).
        timestamp: archive timestamp; stored as a string and appended to
            the relative folder path.
        archive_folder: absolute path of the archive on disk; only its
            last three path components are persisted.
        token: GitHub API token, passed through to get_repo_language().

    Returns:
        The "owner/name" slug, or None if the URL is not a valid GitHub slug.
    """
    slug = re.sub(r"http(s)?://github\.com/", '', url).replace('.git', '').strip()
    try:
        owner, name = slug.split('/')
    except ValueError:
        # URL did not reduce to exactly "owner/name"
        return None
    language = get_repo_language(slug, token)
    # Persist only the last 3 components of the archive path plus the
    # timestamp, so stored rows stay valid if the archive root moves.
    relative_folder = os.path.join(*archive_folder.rsplit(os.path.sep, 3)[-3:])
    relative_folder = os.path.join(relative_folder, str(timestamp))
    # BUG FIX: the duplicate check must use the same (relative) path that is
    # stored on the row. Filtering by the absolute archive_folder never
    # matched any stored record, so duplicates were always inserted.
    exists = db.session.query(Archive).filter_by(
        archive_folder=relative_folder).first()
    if not exists:
        archive = Archive(url=url, name=name, owner=owner, github_slug=slug,
                          language=language, timestamp=str(timestamp),
                          archive_folder=relative_folder)
        db.session.add(archive)
        db.session.commit()
    return slug
def save(self):
    """Create or update the wrapped Post entity from the form's data.

    Builds the slug, resolves the category key, upserts one Tag entity
    per comma-separated tag word, upserts the monthly Archive bucket,
    copies the remaining form fields onto the Post, stores it, and bumps
    app.config["LATEST"].
    """
    if self.obj is None:
        post = Post()
    else:
        post = self.obj

    # --- slug: fall back to a slug derived from the title when blank ---
    title = self.title.data
    if (self.slug.data is None) or (self.slug.data.strip() == ""):
        slug = pretty_slug(title)
    else:
        slug = slugify(self.slug.data.strip())

    # --- category ---
    # BUG FIX: the original compared strings with `is` ("" identity is an
    # interpreter accident, not a guarantee), so a blank category field
    # could slip through to ndb.Key and raise. Use an equality check.
    category_data = self.category.data.strip()
    if category_data == "":
        category = None
    else:
        category = ndb.Key(urlsafe=category_data)

    # --- tags: one Tag entity per non-empty comma-separated word ---
    tag = []
    for word in self.tag.data.split(","):
        word = word.strip()
        if not word == "":
            t = Tag(id=pretty_slug(word).lower())
            t.title = word
            key = t.put()
            tag.append(key)

    # --- monthly archive bucket (keyed "YYYY_MM") for this post ---
    t = datetime.now()
    archive = Archive(id=t.strftime("%Y_%m"))
    archive.time = date(year=t.year, month=t.month, day=1)
    archive_key = archive.put()

    post.title = title
    post.slug = slug
    post.text = self.text.data
    post.category = category
    post.tag = tag
    post.archive = archive_key
    post.order = self.order.data
    post.status = self.status.data
    post.allowComment = self.allowComment.data
    post.allowPing = self.allowPing.data
    post.allowFeed = self.allowFeed.data
    post.put()

    # Record the latest-change time so cached views can invalidate.
    app.config["LATEST"] = datetime.now()
def archive():
    """Archive the article given by the ?title= and ?url= query args.

    Skips (with a flash message) when an Archive row for the same link
    already exists; always redirects back to the archives page.
    """
    article_title = request.args.get('title', None)
    article_link = request.args.get('url', None)

    already_saved = Archive.query.filter_by(article_link=article_link).first()
    if already_saved:
        flash('Article already archived')
        return redirect(url_for('archives'))

    entry = Archive(user_id=current_user.id,
                    article_title=article_title,
                    article_link=article_link)
    db.session.add(entry)
    db.session.commit()
    return redirect(url_for('archives'))
def archive_metadata(sc_id, metafile):
    """Parse coverage metadata and record an Archive row for the scenario.

    Args:
        sc_id: scenario id (coercible to int).
        metafile: path to the coverage-description metadata XML file.

    Returns:
        True on success; parsing/DB exceptions propagate to the caller.
    """
    # cd_tree: coverage description tree extracted from the metadata XML file
    cd_tree = base_xml_parse(metafile, True)
    wcseo_type = determine_wcseo_type(cd_tree)
    coverage_id = extract_eoid(cd_tree, wcseo_type)
    if IE_DEBUG > 1:
        # FIX: the Python 2 backtick-repr syntax (` x `) was removed in
        # Python 3; repr() is equivalent and valid in both versions.
        logger.info("Sc_id " + repr(sc_id) + ": Archiving meta for " +
                    repr(cd_tree.tag) + ", cid='" + coverage_id + "'.")
    # Release the parsed tree before touching the DB.
    cd_tree = None
    scenario = Scenario.objects.get(id=int(sc_id))
    archive_record = Archive(scenario=scenario, eoid=coverage_id)
    archive_record.save()
    return True