def get_table_list(self, cursor):
    kinds = metadata.get_kinds()
    try:
        from django.db.backends.base.introspection import TableInfo
        return [TableInfo(x, "t") for x in kinds]
    except ImportError:
        return kinds  # Django <= 1.7
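# A hedged aside on the fallback above: Django >= 1.8 expects
# get_table_list() to return TableInfo(name, type) tuples, where "t"
# marks a plain table (as opposed to "v" for a view); older Django
# accepted bare name strings. A minimal standalone sketch of the same
# pattern; `kinds` here is a stand-in for metadata.get_kinds():
def describe_tables(kinds):
    try:
        from django.db.backends.base.introspection import TableInfo
        return [TableInfo(name, "t") for name in kinds]
    except ImportError:  # Django <= 1.7
        return list(kinds)

print(describe_tables(["Post", "Comment"]))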
def delete_everything():
    # get all of our classes
    for kind in metadata.get_kinds():
        entities_remain = True
        while entities_remain:
            q = db.GqlQuery("SELECT __key__ FROM " + kind)
            if q.count():
                db.delete(q.fetch(200))
                time.sleep(0.5)
            else:
                entities_remain = False
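# A note on the loop above: metadata.get_kinds() also reports App
# Engine's built-in metadata kinds (dunder names such as __Stat_Total__),
# which cannot be deleted. A hedged helper that filters them out, in the
# spirit of the startswith('__') guard used in delete_all_entities() below:
from google.appengine.ext.db import metadata

def deletable_kinds():
    return [k for k in metadata.get_kinds() if not k.startswith('__')]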
def post_likes(self, post_id):
    # gets the metadata about the datastore
    kinds = metadata.get_kinds()
    # checks to see if any likes exist and if so displays them
    if u'PostLike' in kinds:
        likes = db.GqlQuery(
            "SELECT * "
            "FROM PostLike "
            "WHERE ANCESTOR IS :1", post_key(post_id)).count()
    else:
        likes = 0
    return likes
def delete_all_entities():
    from google.appengine.api.datastore import Delete, Query
    from google.appengine.ext.db.metadata import get_kinds, get_namespaces
    from google.appengine.api.namespace_manager import set_namespace
    for namespace in get_namespaces():
        set_namespace(namespace)
        for kind in get_kinds():
            if kind.startswith('__'):
                continue
            while True:
                data = Query(kind=kind, keys_only=True).Get(200)
                if not data:
                    break
                Delete(data)
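# A hedged usage sketch: delete_all_entities() wipes every user kind in
# every namespace, so it is typically run by hand (e.g. from the
# remote_api shell) rather than wired into a request handler. The
# confirmation prompt is an assumption, not part of the original:
if raw_input("Really delete ALL entities in ALL namespaces? [y/N] ") == "y":
    delete_all_entities()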
def get(self):
    s = BackupSettings.get_or_insert("settings")
    self.response.write("""<html><body><form method="POST">
Available models:<br>
%s
Ignore models:<br>
<textarea name="ignore-models" rows="5" cols="80">%s</textarea><br>
<br>
Set password:<br>
<input type="password" name="pass1"/><br>
Repeat:<br>
<input type="password" name="pass2"/><br>
<br>
<input type="submit" value="submit"/>
</form></body></html>""" % (", ".join(get_kinds()), "\n".join(s.ignore_models)))
def post(self):
    settings = get_settings()
    if not settings:
        logging.error("Could not load settings")
        self.abort(500)
        return
    if not validate_password(settings, self.request.get("password")):
        logging.error("Could not validate password!")
        self.abort(500)
        return
    kinds = list()
    for kind in get_kinds():
        if not (kind in settings.ignore_models or kind.startswith("_")):
            kinds.append(kind)
    self.response.headers['Content-Type'] = 'application/json-rpc'
    json.dump(kinds, self.response.out)
def latest_data(self):
    immitable = metadata.get_kinds()
    if 'Immiscraper' in immitable:
        latest_data_home_file_status = self.checkmodification(
            query=Immiscraper.query().get().latest_page_html_update_date)
        if latest_data_home_file_status:
            html = self.requestfile(
                "https://www.homeaffairs.gov.au/trav/work/skil#tab-content-2")
            self.soup = BeautifulSoup(html.text, "html.parser")
            Immiscraper(
                latest_page_html_update_date=calendar.timegm(time.gmtime()),
                latest_page_html=self.soup.prettify().encode(
                    'ascii', 'ignore')).put()
        else:
            html = self.readfile(
                query=Immiscraper.query().get().latest_page_html)
            self.soup = BeautifulSoup(html, "html.parser")
    else:
        html = self.requestfile(
            "https://www.homeaffairs.gov.au/trav/work/skil#tab-content-2")
        self.soup = BeautifulSoup(html.text, "html.parser")
        Immiscraper(
            latest_page_html_update_date=calendar.timegm(time.gmtime()),
            latest_page_html=self.soup.prettify().encode(
                'ascii', 'ignore')).put()
    latest_data_home = self.soup.find_all(class_="tabbody")[2]
    link = latest_data_home.find("a")["href"]
    update_date = re.search(r'\d*-\D*-\d{4}', link).group()
    latest_data_dict = self.get_data(self.home_url + link)
    latest_data_dict["update_date"] = update_date
    return json.dumps(latest_data_dict)
def get(self):
    if self.request.get('secret_key') != SECRET_KEY:
        pass
    else:
        m = metadata.get_kinds()
        arr = {}
        count = 0
        for n in m:
            if n.startswith("_"):
                pass
            else:
                arr[count] = n
                count += 1
        # RESPONSE === BEGIN
        # self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(
            "%s(" % urllib2.unquote(self.request.get('callback')))
        self.response.out.write(json.dumps(arr))
        self.response.out.write(")")
def get(self, login_id):
    """
    uses GET request to get the post_id from the new post and renders
    permalink.html if the blog post exists by passing the template into
    render from the TemplateHandler class.
    """
    url_str = self.request.path
    post_id = url_str.rsplit('post-', 1)[1]
    key = post_key(post_id)
    post = db.get(key)
    kinds = metadata.get_kinds()
    # checks to see if any comments exist and if so displays them
    if u'Comment' in kinds:
        comments = db.GqlQuery(
            "SELECT * "
            "FROM Comment "
            "WHERE ANCESTOR IS :1", key)
    else:
        comments = ''
    self.render("postlink.html", post=post, comments=comments)
def initialized():
    if not ModelInitializer._initialized_state:
        # let's be atomic here:
        if len(metadata.get_kinds(start='A', end='Z')) != 0:
            ModelInitializer._initialized_state = True
    return ModelInitializer._initialized_state
def get_table_list(self, cursor):
    return metadata.get_kinds()
def _get_datastore_kinds():
    # Return only user-defined names, not __internal_appengine_names__.
    return [k for k in metadata.get_kinds()
            if not _INTERNAL_DATASTORE_KIND_REGEX.match(k)]
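# _INTERNAL_DATASTORE_KIND_REGEX is referenced above but not defined in
# this excerpt; a plausible definition (an assumption, not taken from the
# source) that matches the dunder-style metadata kinds the comment alludes to:
import re
_INTERNAL_DATASTORE_KIND_REGEX = re.compile(r'^__.*__$')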
def sql_flush(self, style, tables, seqs, allow_cascade=False):
    if getattr(settings, "COMPLETE_FLUSH_WHILE_TESTING", False):
        if "test" in sys.argv:
            tables = metadata.get_kinds()
    return [FlushCommand(table) for table in tables]
def listKinds(self, backupkey=None, *args, **kwargs):
    global backupKey
    assert safeStringComparison(backupKey, backupkey)
    return pickle.dumps({"kinds": metadata.get_kinds()}).encode("HEX")
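# On the caller's side, the hex-encoded pickle from listKinds() can be
# unpacked as below. A sketch assuming Python 2, where str.decode("HEX")
# exists; `payload` is a hypothetical variable holding the returned string:
import pickle
kinds = pickle.loads(payload.decode("HEX"))["kinds"]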
def create_links(self):
    # The page has four "tabbody" blocks: the 3rd links to all the
    # invitation rounds and the 4th holds the occupational ceiling.
    # Extract all the 'ul' elements which house the links on that page.
    immitable = metadata.get_kinds()
    main_page_html_property = metadata.get_representations_of_kind(
        "Immiscraper")
    if ('Immiscraper' in immitable and
            main_page_html_property['main_page_html_update_time'] is not None):
        main_data_home_file_status = self.checkmodification(
            query=Immiscraper.query().get().main_page_html_update_time)
        current_app.logger.info("status: %s", main_data_home_file_status)
        if main_data_home_file_status:
            html = self.requestfile(
                "https://www.homeaffairs.gov.au/trav/work/skil#tab-content-2")
            self.soup = BeautifulSoup(html.text, "html.parser")
            Immiscraper(
                main_page_html_update_time=calendar.timegm(time.gmtime()),
                main_page_html=self.soup.prettify().encode(
                    'ascii', 'ignore')).put()
        else:
            html = self.readfile(
                query=Immiscraper.query().get().main_page_html)
            self.soup = BeautifulSoup(html, "html.parser")
    else:
        html = self.requestfile(
            "https://www.homeaffairs.gov.au/trav/work/skil#tab-content-2")
        self.soup = BeautifulSoup(html.text, "html.parser")
        Immiscraper(
            main_page_html_update_time=calendar.timegm(time.gmtime()),
            main_page_html=self.soup.prettify().encode(
                'ascii', 'ignore')).put()
    tab_content_invitations = self.soup.find_all(class_="tabbody")
    invitation_links = tab_content_invitations[2].find_all("ul")
    # Each 'ul' is one year's list of invitation rounds: the first
    # is 2018, the second 2017.
    for yearly_links in invitation_links:
        for dates in yearly_links:
            year = re.search(r'\d{4}', str(dates))
            date = re.search(r'\d+\s+\w+', str(dates))
            link = re.search(r'(\/[A-Za-z\/\-0-9.aspx]+)', str(dates))
            if year:
                if (year.group() in self.invitation_year_dict and
                        type(self.invitation_year_dict[year.group()]) is dict):
                    self.invitation_year_dict[year.group()][
                        date.group()] = link.group()
                else:
                    self.invitation_year_dict[year.group()] = {}
                    self.invitation_year_dict[year.group()][
                        date.group()] = link.group()
    return json.dumps(self.invitation_year_dict)
def testGetKinds(self):
    """Test for get_kinds."""
    self.assertEqual(['Bar', 'Baz', 'Foo'], metadata.get_kinds())
    self.assertEqual(['Baz', 'Foo'], metadata.get_kinds('Bat'))
    self.assertEqual(['Bar', 'Baz'], metadata.get_kinds('', 'C'))
    self.assertEqual([], metadata.get_kinds('A', ''))
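# The test pins down get_kinds' range semantics: get_kinds(start, end)
# returns kind names with start <= name < end, so get_kinds('Bat') skips
# 'Bar', get_kinds('', 'C') stops before 'Foo', and an empty end string
# ('A', '') selects nothing. A hedged sketch of the same filtering over a
# plain list, for illustration only:
def kinds_in_range(kinds, start='', end=None):
    return sorted(k for k in kinds
                  if k >= start and (end is None or k < end))

assert kinds_in_range(['Bar', 'Baz', 'Foo'], 'Bat') == ['Baz', 'Foo']
assert kinds_in_range(['Bar', 'Baz', 'Foo'], '', 'C') == ['Bar', 'Baz']
assert kinds_in_range(['Bar', 'Baz', 'Foo'], 'A', '') == []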