def __init__(self):
    """Resolve local paths for the GTFS transit feed and its derived files.

    Sets dname_gtfsdb (feed directory), bname/fname_transit_feed (feed
    zip name and full path), and fname_transit_gdb (the graphserver db
    path), plus the feed-retrieval state flags.
    """
    Ccp_Script_Base.__init__(self, ArgParser_Script)
    # E.g., /ccp/var/transit/metc/
    g.assurt(conf.transitdb_filename)
    self.dname_gtfsdb = os.path.dirname(conf.transitdb_filename)
    # E.g., ftp://gisftp.metc.state.mn.us/google_transit.zip
    source_name = conf.transit_db_source
    g.assurt(source_name)
    # Strip the URL scheme, e.g., gisftp.metc.state.mn.us/google_transit.zip
    # (re.sub, not re.subn: the substitution count was never used).
    source_name = re.sub(r'^ftp://', r'', source_name)
    source_name = re.sub(r'^http://', r'', source_name)
    # E.g., google_transit.zip
    self.bname_transit_feed = os.path.basename(source_name)
    # E.g., /ccp/var/transit/metc/google_transit.zip
    self.fname_transit_feed = os.path.join(
        self.dname_gtfsdb, self.bname_transit_feed)
    # E.g., /ccp/var/transit/metc/minnesota.gdb
    gserver_db_name = os.path.basename(conf.transitdb_filename)
    gserver_db_name = re.sub(r'\.gtfsdb$', r'.gdb', gserver_db_name)
    self.fname_transit_gdb = os.path.join(self.dname_gtfsdb, gserver_db_name)
    # NOTE: 2011.06.15 Transit Feed
    #  -rw-rw-r--. 1 pee cyclop 285M Jun 15 15:38 minnesota.gdb
    # Then, 2011.08.04
    #  -rw-rw-r--. 1 pee cyclop 516M Aug  9 01:31 minnesota.gdb
    # Then, 2011.08.09? Or did I change something in the import script?
    #  -rw-rw-r--. 1 pee cyclop 743M Aug  9 02:19 minnesota.gdb
    #
    # Feed-retrieval state, reset before each fetch.
    self.regex_not_retrieved = None
    self.tfeed_not_retrieved = True
    self.tfeed_xmldate = None
    self.tfeed_calspan = None
    self.tfeed_zipdate = None
    self.cache_up_to_date = False
def __init__(self):
    """Initialize the script and opt out of the base query builder."""
    Ccp_Script_Base.__init__(self, ArgParser_Script)
    # Ask Ccp_Script_Base not to build the query builder. Two reasons:
    # 1. on a first run the database might not be CcpV2-compliant yet, so
    #    a qb could not be made; and 2. a qb starts a database transaction,
    #    which would lock our tables (or force us to close the transaction
    #    in go_main).
    self.skip_query_builder = True
def __init__(self):
    """Initialize script state: attribute handle, groups, and counters."""
    Ccp_Script_Base.__init__(self, ArgParser_Script)
    #
    self.attr_alert_email = None
    self.group_names_or_ids = set()
    # Running counters for this run.
    self.stats = {
        'total_new_links': 0,
        }
def __init__(self):
    """Initialize script state and compute the server URL."""
    Ccp_Script_Base.__init__(self, ArgParser_Script)
    self.cfg_f = None
    # MAGIC_NUMBER: 80 is the default http port, so only a nonstandard
    # port is spelled out in the server URL.
    if conf.server_port == 80:
        self.server_url = conf.server_name
    else:
        self.server_url = '%s:%s' % (conf.server_name, conf.server_port,)
def __init__(self):
    """Initialize script state and compute the server URL."""
    Ccp_Script_Base.__init__(self, ArgParser_Script)
    self.cfg_f = None
    # MAGIC_NUMBER: Port 80 is the default http port; append the port to
    # the host name only when it is nonstandard.
    self.server_url = (
        conf.server_name
        if conf.server_port == 80
        else '%s:%s' % (conf.server_name, conf.server_port,))
def __init__(self):
    """Initialize placeholders for the branch, its groups, and the qb."""
    Ccp_Script_Base.__init__(self, ArgParser_Script)
    # The branch group.One() object.
    self.the_branch = None
    # The branch's three access-level groups.
    self.owner_group = None
    self.arbiter_group = None
    self.editor_group = None
    # The branch group stack IDs.
    self.sid_owners = 0
    self.sid_arbiters = 0
    self.sid_editors = 0
    #
    self.current_qb = None
def __init__(self):
    """Initialize attribute handles and the stats counters."""
    Ccp_Script_Base.__init__(self, ArgParser_Script)
    #
    self.attr_cycle_facil = None
    # Piggy-back! So we don't need another script, attaching, for
    # controlled-access roadways (well, highways and freeways):
    self.attr_no_access = None
    # 2013.06.14: [lb] made the cautionary facils their own attribute. But
    # we don't need those for this script, since CcpV1 has no cautionaries.
    # Nope: self.attr_cautionary = None
    #
    # NOTE(review): 'cyle' below looks like a typo for 'cycle', but the key
    # is kept as-is since other code may read it by this exact name.
    self.stats = dict()
    self.stats['cnt_attrs_all'] = 0
    self.stats['cnt_attrs_cyle_facil'] = 0
    self.stats['cnt_attrs_no_access'] = 0
    self.stats['cnt_facils_kvals'] = {}
def query_builder_prepare(self):
    """Prepare the qb: pin the latest revision and open a r/w transaction."""
    Ccp_Script_Base.query_builder_prepare(self)
    # The cache could probably be made for any rev, but for now it's just
    # for Current.
    g.assurt(isinstance(self.qb.revision, revision.Current))
    # Grab the latest revision for now.
    #self.revision_id = conf.rid_inf
    self.revision_id = revision.Revision.revision_max(self.qb.db)
    # MAYBE: Just change to Historic?
    #self.qb.revision = revision.Historic(self.revision_id)
    # FIXME/BUG nnnn: Schedule a maintenance window when the transit source
    #                 is updated: run a script to wget -N and see if the
    #                 source is new, then schedule maintenance mode, wait,
    #                 and update.
    # 2014.09.19: Can we not do this and just assume that cron or a
    #             developer won't overlap calls to this script? We're not
    #             touching the item tables, so we don't need the revision
    #             lock.
    # Excessive/bad: revision.Revision.revision_lock_dance(
    #                   self.qb.db, caller='query_builder_prepare')
    self.qb.db.transaction_begin_rw()
def query_builder_prepare(self):
    """Prepare the qb, fetching WKT geometry but not raw or SVG."""
    Ccp_Script_Base.query_builder_prepare(self)
    self.qb.filters.skip_geometry_raw = True
    self.qb.filters.skip_geometry_svg = True
    self.qb.filters.skip_geometry_wkt = False
def __init__(self):
    """Initialize the script; headers starts out empty."""
    Ccp_Script_Base.__init__(self, ArgParser_Script)
    # Presumably filled in later by the script — confirm against callers.
    self.headers = ''
def __init__(self):
    """Initialize the script with the standard argument parser."""
    Ccp_Script_Base.__init__(self, ArgParser_Script)
def query_builder_prepare(self):
    """Prepare the query builder via the base class; no extra setup."""
    Ccp_Script_Base.query_builder_prepare(self)
def __init__(self):
    """Initialize the script; the map file handle starts unset."""
    Ccp_Script_Base.__init__(self, ArgParser_Script)
    self.map_f = None
def __init__(self):
    """Initialize the script and the per-user result buckets."""
    Ccp_Script_Base.__init__(self, ArgParser_Script)
    # Buckets populated as users are processed.
    self.user_ids = []
    self.invalid_ids = []
    self.not_okay = []
    self.user_infos = []