Code Example #1
File: main.py  Project: mahmud83/NUS-shuttle-bus
def execution(result):
    # `result` is unused here; date_begin and date_end are assumed to be
    # defined at module level alongside the helper functions called below.

    # download the raw data
    update(date_begin, date_end)

    #### Modify folderpath ####
    folderpath = '../Download/BusLocation_09_05_2017'
    aggregate(folderpath)

    # ridership analysis
    database = "ridership"
    user = "******"
    password = "******"
    host = "localhost"
    port = "5432"
    ridership(database, user, password, host, port)

    # round trip time
    folderpath = '../output'
    database = "RTT"
    RTT_upload(database, user, password, host, port)
    run_RTT(folderpath)

    # interarrival
    database = "inter_arrival"
    inter_arrival_upload(database, user, password, host, port)
    run_interarrival(folderpath)
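
Note: date_begin, date_end and the helper functions called above are defined elsewhere in main.py. As an illustration only, the repeated connection arguments could be grouped once and reused; db_connect_args below is a hypothetical helper, not part of the project.

DB_PARAMS = dict(user="******", password="******",
                 host="localhost", port="5432")

def db_connect_args(database):
    # return the full keyword set for one of the analysis databases
    return dict(database=database, **DB_PARAMS)

print(db_connect_args("ridership"))
# {'database': 'ridership', 'user': '******', 'password': '******',
#  'host': 'localhost', 'port': '5432'}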

	
	
Code Example #2
def main():
    # np, B_init, dif_B and criter_B are assumed to be defined at module
    # level (e.g. `import numpy as np` plus the model constants).
    print("Start Main")

    env_params = init_env_params()
    B_new, env_params_updated = update_environment(env_params, B_init)
    k_prime, _ = init_kprime_kcross(env_params_updated)

    print(np.sum(env_params_updated['id_shocks']))
    print(np.sum(env_params_updated['agg_shocks']))

    diff_B = dif_B  # start above the convergence threshold

    while diff_B > criter_B:

        # solve the individual problem given the current coefficients B
        k_prime_new, c = individual_optimization(k_prime, env_params_updated)

        # aggregate individual policies into an aggregate capital series
        km_series, k_cross_new = aggregate(k_prime_new, env_params_updated)

        # re-estimate the law of motion and measure the change in B
        B_new, env_params_updated = update_environment(env_params,
                                                       B_new,
                                                       k_cross_new=k_cross_new,
                                                       km_ts=km_series)
        print("diffB", env_params_updated['diffB'])

        diff_B = env_params_updated['diffB']
        k_prime = k_prime_new

    plot_accuracy(km_series, env_params_updated['agg_shocks'], B_new)
    plot_policy(k_prime_new, km_series, env_params_updated)
    plot_lorenz(k_cross_new)
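
The while loop above is a fixed-point iteration: it re-estimates the coefficient vector B until successive values agree to within criter_B. Below is a self-contained sketch of that convergence pattern; iterate_until_converged and the toy update rule are illustrations, not functions from this project.

import numpy as np

def iterate_until_converged(update, B, tol=1e-8, max_iter=1000):
    # repeat B <- update(B) until the sup-norm change falls below tol
    for _ in range(max_iter):
        B_new = update(B)
        if np.max(np.abs(B_new - B)) < tol:
            return B_new
        B = B_new
    raise RuntimeError("no convergence within max_iter iterations")

# toy usage: the fixed point of B <- 0.5*B + 1 is B = 2
print(iterate_until_converged(lambda B: 0.5 * B + 1.0, np.array([0.0])))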
Code Example #3
	def publish(self, src, dest):
		"""Aggregate the page at src and publish it to dest, rewriting
		links and image references between the two namespaces."""
		srcns = []
		fullsrc = self.dw.resolve(src, [], srcns)
		# doc = self.dw.getpage(src, [], srcns)
		
		destns = []
		self.dw.resolve(dest, [], destns)

		toc = [
			'  - [[%s]]' % fullsrc
		]
		
		doc, chapters = aggregate(self.dw, toc, srcns)
		
		# print(len(doc))
		# print(chapters)
		
		newdoc = []
		
		for line in doc:
			re1line = wiki.rx_link.sub(lambda m: self.resolve_link(srcns, destns, m), line)
			re2line = wiki.rx_image.sub(lambda m: self.resolve_image(srcns, destns, m), re1line)
			newdoc.append(re2line)

		self.dw.putpage(newdoc, dest, summary='publish page')
Code Example #4
    def publish(self, src, dest):
        srcns = []
        fullsrc = self.dw.resolve(src, [], srcns)
        # doc = self.dw.getpage(src, [], srcns)

        destns = []
        self.dw.resolve(dest, [], destns)

        toc = ['  - [[%s]]' % fullsrc]

        doc, chapters = aggregate(self.dw, toc, srcns)

        # print(len(doc))
        # print(chapters)

        newdoc = []

        for line in doc:
            re1line = Wiki.rx_link.sub(
                lambda m: self.resolve_link(srcns, destns, m), line)
            re2line = Wiki.rx_image.sub(
                lambda m: self.resolve_image(srcns, destns, m), re1line)
            newdoc.append(re2line)

        self.dw.putpage(newdoc, dest, summary='publish page')
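
Both versions of publish rewrite every [[...]] wiki link on a line by passing a callable to re.sub. The stand-alone snippet below illustrates that mechanism; the pattern and resolver are simplified stand-ins for Wiki.rx_link and resolve_link, whose real definitions live elsewhere in the project.

import re

rx_link = re.compile(r"\[\[([^\]|]+)(\|[^\]]*)?\]\]")

def resolve_link(match):
    # rewrite the link target and keep (or synthesize) the label
    target = match.group(1)
    label = (match.group(2) or "|" + target)[1:]
    return "[[public:%s|%s]]" % (target, label)

line = "See [[internal:setup]] and [[internal:usage|the usage page]]."
print(rx_link.sub(resolve_link, line))
# See [[public:internal:setup|internal:setup]] and
# [[public:internal:usage|the usage page]].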
Code Example #5
	def perform(self, fidoc):
		"""Aggregate the pages listed on the TOC page into one document
		and write the result to the configured output page."""
		dw = fidoc.get_wiki()
		logging.info("Loading table of contents %s ..." % self.tocpage)
		tocns = []
		toc = dw.getpage(self.tocpage, pagens = tocns)
		if toc is None:
			logging.error("Table of contents %s not found." % self.tocpage)
			return False
			
		logging.info("Aggregating pages ...")
		doc, chapters = aggregate(dw, toc, tocns, self.embedwikilinks)

		logging.info("Flushing generated content to page %s ..." % self.outpage)
		res = dw.putpage(doc, self.outpage)
		# print(res)
		# locks = dw.lockpage(self.outpage)
		# logging.info("Locks: %s" % locks)
		return res
Code Example #6
    def perform(self, fidoc):
        dw = fidoc.get_wiki()
        logging.info("Loading table of contents %s ..." % self.tocpage)
        tocns = []
        toc = dw.getpage(self.tocpage, pagens=tocns)
        if toc is None:
            logging.error("Table of contents %s not found." % self.tocpage)
            return False

        logging.info("Aggregating pages ...")
        doc, chapters = aggregate(dw, toc, tocns, self.embedwikilinks)

        logging.info("Flushing generated content to page %s ..." %
                     self.outpage)
        res = dw.putpage(doc, self.outpage)
        # print(res)
        # locks = dw.lockpage(self.outpage)
        # logging.info("Locks: %s" % locks)
        return res
Code Example #7
def generatedoc(templatefile, generatefile, fidoc, tocpage,
		aggregatefile=None, chapterfile=None, injectrefs=False,
		ignorepagelinks=[], imagepath="_media/"):
	dw = fidoc.get_wiki()
	
	document = docxwrapper(templatefile, imagepath)

	tocns = []
	toc = dw.getpage(tocpage, pagens=tocns)
	if toc is None:
		logging.fatal("Table of Contents %s not found." % tocpage)
		return

	doc, chapters = aggregate(dw, toc, tocns, showwikiurl = injectrefs)
	
	# print()
	# rx_pub_ns = re.compile('^:ficontent:((socialtv|smartcity|gaming|common):.*|deliverables:d[0-9]+|fiware:ge_usage)$')
	# pub = wikipublisher(dw, mirror.public_pages())
	pub = fidoc.get_publisher()
	
	wp = wikiprocessor(document, dw, tocns, chapters, pub, imagepath, ignorepagelinks)
	
	if aggregatefile is not None:
		with open(aggregatefile, "w") as fo:
			fo.writelines([l + '\n' for l in doc])

	if chapterfile is not None:
		import json
		with open(chapterfile, 'w') as cf:
			json.dump(chapters, cf, sort_keys = False, indent = 4)

	
	# process aggregated document
	
	collectedlines = []
	lastline = None
	
	for line in doc:
		# print(line)
		# line = unicode(line, "utf-8")
		linetype = None
		
		line = wp.replacelinks(line)
		
		for key, rx in rx_line.items():
			result = rx.match(line)
			if result is not None:
				linetype = key
				break
		
		# print "linetype1:", linetype

		if linetype is None:
			linetype = "text"

		if linetype == "endcode":
			linetype = "empty"
		elif lastline == "code":
			linetype = "code"

		if linetype == "endgraph":
			linetype = "empty"
		elif lastline == "graph":
			linetype = "graph"
		
		
		# print "linetype2:", linetype
		
		if lastline != linetype:
			try:
				wp.processlines(lastline, collectedlines)
				collectedlines = []
			except Exception as e:
				# dump the offending lines before re-raising
				print("Problem with lines!")
				for l in collectedlines:
					print(l)

				print()
				print(e)
				raise
			# print "Process lines:", lastline, " - ", len(collectedlines)
		
		lastline = linetype
		collectedlines.append(line)
	
	document.generate(generatefile)
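
The classification loop above matches each line against a dict of compiled patterns (rx_line, defined elsewhere in the module) and falls back to "text" when nothing matches. The sketch below reproduces that dispatch in isolation; the pattern names are guesses based on the linetype strings the code tests for.

import re

rx_line = {
    "code":     re.compile(r"^<code"),
    "endcode":  re.compile(r"^</code>"),
    "graph":    re.compile(r"^<graphviz"),
    "endgraph": re.compile(r"^</graphviz>"),
    "empty":    re.compile(r"^\s*$"),
}

def classify(line):
    # first matching pattern wins; unmatched lines are plain text
    for key, rx in rx_line.items():
        if rx.match(line):
            return key
    return "text"

print([classify(l) for l in ["<code python>", "x = 1", "</code>", ""]])
# ['code', 'text', 'endcode', 'empty']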
Code Example #8
  def _processAutostackMetricRequests(self, engine, requests, modelSwapper):
    """ Execute autostack metric requests, aggregate and stream
    collected metric data

    :param engine: SQLAlchemy engine object
    :type engine: sqlalchemy.engine.Engine
    :param requests: sequence of AutostackMetricRequest objects
    :param modelSwapper: Model Swapper
    """
    # Start collecting requested metric data
    collectionIter = self._metricGetter.collectMetricData(requests)

    # Aggregate each collection and dispatch to app MetricStreamer
    for metricCollection in collectionIter:
      request = requests[metricCollection.refID]

      metricObj = request.metric
      data = None

      if metricCollection.slices:
        aggregationFn = getAggregationFn(metricObj)
        if aggregationFn:
          data = aggregate(metricCollection.slices,
                           aggregationFn=aggregationFn)
        else:
          data = aggregate(metricCollection.slices)

      try:
        with engine.connect() as conn:
          repository.retryOnTransientErrors(repository.setMetricLastTimestamp)(
            conn, metricObj.uid, metricCollection.nextMetricTime)
      except ObjectNotFoundError:
        self._log.warning("Processing autostack data collection results for "
                          "unknown model=%s (model deleted?)", metricObj.uid)
        continue

      if data:
        try:
          self.metricStreamer.streamMetricData(data,
                                               metricID=metricObj.uid,
                                               modelSwapper=modelSwapper)
        except ObjectNotFoundError:
          # We expect that the model exists but in the odd case that it has
          # already been deleted we don't want to crash the process.
          self._log.info("Metric not found when adding data. metric=%s" %
                         metricObj.uid)

        self._log.debug(
          "{TAG:APP.AGG.DATA.PUB} Published numItems=%d for metric=%s;"
          "timeRange=[%sZ-%sZ]; headTS=%sZ; tailTS=%sZ",
          len(data), getMetricLogPrefix(metricObj),
          metricCollection.timeRange.start.isoformat(),
          metricCollection.timeRange.end.isoformat(),
          data[0][0].isoformat(), data[-1][0].isoformat())

      else:
        self._log.info(
          "{TAG:APP.AGG.DATA.NONE} No data for metric=%s;"
          "timeRange=[%sZ-%sZ]", getMetricLogPrefix(metricObj),
          metricCollection.timeRange.start.isoformat(),
          metricCollection.timeRange.end.isoformat())
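
repository.retryOnTransientErrors above wraps a repository call so that transient database errors are retried before the result is used. The helper below sketches that wrap-and-retry pattern in isolation; the names, error types, and backoff policy are illustrative, not the app's actual implementation.

import time

def retry_on_transient_errors(fn, retries=3, delay=0.1,
                              transient=(ConnectionError, TimeoutError)):
    # return a callable that re-invokes fn when a transient error is
    # raised, sleeping with exponential backoff between attempts
    def wrapper(*args, **kwargs):
        for attempt in range(retries):
            try:
                return fn(*args, **kwargs)
            except transient:
                if attempt == retries - 1:
                    raise
                time.sleep(delay * (2 ** attempt))
    return wrapper

# usage mirrors the call site above:
# retry_on_transient_errors(repository.setMetricLastTimestamp)(conn, uid, ts)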
Code Example #9
def generatedoc(templatefile,
                generatefile,
                fidoc,
                tocpage,
                aggregatefile=None,
                chapterfile=None,
                injectrefs=False,
                ignorepagelinks=[],
                imagepath="_media/"):
    dw = fidoc.get_wiki()

    document = docxwrapper(templatefile, imagepath)

    tocns = []
    toc = dw.getpage(tocpage, pagens=tocns)
    if toc is None:
        logging.fatal("Table of Contents %s not found." % tocpage)
        return

    doc, chapters = aggregate(dw, toc, tocns, showwikiurl=injectrefs)

    # print()
    # rx_pub_ns = re.compile('^:ficontent:((socialtv|smartcity|gaming|common):.*|deliverables:d[0-9]+|fiware:ge_usage)$')
    # pub = wikipublisher(dw, mirror.public_pages())
    pub = fidoc.get_publisher()

    wp = wikiprocessor(document, dw, tocns, chapters, pub, imagepath,
                       ignorepagelinks)

    if aggregatefile is not None:
        with open(aggregatefile, "w") as fo:
            fo.writelines([l + '\n' for l in doc])

    if chapterfile is not None:
        import json
        with open(chapterfile, 'w') as cf:
            json.dump(chapters, cf, sort_keys=False, indent=4)

    # process aggregated document

    collectedlines = []
    lastline = None

    for line in doc:
        # print(line)
        # line = unicode(line, "utf-8")
        linetype = None

        line = wp.replacelinks(line)

        for key, rx in rx_line.items():
            result = rx.match(line)
            if result is not None:
                linetype = key
                break

        # print "linetype1:", linetype

        if linetype is None:
            linetype = "text"

        if linetype == "endcode":
            linetype = "empty"
        elif lastline == "code":
            linetype = "code"

        if linetype == "endgraph":
            linetype = "empty"
        elif lastline == "graph":
            linetype = "graph"

        # print "linetype2:", linetype

        if lastline != linetype:
            try:
                wp.processlines(lastline, collectedlines)
                collectedlines = []
            except Exception as e:
                # dump the offending lines before re-raising
                print("Problem with lines!")
                for l in collectedlines:
                    print(l)

                print()
                print(e)
                raise
            # print "Process lines:", lastline, " - ", len(collectedlines)

        lastline = linetype
        collectedlines.append(line)

    document.generate(generatefile)