Example #1
    def load(self, _force=False):
        '''
        Load commit info from GitLogPath
        :return: {bugID:[{'hash':u'', 'author':u'', 'commit_date':u'', 'message':u'', 'fixedFiles':{}}, {}, ...], ...}
        '''
        if os.path.exists(self.GitLogPath) is False or _force is True:
            self.make()

        logfile = codecs.open(self.GitLogPath, 'r', 'utf-8')
        progress = Progress(u'[%s] loading git log data' % self.__name__, 1000,
                            20000, False)
        progress.set_point(0)
        progress.start()
        for logitem in self.file_loader(logfile, _with_filter=False):
            # skip log entries that fixed no files
            if len(logitem['fixedFiles']) == 0: continue

            # map every bug report ID referenced in the commit message
            logitem['linked_bug'] = re.findall(
                r'%s-[0-9]+' % self.ProjectName.upper(), logitem['message'])
            logitem['linked_bug'] = set(logitem['linked_bug'])
            for linked_id in logitem['linked_bug']:
                if linked_id not in self.logs:
                    self.logs[linked_id] = [logitem]
                else:
                    self.logs[linked_id].append(logitem)
            progress.check()
        progress.done()
        logfile.close()
        return self.logs
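
A note on the interface assumed above: Examples #1, #2, #5, #6, #7, #15, #19 and #20 all drive a counter-style Progress with a title, two interval arguments and a flag, plus set_point/set_upperbound/start/check/done. Below is a minimal hypothetical stand-in with those signatures; the interval semantics and output format are assumptions, not the source project's actual code:

# Hypothetical counter-style Progress; only the call signatures come from
# the examples, the reporting behaviour is an assumption.
class Progress(object):
    def __init__(self, title, print_interval, newline_interval, show_bound):
        self.title = title
        self.print_interval = print_interval      # assumed: check() reports every N items
        self.newline_interval = newline_interval  # assumed: long-run line-break interval
        self.show_bound = show_bound              # assumed: append "/upperbound" when True
        self.point = 0
        self.upperbound = None

    def set_point(self, point):
        self.point = point
        return self  # returning self allows chaining, as in Example #5

    def set_upperbound(self, bound):
        self.upperbound = bound
        return self

    def start(self):
        print(u'%s ...' % self.title)

    def check(self):
        self.point += 1
        if self.point % self.print_interval == 0:
            bound = u'/%d' % self.upperbound if (self.show_bound and self.upperbound) else u''
            print(u'%s: %d%s' % (self.title, self.point, bound))

    def done(self):
        print(u'%s: done after %d items' % (self.title, self.point))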
Example #2
    def loads(self):
        '''
        Load raw bug report files from SourceBugPath
        :return: list of bug report items
        '''
        fileCount = self.getFileCounts(self.SourceBugPath)

        bugitems = []

        # show progress
        progress = Progress(u'[%s] Loading bug reports' % self.__name__, 2, 10,
                            True)
        progress.set_upperbound(fileCount)
        progress.start()
        for root, dirs, files in os.walk(self.SourceBugPath):
            for f in files:
                if f[:f.find(u'-')].strip().lower() != self.ProjectName.lower():
                    continue
                #shutil.copy(os.path.join(root, f), os.path.join(_dest, f))
                bugitem = self.get_bugitem(os.path.join(root, f))
                if bugitem is not None:
                    bugitems.append(bugitem)
                progress.check()
        progress.done()
        return bugitems
Example #3
def setup_subscriptions(account_id, environment_id, trails_configuration, sourceAccountSession, targetAccountSession):
    prefix = "Setting up CloudTrails subscriptions for environment %s:" % environment_id
    regions = trails_configuration.keys()
    if not len(regions): return trails_configuration

    progress = Progress(
                len(regions),
                prefix + "\t\t")

    for region in regions:
        trail = trails_configuration.pop(region)
        try:
            trails_configuration[region] = aws_setup_subscription(
                            account_id,
                            environment_id,
                            trail,
                            sourceAccountSession,
                            targetAccountSession,
                            progress)
        except Exception as e:
            print "Error: %s" % (e)
            # details = e.args[0]
            # trails_configuration[u'invalid_trails'][region] = details['reason']
        progress.report()
    progress.done()
    return trails_configuration
Example #4
def upload():
    print 'Connecting to Server...'
    print UPDATE_SERVER_USER + ' ' + UPDATE_SERVER_PWD + ' ' + UPDATE_SERVER_PATH
    ftp = FTP(UPDATE_SERVER_URL)
    ftp.login(UPDATE_SERVER_USER, UPDATE_SERVER_PWD)
    ftp.cwd(UPDATE_SERVER_PATH)

    print 'Uploading full game...'
    fullGamePath = os.path.join(OUT_DIR, 'latest.7z')
    progress = Progress(os.stat(fullGamePath).st_size, 50)
    progress.print_header(10)
    with open(fullGamePath, 'rb') as f:
        ftp.storbinary('STOR latest',
                       f,
                       blocksize=8192,
                       callback=lambda block: progress.add_progress(len(block)))

    print 'Uploading patch...'
    version = find_application_version(NEW_DIR)
    patchPath = os.path.join(OUT_DIR, 'patches', 'v' + str(version))
    progress = Progress(os.stat(patchPath).st_size, 50)
    progress.print_header(10)
    with open(patchPath, 'rb') as f:
        ftp.cwd('patches')
        ftp.storbinary('STOR v' + str(version),
                       f,
                       blocksize=8192,
                       callback=lambda block: progress.add_progress(len(block)))

    ftp.quit()
    print 'Upload Complete'
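
Examples #4, #9 and #12 use a different, byte-oriented bar: Progress(total_bytes, width) plus print_header(n) and add_progress(nbytes), fed from FTP transfer callbacks. A minimal sketch under those assumptions; the '#'-column rendering is illustrative:

import sys

# Hypothetical byte-counting progress bar; signatures match the FTP examples,
# the rendering is an assumption.
class Progress(object):
    def __init__(self, total_bytes, width):
        self.total = max(total_bytes, 1)
        self.width = width
        self.transferred = 0

    def print_header(self, tick_every):
        # scale line above the bar, one tick every `tick_every` columns
        print(''.join('|' if i % tick_every == 0 else '-' for i in range(self.width)))

    def add_progress(self, nbytes):
        # emit one '#' per bar column completed since the last call
        before = int(self.width * self.transferred / self.total)
        self.transferred = min(self.transferred + nbytes, self.total)
        after = int(self.width * self.transferred / self.total)
        sys.stdout.write('#' * (after - before))
        sys.stdout.flush()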
Example #5
	def fill_SubjectSheet(self, _sheet, _group, _srcCounts, _bugCounts, _dupCounts):
		projects = _bugCounts.keys()
		projects.sort()

		size = sum([len(_bugCounts[project]) for project in projects])
		progress = Progress(u'[%s] fill subject' % self.__name__, 2, 10, True)
		progress.set_point(0).set_upperbound(size)
		progress.start()

		styles = [self.base_format, self.base_format, self.base_format, self.number_format, self.number_format]
		for project in projects:
			for version in _bugCounts[project].keys():
				if version == 'all': continue
				values = [_group, project, version.upper(), _bugCounts[project][version], _srcCounts[project][version]]
				self.input_row(_sheet, self.subj_data_row, 6, values, styles)
				self.subj_data_row += 1
				progress.check()
		progress.done()

		#summary
		styles = [self.subtitle_format, self.subtitle_format, self.number_format, self.number_format, self.number_format]
		for project in projects:
			values = [_group, project.upper(),  _bugCounts[project]['all'], _dupCounts[project], _srcCounts[project]['all']]
			self.input_row(_sheet, self.subj_summary_row, 0, values, styles)
			self.subj_summary_row += 1
		pass
Example #6
    def source_counting(self, _group, _project):
        statistics = {}

        progress = Progress('source counting', 2, 10, True)
        progress.set_upperbound(len(self.S.versions[_project].keys()))
        progress.start()
        for version in self.S.versions[_project].keys():
            vname = VersionUtil.get_versionName(version, _project)
            repo = os.path.join(self.S.getPath_source(_group, _project, vname))
            result = self.getCodeCount(repo)
            if result is None: continue
            statistics[vname] = result
            progress.check()
        progress.done()

        maxValue = 0
        for vname in statistics:
            if maxValue < statistics[vname]:
                maxValue = statistics[vname]
        statistics['max'] = maxValue

        pretty = PrettyStringBuilder(_indent_depth=2)
        text = pretty.get_dicttext({_project: statistics})

        with open(os.path.join(self.S.getPath_base(_group, _project),
                               u'sources.txt'), 'w') as f:
            f.write(text)
Example #7
    def make_childmap(self):
        visited = set([])

        q = Queue()
        q.put((self.git.head.commit, None))  # (commit, child_hash)

        progress = Progress(u'[%s] making git childmap' % self.__name__, 500,
                            10000, False)
        progress.set_point(0)
        progress.start()
        while q.empty() is False:
            progress.check()
            commit, child_hash = q.get()
            commit_hash = str(commit)[:7]

            # create child map
            if commit_hash not in self.childmap:
                self.childmap[commit_hash] = set([])
            if child_hash is not None:
                self.childmap[commit_hash].add(child_hash)

            if commit_hash in visited: continue
            visited.add(commit_hash)

            # pass itself to parent
            for parent in commit.parents:
                q.put((parent, commit_hash))

            # add ancestors if this commit has no parents
            if len(commit.parents) == 0:
                self.ancestors.add(commit_hash)
        progress.done()

        pass
Example #8
    def no_swaps_through_time(self,data):
        if len(data.swaps) <= 0:
            logging.info("No swaps, skipping swap plot")
            return

        dividedby = 1
        if(len(data.banks) > 0):
            dividedby = len(data.banks)

        logging.info("Generating swaps through time")
        maxval = len(data.swaps)
        pgs = Progress(maxval)
        rng = [0]*data.steps

        for (j,(x,y,z,start,end,ten)) in enumerate(data.swaps.values()):
            for i in range(end-start):
                rng[i+start] += 1/dividedby
            pgs.update(j+1)

        avgswaps = sum(rng)/data.steps

        pplot.figure()
        pplot.plot(range(len(rng)),rng)
        pplot.title("Swaps per bank through time, average through time: %f"%avgswaps)

        if(self.save_to_file):
            if(not os.path.exists(self.root_path)):
                os.makedirs(self.root_path)

            pplot.savefig(self.root_path+('/%s_swaps_tt.png'%self.prefix))
        else:
            pplot.show()
        pplot.close()

        print ""
Example #9
def download_patches(ftp, patches, numPatchBytes):
    print "Downloading " + str(len(patches)) + " Patches (" + str(numPatchBytes / 1000000) + " MB)"
    clear_temp_dir()
    progress = Progress(numPatchBytes, 50)
    progress.print_header(10)

    remoteBaseDir = ftp.pwd()
    ftp.cwd("patches")

    for patch in patches:
        download_file(ftp, patch, os.path.join(TEMP_DIR, patch), progress)

    ftp.cwd(remoteBaseDir)
Example #11
def get_hump_error_per_type():
    query = """SELECT DISTINCT run.aggregate_id, aggregate_type.type_id FROM `run`
               LEFT OUTER JOIN aggregate_type
                    ON run.aggregate_id = aggregate_type.aggregate_id
               WHERE dissipation < 0.01"""

    session = get_session()

    aggregate_ids = session.execute(query).fetchall()
    session.close()

    errors = {
        1: [],
        2: [],
        3: [],
        4: [],
        5: []
    }
    p = Progress(len(aggregate_ids))
    p.start()
    cnt = 0
    for aggregate_id,type_id in aggregate_ids:
        err,alpha = get_hump_info(aggregate_id)
        errors[type_id].append(err)
        cnt += 1
        p.update(cnt)
    p.finish()

    x = errors.keys()
    y = [np.mean(yi) for yi in errors.values()]
    yerr = [np.std(yi) for yi in errors.values()]

    ymax = [np.max(yi) for yi in errors.values()]
    ymin = [np.min(yi) for yi in errors.values()]

    with file('./simulation_data/type_hump_error_ranges.bin','wb') as fp:
        pickle.dump(errors,fp)

    fig = pplot.figure()
    ax = fig.add_subplot(311)

    ax.bar(x,y,yerr=yerr,color='b')
    ax.set_ylabel("Avergage power law error")
    ax.set_xlabel("Type")

    ax = fig.add_subplot(312)
    ax.set_ylabel("Maximum power law error")
    ax.set_xlabel("Type")
    ax.bar(x,ymax, color='r')

    ax = fig.add_subplot(313)
    ax.set_ylabel("Minimum power law error")
    ax.set_xlabel("Type")
    ax.bar(x,ymin, color='g')

    pplot.show()
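
The simulation examples (#8, #11, #18, #21, #23, #26) share a third contract: Progress(total) with start(), update(current) and finish(); Example #23 even passes p.update straight into the model as a step callback. A minimal sketch assuming a carriage-return percentage display:

import sys

# Hypothetical percentage-style Progress for the simulation examples;
# only Progress(total), start(), update(i) and finish() come from the code above.
class Progress(object):
    def __init__(self, total):
        self.total = max(total, 1)

    def start(self):
        self.update(0)

    def update(self, current):
        # rewrite the same terminal line, which is why the examples print "" afterwards
        sys.stdout.write('\r%6.2f%%' % (100.0 * current / self.total))
        sys.stdout.flush()

    def finish(self):
        self.update(self.total)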
Example #12
def download_full_game(ftp):
    fileSize = find_full_game_size(ftp)
    print "Downloading full application (" + str(fileSize / 1000000) + " MB)..."
    clear_temp_dir()
    progress = Progress(fileSize, 50)
    progress.print_header(10)
    filename = os.path.join(TEMP_DIR, "latest")
    download_file(ftp, "latest", filename, progress)
    print "Extracting files..."
    unzip_directory(filename, TEMP_DIR)
    if os.path.exists(PROJECT_DIR):
        print "Deleting old files..."
        shutil.rmtree(PROJECT_DIR)
    print "Copying new files..."
    os.rename(os.path.join(TEMP_DIR, "bin"), PROJECT_DIR)
    shutil.rmtree(TEMP_DIR)
    print "Done."
Example #15
    def make_tagmap(self):
        q = Queue()
        visited = set([])

        # root node find (queue init)
        for item in list(self.ancestors):
            q.put((item, None))  # (commit_hash, tagname)

        # For each item in queue
        progress = Progress(u'[%s] making git tagmaps' % self.__name__, 500,
                            10000, False)
        progress.set_point(0)
        progress.start()
        while q.empty() is False:
            commit_hash, parent_tag = q.get()

            # If this commit in tags, map with commit_hash and tag
            if commit_hash in self.tags:
                commit_tag = self.tags[commit_hash]
                self.tagmap[commit_hash] = commit_tag

            # if this commit not in tags, map with child commit_hash and tag
            else:
                if commit_hash not in self.tagmap:
                    self.tagmap[commit_hash] = parent_tag
                else:
                    # compare time previous_tag and parent_tag
                    previous_tag = self.tagmap[commit_hash]
                    pre_time = self.tagtimes[previous_tag]
                    par_time = self.tagtimes[parent_tag]
                    if par_time > pre_time:
                        self.tagmap[commit_hash] = parent_tag
                commit_tag = parent_tag

            if commit_hash not in visited:
                visited.add(commit_hash)
                for child_hash in self.childmap[commit_hash]:
                    q.put((child_hash, commit_tag))

            progress.check()
        progress.done()
        pass
Example #16
def download(url,
             local_file,
             progress_title: str,
             progress_text: str = '',
             file_size: int = None):
    logger.info('downloading {} -> {}'.format(url, local_file))

    Progress.start(progress_title)
    Progress.set_label(progress_text)

    def hook(data):
        Progress.set_value(float(data['percent_complete']))

    dl = Downloader(
        url=url,
        filename=local_file,
        progress_hooks=[hook],
        content_length=file_size,
    )

    return dl.download()
Example #17
    def run_epoch(self, train, dev, epoch):
        batch_size = self.config.batch_size
        num_batch = (len(train) + batch_size - 1) // batch_size
        prog = Progress(target=num_batch)

        for i, (word, label) in enumerate(minibatches(train, batch_size)):
            fd, _ = self.feed_dict(word, label, self.config.lr_rate,
                                   self.config.drop_out)
            _, train_loss, summary = self.session.run(
                [self.train_op, self.loss, self.merged], feed_dict=fd)

            prog.update(i + 1, [("train loss", train_loss)])
            if (i % 10 == 0):
                self.file_writer.add_summary(summary, epoch * num_batch + i)

        metrics = self.evaluate(dev)
        msg = " - ".join(
            ["{} {:04.2f}".format(k, v) for k, v in metrics.items()])
        self.log.info(msg)

        return metrics["f1"]
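
Example #17 assumes a Keras-style bar instead: Progress(target=n) plus update(current, [(name, value)]) that tracks named training metrics. A minimal sketch under that assumption; real implementations redraw the line in place and smooth the averages:

# Hypothetical Keras-style progress bar matching Example #17's call sites.
class Progress(object):
    def __init__(self, target):
        self.target = target
        self.sums = {}
        self.counts = {}

    def update(self, current, metrics=()):
        # accumulate running averages of the reported metrics
        for name, value in metrics:
            self.sums[name] = self.sums.get(name, 0.0) + float(value)
            self.counts[name] = self.counts.get(name, 0) + 1
        averages = ' - '.join('%s: %.4f' % (name, self.sums[name] / self.counts[name])
                              for name in sorted(self.sums))
        print('%d/%d %s' % (current, self.target, averages))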
Example #18
    def balance_through_time(self,data):
        if(len(data.gross) <= 0 or len(data.net) <= 0):
            logging.info("No Balances, skipping swap plot")
            return

        logging.info("Generating balances through time")
        pgs = Progress(len(data.net))

        gross = defaultdict(float)
        net = defaultdict(float)

        for j,b in enumerate(data.net):
            for i,x in enumerate(data.net[b]):
                net[i] += math.fabs(x)

            for i,x in enumerate(data.gross[b]):
                gross[i] += math.fabs(x)

            pgs.update(j+1)

        for i in range(len(gross)):
            gross[i] = gross[i]/len(data.net)
            net[i] = net[i]/len(data.net)

        pplot.figure()
        pplot.plot(gross.keys(),gross.values(),label="Gross")
        pplot.plot(net.keys(),net.values(),label="Net")
        pplot.title("Absolute net and gross through time")

        if(self.save_to_file):
            if(not os.path.exists(self.root_path)):
                os.makedirs(self.root_path)

            pplot.savefig(self.root_path+('/%s_balance_tt.png'%self.prefix))
        else:
            pplot.show()
        pplot.close()

        print ""
Example #19
    def load_raw(self, _force=False):
        '''
        Load raw commit info from GitLogPath
        :return: [{'hash':u'', 'author':u'', 'commit_date':u'', 'message':u'', 'fixedFiles':{}}, ...]
        '''
        if os.path.exists(self.GitLogPath) is False or _force is True:
            self.make()

        self.logs = []
        logfile = codecs.open(self.GitLogPath, 'r', 'utf-8')
        progress = Progress(u'[%s] loading git log data' % self.__name__, 1000,
                            20000, False)
        progress.set_point(0)
        progress.start()
        for logitem in self.file_loader(logfile):
            # skip malformed log entries without a commit hash
            #if len(logitem['fixedFiles'])==0: continue
            if logitem['hash'] == '': continue
            self.logs.insert(0, logitem)
            progress.check()
        progress.done()
        logfile.close()
        return self.logs
Example #20
    def unhash_folder(_src, _dest):
        '''
        hashed folder ==> unhashed folder
        example) path/aa/00/filename  ==> path/filename
        :param _src: hashed source folder
        :param _dest: flat destination folder
        :return:
        '''
        if os.path.exists(_dest) is False:
            os.makedirs(_dest)
        progress = Progress(u'Merging bug reports', 20, 1000, False)
        progress.start()
        for root, dirs, files in os.walk(_src):
            for f in files:
                shutil.copy(os.path.join(root, f), os.path.join(_dest, f))
                progress.check()
        progress.done()
Example #21
        query).fetchall()
    session.close()
    return [a[0] for a in agg_ids]


def get_weight(aggregate_id):
    x, y, _, raw = get_aggregate_dist(aggregate_id)
    _, alp = get_hump_error(x, y, raw)
    cumv = get_hump_cumulative_value(x,y)
    return (aggregate_id, alp, cumv)

if __name__ == "__main__":

    aggregateids = get_aggregate_ids()

    rows = []
    p = Progress(len(aggregateids))
    p.start()
    for (i,aggid) in enumerate(aggregateids):
        rows.append(AggregateHumpWeight(*get_weight(aggid)))
        p.update(i)
    
    p.finish()
    
    session = get_session()
    session.bulk_save_objects(rows)
    session.commit()
    session.close()


Example #22
def hook(data):
    Progress.set_value(float(data['percent_complete']))
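
Examples #16, #24, #28 and #32 (and this hook) drive Progress through class methods rather than an instance, and Example #24 registers a GUI object via Progress.register_adapter(I). A minimal sketch of that adapter pattern; the adapter method names are hypothetical, only the Progress-side signatures come from the examples:

# Hypothetical class-level Progress front-end; calls are forwarded to
# registered adapters (e.g. a Qt progress dialog, as in Example #24).
class Progress:
    _adapters = []

    @classmethod
    def register_adapter(cls, adapter):
        cls._adapters.append(adapter)

    @classmethod
    def start(cls, title, length=100):
        for adapter in cls._adapters:
            adapter.progress_start(title, length)  # adapter API is an assumption

    @classmethod
    def set_label(cls, text):
        for adapter in cls._adapters:
            adapter.progress_set_label(text)

    @classmethod
    def set_value(cls, value):
        for adapter in cls._adapters:
            adapter.progress_set_value(value)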
Example #23
    def run(self):
        agstart = time.time()
        for i in xrange(self.no_sims):
            logging.info("Going for simulation %d"%(i+1))
            gc.collect()
            run_id = str(uuid4())

            with DataContainer(self.config,run_id,self.aggregate_id) as dc:
                p = Progress(self.config['model']['no_steps'])

                model_class = None
                if(self.market_type == 1):
                    logging.info("Using default Market")
                    model_class = Market
                elif(self.market_type == 2):
                    logging.info("Using ShuffleIRSMarket")
                    model_class = ShuffleIRSMarket
                elif(self.market_type == 3):
                    logging.info("Using SortedIRSMarket")
                    model_class = SortedIRSMarket
                elif(self.market_type == 4):
                    logging.info("Using RandomSortedIRSMarket")
                    model_class = SortedRandomIRSMarket
                elif(self.market_type == 5):
                    logging.info("Using RandomShuffleIRSMarket")
                    model_class = ShuffleRandomIRSMarket
                elif(self.market_type == 6):
                    logging.info("Using ConstantRandomShuffleIRSMarket")
                    model_class = ConstShuffleIRSMarket
                elif(self.market_type == 7):
                    logging.info("Using quick CRS-IRS-Mkt")
                    model_class = sim
                else:
                    raise "No such market type"

                p.start()
                start = time.time()
                with model_class(self.config['model'],dc,p.update) as m:
                    m.run()

                t = time.time()-start
                p.finish()

                print ""
                logging.info("Run took %f seconds"%t)

                if(self.config['analysis']['do_analysis']):
                    start = time.time()
                    self.do_analysis(dc,run_id)
                    t = time.time()-start
                    logging.info("Analysis took %f seconds"%t)

                if(self.save_data):
                    start = time.time()
                    dc.save_data()
                    t = time.time()-start
                    logging.info("Saving data took %f seconds"%t)

            gc.collect()
            print ""
            print ""

        gc.collect()
        dt = (time.time() - agstart) / 60
        logging.info("Experiment took %f minutes"%dt)

        if(self.config['aggregate']['do_aggregate'] and self.save_data):
            start = time.time()
            self.do_aggregate(dc,run_id)
            logging.info('Aggregation took %f seconds'%(time.time()-start))
Example #24
def start_ui(test=False):
    from PyQt5.QtWidgets import QApplication
    import sys
    from src.ui.tab_reorder import TabReorder
    from src.ui.tab_log import TabLog
    from src.ui.tab_config import TabConfig
    from src.ui.tab_skins import TabSkins
    logger.debug('starting QtApp object')
    global_.QT_APP = QApplication([])
    global_.MAIN_UI = MainUi()
    global_.MAIN_UI.add_tab(TabLog(), helpers={'write_log': 'write'})
    global_.MAIN_UI.add_tab(TabReorder(),
                            helpers={
                                'tab_reorder_update_view_after_remote_scan':
                                'tab_reorder_update_view_after_remote_scan'
                            })

    from src.misc import dcs_installs
    dcs_installs.discover_dcs_installations()

    global_.MAIN_UI.add_tab(TabSkins(), helpers={})

    global_.MAIN_UI.add_tab(TabConfig(),
                            helpers={'update_config_tab': 'update_config_tab'})
    global_.MAIN_UI.show()

    def pre_update_hook():
        if not hasattr(sys, 'frozen'):
            logger.warning('skipping update on script run')
            return False
        else:
            I.hide()
            return True

    def cancel_update_hook():
        I.show()

    from utils import Progress
    # noinspection PyTypeChecker
    Progress.register_adapter(I)

    from src.updater import updater

    updater.find_and_install_latest_release(
        current_version=global_.APP_VERSION,
        executable_path='emft.exe',
        channel=Config().update_channel,
        cancel_update_hook=cancel_update_hook,
        pre_update_hook=pre_update_hook,
    )

    global_.MAIN_UI.update_config_tab()

    if test:

        logger.critical('RUNNING IN TEST MODE')
        import time
        from utils import ThreadPool, nice_exit

        def test_hook():
            time.sleep(10)
            nice_exit()

        pool = ThreadPool(1, 'test')
        pool.queue_task(test_hook)

    sys.exit(global_.QT_APP.exec())
Example #25
from __future__ import division
from market import Market,RandomIRSMarket
from math import fabs
from utils import Progress

import matplotlib.pyplot as pplot
import logging

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)

    steps = 50000
    step_rng = range(steps)
    number_of_agents = 50

    pgrs = Progress(steps)

    logging.info("Starting market simulation")

    mkt = RandomIRSMarket(number_of_agents,steps,False,pgrs.update)
    mkt.run()
    print ""

    logging.info("Ended market simulation, starting plotting")

    ## Plotting
    fig = pplot.figure()
    sx = 4
    sy = 3
    axn = 1
    subdim = (sy,sx)
Example #26
def do_run(steps,
           no_banks,
           threshold,
           max_tenure,
           max_irs_value,
           avalanche_fraction=0.9):
    #steps = 10000
    save = False
    save_risk = False
    save_risk_avalanche_time_series = False
    save_dist = False
    save_giant_component = False
    save_avalanche_progression = False
    save_critical_info = False
    save_avalanche_tree = False
    save_degree_distribution = False
    no_connection_scatter_moments = 0
    connection_scatter_moments = np.random.randint(
        0, steps, no_connection_scatter_moments)

    seed = np.random.randint(0, 1000)
    dcconfig = {
        'model': {
            'no_banks': no_banks,
            'no_steps': steps,
            'threshold': threshold,
            'sigma': 1,
            'max_irs_value': max_irs_value,
            'irs_threshold': -1,
            'dissipation': 0.0,
            'max_tenure': max_tenure
        },
        'analysis': {
            'data_to_save': ['defaults']
        },
        'file_root': './simulation_data/',
        'market_type': 7,
        'seed': seed
    }

    measure_no_steps = 2 * dcconfig['model']['max_tenure']

    ###########################################################################
    dc = DataContainer(dcconfig, str(uuid4()), str(uuid4()))
    p = Progress(steps)

    s = sim(dcconfig['model'],
            dc,
            p.update,
            save_risk,
            save_dist,
            connection_scatter_moments,
            seed,
            avalanche_fraction=avalanche_fraction)
    s.save_degree_distribution = save_degree_distribution
    if (s.save_degree_distribution):
        s.degrees = np.zeros((steps, dcconfig['model']['no_banks']))
        s.no_irs = np.zeros((steps, dcconfig['model']['no_banks']))
    s.save_avalanche_progression = save_avalanche_progression
    s.save_risk_avalanche_time_series = save_risk_avalanche_time_series
    s.collect_critical_info = save_critical_info
    s.save_giant_component = save_giant_component
    s.save_avalanche_tree = save_avalanche_tree
    s.avalanche_tree_file_path = './simulation_data/trees/%s/' % dc.aggregate_id

    s.irs_creations = np.zeros(steps)
    s.irs_removals = np.zeros(steps)

    if (s.save_avalanche_tree):
        os.makedirs(s.avalanche_tree_file_path)

    if (save_giant_component): s.giant_components = np.zeros(s.no_steps)
    ###########################################################################

    start = time.time()
    p.start()
    tme, size = s.run()
    print
    p.finish()

    defaulting_bank = s.defaulting_bank_no
    start_at = tme - measure_no_steps + 1

    print "Large enough avalanche found at %d of size %d" % (tme, size)

    print
    print "Run took %d seconds" % (time.time() - start)
    print
    print "Going for the analysis"

    ###########################################################################
    ## Actual stuff that's needed
    dc = DataContainer(dcconfig, str(uuid4()), str(uuid4()))
    p = Progress(steps)

    s = sim(dcconfig['model'],
            dc,
            p.update,
            save_risk,
            save_dist,
            connection_scatter_moments,
            seed,
            start_at,
            defaulting_bank,
            avalanche_fraction=avalanche_fraction)

    nb = dcconfig['model']['no_banks']
    s.measured_balances = np.zeros((measure_no_steps, nb))
    s.measured_gross_balances = np.zeros((measure_no_steps, nb))
    s.degrees = np.zeros((measure_no_steps, nb))
    s.no_irs = np.zeros((measure_no_steps, nb))
    #s.giant_component = []
    s.defaulted_nodes = []
    s.irs_pb = []
    s.network = np.zeros((nb, nb))
    s.irs_creations = np.zeros(steps)
    s.irs_removals = np.zeros(steps)

    #################
    s.save_degree_distribution = save_degree_distribution
    s.save_avalanche_progression = save_avalanche_progression
    s.save_risk_avalanche_time_series = save_risk_avalanche_time_series
    s.collect_critical_info = save_critical_info
    s.save_giant_component = save_giant_component
    s.save_avalanche_tree = save_avalanche_tree
    s.avalanche_tree_file_path = './simulation_data/trees/%s/' % dc.aggregate_id
    if (s.save_avalanche_tree):
        os.makedirs(s.avalanche_tree_file_path)
    if (save_giant_component): s.giant_components = np.zeros(s.no_steps)
    ###########################################################################

    start = time.time()
    p.start()
    tme, size = s.run()
    p.finish()
    print
    print "Large enough avalanche found at %d of size %d" % (tme, size)

    if s.save_avalanche_progression:
        print "Saving avalanche progression"
        file_path = './simulation_data/avalanche_progression/%s.bin' % dc.aggregate_id
        with file(file_path, 'wb') as fp:
            pickle.dump(s.avalanche_progressions, fp)
            pickle.dump(dcconfig, fp)

    if s.collect_critical_info:
        print "Critical info"
        file_path = './simulation_data/critical/%s.bin' % dc.aggregate_id
        with file(file_path, 'wb') as fp:
            pickle.dump(s.critical_info, fp)
            pickle.dump(s.max_default_size_t.tolist(), fp)
            if (s.save_giant_component):
                pickle.dump(s.giant_components.tolist(), fp)
            pickle.dump(dcconfig, fp)

    if len(connection_scatter_moments) > 0:
        print "Connection Scatters"
        file_path = './simulation_data/connection_scatters/%s.bin' % dc.aggregate_id
        with file(file_path, 'wb') as fp:
            pickle.dump(s.connection_scatters, fp)

    if save_dist:
        file_path = './simulation_data/dists/%s.bin' % dc.aggregate_id
        with file(file_path, 'wb') as fp:
            pickle.dump(s.trials, fp)
            pickle.dump(dcconfig['model']['no_banks'], fp)

    if (True):
        os.makedirs("./simulation_data/large_avalanche_data/%s" %
                    dc.aggregate_id)
        print "Saving stuff"
        file_path = './simulation_data/large_avalanche_data/%s/degrees.bin' % dc.aggregate_id
        with file(file_path, 'wb') as fp:
            pickle.dump(s.degrees.tolist(), fp)

        file_path = './simulation_data/large_avalanche_data/%s/no_irs.bin' % dc.aggregate_id
        with file(file_path, 'wb') as fp:
            pickle.dump(s.no_irs.tolist(), fp)
            pickle.dump(s.irs_pb, fp)

        file_path = './simulation_data/large_avalanche_data/%s/balances.bin' % dc.aggregate_id
        with file(file_path, 'wb') as fp:
            pickle.dump(s.measured_balances.tolist(), fp)
            pickle.dump(s.measured_gross_balances.tolist(), fp)

        #file_path = './simulation_data/large_avalanche_data/%s/gc.bin'%dc.aggregate_id
        #with file(file_path,'wb') as fp:
        #    pickle.dump(s.giant_component,fp)

        file_path = './simulation_data/large_avalanche_data/%s/network.bin' % dc.aggregate_id
        with file(file_path, 'wb') as fp:
            pickle.dump(s.network.tolist(), fp)

        file_path = './simulation_data/large_avalanche_data/%s/defaulted.bin' % dc.aggregate_id
        with file(file_path, 'wb') as fp:
            pickle.dump(s.defaulted_nodes, fp)

        file_path = './simulation_data/large_avalanche_data/%s/irs_data.bin' % dc.aggregate_id
        with file(file_path, 'wb') as fp:
            pickle.dump(s.irs_creations.tolist(), fp)
            pickle.dump(s.irs_removals.tolist(), fp)

        dcconfig['failed_bank'] = s.defaulting_bank_no
        file_path = './simulation_data/large_avalanche_data/%s/config.json' % dc.aggregate_id
        with open(file_path, 'w') as fp:
            json.dump(dcconfig, fp, indent=4)

    print dc.aggregate_id
Example #27
            'threshold': 10,
            'sigma': 1,
            'max_irs_value': 7,  #4,
            'irs_threshold': -1,
            'dissipation': 0.0,
            'max_tenure': 400
        },
        'analysis': {
            'data_to_save': ['defaults']
        },
        'file_root': './simulation_data/',
        'market_type': 7
    }

    dc = DataContainer(dcconfig, str(uuid4()), str(uuid4()))
    p = Progress(steps)

    s = sim(dcconfig['model'], dc, p.update, save_risk, save_dist,
            connection_scatter_moments)
    s.save_degree_distribution = save_degree_distribution
    if (s.save_degree_distribution):
        s.degrees = np.zeros((steps, dcconfig['model']['no_banks']))
        s.no_irs = np.zeros((steps, dcconfig['model']['no_banks']))
    s.save_avalanche_progression = save_avalanche_progression
    s.save_risk_avalanche_time_series = save_risk_avalanche_time_series
    s.collect_critical_info = save_critical_info
    s.save_giant_component = save_giant_component
    s.save_avalanche_tree = save_avalanche_tree
    s.avalanche_tree_file_path = './simulation_data/trees/%s/' % dc.aggregate_id
    s.save_degree_on_default = save_degree_on_default
    s.save_default_rate = save_default_rate
Example #28
    def _decode(self):

        logger.info('decoding lua tables')

        if not self.zip_content:
            self.unzip(overwrite=False)

        Progress.start('Decoding MIZ file', length=3)

        Progress.set_label('Decoding map resource')
        logger.debug('reading map resource file')
        with open(self.map_res_file, encoding=ENCODING) as f:
            self._map_res, self._map_res_qual = SLTP().decode(f.read())
        Progress.set_value(1)

        Progress.set_label('Decoding dictionary file')
        logger.debug('reading l10n file')
        with open(self.dictionary_file, encoding=ENCODING) as f:
            self._l10n, self._l10n_qual = SLTP().decode(f.read())
        Progress.set_value(2)

        Progress.set_label('Decoding mission file')
        logger.debug('reading mission file')
        with open(self.mission_file, encoding=ENCODING) as f:
            mission_data, self._mission_qual = SLTP().decode(f.read())
            self._mission = Mission(mission_data, self._l10n)
        Progress.set_value(3)

        logger.info('decoding done')
Example #29
def rca(functionNode):
    logger = functionNode.get_logger()
    logger.info("==>>>> in rca (root cause analysis " +
                functionNode.get_browse_path())
    progressNode = functionNode.get_child("control").get_child("progress")
    progressNode.set_value(0.1)

    variables = functionNode.get_child("selectedVariables").get_leaves()
    tag = functionNode.get_child("selectedTags").get_value()  #only one tag
    annotations = functionNode.get_child("annotations").get_leaves()
    feature = functionNode.get_child("selectedFeatures").get_value()
    algo = functionNode.get_child("selectedAlgorithms").get_value()
    target = functionNode.get_child("selectedTarget").get_target()

    p = Progress(progressNode)
    p.set_divisor(len(annotations) / 0.5)
    p.set_offset(0.1)
    #now create the data as x-y

    results = {"x": [], "y": []}
    var = variables[0]
    #now iterate over all annotations of the matching type and create feature
    for idx, anno in enumerate(annotations):
        p.set_progress(idx)
        if (anno.get_child("type").get_value()
                == "time") and (tag in anno.get_child("tags").get_value()):
            startTime = anno.get_child("startTime").get_value()
            endTime = anno.get_child("endTime").get_value()
            data = var.get_time_series(startTime, endTime)
            #now create the feature
            feat = calc_feature(data["values"], feature)
            targetValue = get_target(
                target, (date2secs(startTime) + date2secs(endTime)) / 2)
            if feat and targetValue and numpy.isfinite(
                    feat) and numpy.isfinite(targetValue):
                results["x"].append(feat)
                results["y"].append(targetValue)
            else:
                logger.warning(
                    f"no result for {var.get_name} @ {startTime}, anno:{tag}, feat:{feat}, target: {target}"
                )

    #now we have all the x-y

    progressNode.set_value(0.7)
    fig = figure(title="x-y Correlation Plot " + var.get_name(),
                 tools=[PanTool(),
                        WheelZoomTool(),
                        ResetTool(),
                        SaveTool()],
                 plot_height=300,
                 x_axis_label=feature + "(" + var.get_name() + ") @ " + tag,
                 y_axis_label=target.get_name())
    fig.toolbar.logo = None
    curdoc().theme = Theme(json=themes.darkTheme)
    fig.xaxis.major_label_text_color = themes.darkTickColor
    fig.yaxis.major_label_text_color = themes.darkTickColor

    fig.scatter(x=results["x"],
                y=results["y"],
                size=5,
                fill_color="#d9b100",
                marker="o")
    fileName = functionNode.get_child("outputFileName").get_value()
    filePath = os.path.join(myDir, './../web/customui/' + fileName)
    progressNode.set_value(0.8)
    output_file(
        filePath, mode="inline"
    )  #inline: put the bokeh .js into this html, otherwise the default cdn will be taken, might cause CORS problems)
    save(fig)

    #print(results)

    return True
Example #30
def main():
    args = get_user_input()

    targetAccountSession = args.profile and boto3.session.Session(profile_name = args.profile) or None
    sourceAccountSession = args.source_profile and boto3.session.Session(profile_name = args.source_profile) or None

    #
    # Connect to CloudInsight
    #
    ci = CI_API(args.user, args.password, account_id = args.account, locality = args.locality)

    print "Successfully logged in into CloudInsight. Account: %s(%s), User: %s" % \
            (ci.auth_account_name, ci.auth_account_id, ci.auth_user_name)
    #
    # Load configuration file
    #
    config = {}
    environments = []
    with open(args.config) as data_file:    
        config = json.load(data_file)
        if u'role' not in config:
            raise Exception("Missing 'role' attribute in '%s' configuration file" % (args.config))
        if u'external_id' not in config:
            raise Exception("Missing 'external_id' attribute in '%s' configuration file" % (args.config))
        if u'trails' not in config and u'regions' not in config :
            raise Exception("Missing 'trails' and 'regions' configuration in '%s' configuration file" % (args.config))

        role_arn = config[u'role']
        external_id = config[u'external_id']

        if u'environments' in config:
            environments = config[u'environments']
        elif u'aws_account_id' in config:
            environments = ci.get_environments(config[u'aws_account_id'])

    #
    # Get CloudInsight Credential ID for the specified role
    #
    credential_id = get_credential(ci, role_arn, external_id)[u'credential'][u'id']
    print "Obtained credential id for '%s' role" % (role_arn)

    #
    # Get sources for environments specified in the configuration file
    #
    sources = []
    trails = {}
    progress = Progress(
                len(config[u'regions']),
                "Validating configuration.\t\t\t\t\t\t\t\t\t\t")
    for region_name, region_config in config[u'regions'].iteritems():
        progress.report()
        if region_config[u'type'] == u'queue':
            if not u'queue' in region_config:
                raise Exception("Invalid config file. 'queue' property is missing for '%s' region" % region_name)

            if targetAccountSession and not validate_queue(region_name, region_config[u'queue'], targetAccountSession):
                raise Exception("Invalid config file. '%s' queue doesn't exist in '%s' region in '%s' AWS Account." %\
                               (region_config[u'queue'], region_name, get_account_id(targetAccountSession) ) )

            bucket_region = u'bucket_region' in region_config and region_config[u'bucket_region'] or u'us-east-1'
            for environment_id in environments:
                result = ci.get_sources(environment_id = environment_id, region = region_name)
                sources.append(update_source_config(
                        len(result) and result[0] or None,
                        ci.account_id,
                        environment_id,
                        region_name,
                        credential_id = credential_id,
                        bucket_region = bucket_region,
                        queue = get_queue_name(region_config[u'queue'])))
        elif region_config[u'type'] == u'trail':
            if u'trail' not in region_config or not region_config[u'trail']:
                raise Exception("Invalid config file. 'trail' property is missing '%s' region" % region_name)
            
            trail = get_cloud_trail_configuration(
                                    region_name,
                                    region_config[u'trail'], 
                                    sourceAccountSession,
                                    targetAccountSession)
            if trail:
                trails[region_name] = trail
    progress.done()

    #
    # Setup CloudTrail subscriptions
    #
    for environment_id in environments:
        trails_configuration = setup_subscriptions(
                                    args.account,
                                    environment_id,
                                    trails,
                                    sourceAccountSession,
                                    targetAccountSession)

        for region_name, trail_configuration in trails_configuration.iteritems():
            result = ci.get_sources(environment = environment_id, region = region_name)
            sources.append(update_source_config(
                    len(result) and result[0] or None,
                    ci.account_id,
                    environment_id,
                    region_name,
                    credential_id = credential_id,
                    bucket_region = trail_configuration[u'bucket_region'],
                    queue = trail_configuration[u'sqs_queue_name']))

    #
    # Create CloudInsight sources
    #
    for source in sources:
        print "Updating '%s' source in '%s' environment." %\
              (source[u'source'][u'name'], source[u'source'][u'environment'])
        ci.create_source(source)
    print "Successfully updated CloudInsight configuration."
    print_instructions(role_arn)
Example #31
def varstatistics(functionNode):
    logger = functionNode.get_logger()
    logger.info("==>>>> statistics " + functionNode.get_browse_path())
    progressNode = functionNode.get_child("control").get_child("progress")
    progressNode.set_value(0)
    #functionNode.get_child("control.signal").set_value(None)

    vars = functionNode.get_child("variable").get_targets()
    widget = functionNode.get_child("widget").get_target()
    bins = functionNode.get_child("bins").get_value()
    tags = functionNode.get_child("annotations").get_value()
    startTime = date2secs(widget.get_child("startTime").get_value())
    endTime = date2secs(widget.get_child("endTime").get_value())

    vars = {var.get_id(): {"node": var} for var in vars}

    #first 30% progress:
    prog = Progress(progressNode)
    progressNode.set_value(0.1)
    prog.set_offset(0.1)
    #prog.set_divisor()

    if tags:
        allAnnoNodes = widget.get_child(
            "hasAnnotation.annotations").get_leaves()
        allAnnos = []
        prog.set_divisor(len(allAnnoNodes) / 0.2)
        for index, node in enumerate(allAnnoNodes):
            prog.set_progress(index)
            if node.get_child("type").get_value() == "time":
                thisTags = node.get_child("tags").get_value()
                if any(tag in tags for tag in thisTags):
                    anno = {}
                    for child in node.get_children():
                        anno[child.get_name()] = child.get_value()
                    if date2secs(anno["startTime"]) >= startTime and date2secs(
                            anno["endTime"]
                    ) <= endTime:  #take this anno only if it is inside the current start/end time
                        allAnnos.append(anno)
        if allAnnos == []:
            give_up(functionNode, "no matching annotations in selected time")
            return False
    else:
        allAnnos = []

    progressNode.set_value(0.3)

    logger.debug(f"statistics annotations to look at: {len(allAnnos)}")
    prog.set_offset(0.3)
    totalAnnos = max(len(allAnnos), 1)
    totalCount = len(vars) * totalAnnos

    prog.set_divisor(totalCount / 0.3)
    totalValids = 0
    for varIndex, var in enumerate(vars):
        info = vars[var]
        if tags:
            #iterate over all start and end times
            values = numpy.asarray([], dtype=numpy.float64)
            for annoIndex, anno in enumerate(allAnnos):
                thisValues = info["node"].get_time_series(
                    anno["startTime"], anno["endTime"])["values"]
                values = numpy.append(values, thisValues)
                myCount = varIndex * totalAnnos + annoIndex
                prog.set_progress(myCount)
        else:
            values = info["node"].get_time_series(startTime, endTime)["values"]

        valids = numpy.count_nonzero(numpy.isfinite(values))  # number of finite samples
        totalValids += valids
        hist, edges = numpy.histogram(values, bins=bins)
        hist = hist / len(values)  #normalize
        info["hist"] = hist
        info["edges"] = edges

    #make a plot
    if totalValids == 0:
        give_up(
            functionNode,
            "all variables have no data in the selected time and annotations"
        )
        return False

    progressNode.set_value(0.6)

    hover1 = HoverTool(tooltips=[('x,y', '$x,$y')], mode='mouse')
    hover1.point_policy = 'snap_to_data'
    hover1.line_policy = "nearest"

    tools = [
        PanTool(),
        WheelZoomTool(),
        BoxZoomTool(),
        ResetTool(),
        SaveTool(), hover1
    ]

    title = "Statistics of " + str(
        [info["node"].get_name() for var, info in vars.items()])
    if tags:
        title = title + " in annotation: " + str(tags)

    fig = figure(title=title, tools=tools, plot_height=300)
    fig.toolbar.logo = None

    curdoc().theme = Theme(json=themes.darkTheme)
    fig.xaxis.major_label_text_color = themes.darkTickColor
    fig.yaxis.major_label_text_color = themes.darkTickColor

    for index, var in enumerate(vars):
        info = vars[var]
        col = themes.darkLineColors[index]
        hist = info["hist"]
        edges = info["edges"]

        fig.quad(top=hist,
                 bottom=0,
                 left=edges[:-1],
                 right=edges[1:],
                 fill_color=col,
                 line_color=col,
                 alpha=0.8,
                 legend_label=info["node"].get_name())

    fig.legend.location = "top_left"
    fileName = functionNode.get_child("fileName").get_value()
    filePath = os.path.join(myDir, './../web/customui/' + fileName)

    # now make the trend box plot, but only for tags
    # for each variable we create statistics for the annotations and prepare the data
    # {"node":Node(), "boxLower":[], "boxUpper", "mean", "limitUpper", "limitLower"}
    #

    startTime = date2secs(widget.get_child("startTime").get_value(
    ))  #we only take tags that are inside the current zoom of the widgets
    endTime = date2secs(widget.get_child("endTime").get_value())

    boxPlots = []
    allTimes = []
    if tags:
        for index, var in enumerate(vars):
            info = {
                "node": vars[var]["node"],
                "boxLower": [],
                "boxUpper": [],
                "median": [],
                "time": [],
                "limitUpper": [],
                "limitLower": [],
                "mean": []
            }
            for anno in allAnnos:
                data = info["node"].get_time_series(anno["startTime"],
                                                    anno["endTime"])
                if len(data["values"]):
                    data["values"] = data["values"][numpy.isfinite(
                        data["values"])]
                    #remove the nan
                if len(data["values"]):

                    #make the statistics
                    info["time"].append(numpy.median(data["__time"]) * 1000)
                    allTimes.append(numpy.median(data["__time"]) * 1000)
                    info["limitLower"].append(
                        numpy.quantile(data["values"], 0.01))
                    info["limitUpper"].append(
                        numpy.quantile(data["values"], 0.99))
                    info["boxLower"].append(
                        numpy.quantile(data["values"], 0.25))
                    info["boxUpper"].append(
                        numpy.quantile(data["values"], 0.75))
                    info["median"].append(numpy.median(data["values"]))
                    info["mean"].append(numpy.mean(data["values"]))
            boxPlots.append(info)

        format = "%Y-%m-%d-T%H:%M:%S"
        custom = """var local = moment(value).tz('UTC'); return local.format();"""  #%self.server.get_settings()["timeZone"]

        hover = HoverTool(tooltips=[('date', '@x{%F}')],
                          formatters={'@x': CustomJSHover(code=custom)},
                          mode='mouse')
        hover.point_policy = 'snap_to_data'
        hover.line_policy = "nearest"
        tools = [
            PanTool(),
            BoxZoomTool(),
            WheelZoomTool(),
            ResetTool(), hover,
            SaveTool()
        ]

        fig2 = figure(title="trends",
                      tools=tools,
                      plot_height=300,
                      x_axis_type='datetime')
        fig2.xaxis.major_label_text_color = themes.darkTickColor
        fig2.yaxis.major_label_text_color = themes.darkTickColor

        progressNode.set_value(0.7)

        fig2.xaxis.formatter = DatetimeTickFormatter(years=format,
                                                     days=format,
                                                     months=format,
                                                     hours=format,
                                                     hourmin=format,
                                                     minutes=format,
                                                     minsec=format,
                                                     seconds=format)
        fig2.toolbar.logo = None
        #fig2.line([1,2,3],[1,2,3])
        #calc with of vbars
        if len(allAnnos) > 1:
            xTimesStart = min(allTimes)
            xTimesEnd = max(allTimes)
            width = (xTimesEnd - xTimesStart) / 2 / len(allAnnos)
        else:
            width = 1000000

        for index, info in enumerate(boxPlots):
            #each info is for one variable
            col = themes.darkLineColors[index]
            fig2.segment(info["time"],
                         info["limitUpper"],
                         info["time"],
                         info["boxUpper"],
                         line_color=col)
            fig2.segment(info["time"],
                         info["limitLower"],
                         info["time"],
                         info["boxLower"],
                         line_color=col)

            width = 20
            #fig2.vbar(info["time"],width=width,bottom=info["median"],top=info["boxUpper"],fill_color=col,line_color="black",width_units='screen')
            #fig2.vbar(info["time"],width=width,bottom=info["boxLower"],top=info["median"],fill_color=col,line_color="black",width_units='screen')
            #upper box
            sizUpper = numpy.asarray(info["boxUpper"]) - numpy.asarray(
                info["median"])
            medUpper = numpy.asarray(info["median"]) + sizUpper / 2
            fig2.rect(x=info["time"],
                      y=medUpper,
                      width_units='screen',
                      width=20,
                      height=sizUpper,
                      fill_color=col,
                      line_color="black")

            #lower box
            sizLower = numpy.asarray(info["median"]) - numpy.asarray(
                info["boxLower"])
            medLower = numpy.asarray(info["median"]) - sizLower / 2
            fig2.rect(x=info["time"],
                      y=medLower,
                      width_units='screen',
                      width=20,
                      height=sizLower,
                      fill_color=col,
                      line_color="black")

            #sort data for line
            x = numpy.asarray(info["time"])
            y = numpy.asarray(info["mean"])
            order = numpy.argsort(x)
            x = x[order]
            y = y[order]
            fig2.line(x, y, line_color=col)

        progressNode.set_value(0.8)
    else:
        #no fig2
        pass

    output_file(
        filePath, mode="inline"
    )  #inline: put the bokeh .js into this html, otherwise the default cdn will be taken, might cause CORS problems
    if tags:
        save(layout([[fig], [fig2]]))
    else:
        save(fig)

    return True
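
Examples #29 and #31 use a fourth variant: a Progress wrapped around a progressNode, with set_offset and set_divisor mapping loop indices into a 0..1 range (e.g. set_divisor(len(annotations) / 0.5) with set_offset(0.1) walks the node from 0.1 towards 0.6). A minimal sketch consistent with those call sites; the node API is inferred from how progressNode is used above:

# Hypothetical node-backed Progress: set_progress(i) writes
# offset + i/divisor to the underlying progress node.
class Progress:
    def __init__(self, progress_node):
        self.node = progress_node
        self.offset = 0.0
        self.divisor = 1.0

    def set_offset(self, offset):
        self.offset = offset

    def set_divisor(self, divisor):
        self.divisor = divisor

    def set_progress(self, current):
        self.node.set_value(self.offset + current / self.divisor)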
Example #32
    def _encode(self):

        logger.info('encoding lua tables')

        Progress.start('Encoding MIZ file', length=3)

        Progress.set_label('Encoding map resource')
        logger.debug('encoding map resource')
        with open(self.map_res_file, mode='w', encoding=ENCODING) as f:
            f.write(SLTP().encode(self._map_res, self._map_res_qual))
        Progress.set_value(1)

        Progress.set_label('Encoding l10n dictionary')
        logger.debug('encoding l10n dictionary')
        with open(self.dictionary_file, mode='w', encoding=ENCODING) as f:
            f.write(SLTP().encode(self.l10n, self._l10n_qual))
        Progress.set_value(2)

        Progress.set_label('Encoding mission file')
        logger.debug('encoding mission file')
        with open(self.mission_file, mode='w', encoding=ENCODING) as f:
            f.write(SLTP().encode(self.mission.d, self._mission_qual))
        Progress.set_value(3)

        logger.info('encoding done')