def loadData(experiment):
    size = experiment.get("size", 0)
    data, label, description, reduce = experiment["dataset"]()

    if size > 0:
        initialReduceBlockSize = np.arange(size, size+0.2, 0.1)
        testSetPercentage = 0.2
        trainDataBlocks, trainLabelBlocks, testDataBlocks, testLabelBlocks = data_factory.splitDatasetInBlocks(data, np.array(label), initialReduceBlockSize, testSetPercentage)

        data = trainDataBlocks[0][0]
        label = trainLabelBlocks[0][0]

    # if required (cancer datasets) perform binary encoding
    if experiment['binary_encode']:
        print "perform binary encode"
        analyze(data, label, "before encode")
        # encode features (one-hot-encoder / dummy coding)
        enc = OneHotEncoder()
        enc.fit(data)
        data = enc.transform(data).toarray()
        analyze(data, label, "after encode")

    return data, label, description, reduce
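
For reference, a minimal standalone sketch of what the one-hot step above produces; the toy matrix is invented for illustration:

import numpy as np
from sklearn.preprocessing import OneHotEncoder

toy = np.array([[0, 1],
                [1, 0],
                [2, 1]])
enc = OneHotEncoder()
# column 0 has 3 distinct values and column 1 has 2, so the
# encoded matrix has 3 + 2 = 5 indicator columns
print(enc.fit_transform(toy).toarray())
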
def drawGraphForDatasets(datasets, fileName, item, trainBlockSizes, metric, ylim = []):

    plt.figure(item)
    plt.subplot(111)
    plt.title(metric)
    plt.xlabel("% of dataset")
    plt.ylabel("score: %s" % metric)

    plt.grid()

    for load in datasets:
        # load it lazy
        data, label, desc = load()

        # for test - make dataset smaller
        initialReduceBlockSize = np.arange(0.5, 0.7, 0.1)
        testSetPercentage = 0.2  # define before first use (was referenced before assignment)
        trainDataBlocks, trainLabelBlocks, testDataBlocks, testLabelBlocks = factory.splitDatasetInBlocks(data, np.array(label), initialReduceBlockSize, testSetPercentage)
        data = trainDataBlocks[0][0]
        label = trainLabelBlocks[0][0]

        analyze(data, label)

        maxItemsInDataset = len(label)

        testSetPercentage = 0.02

        trainDataBlocks, trainLabelBlocks, testDataBlocks, testLabelBlocks = factory.splitDatasetInBlocks(data, np.array(label), trainBlockSizes, testSetPercentage)

        x = list()
        y = list()

        for i in range(len(trainDataBlocks)):
            trainData = trainDataBlocks[i]
            trainLabel = trainLabelBlocks[i]
            # testData = testDataBlocks[i]
            # testLabel = testLabelBlocks[i]

            numInstances = np.shape(trainData[0])
            score = calcScore(metric, trainData[0], trainLabel[0])


            xPercentage = (numInstances[0] * 100) / maxItemsInDataset
            x.append(xPercentage)

            #y.append(float("%.4f" % score))
            y.append(score)
            #print "x:%s, y:%s" % (numInstances[0], score)
        print "------------------------"
        print y
        print np.mean(y)
        print "------------------------"

        plt.plot(x, y, label=desc)


    plt.legend(loc="best")
    if len(ylim) > 0:
        plt.ylim(ylim)
    plt.savefig("performance/output/%s_%s.png" % (fileName, metric), dpi=320)
Example #3
File: test.py, Project: eatonphil/LSA
def test(path=TEST_DIR):
	for circ in os.listdir(path):
		if circ.endswith('.circ'):
			analyze.analyze(path+circ)
			for lsa in os.listdir('./'):
				if lsa.endswith(analyze.LSA_FORMAT):
					shutil.move(lsa, path+circ[:-5]+'-'+lsa)
		if os.path.isdir(path+circ):
			test(path+circ+'/')
Example #4
def main():
    if len(sys.argv) > 1:
        os.chdir(sys.argv[1])
    db = sqlite3.connect('gtfs.db')
    files = open_files()
    create_tables(db)
    parse_files(db, files)
    analyze.analyze(db)
    db.close()
Example #5
def evaluate_pot(table):
    """ Evaluates pot on table and creates side pots if necessary """
    # TODO: need more complete unit test coverage of this function
    pot = table.pots[-1]
    if pot.side_pots:
        # import pdb
        # pdb.set_trace()
        pot.side_pots = sorted(pot.side_pots)
        while pot.side_pots:
            amount = pot.side_pots.pop(0)

            x = 0
            new_players = []
            for player in pot.players:
                player.equity -= amount
                x += 1
                new_players.append(player)

            for player in pot.players:
                if player.stack == 0 and player.equity == 0:
                    pot.players.remove(player)
            for p in pot.side_pots:
                ind = pot.side_pots.index(p)
                pot.side_pots[ind] -= amount

            amount = amount * x
            amount += pot.amount
            pot.amount = 0
            new_pot = Pot(new_players, amount)
            table.pots.insert(0, new_pot)

            if len(pot.players) == 1:
                pot.side_pots = []

    for player in pot.players:
        pot.amount += player.equity
        player.equity = 0

    if len(pot.players) == 1:
        # Give last guy in pot money
        pot.players[0].stack += pot.amount
        table.pots.pop()

        if not table.pots:

            # Start new hand
            new_hand(table)
        else:
            while len(table.community_cards) < 5:
                deal(table)
            analyze(table)
    else:
        if len(table.community_cards) < 5:
            deal(table)
        else:
            analyze(table)
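
The side-pot bookkeeping above is easier to see on a standalone toy example; the names and stakes are invented, and this is plain arithmetic, not the project's Pot class:

equities = {'A': 50, 'B': 100, 'C': 100}   # chips each player committed this hand
level = min(equities.values())             # smallest all-in: 50
main_pot = level * len(equities)           # 150, contested by A, B and C
rest = {p: e - level for p, e in equities.items() if e > level}
side_pot = sum(rest.values())              # 100, contested by B and C only
print(main_pot, side_pot)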
Example #6
File: pulse.py, Project: relh/cath-hacks
def operate(queue):
    while True:
        time.sleep(0.25)
        #analyze current queue
        try:
            analyze.analyze([item[1] for item in list(queue.queue)])
        except Exception as e:
            # swallowing every exception here is bad practice, but keeps the loop alive
            print('ANALYZE PROBLEM')
            print(str(e))
Example #7
  def LoadShowImage(self):
    QtGui.QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
    #initialization
    self.data_struct = data_struct.h5()
    self.stk_sam = data_stack.data(self.data_struct)
    self.stk_bkg = data_stack.data(self.data_struct)
    self.anlz_sam = analyze.analyze(self.stk_sam)
    self.anlz_bkg = analyze.analyze(self.stk_bkg)
    self.common = common()

    #load sample and background
    if self.ui.samTxrm.isChecked() == True:            
        #self.new_stack_refresh()  
        self.stk_sam.new_data()
        #self.stk.data_struct.delete_data()
        self.anlz_sam.delete_data()  
        self.stk_sam.read_txrm(self.sam_filepath)        
                
    if self.ui.samXrm.isChecked() == True:              
        self.stk_sam.new_data()
        self.anlz_sam.delete_data()
        #self.sam_filelist = os.path.basename(str(self.sam_filepaths))
        self.stk_sam.read_xrm_list(self.sam_filepaths) 

    if self.ui.bkgTxrm.isChecked() == True:
        self.stk_bkg.new_data()
        self.anlz_bkg.delete_data() 
        self.stk_bkg.read_txrm(self.bkg_filepath)

    if self.ui.bkgXrm.isChecked() == True:
        self.stk_bkg.new_data()
        self.anlz_bkg.delete_data()
        #self.bkg_filelist = os.path.basename(str(self.bkg_filepaths))
        self.stk_bkg.read_xrm_list(self.bkg_filepaths)
 
    self.common.stack_loaded = 1

    #update image information
    self.iev = int(self.stk_sam.n_ev)
    x=self.stk_sam.n_cols
    y=self.stk_sam.n_rows
    z=self.iev  
    print(z)             
    self.ix = int(x/2)
    self.iy = int(y/2)
    
    #calculate scaleimg
    sam_image_stack = self.stk_sam.absdata.copy() 
    bkg_image_stack = self.stk_bkg.absdata.copy()
    self.scale_image_stack = np.true_divide(sam_image_stack,bkg_image_stack)

    #refresh_widgets
    #show image
    self.ShowImage()
    QtGui.QApplication.restoreOverrideCursor()
Example #8
 def test_analyze(self):
     """ Will analyzer run appropriately? """
     pot = self.table.pots[-1]
     pot.amount = 101
     pot.players[1].hole_cards[0].value = 14
     pot.players[1].hole_cards[0].suit = 'h'
     pot.players[1].hole_cards[1].value = 13
     pot.players[1].hole_cards[1].suit = 'h'
     analyze.analyze(self.table)
     expected1 = 151
     expected2 = 150
     self.assertEqual(expected1, pot.players[0].stack)
     self.assertEqual(expected2, pot.players[1].stack)
Example #9
File: main.py, Project: jdahlin/proj
    def _analyze(self, source_view):
        v = analyze.analyze(source_view.get_content())
        colors = _TANGO_PALETTE[:]
        for view in self._analyze_views:
            view.destroy()
        self._analyze_views = []
        for reference in v.references.values():
            if not reference.value:
                continue
            doc = reference.value.__doc__
            if not doc:
                continue
            info_ = TextArea()
            n = reference.node
            info_.set_content(doc.encode('utf-8'))

            color = colors.pop()
            rgba = html_to_rgba(color, alpha=0.2)
            source_view.highlight(n.lineno - 1,
                                  n.col_offset,
                                  n.col_offset + len(n.id),
                                  rgba)
            info_.view.set_editable(False)
            info_.view.set_cursor_visible(False)
            info_.set_background_color(rgba)
            self.right_box.add_child(info_, expand=True)
            self._analyze_views.append(info_)

        return source_view
Example #10
def sentiments(text):
    analysis = analyze(text)
    result = {
        "Text": text,
        "Result": analysis
    }
    return result
Example #11
File: doendo.py, Project: jornada/DOENDO
def main():
	(options, args) = parse()

	print('''
==========================================================================
  Welcome to DOENDO - the tool that makes your FORTRAN code less painful
              DOENDO Copyright (C) 2011  Felipe H. da Jornada
              This program comes with ABSOLUTELY NO WARRANTY.
==========================================================================''')

	fname = sys.argv[1]
	fin = open(fname)
	lines = fin.readlines()
	fin.close()

	#need file as single character string
	data = ''.join(lines)

	#prepare DOM of source code
	doc = analyze.analyze(fname, data)
	#print useful info about code (get small variables for free)
	small_vars = analyze.print_info(doc)

	while True:
		task_loop(doc, lines)
Example #12
def process(infile, inbuffer, imgid, imgname, imgurl, uid):
	app.logger.info('Render req from uid: %s', uid)
	ctx = zmq.Context()
	sock = ctx.socket(zmq.PUSH)
	sock.connect('tcp://127.0.0.1:64646')
	sock.send_pyobj({
		'uid': uid,
		'message': 'Reading...',
		'status': 'inprogress'
	})
	def callback(event, tid, desc=None, secs=None):
		if event == 'start':
			sock.send_pyobj({
				'uid': uid,
				'message': desc,
				'status': 'inprogress'
			})
	track = pymasvis.load_file(infile, inbuffer)
	if type(track) is int:
		sock.send_pyobj({
			'uid': uid,
			'message': 'Failed to find audio in file',
			'status': 'error'
		})
		eventlet.sleep(2)
		return {
			'imgid': imgid,
			'error': 'Failed to find audio in file'
		}
	sock.send_pyobj({
		'uid': uid,
		'message': 'Analysing...',
		'status': 'inprogress'
	})
	result = pymasvis.analyze(track, callback=callback)
	detailed, overview = pymasvis.render(track, result, track['metadata']['name'], render_overview=False, callback=callback)
	img = Image.open(detailed)
	img = img.convert(mode='P', palette='ADAPTIVE', colors=256)
	imgbuf = io.BytesIO()
	img.save(imgbuf, 'PNG', optimize=True)
	detailed.close()
	img.close()
	imgbuf.seek(0)
	sock.send_pyobj({
		'uid': uid,
		'message': 'Done!',
		'url': imgurl,
		'status': 'finished'
	})
	sock.close()
	return {
		'imgid': imgid,
		'imgbuf': imgbuf,
		'imgname': imgname,
		'imgurl': imgurl,
		'uid': uid,
		'ts': time.time()
	}
Example #13
def execute(experiment):
    folder = setupExperimentFolder(experiment)
    algos = experiment['algos']
    metrics = experiment['yValues']
    dimensions = experiment["dimensions"]
    experimentName = experiment["name"]

    # now load the data as the function was passed as a lazy reference
    data, label, description, reduce = loadData(experiment)

    # just to make sure data are correct
    analyze(data, label)

    # we want one figure for each y-metric
    x, yValues = runExperimentForMetric(data, label, algos, dimensions)
    for i in range(len(metrics)):
        metric = metrics[i]
        plt.figure(i)
        plt.subplot(111)
        plt.grid()
        plt.xlabel("dimensions")
        plt.ylabel(metric)

        for algo in yValues:

            y = yValues[algo][metric]
            lbl = "%s - (%.2f)" % (algo, np.mean(y))
            #print "*******"
            #print(lbl)
            plt.plot(x, y, label=lbl)

        #plt.legend(loc="best")
        plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.08),
                   fancybox=True, shadow=True, ncol=2)

        plt.savefig("%s/dimension_vs_%s.png" % (folder, metric), dpi=320, bbox_inches = "tight")

        with open("%s/log_dimension_vs_%s.csv" % (folder, metric), "wb") as csvfile:
            writer = csv.writer(csvfile, delimiter=",", quotechar="|", quoting=csv.QUOTE_MINIMAL)
            x = [str(i) for i in x]
            writer.writerow(["dimensions"]+x)

            for algo in yValues:
                y = yValues[algo][metric]
                writer.writerow([algo] + y)
Example #14
 def analyze(self, file):
     try:
         data = analyze(self.translate_path(file))
         self.send_response(200)
         self.send_header("Content-type", "application/json")
         self.end_headers()
         self.wfile.write(bytes(json.dumps(data), "utf-8"))
     except FileNotFoundError:
         self.send_error(422, "Log file not found", "Wrong URL parameter")
Example #15
def save_post(post):
	"""Save a post into the database."""
	id = post["data"]["id"]
	created = post["data"]["created_utc"]
	title = post["data"]["title"]

	# Combine data to create the key. This allows us to show some
	# basic info on the website without needing to fetch all data.

	key = "%i-%s-%s" % (int(created), id, title)

	# Store new post? Only if we didn't already save it.
	# Perform analysis functions on post.

	if key not in db:
		analyze(post)
		db[key] = post

	db.sync()
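
For illustration, the composite key built above looks like this (values are hypothetical):

key = "%i-%s-%s" % (1300000000, "fg4xy", "Some post title")
# -> '1300000000-fg4xy-Some post title'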
Example #16
	def update(self, dt):
		global timeStart0
		global time0
		global cycle
		if GPIO.input(station1) == True:
			self.state = 'normal'				#normal/non-activated state
			self.background_normal = ""			#turns off 'shading'
			self.background_down = ""			#turns off 'shading'
			self.background_color = [0,0,0,0]		#specifies the non-activated background color in RGBA (A is opacity)
			self.text = 'Home'				#specifies button text
			timeStart0 = 'start'				#used to grab first instance of the time
		else:
			self.state = 'down'				#activated state
			self.background_normal = ""			#turns off 'shading'
			self.background_down = ""			#turns off 'shading'
			self.background_color = [0,1,0,0.5]		#specifies the activated background color in RGBA (A is opacity)
			if timeStart0 == 'start':
				time0 = time.time()			#starts the variable to grab the time
			#	self.text = time0			#displays the time variable
				timeStart0 = 'stop'			#makes sure the variable doesn't update the loop when the button is held down
				cycle = 'run'
				analyze()
Example #17
 def test_aaAltimxToken(self):
     name = "aaAltimxToken"
     funcname = "foo(uint256[])"
     fname = os.path.join(optPath, name, '{0}.sol'.format(name))
     D, R = analyze(fname, funcname=funcname)
     self.compare_refinement(R, R.types, set(['i']), set(['i', '_amountOfLands']),
                             set(['totalAmount', 'i', 'amount']),
                             set(['_amountOfLands', 'Factor', 'amount', 'i',
                                  'totalAmount']))
     self.compare_dependencies(D.dependencies,
                               {'totalAmount':
                                set(['totalAmount', 'amount', '_amountOfLands',
                                     'i', 'Factor']),
                                'amount':
                                set(['_amountOfLands', 'Factor', 'i']),
                                'i': set(['i'])})
Example #18
 def test_AquaToken(self):
     name = "AquaToken"
     fname = os.path.join(optPath, name, '{0}.sol'.format(name))
     D, R = analyze(fname)
     self.compare_refinement(R, R.types, set(['idx']), set(['idx', 'toRewardIdx']),
                             set(['idx', 'updatedBalance']),
                             set(['idx', 'rewards', 'updatedBalance',
                                  'holding_totalTokens', 'toRewardIdx',
                                  'fromRewardIdx']))
     self.compare_dependencies(D.dependencies,
                               {'updatedBalance':
                                set(['rewards', 'updatedBalance', 'idx',
                                     'holding_totalTokens',
                                     'fromRewardIdx']),
                                'idx':
                                set(['fromRewardIdx', 'idx'])})
Example #19
def analyze_endpoint(uid):
    r = requests.get(DB_URL + str(uid))
    weibo = r.json()
    batch = []
    for w in weibo:
        batch.append(w['content'])
        batch.extend([[c] for c in w['comments']])
    res = [float(r) for r in analyze(batch, 0)]
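    # res is flat and parallel to batch: one score for each post's content,
    # immediately followed by one score per comment of that post; the loop
    # below peels scores off the front in the same order.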
    for w in weibo:
        w['content_sentiment'] = res[0]
        res = res[1:]
        l = len(w['comments'])
        w['comments_sentiment'] = res[0:l]
        res = res[l:]
    response = jsonify(weibo)
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response
Example #20
def evaluate(dev_path, vocab):
    precisions = []
    recalls = []
    f1s = []
    with open(dev_path, 'r', encoding='utf-8') as dev_file:
        for line in dev_file:
            origin_line = line.replace(' ', '')
            preprocess_sentence, matched = preprocess.preprocess(origin_line)
            split_sentence = analyze.analyze(preprocess_sentence, matched, vocab).split()
            target_sentence = line.split()
            precisions.append(precision(target_sentence, split_sentence))
            recalls.append(recall(target_sentence, split_sentence))
            f1s.append(f1_score(target_sentence, split_sentence))

        print("Precision: ", average(precisions))
        print("Recall: ", average(recalls))
        print("F1-Score: ", average(f1s))
Example #21
def main(sol_file):
    seed = None
    # assert False

    logger.info('Analyzing Input...')
    deps, refs = analyze(sol_file, "C", "foo()")
    lambdas = analyze_lambdas(sol_file, "C", "foo()")
    logger.info('Analysis Successful!')

    # print(deps.dependencies)
    # print(refs.pprint_refinement())

    actual_spec, prog_decl, types, i_global, global_vars = instantiate_dsl(
        sol_file, refs.types, lambdas)

    # print(actual_spec)

    logger.info('Parsing Spec...')
    spec = S.parse(actual_spec)
    logger.info('Parsing succeeded')

    # Fetch other contract names
    slither = Slither(sol_file)
    other_contracts = list(
        filter(lambda x: x != 'C', map(str, slither.contracts)))

    logger.info('Building synthesizer...')
    synthesizer = Synthesizer(
        enumerator=DependencyEnumerator(spec,
                                        max_depth=4,
                                        seed=seed,
                                        analysis=deps.dependencies,
                                        types=types),
        decider=SymdiffDecider(interpreter=SymDiffInterpreter(
            prog_decl, other_contracts, i_global, global_vars),
                               example=sol_file,
                               equal_output=check_eq))
    logger.info('Synthesizing programs...')

    prog = synthesizer.synthesize()
    if prog is not None:
        logger.info('Solution found: {}'.format(prog))
        return True
    else:
        logger.info('Solution not found!')
        return False
Example #22
def run_nt(file_):

    series = pd.read_csv(file_)

    series = series.fillna(0)

    #12 to 5
    print("GETTING RAINY")
    series_rainy = get_rainy(series)

    print("ANALYZING")
    rainy = analyze(series_rainy)
    #rainy = represent(rainy)

    print("TRAINUI")
    df = train_ui(rainy)

    return df
Example #23
File: gen.py, Project: JeffreyJosanne/NLP
def main(args):
    POS = analyze.read_vocab()
    POS_pairs = analyze.analyze("dev.sen", POS)
    symdict = {}
    for pos in POS.values():
        symdict[pos] = 1
    syms = sorted(symdict)
    sys.stdout.write("1\tS2\n")
    for sym in syms:
        count = POS_pairs[('',sym)] + 1
        sys.stdout.write("%d\tS2\t_%s\n" % (count, sym))
    for sym in syms:
        count = POS_pairs[(sym, '')] + 1
        sys.stdout.write("%d\t_%s\t%s\n" % (count, sym, sym))
        for sym1 in syms:
            count = POS_pairs[(sym, sym1)] + 1
            sys.stdout.write("%d\t_%s\t%s _%s\n" % (count, sym, sym, sym1))
    return 0
Example #24
    def init(self):
        """
        Init the chatbot
        :return:
        """
        #time.ctime(os.path.getmtime('brain.db'))
        a = analyze()
        #get MainSection
        self.sectionActual = a.getSectionMain()
        if self.sectionActual == -1:
            print("Nao ha question main!")
            return 0

        #section main
        s = section(self.sectionActual)
        #get patterns of the section main
        self.patterns = s.getPatterns()
        #get all sections
        self.sections = a.getSections()
Example #25
File: worker.py, Project: truongdo/sopare
 def __init__(self, queue, debug, plot, dict, wave):
     multiprocessing.Process.__init__(self, name="worker for prepared queue")
     self.queue = queue
     self.debug = debug
     self.plot = plot
     self.dict = dict
     self.wave = wave
     self.visual = visual.visual()
     self.condense = condense.packing()
     self.util = util.util(debug, None)
     self.analyze = analyze.analyze(debug)
     self.characteristic = characteristics.characteristic(debug)
     self.running = True
     self.counter = 0
     self.reset_counter = 0
     self.rawbuf = [ ]
     self.reset()
     self.DICT = self.util.getDICT()
     self.start()
Example #26
def question4():
    q = """
    4.
    The event is defined as when the actual close of the stock price drops below $9.00, more specifically, when:
    price[t-1]>=9.0 and price[t]<9.0 an event has occurred on date t.
    * Test this event using the Event Profiler over the period from 1st Jan, 2008 to 31st Dec 2009.
    * Using the symbol list - SP5002012
    * Starting Cash: $50,000
    * At every event Buy 100 shares of the equity, and Sell them 5 trading days later. In case not enough days are available Sell them on the last trading day. (Similar to what the homework 4 description wanted).
    * Run this in your simulator and analyze the results.
    What is the sharpe ratio of the fund ?
    * 1.0 to 1.1
    * 0.9 to 1.0
    * 0.8 to 0.9
    * 0.7 to 0.8
    """

    dt_start = dt.datetime(2008, 1, 1)
    dt_end = dt.datetime(2009, 12, 31)
    cash = 50000

    ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt.timedelta(hours=16))
    dataobj = da.DataAccess('Yahoo')
    ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']

    ls_2012_symbols = ev.get_symbols_in_year(dataobj, 2012)
    d_2012_data = ev.get_data(dataobj, ldt_timestamps, ls_2012_symbols)

    order_file = 'orders.csv'
    analysis_file = 'values_9_dollar_event.csv'
    benchmark_symbol = '$SPX'

    df_events = ev.find_9_dollar_events(ls_2012_symbols, d_2012_data)
    ev.generate_orders(ls_2012_symbols, df_events, order_file)

    simulation_result = mksim.simulate(cash, order_file)
    mksim.write_simulation_result(simulation_result, analysis_file)

    fund, benchmark = an.analyze(analysis_file, benchmark_symbol)

    return q, fund.sharpe
Example #27
 def __init__(self, hatch, queue):
     multiprocessing.Process.__init__(self, name="worker for filtered data")
     self.hatch = hatch
     self.queue = queue
     self.visual = visual.visual()
     self.util = util.util(self.hatch.get('debug'))
     self.analyze = analyze.analyze(self.hatch.get('debug'))
     self.compare = comparator.compare(self.hatch.get('debug'), self.util)
     self.running = True
     self.counter = 0
     self.plot_counter = 0
     self.reset_counter = 0
     self.rawbuf = [ ]
     self.rawfft = [ ]
     self.raw = [ ]
     self.fft = [ ]
     self.word_tendency = None
     self.character = [ ]
     self.raw_character = [ ]
     self.uid = str(uuid.uuid4())
     self.start()
Example #28
def question10():
    q = """
    The event is defined as when the actual close of the stock price drops below $10.00, more specifically, when:
    price[t-1]>=10.0 and price[t]<10.0 an event has occurred on date t.
    * Test this event using the Event Profiler over the period from 1st Jan, 2008 to 31st Dec 2009.
    * Using the symbol list - SP5002012
    * Starting Cash: $50,000
    * At every event Buy 100 shares of the equity, and Sell them 5 trading days later. In case not enough days are available Sell them on the last trading day. (Similar to what the homework 4 description wanted).
    * Run this in your simulator and analyze the results.
    What is the total return of the fund ?
    * 1.15 to 1.25
    * 1.25 to 1.35
    * 1.05 to 1.15
    * 1.35 to 1.45
    """

    analysis_file = 'values_10_dollar_event.csv'
    benchmark_symbol = '$SPX'

    fund, benchmark = an.analyze(analysis_file, benchmark_symbol)

    return q, fund.total_return
Example #29
 def __init__(self, queue, debug, plot, dict, wave):
     multiprocessing.Process.__init__(self, name="worker for filtered data")
     self.queue = queue
     self.debug = debug
     self.plot = plot
     self.dict = dict
     self.wave = wave
     self.visual = visual.visual()
     self.util = util.util(debug)
     self.analyze = analyze.analyze(debug)
     self.compare = comparator.compare(debug, self.util)
     self.running = True
     self.counter = 0
     self.plot_counter = 0
     self.reset_counter = 0
     self.rawbuf = []
     self.rawfft = []
     self.raw = []
     self.fft = []
     self.word_tendency = None
     self.character = []
     self.raw_character = []
     self.uid = str(uuid.uuid4())
     self.start()
Example #30
def main():
    """
    Main entry point of the program

    This function recovers the input from the command line arguments, from
    :mod:`parser_mp`, the parameter files.

    It then extracts the path of the used Monte Python code, assuming a
    standard setting (the data folder is in the same directory as the code
    folder).

    It finally proceeds to initialize a :class:`data` instance, a cosmological
    code instance, and runs the Markov chain.

    .. note::
        A possible parallelization would take place here.
    """
    # Parsing line argument
    command_line = parser_mp.parse()

    # Default configuration
    path = {}

    # On execution, sys.path contains all the standard locations for the
    # libraries, plus, on the first position (index 0), the directory from
    # where the code is executed. By default, then, the data folder is located
    # in the same root directory. Any setting in the configuration file will
    # overwrite this one.
    path['MontePython'] = sys.path[0] + '/'
    path['data'] = path['MontePython'][:-5] + 'data/'

    # Configuration file, defaulting to default.conf in your root directory.
    # This can be changed with the command line option -conf. All changes will
    # be stored into the log.param of your folder, and hence will be reused in
    # a later run in the same directory
    conf_file = path['MontePython'][:-5] + command_line.config_file
    if os.path.isfile(conf_file):
        for line in open(conf_file):
            exec(line)
        for key, value in path.items():
            if not value.endswith('/'):
                path[key] = value + '/'
    else:
        io_mp.message(
        "You must provide a .conf file (default.conf by default in your \
        montepython directory that specifies the correct locations for your \
        data folder, Class (, Clik), etc...",
        "error")

    sys.stdout.write('Running MontePython version 1.2\n')

    # If the info flag was used, read a potential chain (or set of chains) to
    # be analysed with default procedure. If the argument is a .info file, then
    # it will extract information from it (plots to compute, chains to analyse,
    # etc...)
    if command_line.files is not None:
        from analyze import analyze   # analysis module, only invoked when analyzing
        analyze(command_line)
        exit()

    # If the restart flag was used, load the cosmology directly from the
    # log.param file, and append to the existing chain.
    if command_line.restart is not None:
        if command_line.restart[0] == '/':
            folder = ''
        else:
            folder = './'
        for elem in command_line.restart.split("/")[:-1]:
            folder += ''.join(elem+'/')
        command_line.param = folder+'log.param'
        command_line.folder = folder
        sys.stdout.write('Reading {0} file'.format(command_line.restart))
        Data = data.data(command_line, path)

    # Else, fill in data, starting from the parameter file. If the output
    # folder already exists, the input parameter file was automatically
    # replaced by the existing log.param. This prevents you from running
    # different things in the same folder.
    else:
        Data = data.data(command_line, path)

    # Overwrite arguments from parameter file with the command line
    if command_line.N is None:
        try:
            command_line.N = Data.N
        except AttributeError:
            io_mp.message(
                "You did not provide a number of steps, neither via \
                command line, nor in %s" % command_line.param,
                "error")

    # Creating the file that will contain the chain
    io_mp.create_output_files(command_line, Data)

    # If there is a conflict between the log.param value and the .conf file,
    # exiting.
    if Data.path != path:
        io_mp.message(
            "Your log.param file is in contradiction with your .conf file, \
            please check your path in these two places.",
            "error")

    # Loading up the cosmological backbone. For the moment, only Class has been
    # wrapped.

    # Importing the python-wrapped Class from the correct folder, defined in
    # the .conf file, or overwritten at this point by the log.param.
    # If the cosmological code is Class, do the following to import all
    # relevant quantities
    if Data.cosmological_module_name == 'Class':
        try:
            for elem in os.listdir(Data.path['cosmo']+"python/build"):
                if elem.find("lib.") != -1:
                    classy_path = path['cosmo']+"python/build/"+elem
        except OSError:
            io_mp.message(
                "You probably did not compile the python wrapper of Class. \
                Please go to /path/to/class/python/ and do\n\
                ..]$ python setup.py build",
                "error")

        # Inserting the previously found path into the list of folders to
        # search for python modules.
        sys.path.insert(1, classy_path)
        try:
            from classy import Class
        except ImportError:
            io_mp.message(
                "You must have compiled the classy.pyx file. Please go to \
                /path/to/class/python and run the command\n\
                python setup.py build",
                "error")

        cosmo = Class()
    else:
        io_mp.message(
            "Unrecognised cosmological module. \
            Be sure to define the correct behaviour in MontePython.py \
            and data.py, to support a new one",
            "error")

    # MCMC chain
    mcmc.chain(cosmo, Data, command_line)

    # Closing up the file
    Data.out.close()
Example #31
def initialise(custom_command=''):
    """
    Initialisation routine

    This function recovers the input from the command line arguments, from
    :mod:`parser_mp`, the parameter files.

    It then extracts the path of the used Monte Python code, and proceeds to
    initialise a :class:`data` instance, a cosmological code instance.

    Parameters
    ----------
        custom_command: str
            allows for testing the code
    """
    # Parsing line argument
    command_line = parser_mp.parse(custom_command)

    # Recovering the local configuration
    path = recover_local_path(command_line)

    # check for MPI
    try:
        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
    except ImportError:
        # set all chains to master if no MPI
        rank = 0

    # Recover Monte Python's version number
    version_path = os.path.join(
        path['root'], 'VERSION')
    with open(version_path, 'r') as version_file:
        version = version_file.readline()
    if not command_line.silent and not rank:
        print('Running Monte Python v%s' % version)

    # If the info flag was used, read a potential chain (or set of chains) to
    # be analysed with default procedure. If the argument is a .info file, then
    # it will extract information from it (plots to compute, chains to analyse,
    # etc...)
    if command_line.subparser_name == "info":
        from analyze import analyze  # only invoked when analyzing
        analyze(command_line)
        # FK: we need an additional None because of two cosmo-modules!
        return None, None, None, command_line, False

    # Fill in data, starting from the parameter file. If the output folder
    # already exists, the input parameter file was automatically replaced by
    # the existing log.param. This prevents you from running different things
    # in the same folder.
    else:
        data = Data(command_line, path)

        # Overwrite arguments from parameter file with the command line
        if command_line.N is None:
            try:
                command_line.N = data.N
            except AttributeError:
                raise io_mp.ConfigurationError(
                    "You did not provide a number of steps, neither via " +
                    "command line, nor in %s" % command_line.param)

        # Loading up the cosmological backbone. For the moment, only CLASS has been
        # wrapped.
        cosmo1, cosmo2 = recover_cosmological_module(data)

        # Initialising the sampler
        # MH: Creating the file that will contain the chain
        if command_line.method == 'MH':
            io_mp.create_output_files(command_line, data)
        # NS: Creating the NS subfolder and the MultiNest arguments
        elif command_line.method == 'NS':
            from MultiNest import initialise as initialise_mn
            initialise_mn(cosmo1, cosmo2, data, command_line)
        # PC: Creating the PC subfolder and the PolyChord arguments
        elif command_line.method == 'PC':
            from PolyChord import initialise as initialise_pc
            initialise_pc(cosmo1, cosmo2, data, command_line)

        return cosmo1, cosmo2, data, command_line, True
Example #32
def log(item, m):
	logfile = open(str(item)+'.log', 'a')
	print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), m, file=logfile)
	logfile.close()

clicks = {}
lastBidder = {}
bidders = {}
myBids = {}
bidTime = {}
timing = {}

for x in items:
	timing[x] = accurate.accurate()
	lastBidder[x] = False
	bidders[x] = analyze(str(x)+'.log')['bidders']
	myBids[x] = 0
	clicks[x] = []
	bidTime[x] = nextBid(False)
	log(x, 'first at %.2f' % (bidTime[x]))

sounds.start()

lastRefresh = 0

frame = 0
try:
	while True:
		frame += 1
		doSleep = True
		try:
Example #33
'''
Author: Sebastian Alfers
This file is part of my thesis 'Evaluation and implementation of cluster-based dimensionality reduction'
License: https://github.com/sebastian-alfers/master-thesis/blob/master/LICENSE
'''

import numpy as np
import data_factory as df
import os.path
import analyze

sets = df.getAllDatasets()
#sets = [df.loadFirstPlistaDataset]
with open('log.txt', 'w') as file:
    file.write('##### printing the size of each dataset #####\n')
    for load in sets:
        data, label, desc, _ = load()
        shape = np.shape(data)
        file.write("dataset '%s':\n" % desc)
        file.write("rows:%s, dimensions:%s\n" % (shape[0], shape[1]))

        negativeExamples, negativePercentage, positiveExamples, positivePercentage, zero_elements, non_zero_elements = analyze.analyze(
            data, label, desc)
        file.write("negative observations: %s (%.2f %%) \n" %
                   (negativeExamples, negativePercentage))
        file.write("positive observations: %s (%.2f %%) \n" %
                   (positiveExamples, positivePercentage))
        file.write("zero elements: %.2f \n" % zero_elements)
        file.write("non zero elements: %.2f \n" % non_zero_elements)

        file.write("\n")
Example #34
from analyze import analyze
from glob import glob
import sys
from az import Azure
from faceplusplus import FacePlusPlus
from rekognition import Rekognition
import json

AZURE_TOKEN = ""
AZURE_ENDPOINT = ""
FACEPP_KEY = ""
FACEPP_SECRET = ""
REKOG_KEY = ""
REKOG_SECRET = ""

services = [
    Azure(AZURE_TOKEN, AZURE_ENDPOINT),
    FacePlusPlus(FACEPP_KEY, FACEPP_SECRET),
    Rekognition(REKOG_KEY, REKOG_SECRET)
]

analyze(services, glob(sys.argv[1]), wait=3)
Example #35
    'http://gutenberg.readingroo.ms/etext02/03hgp10a.zip',
    'http://gutenberg.readingroo.ms/etext02/04hgp10a.zip',
    'http://gutenberg.readingroo.ms/etext02/05hgp10a.zip',
    'http://gutenberg.readingroo.ms/etext02/06hgp10a.zip',
    'http://gutenberg.readingroo.ms/etext02/07hgp10a.zip',
    'http://gutenberg.readingroo.ms/etext02/08hgp10a.zip',
    'http://gutenberg.readingroo.ms/etext02/09hgp10a.zip',
    'http://gutenberg.readingroo.ms/etext02/10hgp10a.zip',
    'http://gutenberg.readingroo.ms/etext02/11hgp10a.zip',
    'http://gutenberg.readingroo.ms/etext02/12hgp10a.zip',
    'http://gutenberg.readingroo.ms/etext02/13hgp10a.zip',
    'http://gutenberg.readingroo.ms/etext02/14hgp10a.zip',
    'http://gutenberg.readingroo.ms/etext02/15hgp10a.zip',
    'http://gutenberg.readingroo.ms/etext02/16hgp10a.zip',
    'http://gutenberg.readingroo.ms/etext02/17hgp10a.zip',
    'http://gutenberg.readingroo.ms/etext02/18hgp10a.zip',
    'http://gutenberg.readingroo.ms/etext02/19hgp10a.zip',
    'http://gutenberg.readingroo.ms/etext02/20hgp10a.zip',
    'http://gutenberg.readingroo.ms/etext02/21hgp10a.zip',
    'http://gutenberg.readingroo.ms/etext02/22hgp10a.zip',
    'http://gutenberg.readingroo.ms/etext02/0xhgp10a.zip',
    'http://gutenberg.readingroo.ms/etext02/0yhgp10a.zip'
]

for url in urls:
    filename = url.split("/")[-1]
    if not path.exists("raw/" + filename):
        call(["wget", "-P", "raw/", url])
    # call(["python3", "analyze.py", "raw/" + filename])
    analyze("raw/" + filename, "lab")
Example #36
import pandas as pd
import seaborn as sns

import tensorflow as tf

from tensorflow import keras
from tensorflow.keras import layers

from analyze import analyze

import os

os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

# Get data which have high correlation
data = analyze()

# Separate data into training set and test set
train_data = data.sample(frac=0.8, random_state=0)
test_data = data.drop(train_data.index)

# You can see stats information. Used the result to get rid of outliers
train_stats = train_data.describe()
train_stats.pop('price')
train_stats = train_stats.transpose()
# print(train_stats)

# Split features from labels
train_labels = train_data.pop('price')
test_labels = test_data.pop('price')
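
A typical next step with train_stats in this pattern is z-scoring the features; a sketch of the usual continuation, not necessarily this project's code:

def norm(x):
    # train_stats was transposed above, so 'mean' and 'std' are per-feature columns
    return (x - train_stats['mean']) / train_stats['std']

normed_train_data = norm(train_data)
normed_test_data = norm(test_data)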
Example #37
for j in seiseki:
    if len(j)>1:
        if j.startswith('分野'):
            index = j.find('取得合計')
            if index>0:
                num = int(int(re.sub("\\D", "", str(j[index:])))/10)
                sum += num
                credits.append([j[3:11], num])
                #print('分野', j[3:11], 'を', num, '単位')#for debug

    else:
        if j[0].startswith('分野'):
            index = j[0].find('取得合計')
            if index>0:
                num = int(int(re.sub("\\D", "", str(j[0][index:])))/10)
                sum += num
                credits.append([j[0][3:11], num])
                #print('分野', j[0][3:11], 'を', num, '単位')#for debug

for j in seiseki:
    if j[-2:]=="3年":
        hoge=j.split(" ")# 最初からこれやった方がいいな?
        if not ((hoge[-7].startswith("D") or hoge[-7].startswith("F")) or hoge[-7].startswith("?")):# DとFが落単
            sannen+=int(float(hoge[-6]))
    elif j[-2:]=="4年":
        hoge=j.split(" ")
        if not ((hoge[-7].startswith("D") or hoge[-7].startswith("F")) or hoge[-7].startswith("?")):
            yonen+=int(float(hoge[-6]))

analyze(credits,sum, sannen, yonen)
Example #38
  def LoadShowImage(self):
    self.ui.status_bkgnorm.setText('status: running')
    QtGui.QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
    #initialization
    self.data_struct = data_struct.h5()
    self.stk_sam = data_stack.data(self.data_struct)
    self.stk_bkg = data_stack.data(self.data_struct)
    self.anlz_sam = analyze.analyze(self.stk_sam)
    self.anlz_bkg = analyze.analyze(self.stk_bkg)
    self.common = common()
    #load sample and background
    if self.ui.samTxrm.isChecked() == True:            
        #self.new_stack_refresh()  
        self.stk_sam.new_data()
        #self.stk.data_struct.delete_data()
        self.anlz_sam.delete_data()  
        self.stk_sam.read_txrm(self.sam_filepath, self.data_struct)                 
    if self.ui.samXrm.isChecked() == True:              
        self.stk_sam.new_data()
        self.anlz_sam.delete_data()
        #self.sam_filelist = os.path.basename(str(self.sam_filepaths))
        self.stk_sam.read_xrm_list(self.sam_filepaths) 
    if self.ui.bkgTxrm.isChecked() == True:
        self.stk_bkg.new_data()
        self.anlz_bkg.delete_data() 
        self.stk_bkg.read_txrm(self.bkg_filepath, self.data_struct)
    if self.ui.bkgXrm.isChecked() == True:
        self.stk_bkg.new_data()
        self.anlz_bkg.delete_data()
        #self.bkg_filelist = os.path.basename(str(self.bkg_filepaths))
        self.stk_bkg.read_xrm_list(self.bkg_filepaths)
    self.common.stack_loaded = 1
    #update image information
    self.iev = int(self.stk_sam.n_ev)
    self.currentSliderStep = self.iev - 1
    self.ev = self.data_struct.exchange.energy
    #calculate scaleimg
    sam_image_stack = self.stk_sam.absdata.copy() 
    bkg_image_stack = self.stk_bkg.absdata.copy()
    self.scale_image_stack = np.true_divide(sam_image_stack,bkg_image_stack)  
    #show image
    self.ShowImage(self.currentSliderStep,self.scale_min,self.scale_max,self.rawImagePosX,self.rawImagePosY,self.bin,self.edgeJump)
    QtGui.QApplication.restoreOverrideCursor()
    #refresh_widgets
    self.ui.slider_img_maxscale.setEnabled(True)
    self.ui.slider_img_minscale.setEnabled(True)
    self.ui.slider_img.setEnabled(True)
    self.ui.current_img.setEnabled(True)
    self.ui.current_img.setText(str(self.iev))
    self.ui.slider_img.setMinimum(1)
    self.ui.slider_img.setMaximum(self.iev)
    self.ui.total_img.setText(str(self.iev))
    self.ui.status_bkgnorm.setText('status: complete')
    self.ui.text_minscale.setText("Min   0   %")
    self.ui.text_maxscale.setText("Max  100  %")
    self.ui.select_point.setEnabled(True)
    self.ui.update_spectrum.setEnabled(True)
    self.ui.x_cord.setEnabled(True)
    self.ui.y_cord.setEnabled(True)
    self.ui.edit_edge_jump.setEnabled(True)

    self.ui.pre_start.setMinimum(1)
    self.ui.pre_start.setMaximum(self.iev)
    self.ui.pre_end.setMinimum(1)
    self.ui.pre_end.setMaximum(self.iev)
    self.ui.post_start.setMinimum(1)
    self.ui.post_start.setMaximum(self.iev)
    self.ui.post_end.setMinimum(1)
    self.ui.post_end.setMaximum(self.iev)
    self.ui.edge_start.setMinimum(1)
    self.ui.edge_start.setMaximum(self.iev)
    self.ui.edge_end.setMinimum(1)
    self.ui.edge_end.setMaximum(self.iev)

    self.ui.text_pre_start.setText('PreEstart')
    self.ui.text_pre_end.setText('PreEend')
    self.ui.text_post_start.setText('PostEstart')
    self.ui.text_post_end.setText('PostEend')
    self.ui.text_edge_start.setText('EdgeStart')
    self.ui.text_edge_end.setText('EdgeEnd')
Example #39
 'http://gutenberg.readingroo.ms/etext02/02hgp10a.zip',
 'http://gutenberg.readingroo.ms/etext02/03hgp10a.zip',
 'http://gutenberg.readingroo.ms/etext02/04hgp10a.zip',
 'http://gutenberg.readingroo.ms/etext02/05hgp10a.zip',
 'http://gutenberg.readingroo.ms/etext02/06hgp10a.zip',
 'http://gutenberg.readingroo.ms/etext02/07hgp10a.zip',
 'http://gutenberg.readingroo.ms/etext02/08hgp10a.zip',
 'http://gutenberg.readingroo.ms/etext02/09hgp10a.zip',
 'http://gutenberg.readingroo.ms/etext02/10hgp10a.zip',
 'http://gutenberg.readingroo.ms/etext02/11hgp10a.zip',
 'http://gutenberg.readingroo.ms/etext02/12hgp10a.zip',
 'http://gutenberg.readingroo.ms/etext02/13hgp10a.zip',
 'http://gutenberg.readingroo.ms/etext02/14hgp10a.zip',
 'http://gutenberg.readingroo.ms/etext02/15hgp10a.zip',
 'http://gutenberg.readingroo.ms/etext02/16hgp10a.zip',
 'http://gutenberg.readingroo.ms/etext02/17hgp10a.zip',
 'http://gutenberg.readingroo.ms/etext02/18hgp10a.zip',
 'http://gutenberg.readingroo.ms/etext02/19hgp10a.zip',
 'http://gutenberg.readingroo.ms/etext02/20hgp10a.zip',
 'http://gutenberg.readingroo.ms/etext02/21hgp10a.zip',
 'http://gutenberg.readingroo.ms/etext02/22hgp10a.zip',
 'http://gutenberg.readingroo.ms/etext02/0xhgp10a.zip',
 'http://gutenberg.readingroo.ms/etext02/0yhgp10a.zip']

for url in urls:
    filename = url.split("/")[-1]
    if not path.exists("raw/" + filename):
        call(["wget", "-P", "raw/", url])
    # call(["python3", "analyze.py", "raw/" + filename])
    analyze("raw/" + filename, "lab")
Example #40
def validate():
    r_files = os.listdir('rootfiles')
    for r_file in r_files:
        if 'BDT' in r_file: break
    return analyze('rootfiles/' + r_file)
Example #41
                print(i)

            # break if stuck
            if last_step_time:
                if time() - last_step_time > 120:
                    break
            last_step_time = time()

            # uncomment to debug
            #x = raw_input()

            # extract data form netlogo: {agent_id xcor ycor link_on speed direction on_route_time
            #                             dist_travelled remaining_route travel_time iteration}
            data = netlogo.report('[data] of turtles')
            # update cars and networkx
            cars, network = analyze(data, cars, network)

            '''
            YOUR CODE GOES HERE
            UPDATE ROUTES BASED ON
            NETWORK AND CARS
            '''

            if alg == 'dijkstra':
                # SIMPLE DIJKSTRA UPDATE AT EACH INTERSECTION
                update_routes_quickest(netlogo, network, cars)
            if alg == 'dijkstraBounded':
                # SIMPLE DIJKSTRA UPDATE AT EACH INTERSECTION
                update_routes_quickest_bounded(netlogo, network, cars)
            if alg == 'lessCarAhead':
                # Turn on the immediate road with higher speed
Example #42
from flask import Flask, abort,jsonify, request, render_template, redirect, url_for
import json,sys,urllib2,os
from functools import wraps
import analyze

app = Flask(__name__)
app.classifier = analyze.analyze()

def jsonp(f):
    """Wraps JSONified output for JSONP"""
    @wraps(f)
    def decorated_function(*args, **kwargs):
        callback = request.args.get('callback', False)
        if callback:
            content = str(callback) + '(' + str(f().data) + ')'
            return app.response_class(content, mimetype='application/json')
        else:
            return f(*args, **kwargs)
    return decorated_function


@app.route('/test')
def testClassifier():
	text = request.args.get('text', '')
	# TODO: strip out illegal CHARS
	result = dict(prediction=0)
	if text:
		result['prediction'] = app.classifier.predictText(text)
	return jsonify( result )

@app.route('/')
Example #43
def initialise(custom_command=''):
    """
    Initialisation routine

    This function recovers the input from the command line arguments, from
    :mod:`parser_mp`, the parameter files.

    It then extracts the path of the used Monte Python code, and proceeds to
    initialise a :class:`data` instance, a cosmological code instance.

    Parameters
    ----------
        custom_command: str
            allows for testing the code
    """
    # Parsing line argument
    command_line = parser_mp.parse(custom_command)

    # Recovering the local configuration
    path = recover_local_path(command_line)

    # Recover Monte Python's version number
    version_path = os.path.join(
        path['root'], 'VERSION')
    with open(version_path, 'r') as version_file:
        version = version_file.readline()
    print('Running Monte Python v%s' % version)

    # If the info flag was used, read a potential chain (or set of chains) to
    # be analysed with default procedure. If the argument is a .info file, then
    # it will extract information from it (plots to compute, chains to analyse,
    # etc...)
    if command_line.subparser_name == "info":
        from analyze import analyze  # only invoked when analyzing
        analyze(command_line)
        return None, None, command_line, False

    # Fill in data, starting from the parameter file. If the output folder
    # already exists, the input parameter file was automatically replaced by
    # the existing log.param. This prevents you from running different things
    # in the same folder.
    else:
        data = Data(command_line, path)

        # Overwrite arguments from parameter file with the command line
        if command_line.N is None:
            try:
                command_line.N = data.N
            except AttributeError:
                raise io_mp.ConfigurationError(
                    "You did not provide a number of steps, neither via " +
                    "command line, nor in %s" % command_line.param)

        # Creating the file that will contain the chain, only with Metropolis
        # Hastings
        if command_line.method == 'MH':
            io_mp.create_output_files(command_line, data)

        # Loading up the cosmological backbone. For the moment, only CLASS has been
        # wrapped.
        cosmo = recover_cosmological_module(data)

        return cosmo, data, command_line, True
Example #44
#!/usr/bin/python3

import json
import os
from sys import argv

from analyze import analyze
from plot import plot

if __name__ == '__main__':
    if len(argv) < 2 or not os.path.isdir(argv[1]):
        print('Error: first argument is no directory')
        exit()
    directory = argv[1]

    all_data = analyze(directory, use_cache=True)

    plot(all_data, "edges", directory, True)
    plot(all_data, "rounds", directory)
    plot(all_data, "max_degree", directory, True)
    plot(all_data, "avg_degree", directory)
    plot(all_data, "min_degree", directory)
    plot(all_data, "local_clustering", directory)
    plot(all_data, "global_clustering", directory)
    plot(all_data, "pseudo_diameter", directory)
    plot(all_data, "exponent", directory)
    plot(all_data, "KS", directory)
    plot(all_data, "x_min", directory)
Example #45
'''
Author: Sebastian Alfers
This file is part of my thesis 'Evaluation and implementation of cluster-based dimensionality reduction'
License: https://github.com/sebastian-alfers/master-thesis/blob/master/LICENSE
'''

import data_factory as data
from analyze import analyze

for load in data.getAllDatasets():
    data, label = load()
    analyze(data, label)
Example #46
def main(name):
    unzip(name)
    print(name + " unzip completed")
    analyze(name)
    print(name + " analyze completed")
Example #47
def analyze():
    data = analyze_games.analyze()
    json.dump(data, open("data/circles.json", "w"))
Example #48
def chain(cosmo, data, command_line):
    """
    Run a Markov chain of fixed length with a Metropolis Hastings algorithm.

    Main function of this module, this is the actual Markov chain procedure.
    After having selected a starting point in parameter space defining the
    first **last accepted** point, it will, for a given number of steps:

    + choose randomly a new point following the *proposal density*,
    + compute the cosmological *observables* through the cosmological module,
    + compute the value of the *likelihoods* of the desired experiments at this
      point,
    + *accept/reject* this point given its likelihood compared to the one of
      the last accepted one.

    Every time the code accepts :code:`data.write_step` points (a quantity
    defined in the input parameter file), it will write the result to disk
    (flushing the buffer by closing and reopening the output file).

    .. note::

        to use the code to set a fiducial file for certain fixed parameters,
        there are two solutions. The first one is to set all input 1-sigma
        proposal densities to zero (this method still works, but is no longer
        recommended). The second one consists of using the flag "-f 0",
        to force a step of zero amplitude.

    """

    ## Initialisation
    loglike = 0

    # If command_line.silent was requested, outputs should only contain
    # data.out. Otherwise, it will also contain sys.stdout
    outputs = [data.out]
    if not command_line.silent:
        outputs.append(sys.stdout)

    use_mpi = False
    # check for MPI
    try:
        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        # suppress duplicate output from slaves
        if rank:
            command_line.quiet = True
        use_mpi = True
    except ImportError:
        # set all chains to master if no MPI
        rank = 0

    # Initialise master and slave chains for superupdate.
    # Workaround in order to have one master chain and several slave chains even when
    # communication fails between MPI chains. It could malfunction on some hardware.
    # TODO: Would like to merge with MPI initialization above and make robust and logical
    # TODO: Or if keeping current scheme, store value and delete jumping_factor.txt
    # TODO: automatically if --parallel-chains is enabled
    if command_line.superupdate and data.jumping_factor:
        try:
            jump_file = open(command_line.folder + '/jumping_factor.txt', 'r')
            #if command_line.restart is None:
            if not use_mpi and command_line.parallel_chains:
                rank = 1
                warnings.warn(
                    'MPI not in use, flag --parallel-chains enabled, '
                    'superupdate enabled, and a jumping_factor.txt file detected. '
                    'If relaunching in the same folder or restarting a run this '
                    'will cause all chains to be assigned as slaves. In this case '
                    'instead note the value in jumping_factor.txt, delete the '
                    'file, and pass the value with flag -f <value>. This warning '
                    'may then appear again, but you can safely disregard it.')
            else:
                # For restart runs we want to save the input jumping factor
                # as starting jumping factor, but continue from the jumping
                # factor stored in the file.
                starting_jumping_factor = data.jumping_factor
                # This will load the value irrespective of whether it starts
                # with # (i.e. the jumping factor adaptation was started) or not.
                jump_value = jump_file.read().replace('# ', '')
                data.jumping_factor = float(jump_value)
            jump_file.close()
            print('rank = ', rank)
        except:
            jump_file = open(command_line.folder + '/jumping_factor.txt', 'w')
            jump_file.write(str(data.jumping_factor))
            jump_file.close()
            rank = 0
            print('rank = ', rank)
            starting_jumping_factor = data.jumping_factor

    # Recover the covariance matrix according to the input, if the varying set
    # of parameters is non-zero
    if (data.get_mcmc_parameters(['varying']) != []):

        # Read input covariance matrix
        sigma_eig, U, C = sampler.get_covariance_matrix(
            cosmo, data, command_line)

        # if we want to compute the starting point by minimising lnL (instead of taking it from input file or bestfit file)
        minimum = 0
        if command_line.minimize:
            minimum = sampler.get_minimum(cosmo, data, command_line, C)
            parameter_names = data.get_mcmc_parameters(['last_accepted'])
            for index, elem in parameter_names:
                data.mcmc_parameters[elem]['last_accepted'] = minimum[index]

        # if we want to compute Fisher matrix and then stop
        if command_line.fisher:
            sampler.get_fisher_matrix(cosmo, data, command_line, C, minimum)
            return

        # warning if no jumps are requested
        if data.jumping_factor == 0:
            warnings.warn(
                "The jumping factor has been set to 0. The above covariance " +
                "matrix will not be used.")

    # In case of a fiducial run (all parameters fixed), simply run once and
    # print out the likelihood. This should not be used any more (one has to
    # modify the log.param, which is never a good idea). Instead, force the
    # code to use a jumping factor of 0 with the option "-f 0".
    else:
        warnings.warn(
            "You are running with no varying parameters... I will compute " +
            "only one point and exit")
        data.update_cosmo_arguments()  # this fills in the fixed parameters
        loglike = sampler.compute_lkl(cosmo, data)
        io_mp.print_vector(outputs, 1, loglike, data)
        return 1, loglike

    # In the fast-slow method, one needs the Cholesky decomposition of the
    # covariance matrix. Return the Cholesky decomposition as a lower
    # triangular matrix
    Cholesky = None
    Rotation = None
    if command_line.jumping == 'fast':
        Cholesky = la.cholesky(C).T
        Rotation = np.identity(len(sigma_eig))

    # define path and covmat
    input_covmat = command_line.cov
    base = os.path.basename(command_line.folder)
    # the previous line fails when "folder" is a string ending with a slash. This issue is cured by the next lines:
    if base == '':
        base = os.path.basename(command_line.folder[:-1])
    command_line.cov = os.path.join(command_line.folder, base + '.covmat')

    # Fast Parameter Multiplier (fpm) for adjusting update and superupdate numbers.
    # This is equal to N_slow + f_fast * N_fast, where N_slow is the number of slow
    # parameters, f_fast is the oversampling factor for each fast block and N_fast
    # is the number of parameters in each fast block.
    for i in range(len(data.block_parameters)):
        if i == 0:
            fpm = data.over_sampling[i] * data.block_parameters[i]
        else:
            fpm += data.over_sampling[i] * (data.block_parameters[i] -
                                            data.block_parameters[i - 1])
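    # Illustrative example (hypothetical numbers): with cumulative block sizes
    # data.block_parameters = [2, 5] and data.over_sampling = [1, 4], the cycle
    # length works out to fpm = 1*2 + 4*(5 - 2) = 14.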

    # If the update mode was selected, the previous (or original) matrix should be stored
    if command_line.update:
        if not rank and not command_line.silent:
            print 'Update routine is enabled with value %d (recommended: 50)' % command_line.update
            print 'This number is rescaled by cycle length %d (N_slow + f_fast * N_fast) to %d' % (
                fpm, fpm * command_line.update)
        # Rescale update number by cycle length N_slow + f_fast * N_fast to account for fast parameters
        command_line.update *= fpm
        previous = (sigma_eig, U, C, Cholesky)

    # Initialise adaptive
    if command_line.adaptive:
        if not command_line.silent:
            print 'Adaptive routine is enabled with value %d (recommended: 10*dimension)' % command_line.adaptive
            print 'and adaptive_ts = %d (recommended: 100*dimension)' % command_line.adaptive_ts
            print 'Please note: current implementation not suitable for multiple chains'
        if rank > 0:
            raise io_mp.ConfigurationError(
                'Adaptive routine not compatible with MPI')
        if command_line.update:
            warnings.warn(
                'Adaptive routine not compatible with update, overwriting input update value'
            )
        if command_line.superupdate:
            warnings.warn(
                'Adaptive routine not compatible with superupdate, deactivating superupdate'
            )
            command_line.superupdate = 0
        # Define needed parameters
        parameter_names = data.get_mcmc_parameters(['varying'])
        mean = np.zeros(len(parameter_names))
        last_accepted = np.zeros(len(parameter_names), 'float64')
        ar = np.zeros(100)
        if command_line.cov is None:
            # If no input covmat was given, the starting jumping factor
            # should be very small until a covmat is obtained and the
            # original start jumping factor should be saved
            start_jumping_factor = command_line.jumping_factor
            data.jumping_factor = command_line.jumping_factor / 100.
            # Analyze module will be forced to compute one covmat,
            # after which update flag will be set to False.
            command_line.update = command_line.adaptive
        else:
            # If an input covmat was provided, take mean values from param file
            # Question: is it better to always do this, rather than setting mean
            # to last accepted after the initial update run?
            for elem in parameter_names:
                mean[parameter_names.index(
                    elem)] = data.mcmc_parameters[elem]['initial'][0]

    # Initialize superupdate
    if command_line.superupdate:
        if not rank and not command_line.silent:
            print 'Superupdate routine is enabled with value %d (recommended: 20)' % command_line.superupdate
            if command_line.superupdate < 20:
                warnings.warn(
                    'Superupdate value lower than the recommended value. This '
                    'may increase the risk of poorly converged acceptance rate'
                )
            print 'This number is rescaled by cycle length %d (N_slow + f_fast * N_fast) to %d' % (
                fpm, fpm * command_line.superupdate)
        # Rescale superupdate number by cycle length N_slow + f_fast * N_fast to account for fast parameters
        command_line.superupdate *= fpm
        # Define needed parameters
        parameter_names = data.get_mcmc_parameters(['varying'])
        updated_steps = 0
        stop_c = False
        jumping_factor_rescale = 0
        if command_line.restart:
            try:
                jump_file = open(command_line.cov, 'r')
                jumping_factor_rescale = 1
            except:
                jumping_factor_rescale = 0
        # Allows computation of the mean of the jumping factor
        c_array = np.zeros(command_line.superupdate)
        # 100 to make sure max(R-1) is high if the computation failed
        R_minus_one = np.array([100., 100.])
        # Local acceptance rate of last SU*(N_slow + f_fast * N_fast) steps
        ar = np.zeros(command_line.superupdate)
        # Store acceptance rate of last 5*SU*(N_slow + f_fast * N_fast) steps
        backup_ar = np.zeros(5 * command_line.superupdate)
        # Make sure update is enabled
        if command_line.update == 0:
            if not rank and not command_line.silent:
                print 'Update routine required by superupdate. Setting --update 50'
                print 'This number is then rescaled by cycle length: %d (N_slow + f_fast * N_fast)' % fpm
            command_line.update = 50 * fpm
            previous = (sigma_eig, U, C, Cholesky)

    # If restart wanted, pick initial value for arguments
    if command_line.restart is not None:
        sampler.read_args_from_chain(data, command_line.restart)

    # If restart from best fit file, read first point (overwrite settings of
    # read_args_from_chain)
    if command_line.bf is not None and not command_line.minimize:
        sampler.read_args_from_bestfit(data, command_line.bf)

    # Pick a position (from the last accepted point if restarting, from the
    # mean value otherwise), with up to 100 tries.
    for i in range(100):
        if get_new_position(data, sigma_eig, U, i, Cholesky, Rotation) is True:
            break
        if i == 99:
            raise io_mp.ConfigurationError(
                "You should probably check your prior boundaries... because " +
                "no valid starting position was found after 100 tries")

    # Compute the starting Likelihood
    loglike = sampler.compute_lkl(cosmo, data)

    # Choose this step as the last accepted value
    # (accept_step), and modify accordingly the max_loglike
    sampler.accept_step(data)
    max_loglike = loglike

    # If the jumping factor is 0, the likelihood associated with this point is
    # displayed, and the code exits.
    if data.jumping_factor == 0:
        io_mp.print_vector(outputs, 1, loglike, data)
        return 1, loglike

    acc, rej = 0.0, 0.0  # acceptance and rejection number count
    N = 1  # number of times the system stayed in the current position

    # Print on screen the computed parameters
    if not command_line.silent and not command_line.quiet:
        io_mp.print_parameters(sys.stdout, data)

    # Suppress non-informative output after initializing
    command_line.quiet = True

    k = 1
    # Main loop: keeps going until the requested number of steps
    # (command_line.N) has been taken.
    while k <= command_line.N:
        # If the number of steps reaches the number set in the adaptive method plus one,
        # then the proposal distribution should be gradually adapted.
        # If the number of steps also exceeds the number set in adaptive_ts,
        # the jumping factor should be gradually adapted.
        if command_line.adaptive and k > command_line.adaptive + 1:
            # Start of adaptive routine
            # By B. Schroer and T. Brinckmann
            # Modified version of the method outlined in the PhD thesis of Marta Spinelli

            # Store last accepted step
            for elem in parameter_names:
                last_accepted[parameter_names.index(
                    elem)] = data.mcmc_parameters[elem]['last_accepted']
            # Recursion formula for mean and covmat (and jumping factor after ts steps)
            # mean(k) = mean(k-1) + (last_accepted - mean(k-1))/k
            mean += 1. / k * (last_accepted - mean)
            # C(k) = C(k-1) + [(last_accepted - mean(k))^T * (last_accepted - mean(k)) - C(k-1)]/k
            C += 1. / k * (
                np.dot(np.transpose(np.asmatrix(last_accepted - mean)),
                       np.asmatrix(last_accepted - mean)) - C)
            sigma_eig, U = np.linalg.eig(np.linalg.inv(C))
            if command_line.jumping == 'fast':
                Cholesky = la.cholesky(C).T
            if k > command_line.adaptive_ts:
                # c = j^2/d
                c = data.jumping_factor**2 / len(parameter_names)
                # c(k) = c(k-1) + [acceptance_rate(last 100 steps) - 0.25]/k
                c += (np.mean(ar) - 0.25) / k
                data.jumping_factor = np.sqrt(len(parameter_names) * c)
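                # Illustrative numbers: with j = 2.4 and d = 10 varying parameters,
                # c = 2.4**2/10 = 0.576; an acceptance rate of 0.30 over the last
                # 100 steps at step k = 1000 shifts c by (0.30 - 0.25)/1000 = 5e-5
                # before j is recomputed as sqrt(d * c).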

            # Save the covariance matrix and the jumping factor in a file
            # For a possible MPI implementation
            #if not (k-command_line.adaptive) % 5:
            #    io_mp.write_covariance_matrix(C,parameter_names,str(command_line.cov))
            #    jump_file = open(command_line.folder + '/jumping_factor.txt','w')
            #    jump_file.write(str(data.jumping_factor))
            #    jump_file.close()
            # End of adaptive routine

        # If the number of steps reaches the number set in the update method,
        # then the proposal distribution should be adapted.
        if command_line.update:
            # Start of update routine
            # By M. Ballardini and T. Brinckmann
            # Also used by superupdate and adaptive

            # master chain behavior
            if not rank:
                # Add the folder to the list of files to analyze, and switch on the
                # options for computing only the covmat
                from parser_mp import parse
                info_command_line = parse(
                    'info %s --minimal --noplot --keep-fraction 0.5 --keep-non-markovian --want-covmat'
                    % command_line.folder)
                info_command_line.update = command_line.update

                if command_line.adaptive:
                    # Keep all points for covmat guess in adaptive
                    info_command_line = parse(
                        'info %s --minimal --noplot --keep-non-markovian --want-covmat'
                        % command_line.folder)
                    # Tell the analysis to update the covmat after t0 steps if it is adaptive
                    info_command_line.adaptive = command_line.adaptive
                    # Only compute covmat if no input covmat was provided
                    if input_covmat is not None:
                        info_command_line.want_covmat = False

                # This is in order to allow for more frequent R-1 computation with superupdate
                compute_R_minus_one = False
                if command_line.superupdate:
                    if not (k + 10) % command_line.superupdate:
                        compute_R_minus_one = True
                # the +10 below is here to ensure that the first master update will take place before the first slave updates,
                # but this is a detail, the code is robust against situations where updating is not possible, so +10 could be omitted
                if (not (k + 10) % command_line.update
                        or compute_R_minus_one) and k > 10:
                    # Try to launch an analyze (computing a new covmat if successful)
                    try:
                        if not (k + 10) % command_line.update:
                            from analyze import analyze
                            R_minus_one = analyze(info_command_line)
                        elif command_line.superupdate:
                            # Compute (only, i.e. no covmat) R-1 more often when using superupdate
                            info_command_line = parse(
                                'info %s --minimal --noplot --keep-fraction 0.5 --keep-non-markovian'
                                % command_line.folder)
                            info_command_line.update = command_line.update
                            R_minus_one = analyze(info_command_line)
                    except:
                        if not command_line.silent:
                            print 'Step ', k, ' chain ', rank, ': Failed to calculate covariance matrix'

                if command_line.superupdate:
                    # Start of superupdate routine
                    # By B. Schroer and T. Brinckmann

                    c_array[(k - 1) % command_line.superupdate] = data.jumping_factor

                    # If acceptance rate deviates too much from the target acceptance
                    # rate we want to resume adapting the jumping factor
                    # T. Brinckmann 02/2019: use the mean a.r. over the last 5*len(ar)
                    # steps instead of over the last len(ar), which is more stable
                    if abs(np.mean(backup_ar) - command_line.superupdate_ar
                           ) > 5. * command_line.superupdate_ar_tol:
                        stop_c = False

                    # Start adapting the jumping factor after command_line.superupdate steps if R-1 < 10
                    # The lower R-1 criterion is an arbitrary choice to keep from updating when the R-1
                    # calculation fails (i.e. returns only zeros).
                    if (k > updated_steps + command_line.superupdate
                            and 0.01 < max(R_minus_one) < 10. and not stop_c):
                        c = data.jumping_factor**2 / len(parameter_names)
                        # To avoid getting trapped in local minima, the jumping factor should
                        # not go below 0.1 (arbitrary) times the starting jumping factor.
                        if (c + (np.mean(ar) - command_line.superupdate_ar) /
                            (k - updated_steps)) > (
                                0.1 * starting_jumping_factor
                            )**2. / len(parameter_names) or (
                                (np.mean(ar) - command_line.superupdate_ar) /
                                (k - updated_steps) > 0):
                            c += (np.mean(ar) - command_line.superupdate_ar
                                  ) / (k - updated_steps)
                            data.jumping_factor = np.sqrt(
                                len(parameter_names) * c)

                        if not (k - 1) % 5:
                            # Check if the jumping factor adaptation should stop.
                            # An acceptance rate of 25% balances the wish for more accepted
                            # points, while ensuring the parameter space is properly sampled.
                            # The convergence criterion is by default (26+/-1)%, so the adaptation
                            # will stop when the code reaches an acceptance rate of at least 25%.
                            # T. Brinckmann 02/2019: use the mean a.r. over the last 5*len(ar)
                            # steps instead of over the last len(ar), which is more stable
                            if (max(R_minus_one) < 0.4) and (
                                    abs(
                                        np.mean(backup_ar) -
                                        command_line.superupdate_ar) <
                                    command_line.superupdate_ar_tol) and (abs(
                                        np.mean(c_array) / c_array[
                                            (k - 1) %
                                            (command_line.superupdate)] -
                                        1) < 0.01):
                                stop_c = True
                                data.out.write(
                                    '# After %d accepted steps: stop adapting the jumping factor at a value of %f with a local acceptance rate %f \n'
                                    % (int(acc), data.jumping_factor,
                                       np.mean(backup_ar)))
                                if not command_line.silent:
                                    print 'After %d accepted steps: stop adapting the jumping factor at a value of %f with a local acceptance rate of %f \n' % (
                                        int(acc), data.jumping_factor,
                                        np.mean(backup_ar))
                                jump_file = open(
                                    command_line.folder +
                                    '/jumping_factor.txt', 'w')
                                jump_file.write('# ' +
                                                str(data.jumping_factor))
                                jump_file.close()
                            else:
                                jump_file = open(
                                    command_line.folder +
                                    '/jumping_factor.txt', 'w')
                                jump_file.write(str(data.jumping_factor))
                                jump_file.close()

                    # Write the evolution of the jumping factor to a file
                    if not k % (command_line.superupdate):
                        jump_file = open(
                            command_line.folder + '/jumping_factors.txt', 'a')
                        for i in xrange(command_line.superupdate):
                            jump_file.write(str(c_array[i]) + '\n')
                        jump_file.close()
                    # End of main part of superupdate routine

                if not (k - 1) % (command_line.update / 3):
                    try:
                        # Read the covmat
                        sigma_eig, U, C = sampler.get_covariance_matrix(
                            cosmo, data, command_line)
                        if command_line.jumping == 'fast':
                            Cholesky = la.cholesky(C).T
                        # Test here whether the covariance matrix has really changed
                        # We should in principle test all terms, but testing the first one should suffice
                        if not C[0, 0] == previous[2][0, 0]:
                            if k == 1:
                                if not command_line.silent:
                                    if input_covmat is not None:
                                        warnings.warn(
                                            'Appending to an existing folder: using %s instead of %s. '
                                            'If new input covmat is desired, please delete previous covmat.'
                                            % (command_line.cov, input_covmat))
                                    else:
                                        warnings.warn(
                                            'Appending to an existing folder: using %s. '
                                            'If no starting covmat is desired, please delete previous covmat.'
                                            % command_line.cov)
                            else:
                                # Start of second part of superupdate routine
                                if command_line.superupdate:
                                    # Adaptation of jumping factor should start again after the covmat is updated
                                    # Save the step number after it updated for superupdate and start adaption of c again
                                    updated_steps = k
                                    stop_c = False
                                    cov_det = np.linalg.det(C)
                                    prev_cov_det = np.linalg.det(previous[2])
                                    # Rescale jumping factor in order to keep the magnitude of the jumps the same.
                                    # Skip this update the first time the covmat is updated in order to prevent
                                    # problems due to a poor initial covmat. Rescale the jumping factor after the
                                    # first calculated covmat to the expected optimal one of 2.4.
                                    if jumping_factor_rescale:
                                        new_jumping_factor = data.jumping_factor * (
                                            prev_cov_det / cov_det)**(
                                                1. /
                                                (2 * len(parameter_names)))
                                        data.out.write(
                                            '# After %d accepted steps: rescaled jumping factor from %f to %f, due to updated covariance matrix \n'
                                            % (int(acc), data.jumping_factor,
                                               new_jumping_factor))
                                        if not command_line.silent:
                                            print 'After %d accepted steps: rescaled jumping factor from %f to %f, due to updated covariance matrix \n' % (
                                                int(acc), data.jumping_factor,
                                                new_jumping_factor)
                                        data.jumping_factor = new_jumping_factor
                                    else:
                                        data.jumping_factor = starting_jumping_factor
                                    jumping_factor_rescale += 1
                            # End of second part of superupdate routine

                                # Write to chains file when the covmat was updated
                                data.out.write(
                                    '# After %d accepted steps: update proposal with max(R-1) = %f and jumping factor = %f \n'
                                    % (int(acc), max(R_minus_one),
                                       data.jumping_factor))
                                if not command_line.silent:
                                    print 'After %d accepted steps: update proposal with max(R-1) = %f and jumping factor = %f \n' % (
                                        int(acc), max(R_minus_one),
                                        data.jumping_factor)
                                try:
                                    if stop_after_update:
                                        k = command_line.N
                                        print 'Covariance matrix updated - stopping run'
                                except:
                                    pass

                            previous = (sigma_eig, U, C, Cholesky)
                    except:
                        pass

                    command_line.quiet = True

                    # Start of second part of adaptive routine
                    # Stop updating the covmat after t0 steps in adaptive
                    if command_line.adaptive and k > 1:
                        command_line.update = 0
                        data.jumping_factor = start_jumping_factor
                        # Test if there are still enough steps left before the adaption of the jumping factor starts
                        if k > 0.5 * command_line.adaptive_ts:
                            command_line.adaptive_ts += k
                        # Set the mean for the recursion formula to the last accepted point
                        for elem in parameter_names:
                            mean[parameter_names.index(
                                elem
                            )] = data.mcmc_parameters[elem]['last_accepted']
                    # End of second part of adaptive routine

            # slave chain behavior
            else:
                # Start of slave superupdate routine
                if command_line.superupdate:
                    # If acceptance rate deviates too much from the target acceptance
                    # rate we want to resume adapting the jumping factor. This line
                    # will force the slave chains to check if the jumping factor
                    # has been updated
                    if abs(np.mean(backup_ar) - command_line.superupdate_ar
                           ) > 5. * command_line.superupdate_ar_tol:
                        stop_c = False

                    # Update the jumping factor every 5 steps in superupdate
                    if not k % 5 and k > command_line.superupdate and command_line.superupdate and (
                            not stop_c or
                        (stop_c and k % command_line.update)):
                        try:
                            jump_file = open(
                                command_line.folder + '/jumping_factor.txt',
                                'r')
                            # If there is a # in the file, the master has stopped adapting c
                            for line in jump_file:
                                if line.find('#') == -1:
                                    jump_file.seek(0)
                                    jump_value = jump_file.read()
                                    data.jumping_factor = float(jump_value)
                                else:
                                    jump_file.seek(0)
                                    jump_value = jump_file.read().replace(
                                        '# ', '')
                                    #if not stop_c or (stop_c and not float(jump_value) == data.jumping_factor):
                                    if not float(
                                            jump_value) == data.jumping_factor:
                                        data.jumping_factor = float(jump_value)
                                        stop_c = True
                                        data.out.write(
                                            '# After %d accepted steps: stop adapting the jumping factor at a value of %f with a local acceptance rate %f \n'
                                            % (int(acc), data.jumping_factor,
                                               np.mean(backup_ar)))
                                        if not command_line.silent:
                                            print 'After %d accepted steps: stop adapting the jumping factor at a value of %f with a local acceptance rate of %f \n' % (
                                                int(acc), data.jumping_factor,
                                                np.mean(backup_ar))
                            jump_file.close()
                        except:
                            if not command_line.silent:
                                print 'Reading jumping_factor file failed'
                            pass
                # End of slave superupdate routine

                # Start of slave update routine
                if not (k - 1) % (command_line.update / 10):
                    try:
                        sigma_eig, U, C = sampler.get_covariance_matrix(
                            cosmo, data, command_line)
                        if command_line.jumping == 'fast':
                            Cholesky = la.cholesky(C).T
                        # Test here whether the covariance matrix has really changed
                        # We should in principle test all terms, but testing the first one should suffice
                        if not C[0, 0] == previous[2][0, 0] and not k == 1:
                            if command_line.superupdate:
                                # If the covmat was updated, the master has resumed adapting c
                                stop_c = False
                            data.out.write(
                                '# After %d accepted steps: update proposal \n'
                                % int(acc))
                            if not command_line.silent:
                                print 'After %d accepted steps: update proposal \n' % int(
                                    acc)
                            try:
                                if stop_after_update:
                                    k = command_line.N
                                    print 'Covariance matrix updated - stopping run'
                            except:
                                pass
                        previous = (sigma_eig, U, C, Cholesky)

                    except:
                        pass
                # End of slave update routine
            # End of update routine

        # Pick a new position ('current' flag in mcmc_parameters), and compute
        # its likelihood. If get_new_position returns True, it means it did not
        # encounter any boundary problem. Otherwise, just increase the
        # multiplicity of the point and start the loop again
        if get_new_position(data, sigma_eig, U, k, Cholesky, Rotation) is True:
            newloglike = sampler.compute_lkl(cosmo, data)
        else:  # reject step
            rej += 1
            if command_line.superupdate:
                # Local acceptance rate over the last SU*(N_slow + f_fast * N_fast) steps
                ar[k % len(ar)] = 0
            elif command_line.adaptive:
                ar[k % len(ar)] = 0  # Local acceptance rate of last 100 steps
            N += 1
            k += 1
            continue

        # Harmless trick to avoid exponentiating large numbers. This decides
        # whether or not the system should move.
        if (newloglike != data.boundary_loglike):
            if (newloglike >= loglike):
                alpha = 1.
            else:
                alpha = np.exp(newloglike - loglike)
        else:
            alpha = -1
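        # (the exponential above only ever sees a non-positive argument, so an
        # extremely unlikely step underflows harmlessly to alpha = 0 instead of
        # overflowing)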

        if ((alpha == 1.) or (rd.uniform(0, 1) < alpha)):  # accept step

            # Print out the last accepted step (WARNING: this is NOT the one we
            # just computed ('current' flag), but really the previous one.)
            # with its proper multiplicity (number of times the system stayed
            # there).
            io_mp.print_vector(outputs, N, loglike, data)

            # Report the 'current' point to the 'last_accepted'
            sampler.accept_step(data)
            loglike = newloglike
            if loglike > max_loglike:
                max_loglike = loglike
            acc += 1.0
            N = 1  # Reset the multiplicity
            if command_line.superupdate:
                # Local acceptance rate over the last SU*(N_slow + f_fast * N_fast) steps
                ar[k % len(ar)] = 1
            elif command_line.adaptive:
                ar[k % len(ar)] = 1  # Local acceptance rate of last 100 steps
        else:  # reject step
            rej += 1.0
            N += 1  # Increase multiplicity of last accepted point
            if command_line.superupdate:
                # Local acceptance rate over the last SU*(N_slow + f_fast * N_fast) steps
                ar[k % len(ar)] = 0
            elif command_line.adaptive:
                ar[k % len(ar)] = 0  # Local acceptance rate of last 100 steps

        # Store the a.r. over the last 5 x SU*(N_slow + f_fast * N_fast) steps
        if command_line.superupdate:
            backup_ar[k % len(backup_ar)] = ar[k % len(ar)]

        # Regularly (option set in the parameter file), close and reopen the
        # buffer to force a write to file.
        if acc % data.write_step == 0:
            io_mp.refresh_file(data)
            # Update the outputs list
            outputs[0] = data.out
        k += 1  # One iteration done
    # END OF WHILE LOOP

    # If at this moment the multiplicity is higher than 1, it means the
    # current point is not yet accepted, but it also means that we did not
    # print out the last_accepted one yet. So we do.
    if N > 1:
        io_mp.print_vector(outputs, N - 1, loglike, data)

    # Print out some information on the finished chain
    rate = acc / (acc + rej)
    sys.stdout.write('\n#  {0} steps done, acceptance rate: {1}\n'.format(
        command_line.N, rate))

    # In case the acceptance rate is too low, or too high, print a warning
    if rate < 0.05:
        warnings.warn("The acceptance rate is below 0.05. You might want to "
                      "set the jumping factor to a lower value than the "
                      "default (2.4), with the option `-f 1.5` for instance.")
    elif rate > 0.6:
        warnings.warn("The acceptance rate is above 0.6, which means you might"
                      " have difficulties exploring the entire parameter space"
                      ". Try analysing these chains, and use the output "
                      "covariance matrix to decrease the acceptance rate to a "
                      "value between 0.2 and 0.4 (roughly).")
    # For a restart, erase the starting point to keep only the new, longer
    # chain.
    if command_line.restart is not None:
        os.remove(command_line.restart)
        sys.stdout.write(
            '    deleting starting point of the chain {0}\n'.format(
                command_line.restart))

    return
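For reference, the recursive mean/covariance update performed by the adaptive routine above can be isolated in a few lines. The following is a minimal standalone sketch (illustrative names, not part of the MontePython API) that mirrors the recursion formulas quoted in the comments:

import numpy as np

def recursive_moments(points):
    # points: (n_steps, n_params) array of accepted positions
    n_params = points.shape[1]
    mean = np.zeros(n_params)
    C = np.zeros((n_params, n_params))
    for k, x in enumerate(points, 1):
        # mean(k) = mean(k-1) + (x - mean(k-1)) / k
        mean += (x - mean) / k
        # C(k) = C(k-1) + [(x - mean(k))^T (x - mean(k)) - C(k-1)] / k
        d = (x - mean).reshape(-1, 1)
        C += (np.dot(d, d.T) - C) / k
    return mean, C

# e.g. recursive_moments(np.random.randn(5000, 3)) approaches zero mean and
# the identity covariance as the number of points grows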
Example #49
def chain(cosmo, data, command_line):
    """
    Run a Markov chain of fixed length with a Metropolis-Hastings algorithm.

    Main function of this module, this is the actual Markov chain procedure.
    After having selected a starting point in parameter space defining the
    first **last accepted** point, it will, for a given number of steps:

    + randomly choose a new point following the *proposal density*,
    + compute the cosmological *observables* through the cosmological module,
    + compute the value of the *likelihoods* of the desired experiments at this
      point,
    + *accept/reject* this point given its likelihood compared to that of the
      last accepted point.

    Every time the code accepts :code:`data.write_step` points (a quantity
    defined in the input parameter file), it will write the result to disk,
    flushing the buffer by closing and reopening the output file.

    .. note::

        to use the code to set a fiducial file for certain fixed parameters,
        you can use two solutions. The first one is to put all input 1-sigma
        proposal density to zero (this method still works, but is not
        recommended anymore). The second one consists of using the flag "-f 0",
        to force a step of zero amplitude.

    """

    ## Initialisation
    loglike = 0

    # In case command_line.silent has been asked, outputs should only contain
    # data.out. Otherwise, it will also contain sys.stdout
    outputs = [data.out]
    if not command_line.silent:
        outputs.append(sys.stdout)

    # check for MPI
    try:
        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        # suppress duplicate output from slaves
        if rank:
            command_line.quiet = True
    except ImportError:
        # set all chains to master if no MPI
        rank = 0

    # Recover the covariance matrix according to the input, if the varying set
    # of parameters is non-zero
    if (data.get_mcmc_parameters(['varying']) != []):
        sigma_eig, U, C = sampler.get_covariance_matrix(
            cosmo, data, command_line)
        if data.jumping_factor == 0:
            warnings.warn(
                "The jumping factor has been set to 0. The above covariance " +
                "matrix will not be used.")

    # In case of a fiducial run (all parameters fixed), simply run once and
    # print out the likelihood. This should not be used any more (one has to
    # modify the log.param, which is never a good idea). Instead, force the
    # code to use a jumping factor of 0 with the option "-f 0".
    else:
        warnings.warn(
            "You are running with no varying parameters... I will compute " +
            "only one point and exit")
        data.update_cosmo_arguments()  # this fills in the fixed parameters
        loglike = sampler.compute_lkl(cosmo, data)
        io_mp.print_vector(outputs, 1, loglike, data)
        return 1, loglike

    # In the fast-slow method, one needs the Cholesky decomposition of the
    # covariance matrix. Return the Cholesky decomposition as a lower
    # triangular matrix
    Cholesky = None
    Rotation = None
    if command_line.jumping == 'fast':
        Cholesky = la.cholesky(C).T
        Rotation = np.identity(len(sigma_eig))

    # If the update mode was selected, the previous (or original) matrix should be stored
    if command_line.update:
        previous = (sigma_eig, U, C, Cholesky)

    # If restart wanted, pick initial value for arguments
    if command_line.restart is not None:
        sampler.read_args_from_chain(data, command_line.restart)

    # If restart from best fit file, read first point (overwrite settings of
    # read_args_from_chain)
    if command_line.bf is not None:
        sampler.read_args_from_bestfit(data, command_line.bf)

    # Pick a position (from the last accepted point if restarting, from the
    # mean value otherwise), with up to 100 tries.
    for i in range(100):
        if get_new_position(data, sigma_eig, U, i, Cholesky, Rotation) is True:
            break
        if i == 99:
            raise io_mp.ConfigurationError(
                "You should probably check your prior boundaries... because " +
                "no valid starting position was found after 100 tries")

    # Compute the starting Likelihood
    loglike = sampler.compute_lkl(cosmo, data)

    # Choose this step as the last accepted value
    # (accept_step), and modify accordingly the max_loglike
    sampler.accept_step(data)
    max_loglike = loglike

    # If the jumping factor is 0, the likelihood associated with this point is
    # displayed, and the code exits.
    if data.jumping_factor == 0:
        io_mp.print_vector(outputs, 1, loglike, data)
        return 1, loglike

    acc, rej = 0.0, 0.0  # acceptance and rejection number count
    N = 1  # number of times the system stayed in the current position

    # define path and covmat
    input_covmat = command_line.cov
    base = os.path.basename(command_line.folder)
    # the previous line fails when "folder" is a string ending with a slash. This issue is cured by the next lines:
    if base == '':
        base = os.path.basename(command_line.folder[:-1])
    command_line.cov = os.path.join(command_line.folder, base + '.covmat')

    # Print on screen the computed parameters
    if not command_line.silent and not command_line.quiet:
        io_mp.print_parameters(sys.stdout, data)

    # Suppress non-informative output after initializing
    command_line.quiet = True

    k = 1
    # Main loop: keeps going until the requested number of steps
    # (command_line.N) has been taken.
    while k <= command_line.N:

        # If the number of steps reaches the number set in the update method,
        # then the proposal distribution should be adapted.
        if command_line.update:

            # master chain behavior
            if not rank:
                # Add the folder to the list of files to analyze, and switch on the
                # options for computing only the covmat
                from parser_mp import parse
                info_command_line = parse(
                    'info %s --minimal --noplot --keep-fraction 0.5 --keep-non-markovian --want-covmat'
                    % command_line.folder)
                info_command_line.update = command_line.update
                # the +10 below is here to ensure that the first master update will take place before the first slave updates,
                # but this is a detail, the code is robust against situations where updating is not possible, so +10 could be omitted
                if not (k + 10) % command_line.update and k > 10:
                    # Try to launch an analyze
                    try:
                        from analyze import analyze
                        R_minus_one = analyze(info_command_line)
                    except:
                        if not command_line.silent:
                            print 'Step ', k, ' chain ', rank, ': Failed to calculate covariance matrix'
                        pass

                if not (k - 1) % command_line.update:
                    try:
                        # Read the covmat
                        sigma_eig, U, C = sampler.get_covariance_matrix(
                            cosmo, data, command_line)
                        if command_line.jumping == 'fast':
                            Cholesky = la.cholesky(C).T
                        # Test here whether the covariance matrix has really changed
                        # We should in principle test all terms, but testing the first one should suffice
                        if not C[0, 0] == previous[2][0, 0]:
                            previous = (sigma_eig, U, C, Cholesky)
                            if k == 1:
                                if not command_line.silent:
                                    if input_covmat is not None:
                                        warnings.warn(
                                            'Appending to an existing folder: using %s instead of %s. '
                                            'If new input covmat is desired, please delete previous covmat.'
                                            % (command_line.cov, input_covmat))
                                    else:
                                        warnings.warn(
                                            'Appending to an existing folder: using %s. '
                                            'If no starting covmat is desired, please delete previous covmat.'
                                            % command_line.cov)
                            else:
                                data.out.write(
                                    '# After %d accepted steps: update proposal with max(R-1) = %f \n'
                                    % (int(acc), max(R_minus_one)))
                                if not command_line.silent:
                                    print 'After %d accepted steps: update proposal with max(R-1) = %f \n' % (
                                        int(acc), max(R_minus_one))
                                try:
                                    if stop_after_update:
                                        k = command_line.N
                                        print 'Covariance matrix updated - stopping run'
                                except:
                                    pass

                    except:
                        pass

                    command_line.quiet = True

            # slave chain behavior
            else:
                if not (k - 1) % command_line.update:
                    try:
                        sigma_eig, U, C = sampler.get_covariance_matrix(
                            cosmo, data, command_line)
                        if command_line.jumping == 'fast':
                            Cholesky = la.cholesky(C).T
                        # Test here whether the covariance matrix has really changed
                        # We should in principle test all terms, but testing the first one should suffice
                        if not C[0, 0] == previous[2][0, 0] and not k == 1:
                            data.out.write(
                                '# After %d accepted steps: update proposal \n'
                                % int(acc))
                            if not command_line.silent:
                                print 'After %d accepted steps: update proposal \n' % int(
                                    acc)
                            try:
                                if stop_after_update:
                                    k = command_line.N
                                    print 'Covariance matrix updated - stopping run'
                            except:
                                pass
                        previous = (sigma_eig, U, C, Cholesky)

                    except IOError:
                        pass

        # Pick a new position ('current' flag in mcmc_parameters), and compute
        # its likelihood. If get_new_position returns True, it means it did not
        # encounter any boundary problem. Otherwise, just increase the
        # multiplicity of the point and start the loop again
        if get_new_position(data, sigma_eig, U, k, Cholesky, Rotation) is True:
            newloglike = sampler.compute_lkl(cosmo, data)
        else:  # reject step
            rej += 1
            N += 1
            k += 1
            continue

        # Harmless trick to avoid exponentiating large numbers. This decides
        # whether or not the system should move.
        if (newloglike != data.boundary_loglike):
            if (newloglike >= loglike):
                alpha = 1.
            else:
                alpha = np.exp(newloglike - loglike)
        else:
            alpha = -1

        if ((alpha == 1.) or (rd.uniform(0, 1) < alpha)):  # accept step

            # Print out the last accepted step (WARNING: this is NOT the one we
            # just computed ('current' flag), but really the previous one.)
            # with its proper multiplicity (number of times the system stayed
            # there).
            io_mp.print_vector(outputs, N, loglike, data)

            # Report the 'current' point to the 'last_accepted'
            sampler.accept_step(data)
            loglike = newloglike
            if loglike > max_loglike:
                max_loglike = loglike
            acc += 1.0
            N = 1  # Reset the multiplicity

        else:  # reject step
            rej += 1.0
            N += 1  # Increase multiplicity of last accepted point

        # Regularly (option set in the parameter file), close and reopen the
        # buffer to force a write to file.
        if acc % data.write_step == 0:
            io_mp.refresh_file(data)
            # Update the outputs list
            outputs[0] = data.out
        k += 1  # One iteration done
    # END OF WHILE LOOP

    # If at this moment the multiplicity is higher than 1, it means the
    # current point is not yet accepted, but it also means that we did not
    # print out the last_accepted one yet. So we do.
    if N > 1:
        io_mp.print_vector(outputs, N - 1, loglike, data)

    # Print out some information on the finished chain
    rate = acc / (acc + rej)
    sys.stdout.write('\n#  {0} steps done, acceptance rate: {1}\n'.format(
        command_line.N, rate))

    # In case the acceptance rate is too low, or too high, print a warning
    if rate < 0.05:
        warnings.warn("The acceptance rate is below 0.05. You might want to "
                      "set the jumping factor to a lower value than the "
                      "default (2.4), with the option `-f 1.5` for instance.")
    elif rate > 0.6:
        warnings.warn("The acceptance rate is above 0.6, which means you might"
                      " have difficulties exploring the entire parameter space"
                      ". Try analysing these chains, and use the output "
                      "covariance matrix to decrease the acceptance rate to a "
                      "value between 0.2 and 0.4 (roughly).")

    # For a restart, erase the starting point to keep only the new, longer
    # chain.
    if command_line.restart is not None:
        os.remove(command_line.restart)
        sys.stdout.write(
            '    deleting starting point of the chain {0}\n'.format(
                command_line.restart))

    return
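The accept/reject core of the loop above is easy to lose among the update machinery; below is a minimal, self-contained sketch of the same Metropolis logic on a toy 1-D Gaussian target (illustrative names only, not the MontePython API):

import numpy as np

def toy_chain(n_steps=10000, jumping_factor=2.4, seed=0):
    rng = np.random.RandomState(seed)
    x = 0.0
    loglike = -0.5 * x ** 2        # log-likelihood of a unit Gaussian target
    acc, rej, N = 0.0, 0.0, 1      # N = multiplicity of the last accepted point
    samples = []
    for k in range(n_steps):
        x_new = x + jumping_factor * rng.randn()
        newloglike = -0.5 * x_new ** 2
        # same "harmless trick": only exponentiate non-positive numbers
        alpha = 1. if newloglike >= loglike else np.exp(newloglike - loglike)
        if alpha == 1. or rng.uniform(0, 1) < alpha:   # accept step
            # flush the previous point with its multiplicity, as print_vector does
            samples.append((N, loglike, x))
            x, loglike = x_new, newloglike
            acc += 1.0
            N = 1
        else:                                          # reject step
            rej += 1.0
            N += 1
    print('acceptance rate: %.2f' % (acc / (acc + rej)))
    return samples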
Example #50
File: rename.py  Project: jornada/DOENDO
def rename(lines, doc, ren_dict, block=None):
	'''
	Rename a variable in a particular block

	lines: line-oriented buffer to be altered
	doc: DOM object containing info about code
	ren_dict: dictionary of renames. This can be a map of string->string
	or node->string.
	block: routine/module/sub to work in. None means the root node.
	This property only makes sense if you specified the ren_dict as a map
	of string->string; otherwise the routine knows the block in which it
	should work automatically.

	Example:
		fname = 'file.f90'
		lines = open(fname).readlines()
		doc = analyze(fname, ''.join(lines))
		rename(lines, doc, {'i':'ii'})
	'''

	if block is None:
		el = doc.childNodes[0]
	else:
		els = doc.getElementsByTagName('block')
		el=None
		for el_ in els:
			if el_.getAttribute('name')==block:
				el=el_
				break

	if el is None:
		print 'Could not find block '+block
		return

	for var0, new_var in ren_dict.iteritems():
		if isinstance(var0, str):
			orig_var = var0
			_vars = el.getElementsByTagName('var')
			var=None
			for var_ in _vars:
				if var_.getAttribute('name')==orig_var:
					var=var_
					break
		else:
			var = var0
			el = var.parentNode
			orig_var = var.getAttribute('name')

		if var is None:
			print 'Could not find variable '+orig_var+' in block '+str(block)
			sys.exit(1)

		#get the initial and final lines
		start = int(el.getAttribute('start'))
		end = int(el.getAttribute('end'))

		#this will match only variables
		cmp_obj = re.compile(r'^([^!]*[^a-zA-Z0-9_!%%])%s([^a-zA-Z0-9_!%%])'%(orig_var))
		subs_str=r'\1%s\2'%(new_var)
		for i in range(start, end+1):
			old_line = ''
			new_line = ' '+lines[i]
			#hack to do multiple substitutions on the same line;
			#I probably need to learn more regexp..
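			#(the pattern is anchored with ^, so each sub() pass replaces at
			#most one occurrence; looping until the line stops changing
			#catches all of them)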
			while old_line != new_line:
				old_line = new_line
				new_line = cmp_obj.sub(subs_str, old_line)
			lines[i] = new_line[1:]

	#re-analyze file
	fname = doc.childNodes[0].nodeName
	data = ''.join(lines)
	doc = analyze.analyze(fname, data)
Example #51
'''
Author: Sebastian Alfers
This file is part of my thesis 'Evaluation and implementation of cluster-based dimensionality reduction'
License: https://github.com/sebastian-alfers/master-thesis/blob/master/LICENSE
'''

import numpy as np
import data_factory as df
import os.path
import analyze

sets = df.getAllDatasets()
#sets = [df.loadFirstPlistaDataset]
with open('log.txt', 'w') as file:
    file.write('##### printing the size of each dataset #####\n')
    for load in sets:
        data, label, desc, _ = load()
        shape = np.shape(data)
        file.write("dataset '%s':\n" % desc)
        file.write("rows:%s, dimensions:%s\n" % (shape[0], shape[1]))

        negativeExamples, negativePercentage, positiveExamples, positivePercentage, zero_elements, non_zero_elements = analyze.analyze(data, label, desc)
        file.write("negative observations: %s (%.2f %%) \n" % (negativeExamples, negativePercentage))
        file.write("positive observations: %s (%.2f %%) \n" % (positiveExamples, positivePercentage))
        file.write("zero elements: %.2f \n" % zero_elements)
        file.write("non zero elements: %.2f \n" % non_zero_elements)

        file.write("\n")
Example #52
                if platform == "darwin":
                    try: os.system("open -a 'Microsoft Excel.app' 'data/rankings.xlsx'")
                    except: print("Failed to open file. Please open it on your own.")
                elif platform == "win32":
                    try: os.system("open -a 'Microsoft Excel.exe' 'data/rankings.xlsx'")
                    except: print("Failed to open file. Please open it on your own.")
            else:
                print("Error: only 1 argument accepted, which is either 'scouting' or 'ranking'.")
        else:
            print("Error: only 1 argument accepted, which is either 'scouting' or 'ranking'.")

    elif choice.startswith("analyze") == True:
        choice = choice.split(" ")
        if len(choice) == 2:
            teamNum = choice[1]
            analyze.analyze(teamNum)
        else:
            print("Error: only 1 argument accepted, which is team number.")

    elif choice.startswith("add") == True:
        choice = choice.split(" ")
        if len(choice) == 2:
            teamNum = choice[1]
            try:
                analyze.collect(teamNum)
                print("Stored data for "+teamNum+" in database.")
            except:
                print("Invalid team number.")
        else:
Example #53
def batch_mode():
    
    verbose  = 1
    
    settingsfile = 'Mantis_batch_settings.txt'     
    
    version = '2.0.5'
    wdir = ''
    outdir = 'MantisResults'
    filename = ''
    save_hdf5 = 0
    align_stack = 0
    i0_file = ''
    i0_histogram = 0
    run_pca = 0
    n_spca = 4
    run_ca = 0
    nclusters = 5
    ca_thickness = 0
    run_sa = 0 
    sa_spectra = []
    sa_use_clspectra = 0
    run_keyengs = 0
    kengs_thresh = 0.10
    save_png = 1
    save_pdf = 0
    save_svg = 0
    


    try:
        f = open(settingsfile, 'rt')
        for line in f:
            if ':' in line : 
                slist = line.split(':')
                tag = slist[0]
                value = ':'.join(slist[1:])
                
                if   tag == 'VERSION': version = value.strip()
                elif tag == 'WORK_DIR' : wdir  =  value.strip()
                elif tag == 'OUTPUT_DIR_NAME' : outdir  =  value.strip()
                elif tag == 'FILENAME' : filename  =  value.strip()
                elif tag == 'ALIGN_STACK' : align_stack  =  value.strip()
                elif tag == 'I0_FILE' : i0_file  =  value.strip()
                elif tag == 'I0_HISTOGRAM' : i0_histogram = int(value)
                elif tag == 'SAVE_HDF5' : save_hdf5 = int(value)
                elif tag == 'RUN_PCA' : run_pca = int(value)
                elif tag == 'N_SPCA' : n_spca = int(value)
                elif tag == 'RUN_CLUSTER_ANALYSIS' : run_ca = int(value)
                elif tag == 'N_CLUSTERS' : nclusters = int(value)
                elif tag == 'THICKNESS_CORRECTION' : ca_thickness = int(value)
                elif tag == 'RUN_SPECTRAL_ANALYSIS' : run_sa = int(value)
                elif tag == 'SA_SPECTRUM' : 
                    spname = value.strip()
                    if len(spname) > 0 :
                        sa_spectra.append(spname)
                elif tag == 'SA_USE_CA_SPECTRA' : sa_use_clspectra = int(value)
                elif tag == 'RUN_KEY_ENGS' : run_keyengs = int(value)
                elif tag == 'KE_THRESHOLD' : kengs_thresh = float(value)
                elif tag == 'SAVE_PNG' : save_png = int(value)
                elif tag == 'SAVE_PDF' : save_pdf = int(value)
                elif tag == 'SAVE_SVG' : save_svg = int(value)


        f.close()
                  
    except: 
        print 'Error: Could not read in Mantis_batch_settings.txt.'
        return
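    # Illustrative Mantis_batch_settings.txt lines (keys taken from the parser
    # above; the values here are made-up examples):
    #   WORK_DIR : /path/to/stacks
    #   FILENAME : sample.hdf5
    #   RUN_PCA : 1
    #   N_CLUSTERS : 5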
    
    
    wdir = os.path.normpath(wdir)
    
    
    if verbose: 
        print 'Version: ', version
        print 'Working directory: ', wdir
        
    if not os.path.exists(wdir):
        print 'Error - Directory ', wdir, ' does not exist. Please specify working directory.'
        return
    
    outdir = os.path.join(wdir, outdir)
    if not os.path.exists(outdir):   
        os.makedirs(outdir)
        if not os.path.exists(outdir):
            print 'Error: Did not find and could not create a new output directory.'
            return 
                
    if save_png == 1:
        print "Save .png images"
        
    if save_pdf == 1:
        print "Save .pdf images"    
            
    datastruct = data_struct.h5()
    stk = data_stack.data(datastruct)
    anlz = analyze.analyze(stk)
    
    print 'Reading file:', filename
    basename, extension = os.path.splitext(filename)   
    filepath = os.path.join(wdir, filename)

    try: 
        if extension == '.hdf5':
            stk.read_h5(filepath)
        elif extension == '.hdr':
            stk.read_sdf(filepath)
        elif extension == '.stk':
            stk.read_stk(filepath)
        elif extension == '.txrm':
            stk.read_txrm(filepath)
        elif extension == '.xrm':
            stk.read_xrm(filepath)
        elif extension == '.tif':
            stk.read_tiff(filepath)
        else:
            print 'Error: Unrecognized file extension:', extension
            return
    except:
        print "Error: Could not load stack."
        return
    
    
    if align_stack:
        print 'Aligning the stack'
        xshifts = np.zeros((stk.n_ev))
        yshifts = np.zeros((stk.n_ev))
        
        referenceimage = stk.absdata[:,:,0].copy()            
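
        # Each image is registered against the first image of the stack via
        # cross-correlation; have_ref_img_fft=True on later iterations lets
        # register_images reuse the FFT of the reference image computed on
        # the first pass instead of recomputing it.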

        for i in range(stk.n_ev):

            img2 = stk.absdata[:,:,i]  
 
               
            if i==0:     
                xshift, yshift, ccorr = stk.register_images(referenceimage, img2, 
                                                          have_ref_img_fft = False)        
            else:
                xshift, yshift, ccorr = stk.register_images(referenceimage, img2, 
                                                          have_ref_img_fft = True)
            
#             #Limit the shifts to MAXSHIFT chosen by the user
#             if (self.maxshift > 0):
#                 if (abs(xshift) > self.maxshift):
#                         xshift = npy.sign(xshift)*self.maxshift
#                 if (abs(yshift) > self.maxshift):
#                         yshift = npy.sign(yshift)*self.maxshift
            
            xshifts[i] = xshift
            yshifts[i] = yshift

                                       
        #Apply shifts
        for i in range(stk.n_ev):
            img = stk.absdata[:,:,i]
            if (abs(xshifts[i])>0.02) or (abs(yshifts[i])>0.02):
                shifted_img = stk.apply_image_registration(img, xshifts[i], yshifts[i])
                stk.absdata[:,:,i] = shifted_img

                    
    
    if datastruct.spectromicroscopy.normalization.white_spectrum is not None:
        print "I0 loaded"
    else:
        print "Loading I0"
        if i0_histogram == 1:
            print 'Getting I0 from the histogram'
            stk.calc_histogram()
            averagefluxmax = np.max(stk.histogram)
            histmin = 0.98*averagefluxmax
            histmax = averagefluxmax
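            # i.e. pixels whose average flux lies within 2% of the maximum
            # are taken to be sample-free and are averaged into the I0 spectrum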
            stk.i0_from_histogram(histmin, histmax)
        
        elif len(i0_file) > 0:
            print 'Reading I0 from file:', i0_file
            i0basename, i0extension = os.path.splitext(i0_file)   
            i0filepath = os.path.join(wdir, i0_file)
            stk.read_stk_i0(i0filepath, i0extension)
            
        else:
            print "Please either set I0_HISTOGRAM to 1 to calculate I0 or specify I0 file."
            return
        
    if datastruct.spectromicroscopy.normalization.white_spectrum is None:
        print 'Error: I0 not loaded'
        return
    
    if save_hdf5 == 1:
        fnameh5 = os.path.join(wdir, basename+'_MantisBatch.hdf5')
        print 'Saving data to HDF5 file:', fnameh5
        stk.write_h5(fnameh5, datastruct)
        
            
    pca_calculated = 0
    if run_pca == 1:
        print "Running PCA Analysis"
        anlz.calculate_pca()
        print "Suggested number of significant components:", anlz.numsigpca
        print "Chosen number of significant components:", n_spca
        anlz.numsigpca = n_spca
        pca_calculated = 1
        save_pca(outdir, filename, stk, anlz, save_png, save_pdf, save_svg)
        
    ca_calculated = 0
    if run_ca == 1:
        if pca_calculated == 0:
            anlz.calculate_pca()
        print "Running Cluster Analysis"
        print "Number of clusters",  nclusters 
        if ca_thickness == 1:
            print "Thickness correction enabled"
        nclusters = anlz.calculate_clusters(nclusters, ca_thickness)
        ca_calculated = 1
        save_ca(outdir, filename, stk, anlz, save_png, save_pdf, save_svg)

    if run_sa == 1:
        print "Running Spectral Analysis"
        if len(sa_spectra) > 0:
            print "Loading spectra:", sa_spectra
            for i in range(len(sa_spectra)):
                sppath = os.path.join(wdir, sa_spectra[i])
                anlz.read_target_spectrum(filename=sppath)
                
        if sa_use_clspectra == 1:
            if ca_calculated == 1:
                print "Loading cluster spectra"
                anlz.add_cluster_target_spectra()
            else:
                print "Please set RUN_CLUSTER_ANALYSIS to 1 to calculate cluster spectra."
                
        if anlz.n_target_spectra > 1:
            save_spa(outdir, filename, stk, anlz, save_png, save_pdf, save_svg)
        
    if run_keyengs == 1:
        if pca_calculated == 0:
            anlz.calculate_pca()
        print "Finding key energies"
        print "Threshold for finding key energies:", kengs_thresh
        key_engs= anlz.calc_key_engs(kengs_thresh)
        save_keyeng(key_engs, outdir, filename, stk, anlz, save_png, save_pdf, save_svg)
    
    if (save_hdf5 == 1) and (pca_calculated == 1):
        fnameh5 = os.path.join(wdir, basename+'_MantisBatch.hdf5')
        stk.write_results_h5(fnameh5, datastruct, anlz)
    
        
    print "Finished doing Mantis analysis"
    return
Example #54
0
import io
from twisted.logger import eventsFromJSONLogFile
from analyze import analyze

for event in eventsFromJSONLogFile(io.open("log.json")):
    analyze(event)
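
# A log file in this JSON-lines format can be produced with Twisted's
# jsonFileLogObserver; a minimal sketch, assuming "log.json" is appendable:
#
#   from twisted.logger import Logger, jsonFileLogObserver
#   log = Logger(observer=jsonFileLogObserver(io.open("log.json", "a")))
#   log.info("processed {count} events", count=42)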
Example #55
0
    def getResult(self):
        global adjCloseData
        self.switchFrame('Result')
        results = analyze.analyze(adjCloseData)
        x = np.arange(1, 31)
        plotCanvas(x, results, 'Prediction', 'Price', 'Days')
Example #56
0
def chain(cosmo, data, command_line):
    """
    Run a Markov chain of fixed length with a Metropolis Hastings algorithm.

    Main function of this module, this is the actual Markov chain procedure.
    After having selected a starting point in parameter space, defining the
    first **last accepted** point, it will, for a given number of steps:

    + randomly choose a new point following the *proposal density*,
    + compute the cosmological *observables* through the cosmological module,
    + compute the value of the *likelihoods* of the desired experiments at this
      point,
    + *accept/reject* this point given its likelihood compared to the one of
      the last accepted one.
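
    The accept/reject step follows the standard Metropolis rule implemented
    below: the new point is accepted with probability
    :math:`\alpha = \min(1, \exp(\ln L_{new} - \ln L_{old}))`.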

    Every time the code accepts :code:`data.write_step` points (a quantity
    defined in the input parameter file), it writes the result to disk,
    flushing the buffer by closing the output file and reopening it again.

    .. note::

        to use the code to set a fiducial file for certain fixed parameters,
        you can use two solutions. The first one is to set all input 1-sigma
        proposal densities to zero (this method still works, but is not
        recommended anymore). The second one consists of using the flag "-f 0"
        to force a step of zero amplitude.

    """

    ## Initialisation
    loglike = 0

    # In case command_line.silent has been asked, outputs should only contain
    # data.out. Otherwise, it will also contain sys.stdout
    outputs = [data.out]
    if not command_line.silent:
        outputs.append(sys.stdout)

    # check for MPI
    try:
        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        # suppress duplicate output from slaves
        if rank:
            command_line.quiet = True
    except ImportError:
        # set all chains to master if no MPI
        rank = 0

    # Recover the covariance matrix according to the input, if the varying set
    # of parameters is non-zero
    if (data.get_mcmc_parameters(['varying']) != []):
        sigma_eig, U, C = sampler.get_covariance_matrix(cosmo, data, command_line)
        if data.jumping_factor == 0:
            warnings.warn(
                "The jumping factor has been set to 0. The above covariance " +
                "matrix will not be used.")

    # In case of a fiducial run (all parameters fixed), simply run once and
    # print out the likelihood. This should not be used any more (one has to
    # modify the log.param, which is never a good idea. Instead, force the code
    # to use a jumping factor of 0 with the option "-f 0".
    else:
        warnings.warn(
            "You are running with no varying parameters... I will compute " +
            "only one point and exit")
        data.update_cosmo_arguments()  # this fills in the fixed parameters
        loglike = sampler.compute_lkl(cosmo, data)
        io_mp.print_vector(outputs, 1, loglike, data)
        return 1, loglike

    # In the fast-slow method, one needs the Cholesky decomposition of the
    # covariance matrix, stored as a lower triangular matrix.
    Cholesky = None
    Rotation = None
    if command_line.jumping == 'fast':
        Cholesky = la.cholesky(C).T
        Rotation = np.identity(len(sigma_eig))

    # If the update mode was selected, the previous (or original) matrix should be stored
    if command_line.update:
        previous = (sigma_eig, U, C, Cholesky)

    # If restart wanted, pick initial value for arguments
    if command_line.restart is not None:
        sampler.read_args_from_chain(data, command_line.restart)

    # If restart from best fit file, read first point (overwrite settings of
    # read_args_from_chain)
    if command_line.bf is not None:
        sampler.read_args_from_bestfit(data, command_line.bf)

    # Pick a position (from the last accepted point if restarting, from the
    # mean value otherwise), with at most 100 tries.
    for i in range(100):
        if get_new_position(data, sigma_eig, U, i,
                            Cholesky, Rotation) is True:
            break
        if i == 99:
            raise io_mp.ConfigurationError(
                "You should probably check your prior boundaries... because " +
                "no valid starting position was found after 100 tries")

    # Compute the starting Likelihood
    loglike = sampler.compute_lkl(cosmo, data)

    # Choose this step as the last accepted value
    # (accept_step), and modify accordingly the max_loglike
    sampler.accept_step(data)
    max_loglike = loglike

    # If the jumping factor is 0, the likelihood associated with this point is
    # displayed, and the code exits.
    if data.jumping_factor == 0:
        io_mp.print_vector(outputs, 1, loglike, data)
        return 1, loglike

    acc, rej = 0.0, 0.0  # acceptance and rejection number count
    N = 1   # number of time the system stayed in the current position

    # define path and covmat
    input_covmat = command_line.cov
    base = os.path.basename(command_line.folder)
    # the previous line fails when "folder" is a string ending with a slash;
    # this issue is cured by the next lines:
    if base == '':
        base = os.path.basename(command_line.folder[:-1])
    command_line.cov = os.path.join(
        command_line.folder, base+'.covmat')

    # Print on screen the computed parameters
    if not command_line.silent and not command_line.quiet:
        io_mp.print_parameters(sys.stdout, data)

    # Suppress non-informative output after initializing
    command_line.quiet = True

    k = 1
    # Main loop, which goes on until the expected number of steps
    # (command_line.N) has been taken.
    while k <= command_line.N:

        # If the number of steps reaches the number set in the update method,
        # then the proposal distribution should be adapted.
        if command_line.update:

            # master chain behavior
            if not rank:
                # Add the folder to the list of files to analyze, and switch on the
                # options for computing only the covmat
                from parser_mp import parse
                info_command_line = parse(
                    'info %s --minimal --noplot --keep-fraction 0.5 --keep-non-markovian --want-covmat' % command_line.folder)
                info_command_line.update = command_line.update
                # the +10 below ensures that the first master update takes
                # place before the first slave updates; this is a detail, as
                # the code is robust against situations where updating is not
                # possible, so the +10 could be omitted
                if not (k+10) % command_line.update and k > 10:
                    # Try to launch an analyze
                    try:
                        from analyze import analyze
                        R_minus_one = analyze(info_command_line)
                    except:
                        if not command_line.silent:
                            print 'Step ', k, ' chain ', rank, ': Failed to calculate covariance matrix'

                if not (k-1) % command_line.update:
                    try:
                        # Read the covmat
                        sigma_eig, U, C = sampler.get_covariance_matrix(
                            cosmo, data, command_line)
                        if command_line.jumping == 'fast':
                            Cholesky = la.cholesky(C).T
                        # Test here whether the covariance matrix has really changed
                        # We should in principle test all terms, but testing the first one should suffice
                        if C[0,0] != previous[2][0,0]:
                            previous = (sigma_eig, U, C, Cholesky)
                            if k == 1:
                                if not command_line.silent:
                                    if input_covmat is not None:
                                        warnings.warn(
                                            'Appending to an existing folder: using %s instead of %s. '
                                            'If new input covmat is desired, please delete previous covmat.'
                                            % (command_line.cov, input_covmat))
                                    else:
                                        warnings.warn(
                                            'Appending to an existing folder: using %s. '
                                            'If no starting covmat is desired, please delete previous covmat.'
                                            % command_line.cov)
                            else:
                                data.out.write('# After %d accepted steps: update proposal with max(R-1) = %f \n' % (int(acc), max(R_minus_one)))
                                if not command_line.silent:
                                    print 'After %d accepted steps: update proposal with max(R-1) = %f \n' % (int(acc), max(R_minus_one))
                                try:
                                    if stop_after_update:
                                        k = command_line.N
                                        print 'Covariance matrix updated - stopping run'
                                except:
                                    pass

                    except:
                        pass

                    command_line.quiet = True

            # slave chain behavior
            else:
                if not (k-1) % command_line.update:
                    try:
                        sigma_eig, U, C = sampler.get_covariance_matrix(
                            cosmo, data, command_line)
                        if command_line.jumping == 'fast':
                            Cholesky = la.cholesky(C).T
                        # Test here whether the covariance matrix has really changed
                        # We should in principle test all terms, but testing the first one should suffice
                        if C[0,0] != previous[2][0,0] and k != 1:
                            data.out.write('# After %d accepted steps: update proposal \n' % int(acc))
                            if not command_line.silent:
                                print 'After %d accepted steps: update proposal \n' % int(acc)
                            try:
                                if stop_after_update:
                                    k = command_line.N
                                    print 'Covariance matrix updated - stopping run'
                            except:
                                pass
                        previous = (sigma_eig, U, C, Cholesky)

                    except:
                        pass

        # Pick a new position ('current' flag in mcmc_parameters), and compute
        # its likelihood. If get_new_position returns True, it means it did not
        # encounter any boundary problem. Otherwise, just increase the
        # multiplicity of the point and start the loop again
        if get_new_position(
                data, sigma_eig, U, k, Cholesky, Rotation) is True:
            newloglike = sampler.compute_lkl(cosmo, data)
        else:  # reject step
            rej += 1
            N += 1
            k += 1
            continue

        # Harmless trick to avoid exponentiating large numbers. This decides
        # whether or not the system should move.
        if (newloglike != data.boundary_loglike):
            if (newloglike >= loglike):
                alpha = 1.
            else:
                alpha = np.exp(newloglike-loglike)
        else:
            alpha = -1
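            # (a point outside the prior boundaries: -1 is below any draw from
            # rd.uniform(0, 1), so the acceptance test below always rejects it)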

        if ((alpha == 1.) or (rd.uniform(0, 1) < alpha)):  # accept step

            # Print out the last accepted step (WARNING: this is NOT the one we
            # just computed ('current' flag), but really the previous one.)
            # with its proper multiplicity (number of times the system stayed
            # there).
            io_mp.print_vector(outputs, N, loglike, data)

            # Report the 'current' point to the 'last_accepted'
            sampler.accept_step(data)
            loglike = newloglike
            if loglike > max_loglike:
                max_loglike = loglike
            acc += 1.0
            N = 1  # Reset the multiplicity

        else:  # reject step
            rej += 1.0
            N += 1  # Increase multiplicity of last accepted point

        # Regularly (option to set in parameter file), close and reopen the
        # buffer to force to write on file.
        if acc % data.write_step == 0:
            io_mp.refresh_file(data)
            # Update the outputs list
            outputs[0] = data.out
        k += 1  # One iteration done
    # END OF WHILE LOOP

    # If, at this moment, the multiplicity is higher than 1, it means the
    # current point is not yet accepted, but it also means that we have not
    # yet printed out the last accepted one. So we do it now.
    if N > 1:
        io_mp.print_vector(outputs, N-1, loglike, data)

    # Print out some information on the finished chain
    rate = acc / (acc + rej)
    sys.stdout.write('\n#  {0} steps done, acceptance rate: {1}\n'.
                     format(command_line.N, rate))

    # In case the acceptance rate is too low, or too high, print a warning
    if rate < 0.05:
        warnings.warn("The acceptance rate is below 0.05. You might want to "
                      "set the jumping factor to a lower value than the "
                      "default (2.4), with the option `-f 1.5` for instance.")
    elif rate > 0.6:
        warnings.warn("The acceptance rate is above 0.6, which means you might"
                      " have difficulties exploring the entire parameter space"
                      ". Try analysing these chains, and use the output "
                      "covariance matrix to decrease the acceptance rate to a "
                      "value between 0.2 and 0.4 (roughly).")

    # For a restart, erase the starting point to keep only the new, longer
    # chain.
    if command_line.restart is not None:
        os.remove(command_line.restart)
        sys.stdout.write('    deleting starting point of the chain {0}\n'.
                         format(command_line.restart))

    return
Example #57
0
#!/usr/bin/python

import cgi
from analyze import analyze
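
# For local testing this script can be served with Python 2's built-in CGI
# server (assuming it sits under ./cgi-bin/ and is executable):
#   python -m CGIHTTPServer 8000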

form = cgi.FieldStorage()
commander = form.getvalue('commander')
creatures = form.getvalue('min_creatures')
basics    = form.getvalue('basics')
verbose   = form.getvalue('verbose')

print 'Content-Type: text/html\n'
print "<form action='/cgi-bin/helper.py' method='post'>"
print "Commander: <input type='text' name='commander' value='%s'>" % (commander if commander is not None else '')
print "Min Creatures: <input type='text' name='creatures' value='%s'>" % (creatures if creatures is not None else '')
print "<input type='checkbox' name='basics' %s /> Basics" % ('checked' if basics else '')
print "<input type='checkbox' name='verbose' %s /> Verbose" % ('checked' if verbose else '')
print '''
<input type='submit' value='Submit'>
</form>
'''

if commander is None:
    print 'Please enter a commander'
else:
    out = analyze(commander, include_basics=basics, min_creatures=creatures, verbose=verbose).replace('\n', '<br />\n')
    if out == '':
        print 'Error'
    else:
        print out