Code Example #1
File: commands.py Project: AzGoalie/skype-bot
def doCommand(Message, Status):
	stats.stats(Message)
	if (Message.FromDisplayName != "Skype Bot" and (Status == 'SENT' or Status == 'RECEIVED')):
		if Message.Body == '!ping':
			ping(Message)
		elif Message.Body == '!stats':
			stats.printStats(Message)
		elif Message.Body == '!help':
			help(Message)
		elif Message.Body =='!catfact':
			catfact(Message)
		elif Message.Body.startswith('!roll'):
			roll(Message)
		elif 'youtube.com/watch?v=' in Message.Body:
			urlTitle(Message)
		elif 'imgur.com/' in Message.Body and 'i.im' not in Message.Body:
			urlTitle(Message)
		elif Message.Body == '!botstart' and not config.trivia:
			botstart(Message)
		elif Message.Body == '!botend':
			botend(Message)
		elif (Message.Body.startswith('!') and config.chatbotOn and not config.trivia):
			bot(Message)
		elif Message.Body == '!trivia' and not config.chatbot:
			trivia.triviaStart(Message)
		elif Message.Body == '!leaderboard':
			trivia.printLeaderboard(Message)
		elif config.trivia and not config.chatbotOn:
			if Message.Body.startswith('!'):	#There are hidden commands in the trivia loop...
				trivia.triviaLoop(Message)
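The exact-match commands in this chain lend themselves to a dictionary dispatch; a minimal sketch of that refactor, not from the project, using the handler names the snippet already defines (ping, stats.printStats, help, catfact, botend) and leaving the prefix/substring checks in the elif chain:

# Hypothetical refactor: exact-match commands live in a dict; startswith()
# and substring checks stay in the original elif chain.
EXACT_COMMANDS = {
    '!ping': ping,
    '!stats': stats.printStats,
    '!help': help,
    '!catfact': catfact,
    '!botend': botend,
}

def dispatchExact(Message):
    handler = EXACT_COMMANDS.get(Message.Body)
    if handler is None:
        return False
    handler(Message)
    return True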
Code Example #2
File: menu.py Project: PlasmaIntec/Python-Theater
def menu(pos):
    instruction = """Select:
(1) Display Seating Chart
(2) Sell one or more tickets
(3) Display statistics
(4) Reset program
(X) Exit program
"""
    select = raw_input(instruction)
    options = ["1", "2", "3", "4", "X"]
    if select == "1":
        printTheatre(pos)
        menu(pos)
    elif select == "2":
        sell(pos)
        menu(pos)
    elif select == "3":
        stats(pos)
        menu(pos)
    elif select == "4":
        reset(pos)
        menu(pos)
    elif select.lower() == "x":
        print("Have a bedraggled day ruffian :<")
    elif select not in options:
        print("Enter a number from 1 to 4 fool")
        menu(pos)
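Each branch above recurses back into menu(), so a long session keeps deepening the call stack. A sketch of the same menu as a loop, assuming the same helper functions (printTheatre, sell, stats, reset):

def menu(pos):
    instruction = """Select:
(1) Display Seating Chart
(2) Sell one or more tickets
(3) Display statistics
(4) Reset program
(X) Exit program
"""
    while True:
        select = raw_input(instruction)
        if select == "1":
            printTheatre(pos)
        elif select == "2":
            sell(pos)
        elif select == "3":
            stats(pos)
        elif select == "4":
            reset(pos)
        elif select.lower() == "x":
            print("Have a bedraggled day ruffian :<")
            break
        else:
            print("Enter a number from 1 to 4 fool")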
Code Example #3
def main(**kwargs):
    kwargs = parse_cl(sys.argv[1:])
    initialize_logger(kwargs['args'].out, kwargs['args'].debug, kwargs['args'].print_debug)
    logger = mylog.getLogger(__name__)
    start = time.time()
    if "prepare" in kwargs:
        logger.info("Run prepare")
        prepare(kwargs["args"])
    elif "cluster" in kwargs:
        logger.info("Run cluster")
        cluster(kwargs["args"])
    elif "report" in kwargs:
        logger.info("Run report")
        report(kwargs["args"])
    elif "predict" in kwargs:
        logger.info("Run predictions")
        predictions(kwargs["args"])
    elif "explore" in kwargs:
        logger.info("Run explore")
        explore(kwargs["args"])
    elif "stats" in kwargs:
        logger.info("Run stats")
        stats(kwargs["args"])
    elif "collapse" in kwargs:
        logger.info("Run collapse")
        collapse_fastq(kwargs["args"])
    elif "simulator" in kwargs:
        logger.info("Run simulator")
        # simulator function not yet wired up in this version
    logger.info('It took %.3f minutes' % ((time.time()-start)/60))
Code Example #4
def get_statistics():

	"""
	Input:
	--
	
	Combines all functions in order to perform statistics
	Once the statistics for a particular part of the data have
	been performed, the user is asked if they would like to 
	continue (yes: statistics can be run again, otherwise 
	the program stops)
	All results are written to files in the results folder
	called name of the statsitics_results to be found in the cwd
	(the absolut path is printed each time the statistics of a particular
	part of the data are run).
	An overview of the results (average number of tokens, average number of types,
	average number of sentences, type-token ratio and the number of analyzed files
	is printed)
	
	Output:
	---
	
	"""

	data = ask_for_data_directory()

	continue_statistics = 'yes'

	while continue_statistics == 'yes':

		name = which_stats(data)

		# In case the user chose 'all programs', the variable 'name'
		# is assigned to a list of all MA programs. In this case, the
		# program loops through the list of programs and calculates the
		# statistics for each one.
		# Else, it calculates the statistics for the chosen part of the data
		# ('name' is assigned to a string).
		if isinstance(name, list):
			for program_name in name:
				results_folder = create_dir(program_name)
				print(results_folder)
				file_list = create_list(data, program_name, results_folder)
				print(stats(file_list, results_folder))
		else:
			results_folder = create_dir(name)
			print(results_folder)
			file_list = create_list(data, name, results_folder)
			print(stats(file_list, results_folder))

		continue_statistics = input('Would you like to continue? ')
Code Example #5
	def __init__(self):

		self.grid = [
			['b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],
			['b', '.', '.', '.', '.', '.', '.', '.', '.', 'b'],
			['b', '.', '.', '.', '.', '.', '.', '.', '.', 'b'],
			['b', '.', '.', '.', '.', '.', '.', '.', '.', 'b'],
			['b', '.', '.', '.', '.', '.', '.', '.', '.', 'b'],
			['b', '.', '.', '.', '.', '.', 'w', 'w', 'w', 'b'],
			['b', '.', '.', '.', '.', '.', 'w', '.', '.', 'b'],
			['b', 'r', '.', '.', '.', '.', 'o', '.', 'c', 'b'],
			['b', 'r', 'r', '.', '.', '.', 'w', '.', '.', 'b'],
			['b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],
		]
		self.grid_explored = [[False] * len(self.grid[0]) for _ in self.grid]  # fresh list per row; the original [...] * len(...) aliased every row to the same list

		self.cell_colors = {
			'.': (0, 0, 0),
			'b': (255, 255, 255),
			'w': (255, 255, 255),
			'r': (255, 255, 255),
			'o': (255, 255, 255),
			'c': (255, 255, 255),
			'd': (255, 255, 255),
			'background': (0, 0, 0)
		}

		self.can_move = {
			'.' : True,
			'b' : False,
			'r' : True,
			'w' : False,
			'd' : False,
			'o' : True,
			'c' : True

		}

		self.objects_on_ground = {
			(8, 6): 'c'
		}


		#move enemy database to another file later
		font = pygame.font.Font("ant-maru.ttf", 14)

		merchant = font.render('M', False, (0, 250, 0))
		merchant_stats = stats.stats(10, 5, 5, 5, 5, 5)

		generic_enemy = font.render('E', False, (250, 0, 0))
		generic_enemy_stats = stats.stats(10, 5, 5, 5, 5, 5)



		self.actors = [actor_class.Actor((7, 7), merchant, "merchant", merchant_stats, "friendly", None, "a test merchant"),
		actor_class.Actor((6, 2), generic_enemy, "generic_enemy", generic_enemy_stats, "hostile", None, "a test enemy")]
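The grid_explored line fixed above is the classic list-multiplication pitfall: [row] * n repeats one row object n times, so marking a single cell explored would have marked that column in every row. A quick demonstration:

# List multiplication aliases rows; a comprehension builds independent ones.
shared = [[False] * 3] * 2
shared[0][0] = True
print(shared)        # [[True, False, False], [True, False, False]]

independent = [[False] * 3 for _ in range(2)]
independent[0][0] = True
print(independent)   # [[True, False, False], [False, False, False]]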
Code Example #6
File: svmGPV2.py Project: pparas007/TwitterGender
def main():
    #5:gender, 6:gender_confidence, 8:confidence in profile, 10:description, 11:no of favourited tweets,
    #13:link color, 14:name, 17:retweet count, 18:sidebar color, 21:tweet count
    #                                                                  (5, 6, 8, 11, 13, 17, 18, 21))
    dataset = pd.read_csv('dataset.csv',
                          encoding="latin1",
                          usecols=(5, 6, 8, 10, 11, 13, 17, 18, 21))
    #divide into dependent and independent variables
    X = dataset.iloc[:, 1:]
    y = dataset.iloc[:, 0]

    stats.stats(X, y)

    #print("help")
    #print(type(y.values))
    #print(type(X.values))
    print("printing")

    X, y = preprocessing.preprocessData(X.values, y.values)

    plotting.plot(X, y)
    """
    utilsData(X, y)
    
    #turning it into numpy ndarrays
    X = X.values
    y = y.values

    # data preprocessing to clean and arrange data.
    X,y=preprocessData(X,y)
    #plotData(X, y)

    plotDataSimple(X, y)
    """
    #after preprocessing: split data into training and test set
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.20,
                                                        random_state=0)

    #fit the data to the classifier
    svc = SVC(kernel='rbf', gamma=0.5, random_state=0)
    svc.fit(X_train, y_train)

    # predict the test data using the model
    y_pred = svc.predict(X_test)
    cm = confusion_matrix(y_test, y_pred)

    #find accuracy percentage
    print("Accuracy: ", metrics.accuracy_score(y_test, y_pred))
Code Example #7
 def test_plantas_season(self):
     '''Takes a dictionary of dictionaries with years and months as keys and the generation of a hydroelectric dam as values (2006 - 2015), and returns the same data grouped by season (e.g. Autumn, Summer).'''
     seasons = [["ENE","MAR"],["ABR","JUN"],["JUL","SEP"],["OCT","DIC"]]
     data = {}
     tabla = {}
     for ind,e in enumerate(plantas):
         try:
             for year in range(2006,2016):
                 results = {}
                 for i,season in enumerate(seasons):
                     mesInicio = season[0]
                     mesFin = season[1]
                     elementos = self.datos_anuales(plantas[ind],mesInicio,mesFin) #dictionary of dictionaries that contains the years and months as keys and the generation ral....
                     e = elementos[str(year)]
                     p = stats(e)
                     results[seasonlist[i]] = round(p.media(),4)
                 data[year]= results
             try:
                 tabla[plantas[ind]] = data
             except IndexError:
                 print "error en " + str(i) + plantas[ind]
         except ZeroDivisionError:
             print "zero division"
             pass
     return data
Code Example #8
def nodedeg(f, degsteps=None):
    #path_loss_range = range(1,102)
    path_loss_range = range(20, 94)

    paths = stats.stats(f)

    # Generate node degree for given maximum path loss
    nodes = list(paths.keys())
    links = 0

    if not degsteps:
        degsteps = [0, 1, 2, 4, 8, 16, float('inf')]
        degsteps = [x for x in degsteps if x < len(nodes) or x == float('inf')]

    degreedist = [[0] * len(path_loss_range) for _ in range(len(degsteps))]
    for dst in nodes:
        for i in range(0, len(path_loss_range)):
            max_path_loss = path_loss_range[i]

            degree = len([
                x for x in paths[dst].values() if x['maxmean'] <= max_path_loss
            ])
            if degree <= degsteps[0]:
                degreedist[0][i] += 1
            else:
                for j in range(1, len(degsteps)):
                    min_degree = degsteps[j - 1]
                    max_degree = degsteps[j]
                    if min_degree < degree <= max_degree:
                        degreedist[j][i] += 1
                        break

    return degsteps, path_loss_range, degreedist
Code Example #9
File: make_table.py Project: alastair/fp-eval
def length(_, stats_method):
    stats_head = stats_header(stats_method)
    cols = ["8\,s", "15\,s", "30\,s", "0:30--0:38", "0:30--0:45", "0:30--0:60"]
    if len(stats_head) == 3: # val, lower, upper
        stub = "rr@{--}l"
    else:
        stub = "r"
    c = stub*len(cols)
    fmt = "l%s" % c
    print r"\begin{tabular}{%s}" % (fmt,)
    print r"\toprule"
    colspans = [r"\multicolumn{%s}{c}{%s}" % (len(stats_head), cname) for cname in cols]
    print r"Query length & %s \\" % (" & ".join(colspans), )

    stats_head = [r"\multicolumn{1}{c}{%s}" % (shname) for shname in stats_head]
    print r"Algorithm & %s \\" % (" & ".join([" & ".join(stats_head) for x in cols]), )
    print r"\midrule"

    rows = ["echoprint", "chromaprint", "landmark"]
    column_names = ["chop8", "chop15", "chop30", "30chop8", "30chop15", "30chop30"]
    for e in rows:
        r = []
        for m in column_names:
            row = db.session.query(evaluation.Run).filter(evaluation.Run.engine==e).filter(evaluation.Run.munge==m).one()
            i = row.id
            s = stats.stats(i)
            r.append(stats_method(s)[1])
        flat = [a for b in r for a in b]
        restofrow = " & ".join([i for i in flat])
        print r"%s & %s \\" % (e.title(), restofrow)

    footer()
Code Example #11
File: test.py Project: cash2one/Risk_Ranking_System
def test_stats():
    import stats as st
    alg_list = ['alg1','alg2','alg3']
    algs_dicts_list = [{'D1':0,'D2':32.1,'D3':97.43},\
                       {'D1':98.09,'D2':85.3,'D3':17.53},\
                       {'D1':45.6,'D2':59.1,'D3':0}]
    s = st.stats(alg_list,algs_dicts_list)
    s.calc_stats()
    s.export_info(fn='/home/michal/SALSA_files/tmp/small_test/test',raw_flag=True)
    algs_dicts_list_2 = [{'D10':0,'D20':32.1,'D30':97.43},\
                         {'D10':98.09,'D20':85.3,'D30':17.53},\
                         {'D10':45.6,'D20':59.1,'D30':0}]
    s2 = st.stats(alg_list,algs_dicts_list_2)
    #alldicts = reduce(set.union, map(set, map(dict.items, algs_dicts_list_2)))
    st.stats_union([s,s2], fn='/home/michal/SALSA_files/tmp/small_test/test', raw_flag=True)
    return
Code Example #12
    def data(self, entidad, average):
        '''Opens a .csv file and returns a dictionary'''
        data = {}
        devst = []
        for year in range(2006, 2016):
            with open('data/precipitation/precipAccCSV/%s.csv' % year,
                      'rb') as csvfile:
                reader = csv.reader(csvfile, delimiter=',', quotechar='|')
                for row in reader:
                    if row[0] == entidad.upper():
                        precip = row[13]
            data[year] = precip
            devst.append(float(precip))

        s = stats(devst).deviation()

        #average = 2024.4 #------------------------------------------------------help
        damp = max(devst)
        ave = min(devst, key=lambda x: abs(x - average))

        drought = min(devst)

        ivd = dict((v, k) for k, v in data.items())
        results = {}
        results["drought"] = ivd[str(drought).replace(".0", "")]
        results["ave"] = ivd[str(ave).replace(".0", "")]
        results["damp"] = ivd[str(damp).replace(".0", "")]
        return data, results
Code Example #13
def analyse_args(args):
    try:
        option, value = split_arg(args[0])
    except IndexError:
        return create_session()

    allowed_opts = {
        # these don't take any value, and don't respond if an invalid value is passed
        'help': 'print (help_string)',
        'configure': 'Session(is_bare = True).reconfigure()',
        'change-pass': '******',
        'random': 'random(create_session())',
        # these demand one value (`backup` requires a valid location, while the other three require a datetime format)
        'write': 'Story(create_session(), when = value, is_write = True).write()',
        'view': 'Story(create_session(), when = value, check_path = True).view()',
        'backup': 'backup(create_session(), backup_loc = value)',
        'encrypt': 'Story(create_session(), when = value, check_path = True).encrypt()',
    }

    try:
        if option == 'search':      # special handling for `search`
            args.extend(['lang=None', 'start=start', 'end=end', 'grep'])
            options, values = zip(*map(split_arg, args))
            grep_val = '0' if 'ugly' in options else values[options.index('grep')]
            search(session = create_session(),
                   word = value,
                   lang = values[options.index('lang')],
                   start = values[options.index('start')],
                   end = values[options.index('end')],
                   grep = int(grep_val) if grep_val and grep_val.isdigit() else 7)      # '7' is rather smooth
        elif option == 'stats':     # ... and `stats`
            args.extend(['lang=None'])
            options, values = zip(*map(split_arg, args))
            stats(session = create_session(),
                  speed = int(value) if value and value.isdigit() else None,
                  lang = values[options.index('lang')])
        else:
            exec(allowed_opts[option])
        exit('')

    except KeyError:
        print ERROR, 'Invalid arguments! Continuing with the default...'
        return create_session()
    except (KeyboardInterrupt, EOFError):
        sleep(CAPTURE_WAIT)
        exit('\nGoodbye...\n')
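Keeping code in strings and running it through exec(), as allowed_opts does above, defeats linting and makes quoting fragile; the same table can hold real callables. A hypothetical Python 3 rewrite using the names from the example (Session, Story, backup, random, create_session, help_string all come from that snippet):

def build_dispatch(value):
    # Each entry is a zero-argument callable instead of a code string.
    return {
        'help': lambda: print(help_string),
        'configure': lambda: Session(is_bare=True).reconfigure(),
        'random': lambda: random(create_session()),
        'write': lambda: Story(create_session(), when=value, is_write=True).write(),
        'view': lambda: Story(create_session(), when=value, check_path=True).view(),
        'backup': lambda: backup(create_session(), backup_loc=value),
        'encrypt': lambda: Story(create_session(), when=value, check_path=True).encrypt(),
    }

# usage: build_dispatch(value)[option]()   # with the same KeyError fallback as the original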
Code Example #14
    def __init__(self, Yvar, options):
        self.Yvar = Yvar
        self.options = options
        self.stats = stats()
        self.stateSpaceSystem = stateSpaceSystem()

        ## -- model -- ##
        self.model()
Code Example #15
def cross_validation(dataset, k, *sgd_args, **sgd_kwargs):
    """Perform k-cross validation on the given dataset."""
    train_prec = []
    train_rec = []
    train_F = []
    eval_prec = []
    eval_rec = []
    eval_F = []

    void_id = dataset.alphabet.label(unicode(dataset.alphabet.void_label))
    parts = dataset.divide(k)
    obtypes = dataset.obtypes()
    for i in range(k):
        print "======== Part %s ========" % i
        print ""

        evl = parts[i]
        train = flatten(parts[:i] + parts[i+1:])
        model = Model(features_in_data(train))
        sgd(model, train, *sgd_args, **sgd_kwargs)
        tager = Tager(model, obtypes)

        _prec, _rec, _F = stats(train, tager, void_id)
        print "Train part:"
        print "Precision:", _prec
        print "Recall:", _rec
        print "F-measure:", _F
        print ""
        train_prec.append(_prec)
        train_rec.append(_rec)
        train_F.append(_F)

        _prec, _rec, _F = stats(evl, tager, void_id)
        print "Eval part:"
        print "Precision:", _prec
        print "Recall:", _rec
        print "F-measure:", _F
        print ""
        eval_prec.append(_prec)
        eval_rec.append(_rec)
        eval_F.append(_F)

    train_stats = (train_prec, train_rec, train_F)
    eval_stats = (eval_prec, eval_rec, eval_F)
    return train_stats, eval_stats
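cross_validation returns the per-fold lists unaggregated; averaging them is the usual last step. A small helper under that assumption (not from the source):

def summarize(fold_stats):
    """Average the per-fold (precision, recall, F) lists returned above."""
    prec, rec, F = fold_stats
    k = float(len(prec))
    return sum(prec) / k, sum(rec) / k, sum(F) / k

# usage: train_avg = summarize(train_stats); eval_avg = summarize(eval_stats)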
Code Example #16
def main():
    n = 100
    data = stats.floatArray(n)
    for i in range(n):
        data[i] = float(i + 1)
    data_stats = stats.stats(data, n)
    print('mean: {0:.2f}, stddev: {1:.2f}'.format(data_stats.mean,
                                                  data_stats.stddev))
    return 0
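The floatArray constructor and the returned struct with .mean/.stddev fields suggest a SWIG-wrapped C stats routine; a pure-Python stand-in with the same shape, for illustration only:

import math
from collections import namedtuple

Stats = namedtuple('Stats', ['mean', 'stddev'])

def stats(data, n):
    # Stand-in for the wrapped C routine: population mean and standard deviation.
    mean = sum(data[:n]) / float(n)
    var = sum((x - mean) ** 2 for x in data[:n]) / float(n)
    return Stats(mean, math.sqrt(var))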
Code Example #18
def weightsStats(table, gamma):
   weightDs = [] # dictionaries of final weights across all rounds

   for i in range(1000):
      reward, Gmax, weights, bestStock, tickers = exp3Stocks(table, gamma)
      weightDs.append(dict(zip(tickers, distr(weights))))

   weightMatrix = []
   for key in tickers:
      print("weight stats for %s: %r" % (key, prettyList(stats(d[key] for d in weightDs))))
Code Example #20
File: statsmain.py Project: ngd111/TA_lite
    def __enter__(self):
        self.logger = log(process_name.upper() + " main", log_filename)
        self.st = stats(_mongodb_host=self.db_host, \
                _mongodb_port=self.db_port, _mongodb_db=self.database)
        self.sched = BackgroundScheduler()

        # set signal handler
        signal.signal(signal.SIGTERM, self.signal_handler)

        self.utils.write_pid_of_process("hst")
        self.logger.write_log("info", "Enter to stats. app")
Code Example #21
def main():
    """Main function"""
    """Read in data"""
    # Read in and clean up data
    df_messy = pd.read_csv("data/under5mortalityper1000.csv")
    df_clean = df_messy.dropna(axis=1, how='all').dropna(axis=0, how='all')

    # Remove missing entries
    df_data = df_clean.dropna(axis=0, how='any')

    # Separate country names and data
    df_countries = df_data['Country']
    df_deaths = df_data.drop('Country', axis=1)
    """Data familiarization"""
    stats(df_clean, df_deaths, df_countries)
    """Part 1"""
    part1(df_deaths, df_countries)
    """Part 2"""
    part2(df_deaths, df_countries)
    """Part Graduate"""
    part_graduate(df_deaths, df_countries)
Code Example #22
def subgraph_perdb(noise):
    lengths = ["8", "15", "30"]
    levels = ["10", "20", "30"]
    fp = ["echoprint", "chromaprint", "landmark"]
    linestyle = ["-", ":", "--"]
    x = [8, 15, 30]
    pointstyle = ["o", "^", "+"]

    plt.figure()
    plt.xlim([5, 55])
    plt.xlabel("Query length (seconds)")
    plt.xticks(x)
    plt.ylabel("Accuracy")
    plt.ylim([0.0, 1.0])
    plt.title("Accuracy with added %s noise" % noise)

    count = 1
    for p, lev in zip(pointstyle, levels):
        plt.subplot(3, 1, count)

        dbel = 10 - int(lev)
        plt.xlim([5, 45])
        plt.xlabel("Query length (seconds)")
        plt.xticks(x)
        plt.ylabel("Accuracy")
        plt.ylim([0.0, 1.0])
        plt.title("Accuracy with %ddB %s noise" % (
            dbel,
            noise,
        ))

        count += 1
        print "noise", lev
        for line, c in zip(linestyle, fp):
            print "    fp", c
            data = []
            for lng in lengths:
                print ".",
                sys.stdout.flush()
                munge = "%s%s,chop%s" % (noise, lev, lng)
                row = db.session.query(
                    evaluation.Run).filter(evaluation.Run.engine == c).filter(
                        evaluation.Run.munge == munge).one()
                i = row.id
                s = stats.stats(i)
                accuracy = stats.prf(s)["accuracy"]
                data.append(accuracy)
            print ""
            linefmt = "k%s%s" % (line, p)
            lab = "%s" % (c, )
            plt.plot(x, data, linefmt, label=lab)
        plt.legend()
    plt.savefig("plot-%s-perdb.png" % noise)
Code Example #23
File: rpi.py Project: eshilts/oponger
  def _average_percent_win(self, players):
    """
    Calculates the average win percentage for a set of players.
    """
    if not len(players):
      return 0

    sum = 0
    for player in players:
      sum += stats(player)['percent_win']

    return float(sum)/len(players)
Code Example #24
def bout_inhibition():
	"""
	Analyze data from the in-bout inhibition experiment and return a figure and csv file containing summary statistics
	"""
	# set array names
	array_names = {'nspk_cue_light': 'A', 'rew_nspk': 'C', 'rew_port': 'E', 'all_port': 'G',
	'xtra_nspk': 'I', 'pellet': 'K', 'inact_nspk': 'M', 'nspks': 'O',
	'current_ratio_requirement': 'R', 'time_last_reward': 'S', 'timers': 'T', 'laser': 'U',
	'fake_laser': 'V', 'random_list': 'W', 'PR_list': 'X', 'next_PR_thresh': 'Y'}

	# set inhibition parameters
	params = {'s_iti': 0.75, 'laser_time': 15, 'b_length': 2}

	# build collection of empty data frames
	datadict = {
	'totalNspksControl': pd.DataFrame(),
	'meanNspksControl': pd.DataFrame(),
	'totalNspksInhibited': pd.DataFrame(),
	'meanNspksInhibited': pd.DataFrame(),
	'nspksByResponseReq': pd.DataFrame(),
	'YFP_meanNspksInhibited': pd.DataFrame()}

	# set directory
	directory = r'/Users/sarahfischbach/dev/Lab_analysis/bout_raw_data/'

	# list containing medPC files to analyze
	os.chdir(directory)
	files = glob.glob('!*')

	###### START LOOP THROUGH FILES ######
	for item in files:
		f = directory + item

		# extract medPC data
		extracted = mdPCe.extract(f)

		# transform medPC data
		transformed = mdPCt.transform(extracted, array_names, params)

		# add transformed data to appropriate data frame
		datadict = merge.merge(transformed, datadict)

	###### END LOOP ######

	# stats analysis
	statsResults = stats.stats(datadict)

	# graph data
	figure = bout_plot.bout_plot(statsResults)
	figure.savefig(r'/Users/sarahfischbach/dev/Lab_analysis/inBoutInhibition.png',format='png')
	figure.show()
Code Example #25
def linkhist(f):
    paths = stats.stats(f)

    nodes = list(paths.keys())

    alldata = []
    for dst in nodes:
        for src in nodes:
            if src in paths[dst]:
                alldata.append(paths[dst][src]['mean'])

    r = range(int(round(min(alldata))), int(round(max(alldata) + 1)))

    return np.histogram(alldata, r)
Code Example #26
File: script.py Project: txrx-research/vorpal
 def output_data(beacon_chain, time_elapsed, transaction_log,
                 collision_log, env):
     data = ""
     data += stats.transaction_segments_per_block(beacon_chain)
     data += stats.transactions_per_block(beacon_chain)
     if args.sweep:
         data += stats.probability_over_duration(
             args.crossshard, env.now, calc_crossshard_probability)
     data += stats.stats(args, time_elapsed, beacon_chain,
                         transaction_log,
                         env.total_generated_transactions,
                         collision_log)
     data += stats.config(args)
     return data
Code Example #27
File: sxstats2.py Project: StephenETaylor/stats
 def setouts(self, v, ide=''):
     """
         here and following I assume object is implemented with a dictionary
         This low-level fooling feels quite unpythonic, but
         the description of hasattr() in pydoc3 includes implementation
         detail:  try getattr and catch the attribute error
         and I feel more comfortable playing with the dict.
         
     """
     for i, k in enumerate(['out0', 'out1', 'out2']):
         if not k in self.__dict__:
             self.__dict__[k] = stats.stats(k)
         st = self.__dict__[k]
         st.newitem(v[i], ide)
Code Example #28
File: event.py Project: Metasilveur/Maestro
    def __init__(self, ecran):
        self.continuer = 1
        if pygame.midi.get_default_input_id() > 0:
            self.inp = pygame.midi.Input(1)
        else:
            print("Pas de clavier detecte!")
            pygame.quit()
            sys.exit()

        #self.inp = pygame.midi.Input(1)
        self.touches_app = []
        self.mEcran = ecran
        self.touches_curr_memory = []
        self.stats = stats(0, 0, 0, 0)
Code Example #29
def main(**kwargs):
    kwargs = parse_cl(sys.argv[1:])
    initialize_logger(kwargs['args'].out, kwargs['args'].debug,
                      kwargs['args'].print_debug)
    logger = mylog.getLogger(__name__)
    start = time.time()
    if "prepare" in kwargs:
        logger.info("Run prepare")
        prepare(kwargs["args"])
    elif "cluster" in kwargs:
        logger.info("Run cluster")
        cluster(kwargs["args"])
    elif "report" in kwargs:
        logger.info("Run report")
        report(kwargs["args"])
    elif "predict" in kwargs:
        logger.info("Run predictions")
        predictions(kwargs["args"])
    elif "target" in kwargs:
        logger.info("Run target annotation")
        targets_enrichment(kwargs["args"])
    elif "seqbuster" in kwargs:
        logger.info("Run seqbuster")
        miraligner(kwargs["args"])
    elif "explore" in kwargs:
        logger.info("Run explore")
        explore(kwargs["args"])
    elif "stats" in kwargs:
        logger.info("Run stats")
        stats(kwargs["args"])
    elif "collapse" in kwargs:
        logger.info("Run collapse")
        collapse_fastq(kwargs["args"])
    elif "simulator" in kwargs:
        logger.info("Run simulator")
        simulate(kwargs["args"])
    logger.info('It took %.3f minutes' % ((time.time() - start) / 60))
Code Example #30
    def normalize(self, lista):
        '''Returns a list containing normalized (0-1) data from a given list'''
        print lista

        rang = float(max(lista)) - float(min(lista))
        average = stats(lista).media()
        for i,e in enumerate(lista):
            try:
                lista[i] = (float(e) -average)/rang
            except ZeroDivisionError:
                lista[i] = 0
        minimum = min(lista)
        for i,e in enumerate(lista):
            lista[i] = e - minimum
        return lista
Code Example #31
    def backtest(self, *args):
        self.to_file()      # write results to .txt file
        if len(args) == 0:
            rdelta = relativedelta(self.backtest_end, self.backtest_start)
            months = rdelta.years * 12 + rdelta.months

            for i in range(months + 1):
                self.today = self.backtest_start + relativedelta(months = i)
                print '{}-{m:02d}'.format(self.today.year, m = self.today.month)
                self.update(self.today)
                print '\nMomentum portfolio and its past performance:'
                print 'Long:' 
                print self.momentum_long
                print '\nShort:' 
                print self.momentum_short
                self.get_return()
            self.backtest_return = self.backtest_return[['Long', 'Short', 'Average', 'SPY']]
            print self.backtest_return, '\n'
            self.to_csv()
            stats(self.backtest_return['Long'], self.backtest_return['SPY'])
        else:
            self.backtest_start = args[0]
            self.backtest_end = args[1]
            self.backtest()
Code Example #32
File: make_table.py Project: alastair/fp-eval
def subgraph_perfp(noise):
    lengths = ["8", "15", "30"]
    levels = ["10", "20", "30"]
    fp = ["echoprint", "chromaprint", "landmark"]
    linestyle = ["-", ":", "--"]
    x = [8, 15, 30]
    pointstyle = ["o", "^", "+"]

    plt.figure()
    plt.xlim([5, 55])
    plt.xlabel("Query length (seconds)")
    plt.xticks(x)
    plt.ylabel("Accuracy")
    plt.ylim([0.0, 1.0])
    plt.title("Accuracy with added %s noise" % noise)

    count = 1
    for line, c in zip(linestyle, fp):
        plt.subplot(3, 1, count)

        plt.xlim([5, 45])
        plt.xlabel("Query length (seconds)")
        plt.xticks(x)
        plt.ylabel("Accuracy")
        plt.ylim([0.0, 1.0])
        plt.title("%s accuracy with added %s noise" % (c, noise))

        count += 1
        print "fp", c
        for p, lev in zip(pointstyle, levels):
            print "   noise", lev
            data = []
            for lng in lengths:
                print ".",
                sys.stdout.flush()
                munge = "%s%s,chop%s" % (noise, lev, lng)
                row = db.session.query(evaluation.Run).filter(evaluation.Run.engine==c).filter(evaluation.Run.munge==munge).one()
                i = row.id
                s = stats.stats(i)
                accuracy = stats.prf(s)["accuracy"]
                data.append(accuracy)
            print ""
            linefmt = "k%s%s" % (line, p)
            dbel = 10 - int(lev)
            lab = "%ddB" % (dbel, )
            plt.plot(x, data, linefmt, label=lab)
        plt.legend()
    plt.savefig("plot-%s-perfp.png" % noise)
Code Example #33
File: make_table.py Project: alastair/fp-eval
def print_time_row(querysize, rows, row_titles, cols, stats_method):
    ndpoints = len(stats_header(stats_method))
    for r, t in zip(rows, row_titles):
        ret = []
        for c in cols:
            munge = r % querysize
            try:
                row = db.session.query(evaluation.Run).filter(evaluation.Run.engine==c).filter(evaluation.Run.munge==munge).one()
                i = row.id
                s = stats.stats(i)
                ret.append(stats_method(s)[1])
            except sqlalchemy.orm.exc.NoResultFound:
                ret.append(["-" for x in range(ndpoints)])
        flat = [a for b in ret for a in b]
        restofrow = " & ".join(["%2.0f" % i if i != "-" else i for i in flat])
        print r"%s & %s \\" % (t, restofrow)
Code Example #35
File: graph.py Project: cash2one/Risk_Ranking_System
    def evaluation(self,algs_list,test=[],fn=None):
        import stats
        import numpy as np
        from datetime import datetime
        
        eval_algs_list = []
        Lpct_dicts_list = []
        if not len(test):    # test_mal is empty
            trh = gm.risk_threshold
            '''risk = self.get_nodes_attr_val_dict(self.n_attr.risk)'''
            risk = gm.get_labels_dict_from_dict(d=self.get_nodes_attr_val_dict(self.n_attr.risk), threshold=trh)
            test_mal = [ k for k,v in risk.items() if v == 1 ]
            test = zip(*risk.items())
            '''tmp = np.asarray([0] * len(risk))
            tmp[np.where(np.asarray(test[1])==1)] = 1
            test[1] = tmp
            #test[0] = np.asarray(test[0])'''
             
        else:
            test_mal = test[0][np.where(test[1]==1)]
        test_scores_list = []
        for alg in algs_list:
            Lpct_dicts_list.append(dict((k,self.G.node[k][self.alg_auth_Lpct[alg]]) for k in test_mal)) #dict of the Lpct values of the risky domains ONLY! (the ones with label 1)
            test_scores_list.append([ self.G.node[k][self.alg_auth_Lpct[alg]] for k in test[0] ])   #list of the Lpct values of ALL domains 
            eval_algs_list.append('_'.join([alg,'auth']))
            if alg in self.alg_hub_Lpct:    #hits or salsa
                Lpct_dicts_list.append(dict((k,self.G.node[k][self.alg_hub_Lpct[alg]]) for k in test_mal))  #dict of the Lpct values of the risky domains ONLY! (the ones with label 1)
                test_scores_list.append([ self.G.node[k][self.alg_hub_Lpct[alg]] for k in test[0] ])    #list of the Lpct values of ALL domains 
                eval_algs_list.append('_'.join([alg,'hub']))
        s = stats.stats(eval_algs_list,Lpct_dicts_list,test[1],test_scores_list) # stats instantiation
        s.calc_stats()
        if fn: # "full run" 
            s.export_info(fn=fn,raw_flag=True)
            s.export_seed_histogram(fn=fn[:-4])
        '''#FOR DEBUG:
        else: # a fold run- we still want to export the histogram of the known bad domains:
            s.export_seed_histogram(fn='/home/michal/SALSA_files/outputs/real_run/hist_'+datetime.now().strftime("%H:%M:%S"))'''

            
        
        # Export to weka file:
        #self.export_to_weka_file(algs_list,test)
        
        return s
Code Example #36
def process_one(process_args):
    file_loc, args = process_args
    retDic = {file_loc: None}

    try:
        score = open_midi(file_loc)
        if args.stats:
            retDic[file_loc] = stats(score)
        else:
            raw_data = modify_piece(score, file_loc, args)
            retDic[file_loc] = {'raw_data': raw_data}

    except exceptions21.StreamException:
        logger.error('Cannot translate %s to music21 stream' % file_loc)
    except CalledProcessError:
        logger.error('mscore failed to convert %s from midi to musixcml'
                     % file_loc)

    return retDic
Code Example #37
File: page_handlers.py Project: jarajapu/oponger
  def DoGet(self, player_key_name):
    player_to_show = Player.get_by_key_name(player_key_name)

    logging.info('Getting player %s' % player_to_show)
    if not self.player:
      self.error(404)
      self.response.out.write("""<strong>No player with key %s.
      Try looking through the <a href="/players">list of players</a>.</strong>""" % player_key_name)

    additional_values = {
      'player_to_show'  : player_to_show,
      'available_games' : player_to_show.available_games(),
      'completed_games' : player_to_show.completed_games(),
      'active_games'    : player_to_show.active_games(),
      'stats'           : stats(player_to_show)
    }

    self.template_values.update(additional_values)
    self.render_to_response("player.html")
Code Example #38
File: __init__.py Project: shizzz477/zap-api-python
    def __init__(self, proxies=None, apikey=None):
        """
        Creates an instance of the ZAP api client.

        :Parameters:
           - `proxies`: dictionary of ZAP proxies to use.

        Note that all of the other classes in this directory are generated
        new ones will need to be manually added to this file
        """
        self.__proxies = proxies or {
            'http': 'http://127.0.0.1:8080',
            'https': 'http://127.0.0.1:8080'
        }
        self.__apikey = apikey

        self.acsrf = acsrf(self)
        self.ajaxSpider = ajaxSpider(self)
        self.ascan = ascan(self)
        self.authentication = authentication(self)
        self.authorization = authorization(self)
        self.autoupdate = autoupdate(self)
        self.brk = brk(self)
        self.context = context(self)
        self.core = core(self)
        self.forcedUser = forcedUser(self)
        self.httpsessions = httpSessions(self)
        self.importLogFiles = importLogFiles(self)
        self.params = params(self)
        self.pnh = pnh(self)
        self.pscan = pscan(self)
        self.reveal = reveal(self)
        self.script = script(self)
        self.search = search(self)
        self.selenium = selenium(self)
        self.sessionManagement = sessionManagement(self)
        self.spider = spider(self)
        self.stats = stats(self)
        self.users = users(self)

        # not very nice, but prevents warnings when accessing the ZAP API via https
        requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
Code Example #40
def _trainPieceDetection(video, numFrames=50):
    '''
    This function will handle training the piece detection and collecting
    initial statistics on the board location.

    video -- the video file which will be used for training,
    numFrames -- the number of frames which will be grabbed from the
        video file for the purposes of training the piece detector

    returns:  cornerStats, w, frames
    cornerStats -- object of class stats which keeps track of the
        location of the chess board square corners.
    w -- the weight learned from training the detector of pieces.
    frames -- the frames which were drawn from the video for the purposes of 
        training.
    '''
    pieceLocationsF = []
    for col in [0, 1, 6, 7]:
        for row in xrange(8):
            pieceLocationsF.append( (row, col) )
    
    pieceLocations = [pieceLocationsF for i in xrange(numFrames)]

    cornerStats = stats( (81, 2) )

    frames = []
    for i in xrange(numFrames):
        s, f = video.read()
        if not s:
            print 'Error:  The video was too short'

        chessBoardPoints = find_board.find_board(f)
        cornerStats.append(chessBoardPoints)
        frames.append(f)

    #print chessBoardPoints
    #print
    #print cornerStats.mean()

    w = find_pieces.train_detector( frames, pieceLocations, cornerStats.mean() )

    return cornerStats, w, frames
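The stats((81, 2)) object here behaves as a running accumulator: each frame's (81, 2) array of corner points is append()ed, and mean() later averages them. A minimal stand-in under that assumption, using NumPy:

import numpy as np

class stats(object):
    # Hypothetical accumulator matching the usage in the example above.
    def __init__(self, shape):
        self.shape = shape
        self.samples = []

    def append(self, points):
        self.samples.append(np.asarray(points, dtype=float).reshape(self.shape))

    def mean(self):
        # Element-wise mean over all appended samples.
        return np.mean(self.samples, axis=0)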
Code Example #43
File: rpi.py Project: eshilts/oponger
  def calculate_rankings(self):
    """
    Calculates rankings based on the RPI method. The implementation
    is naive, and would not work for a large number of players.
    """
    logging.info("Calculating RPI rankings.")
    for player in Player.all():
      wp = stats(player)['percent_win']

      opponents = self.player_opponents[hash(player)]
      owp = self._average_percent_win(opponents)

      opponents_opponents = self._get_opponents_for_players(opponents)
      oowp = self._average_percent_win(opponents_opponents)

      rpi_rank = (wp * 0.25) + (owp * 0.5) + (oowp * 0.25)
      logging.info('For player %s. RPI = %s (%s * 0.25 + %s * 0.5 + %s * 0.25)'
                   % (player.pseudonym,rpi_rank, wp, owp, oowp))

      player.rpi_rank = rpi_rank
      player.put()
Code Example #44
def display(get):
    n = 10

    # Get all data on day 0 where gpi is active
    D1 = get('10', day=0, gpi=1, n_trial=60, key='RT')
    # Get all data on day 1 where gpi is inactive
    D2 = get('10', day=1, gpi=0, n_trial=60, key='RT')

    D1_start = [np.mean(session[:n]) for session in D1]
    D1_end = [np.mean(session[-n:]) for session in D1]
    D2_start = [np.mean(session[:n]) for session in D2]
    D2_end = [np.mean(session[-n:]) for session in D2]

    print("Day 1 saline, day 2 muscimol")
    print("----------------------------")
    print("D1 start: %.3f  ±%.3f (std)  ±%.3f (sem)" % stats(D1_start))
    print("D1 end:   %.3f  ±%.3f (std)  ±%.3f (sem)" % stats(D1_end))
    print("D2 start: %.3f  ±%.3f (std)  ±%.3f (sem)" % stats(D2_start))
    print("D2 end:   %.3f  ±%.3f (std)  ±%.3f (sem)" % stats(D2_end))
    print()

    # Get all data on day 0 where gpi is inactive
    D1 = get('01', day=0, gpi=0, n_trial=60, key='RT')
    # Get all data on day 1 where gpi is active
    D2 = get('01', day=1, gpi=1, n_trial=60, key='RT')

    D1_start = [np.mean(session[:n]) for session in D1]
    D1_end = [np.mean(session[-n:]) for session in D1]
    D2_start = [np.mean(session[:n]) for session in D2]
    D2_end = [np.mean(session[-n:]) for session in D2]

    print("Day 2 muscimol, day 1 saline")
    print("----------------------------")
    print("D1 start: %.3f  ±%.3f (std)  ±%.3f (sem)" % stats(D1_start))
    print("D1 end:   %.3f  ±%.3f (std)  ±%.3f (sem)" % stats(D1_end))
    print("D2 start: %.3f  ±%.3f (std)  ±%.3f (sem)" % stats(D2_start))
    print("D2 end:   %.3f  ±%.3f (std)  ±%.3f (sem)" % stats(D2_end))
    print()
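The stats() helper here clearly returns a three-tuple: each format string unpacks a mean plus two spreads (std and sem). A minimal stand-in, assuming NumPy (the original may differ in ddof conventions):

import numpy as np

def stats(values):
    # Hypothetical reconstruction: (mean, standard deviation, standard error of the mean).
    a = np.asarray(values, dtype=float)
    sd = a.std(ddof=1)               # sample std; the source may use population std
    return a.mean(), sd, sd / np.sqrt(len(a))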
Code Example #46
File: make_table.py Project: alastair/fp-eval
def calculate_row(fp, rows, cols, stats_method):
    ndpoints = len(stats_header(stats_method))
    all_data = []
    for r in rows:
        ret = []
        for c in cols:
            if r == "":
                ret.append(["" for i in range(ndpoints)])
                continue
            munge = r % c
            try:
                row = db.session.query(evaluation.Run).filter(evaluation.Run.engine==fp).filter(evaluation.Run.munge==munge).one()
                i = row.id
                s = stats.stats(i)
                ret.append(stats_method(s)[1])
            except sqlalchemy.orm.exc.NoResultFound:
                print "error, munge", munge
                raise   # note: this re-raise makes the fallback append below unreachable
                ret.append(["-" for x in range(ndpoints)])
        flat = [a for b in ret for a in b]
        all_data.append(flat)
    return all_data
Code Example #47
File: __init__.py Project: BLKStone/zaproxy
    # Note the mutable default argument here; the newer client (Example #38) takes proxies=None instead.
    def __init__(self, proxies={'http': 'http://127.0.0.1:8080',
        'https': 'http://127.0.0.1:8080'}):
        """
        Creates an instance of the ZAP api client.

        :Parameters:
           - `proxies`: dictionary of ZAP proxies to use.
           
        Note that all of the other classes in this directory are generated
        new ones will need to be manually added to this file
        """
        self.__proxies = proxies
        
        self.acsrf = acsrf(self)
        self.ajaxSpider = ajaxSpider(self)
        self.ascan = ascan(self)
        self.authentication = authentication(self)
        self.authorization = authorization(self)
        self.autoupdate = autoupdate(self)
        self.brk = brk(self)
        self.context = context(self)
        self.core = core(self)
        self.forcedUser = forcedUser(self)
        self.httpsessions = httpSessions(self)
        self.importLogFiles = importLogFiles(self)
        self.params = params(self)
        self.pnh = pnh(self)
        self.pscan = pscan(self)
        self.reveal = reveal(self)
        self.script = script(self)
        self.search = search(self)
        self.selenium = selenium(self)
        self.sessionManagement = sessionManagement(self)
        self.spider = spider(self)
        self.stats = stats(self)
        self.users = users(self)
Code Example #48
    # (The opening of this snippet was lost in extraction: the enclosing def,
    # plus the setup of numActions, reward, bestActionCumulativeReward,
    # numRounds, bestAction and tickers, is missing.)
    cumulativeReward = 0
    t = 0
    ucb1Generator = ucb1(numActions, reward)
    for (chosenAction, reward, ucbs) in ucb1Generator:
        cumulativeReward += reward
        t += 1
        if t == numRounds:
            break

    return cumulativeReward, bestActionCumulativeReward, ucbs, tickers[
        bestAction]


prettyList = lambda L: ', '.join(['%.3f' % x for x in L])
payoffStats = lambda data: stats(ucb1Stocks(data)[0] for _ in range(1000))


def runExperiment(table):
    print("(Expected payoff, variance) over 1000 trials is %r" %
          (payoffStats(table), ))
    reward, bestActionReward, ucbs, bestStock = ucb1Stocks(table)
    print("For a single run: ")
    print("Payoff was %.2f" % reward)
    print("Regret was %.2f" % (bestActionReward - reward))
    print("Best stock was %s at %.2f" % (bestStock, bestActionReward))
    print("ucbs: %r" % prettyList(ucbs))


if __name__ == "__main__":
    table = readInStockTable('stocks/fortune-500.csv')
Code Example #50
File: sxstats2.py Project: StephenETaylor/stats
 def setxf2(self, v, ide):
     if not hasattr(self, 'xf2'):  #not defined(self.xf2):
         self.xf2 = stats.stats('xf2')
     self.xf2.newitem(v, ide)
Code Example #51
File: load_best.py Project: willsheffler/lib
def load_best(files,obj,number=0,level=0,discrete=0):
  """
  load_best(files,obj,number=0,level=0,discrete=0)

  Loads the best (lowest energy) Modeller files into a single pymol
  molecular object.  You can specify the number of structures to load,
  or an upper energy cutoff (level) below which structures will be
  loaded.

  Set discrete=1 to have the structures loaded with the discrete flag 
  set (allows colouring on individual state properties, for example).

  """
  #input = sys.argv[1]
  grep_cmd = 'grep OBJECTIVE ' + files
  grepped = os.popen(grep_cmd).readlines()

  level=float(level)
  number=int(number)

  entries = {}
  for f in grepped:
    line = f.split(':')
    entries[line[0]] = line[-1][0:-1]

  data = []
  for my_key in entries:
    data.append((my_key,entries[my_key]))

  data.sort(key=lambda x: float(x[1]))  # compare energies numerically, not as strings

# create array of data to send to statistics calculation
  x=[]
  for y in data:
    x.append(map(float,[y[1]]))

  mean,stdev,median,maximum,minimum = stats.stats(x)


  numcol = len(mean)
  numpts = len(x)

  print "Num pts: ", numpts, "Num cols: ", numcol
  print "col:    Mean:       Stdev:      Median:      Max:        Min:"
  for j in range(numcol):
    print "%4d  %10.8g  %10.8g  %10.8g  %10.8g  %10.8g" % \
    (j, mean[j], stdev[j], median[j],maximum[j], minimum[j])



  count = 0

  if number != 0:
    print "Loading %d lowest energy structures: " % number
    for i in range(number):
      load_obj(data[i][0], obj, float(data[i][1]),i+1,discrete)
      count += 1
  else:
    if level != 0.0:
      print "Loading structures with energy below specified level: ",level
      limit = level
    else:
      print "Loading structures with energy below the median: ",median
      limit = median

    for i in range(len(data)):
      if float(data[i][1]) < limit:
        load_obj(data[i][0], obj, float(data[i][1]),i+1,discrete)
        count += 1

  print 'Loaded %d structures into %s' % (count, obj)
Code Example #52
  def run_wq_models(self, **kwargs):
    prediction_testrun_date = datetime.now()
    try:
      config_file = ConfigParser.RawConfigParser()
      config_file.read(kwargs['config_file_name'])

      data_collector_plugin_directories=config_file.get('data_collector_plugins', 'plugin_directories').split(',')

      self.collect_data(data_collector_plugin_directories=data_collector_plugin_directories)


      boundaries_location_file = config_file.get('boundaries_settings', 'boundaries_file')
      sites_location_file = config_file.get('boundaries_settings', 'sample_sites')
      xenia_wq_db_file = config_file.get('database', 'name')

      #Move xenia obs db settings into a standalone ini. We can then
      #check the main ini file into source control without exposing login info.
      db_settings_ini = config_file.get('password_protected_configs', 'settings_ini')
      xenia_obs_db_config_file = ConfigParser.RawConfigParser()
      xenia_obs_db_config_file.read(db_settings_ini)

      xenia_obs_db_host = xenia_obs_db_config_file.get('xenia_observation_database', 'host')
      xenia_obs_db_user = xenia_obs_db_config_file.get('xenia_observation_database', 'user')
      xenia_obs_db_password = xenia_obs_db_config_file.get('xenia_observation_database', 'password')
      xenia_obs_db_name = xenia_obs_db_config_file.get('xenia_observation_database', 'database')

      output_plugin_dirs=config_file.get('output_plugins', 'plugin_directories').split(',')
    except (ConfigParser.Error, Exception) as e:
      self.logger.exception(e)
    else:
      #Load the sample site information. Has name, location and the boundaries that contain the site.
      mb_sites = mb_sample_sites()
      mb_sites.load_sites(file_name=sites_location_file, boundary_file=boundaries_location_file)
      #Retrieve the data needed for the models.

      mb_wq_data = mb_wq_model_data(xenia_wq_db_name=xenia_wq_db_file,
                                    xenia_obs_db_type='postgres',
                                    xenia_obs_db_host=xenia_obs_db_host,
                                    xenia_obs_db_user=xenia_obs_db_user,
                                    xenia_obs_db_password=xenia_obs_db_password,
                                    xenia_obs_db_name=xenia_obs_db_name
                                    )

      site_model_ensemble = []
      #First pass we want to get all the data, after that we only need to query
      #the site specific pieces.
      reset_site_specific_data_only = False
      site_data = OrderedDict()
      total_time = 0
      for site in mb_sites:
        try:
          #Get all the models used for the particular sample site.
          model_list = self.build_test_objects(config_file=config_file, site_name=site.name)
          if len(model_list):
            #Create the container for all the models.
            site_equations = wqEquations(site.name, model_list, True)

            #Get the station specific tide stations
            tide_station = config_file.get(site.name, 'tide_station')
          else:
            self.logger.error("No models found for site: %s" % (site.name))
        except (ConfigParser.Error,Exception) as e:
          self.logger.exception(e)
        else:
          try:
            if len(model_list):
              mb_wq_data.reset(site=site,
                                tide_station=tide_station
                                )

              site_data['station_name'] = site.name
              mb_wq_data.query_data(kwargs['begin_date'], kwargs['begin_date'], site_data, reset_site_specific_data_only)
              reset_site_specific_data_only = True
              site_equations.runTests(site_data)
              total_test_time = sum(testObj.test_time for testObj in site_equations.tests)
              self.logger.debug("Site: %s total time to execute models: %f ms" % (site.name, total_test_time * 1000))
              total_time += total_test_time


              #Calculate some statistics on the entero results. This is making an assumption
              #that all the tests we are running are calculating the same value, the entero
              #amount.
              entero_stats = None
              if len(site_equations.tests):
                entero_stats = stats()
                for test in site_equations.tests:
                  if test.mlrResult is not None:
                    entero_stats.addValue(test.mlrResult)
                entero_stats.doCalculations()

              #Check to see if there is a entero sample for our date as long as the date
              #is not the current date.
              entero_value = None
              if datetime.now().date() != kwargs['begin_date'].date():
                entero_value = check_site_date_for_sampling_date(site.name, kwargs['begin_date'], output_settings_ini, kwargs['use_logging'])


              site_model_ensemble.append({'metadata': site,
                                          'models': site_equations,
                                          'entero_value': None,  # note: the entero_value computed above is never passed along
                                          'statistics': entero_stats})
          except Exception as e:
            self.logger.exception(e)

      self.logger.debug("Total time to execute all sites models: %f ms" % (total_time * 1000))
      try:
        self.output_results(output_plugin_directories=output_plugin_dirs,
                                site_model_ensemble=site_model_ensemble,
                                prediction_date=kwargs['begin_date'],
                                prediction_run_date=prediction_testrun_date)
      except Exception as e:
        self.logger.exception(e)
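The entero_stats object above exposes an incremental addValue()/doCalculations() interface. A minimal sketch consistent with that usage (the attribute names mean and stdDev are assumptions):

import math

class stats(object):
    # Hypothetical incremental-statistics object matching addValue()/doCalculations().
    def __init__(self):
        self.values = []
        self.mean = None    # assumed result attribute
        self.stdDev = None  # assumed result attribute

    def addValue(self, v):
        self.values.append(float(v))

    def doCalculations(self):
        n = len(self.values)
        if n:
            self.mean = sum(self.values) / n
            self.stdDev = math.sqrt(sum((v - self.mean) ** 2 for v in self.values) / n)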
Code Example #53
File: sgdd.py Project: snsl/osc-osd
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
# 
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
# 
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import re, sys
import commands
from stats import stats

st = stats()

time = re.compile("^time", re.IGNORECASE)
sp = re.compile("\s+")

# iflag=direct if=/dev/sdb of=/tmp/z
def run(exp):
    if exp == "read":
        var = "if=/dev/sdb of=/dev/null"
    elif exp == "write":
        var = "of=/dev/sdb if=/dev/zero"

    for bs in range(4, 512, 4):
        r = []
        for j in range(100):
            s = "sg_dd blk_sgio=1 " + var +" bs=" + str(bs) + "K bpt=1 " + \
Code Example #54
File: 2049.py Project: Linzee/py-2049
parser = OptionParser()
parser.add_option("-t", "--train",
                  dest="train",
                  help="Search for train or group of trains by name")
parser.add_option("-p", "--probability",
                  dest="probability", action="store_true", default=False,
                  help="Calculate probability to reach destination")
parser.add_option("-d", "--delays-file",
                  dest="path", default=DATA_PATH, metavar="FILE",
                  help="Change path to delays file")
parser.add_option("-s", "--show",
                  dest="show", action="store_true", default=False,
                  help="Only show graph (else they are stored in file)")

options, optionsValues = parser.parse_args()

# fix console encoding
for option, value in vars(options).iteritems():
    if isinstance(value, basestring):
        setattr(options, option, value.decode(sys.getfilesystemencoding()))
for i in range(0, len(optionsValues)):
    optionsValues[i] = optionsValues[i].decode(sys.getfilesystemencoding())

DATA_PATH = options.path

if not options.probability:
    from stats import stats
    stats(DATA_PATH, options)
else:
    from probability import probability
    probability(DATA_PATH, optionsValues)
Code Example #55
   bestActionCumulativeReward = singleActionReward(bestAction)

   cumulativeReward = 0
   t = 0
   randomGenerator = randomBandit(numActions, reward)
   for (chosenAction, reward) in randomGenerator:
      cumulativeReward += reward
      t += 1
      if t == numRounds:
         break

   return cumulativeReward, bestActionCumulativeReward, tickers[bestAction]


prettyList = lambda L: ', '.join(['%.3f' % x for x in L])
payoffStats = lambda data: stats(randomBanditStocks(data)[0] for _ in range(1000))


def runExperiment(table):
   print("(Expected payoff, variance) over 1000 trials is %r" % (payoffStats(table),))
   reward, bestActionReward, bestStock = randomBanditStocks(table)
   print("For a single run: ")
   print("Payoff was %.2f" % reward)
   print("Regret was %.2f" % (bestActionReward - reward))
   print("Best stock was %s at %.2f" % (bestStock, bestActionReward))


if __name__ == "__main__":
   table = readInStockTable('stocks/fortune-500.csv')
   runExperiment(table)