Example #1
def print_stats(input_list, title=None):
    '''
    Computes minimum, maximum, mean, and median using get_stats function from
      stats module, and prints them out in a nice format.

    Parameters:
      input_list(list): a list representing a column
      title(str): Optional. If given, title is printed out before the stats.

    Examples:
    >>> print_stats(range(50))
    Minimum: 0
    Maximum: 49
    Mean: 24.5
    Median: 24.5
    >>> print_stats(range(100), 'Stats!')
    Stats!
    Minimum: 0
    Maximum: 99
    Mean: 49.5
    Median: 49.5
    '''

    # your code goes here
    if title is None:  # no title given, just print the statistics
        print("Minimum: %i\nMaximum: %i\nMean: %.1f\nMedian: %.1f" %
              get_stats(input_list))
    else:  # print the title first, then the statistics
        print(title)
        print("Minimum: %i\nMaximum: %i\nMean: %.1f\nMedian: %.1f" %
              get_stats(input_list))

    return None
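The get_stats helper that print_stats relies on is not shown on this page. For orientation only, here is a minimal sketch, assuming (as the format string implies) that it returns a (minimum, maximum, mean, median) tuple; this is an illustration, not the actual stats module:

# Hypothetical stand-in for stats.get_stats; the real module may differ.
def get_stats(input_list):
    values = sorted(input_list)
    n = len(values)
    mean = sum(values) / n  # true division (Python 3)
    mid = n // 2
    # Median: the middle value, or the average of the two middle values.
    median = float(values[mid]) if n % 2 else (values[mid - 1] + values[mid]) / 2
    return values[0], values[-1], mean, median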
Example #2
def event_trigger(channel):
  logger = logging.getLogger('scale')
  logger.info("Edge Detect")
  time.sleep(3)
  GPIO.output(GREEN_LED, True)

  # Take picture with webcam
  image = ocr.get_picture(True)
  # Turn off LED when done
  GPIO.output(GREEN_LED, False)

  # OCR the image
  weight = ocr.ocr_image(image)

  # Live on the wild side, exceptions are handled globally for us (probably bad)
  weight = float(weight)

  # Handle case where the hundreds digit is cut off for some reason
  if (weight < 100):
    weight = weight + 100

  if (weight < MIN_WEIGHT) or (weight > MAX_WEIGHT):
    raise ValueError('Invalid Weight!')

  logger.info("Weight: " + str(weight))

  # Open the json file
  with open('../web/data.json') as f:
    data = json.load(f)

  # Get time/date
  # timestamp = time.strftime('%x %X %Z')
  timestamp = int(round(time.time() * 1000))

  # Append to the temp json object
  data.update({timestamp : weight})

  # Write changes to the file
  with open('../web/data.json', 'w') as f:
    json.dump(data, f, sort_keys=True, indent=2)

  # Send weight to IFTTT maker channel
  if len(ifttt.IFTTT_KEY) > 0 and len(ifttt.IFTTT_TRIGGER) > 0:
      url = 'https://maker.ifttt.com/trigger/' + ifttt.IFTTT_TRIGGER + '/with/key/' + ifttt.IFTTT_KEY
      payload = {'value1': weight}
      response = requests.post(url, json=payload)
      logger.info('IFTTT Response: ' + response.text)

  # Update stats
  stats.get_stats()

  # Flash Green LED for successful weight capture
  for i in range(0,6):
    GPIO.output(GREEN_LED, True)
    time.sleep(0.35)
    GPIO.output(GREEN_LED, False)
    time.sleep(0.35)
Example #3
def sample():
    from sys import platform as _platform
    if _platform == "linux" or _platform == "linux2":
        # Linux
        return jsonify(stats.get_stats())
    elif _platform == "darwin":
        # OS X
        return jsonify(stats.get_stats())
    elif _platform == "win32":
        # Windows
        return jsonify({})
Example #4
def count_success_failure_by_ns(filename):
    number_ns = 80
    ns_total = {}
    ns_success = {}
    for i in range(number_ns):
        ns_total[i] = {}
        ns_success[i] = {}

    f = open(filename)
    for line in f:
        tokens = line.split()
        try:
            id = int(tokens[3])
            ns = int(tokens[4])
            client_confirmation = int(tokens[9])
            if client_confirmation != -1:
                update_recved = int(tokens[6])
                latency = client_confirmation - update_recved
                ns_success[ns][id] = latency
            else:
                ns_total[ns][id] = 1
        except:
            pass

    tuples = []
    for i in range(number_ns):
        ns_tuple = [i, len(ns_total[i]), len(ns_success[i]), len(ns_total[i]) - len(ns_success[i])]
        latencies = get_stats(ns_success[i].values())
        ns_tuple.extend(latencies)
        tuples.append(ns_tuple)
        
    output_filename = os.path.join(os.path.split(filename)[0], 'ns_update_stats.txt')
    from write_array_to_file import write_tuple_array
    write_tuple_array(tuples, output_filename, p = True)
Example #5
def get_hostwise_stats(latencies):
    stats_2d_array = []
    for lns, lns_lat in latencies.items():
        lns_stats = get_stats(lns_lat)
        lns_stats.insert(0, lns)
        stats_2d_array.append(lns_stats)
    return stats_2d_array
Example #6
def run_stats(args):
    manager = proxy_db_manager.ProxyDBManager(args.db_filename)
    proxies = manager.get_proxies_by_type()
    for type in proxies:
        for proxy, port in proxies[type]:
            success_rate, average_speed, error_desc = stats.get_stats(type, proxy, port)
            manager.update_proxy(proxy, port, type, success_rate, average_speed, error_desc)         
Example #7
def print_stats(input_list, title=None):
    '''
    Computes minimum, maximum, mean, and median using get_stats function from
      stats module, and prints them out in a nice format.

    Parameters:
      input_list(list): a list representing a column
      title(str): Optional. If given, title is printed out before the stats.

    Examples:
    >>> print_stats(range(50))
    Minimum: 0
    Maximum: 49
    Mean: 24.5
    Median: 24.5
    >>> print_stats(range(100), title = 'Stats!')
    Stats!
    Minimum: 0
    Maximum: 99
    Mean: 49.5
    Median: 49.5
    '''
    if title:
        print(title)
    print('Minimum: %i\nMaximum: %i\nMean: %.1f\nMedian: %.1f'
          % get_stats(input_list))
    return None
Example #8
def get_hostwise_stats(latencies):
    stats_2d_array = []
    for lns,lns_lat in latencies.items():
        lns_stats = get_stats(lns_lat)
        lns_stats.insert(0,lns)
        stats_2d_array.append(lns_stats)
    return stats_2d_array
Example #9
def index():
    return render_template(
        "index.html",
        initialSample=json.dumps(stats.get_stats()),
        highLoadThreshold=float(os.environ.get('HIGH_LOAD_THRESHOLD', 1.0)),
        highLoadDuration=int(os.environ.get('HIGH_LOAD_DURATION', 120000))
    )
Example #10
def stats_lambda(event, context):
    request_data = event["queryStringParameters"]
    ticker = request_data["ticker"]
    return {
        "statusCode": 200,
        "body": get_stats(ticker).to_json(orient='records')
    }
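Such a handler can be exercised locally by faking the API Gateway event shape it expects. A minimal sketch, assuming get_stats is importable and using an arbitrary ticker:

# Hypothetical local invocation; in production API Gateway supplies the event.
if __name__ == '__main__':
    event = {"queryStringParameters": {"ticker": "AAPL"}}
    response = stats_lambda(event, None)
    print(response["statusCode"])
    print(response["body"][:80])  # first 80 characters of the JSON records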
Example #11
def print_stats(input_list, title=None):
    '''
    Computes minimum, maximum, mean, and median using get_stats function from
      stats module, and prints them out in a nice format.

    Parameters:
      input_list(list): a list representing a column
      title(str): Optional. If given, title is printed out before the stats.

    Examples:
    >>> print_stats(range(50))
    Minimum: 0
    Maximum: 49
    Mean: 24.5
    Median: 24.5
    >>> print_stats(range(100), title = 'Stats!')
    Stats!
    Minimum: 0
    Maximum: 99
    Mean: 49.5
    Median: 49.5
    '''
    if title:
        print(title)
    print('Minimum: %i\nMaximum: %i\nMean: %.1f\nMedian: %.1f' %
          get_stats(input_list))
    return None
Example #12
def make_data_js(start_year, end_year):
    years = end_year - start_year
    months = [
        "jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct",
        "nov", "dec"
    ]
    f = open('data.js', 'w')
    f.write('var start_year = ' + str(start_year) + '\n')
    f.write('var end_year = ' + str(end_year) + '\n')
    f.write('var years = ' + str(years) + '\n')
    f.write('var months = ' + str(months) + '\n')
    f.write('var lifetime_stats = ' + str(get_general_stats()) + '\n')
    f.write('var category_stats = ' +
            str(get_stats('category', start_year, end_year)) + '\n')
    f.write('var month_stats = ' +
            str(get_stats('month', start_year, end_year)) + '\n')
    f.close()
Example #13
def main():
    args = parse_args()
    categories = args.entities.split(',')
    if not args.output:
        out_dir = args.input # will save tagged output in input folder
    else:
        out_dir = args.output
    if 'p' in args.actions:
        if not args.model:
            raise ValueError('Provide path to UDPipe model with action "p" (parse)')
        else:
            model = Model.load(args.model)
            if not model:
                print("Cannot load model from file '%s'\n" % model_path)
                sys.exit(1)
            print('Processing with UDPipe...')
            cnt = 0
            for filename in os.listdir(args.input):
                name, ext = os.path.splitext(filename)
                if ext == '.txt':
                    cnt += 1
                    out_file = os.path.join(args.input, name+'.conllu')
                    process_text(os.path.join(args.input, filename),
                                 args.input_format, model, out_file)
            print('{} parsed texts saved in "{}"'.format(cnt, args.input))
            args.column = 1 # overwrite any user provided col to UdPipe default used here
    if 't' in args.actions:
        print('Loading NorMedTerm')
        ordered_terms = load_terms(args.terms, categories)
        print('Loaded {} terms'.format(len(ordered_terms)))
        if out_dir != args.input and not os.path.exists(out_dir):
            os.mkdir(out_dir)
        print('Performing baseline tagging on...')
        t_cnt = 0
        for filename in os.listdir(args.input):
            name, ext = os.path.splitext(filename)
            if ext == '.conllu' or ext == '.vert':
                t_cnt += 1
            print('\t...', filename)
                out_file = os.path.join(out_dir, name + '.ner')
                baseline_tag(os.path.join(args.input, filename), ordered_terms,
                             out_file, args.column, args.lower)
        print('{} tagged files saved in "{}"'.format(t_cnt, out_dir))
    if 's' in args.actions:
        print('Computing statistics...')
        get_stats(out_dir, args.column, args.lower)
Example #14
    def test_get_stats(self):
        data = []
        with open('results.json') as f:
            for line in f:
                data.append(json.loads(line))
        links, subdomains, counters, total_space, total_links, status = get_stats(
            [data])
        self.assertEqual(total_space, 257)
        self.assertEqual(total_links, 3)
        self.assertEqual(status[200], 1)
Example #15
def helper(filename, bar):
    """parse file, get data, and evaluate rank"""
    chat_parser = parser.ChatParser()
    tmp = analyser.Analyser()
    with open(filename, 'r', encoding='utf-8') as fp:
        chat = chat_parser.parse(fp, bar)
    chatdata = tmp.analyse(chat)
    print(chatdata)
    ret = stats.get_stats(chatdata)
    return ret
Example #16
def save_stats():
    stop_execution()
    stats = get_stats(space)
    if 0 not in stats:
        mb.showerror('Error', 'No data to plot!')
        start_execution()
        return
    in_filename = tkinter.filedialog.asksaveasfilename(
        filetypes=(("Text file", ".txt"),))
    write_stats(stats, in_filename)
Example #17
    def set_ma_df(self):

        cols = self.coins4ma
        pos_df = self.pos_df.loc[:, cols]
        price_df = self.price_df.loc[:, cols]
        pnl_df = get_stats(pos_df, price_df, self.booksize, tcost=self.tcost)

        inttvr_se = GET_INTTVR_SE(pos_df, price_df, cols)
        more_df = pd.concat([inttvr_se], axis=1)
        pnl_df = pd.concat([pnl_df, more_df], axis=1)
        self.ma_df = pnl_df
Example #18
    def get_pnl_df(cols):
        pos_df = self.pos_df.loc[:, cols]
        price_df = self.price_df.loc[:, cols]
        pnl_df = get_stats(pos_df,
                           price_df,
                           self.booksize,
                           tcost=self.tcost)
        tvr_se = GET_INTTVR_SE(pos_df, price_df, cols)
        more_df = pd.concat([tvr_se], axis=1)
        pnl_df = pd.concat([pnl_df, more_df], axis=1)
        return pnl_df
Example #19
def main(filename, *args):
  csvreader = CSVReader(filename)
  mat = csvreader.mat

  xvec = mat[:,0]
  yvec = mat[:,1]
  sdict = get_stats(xvec, yvec, asdict=True)

  idxvec = yvec >= yvec.max()
  peakx = xvec[idxvec]
  print 'FWHM (um): %d\tz_peak (mm): %.3f'%(sdict['FWHM']*1000, peakx)
Example #20
def group_by(filename,
             group_by_index,
             value_index,
             filter=None,
             grouping_function=None,
             numeric=True):
    """Performs group-by operation.
    
        group on group_by index, values are in value_index.
        index = 0, 1, 2 etc. """

    f = open(filename)
    group_kv = {}
    exception_count = 0
    for line in f:
        tokens = line.split()
        try:
            if filter is not None and filter(tokens) == False:
                continue
            val = float(tokens[value_index])
            if numeric:
                group_key = float(tokens[group_by_index])
                if grouping_function is not None:
                    group_key = grouping_function(float(
                        tokens[group_by_index]))
            else:
                group_key = tokens[group_by_index]
                if grouping_function is not None:
                    group_key = grouping_function(tokens[group_by_index])

            if group_key in group_kv:
                group_kv[group_key].append(val)
            else:
                group_kv[group_key] = [val]
        except:
            exception_count += 1
            continue
    if exception_count > 0:
        print "EXCEPTION COUNT:", exception_count

    output_tuples = []

    for k, v in group_kv.items():
        output_tuple = [k]
        output_tuple.append(len(v))
        my_stat = get_stats(v)
        output_tuple.extend(my_stat)
        output_tuples.append(output_tuple)
    output_tuples.sort(key=itemgetter(0))
    return output_tuples
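For illustration, a hypothetical call to the function above; the file name, column indices, and callbacks are invented for this sketch:

# Group latencies (column 5) by server id (column 2), bucketing ids into
# tens and skipping comment lines. All names here are illustrative.
rows = group_by('latency.log', group_by_index=2, value_index=5,
                filter=lambda tokens: not tokens[0].startswith('#'),
                grouping_function=lambda key: int(key) // 10 * 10,
                numeric=True)
for row in rows:
    print(row)  # [group_key, count] followed by whatever get_stats returns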
Example #21
def group_by(filename, group_by_index, value_index, filter = None, grouping_function = None, numeric = True):
    """Performs group-by operation.
    
        group on group_by index, values are in value_index.
        index = 0, 1, 2 etc. """
    
    f = open(filename)
    group_kv = {}
    exception_count = 0
    for line in f:
        tokens = line.split()
        try:
            if filter is not None and filter(tokens) == False:
                continue
            val = float(tokens[value_index])
            if numeric:
                group_key = float(tokens[group_by_index])
                if grouping_function is not None:
                    group_key = grouping_function(float(tokens[group_by_index]))
            else:
                group_key = tokens[group_by_index]
                if grouping_function is not None:
                    group_key = grouping_function(tokens[group_by_index])
            
            if group_key in group_kv:
                group_kv[group_key].append(val)
            else:
                group_kv[group_key] = [val]
        except:
            exception_count += 1
            continue
    if exception_count > 0:
        print "EXCEPTION COUNT:", exception_count
    
    output_tuples = []

    for k, v in group_kv.items():
        output_tuple = [k]
        output_tuple.append(len(v))
        my_stat = get_stats(v)
        output_tuple.extend(my_stat)
        output_tuples.append(output_tuple)
    output_tuples.sort(key = itemgetter(0))
    return output_tuples
Example #22
def count_success_failure_by_ns(filename):
    number_ns = 80
    ns_total = {}
    ns_success = {}
    for i in range(number_ns):
        ns_total[i] = {}
        ns_success[i] = {}

    f = open(filename)
    for line in f:
        tokens = line.split()
        try:
            id = int(tokens[3])
            ns = int(tokens[4])
            client_confirmation = int(tokens[9])
            if client_confirmation != -1:
                update_recved = int(tokens[6])
                latency = client_confirmation - update_recved
                ns_success[ns][id] = latency
            else:
                ns_total[ns][id] = 1
        except:
            pass

    tuples = []
    for i in range(number_ns):
        ns_tuple = [
            i,
            len(ns_total[i]),
            len(ns_success[i]),
            len(ns_total[i]) - len(ns_success[i])
        ]
        latencies = get_stats(ns_success[i].values())
        ns_tuple.extend(latencies)
        tuples.append(ns_tuple)

    output_filename = os.path.join((os.path.split(filename)[0]),
                                   'ns_update_stats.txt')
    from write_array_to_file import write_tuple_array
    write_tuple_array(tuples, output_filename, p=True)
Example #23
def main(filename, *args):
  data = NP.genfromtxt(
      filename,
      delimiter=",",
      dtype=float,
      skip_header=2,
      comments='#')
  if NP.isnan(NP.sum(data)):
    # failed to load psf data using default settings, assuming it is from
    # imageJ and thus tab delimited with no headers
    data = NP.genfromtxt(
        filename,
        dtype=float)

  if NP.isnan(NP.sum(data)):
    print 'nan in data file, aborting'
    return -1

  from stats import get_stats
  xvec = data[:,0]
  yvec = data[:,1]
  stats = get_stats(xvec, yvec, noauc=True)
  ymax, ymin, ymedian = stats[:3]

  leftedge = None
  rightedge = None

  quartermax = 0.25*(ymax + ymedian)
  for idx in xrange(len(yvec)):
    if yvec[idx] > quartermax:
      leftedge = xvec[idx]
      break

  for idx in xrange(len(yvec)-1, -1, -1):
    if yvec[idx] > quartermax:
      rightedge = xvec[idx]
      break

  print 'Quartermax = ', quartermax
  print 'Width of Region of Interest:', rightedge-leftedge
Example #24
def format_response(event):
    """Determine what response to provide based upon event data.

    Args:
      event: A dictionary with the event data.

    """

    text = ""

    # Case 1: The bot was added to a room
    if event['type'] == 'ADDED_TO_SPACE' and event['space']['type'] == 'ROOM':
        text = 'Thanks for adding me to "%s"!\n' % event['space'][
            'displayName'] + commands_text

    # Case 2: The bot was added to a DM
    elif event['type'] == 'ADDED_TO_SPACE' and event['space']['type'] == 'DM':
        text = 'Thanks for adding me to a DM, %s!\n' % event['user'][
            'displayName'] + commands_text

    elif event['type'] == 'MESSAGE':
        usr_input = event['message']['text'].split("@GamerStats")[1].strip()

        if len(usr_input) == 0 or usr_input.lower() == "help":
            text = commands_text
        elif usr_input.lower() == "games":
            text = "Available games to pull stats from:\n" + "\n".join(
                list(games.keys()))
        else:
            args = usr_input.split("/")
            if len(args) != 3:
                text = "Invalid input.\n" + commands_text
            elif " " in args[1].strip():
                text = "Invalid input. Username cannot contain spaces."
            else:
                text = stats.get_stats(args[0].strip().lower(),
                                       args[1].strip(),
                                       args[2].strip().lower(), games)

    return {'text': text}
Example #25
def inline_callback(update: Update, context: CallbackContext):
    query = update.inline_query.query.strip().lower()

    logger.info('Query received: %s', query)

    results = []

    for country in s.countries:
        if (query in country.lower() and len(query) >= 2) or query == country.lower():
            results.append(
                InlineQueryResultArticle(
                    id=uuid4(),
                    title=country,
                    input_message_content=InputTextMessageContent(
                        get_stats(country))
                ))

    update.inline_query.answer(
        results=results,
        cache_time=120,
    )
Example #26
            "No generalized deck above the entropy threshold was found in the range of specified parameters. Aborting."
        )
    if best_d_dice == None:
        print(
            "No dynamic dice above the entropy threshold was found in the range of specified parameters. Aborting."
        )
else:
    # If a generalized deck and dynamic die were found, print a message describing them
    print(
        f"The best generalized deck parameters found were {best_g_deck.size_factor} for the size factor and {best_g_deck.refill_constant} for the refill constant. The resulting variance was {g_deck_variance}."
    )
    print(
        f"The best dynamic dice parameter found was {best_d_dice.decrease_factor} for the decrease factor. The resulting variance was {d_dice_variance}."
    )

    # Compute the relevant statistics of the best generalized deck and dynamic dice, as well as the statistics of a basic deck and basic dice system for comparison.
    deck_results = stats.get_stats(rs.Deck(VALUES), STEPS, TRIALS)
    dice_results = stats.get_stats(rs.Dice(VALUES), STEPS, TRIALS)
    g_deck_results = stats.get_stats(best_g_deck, STEPS, TRIALS)
    d_dice_results = stats.get_stats(best_d_dice, STEPS, TRIALS)

    # Write the results to a csv file so that they can be analyzed
    write_results.write_results(
        "out.csv",
        deck_results + dice_results + g_deck_results + d_dice_results, [
            "Deck entropy", "Deck variance", "Dice entropy", "Dice variance",
            "Generalized Deck entropy", "Generalizde Deck variance",
            "Dynamic Dice entropy", "Dynamic Dice variance"
        ])

    print("The results have been saved to out.csv")
Example #27
def save(pack):
    with open(cacheFile, 'wb') as f:
        pickle.dump(pack, f, -1)
    stats_html = stats.get_stats(pack)
    with open(stats_path, 'w') as f:
        f.write(stats_html)
Example #28
    for row in rows:
        set_stat(cur, row[0], row[1])
        if not check_passwd(row[2]):
            logs[row[1]] = row[0]
    save_log(logs)


if __name__ == '__main__':
    start = timer()

    try:
        parser = argparse.ArgumentParser(description='Check user data.')
        parser.add_argument('--stats', action='store_true', help='show stats')
        args = parser.parse_args()

        con = connect_db()
        cur = con.cursor()
        if args.stats:
            stats = get_stats(cur)
            for stat in stats:
                print(stat[0], stat[1])
            con.close()
        else:
            init_stat(cur)
            main(cur)
            con.commit()
    except KeyboardInterrupt:
        print("Import data interrupted", file=sys.stderr)
    end = timer()
    print ("Execution time:", end - start)
Example #29
# test_stats.py

import stats

stats.init()
stats.event_occurred("meal_eaten")
stats.event_occurred("snack_eaten")
stats.event_occurred("meal_eaten")
stats.event_occurred("snack_eaten")
stats.event_occurred("meal_eaten")
stats.event_occurred("diet_started")
stats.event_occurred("meal_eaten")
stats.event_occurred("meal_eaten")
stats.event_occurred("meal_eaten")
stats.event_occurred("diet_abandoned")
stats.event_occurred("snack_eaten")

for event, num_times in stats.get_stats():
    print("{} occurred {} times".format(event, num_times))
Example #30
from runutils import read_assemblies
from stats import get_stats
from Bio import SeqIO
import sys

# usage: <ref file> <assemblies>

ref_length = sum([len(rec) for rec in SeqIO.parse(open(sys.argv[1]), "fasta")])

assemblies = read_assemblies(sys.argv[2])
stats = [(a, get_stats(a['Desc'], "../../" + a['Path'], "fasta", ref_length)) for a in assemblies]

for a, s in stats:
	print "%s\t%s\t%s" % (a['Desc'], a['AssemblySoftware'], s.n_vals[0.5])
Example #31
  def plot(self):
    """
    This function is *wayyyyyy* too long
    """
    csvfiles = self.csvfiles
    title = self.title
    title = title.replace('\\n', '\n')

    fig = self._fig
    ax = self._ax

    headers = ['']

    ylim = self.ylim

    if self.labels is None and len(csvfiles) > 1:
      # if no labels are specified and we have multiple
      # csv files, then when we plot it is confusing which
      # trace corresponds to which file. In these cases
      # we populate the labels with the basename of the csvfiles
      self.labels = map(op.basename, csvfiles)

      # shorten/truncate filenames in the legend if they are over 16
      # characters
      def shorten(s):
        from os.path import splitext
        s = splitext(s)[0]
        if len(s) > 32:
          return s[:10]+'..'+s[-16:]
        else:
          return s
      self.labels = map(shorten, self.labels)

    if ylim is not None:
      ax.set_autoscaley_on(False)
      ax.set_ylim(ylim)

    for csvidx in xrange(len(csvfiles)):
      if self.color_cycle_length is not None:
        # the first +1 is to stop csvidx=0 from triggering even
        # though it has no ill effect. The second +1 is because to
        # have a cycle length of 3, e.g. 0,1,2 you need to reset
        # on the 4, not 3
        if (csvidx+1)%(self.color_cycle_length+1) == 0:
          reset_linespec()

      from dataloader import DataLoader
      csvfile = csvfiles[csvidx]

      csv_ydx = None
      csv_xdx = None
      if csvfile.endswith(']'):
        csvfile, colspec = csvfile.rsplit('[', 1)
        colspec = colspec[:-1]
        if ',' in colspec:
          csv_xdx, csv_ydx = map(int, colspec.split(','))
        else:
          csv_ydx = int(colspec)

      print 'Plotting',csvfile
      data = DataLoader(csvfile)
      print 'Data source', data.source

      pathcomponents = op.abspath(csvfile).split(op.sep)
      filespec = op.sep.join(pathcomponents[-4:])
      if len(pathcomponents) > 4:
        filespec = '...'+filespec

      headers += ['# File: '+filespec]
      if self._single:
        headers.append(data.header)

      if self.labels is not None:
        if csvidx < len(self.labels):
          lbl = self.labels[csvidx]
        else:
          lbl = self.labels[-1]
      else:
        lbl = None

      xdx = self.xindex - 1
      if csv_xdx is not None:
        xdx = csv_xdx - 1

      print 'Using x-index:', xdx + 1
      xvec = data.matrix[:,xdx]

      ydx = self.yindex - 1
      if csv_ydx is not None:
        ydx = csv_ydx - 1

      print 'Using y-index:',ydx + 1
      yvec = data.matrix[:,ydx]

      yerr = None
      if self.yerror_index is not None:
        yerr = data.matrix[:, self.yerror_index-1]

      if data.source == 'SIOS':
        tvec = data.matrix[:,3]
      else:
        tvec = None

      if data.source == 'calc_power_spectrum.py':
        xvec /= 1000
        #self.logy = True

      if self.register_on_ymax:
        maxidx = np.argmax(yvec)
        xvec -= xvec[maxidx]

      if self.sub_x0:
        xvec -= xvec[0]

      if self.sub_y0:
        yvec -= yvec[0]

      xvec += self.xoffset

      xvec *= self.xmultiplier
      yvec *= self.ymultiplier

      if self.normalise:
        yrange = yvec.max() - yvec.min()
        yvec -= yvec.min()
        yvec /= yrange

      if self.normalise_on_first:
        if self._first_trace_ymax is None:
          self._first_trace_ymax = yvec.max()

        yrange = self._first_trace_ymax - yvec.min()
        yvec -= yvec.min()
        yvec /= yrange

      if self.differentiate:
        dy = yvec[1:] - yvec[:-1]
        yvec = dy
        xvec = xvec[1:]
        print 'Differentiating'

      use_right = self.use_right_axis_for == csvfile
      self._plot_traces(lbl, xvec, yvec, tvec, yerr=yerr, use_right=use_right)

      if self.fwhm:
        from stats import get_stats
        sdict = get_stats(xvec, yvec, asdict=True)
        FWHM = sdict['FWHM']
        fstart, fend = sdict['FWHM_x']
        hm = yvec.max()/2
        ax.hlines(hm, fstart, fend, linewidth=4, linestyles='solid', colors=['black'])
        ax.text((fstart+fend)/2, hm*0.9, '%.2f'%(FWHM), size=11, ha='center')

    if self.logy:
      ax.set_yscale('log')

    xlabel = self.xlabel
    ylabel = self.ylabel

    if xlabel is None:
      xlabel = data.xy_labels[0]
    if ylabel is None:
      ylabel = data.xy_labels[1]

    ax.set_xlabel(xlabel or '')
    ax.set_ylabel(ylabel or '')

    if not self.no_legend:
      if self.labels is not None:
        if len(self.labels) > 5:
          fontsize = 8
        else:
          fontsize = 11

        ax.legend(
            loc=self.legend_position,
            ncol=1,
            prop={'size':fontsize})

    ax.set_title(title, size='medium')
    ax.set_autoscalex_on(False)

    # set x and y limits if given
    if self.xlim is not None:
      ax.set_xlim(self.xlim)

    def get_ticks(start, end, step):
      assert end > start
      diff = end - start
      ticks = np.arange(diff // step) * step + start
      return np.append(ticks, [end])

    if self.xtic_step is not None:
      minx, maxx = ax.get_xlim()
      ax.set_xticks(get_ticks(minx, maxx, self.xtic_step))

    if self.ytic_step is not None:
      miny, maxy = ax.get_ylim()
      ax.set_yticks(get_ticks(miny, maxy, self.ytic_step))

    yaxis = ax.get_yaxis()
    xaxis = ax.get_xaxis()

    ylim = yaxis.get_view_interval()
    xlim = xaxis.get_view_interval()

    if self.hgrid or self.grid:
      yaxis.grid()

    if self.vgrid or self.grid:
      xaxis.grid()

    if self.vline is not None:
      for vl in self.vline:
        ax.vlines(vl, ylim[0], ylim[1], linestyles='solid', colors=['red'])

    if self.hline is not None:
      for hl in self.hline:
        ax.hlines(hl, xlim[0], xlim[1], linestyles='solid', colors=[self.hline_color])

    if self.normalise:
      ax.hlines(0.5, xlim[0], xlim[1], linestyles='dotted', colors=['gray'])

    texttoplot = ['']
    # comments needs to be a list
    if self.comments is not None:
      texttoplot.extend(map(lambda x: '# '+x, self.comments))

    if self.negoverflow:
      texttoplot.append('# Negative overflows fixed')
    if self.lowpass is not None:
      texttoplot.append('# '+'Lowpass filtered at %.2f Hz'%(self.lowpass))

    if not self.no_debug:
      texttoplot.extend(headers)

    ax.text(
        0, 1.0,
        '\n'.join(texttoplot),
        color='0.75',
        zorder=-1,
        verticalalignment='top',
        **self.textkwargs)

    # resize the plot if figsize is given
    if self.figsize is not None:
      fig.set_size_inches(*self.figsize, forward=True)
      PLT.tight_layout()
Example #32
def create_stats_files(input_dir, nepochs, output_dir):

    in_base = input_dir + "/" + str(nepochs) + "."

    with open(output_dir + str(nepochs) + ".stats.txt", "w+") as outf:
        stats.get_stats(nepochs, in_base, outf)
Example #33
import argparse
import init
import stats
from utils import *

# argument parser
parser = argparse.ArgumentParser(description='The fastest Twitter crawler.')
parser.add_argument('--stats', dest='stats', action='store_const',
                    const=True, default=False,
                    help='Get stats of crawled data')
parser.add_argument('--init', dest='init', action='store_const',
                    const=True, default=False,
                    help='Initialize the crawler')
args = parser.parse_args()

# select action based on arguments
if args.stats:
    users = get_users_list_from("all_user_ids.json")
    print stats.get_stats(users)
if args.init:
    init.init()
Example #34
        hints = 2
    else:
        hints = 3

    return hints


def add_word(word):
    if len(used_words) >= 16:
        del used_words[0]

    used_words.append(word)


# start menu
menu(get_stats())

print(f'\n\n{" "*30} PRESS ANY KEY TO PLAY')

msvcrt.getch()  # wait for keypress

# game loop
while True:
    topic, word = get_topic_and_word()
    lives = 10
    hints = get_hints(word)
    guessed_letters = []
    won = False

    while True:
        os.system('cls')
Example #35
    def __init__(self, population: Population):
        self.population = population
        self.stats = get_stats(population)
        self.best_solution = best(population)
        self.worst_solution = worst(population)
Example #36
import os
import sys
import numpy as np
import stats

dir = sys.argv[1]  # assumed: directory of measurement files passed as first argument
dirFiles = os.listdir(dir)
if 'plot.py' in dirFiles: dirFiles.remove('plot.py')
if 'old' in dirFiles: dirFiles.remove('old')
dirFiles.sort(key=lambda f: int(filter(str.isdigit, f)))

speed = float(sys.argv[2]) # mm/s

d1s = []
d2s = []
d3s = []
d4s = []

for m in dirFiles:
	# Happens twice per number!
	t1, t2, x1, x2 = stats.import_test(dir, filter(str.isdigit, m))
	d1, d2, d3, d4 = stats.get_stats(t1, t2, x1, x2, speed)
	d1s.append(d1)
	d2s.append(d2)
	d3s.append(d3)
	d4s.append(d4)

d1 = np.min(d1s)
print('Mid measurement delay, theoretical-sent (min): ' + str(d1*1000) + ' ms')
d1 = np.average(d1s)
print('Mid measurement delay, theoretical-sent (avg): ' + str(d1*1000) + ' ms')
d1 = np.max(d1s)
print('Mid measurement delay, theoretical-sent (max): ' + str(d1*1000) + ' ms')
d1 = np.std(d1s)
print('Mid measurement delay, theoretical-sent (stdev): ' + str(d1*1000) + ' ms')

d2 = np.min(d2s)
Example #37
import random
from Selectors import select_two
from generate import generate_population
from mutate import mutate
from crossover import crossover
from score import score_population
from stats import get_stats, best, worst
from Config import *
from Config import SEED

if __name__ == "__main__":
    random.seed(SEED)

    board = Board(DATA_PATH)
    population = generate_population(board)
    score_population(board, population)
    best_score, worst_score, avg_score, std_score = get_stats(population)

    scores_per_gen = []

    i = 1
    no_change = 0
    while i <= GENERATIONS and no_change < STOP_AFTER:
        if no_change == 0:
            score_population(board, population)
            best_score, worst_score, avg_score, std_score = get_stats(
                population)
        scores_per_gen.append((best_score, worst_score, avg_score, std_score))
        print(str(i) + ". Best: " + str(best_score) + ", Worst: " + str(worst_score) + ", Avg: " + str(avg_score) + ", Std: " + \
              str(std_score))

        new_population = Population()
Example #38
import stats

stats.init()
stats.event_occured("meal_eaten")
stats.event_occured("snack_eaten")
stats.event_occured("snack_eaten")
stats.event_occured("diet_started")
stats.event_occured("meal_eaten")
stats.event_occured("meal_eaten")
stats.event_occured("snack_eaten")
stats.event_occured("snack_eaten")
stats.event_occured("meal_eaten")
stats.event_occured("snack_eaten")
stats.event_occured("snack_eaten")
stats.event_occured("diet_abandoned")
stats.event_occured("snack_eaten")
stats.event_occured("meal_eaten")
stats.event_occured("snack_eaten")
stats.event_occured("meal_eaten")
stats.event_occured("snack_eaten")

for event, num_times in stats.get_stats():
    print("{} occured {} times".format(event, num_times))
Example #39
def simulate_games(samples: int) -> Dict[str, Any]:
    """Runs a set of simulations to get some average stats"""
    player_names = ['Loyal', 'Traitor']
    units = (
        [
            make_unit("1st Sqd", 0, False),
            make_unit("2nd Sqd", 0, True),
            make_tank("Tank 1", 0, False)
        ],
        [
            make_unit("1st Sqd", 50, False),
            make_unit("2nd Sqd", 50, True),
            make_tank("Tank 1", 0, False)
        ],
    )
    options = {'reinforce_position_0': 0, 'reinforce_position_1': 50}
    rounds, winner = [], []
    player1_kia, player1_mia = [], []
    player2_kia, player2_mia = [], []
    player1_killzone, player2_killzone = [], []
    player1_overkill: List[int] = []
    player2_overkill: List[int] = []
    player1_models, player2_models = [], []
    player1_units, player2_units = defaultdict(list), defaultdict(list)
    player1_weapons, player2_weapons = defaultdict(list), defaultdict(list)
    for i in range(samples):
        units_copy = [deepcopy(units[0]), deepcopy(units[1])]
        round, win, stats = simulate_game(player_names, units_copy, options)
        rounds.append(float(round))
        winner.append(float(win))
        player1_kia.append(stats[0]['KIA'])
        player1_mia.append(stats[0]['MIA'])
        player2_kia.append(stats[1]['KIA'])
        player2_mia.append(stats[1]['MIA'])
        player1_killzone.extend(stats[0]['killzone'])
        player2_killzone.extend(stats[1]['killzone'])
        player1_overkill.extend(stats[0]['overkill'])
        player2_overkill.extend(stats[1]['overkill'])
        player1_models.append(stats[0]['models_per_turn'])
        player2_models.append(stats[1]['models_per_turn'])
        for k, v in stats[0]['damage_per_unit'].items():
            player1_units[k].append(v)
        for k, v in stats[0]['damage_per_weapon'].items():
            player1_weapons[k].append(v)
        for k, v in stats[1]['damage_per_unit'].items():
            player2_units[k].append(v)
        for k, v in stats[1]['damage_per_weapon'].items():
            player2_weapons[k].append(v)

    return {
        "round": get_stats(rounds),
        "player1_stats": {
            "name": player_names[0],
            "win_rate": 1 - mean(winner),
            "kia": get_stats(player1_kia),
            "mia": get_stats(player1_mia),
            "killzone": get_stats(player1_killzone),
            "overkill": get_stats(player1_overkill),
            "models_per_turn": [
                get_stats(units_in_nth_turn)
                for units_in_nth_turn in zip_longest(*player1_models)
            ],
            "damage_per_unit": {k: get_stats(v) for k, v in player1_units.items()},
            "damage_per_weapon": {k: get_stats(v) for k, v in player1_weapons.items()}
        },
        "player2_stats": {
            "name": player_names[1],
            "win_rate": mean(winner),
            "kia": get_stats(player2_kia),
            "mia": get_stats(player2_mia),
            "killzone": get_stats(player2_killzone),
            "overkill": get_stats(player2_overkill),
            "models_per_turn": [
                get_stats(units_in_nth_turn)
                for units_in_nth_turn in zip_longest(*player2_models)
            ],
            "damage_per_unit": {k: get_stats(v) for k, v in player2_units.items()},
            "damage_per_weapon": {k: get_stats(v) for k, v in player2_weapons.items()}
        }
    }
Example #40
                writefile2.write(line)
        except ValueError:  # ValueError: not enough values to unpack (expected 2, got 1)
            writefile2.write(line)
            pass
    opfile.close()
    writefile1.close()
    writefile2.close()
    print("")
    print(f"lines written into new file are {linesWritten}")
    print(f"vocab length {len(set(vocab))}")
    print(f"lengths dont match for these many pairs {lengths_no_match}")

    # 1.3
    # obtain more stats from the new corpora and save them
    stats = get_stats("moe_misspellings_train_ascii.tsv",
                      existing_stats,
                      max_left_grams=3)
    save_stats(stats, NEW_STATS_JSON_PATH)

    # 1.4
    # convert counts to probabilities
    stats = to_probs(stats)

    ############################################
    # 2. look at some noise injection examples
    ############################################
    """
    from utils import _get_replace_probs_all_contexts
    _get_replace_probs_all_contexts(
        stats, "", True, "o", alphas=[0.025, 0.05, 0.2, 0.7], print_stats=True)
Example #41
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Simple example of calling a module that has a global variable and some functions
In the same dir there must exist file stats.py 
"""

import stats
stats.init_stats()
stats.event_occurred("meal_eaten")
stats.event_occurred("snack_eaten")
stats.event_occurred("meal_eaten")
stats.event_occurred("snack_eaten")
stats.event_occurred("meal_eaten")
stats.event_occurred("diet_started")
stats.event_occurred("meal_eaten")
stats.event_occurred("meal_eaten")
stats.event_occurred("meal_eaten")
stats.event_occurred("diet_abandoned")
stats.event_occurred("snack_eaten")

for event, number_of_times in stats.get_stats():
    print("{} occured {} times".format(event, number_of_times))
Example #42
def page_stats_page():
    data = stats.get_stats()
    # print(data)
    return render_template("stats.html", html_data=Markup(data))
Example #43
def save(pack):
    with open(cacheFile, 'wb') as f:
        pickle.dump(pack, f, -1)
    stats_html = stats.get_stats(pack)
    with open(stats_path, 'w') as f:
        f.write(stats_html)
Example #44
def show_stats(root):
    stop_execution()
    stats = get_stats(space)
    plot_dialog.show_stats(root, stats, space)