Example #1
    def iou_calc(self, boxes1, boxes2):  #x, y, w, h
        boxes1 = torch.stack([
            boxes1[:, :, :, :, 0] - boxes1[:, :, :, :, 2] / 2.0,
            boxes1[:, :, :, :, 1] - boxes1[:, :, :, :, 3] / 2.0,
            boxes1[:, :, :, :, 0] + boxes1[:, :, :, :, 2] / 2.0,
            boxes1[:, :, :, :, 1] + boxes1[:, :, :, :, 3] / 2.0
        ])
        boxes1 = boxes1.permute(1, 2, 3, 4, 0)

        boxes2 = torch.stack([
            boxes2[:, :, :, :, 0] - boxes2[:, :, :, :, 2] / 2.0,
            boxes2[:, :, :, :, 1] - boxes2[:, :, :, :, 3] / 2.0,
            boxes2[:, :, :, :, 0] + boxes2[:, :, :, :, 2] / 2.0,
            boxes2[:, :, :, :, 1] + boxes2[:, :, :, :, 3] / 2.0
        ])
        boxes2 = boxes2.permute(1, 2, 3, 4, 0)
        #        print('boxes1')
        #        print(boxes1)
        #        print('boxes2')
        #        print (boxes2)
        # calculate the left up point & right down point
        lu = torch.max(boxes1[:, :, :, :, :2], boxes2[:, :, :, :, :2])
        rb = torch.min(boxes1[:, :, :, :, 2:], boxes2[:, :, :, :, 2:])
        #        print('lu')
        #        print(lu)
        #        print('rb')
        #        print(rb)
        # intersection
        intersection = torch.max((rb - lu), Variable(torch.zeros(rb.size())))
        inter_square = intersection[:, :, :, :, 0] * intersection[:, :, :, :,
                                                                  1]
        #        print('intersection')
        #        print(intersection)
        #        print('inter_square')
        #        print(inter_square)

        square1 = (boxes1[:, :, :, :, 2] - boxes1[:, :, :, :, 0]) * \
                (boxes1[:, :, :, :, 3] - boxes1[:, :, :, :, 1])
        square2 = (boxes2[:, :, :, :, 2] - boxes2[:, :, :, :, 0]) * \
                (boxes2[:, :, :, :, 3] - boxes2[:, :, :, :, 1])
        square1 = torch.clamp(square1, 0.00001, 112 * 112)
        square2 = torch.clamp(square2, 0.00001, 112 * 112)
        #        print('square1')
        #        print(square1)
        #        print('square2')
        #        print(square2)
        union_square = square1 + square2 - inter_square
        #        print('uniou_square')
        #        print(union_square)
        #        print('iou')
        #        print(inter_square / union_square)
        result = inter_square / union_square
        #        for i0 in range(100):
        #            for i1 in range(7):
        #                for i2 in range(7):
        #                    for i3 in range(2):
        #                        if math.isnan(result[i0,i1,i2,i3].data.cpu().numpy()):
        #                            Tracer()()
        if math.isnan(torch.sum(result).data.cpu().numpy()):
            Tracer()()
        return result  #shape = (batch_size, 7, 7, 2)
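Below is a minimal, self-contained sketch (not part of the original class) of the same centre/size-to-corners IoU computation for a single pair of boxes; the method above vectorizes exactly this over a (batch, 7, 7, 2, 4) grid. The function name and sample boxes are illustrative only.

import torch

def iou_xywh(b1, b2):
    # hypothetical helper: boxes given as (x_center, y_center, w, h)
    b1 = torch.tensor([b1[0] - b1[2] / 2, b1[1] - b1[3] / 2,
                       b1[0] + b1[2] / 2, b1[1] + b1[3] / 2])
    b2 = torch.tensor([b2[0] - b2[2] / 2, b2[1] - b2[3] / 2,
                       b2[0] + b2[2] / 2, b2[1] + b2[3] / 2])
    lu = torch.max(b1[:2], b2[:2])              # upper-left corner of the overlap
    rb = torch.min(b1[2:], b2[2:])              # lower-right corner of the overlap
    inter = torch.clamp(rb - lu, min=0).prod()  # clamp keeps empty overlaps at 0
    union = (b1[2:] - b1[:2]).prod() + (b2[2:] - b2[:2]).prod() - inter
    return inter / union

print(iou_xywh([0.5, 0.5, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]))  # ~0.1429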
Example #2
import cv2
import numpy as np
from IPython.core.debugger import Tracer

keyboard = Tracer()
from scipy.interpolate import UnivariateSpline


def create_LUT_8UC1(x, y):
    spl = UnivariateSpline(x, y, k=2)
    return spl(xrange(256))


def _get_images_from_batches(batch):
    batch_size = batch.shape[0]
    img_width = batch.shape[1]
    img_height = batch.shape[2]
    img_channel = batch.shape[3]
    imgs = np.split(batch, batch_size)
    reshaped_imgs = []
    for img in imgs:
        img = img.reshape(img_width, img_height, img_channel)
        reshaped_imgs.append(img)
    return reshaped_imgs, img_width, img_height, img_channel


def trans2uint(batch):
    batch = np.interp(batch, [0, 1], [0, 255])
    batch = np.ndarray.astype(batch, 'uint8')
    return batch
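A small usage sketch (an assumption, not code from this project): build a lookup table from a few control points with the same spline-fitting idea as create_LUT_8UC1 and apply it to a uint8 image. The control points and stand-in image are made up, and the result is clipped and cast because cv2.LUT expects a uint8 table.

import cv2
import numpy as np
from scipy.interpolate import UnivariateSpline

def make_lut(x, y):
    # fit a quadratic spline through the control points and sample it at 0..255
    spl = UnivariateSpline(x, y, k=2)
    return np.clip(spl(range(256)), 0, 255).astype('uint8')

lut = make_lut([0, 64, 128, 192, 256], [0, 70, 140, 210, 256])   # hypothetical curve
img = (np.random.rand(64, 64, 3) * 255).astype('uint8')          # stand-in image
brightened = cv2.LUT(img, lut)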
Example #3
import sys

from arsenal.debug import saverr  # registers hook

from IPython import embed as ip
#from IPython.frontend.terminal.embed import InteractiveShellEmbed
#_ip = InteractiveShellEmbed(banner1='')


def enable_ultratb(mode='Context', **kwargs):
    from IPython.core import ultratb
    sys.excepthook = ultratb.FormattedTB(mode=mode, **kwargs)


# TODO: look IPython's debugging stuff..
# http://ipython.org/ipython-doc/dev/api/generated/IPython.core.debugger.html
from IPython.core.debugger import Tracer
set_trace = lambda: Tracer()()

#from IPython.Debugger import Pdb
from pdb import set_trace, pm, Pdb


def enable_debug_hook():
    "Register pdb's post-mortem debugger as the handler for uncaught exceptions."

    def debug_hook(*args):
        sys.__excepthook__(*args)
        pm()

    sys.excepthook = debug_hook
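Tracer has since been deprecated in IPython; a minimal modern equivalent of the set_trace lambda above (a sketch, with a throwaway function name) is:

from IPython.core.debugger import set_trace

def buggy():
    x = 1
    set_trace()   # drops into the IPython debugger at this line
    return x + 1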

Example #4
import pandas as pd
from exceptions import WrongDataFrameSize, NoScanAvailable
from IPython.core.debugger import Tracer

debughere = Tracer()
import json


def extract_scan(df, ip="", protocol='tcp'):
    no_scan = []
    if ip:
        scan = df[df["ip"] == ip][protocol].iloc[0]
    else:
        if not isinstance(df, pd.core.series.Series):
            raise WrongDataFrameSize
            pass
        else:
            scan = df[protocol]
    scan_df = pd.DataFrame(columns=[
        'conf', 'cpe', 'extrainfo', 'name', 'port', 'product', 'reason',
        'state', 'version'
    ])
    if isinstance(scan, str):
        scan = json.loads(scan)
        scan = [dict(scan[x], **{"port": x}) for x in list(scan.keys())]
        scan_df = scan_df.append(scan[0], ignore_index=True)
        if len(scan) > 1:
            scan_df = scan_df.append(scan[1:], ignore_index=True)
            scan_df.insert(0, "ip", ip, allow_duplicates=True)
            return scan_df
        else:
Example #5
#!/usr/bin/python

#-------------------------------------------------------------------------------
#License GPL v3.0
#Author: Alexandre Manhaes Savio <*****@*****.**>
#Grupo de Inteligencia Computational <www.ehu.es/ccwintco>
#Universidad del Pais Vasco UPV/EHU
#Use this at your own risk!
#-------------------------------------------------------------------------------

from IPython.core.debugger import Tracer; debug_here = Tracer()

import os, sys, argparse
import numpy as np
import nibabel as nib

import aizkolari_utils as au

def set_parser():
   parser = argparse.ArgumentParser(description='Save thresholded NifTi volumes computed from the given volume.')
   parser.add_argument('-i', '--input', dest='input', required=True,
                      help='list of files to be thresholded. If it is a list, it must go within quotes.')
   parser.add_argument('-o', '--outdir', dest='outdir', required=False, default = '',
                      help='name of the output directory where the results will be saved. If not given, will put aside the correspondent input file.')
   parser.add_argument('-t', '--thresholds', dest='threshs', required=False, default='95',
                      help='list of floats within [0,100] separated by blank space. If it is a list, it must go within quotes')
   parser.add_argument('-m', '--mask', default='', dest='mask', required=False,
                      help='Mask file.')
   parser.add_argument('-e', '--extension', default='.nii.gz', dest='ext', required=False,
                      help='Output files extension.')
   parser.add_argument('-a', '--absolute', default=False, action='append', dest='abs', required=False,
Example #6
def create_db():

    global success
    global conn
    global db_path

    global MRSTY_TABLE_FILE
    global MRCON_TABLE_FILE
    global MRREL_TABLE_FILE
    global LRABR_TABLE_FILE

    print("\ncreating umls.db")
    # connect to the .db file we are creating.
    db_path = os.path.join(umls_tables, 'umls.db')
    conn = sqlite3.connect(db_path)
    conn.text_factory = str

    print("opening files")
    # load data in files.
    try:
        mrsty_path = os.path.join(umls_tables, 'MRSTY.RRF')
        MRSTY_TABLE_FILE = open(mrsty_path, "r")
    except IOError:
        print("\nNo file to use for creating MRSTY.RRF table\n")
        sys.exit()

    try:
        mrcon_path = os.path.join(umls_tables, 'MRCONSO.RRF')
        MRCON_TABLE_FILE = open(mrcon_path, "r")
    except IOError:
        print("\nNo file to use for creating MRCONSO.RRF table\n")
        sys.exit()

    try:
        mrrel_path = os.path.join(umls_tables, 'MRREL.RRF')
        MRREL_TABLE_FILE = open(mrrel_path, "r")
    except IOError:
        print("\nNo file to use for creating MRREL.RRF table\n")
        sys.exit()

    try:
        lrabr_path = os.path.join(umls_tables, "..", "LEX", 'LRABR')
        from IPython.core.debugger import Tracer  # NOQA
        Tracer()()
        LRABR_TABLE_FILE = open(lrabr_path, "r")
    except IOError:
        print("\nNo file to use for creating LRABR table\n")
        sys.exit()

    print("creating tables")
    c = conn.cursor()

    # create tables.
    c.execute("CREATE TABLE MRSTY( CUI, TUI, STN, STY, ATUI, CVF  ) ;")
    c.execute("CREATE TABLE MRCON( CUI, LAT, TS, LUI, STT, SUI, ISPREF, AUI, SAUI, SCUI, SDUI, SAB, TTY, CODE, STR, SRL, SUPPRESS, CVF ) ;")
    c.execute("CREATE TABLE MRREL( CUI1, AUI1, STYPE1, REL, CUI2, AUI2, STYPE2, RELA, RUI, SRUI, SAB, SL, RG, DIR, SUPPRESS, CVF );")
    c.execute("CREATE TABLE LRABR( EUI1, ABR, TYPE, EUI2, STR);")

    print("inserting data into MRSTY table")
    for line in MRSTY_TABLE_FILE:

        line = line.strip('\n')

        assert line[-1] == '|', "str: {}, char: {}".format(line, line[-1])

        line = line.split('|')

        # end will always be empty str
        line.pop()

        assert len(line) == 6

        c.execute(
            "INSERT INTO MRSTY( CUI, TUI, STN, STY, ATUI, CVF ) values( ?, ?, ?, ?, ?, ?)", tuple(line))

    print("inserting data into MRCON table")
    for line in MRCON_TABLE_FILE:

        line = line.strip('\n')

        assert line[-1] == '|', "str: {}, char: {}".format(line, line[-1])

        line = line.split('|')

        # end will always be empty str
        line.pop()

        assert len(line) == 18

        c.execute("INSERT INTO MRCON( CUI, LAT, TS, LUI, STT, SUI, ISPREF, AUI, SAUI, SCUI, SDUI, SAB, TTY, CODE, STR, SRL, SUPPRESS, CVF ) values ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);", tuple(line))

    print("inserting data into MRREL table")
    for line in MRREL_TABLE_FILE:

        line = line.strip('\n')

        assert line[-1] == '|', "str: {}, char: {}".format(line, line[-1])

        line = line.split('|')

        # end will always be empty str
        line.pop()

        assert len(line) == 16

        c.execute("INSERT INTO MRREL(  CUI1, AUI1, STYPE1, REL, CUI2, AUI2, STYPE2, RELA, RUI, SRUI, SAB, SL, RG, DIR, SUPPRESS, CVF ) values( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )", tuple(line))

    print("inserting into LRABR table")
    for line in LRABR_TABLE_FILE:

        line = line.strip('\n')

        assert line[-1] == '|', "str: {}, char: {}".format(line, line[-1])

        line = line.split('|')

        line.pop()

        assert len(line) == 5

        c.execute(
            "INSERT INTO LRABR( EUI1, ABR, TYPE, EUI2, STR) values( ?, ?, ?, ?,?)", tuple(line))

    print("creating indices")

    # create indices for faster queries
    c.execute("CREATE INDEX mrsty_cui_map ON MRSTY(CUI)")
    c.execute("CREATE INDEX mrcon_str_map ON MRCON(STR)")
    c.execute("CREATE INDEX mrcon_cui_map ON MRCON(CUI)")
    c.execute("CREATE INDEX mrrel_cui2_map ON MRREL( CUI2 )")
    c.execute("CREATE INDEX mrrel_cui1_map on MRREL( CUI1 ) ")
    c.execute("CREATE INDEX mrrel_rel_map on MRREL( REL )")
    c.execute("CREATE INDEX lrabr_abr_map on LRABR(ABR)")
    c.execute("CREATE INDEX lrabr_str_map on LRABR(STR)")

    # save changes to .db
    conn.commit()

    success = True
    print("\nsqlite database created")
Example #7
def backtest(filename, data, schedule_data):
  f = open(filename)
  f.readline()
  player_data = {}
  time_data = []
  for i in xrange(50):
    line = f.readline()
    if line is None or len(line) == 0:
      break
    date = int(line[:3])
    print date
    jsonvalue = "{"+f.readline()+"}"
    value = json.loads(jsonvalue)
    time_data.insert(0,(date,value))
    for p in value:
      if not p in player_data:
        player_data[p] = [0]
      player_data[p].insert(0,value[p])

  time_data2 = convertToPlayersTimeData(time_data, data)

  teams = set([i.team for i in data])

  for i in xrange(len(time_data2)):
    stamp_data = time_data2[i][1]
  Tracer()()
  portfolio = ["rohit sharma", "ajinkya rahane", "david warner", "glenn maxwell", "robin uthappa", "shane watson", "sandeep sharma", "sunil narine", "pravin tambe", "yuzvendra chahal", "bhuvneshwar kumar"]
  # portfolio = ["yuzvendra chahal", "shakib al hasan", "shane watson", "rohit sharma", "sandeep sharma", "sunil narine", "ajinkya rahane", "jacques kallis", "robin uthappa", "jayant yadav","bhuvneshwar kumar"]
  # portfolio = ["manish pandey", "rohit sharma","jacques kallis","robin uthappa", "aditya tare", "ambati rayudu", "morne morkel","piyush chawla","sunil narine","lasith malinga","pragyan ojha"]
  power_player = "glenn maxwell"
  # power_player = "bhuvneshwar kumar"
  portfolio_p = set([getPlayer(data, p)[0] for p in portfolio])
  power_player_p = getPlayer(data, power_player)[0]
  points = 0
  subs = 75

  mlab = Matlab(matlab='/Applications/MATLAB_R2013a.app/bin/matlab')
  mlab.start()
  for i in xrange(4,len(time_data2)):
    # START = str(time_data2[i][0])
    # CURRENT_TEAM = set(portfolio)
    # SUBSTITUTIONS = subs
    # PAST_STATS = time_data[i][1]
    print "\n\n\n\n\n\n"
    print (subs, str(time_data2[i][0]))
    print set(portfolio_p)
    print points
    print "\n\n\n\n\n\n"
    # print time_data[i-1][1]
    # Tracer()()

    inp = (subs, str(time_data2[i][0]), set(portfolio), time_data[i-1][1])
    backtest_pickteam.pickTeam(data, schedule_data, inp)



    res = mlab.run_func('/Users/deedy/Dev/FantasyIPL-Moneyball/python2matlab.m', {}, maxtime = 500)
    changes = backtest_results.getResults(data, schedule_data, res['result'])
    subs -= changes[2]
    portfolio_p = changes[0]
    power_player_p = changes[1]
    # Tracer()()
    # update portfolio
    # update subs
    # update power player
    # Tracer()()

    teams = [(p,time_data2[i][1][p] - time_data2[i-1][1][p] ) for p in time_data2[i][1] if p in portfolio_p]
    print teams
    pthis = 0
    for i in teams:
      if power_player_p == i[0]:
        pthis += 2*i[1]
      else:
        pthis += i[1]
    points+= pthis
    print "{0}\t{1}\t{2}\n\n".format(points, pthis, subs)


    # print "{0}\t{1}".format(time_data2[i][0] , teams)

  mlab.stop()
  Tracer()()
  f.close()
Example #8
def setUpAndSaveVars(players, scores, schedule):
    numplayers = len(players)

    # A and B
    team_total_constraint = [1.0] * numplayers
    wicketkeeper_constraint = [float(player.is_keeper) for player in players]
    A = matrix([team_total_constraint, wicketkeeper_constraint])
    A = A.trans()
    B = matrix([11.0, 1.0])

    # G and H
    all_unstrict_constraints = []
    all_unstrict_constraints_limits = []
    teams = set([player.team for player in players])
    teams = list(teams)

    for team in teams:
        all_unstrict_constraints.append(
            [float(player.team == team) for player in players])
        all_unstrict_constraints_limits.append(6.0)

    batsman_constraint = [-float(player.is_batsman) for player in players]
    allrounder_constraint = [
        -float(player.is_allrounder) for player in players
    ]
    uncapped_constraint = [-float(player.is_uncapped) for player in players]
    bowler_constraint = [-float(player.is_bowler) for player in players]
    bowling_constraint = [
        -float(player.is_bowler or player.is_allrounder) for player in players
    ]
    overseas_constraint = [float(player.is_overseas) for player in players]
    price_constraint = [float(player.price) / 1000000.0 for player in players]
    all_unstrict_constraints.extend([
        batsman_constraint, allrounder_constraint, uncapped_constraint,
        bowler_constraint, bowling_constraint, overseas_constraint,
        price_constraint
    ])
    all_unstrict_constraints_limits.extend(
        [-4.0, -1.0, -1.0, -2.0, -5.0, 4.0, 10.0])
    G = matrix(all_unstrict_constraints)
    G = G.trans()
    H = matrix(all_unstrict_constraints_limits)

    # P and Q
    Q = [-score for score in scores]

    # find_naive_one_time_best_team(Q,A,B,G,H)
    # Tracer()()

    Q_new = []

    days = len(schedule["schedule"])
    Tracer()()
    start_index = schedule["schedule"].index(
        [x for x in schedule["schedule"] if x[0] == START][0])
    past_games = {}
    for t in teams:
        past_games[t] = 0
    for day in schedule["schedule"][:start_index]:
        for t in day[1]:
            past_games[t] += 1

    ## PAST SQUADS
    curr_team_indices = []
    for player_name in PAST_STATS:
        hscore = PAST_STATS[player_name]
        player_arr = getPlayer(players, player_name)
        if len(player_arr) > 1:
            print "Too many people with name {0}".format(player_name)
            Tracer()()
        elif len(player_arr) == 0:
            print "Name not found - {0}".format(player_name)
            Tracer()()
        else:
            player = player_arr[0]
            player_ind = players.index(player)
            if past_games[player.team] > 0:
                # print player
                # print Q[player_ind]
                weight_to_ipl = 0.2 + 0.8 * (past_games[player.team] / 14.0)
                weight_to_ipl = 1
                Q[player_ind] = Q[player_ind] * (
                    1 - weight_to_ipl) + weight_to_ipl * (
                        -hscore) / past_games[player.team]
                if hscore == 0:
                    Q[player_ind] *= 0.5
                # print Q[player_ind]
                # print "\n\n"
                # Tracer()()
    scores = [-x for x in Q]

    ##
    print sorted(zip(players, Q), key=lambda x: -x[1])

    Tracer()()
    days = len(schedule["schedule"][start_index:])
    for day in schedule["schedule"][start_index:]:
        match_mask = [0] * len(all_unstrict_constraints[0])
        for playing_team in day[1]:
            match_mask = [
                sum(x) for x in zip(
                    match_mask, all_unstrict_constraints[teams.index(
                        playing_team)])
            ]
        # Tracer()()
        ##### Kevin Pietersen ruled out for 417
        if day[0] == '417':
            ind = players.index(getPlayer(players, "kevin pietersen")[0])
            match_mask[ind] = 0
        ####

        Q_new.extend([a * b for a, b in zip(Q, match_mask)])
    Q_new = matrix(Q_new).trans()

    #### Current Team
    curr_team_indices = []
    for player_name in CURRENT_TEAM:
        player_arr = getPlayer(players, player_name)
        if len(player_arr) > 1:
            print "Too many people with name {0}".format(player_name)
            Tracer()()
        else:
            curr_team_indices.append(players.index(player_arr[0]))
    curr_team = [0] * numplayers
    for x in curr_team_indices:
        curr_team[x] = 1
    ####
    Tracer()()

    save_filename = 'player-optimization-data.mat'
    print "Saving variables as {0}".format('player-optimization-data.mat')
    sio.savemat(
        save_filename, {
            'A': A,
            'B': B,
            'G': G,
            'H': H,
            'Q': Q,
            'Q_new': Q_new,
            'days': days,
            'curr_team': curr_team,
            'numplayers': numplayers,
            'substitutions': SUBSTITUTIONS,
            'scores': Q,
            'start_index': start_index
        })
Example #9
def scorePlayerAlgo2(player):
    if not hasattr(player, "tournamentStats"):
        return -1, -1, -1, -1
    if len(player.tournamentStats) == 0:
        # Tracer()()
        return -1, -1, -1, -1
    allfieldingstats = [t[1]['fieldingStats'] for t in player.tournamentStats]
    compiled_fielding_stats = {}
    for afs in allfieldingstats[0].viewkeys():
        compiled_fielding_stats[afs] = []
    for fs in allfieldingstats:
        for afs in fs.viewkeys():
            compiled_fielding_stats[afs].append(float(fs[afs]))

    allbowlingstats = [t[1]['bowlingStats'] for t in player.tournamentStats]
    compiled_bowling_stats = {}

    for afs in allbowlingstats[0].viewkeys():
        compiled_bowling_stats[afs] = []
    for fs in allbowlingstats:
        for afs in fs.viewkeys():
            try:
                if not fs[afs] == '-':
                    compiled_bowling_stats[afs].append(float(fs[afs]))
                else:
                    compiled_bowling_stats[afs].append(0.0)
            except:
                Tracer()()

    allbattingstats = [t[1]['battingStats'] for t in player.tournamentStats]
    compiled_batting_stats = {
        'a': [],
        'b': [],
        'inns': [],
        'no': [],
        'sr': [],
        'm': [],
        '4s': [],
        '6s': [],
        'hs': [],
        'r': [],
        '100s': [],
        '50s': []
    }
    for afs in allbattingstats[0].viewkeys():
        compiled_batting_stats[afs] = []
    for fs in allbattingstats:
        for afs in fs.viewkeys():
            try:
                if not afs in compiled_batting_stats:
                    compiled_batting_stats[afs].append(0.0)
                if type(fs[afs]) is int:
                    compiled_batting_stats[afs].append(float(fs[afs]))
                elif fs[afs][-1] == '*':
                    compiled_batting_stats[afs].append(float(fs[afs][:-1]))
                elif not fs[afs] == '-':
                    compiled_batting_stats[afs].append(float(fs[afs]))
            except Exception as e:
                Tracer()()

    stats = {}
    stats['bowlingStats'] = {}
    stats['bowlingStats']['r'] = sum(compiled_bowling_stats['r'])
    stats['bowlingStats']['inns'] = sum(compiled_bowling_stats['inns'])
    stats['bowlingStats']['10w'] = sum(compiled_bowling_stats['10w'])
    stats['bowlingStats']['nb'] = sum(compiled_bowling_stats['nb'])
    stats['bowlingStats']['wb'] = sum(compiled_bowling_stats['wb'])
    stats['bowlingStats']['maid'] = sum(compiled_bowling_stats['maid'])
    stats['bowlingStats']['4w'] = sum(compiled_bowling_stats['4w'])
    stats['bowlingStats']['5w'] = sum(compiled_bowling_stats['5w'])
    stats['bowlingStats']['b'] = sum(compiled_bowling_stats['b'])
    stats['bowlingStats']['wmaid'] = sum(compiled_bowling_stats['wmaid'])
    stats['bowlingStats']['m'] = sum(compiled_bowling_stats['m'])
    stats['bowlingStats']['6s'] = sum(compiled_bowling_stats['6s'])
    stats['bowlingStats']['4s'] = sum(compiled_bowling_stats['4s'])
    stats['bowlingStats']['ov'] = sum(compiled_bowling_stats['ov'])
    stats['bowlingStats']['w'] = sum(compiled_bowling_stats['w'])
    stats['bowlingStats']['d'] = sum(compiled_bowling_stats['d'])
    stats['bowlingStats'][
        'e'] = 6 * stats['bowlingStats']['r'] / stats['bowlingStats'][
            'b'] if stats['bowlingStats']['b'] != 0 else sys.float_info.max
    stats['bowlingStats'][
        'sr'] = 100 * stats['bowlingStats']['w'] / stats['bowlingStats'][
            'b'] if stats['bowlingStats']['b'] != 0 else sys.float_info.max
    stats['bowlingStats'][
        'a'] = stats['bowlingStats']['r'] / stats['bowlingStats'][
            'w'] if stats['bowlingStats']['w'] != 0 else sys.float_info.max
    stats['bowlingStats']['bbmw'] = max(compiled_bowling_stats['bbmw'])
    stats['bowlingStats']['bbiw'] = max(compiled_bowling_stats['bbiw'])
    tmpind = compiled_bowling_stats['bbmr'].index(
        min([
            compiled_bowling_stats['bbmr'][j] for j in [
                i for i, val in enumerate(compiled_bowling_stats['bbmw'])
                if val == stats['bowlingStats']['bbmw']
            ]
        ]))
    stats['bowlingStats']['bbmr'] = compiled_bowling_stats['bbmr'][tmpind]
    tmpind = compiled_bowling_stats['bbir'].index(
        min([
            compiled_bowling_stats['bbir'][j] for j in [
                i for i, val in enumerate(compiled_bowling_stats['bbiw'])
                if val == stats['bowlingStats']['bbiw']
            ]
        ]))
    stats['bowlingStats']['bbir'] = compiled_bowling_stats['bbir'][tmpind]

    stats['battingStats'] = {}
    stats['battingStats']['r'] = sum(compiled_batting_stats['r'])
    stats['battingStats']['inns'] = sum(compiled_batting_stats['inns'])
    stats['battingStats']['no'] = sum(compiled_batting_stats['no'])
    stats['battingStats']['b'] = sum(compiled_batting_stats['b'])
    stats['battingStats']['m'] = sum(compiled_batting_stats['m'])
    stats['battingStats']['100s'] = sum(compiled_batting_stats['100s'])
    stats['battingStats']['50s'] = sum(compiled_batting_stats['50s'])
    stats['battingStats']['4s'] = sum(compiled_batting_stats['4s'])
    stats['battingStats']['6s'] = sum(compiled_batting_stats['6s'])
    stats['battingStats']['hs'] = max(compiled_batting_stats['hs'])
    stats['battingStats']['a'] = stats['battingStats']['r'] / (
        stats['battingStats']['inns'] - stats['battingStats']['no']) if (
            stats['battingStats']['inns'] -
            stats['battingStats']['no']) != 0 else sys.float_info.max
    stats['battingStats'][
        'sr'] = 100 * stats['battingStats']['r'] / stats['battingStats'][
            'b'] if stats['battingStats']['b'] else sys.float_info.max
    stats['fieldingStats'] = {}
    stats['fieldingStats']['m'] = sum(compiled_fielding_stats['m'])
    stats['fieldingStats']['inns'] = sum(compiled_fielding_stats['inns'])
    stats['fieldingStats']['c'] = sum(compiled_fielding_stats['c'])
    stats['fieldingStats']['ro'] = sum(compiled_fielding_stats['ro'])
    stats['fieldingStats']['s'] = sum(compiled_fielding_stats['s'])

    # Tracer()()
    battingpoints = 0
    bowlingpoints = 0
    fieldingpoints = 0

    # Batting
    battingstats = stats['battingStats']
    innings = float(battingstats['inns'])
    matches = float(battingstats['m'])
    if not innings == 0:
        expected_runs = int(battingstats['r']) / innings
        try:
            strike_rate = float(battingstats['sr'])
        except:
            strike_rate = 0
        sixes = int(battingstats['6s'])
        battingpoints = expected_runs + expected_runs * (
            strike_rate / 100) + 10 * math.floor(
                expected_runs / 25) + 2 * (sixes / innings)
        battingpoints = battingpoints

    #Bowling
    bowlingstats = stats['bowlingStats']
    innings = float(bowlingstats['inns'])
    matches = float(bowlingstats['m'])
    if not innings == 0:
        wickets = int(bowlingstats['w'])
        balls = int(bowlingstats['b'])
        maidens = int(bowlingstats['maid'])
        dots = int(bowlingstats['d'])
        runs_given = int(bowlingstats['r'])
        econscore = (1.5 * balls - runs_given) / innings
        if econscore > 0:
            econscore *= 2
        bowlingpoints = econscore + 20 * (wickets / innings) + 10 * math.floor(
            (wickets / innings) / 2) + 20 * (maidens / innings) + (dots /
                                                                   innings)
        bowlingpoints = bowlingpoints

    #Fielding
    fieldingstats = stats['fieldingStats']
    matches = float(fieldingstats['m'])
    catches = int(fieldingstats['c'])
    stumpings = int(fieldingstats['s'])
    runouts = int(fieldingstats['ro'])
    fieldingpoints = 10 * (catches / matches) + 15 * (
        stumpings / matches) + 12.5 * (runouts / matches)

    # if "South" in player.name:
    #   Tracer()()

    if matches < 5:
        battingpoints /= (5 - matches)
        bowlingpoints /= (5 - matches)
        fieldingpoints /= (5 - matches)

    totalpoints = battingpoints + bowlingpoints + fieldingpoints
    # print "{0}\t\tBat: {1}\tBowl: {2}\tField: {3}\tTotal:{4}".format(player, battingpoints, bowlingpoints, fieldingpoints, totalpoints)

    return totalpoints, battingpoints, bowlingpoints, fieldingpoints
Example #10
    def parse(self, response):
        # Random string is used to construct the XHR sent to twitter.com
        random_str = "BD1UO2FFu9QAAAAAAAAETAAAAAcAAAASAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
        data = json.loads(response.body_as_unicode())
        #default rate delay is 12s
        # rate_delay = self.settings['DOWNLOAD_DELAY']
        # rate_delay = 2

        # delay_choices = [(1,30), (2,25), (3,20),(4,15),(5,10)]
        # delay_choices = [(1,50), (2,30), (3,10),(4,8),(5,2)]
        # delay_choices = [(0,1),(1,89), (2,4), (3,3),(4,2),(5,1)]
        # delay_choices = [(1,60), (2,20), (3,10),(4,8),(5,2)]
        # delay_choices = [(0,33),(1,56), (2,5), (3,3),(4,2),(5,1)]
        # if data["max_position"] is not None:

        try:
            if data['items_html'] is not None:
                tweets = self.extract_tweets(data['items_html'])

                # If we have no tweets, then we can break the loop early
                if len(tweets) == 0 and data['has_more_items'] is False:
                    Tracer()()
                    pprint(data)
                    logging.log(logging.DEBUG, data)
                    logging.log(
                        logging.INFO, "Reach the end of search results( " +
                        self.query + " )")
                    return
                    # yield Request(url=next_url, callback=self.parse)

                for tweet in tweets:
                    # push parsed item to mongoDB pipline
                    yield self.parse_tweet(tweet, response)

                # If we haven't set our min tweet yet, set it now
                if self.min_tweet is None:
                    self.is_first_query = True
                    self.min_tweet = tweets[0]
                elif self.min_tweet is not tweets[0]:
                    self.min_tweet = tweets[0]

                # continue_search = self.save_tweets(tweets)

                # The max tweet is the last tweet in the list
                self.max_tweet = tweets[-1]
                if self.min_tweet['tweet_id'] is not self.max_tweet[
                        'tweet_id']:
                    self.max_position = "TWEET-%s-%s-%s" % (
                        self.max_tweet['tweet_id'], self.min_tweet['tweet_id'],
                        random_str)
                    # '''
                    #     is_first_query is a indicator used to identify the intial query. With the intial query
                    #     the crwaler can simulate the hand-shake request while the delay time is greater than a
                    #     predefined time period, for instance, 22 seconds
                    # '''
                    # if self.is_first_query:
                    #     self.data_max_position = self.max_position
                    #     self.is_first_query = False
                    # Construct next url to crawl
                    next_url = self.construct_url(
                        self.query,
                        max_position=self.max_position,
                        operater="max_position")

                    # Sleep for rate_delay
                    # Tracer()()
                    # delay_multiple = self.weighted_choice(delay_choices)
                    # if delay_multiple is not 0:
                    #     delay_time = random.uniform(rate_delay*(delay_multiple-1), rate_delay*delay_multiple)
                    #     logging.log(logging.DEBUG,"Sleep for "+ str(delay_time) +" seconds")
                    #     time.sleep(delay_time)
                    #     # if delay_time > 22:
                    #     #     next_url = self.construct_url(
                    #     #         self.query,
                    #     #         max_position=self.data_max_position,
                    #     #         operater="min_position")
                    #     #     yield Request(url=next_url, callback=self.parse,dont_filter=True)
                    # else:
                    #     logging.log(logging.DEBUG,"Sleep for 0 seconds")

                    print
                    print "Next Request:" + "TWEET-%s-%s" % (
                        self.max_tweet['tweet_id'], self.min_tweet['tweet_id'])
                    print
                    # Tracer()()
                    yield Request(url=next_url,
                                  callback=self.parse,
                                  dont_filter=True)
        except Exception, e:
            pass
Example #11
def process(index, col):
    global err
    inst, major, degree, season, decision, status, date_add, date_add_ts, comment = None, None, None, None, None, None, None, None, None

    if len(col) != 6:
        Tracer()()
    try:
        inst = col[0].text.strip().encode('ascii', 'ignore')
    except:
        Tracer()()
    try:
        major = None
        progtext = col[1].text.strip().encode('ascii', 'ignore')
        for (p, nam) in PROGS:
            if p.lower() in progtext.lower():
                major = nam
                break
        if not major:
            major = 'Other'
            errlog['major'].append((index, col))

        degree = None
        for (d, deg) in DEGREE:
            if d in progtext:
                degree = deg
                break
        if not degree:
            degree = 'Other'

        season = None
        mat = re.search('\([SF][01][0-9]\)', progtext)
        if mat:
            season = mat.group()[1:-1]
        else:
            mat = re.search('\(\?\)', progtext)
            if mat:
                season = None
    except NameError as e:
        print e
        Tracer()()
    except:
        print "Unexpected error:", sys.exc_info()[0]
        Tracer()()
    try:
        extra = col[2].find(class_='extinfo')
        gpafin, grev, grem, grew, new_gre, sub = None, None, None, None, None, None
        if extra:
            gre_text = extra.text.strip().encode('ascii', 'ignore')
            gpa = re.search('Undergrad GPA: ((?:[0-9]\.[0-9]{1,2})|(?:n/a))',
                            gre_text)
            general = re.search(
                'GRE General \(V/Q/W\): ((?:1[0-9]{2}/1[0-9]{2}/(?:(?:[0-6]\.[0-9]{2})|(?:99\.99)|(?:56\.00)))|(?:n/a))',
                gre_text)
            new_gref = True
            subject = re.search('GRE Subject: ((?:[2-9][0-9]0)|(?:n/a))',
                                gre_text)

            if gpa:
                gpa = gpa.groups(1)[0]
                if not gpa == 'n/a':
                    try:
                        gpafin = float(gpa)
                    except:
                        Tracer()()
            else:
                errlog['gpa'].append((index, gre_text))
            if not general:
                general = re.search(
                    'GRE General \(V/Q/W\): ((?:[2-8][0-9]0/[2-8][0-9]0/(?:(?:[0-6]\.[0-9]{2})|(?:99\.99)|(?:56\.00)))|(?:n/a))',
                    gre_text)
                new_gref = False

            if general:
                general = general.groups(1)[0]
                if not general == 'n/a':
                    try:
                        greparts = general.split('/')
                        if greparts[2] == '99.99' or greparts[
                                2] == '0.00' or greparts[2] == '56.00':
                            grew = None
                        else:
                            grew = float(greparts[2])
                        grev = int(greparts[0])
                        grem = int(greparts[1])
                        new_gre = new_gref
                        if new_gref and (grev > 170 or grev < 130 or grem > 170
                                         or grem < 130 or
                                         (grew and (grew < 0 or grew > 6))):
                            errlog['general'].append((index, gre_text))
                            grew, grem, grev, new_gre = None, None, None, None
                        elif not new_gref and (grev > 800 or grev < 200
                                               or grem > 800 or grem < 200 or
                                               (grew and
                                                (grew < 0 or grew > 6))):
                            errlog['general'].append((index, gre_text))
                            grew, grem, grev, new_gre = None, None, None, None
                    except Exception as e:
                        Tracer()()
            else:
                errlog['general'].append((index, gre_text))

            if subject:
                subject = subject.groups(1)[0]
                if not subject == 'n/a':
                    sub = int(subject)
            else:
                errlog['subject'].append((index, gre_text))

            extra.extract()
        decision = col[2].text.strip().encode('ascii', 'ignore')
        try:
            decisionfin, method, decdate, decdate_ts = None, None, None, None
            (decisionfin, method, decdate) = re.search(
                '((?:Accepted)|(?:Rejected)|(?:Wait listed)|(?:Other)|(?:Interview))? ?via ?((?:E-[mM]ail)|(?:Website)|(?:Phone)|(?:Other)|(?:Postal Service)|(?:Unknown))? ?on ?([0-9]{1,2} [A-Z][a-z]{2} [0-9]{4})?',
                decision).groups()
            if method and method == 'E-Mail':
                method = 'E-mail'
            if method and method == 'Unknown':
                method = 'Other'
            if decdate:
                try:
                    decdate_date = datetime.datetime.strptime(
                        decdate, '%d %b %Y')
                    decdate_ts = decdate_date.strftime('%s')
                    decdate = decdate_date.strftime('%d-%m-%Y')
                except Exception as e:
                    decdate_date, decdate_ts, decdate = None, None, None
        except Exception as e:
            Tracer()()
    except Exception as e:
        Tracer()()
    try:
        statustxt = col[3].text.strip().encode('ascii', 'ignore')
        if statustxt in STATUS:
            status = STATUS[statustxt]
        else:
            status = None
    except:
        Tracer()()
    try:
        date_addtxt = col[4].text.strip().encode('ascii', 'ignore')
        date_add_date = datetime.datetime.strptime(date_addtxt, '%d %b %Y')
        date_add_ts = date_add_date.strftime('%s')
        date_add = date_add_date.strftime('%d-%m-%Y')
    except:
        # print(col[4].text.strip().encode('ascii', 'ignore'))
        Tracer()()
    try:
        comment = col[5].text.strip().encode('ascii', 'ignore')
    except:
        Tracer()()
    res = [
        inst, major, degree, season, decisionfin, method, decdate, decdate_ts,
        gpafin, grev, grem, grew, new_gre, sub, status, date_add, date_add_ts,
        comment
    ]
    print res
    return res
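A tiny sketch of the decision-parsing regex used above, run on one made-up sample string, to show what the three capture groups hold:

import re

m = re.search(
    '((?:Accepted)|(?:Rejected)|(?:Wait listed)|(?:Other)|(?:Interview))? ?via ?'
    '((?:E-[mM]ail)|(?:Website)|(?:Phone)|(?:Other)|(?:Postal Service)|(?:Unknown))? ?on ?'
    '([0-9]{1,2} [A-Z][a-z]{2} [0-9]{4})?',
    'Accepted via E-mail on 15 Feb 2014')   # sample text is invented
print(m.groups())   # ('Accepted', 'E-mail', '15 Feb 2014')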
Example #12
    def extract_tweets(self, items_html):
        """
        Parses Tweets from the given HTML
        :param items_html: The HTML block with tweets
        :return: A JSON list of tweets
        """
        try:
            soup = BeautifulSoup(items_html, "lxml")
            tweets = []
            twitter_username_re = re.compile(
                r'(?<=^|(?<=[^a-zA-Z0-9-_\.]))@([A-Za-z_]+[A-Za-z0-9_]+[A-Za-z]+[A-Za-z0-9])'
                # r'(?<=@)\w+'
            )

            for li in soup.find_all("li", class_='js-stream-item'):
                # Tracer()()
                # If our li doesn't have a tweet-id, we skip it as it's not going
                # to be a tweet.
                if 'data-item-id' not in li.attrs:
                    continue

                tweet = {
                    'tweet_id': li['data-item-id'],
                    'text': None,
                    # 'is_retweet':false,
                    'user_id': None,
                    'user_screen_name': None,
                    'user_name': None,
                    'created_at_ts': None,
                    'created_at_iso': None,
                    # 'convo_url': None,
                    # 'image_url': [],
                    'num_retweets': 0,
                    'num_favorites': 0,
                    'keyword': [],
                    'quote_tweet_id': None,
                    'quote_tweet_userid': None,
                    'quote_tweet_username': None,
                    'quote_tweet_screenname': None,
                    'quote_tweet_text': None,
                    'html': None
                }
                try:
                    tweet['html'] = str(li)
                except Exception, e:
                    Tracer()()
                    pass
                '''
                Extract tweet text
                '''
                try:
                    text_p = li.find("p", class_="tweet-text")
                    if text_p is not None:
                        tweet['text'] = text_p.get_text()
                except Exception, e:
                    Tracer()()
                    logging.log(logging.DEBUG,
                                "ERROR(extract_text_p): %s" % (str(e), ))
                    traceback.print_exc()
                '''
                Extract quote tweet content if exists
                '''
                try:
                    quote_tweet = li.find("div",
                                          class_="QuoteTweet-innerContainer")
                    if quote_tweet is not None:
                        # Tracer()()
                        tweet['quote_tweet_id'] = quote_tweet['data-item-id']
                        tweet['quote_tweet_userid'] = quote_tweet[
                            'data-user-id']
                        tweet['quote_tweet_screenname'] = quote_tweet[
                            'data-screen-name']
                        tweet['quote_tweet_username'] = quote_tweet.find(
                            "b", class_="QuoteTweet-fullname").get_text()
                        tweet['quote_tweet_text'] = quote_tweet.find(
                            "div", class_="QuoteTweet-text").get_text()
                except Exception, e:
                    Tracer()()
                    logging.log(logging.DEBUG,
                                "ERROR(extract_quote_tweet): %s" % (str(e), ))
                    traceback.print_exc()
Example #13
eval(cmd)
%prun [opts] statement # profiler; profile.help(); -r # return pstats.Stats obj; / -l # <limit> string (only some fn_names),int(#ln),float(.25 - quarter of output),e.g. '-l __init__ -l 5' top 5 ln of constructors; / -s <key> # sort by key
%prun -l 7 -s cumulative fn1(); %run -p -s cumulative fnm.py; %lprun -f fn1 -f fn2 statemt_to_profile # %prun (cProfile) for macro_profiling and %lprun (line_profiler) for micro_profiling
$python -m cProfile cprof_example.py; execfile(fnm); %run [-n -i -t [-N<N>] -d [-b<N>] -p [profile options]] fnm [args] # like $python fnm args; with ipy traceback; %run in empty nsp, %run -i in current nsp; leaves behind vars
%timeit range(1000) # Line magics (1-liner,args on line1)
%%timeit x=numpy.random.randn((100,100)) / numpy.linalg.svd(x) # setup not timed
import profile,pstats; from timeit import Timer; Timer('t=a;a=b;b=t','a=1; b=2').timeit(); Timer('a,b=b,a','a=1; b=2').timeit() # profile,pstats; traditional swapping args vs tuple arg pack/unpack
timer=time.clock if sys.platform[:3]=='win' else time.time; min(Timer.total(1000,str.upper,'spam') for i in range(50)) # profile -wall_clock,cpu_clock
# c.TerminalIPythonApp.extensions=['line_profiler']
# %load_ext line_profiler # works for iPython.exe, not for pyDev (%lprun unavail)
import unittest,doctest ## unit/system testing, white/black box testing
#--- 07 dbg/traceback(~er_msg,fnm/line#/fn/fnm_txt + fnm_caller/..),ipdb,inspect --------------------------------------
# tb_obj~err_log, it contains "stack_trace" (fnm,line#,fn_nm,er_type,er_msg),has link to frm (possibly frm_dead as well); ipdb.pm(tb_obj) (no frame stack walk here, just look at obj before gc deletes them?);
o=sys.exc_info()[2]; o=sys.last_traceback; insp_stack2(o)
%tb 'prints tb, mode set up in %xmode'; traceback.print_exc(file=open('fnm.txt','w')); threading.settrace(fn) 'tb_multithr'
from IPython.core.debugger import Tracer; Tracer()()
#--- ipdb h; init in .pdbrc,pdb~ipdb;
# kb(),insp_stack2(),whos2(); from numpy.lib.utils import source; source(fn1) 'fnm+src,good enough';
# help,h; alias,unalias;
# run,restart; where,w,bt; up,u; down,d; next,n; step,s; jump,j;continue,cont,c; return,r; until,unt; commands[cmd to continue]; debug[dbg2];
# whatis;args,a; pinfo,pdef,pdoc; list,l; pp,p # pp get_ipython().magic('who_ls'); src_code,print
# break,b,tbreak,enable,disable,condition,ignore,clear,cl; quit,q,exit; EOF; #bpts
# if stuck on input - do empty line; stuck on output - "q"
# b=123; whos2() 'mixed statemt fail in ipdb'; a?? fails in ipdb,eval(), use numpy.lib.utils.source(fn), insp_stack2,whos2(),
# frm_stack~stack_trace~stack_snapshot, for interactive and post-mortem debugging (pm uses traceback)
%pdb on; %debug~ipdb.pm() # open dbg at latest exception (it clobbers/kills prev ones), init in `InteractiveShell.pdb`;
#--- ipdb test
get_ipython().magic('%reset -f'); from vm0 import *; import ipdb; ipdb.launch_ipdb_on_exception(); # ipdb.set_trace()
ipdb.run('wh()'); ipdb.pm() # dbg/traceback printout; ipdb.post_mortem, vars in "old fn frames" are preserved until gc
def set_trace(): from IPython.core.debugger import Pdb; Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back) # set bpt
def debug(f,*args,**kargs): from IPython.core.debugger import Pdb; pdb=Pdb(color_scheme='Linux'); return pdb.runcall(f,*args,**kargs) # dbg_run_fn
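The Timer comparison mentioned in the notes above, written out as a plain runnable sketch (same swap idioms, timed outside IPython):

from timeit import Timer

print(Timer('t = a; a = b; b = t', 'a = 1; b = 2').timeit())  # traditional three-statement swap
print(Timer('a, b = b, a', 'a = 1; b = 2').timeit())          # tuple pack/unpack swap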
Example #14
    def loss_function_vec(
        self,
        predicts,
        labels,
        threshold,
        cal_accuracy=False
    ):  #labels: [batch_size, cell_size_x, cell_size_y, 2+5] (x, y, w, h, C, p(c0), p(c1))

        predict_class = predicts[:, :self.cell_size * self.cell_size *
                                 self.num_classes].contiguous().view(
                                     self.batch_size, self.cell_size,
                                     self.cell_size, self.num_classes
                                 )  #batch_size, cell_size, cell_size, num of class (class score)
        predict_confidence = predicts[:, self.cell_size * self.cell_size *
                                      self.num_classes:(
                                          self.cell_size * self.cell_size *
                                          self.num_classes +
                                          self.cell_size * self.cell_size * 2
                                      )].contiguous().view(
                                          self.batch_size, self.cell_size,
                                          self.cell_size, 2
                                      )  #batch_size, cell_size, cell_size, num of boxes (box confidence)
        predict_boxes = predicts[:, (
            self.cell_size * self.cell_size * self.num_classes +
            self.cell_size * self.cell_size * 2
        ):].contiguous().view(
            self.batch_size, self.cell_size, self.cell_size, 2, 4
        )  # batch_size, cell_size, cell_size, boxes_num, 4 (box coordinate)

        gt_object = labels[:, :, :,
                           4].contiguous().view(self.batch_size,
                                                self.cell_size, self.cell_size,
                                                1)
        gt_boxes = labels[:, :, :,
                          0:4].contiguous().view(self.batch_size,
                                                 self.cell_size,
                                                 self.cell_size, 1, 4)
        gt_boxes = gt_boxes.expand(self.batch_size, self.cell_size,
                                   self.cell_size, 2, 4)
        gt_classes = labels[:, :, :, 5:]
        predict_boxes_tran = torch.stack([
            (predict_boxes[:, :, :, :, 0] + self.offset) * 16,
            (predict_boxes[:, :, :, :, 1] + self.offset.permute(0, 2, 1, 3)) *
            16, predict_boxes[:, :, :, :, 2] * 112,
            predict_boxes[:, :, :, :, 3] * 112
        ])
        predict_boxes_tran = predict_boxes_tran.permute(1, 2, 3, 4, 0)

        gt_boxes_tran = torch.stack([
            (gt_boxes[:, :, :, :, 0] + self.offset) * 16,
            (gt_boxes[:, :, :, :, 1] + self.offset.permute(0, 2, 1, 3)) * 16,
            gt_boxes[:, :, :, :, 2] * 112, gt_boxes[:, :, :, :, 3] * 112
        ])
        gt_boxes_tran = gt_boxes_tran.permute(1, 2, 3, 4, 0)

        gt_iou = self.iou_calc(predict_boxes_tran, gt_boxes_tran)
        max_iou = torch.max(gt_iou, 3, keepdim=True)
        max_iou = max_iou[0]
        object_mask = torch.mul(
            torch.ge(gt_iou, max_iou).float(), gt_object.float())
        noob_mask = Variable(torch.ones(object_mask.size())) - object_mask

        #class loss
        delta_p = gt_classes - predict_class
        delta_p_obj = delta_p * gt_object
        class_loss = self.lambda_class * torch.sum(delta_p_obj**
                                                   2) / self.batch_size

        #coord loss
        coord_mask = object_mask.contiguous().view(self.batch_size,
                                                   self.cell_size,
                                                   self.cell_size, 2, 1)
        coord_delta = predict_boxes[:, :, :, :, :2] - gt_boxes[:, :, :, :, :2]
        coord_delta_mask = coord_delta * coord_mask
        size_delta = torch.sqrt(predict_boxes[:, :, :, :, 2:]) - torch.sqrt(
            gt_boxes[:, :, :, :, 2:])
        size_delta_mask = size_delta * coord_mask
        coord_loss = (torch.sum(coord_delta_mask**2) + \
                       torch.sum(size_delta_mask**2))/self.batch_size * self.lambda_coord

        #iou loss
        confidence_delta = predict_confidence - gt_iou

        iou_loss = (torch.sum((confidence_delta*object_mask)**2)+   \
                        self.lambda_noobj * torch.sum((confidence_delta*noob_mask)**2))/self.batch_size

        #        print(class_loss)
        #        print(coord_loss)
        #        print(iou_loss)
        total_loss = class_loss + coord_loss + iou_loss
        #        print (total_loss)
        #        pdb.set_trace()

        accuracy = []
        if cal_accuracy is False:
            accuracy = [0, 0, 0]
        else:
            #############detection accuracy###############
            gt_noob = Variable(torch.ones(gt_object.size())) - gt_object
            threshold = threshold
            max_confidence = torch.max(predict_confidence, 3, keepdim=True)
            detect_iou = torch.ge(max_confidence[0], threshold).float()
            detect_iou_tp = detect_iou * gt_object
            detect_iou_fp = detect_iou * gt_noob
            detect_tp_accu = torch.sum(detect_iou_tp) / self.batch_size
            detect_fp_accu = torch.sum(detect_iou_fp) / self.batch_size
            detect_gt = torch.sum(gt_object) / self.batch_size
            detect_tp_accu = detect_tp_accu / detect_gt
            detect_fp_accu = detect_fp_accu / detect_gt
            accuracy.append(detect_tp_accu)
            accuracy.append(detect_fp_accu)
            #############iou accuracy#####################
            iou_accu = torch.sum(
                max_confidence[0] * detect_iou_tp) / self.batch_size
            iou_accu = iou_accu / detect_gt
            accuracy.append(iou_accu)
            #############class accuracy###################
            predicted_class = torch.max(predict_class, 3, keepdim=True)[1]
            groundtruth_classes = torch.max(gt_classes, 3, keepdim=True)[1]
            class_eq = (predicted_class == groundtruth_classes)
            class_hit = class_eq.float() * detect_iou * gt_object
            class_accu = torch.sum(
                class_hit.data.float()) / torch.sum(gt_object)
            accuracy.append(class_accu)
        if math.isnan(total_loss.data.cpu().numpy()):
            for i0 in range(self.batch_size):
                for i1 in range(self.cell_size):
                    for i2 in range(self.cell_size):
                        for i3 in range(2):
                            if math.isnan(
                                    confidence_delta[i0, i1, i2,
                                                     i3].data.cpu().numpy()):
                                print([i0, i1, i2, i3])
            Tracer()()
        return total_loss, accuracy
Example #15
Tracer()()
Example #16
from IPython.core.debugger import Tracer; keyboard = Tracer()
import tensorflow as tf
import abc

class BaseModel(object):
    """
    Abstracted class for neural network models using tensorflow.
    """

    def __init__(self, name, seed):
        tf.set_random_seed(seed)
        self.seed = seed
        self.name = name

    def getAllVariables(self):
        # returns a dictionary which contains all of the model's variables
        # e.g. modelname := 'CNN'  ->  'CNN/foo/bar', 'foo/CNN/bar' are OK, 'CTCNN/foo/bar' will be ignored
        r_dict = {}
        for v in tf.all_variables():
            strNum = v.name.find(self.name+'/')
            if strNum >= 0:
                if strNum == 0 or v.name[strNum-1] == '/':
                    r_dict[v.name] = v
        return r_dict

    def checkWeightSum(self):
        # returns the sum of all variables in the model
        s = 0.0
        for name, v in self.getAllVariables().iteritems():
            s += tf.reduce_sum(v)
Example #17
CreateLib().

TODO: Add facility to generate our own custom defined stub files. In particular
so we can create a TextWindowPrint() command.
TODO: Switch to jinja2 templates instead of string format replacements.
"""
import os
import yaml
import re

import glovars

# Setup to use breakpt() for dropping into ipdb:
from IPython.core.debugger import Tracer

breakpt = Tracer()

# Regex for wrapping paragraphs...
# 60 columns then up to the next whitespace:
PARMAT = re.compile('(.{60}.+?)\s')
DOCSTRING = '''
    ' Wrapper stub for {filename} {cmdtypecaps}
    '
    ' {cmddoc}
    '
    ' Description:
    '   {descr}
    '
    ' Returns:
    '   {returndoc}
    '
Example #18
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as pl
import logging

_log = logging.getLogger('mcfost')
logging.basicConfig(level=logging.INFO)

# this lets you put "stop()" in your code to have a debugger breakpoint
from IPython.core.debugger import Tracer

stop = Tracer()

_VERBOSE = False

# some extremely simple classes to serve as structs.


class Star():
    temp = 4000
    radius = 1.0
    mass = 1.0
    x = 0.0
    y = 0.0
    z = 0.0
    spectrum = ''
    fUV = 0.0
    flope_fuv = 0.0

Example #19
import numpy as np

from pystruct.problems import GraphCRF
from pystruct.inference import inference_dispatch

from IPython.core.debugger import Tracer

tracer = Tracer()


class IgnoreVoidCRF(GraphCRF):
    """GraphCRF that ignores nodes with void label in ground truth.
    """
    def __init__(self,
                 n_states=2,
                 n_features=None,
                 inference_method='qpbo',
                 void_label=21):
        if void_label >= n_states:
            raise ValueError("void_label must be one of the states!")
        GraphCRF.__init__(self, n_states, n_features, inference_method)
        self.void_label = void_label

    def max_loss(self, y):
        return np.sum(y != self.void_label)

    def loss(self, y, y_hat):
        # hamming loss:
        return np.sum((y != y_hat)[y != self.void_label])

    def loss_augmented_inference(self,
Example #20
def stress_constraints(tri_data1, tri_data2, sm, pr):
    out = []
    E1, S1, x_to_xp1 = derive_stress(tri_data1, sm, pr)
    E2, S2, x_to_xp2 = derive_stress(tri_data2, sm, pr)

    tri1, _, tri_idx1, corner_idx1 = tri_data1
    dof_start1 = tri_idx1 * 9 + corner_idx1 * 3
    n1 = np.cross(tri1[1] - tri1[0], tri1[2] - tri1[0])
    n1 /= np.linalg.norm(n1)

    tri2, _, tri_idx2, corner_idx2 = tri_data2
    dof_start2 = tri_idx2 * 9 + corner_idx2 * 3
    n2 = np.cross(tri2[1] - tri2[0], tri2[2] - tri2[0])
    n2 /= np.linalg.norm(n2)

    S1xx = rotate_tensor(S1, x_to_xp1)
    np.testing.assert_almost_equal(
        Sdot(S1xx, n1), np.array([[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]))
    S2xx = rotate_tensor(S2, x_to_xp2)
    np.testing.assert_almost_equal(
        Sdot(S2xx, n2), np.array([[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]))

    def make_constraint(lhs, rhs):
        terms = []
        for k in range(3):
            terms.append(Term(lhs[1 + k], dof_start1 + k))
            terms.append(Term(-rhs[1 + k], dof_start2 + k))
        out.append(ConstraintEQ(terms, -lhs[0] + rhs[0]))

    lhs1 = n2.dot(Sdot(S1xx, n1))
    rhs1 = n2.dot(Sdot(S2xx, n1))
    make_constraint(lhs1, rhs1)

    r2 = np.cross(n1, n2)
    r2 /= np.linalg.norm(r2)

    r1 = np.cross(n1, r2)
    r3 = np.cross(n2, r2)

    lhs2 = S1xx[0, 0] + S1xx[1, 1] + S1xx[2, 2]
    rhs2 = S2xx[0, 0] + S2xx[1, 1] + S2xx[2, 2]
    # norm = np.linalg.norm(lhs2[1:])
    # lhs2 /= norm
    # rhs2 /= norm
    # lhs2[0] *= -1.5
    # rhs2[0] *= -1.5
    # make_constraint(lhs2, rhs2)

    nmid = (n1 + n2) / 2.0
    nmid /= np.linalg.norm(nmid)
    lhs3 = nmid.dot(Sdot(S1xx, r2))
    rhs3 = nmid.dot(Sdot(S2xx, r2))
    norm = np.linalg.norm(lhs3[1:])
    lhs3 /= norm
    rhs3 /= norm
    # lhs3[0] /= -7.4
    # rhs3[0] /= -7.4
    # make_constraint(lhs3, rhs3)
    if np.any(S1xx[:, :, 0] != 0):
        print('C1: ', lhs1, rhs1)
        print('C2: ', lhs2, rhs2)
        print('C3: ', lhs3, rhs3)
        from IPython.core.debugger import Tracer
        Tracer()()

    return out
Ejemplo n.º 21
0
from IPython.core.debugger import Tracer
dh = Tracer()
from pyjmi import transfer_model, transfer_optimization_problem, get_files_path
from pyjmi.optimization.casadi_collocation import BlockingFactors
from pymodelica import compile_fmu
from pyfmi import load_fmu
import matplotlib.pyplot as plt
import os
from pyjmi.common.io import ResultDymolaTextual
import time

import sys
sys.path.append('..')
import symbolic_processing as sp
from simulation import *

# Define problem
plt.rcParams.update({'text.usetex': False})
with_plots = True
#~ with_plots = False
blt = True
#~ blt = False
caus_opts = sp.CausalizationOptions()
#~ caus_opts['plots'] = True
#~ caus_opts['draw_blt'] = True
#~ caus_opts['solve_blocks'] = True
#~ caus_opts['ad_hoc_scale'] = True
#~ caus_opts['inline'] = False
#~ caus_opts['closed_form'] = True
#~ caus_opts['inline_solved'] = True
Ejemplo n.º 22
0
    def extract_tweets(self, items_html):
        """
        Parses Tweets from the given HTML
        :param items_html: The HTML block with tweets
        :return: A JSON list of tweets
        """
        try:
            soup = BeautifulSoup(items_html, "lxml")
            tweets = []
            twitter_username_re = re.compile(
                r'(?<=^|(?<=[^a-zA-Z0-9-_\.]))@([A-Za-z_]+[A-Za-z0-9_]+[A-Za-z]+[A-Za-z0-9])'
                # r'(?<=@)\w+'
                )
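            # Illustrative note (not from the original code): on "hi @some_user!",
            # twitter_username_re.findall(...) returns ['some_user']; the lookbehind
            # rejects handles preceded by a letter, digit, '-', '_' or '.'.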

            for li in soup.find_all("li", class_='js-stream-item'):
                # Tracer()()
                # If our li doesn't have a tweet-id, we skip it as it's not going
                # to be a tweet.
                if 'data-item-id' not in li.attrs:
                    continue

                tweet = {
                    'tweet_id': li['data-item-id'],
                    'text': None,
                    # 'is_retweet':false,
                    'user_id': None,
                    'user_screen_name': None,
                    'user_name': None,
                    'created_at_ts': None,
                    'created_at_iso': None,
                    # 'convo_url': None,
                    # 'image_url': [],
                    'num_retweets': 0,
                    'num_favorites': 0,
                    'keyword': [],
                    'quote_tweet_id': None,
                    'quote_tweet_userid': None,
                    'quote_tweet_username': None,
                    'quote_tweet_screenname': None,
                    'quote_tweet_text': None,
                    'html': None
                }
                try:
                    tweet['html'] = str(li)
                except Exception as e:
                    Tracer()()
                    pass

                '''
                Extract tweet text
                '''
                try:
                    text_p = li.find("p", class_="tweet-text")
                    if text_p is not None:
                        tweet['text'] = text_p.get_text()

                        # If there is any user mention containing the query, then pass the tweet.
                        # Tracer()()
                        # if self.query.find("from:") == -1:
                        user_mentions = twitter_username_re.match(tweet['text'])
                        if user_mentions and any([self.query_keyword.lower() in user_mention.lower() for user_mention in user_mentions.groups()]):
                            # Tracer()()
                            logging.log(logging.DEBUG, 'Found '+self.query_keyword+' in '+ str(user_mentions.groups())+': Drop tweet '+tweet['tweet_id'])
                            continue
                        # If the keyword was found in the text and was the same with query, then accept the tweet
                        # elif text_p.find("strong") and text_p.find("strong").get_text().lower() == self.query_keyword.lower():
                        #     tweet['keyword'] = text_p.find("strong").get_text()
                        # elif tweet['text'].lower().find(self.query_keyword.lower()) != -1:
                        #     tweet['keyword'] = self.query_keyword
                        # else:
                        #     # The keyword is not in the text, then pass the tweet.
                        #     # Tracer()()
                        #     logging.log(logging.DEBUG, 'No '+self.query_keyword +' in the content of tweet'+': Drop tweet '+tweet['tweet_id'])
                        #     continue
                    else:
                        # Tracer()()
                        logging.log(logging.DEBUG, 'No content in the tweet' + ': Drop tweet ' + tweet['tweet_id'])
                        continue
                except Exception as e:
                    Tracer()()
                    logging.log(logging.DEBUG, "ERROR(extract_text_p): %s" % (str(e),))
                    traceback.print_exc()
                '''
                Extract quote tweet content if exists
                '''
                try:
                    quote_tweet = li.find("div", class_="QuoteTweet-innerContainer")
                    if quote_tweet is not None:
                        # Tracer()()
                        tweet['quote_tweet_id'] = quote_tweet['data-item-id']
                        tweet['quote_tweet_userid'] = quote_tweet['data-user-id']
                        tweet['quote_tweet_screenname'] = quote_tweet['data-screen-name']
                        tweet['quote_tweet_username'] = quote_tweet.find("b",class_="QuoteTweet-fullname").get_text()
                        tweet['quote_tweet_text'] = quote_tweet.find("div",class_="QuoteTweet-text").get_text()
                except Exception as e:
                    Tracer()()
                    logging.log(logging.DEBUG, "ERROR(extract_quote_tweet): %s" % (str(e),))
                    traceback.print_exc()
Ejemplo n.º 23
0
# internal libraries
from templates import c_template, h_template, pyx_template, setup_template

# Python 2 vs 3 importing
try:
    from string import letters as all_letters
except ImportError:
    from string import ascii_letters as all_letters

# debugging
try:
    from IPython.core.debugger import Tracer
except ImportError:
    pass
else:
    set_trace = Tracer()


class CythonGenerator(object):
    def __init__(self,
                 filename_prefix,
                 mass_matrix,
                 forcing_vector,
                 constants,
                 coordinates,
                 speeds,
                 specified=None):
        """Instantiates an object that can generates a Cython shared object
        module with a function that evaluates the provided mass_matrix and
        the forcing vector given the numerical values of the input
        variables.
Ejemplo n.º 24
0
def DebugHere():
    if ipy_debug_available:
        t = Tracer()
        t()
    else:
        print('ipython not available for debugging')
Ejemplo n.º 25
0
def main():
    global g_options

    parser = OptionParser()
    parser.add_option("--scrape",
                      dest="scrape",
                      action="store_true",
                      help="Scrape random samples from XenoCanto")
    parser.add_option(
        "--scrape-conserve",
        dest="scrape_conserve",
        action="store_true",
        help=
        "Scrape random samples from XenoCanto, but only labels which already exist"
    )
    parser.add_option(
        "--scrape-period",
        dest="scrape_interval",
        action="store",
        type="int",
        help=
        "Scrape random samples from XenoCanto, at given interval in seconds")

    parser.add_option("--stats",
                      dest="stats",
                      action="store_true",
                      help="Print statistics for local samples")

    parser.add_option("-S",
                      "--make-spectrograms",
                      dest="spectrograms_build",
                      action="store_true",
                      help="Make and overwrite spectrograms to file.")

    parser.add_option("-t",
                      "--load-templates",
                      dest="templates_load",
                      action="store_true",
                      help="Load templates from file")
    parser.add_option(
        "-T",
        "--make-templates",
        dest="templates_build",
        action="store_true",
        help="Make and overwrite templates to file. Equivalent to -sT")
    parser.add_option("--show-templates",
                      dest="templates_interactive",
                      action="store_true",
                      help="Show template extraction results")

    parser.add_option("-d", dest="data_load", action="store")

    parser.add_option("-f",
                      "--load-features",
                      dest="features_load",
                      action="store_true",
                      help="Load features from file")
    parser.add_option(
        "-F",
        "--make-features",
        dest="features_build",
        action="store",
        help="Extract and overwrite features to file. Equivalent to -stF")

    parser.add_option("-c",
                      "--classify",
                      dest="classify",
                      action="store_true",
                      help="Run classifier. Equivalent to -stfc")

    parser.add_option("-v",
                      "--verbose",
                      dest="verbose",
                      action="store_true",
                      help="Print verbose output")
    parser.add_option("-i",
                      "--informative",
                      dest="informative",
                      action="store_true",
                      help="Print informative output")

    parser.add_option("-l",
                      "--filter-label",
                      dest="label_filter",
                      action="store",
                      help="Process only samples of a givel label value")

    parser.add_option("--merge", dest="merge_results", action="store_true")

    (options, args) = parser.parse_args()
    g_options = options

    logging.basicConfig(level=logging.INFO)

    __loaddata = not options.classify

    if __loaddata:
        repository = SampleRepository(spectrograms_dir=DIR_SPECTROGRAMS,
                                      samples_dir=DIR_SAMPLES)

        repository.gather_samples()

        if process_scrape_options(options, repository): exit()
        if process_stats_options(options, repository): exit()

        previous_data = load_feature_data(options.data_load) or None

        logging.info('{} samples'.format(len(repository.samples)))

        class_to_idx = previous_data.label_map \
                if previous_data is not None \
                else make_class_mapping(repository.samples)

        idx_to_class = {v: k for k, v in class_to_idx.iteritems()}
        logging.info('{} classes'.format(len(class_to_idx)))

        # filter ids (keep/reject previous incl. labels)
        previous_ids = previous_data.ids if previous_data is not None else []
        preserve = True

        if preserve:
            logging.info('keeping ids: {}'.format(previous_ids))
            if len(previous_ids) != 0:
                repository.filter_uids(previous_ids, reject=False)
        else:
            logging.info('rejecting ids: {}'.format(previous_ids))
            repository.filter_uids(previous_ids, reject=True)
            logging.info(
                'remove previous ids: filtered down to {} samples'.format(
                    len(repository.samples)))

            previous_labels = set([idx_to_class[y] for y in previous_data.y])
            logging.info('rejecting labels: {}'.format(previous_labels))
            repository.filter_labels(previous_labels, reject=True)
            logging.info('filter labels: filtered down to {} samples'.format(
                len(repository.samples)))

            repository.filter_labels(['Eurasian Blackcap', 'Garden Warbler'],
                                     reject=False)

            repository.reject_by_class_count(at_least=20)
            logging.info(
                'remove low bound: filtered down to {} samples'.format(
                    len(repository.samples)))

            repository.keep_n_of_each_class(20)

        if options.spectrograms_build:
            logging.error('UNSUPPORTED ACTION -- NOT STORING SGRAMS DEV ONLY')
            sgram_count = build_all_spectrograms(repository.samples)
            logging.info('built {} spectrograms'.format(sgram_count))
            #repository.store_all()
            #store_all_spectrograms(samples)

        logging.info('filtered down to {} samples'.format(
            len(repository.samples)))
        print_template_statistics(repository.samples)

        if options.templates_build:
            logging.error("WILL NOT STORE TEMPLATES .. DEV ONLY")
            #Tracer()()
            #delete_stored_templates(samples)
            all_templates = build_all_templates(repository.samples, options,
                                                options.label_filter)
            #store_all_templates(samples)
            logging.info('extracted {} templates'.format(len(all_templates)))
            logging.info('vvv after template build vvv')
            print_template_statistics(repository.samples)
            exit()

        repository.reject_by_template_count_per_class(at_least=2000)
        print_template_statistics(repository.samples)

        X = None
        y = None
        ids = None
        if options.merge_results:
            data_1 = load_feature_data('./merged_data_2.pkl')
            data_2 = load_feature_data('./_new_data_1.pkl')
            pids = data_1.ids + data_2.ids
            Tracer()()
            repository.filter_uids(pids, reject=False)
            data = merge_results_d(data_1, data_2, repository)

            pickle_safe(data, './_merged_results.pkl')
            Tracer()()

        elif options.features_build:
            repository.filter_labels([
                'Eurasian Reed Warbler', 'Garden Warbler',
                'Western Meadowlark', 'Eurasian Blackcap'
            ],
                                     reject=False)
            all_templates = get_first_n_templates_per_class(repository, 3000)
            logging.info('{} template(s) loaded'.format(len(all_templates)))
            X, y, ids = extract_features(repository.samples, all_templates,
                                         class_to_idx)
            used_templates = [t.get_uid() for t in all_templates]

            data = {
                'X': X,
                'y': y,
                'ids': ids,
                'label_map': class_to_idx,
                'template_order': used_templates
            }

            pickle_safe(data, options.features_build)
        elif options.features_load:
            data = previous_data

    if options.classify:
        #with open('./cnfdata', 'r') as f:
        #    ce = pickle.load(f)

        #plot_cnf_matrix(ce.cnf, data.y, idx_to_class, normalize=True, show_values=True)
        #Tracer()()

        data = load_feature_data(options.data_load)
        ce = ClfEval(data, 10, 10, None)
        #clf = ExtraTreesClassifier(
        param_grid = {
            'n_estimators': [300],      # [10, 500, 5000, 10000]
            'max_features': [0.33],     # 'sqrt', 'log2', len(previous_data.template_order)*0.8
            'min_samples_split': [2],   # 10, 100
            'min_samples_leaf': [1],    # 10, 100
            'max_depth': [None]         # 5, 10, 100, 200
        }
        __params_l = list(ParameterGrid(param_grid))
        __i = 0

        print('')
        for p in __params_l:
            print('  {}'.format(p))
        print('')

        # Evaluate all parameters. !! Results are dumped to console !!
        t1 = time.time()
        evaluate_clf_params(__params_l, 0, data)
        print('total time {}'.format(time.time() - t1))
Ejemplo n.º 26
0
def get_kmeans_player_logs(n_clusters=100):
    logs = get_logs_for_clustering()
    kmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(logs)
    centers = kmeans.cluster_centers_
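    # cluster_centers_ is an (n_clusters, n_features) array of centroid coordinates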
    print(kmeans.cluster_centers_)
    Tracer()()
Ejemplo n.º 27
0
def crazy_visual():
    dataset = NYUSegmentation()
    # load training data
    data = load_nyu(n_sp=500)
    data = add_edges(data)

    for x, image_name, superpixels, y in zip(data.X, data.file_names,
                                             data.superpixels, data.Y):
        print(image_name)
        if int(image_name) != 11:
            continue
        image = dataset.get_image(image_name)
        plt.figure(figsize=(20, 20))
        boundary_image = mark_boundaries(image, superpixels)
        plt.imshow(boundary_image)
        gridx, gridy = np.mgrid[:superpixels.shape[0], :superpixels.shape[1]]

        edges = x[1]
        points_normals = dataset.get_pointcloud_normals(image_name)
        centers2d = get_superpixel_centers(superpixels)
        centers3d = [
            np.bincount(superpixels.ravel(), weights=c.ravel())
            for c in points_normals[:, :, :3].reshape(-1, 3).T
        ]
        centers3d = (np.vstack(centers3d) / np.bincount(superpixels.ravel())).T
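        # (Assumed intent) bincount with per-coordinate weights sums x, y, z over
        # each superpixel id; dividing by the plain bincount gives the mean 3-D
        # position of every superpixel, one row per superpixel after the transpose.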
        sp_normals = get_sp_normals(points_normals[:, :, 3:], superpixels)
        offset = centers3d[edges[:, 0]] - centers3d[edges[:, 1]]
        offset = offset / np.sqrt(np.sum(offset**2, axis=1))[:, np.newaxis]
        #mean_normal = (sp_normals[edges[:, 0]] + sp_normals[edges[:, 1]]) / 2.
        mean_normal = sp_normals[edges[:, 0]]
        #edge_features = np.arccos(np.abs((offset * mean_normal).sum(axis=1))) * 2. / np.pi
        edge_features = 1 - np.abs((offset * mean_normal).sum(axis=1))
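        # Assuming unit normals, edge_features is close to 1 when the center-to-center
        # direction lies in the surface plane and close to 0 when it is parallel to
        # the normal.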
        no_normals = (np.all(sp_normals[edges[:, 0]] == 0, axis=1) +
                      np.all(sp_normals[edges[:, 1]] == 0, axis=1))
        edge_features[no_normals] = 0  # nan normals

        if True:
            coords = points_normals[:, :, :3].reshape(-1, 3)
            perm = np.random.permutation(superpixels.max() + 1)
            mv.points3d(coords[:, 0],
                        coords[:, 1],
                        coords[:, 2],
                        perm[superpixels.ravel()],
                        mode='point')
            #mv.points3d(centers3d[:, 0], centers3d[:, 1], centers3d[:, 2], scale_factor=.04)
            mv.quiver3d(centers3d[:, 0], centers3d[:, 1], centers3d[:, 2],
                        sp_normals[:, 0], sp_normals[:, 1], sp_normals[:, 2])
            mv.show()
        from IPython.core.debugger import Tracer
        Tracer()()

        for i, edge in enumerate(edges):
            e0, e1 = edge
            #color = (dataset.colors[y[e0]] + dataset.colors[y[e1]]) / (2. * 255.)
            #f = edge_features[i]
            #if f < 0:
            #e0, e1 = e1, e0
            #f = -f

            #plt.arrow(centers[e0][0], centers[e0][1],
            #centers[e1][0] - centers[e0][0], centers[e1][1] - centers[e0][1],
            #width=f * 5
            #)
            color = "black"
            plt.plot([centers2d[e0][0], centers2d[e1][0]],
                     [centers2d[e0][1], centers2d[e1][1]],
                     c=color,
                     linewidth=edge_features[i] * 5)
        plt.scatter(centers2d[:, 0], centers2d[:, 1], s=100)
        plt.tight_layout()
        plt.xlim(0, superpixels.shape[1])
        plt.ylim(superpixels.shape[0], 0)
        plt.axis("off")
        plt.savefig("figures/normal_relative/%s.png" % image_name,
                    bbox_inches="tight")
        plt.close()
Ejemplo n.º 28
0
# k_means.py
# calculate k-means over the lyric database
# using tfidf scaling + cosine similarity

import sys
import sqlite3
from math import sqrt
import numpy as np

trace = None
try:
    from IPython.core.debugger import Tracer
    trace = Tracer()
except ImportError:

    def disabled():
        dbg("Tracepoint disabled -- IPython not found")

    trace = disabled

MXM_DB = "mxm_dataset.db"
MXM_TFIDF = "mxm_tfidf_small.db"

tfidf = None
num_means = 6
total_docs = 0
modpct = 1

centroids = []

Ejemplo n.º 29
0
from __future__ import print_function

import torch

from seq2seq.loss import NLLLoss

from IPython.core.debugger import Tracer
debug_here = Tracer()


class Evaluator(object):
    def __init__(self, loss=NLLLoss(), batch_size=64):
        """Class to initialize an evaluator

        Args:
            loss (seq2seq.loss, optional): loss for evaluator (default: seq2seq.loss.NLLLoss)
            batch_size (int, optional): batch size for evaluator (default: 64)
        """
        self.loss = loss
        self.batch_size = batch_size

    def evaluate(self, model, data):
        """ Evaluate a model on given dataset and return performance.

        Args:
            model (seq2seq.models): model to evaluate
            data (seq2seq.dataset.dataset.Dataset): dataset to evaluate against

        Returns:
            loss (float): loss of the given model on the given dataset
        """
Ejemplo n.º 30
0
    def step(self, action):
        """
        Take one step of interaction.
        
        Parameters
        ----------
        action: ndarray
            action[0: smas_num] are the actions corresponding to the SMAs
            action[smas_num: end] are the actions corresponding to the light colors
        
        Returns
        -------
        observation: ndarray
            observation of the environment after taking an action
        reward: float
            reward after taking an action
        done: bool
            whether the simulation is done or not.
        info:
            some information for debugging
        """
        # To improve the system's robustness, we need to just ignore this action
        # rather than throw an exception.
        if np.sum(np.isnan(action)) != 0:
            Tracer()()
            raise ValueError("Find nan value in action!")
        # Clip action to avoid improper action commands
        action = np.clip(action, self.act_min, self.act_max)
        action = (action + 1) / 2.0
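        # Presumably act_min/act_max are -1/1, so (action + 1) / 2 rescales the
        # clipped action from [-1, 1] to [0, 1] before it is split below.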
        # Split action for light and sma
        action = np.ndarray.flatten(action)  # flatten array
        action_smas = action[:self.smas_num]
        action_lights_intensity = action[self.smas_num:]
        # Taking action

        #vrep.simxPauseCommunication(self.clientID,True)     #temporarily halting the communication thread
        self._set_all_joint_position(action_smas)
        # Actually only set light color
        self._set_all_light_intensity(action_lights_intensity)
        #vrep.simxPauseCommunication(self.clientID,False)    #and evaluated at the same time

        # Set a small sleep time to avoid getting nan sensor data
        time.sleep(0.01)

        # Observe current state
        try:
            self.observation = self._self_observe()
        except ValueError:
            self._nanObervationFlag = 1
            logging.error("Observation has NAN value.")

        # This reward is a non-interactive reward, i.e. it is not affected by visitors.
        # Therefore, it is only used for tuning the hyper-parameters of LASAgent.
        if self.reward_function_type == 'occupancy':
            #   1. 'IR_distance': based on IR distance from detected object to IR
            #   2. 'IR_state_ratio': the ratio of # of detected objects and all #
            #                        of IR sensors
            #   3. 'IR_state_number': the number of detected objects
            self.reward = self._reward_occupancy(self.observation,
                                                 'IR_distance')
        else:
            raise ValueError('No reward function: {}'.format(
                self.reward_function_type))

        done = False
        info = []
        return self.observation, self.reward, done, info