def shell():
    """Runs a shell in the app context.

    Runs an interactive Python shell in the context of a given
    Flask application. The application will populate the default
    namespace of this shell according to its configuration.
    This is useful for executing small snippets of management code
    without having to manually configure the application.
    """
    import IPython
    from flask.globals import _app_ctx_stack
    app = _app_ctx_stack.top.app
    banner = 'Python %s on %s\nIPython: %s\nApp: %s%s\nInstance: %s\n' % (
        sys.version,
        sys.platform,
        IPython.__version__,
        app.import_name,
        app.debug and ' [debug]' or '',
        app.instance_path,
    )

    ctx = {}

    # Support the regular Python interpreter startup script if someone
    # is using it.
    startup = os.environ.get('PYTHONSTARTUP')
    if startup and os.path.isfile(startup):
        with open(startup, 'r') as f:
            eval(compile(f.read(), startup, 'exec'), ctx)

    ctx.update(app.make_shell_context())

    IPython.embed(banner1=banner, user_ns=ctx)
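
As a side note, a minimal sketch of how the namespace consumed by app.make_shell_context() above can be populated on the application side, via Flask's shell_context_processor hook (the app object and returned names here are illustrative, not part of the example above):

from flask import Flask

app = Flask(__name__)

@app.shell_context_processor
def make_extra_context():
    # every key returned here becomes a variable inside the embedded shell
    return {'app': app, 'config': app.config}
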
def impute_data(df, cohort):

    #import IPython
    #IPython.embed()

    if isinstance(df, str):
        df = ml.read_data(df)

    #########################
    ## IMPUTE MISSING DATA ##
    #########################
    print "Imputing missing data..."

    # change msam to missing if msam_NA == 1
    nanList = ['g6_g6msam_nan', 'g7_g7msam_nan', 'g8_g8msam_nan', 'g9_g8msam_nan']
    varList = [['g6_g6msam_Advanced', 'g6_g6msam_Basic', 'g6_g6msam_Proficient'],
               ['g7_g7msam_Advanced', 'g7_g7msam_Basic', 'g7_g7msam_Proficient'],
               ['g8_g8msam_Advanced', 'g8_g8msam_Basic', 'g8_g8msam_Proficient'],
               ['g9_g8msam_Advanced', 'g9_g8msam_Basic', 'g9_g8msam_Proficient']]
    for x in range(0,len(nanList)):
        nacol = nanList[x]
        colList = varList[x]
        for col in colList:
            df.loc[df[nacol] == 1, col] = np.nan 


    #pred missing data using any available data
    wordList = ['absrate', 'mapr', 'msam_Advanced', 'msam_Basic', 'msam_Proficient', 'mobility', 'nsusp', 'mpa', 'tardyr', 'psatm', 'psatv', 'retained']
    for word in wordList:
        colList = [col for col in df.columns if word in col]
        rowMean = df[colList].mean(axis=1)
        for col in colList:
            print df[col].value_counts(dropna=False)
            df[col] = df[col].fillna(rowMean)
            print df[col].value_counts(dropna=False)


    '''
    ############################
    # IMPUTE NEIGHBORHOOD DATA #
    ############################

    print "Imputing missing school neighborhood data..."

    ## Fill missing school neighborhood data
    print "Fixing neighborhood columns..."
    neighborhood_cols = ['suspensionrate',  'mobilityrateentrantswithdra',  'attendancerate',   'avg_class_size',   'studentinstructionalstaffratio',   'dropoutrate',  'grade12documenteddecisionco',  'grade12documenteddecisionem',  'grade12documenteddecisionmi',  'grad12docdec_col_emp', 'graduationrate',   'studentsmeetinguniversitysyste',   'Est_Households_2012',  'Est_Population_2012',  'Med_Household_Income_2012',    'Mean_Household_Income_2012',   'Pop_Below_Poverty_2012',   'Percent_Below_Poverty_2012',   'Pop_Under18_2012', 'Under18_Below_Poverty_2012',   'Under18_Below_Poverty_Percent_2012',   'Housholds_on_Food_stamps_with_Children_Under18_2012',  'Housholds_Pop_on_Food_Stamps_2012',    'Pop_BlackAA_2012', 'Pop_White_2012',   'Bt_18_24_percent_less_than_High_School_2012',  'Bt_18_24_percent_High_School_2012',    'Bt_18_24_percent_Some_College_or_AA_2012', 'Bt_1824_percent_BA_or_Higher_2012',    'Over_25_percent_less_than_9th_grade_2012', 'Over_25_percent_9th_12th_2012',    'Over_25_percent_High_School_2012', 'Over_25__percent_Some_College_No_Deg_2012',    'Over_25_percent_AA_2012',  'Over_25_percent_Bachelors_2012',   'Over_25_percent_Graduate_or_Professionals_2012']
    ml.replace_with_mean(df, neighborhood_cols)
    '''

    #summary = ml.summarize(df)
    #print summary.T
    #ml.print_to_csv(summary.T, 'updated_summary_stats_vertical.csv')

    return_file = '/mnt/data2/education_data/mcps/DATA_DO_NOT_UPLOAD/imputed_data_cohort' + str(cohort) + '.csv'
    ml.print_to_csv(df, return_file)

    #IPython.embed()

    print "Done!"
    import IPython
    IPython.embed()
    return df
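
For reference, a toy sketch of the row-mean imputation pattern used in the loop above (the frame and column names are invented; this is not part of the original pipeline):

import numpy as np
import pandas as pd

toy = pd.DataFrame({'g6_mapr': [1.0, np.nan], 'g7_mapr': [3.0, 4.0]})
cols = [c for c in toy.columns if 'mapr' in c]
row_mean = toy[cols].mean(axis=1)      # per-row mean over the related columns, NaNs skipped
for c in cols:
    toy[c] = toy[c].fillna(row_mean)   # the NaN in the second row becomes 4.0, that row's mean
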
Example #3
def transform_data():
    from solaris.run import load_data
    from sklearn.externals import joblib

    data = load_data('data/data.pkl')

    kringing = PertubatedKriging()
    #kringing = PertubatedSpline()

    data['description'] = '%r: %r' % (kringing, kringing.est)
    print data['description']

    print('_' * 80)
    print(kringing)
    print

    for key in ['train', 'test']:
        print('_' * 80)
        print('transforming %s' % key)
        print
        X = data['X_%s' % key]

        X = kringing.fit_transform(X)
        data['X_%s' % key] = X

    print
    print('dumping data')
    joblib.dump(data, 'data/interp10_data.pkl')
    IPython.embed()
Example #4
File: grid.py Project: chbrown/tsa
def corpus_sandbox(analysis_options):
    print('Exploring SB-5 corpus')
    session = create_session()
    sb5b_documents = session.query(Document).join(Source).\
        filter(Source.name == 'sb5b').all()

    print('Found %d documents' % len(sb5b_documents))

    rows = [dict(
        label=document.label,
        inferred=bool(document.details.get('Inferred')),
        source=document.details.get('Source', 'NA')) for document in sb5b_documents]
    df = pd.DataFrame.from_records(rows)

    # df_agg = df.groupby(['label', 'inferred'])

    # df.pivot_table(values=['label'], rows=['inferred'], aggfunc=[len])
    df.pivot_table(rows=['label', 'inferred'], aggfunc=[len])
    df.pivot_table(rows=['label', 'source'], aggfunc=[len])
    df.pivot_table(rows=['source'], aggfunc=[len])
    # df_agg.plot(x='train', y='accuracy')

    for document in sb5b_documents:
        # 'weareohio' in document.document.lower(), .document
        print(document.details.get('Source'), document.label)

    IPython.embed()
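
Note that the rows= keyword in the pivot_table calls above comes from an older pandas API; in current pandas the rough equivalent (an assumption about the intended aggregation, using the same df) would be:

df.pivot_table(index=['label', 'inferred'], aggfunc=[len])
df.pivot_table(index=['label', 'source'], aggfunc=[len])
df.pivot_table(index=['source'], aggfunc=[len])
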
Example #5
def user_console(user_email_address):
    with global_session_scope() as db_session:
        account = db_session.query(Account).filter_by(
            email_address=user_email_address).one()

        if account.provider == 'eas':
            banner = """
        You can access the account instance with the 'account' variable.
        """
        else:
            with writable_connection_pool(account.id, pool_size=1).get()\
                    as crispin_client:
                if account.provider == 'gmail' \
                        and 'all' in crispin_client.folder_names():
                    crispin_client.select_folder(
                        crispin_client.folder_names()['all'][0],
                        uidvalidity_cb)

                banner = """
        You can access the crispin instance with the 'crispin_client' variable,
        and the account instance with the 'account' variable.

        IMAPClient docs are at:

            http://imapclient.readthedocs.org/en/latest/#imapclient-class-reference
        """

        IPython.embed(banner1=banner)
Example #6
 def _aggregate_batch(data_holder, use_list=False):
     size = len(data_holder[0])
     result = []
     for k in range(size):
         if use_list:
             result.append(
                 [x[k] for x in data_holder])
         else:
             dt = data_holder[0][k]
             if type(dt) in [int, bool]:
                 tp = 'int32'
             elif type(dt) == float:
                 tp = 'float32'
             else:
                 try:
                     tp = dt.dtype
                 except AttributeError:
                     raise TypeError("Unsupported type to batch: {}".format(type(dt)))
             try:
                 result.append(
                     np.asarray([x[k] for x in data_holder], dtype=tp))
             except Exception as e:  # noqa
                 logger.exception("Cannot batch data. Perhaps they are of inconsistent shape?")
                 if isinstance(dt, np.ndarray):
                     s = pprint.pformat([x[k].shape for x in data_holder])
                     logger.error("Shape of all arrays to be batched: " + s)
                 try:
                     # open an ipython shell if possible
                     import IPython as IP; IP.embed()    # noqa
                 except ImportError:
                     pass
     return result
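
A small, hypothetical usage sketch of _aggregate_batch: each component of the datapoints is batched into its own numpy array, with the dtype inferred as in the code above (the datapoints below are invented):

import numpy as np

batch = [(0, 0.5, np.zeros((2, 2))),
         (1, 0.25, np.ones((2, 2)))]
labels, scores, images = _aggregate_batch(batch)
# labels  -> int32 array([0, 1])
# scores  -> float32 array([0.5, 0.25])
# images  -> float64 array of shape (2, 2, 2)
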
Example #7
    def remove_convex(self, start_state = None, peg = None):
        left_peg = start_state.get_left_peg(peg)
        right_peg = start_state.get_right_peg(peg)
        new_contour_pegs = self.convex_hull(start_state, peg, left_peg, right_peg)

        pegs_to_be_added = []
        for elem in new_contour_pegs:
            if (elem != left_peg) and (elem != right_peg):
                pegs_to_be_added.append(elem)

        prev_inside = start_state.inside
        prev_contour = start_state.contour
        new_outside = start_state.outside[:]
        new_outside.append(peg)
        new_inside = []
        for elem in prev_inside:
            if elem not in pegs_to_be_added:
                new_inside.append(elem)
        list_of_new_pegs = []
        for elem in prev_contour.peg_order:
            if elem != peg:
                list_of_new_pegs.append(prev_contour.peg_dict[elem])
            else:
                for new_peg in pegs_to_be_added:
                    list_of_new_pegs.append(Peg(new_peg, True, 0))
        end_state = State(list_of_new_pegs, new_inside, new_outside)
        assert len(new_inside + new_outside + end_state.contour.peg_order) == 12
        IPython.embed()
        return end_state
Example #8
File: batchtps.py Project: rll/lfd
    def test_mapping_cost(
        self,
        other,
        bend_coef=DEFAULT_LAMBDA[1],
        outlierprior=1e-1,
        outlierfrac=1e-2,
        outliercutoff=1e-2,
        T=5e-3,
        norm_iters=DEFAULT_NORM_ITERS,
    ):
        mapping_err = self.mapping_cost(other, outlierprior, outlierfrac, outliercutoff, T, norm_iters)
        for i in range(self.N):
            ## compute error for 0 on cpu
            s_gpu = mapping_err[i]
            s_cpu = np.float32(0)
            xt = self.pts_t[i].get()
            xw = self.pts_w[i].get()

            yt = other.pts_t[i].get()
            yw = other.pts_w[i].get()

            ##use the trace b/c then numpy will use float32s all the way
            s_cpu += np.trace(xt.T.dot(xt) + xw.T.dot(xw) - 2 * xw.T.dot(xt))
            s_cpu += np.trace(yt.T.dot(yt) + yw.T.dot(yw) - 2 * yw.T.dot(yt))

            if not np.isclose(s_cpu, s_gpu, atol=1e-4):
                ## high err tolerance is b/c of difference in cpu and gpu precision?
                print "cpu and gpu sum sq differences differ!!!"
                ipy.embed()
                sys.exit(1)
Example #9
File: batchtps.py Project: rll/lfd
 def test_bending_cost(
     self,
     other,
     bend_coef=DEFAULT_LAMBDA[1],
     outlierprior=1e-1,
     outlierfrac=1e-2,
     outliercutoff=1e-2,
     T=5e-3,
     norm_iters=DEFAULT_NORM_ITERS,
 ):
     self.get_target_points(other, outlierprior, outlierfrac, outliercutoff, T, norm_iters)
     self.update_transform(bend_coef)
     bending_costs = self.bending_cost(bend_coef)
     for i in range(self.N):
         c_gpu = bending_costs[i]
         k_nn = self.kernels[i].get()
         w_nd = self.w_nd[i].get()
         c_cpu = np.float32(0)
         for d in range(DATA_DIM):
             r = np.dot(k_nn, w_nd[:, d]).astype(np.float32)
             r = np.float32(np.dot(w_nd[:, d], r))
             c_cpu += r
         c_cpu *= np.float32(bend_coef)
         if np.abs(c_cpu - c_gpu) > 1e-4:
             ## high err tolerance is b/c of difference in cpu and gpu precision?
             print "cpu and gpu bend costs differ!!!"
             ipy.embed()
             sys.exit(1)
Example #10
File: test.py Project: gkahn13/bsp
def pr2_flashlidar():
    env = rave.Environment()
    #env.Load('envs/testpr2sensors.env.xml')
    env.Load('envs/pr2-table.env.xml')
    
    env.SetViewer('qtcoin') 
    time.sleep(1)
    
    start_time = time.time()
    sensors = [s for s in env.GetSensors() if s.GetName().find("flashlidar") != -1]
    lidar = sensors[0]
    
    lidar.Configure(Sensor.ConfigureCommand.PowerOn)
    #lidar.Configure(Sensor.ConfigureCommand.RenderDataOn)
            
    while True:
        start_time = time.time()
        olddata = lidar.GetSensorData(Sensor.Type.Laser)
        while True:
            data = lidar.GetSensorData(Sensor.Type.Laser)
            if data.stamp != olddata.stamp:
                break
            time.sleep(0.1)
        print('Elapsed: {0}'.format(time.time() - start_time))
        break
    
    lidar.Configure(Sensor.ConfigureCommand.PowerOff)
    #lidar.Configure(Sensor.ConfigureCommand.RenderDataOff)
    
    IPython.embed()
Example #11
File: batchtps.py Project: rll/lfd
def check_update(ctx, b):
    ctx.tps_params[0] = ctx.default_tps_params.copy()
    ctx.update_ptrs()
    xt = ctx.pts_t[0].get()
    p_mat = ctx.proj_mats[b][0].get()
    o_mat = ctx.offset_mats[b][0].get()
    true_res = np.dot(p_mat, xt) + o_mat
    ctx.set_tps_params(ctx.offset_mats[b])
    o_gpu = ctx.tps_params[0].get()
    if not np.allclose(o_gpu, o_mat):
        print "setting tps params failed"
        diff = np.abs(o_mat - o_gpu)
        nz = np.nonzero(diff)
        print nz
        ipy.embed()
        sys.exit(1)
    ctx.update_transform(b)
    p1 = ctx.tps_params[0].get()
    if not np.allclose(true_res, p1):
        print "p1 and true res differ"
        print p1[:3]
        diff = np.abs(p1 - true_res)
        print np.max(diff)
        amax = np.argmax(diff)
        print amax
        nz = np.nonzero(diff)
        print nz[0]
        ipy.embed()
        sys.exit(1)
Example #12
File: test.py Project: gkahn13/bsp
def pr2_sensors():
    env = rave.Environment()
    #env.Load('robots/pr2-beta-sim.robot.xml')
    env.Load('envs/testpr2sensors.env.xml')
    r = env.GetRobots()[0]
    
    env.SetViewer('qtcoin') 
    
    ienablesensor = 0
    while True:
        start_time = time.time()
        sensors = env.GetSensors()
        for i,sensor in enumerate(sensors):
            if i==ienablesensor:
                sensor.Configure(Sensor.ConfigureCommand.PowerOn)
                sensor.Configure(Sensor.ConfigureCommand.RenderDataOn)
            else:
                sensor.Configure(Sensor.ConfigureCommand.PowerOff)
                sensor.Configure(Sensor.ConfigureCommand.RenderDataOff)
        print 'showing sensor %s, try moving obstacles'%(sensors[ienablesensor].GetName())
        if sensors[ienablesensor].Supports(Sensor.Type.Laser):
            # if laser, wait for the sensor data to be updated and then print it
            olddata = sensors[ienablesensor].GetSensorData(Sensor.Type.Laser)
            while True:
                data = sensors[ienablesensor].GetSensorData(Sensor.Type.Laser)
                if data.stamp != olddata.stamp:
                    break
                time.sleep(0.1)
            print 'sensor data: ',data.ranges                        
        #time.sleep(5)
        ienablesensor = (ienablesensor+1)%len(sensors)
        print('Elapsed: {0}'.format(time.time() - start_time))

    
    IPython.embed()
Example #13
File: test.py Project: gkahn13/bsp
def eih():
    env = rave.Environment()
    env.Load('data/testwamcamera.env.xml')
    
    env.SetViewer('qtcoin')
    
    ienablesensor = 0
    while True:
        sensors = env.GetSensors()
        for i,sensor in enumerate(sensors):
            if i==ienablesensor:
                sensor.Configure(Sensor.ConfigureCommand.PowerOn)
                sensor.Configure(Sensor.ConfigureCommand.RenderDataOn)
            else:
                sensor.Configure(Sensor.ConfigureCommand.PowerOff)
                sensor.Configure(Sensor.ConfigureCommand.RenderDataOff)
        print 'showing sensor %s, try moving obstacles'%(sensors[ienablesensor].GetName())
        if sensors[ienablesensor].Supports(Sensor.Type.Laser):
            # if laser, wait for the sensor data to be updated and then print it
            olddata = sensors[ienablesensor].GetSensorData(Sensor.Type.Laser)
            while True:
                data = sensors[ienablesensor].GetSensorData(Sensor.Type.Laser)
                if data.stamp != olddata.stamp:
                    break
                time.sleep(0.1)
            print 'sensor data: ',data.ranges                        
        time.sleep(5)
        ienablesensor = (ienablesensor+1)%len(sensors)

    
    IPython.embed()
Example #14
def fileToHash(filename):
    try:
        assert(isfile(filename))
    except:
        IPython.embed(simple_prompt=True)
    with open(filename, 'rb') as f:
        return sha256(f.read()).hexdigest()
Example #15
def main():
    """
    program starting point
    """
    shell = False
    atomic = False
    try:
        optlist, _ = getopt.getopt(sys.argv[1:], 'p:n:', ["shell", "atomic"])
        optdict = dict(optlist)
        prefix = optdict['-p']
        if '-' in prefix:
            raise ValueError('"-" cannot exist in prefix=%r' % prefix)
        num_workers = int(optdict['-n'])
        if "--shell" in optdict:
            shell = True
        if "--atomic" in optdict:
            atomic = True
    except Exception:
        print traceback.format_exc()
        usage()
        sys.exit(1)
    orchestrator = Orchestrator(prefix, num_workers)
    if shell:
        # give me a ipython shell
        IPython.embed()
        return
    orchestrator.start(atomic)
Example #16
	def segment(self):
		# Specify segments and index
		exit = False
		i = 1
		while not exit:
			user_ret = raw_input("Enter new segment name: ")
			self.map_index_labels[i] = str(user_ret)
			self.map_index_data[i] = []
			i += 1
			user_ret = raw_input("Done with specifing segments?[y/n]")
			if user_ret == 'y':
				exit = True
		print "----------------Collecting Segments ----------------------"
		exit = False
		while not exit:
			self.print_all_index_labels()
			index = int(raw_input("Which index?"))
			start_frm = int(raw_input("Starting frame?"))
			end_frm = int(raw_input("End frame?"))
			new_segment = (start_frm, end_frm)
			segment_list = self.map_index_data[index]
			segment_list.append(new_segment)
			self.map_index_data[index] = segment_list
			user_ret = raw_input("Done with specifing segments?[y/n]")
			if user_ret == 'y':
				exit = True
		IPython.embed()
Example #17
 def pause(self, message="Pause"):
     if hasattr(self, 'services'):
         self.services.start_interactive_mode()
     import IPython
     IPython.embed()
     if hasattr(self, 'services'):
         self.services.stop_interactive_mode()
Example #18
def ArrayToCode(array, codes, code_lengths):
    code_lengths = np.array(code_lengths, dtype = np.int32)
    total_length = np.sum(code_lengths[array])
    total_4bytes = (total_length - 1) / 32 + 1

    compressed_codes= np.zeros(total_4bytes, dtype = np.uint32)

    idx = 0
    shift = 0
    for i in range(len(array)):
        number = array[i]
        length = code_lengths[number]
        code = codes[number]
        while length > 0:
            eff = min(length, 32-shift)
            bits_to_write = code & ((1 << eff) - 1)
            if idx == total_4bytes:
                print i
                import IPython
                IPython.embed()
            compressed_codes[idx] += bits_to_write << shift

            code = code >> eff
            length -= eff
            idx += (shift + eff) / 32
            shift = (shift + eff) % 32

    assert idx * 32 + shift == total_length  # For debug

    return compressed_codes, total_length
def test(model, dataset, weights_filepath=BEST_WEIGHT_FILE):

    model.load_weights(weights_filepath)

    train_iterator = dataset.iterator(batch_size=batch_size,
                                      num_batches=nb_test_batches)

    batch_x, batch_y = train_iterator.next()

    results_dir = 'results'
    if not os.path.exists(results_dir):
        os.makedirs(results_dir)

    pred = model._predict(batch_x)
    pred = pred.reshape(batch_size, patch_size, 1, patch_size, patch_size)



    #pred_as_b012c = pred.transpose(0, 3, 4, 1, 2)

    # for i in range(batch_size):
    #     v, t = mcubes.marching_cubes(pred_as_b012c[i, :, :, :, 0], 0.5)
    #     mcubes.export_mesh(v, t, results_dir + '/drill_' + str(i) + '.dae', 'drill')
    #     viz.visualize_batch_x(pred, i, str(i), results_dir + "/pred_" + str(i))
    #     viz.visualize_batch_x(batch_x, i, str(i), results_dir + "/input_" + str(i))
    #     viz.visualize_batch_x(batch_y, i, str(i), results_dir + "/expected_" + str(i))
    for i in range(batch_size):
        viz.visualize_batch_x_y_overlay(batch_x, batch_y, pred, i=i,  title=str(i))
        # viz.visualize_batch_x(pred, i, 'pred_' + str(i), )
        # viz.visualize_batch_x(batch_x, i,'batch_x_' + str(i), )
        # viz.visualize_batch_x(batch_y, i, 'batch_y_' + str(i), )


    import IPython
    IPython.embed()
Example #20
def some_func(x, y):

	try:
		return x / y
	except:
		IPython.embed()
		raise
 def update_alpha_beta_gamma(self):  # Step 6
     # At the moment, we can only fix C0 = 0
     if self.parDict['CFit'] == 'fixed' and self.parDict['C0'] == 0.0:
         X = np.ones((self.N, 2), dtype=float)
         X[:, 1] = self.xi
         # Eqn (77)
         XTXinv = np.linalg.inv(np.dot(X.T, X))
         Sigma_chat = XTXinv * self.sigsqr
         # Eqn (76)
         chat = np.dot(np.dot(XTXinv, X.T), self.eta)
         # Eqn (75)
         self.alpha, self.beta = self.rng.multivariate_normal(chat, Sigma_chat)
         self.gamma=0.
     else:
         # This doesn't work if any parameters fixed
         X = np.ones((self.N, 3), dtype=float)
         X[:, 1] = self.xi
         X[:, 2] = self.z
         # Eqn (77)
         XTXinv = np.linalg.inv(np.dot(X.T, X))
         Sigma_chat = XTXinv * self.sigsqr
         # Eqn (76)
         chat = np.dot(np.dot(XTXinv, X.T), self.eta)
         # Eqn (75)
         try:
             self.alpha, self.beta, self.gamma = self.rng.multivariate_normal(chat, Sigma_chat)
         except:
             print "multivariate_normal fail"
             IPython.embed()
             sys.exit()
Example #22
    def do_merge(self):
        import cv2
        import numpy as np

        images = [cv2.imread(path.join(BUILD_FULL_PATH, graph)) for graph in self.graphs]
        shape = self.rows, self.cols
        try:
            widths = np.array([img.shape[1] for img in images]).reshape(shape).max(axis=0)
            heights = np.array([img.shape[0] for img in images]).reshape(shape).max(axis=1)
        except AttributeError:
            import IPython
            IPython.embed()
        width = widths.sum()
        height = heights.sum()

        merge_img = np.zeros((height, width, 3), np.uint8)

        cumsum_width = np.r_[0, np.cumsum(widths)]
        cumsum_height = np.r_[0, np.cumsum(heights)]

        for i, img in enumerate(images):
            row = i // self.cols
            col = i % self.cols
            y = cumsum_height[row]
            x = cumsum_width[col]
            h, w = img.shape[:2]
            merge_img[y:y+h, x:x+w] = img

        cv2.imwrite(path.join(BUILD_FULL_PATH, self.graphs[-1].replace(".png", ".merge.png")), merge_img)
Example #23
    def validExtend(self, start_config, end_config):

        segmentNum = 10
        IPython.embed()
        x1=numpy.linspace(start_config[0],end_config[0],segmentNum)
        x2=numpy.linspace(start_config[1],end_config[1],segmentNum)
        x3=numpy.linspace(start_config[2],end_config[2],segmentNum)
        x4=numpy.linspace(start_config[3],end_config[3],segmentNum)
        x5=numpy.linspace(start_config[4],end_config[4],segmentNum)
        x6=numpy.linspace(start_config[5],end_config[5],segmentNum)
        x7=numpy.linspace(start_config[6],end_config[6],segmentNum)

        for i in xrange(0,len(x1)):

            newconfig = [x1[i], x2[i], x3[i], x4[i], x5[i], x6[i], x7[i]]

            self.robot.SetActiveDOFValues(newconfig)
            env = self.robot.GetEnv()

            #check collision
            isCollision =  env.CheckCollision(env.GetBodies()[0],env.GetBodies()[1])
            isSelfCollision = self.robot.CheckSelfCollision()

            if(isCollision==True or isSelfCollision==True):
                return False

        return True
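
As an aside, the seven separate linspace calls above can be collapsed into a single vectorized call (a sketch, assuming start_config and end_config are 7-element sequences and numpy >= 1.16):

        waypoints = numpy.linspace(numpy.asarray(start_config),
                                   numpy.asarray(end_config),
                                   segmentNum)              # array of shape (segmentNum, 7)
        for newconfig in waypoints:
            self.robot.SetActiveDOFValues(newconfig)        # then collision-check as above
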
Example #24
def python_shell(options):
    logger = setup_logger("Robot", debug=options.verbose)

    def conn_callback(*args):
        sys.stdout.write(".")
        sys.stdout.flush()
        return True

    if options.shell == "ipython":
        import IPython
    else:
        import importlib
        sys.path.append(os.path.abspath(""))
        module_name, entrance_name = options.shell.rsplit(".", 1)
        module_instance = importlib.import_module(module_name)
        entrance = module_instance.__getattribute__(entrance_name)

    robot, device = connect_robot_helper(options.target, options.clientkey)

    if options.shell == "ipython":
        logger.info("----> READY")
        logger.info("""
      * Hint: Try 'robot?' and 'dir(robot)' to get more information\n""")
        IPython.embed()
        return 0
    else:
        return entrance(robot, device)
Example #25
def ipython_drop(msg, glo, loc):
    """
    Add this function in your code to drop to an ipython shell

    (provided that glVar.shell == True)

    msg: informative message to print when shell is invoked
    glo: globals()
    loc: locals()
    """
    import IPython
    banner = 'Dropping into IPython, type Ctrl-D to exit. ' + msg
    try:
        # Try pre-0.11 syntax first
        args = ['-pi1','In <\\#>: ','-pi2','   .\\D.: ',
                '-po','Out<\\#>: ','-nosep']
        ipshell = IPython.Shell.IPShellEmbed(
            args, 
            banner = banner,
            exit_msg = 'Leaving Interpreter, back to program.')
        ipshell(global_ns = glo, local_ns = loc)
    except AttributeError:
        # try the new syntax: post-0.11
        #from IPython.config.loader import Config
        #cfg = Config()
        # directly open the shell
        IPython.embed(user_ns=loc, banner2=banner)
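
A brief, hypothetical usage sketch of ipython_drop as described in its docstring (some_step and its local variable are made up; glVar.shell is the project-level flag mentioned above):

def some_step():
    x = 42
    # drops into IPython with x visible in the local namespace
    ipython_drop('while inspecting some_step()', globals(), locals())
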
Example #26
def expand_mlb_data(infile_data, pitcher=False, live=False):
  new_feature_dataframes = [infile_data]
  expanded_columns = []
  for feature_name, (func, columns) in get_expansion_targets(pitcher=pitcher, expanding_live=live):
    with Timer() as t:
      print 'Expanding', feature_name, '(' + ', '.join(columns) + ')...'
      raw_data = [func(row) for index, row in infile_data.iterrows()]
      raw_columns = encode_names(feature_name, columns)
      expanded_columns += raw_columns
      try:
        new_feature_data = pd.DataFrame(raw_data,
                                      index=infile_data.index,
                                      columns=raw_columns)
      except AssertionError as ex:
        print 'Debugging assertion error -- probably no data for some featurizer was loaded. ' \
              'I suspect Numberfire scraping needs to happen!'
        import IPython
        IPython.embed()
      except TypeError as ex:
        print 'Debugging type error'
        import IPython
        IPython.embed()

      new_feature_dataframes.append(new_feature_data)
    print '  took %d seconds' % t.elapsed
  expanded_data = pd.concat(new_feature_dataframes, axis=1)
  # After doing all of that concatenation the index is super weird so just reset it
  expanded_data.reset_index(drop=True, inplace=True)
  # Transform categorical variables to indicator variables -- but only for expanded discrete columns.
  # May need to tweak how this list is generated in the future.
  categorical_cols = [c for c in expanded_columns if expanded_data[c].dtype.name == 'object']
  expanded_discretized = pd.get_dummies(expanded_data, prefix_sep='=', columns=categorical_cols)
  return expanded_discretized
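
For reference, a tiny sketch of what the pd.get_dummies call above does to a categorical column (toy data, not from the MLB pipeline):

import pandas as pd

toy = pd.DataFrame({'hand': ['L', 'R', 'L'], 'hr': [3, 1, 2]})
dummies = pd.get_dummies(toy, prefix_sep='=', columns=['hand'])
# keeps 'hr' and replaces 'hand' with 0/1 indicator columns 'hand=L' and 'hand=R'
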
 def cookie_save(self):
     t = time.strftime('%Y%m%dT%H%M')
     import IPython; IPython.embed()
     _f = '%s_cookiejar.pickle' % t
     with open(_f, 'wb') as f:
         pickle.dump(self.cj, f, pickle.HIGHEST_PROTOCOL)
     print('File save on %s' % _f)
Example #28
def test_window_distance(width, num_steps, plot=None):
    import sdf_file, obj_file
    np.random.seed(100)

    mesh_file_name = 'data/test/meshes/Co_clean.obj'
    sdf_3d_file_name = 'data/test/sdf/Co_clean.sdf'

    sdf = sdf_file.SdfFile(sdf_3d_file_name).read()
    mesh = obj_file.ObjFile(mesh_file_name).read()
    graspable = GraspableObject3D(sdf, mesh)

    grasp_axis = np.array([0, 1, 0])
    grasp_width = 0.1

    grasp1_center = np.array([0, 0, -0.025])
    grasp1 = g.ParallelJawPtGrasp3D(grasp1_center, grasp_axis, grasp_width)
    grasp2_center = np.array([0, 0, -0.030])
    grasp2 = g.ParallelJawPtGrasp3D(grasp2_center, grasp_axis, grasp_width)

    w1, w2 = graspable.surface_information(grasp1, width, num_steps)
    v1, v2 = graspable.surface_information(grasp2, width, num_steps)

    # IPython.embed()

    if plot:
        plot(w1.proj_win, num_steps)
        plot(w2.proj_win, num_steps)
        plot(v1.proj_win, num_steps)
        plot(v2.proj_win, num_steps)
        plt.show()

    IPython.embed()

    return
Example #29
def ipython(user_ns=None):
    try:
        import IPython
        from IPython.config.loader import Config
    except ImportError:
        return simple_repl(user_ns=user_ns)
    defns = {'os':os, 're':re, 'sys':sys}
    if not user_ns:
        user_ns = defns
    else:
        defns.update(user_ns)
        user_ns = defns

    c = Config()
    c.InteractiveShellApp.exec_lines = [
        'from __future__ import division, absolute_import, unicode_literals, print_function',
        ]
    c.TerminalInteractiveShell.confirm_exit = False
    c.PromptManager.in_template = (r'{color.LightGreen}calibre '
            '{color.LightBlue}[{color.LightCyan}%s{color.LightBlue}]'
            r'{color.Green}|\#> '%get_version())
    c.PromptManager.in2_template = r'{color.Green}|{color.LightGreen}\D{color.Green}> '
    c.PromptManager.out_template = r'<\#> '
    c.TerminalInteractiveShell.banner1 = BANNER
    c.PromptManager.justify = True
    c.TerminalIPythonApp.ipython_dir = ipydir
    os.environ['IPYTHONDIR'] = ipydir

    c.InteractiveShell.separate_in = ''
    c.InteractiveShell.separate_out = ''
    c.InteractiveShell.separate_out2 = ''

    c.PrefilterManager.multi_line_specials = True

    IPython.embed(config=c, user_ns=user_ns)
Example #30
def main():
    app = QApplication(sys.argv)
    app.quitOnLastWindowClosed = True
    form = PlotDialog(app)
    form.show()
    IPython.embed()
    app.exit()
Example #31
def gen_mat(nx, ny, nth):
    dx = 1 / nx
    dy = 1 / ny

    xx = np.linspace(0, 1, nx)
    yy = np.linspace(0, 1, ny)

    # Made up shape. Little kelp on top, bulge near middle, zero at bottom.
    kelp_lengths = 5 * (1 - yy)**2 * np.exp(5 * yy - 4)
    # Number of individual kelps in each depth layer - more towards the top
    ind = 2 - yy

    # Assume that kelp and water scatter the same,
    # but kelp absorbs much more light
    # Choose these to create diagonal dominance
    # non-DDOM coefs:
    #sct_water = 5
    #sct_kelp = 5
    #abs_water = 1
    #abs_kelp = 5
    # DDOM coefs:
    sct_water = 4 * nx
    sct_kelp = 4 * nx
    abs_water = 2 * np.pi * sct_water
    abs_kelp = 2 * abs_water
    iops = [vsf, abs_water, sct_water, abs_kelp, sct_kelp]

    scenario = gm2.KelpScenario(surf_bc_fun, iops)
    scenario.set_kelp(kelp_lengths, ind)
    scenario.set_num_grid_points(nx, ny, nth)
    scenario.calculate_pk()

    # What to do
    gen_sparsity_plots = True
    interactive_load_mat = False
    plot_kelp = False
    plot_irrad = False

    print("{}x{}x{}".format(nx, ny, nth))

    if gen_sparsity_plots:
        # Loop through all possible variable orderings
        # Only generate 210 matrices for now
        for ii, var_order in ([0, [2, 1, 0]], ):  # enumerate(it.permutations(range(3))):
            print()
            print("ii={}: {}".format(ii, var_order))

            # Determine common name for files
            # kelp1_[variable dimensions]_[variable order]
            name = ('ddom_{}x{}x{}_{}{}{}'.format(nx, ny, nth, *var_order))

            print("Creating matrix")
            scenario.gen_rte_matrix(var_order)

            print("Saving files")
            # Solve system & plot result
            print("Solving system")
            scenario.solve_system()
            scenario.reshape_rad()
            print("Calculating irradiance")

            scenario.calc_irrad()
            scenario.plot_irrad('../img/irrad/irrad_' + name + '.png')

            # Save mat file
            scenario.write_rte_system_mat('../mat/' + name)
            # Save sparsity plots - one coarse (spy) & one precise (int)
            #scenario.write_int_matrix_png('../img/sparsity/int_'+name+'.png')
            #scenario.plot_rte_matrix('../img/sparsity/spy_'+name+'.png')

    if plot_kelp:
        print("Plotting kelp")
        plt.figure(1)
        scenario.plot_kelp('../img/solve/kelp.png')

    if plot_irrad:
        print("Creating matrix")
        plt.figure(2)
        scenario.calculate_rte_matrix()
        #scenario.write_int_matrix_png('solve/sparsity.png')
        print("Solving system")
        scenario.solve_system()
        print("Calculating irradiance")
        scenario.calc_irrad()
        scenario.plot_irrad('../img/solve/irrad.png')
        print("Done!")

    if interactive_load_mat:
        print("Loading matrix")
        scenario.load_rte_system_mat('../mat/kelp1_50x50x32_012.mat',
                                     [0, 1, 2])
        print("Finished loading")
        IPython.embed()
Example #32
def main(argv):
    parser = ArgumentParser(usage=__doc__.lstrip())
    parser.add_argument("--verbose", "-v", action="count", default=1,
                        help="more verbosity")
    parser.add_argument("--no-build", "-n", action="store_true", default=False,
                        help="do not build the project (use system installed version)")
    parser.add_argument("--build-only", "-b", action="store_true", default=False,
                        help="just build, do not run any tests")
    parser.add_argument("--doctests", action="store_true", default=False,
                        help="Run doctests in module")
    parser.add_argument("--coverage", action="store_true", default=False,
                        help=("report coverage of project code. HTML output goes "
                              "under build/coverage"))
    parser.add_argument("--mode", "-m", default="fast",
                        help="'fast', 'full', or something that could be "
                             "passed to nosetests -A [default: fast]")
    parser.add_argument("--submodule", "-s", default=None,
                        help="Submodule whose tests to run (cluster, constants, ...)")
    parser.add_argument("--pythonpath", "-p", default=None,
                        help="Paths to prepend to PYTHONPATH")
    parser.add_argument("--tests", "-t", action='append',
                        help="Specify tests to run")
    parser.add_argument("--python", action="store_true",
                        help="Start a Python shell with PYTHONPATH set")
    parser.add_argument("--ipython", "-i", action="store_true",
                        help="Start IPython shell with PYTHONPATH set")
    parser.add_argument("--shell", action="store_true",
                        help="Start Unix shell with PYTHONPATH set")
    parser.add_argument("--debug", "-g", action="store_true",
                        help="Debug build")
    parser.add_argument("args", metavar="ARGS", default=[], nargs=REMAINDER,
                        help="Arguments to pass to Nose")
    args = parser.parse_args(argv)

    if args.pythonpath:
        for p in reversed(args.pythonpath.split(os.pathsep)):
            sys.path.insert(0, p)

    if not args.no_build:
        site_dir = build_project(args)
        sys.path.insert(0, site_dir)
        os.environ['PYTHONPATH'] = site_dir

    if args.python:
        import code
        code.interact()
        sys.exit(0)

    if args.ipython:
        import IPython
        IPython.embed()
        sys.exit(0)

    if args.shell:
        shell = os.environ.get('SHELL', 'sh')
        print("Spawning a Unix shell...")
        os.execv(shell, [shell])
        sys.exit(1)

    extra_argv = args.args

    if args.coverage:
        dst_dir = os.path.join('build', 'coverage')
        fn = os.path.join(dst_dir, 'coverage_html.js')
        if os.path.isdir(dst_dir) and os.path.isfile(fn):
            shutil.rmtree(dst_dir)
        extra_argv += ['--cover-html',
                       '--cover-html-dir='+dst_dir]

    if args.build_only:
        sys.exit(0)
    elif args.submodule:
        modname = PROJECT_MODULE + '.' + args.submodule
        try:
            __import__(modname)
            test = sys.modules[modname].test
        except (ImportError, KeyError, AttributeError):
            print("Cannot run tests for %s" % modname)
            sys.exit(2)
    elif args.tests:
        def test(*a, **kw):
            extra_argv = kw.pop('extra_argv', ())
            extra_argv = extra_argv + args.tests[1:]
            kw['extra_argv'] = extra_argv
            from numpy.testing import Tester
            return Tester(args.tests[0]).test(*a, **kw)
    else:
        __import__(PROJECT_MODULE)
        test = sys.modules[PROJECT_MODULE].test

    result = test(args.mode,
                  verbose=args.verbose,
                  extra_argv=args.args,
                  doctests=args.doctests,
                  coverage=args.coverage)

    if result.wasSuccessful():
        sys.exit(0)
    else:
        sys.exit(1)
Example #33
    def from_argv(cls, argv=None, **kwargs):
        prog = cls.server and 'irc3d' or 'irc3'
        doc = """
        Run an {prog} instance from a config file

        Usage: {prog} [options] <config>...

        Options:

        -h, --help          Display this help and exit
        --version           Output version information and exit
        --logdir DIRECTORY  Log directory to use instead of stderr
        --logdate           Show datetimes in console output
        --host HOST         Server name or ip
        --port PORT         Server port
        -v,--verbose        Increase verbosity
        -r,--raw            Show raw irc log on the console
        -d,--debug          Add some debug commands/utils
        -i,--interactive    Load an IPython console with a bot instance
        """.format(prog=prog)
        if not cls.server:
            doc += """
            --help-page         Output a reST page containing commands help
            """.strip()
        import os
        import docopt
        import textwrap
        args = argv or sys.argv[1:]
        args = docopt.docopt(textwrap.dedent(doc), args, version=version)
        cfg = utils.parse_config(
            cls.server and 'server' or 'bot', *args['<config>'])
        cfg.update(
            verbose=args['--verbose'],
            debug=args['--debug'],
        )
        cfg.update(kwargs)
        if args['--host']:  # pragma: no cover
            host = args['--host']
            cfg['host'] = host
            if host in ('127.0.0.1', 'localhost'):
                cfg['ssl'] = False
        if args['--port']:  # pragma: no cover
            cfg['port'] = args['--port']
        if args['--logdir'] or 'logdir' in cfg:
            logdir = os.path.expanduser(args['--logdir'] or cfg.get('logdir'))
            cls.logging_config = config.get_file_config(logdir)
        if args['--logdate']:  # pragma: no cover
            fmt = cls.logging_config['formatters']['console']
            fmt['format'] = config.TIMESTAMPED_FMT
        if args.get('--help-page'):  # pragma: no cover
            for v in cls.logging_config['handlers'].values():
                v['level'] = 'ERROR'
        if args['--raw']:
            cfg['raw'] = True
        context = cls.from_config(cfg)
        if args.get('--help-page'):  # pragma: no cover
            context.print_help_page()
        elif args['--interactive']:  # pragma: no cover
            import IPython
            IPython.embed()
            sys.exit(0)
        else:
            context.run(forever=not bool(kwargs))
        if kwargs or argv:
            return context
Example #34
    paveALICE.AddText("5 < #it{p}_{T,ch jet} < 30 GeV/#it{c}")
    paveALICE.AddText("|#eta_{jet}| < 0.5")
    paveALICE.Draw()

    return canvas


def main():
    ROOT.TH1.AddDirectory(False)
    ROOT.gStyle.SetOptTitle(0)
    ROOT.gStyle.SetOptStat(0)

    f = open("LHC15i2response_Train1399.yaml", 'r')
    config_b = yaml.load(f)
    f.close()

    f = open("LHC15i2response_Train1399.yaml", 'r')
    config_c = yaml.load(f)
    f.close()

    canvas = EfficiencyComparison(config_c, config_b)
    canvas.SaveAs("{0}/Efficiency_Paper.pdf".format(config_c["input_path"]))
    canvas.SaveAs("{0}/Efficiency_Paper.C".format(config_c["input_path"]))


if __name__ == '__main__':

    main()

    IPython.embed()
Example #35
def main(argv=None):
    if argv is None:
        argv = sys.argv
    try:
        # default values
        prog_name = argv[0]
        ignore_lock = True
        filename_template = None
        filename_output = None
        no_output = False
        list_invoices = False
        invoice_number = None
        invoice_id = None
        filename_from_invoice = False
        output_path = None
        with_ipshell = False

        try:
            opts, args = getopt.getopt(argv[1:], "fhliI:t:o:OP:", ["help"])
        except getopt.error as msg:
            raise Usage(msg)

        for opt in opts:
            if opt[0] in ["-f"]:
                print("ignoring lock")
                ignore_lock = True
            if opt[0] in ["-h", "--help"]:
                raise Usage("Help:")
            if opt[0] in ["-I"]:
                invoice_id = opt[1]
                print("using invoice ID '" + str(invoice_id) + "'.")
            if opt[0] in ["-i"]:
                print("Using ipshell")
                with_ipshell = True
            if opt[0] in ["-o"]:
                filename_output = opt[1]
                print("using output file", filename_output)
            if opt[0] in ["-O"]:
                if filename_output:
                    print("given output filename will be overwritten,")
                print("creating output filename from Invoice data.")
                filename_from_invoice = True
            if opt[0] in ["-t"]:
                filename_template = opt[1]
                print("using template file", filename_template)
            if opt[0] in ["-l"]:
                list_invoices = True
                print("listing invoices")
            if opt[0] in ["-P"]:
                output_path = opt[1]
                print("output path is", output_path + ".")

        # Check for correct input
        if len(args) > 1:
            print("opts:", opts, "args:", args)
            raise Usage("Only one input possible !")
        if len(args) == 0:
            raise Usage("No input given !")
        input_url = args[0]

        # Check for correct template
        if not filename_template:
            no_output = True
            if not list_invoices:
                raise Usage("No template given !")

        # Check for output file
        if not (filename_output or filename_from_invoice):
            if filename_template:
                filename_output = filename_template + ".out"
                print("no output filename given, will be:", filename_output)

    except Usage as err:
        if err.msg == "Help:":
            retcode = 0
        else:
            print("Error:", err.msg, file=sys.stderr)
            print("for help use --help", file=sys.stderr)
            retcode = 2

        print()
        print("Usage:")
        print()
        print("Invoke with", prog_name, "gnucash_url.")
        print("where input is")
        print("   filename")
        print("or file://filename")
        print("or mysql://user:password@host/databasename")
        print()
        print("-f             force open = ignore lock")
        print("-l             list all invoices")
        print("-h or --help   for this help")
        print("-I ID          use invoice ID")
        print("-t filename    use filename as template file")
        print("-o filename    use filename as output file")
        print(
            "-O             create output filename by date, owner and invoice number"
        )
        print(
            "-P path        path for output file. Overwrites path in -o option"
        )

        return retcode

    # Try to open the given input
    try:
        print("Opening", input_url, ".")
        session = gnucash.Session(input_url, ignore_lock=ignore_lock)
    except Exception as exception:
        print("Problem opening input.")
        print(exception)
        return 2

    book = session.book
    root_account = book.get_root_account()
    comm_table = book.get_table()
    EUR = comm_table.lookup("CURRENCY", "EUR")

    invoice_list = get_all_invoices(book)

    if list_invoices:
        for number, invoice in enumerate(invoice_list):
            print(str(number) + ")")
            print(invoice)

    if not (no_output):

        if invoice_id:
            invoice = book.InvoiceLookupByID(invoice_id)
            if not invoice:
                print("ID not found.")
                return 2

        if invoice_number:
            invoice = invoice_list[invoice_number]

        print("Using the following invoice:")
        print(invoice)

        path_template = os.path.dirname(filename_template)
        filename_template_basename = os.path.basename(filename_template)

        loader = jinja2.FileSystemLoader(path_template)
        env = jinja2.Environment(loader=loader)
        template = env.get_template(filename_template_basename)

        #company = gnucash_business.Company(book.instance)

        output = template.render(invoice=invoice,
                                 locale=locale)  #, company=company)

        if filename_from_invoice:
            filename_date = invoice.GetDatePosted().strftime(
                "%Y-%m-%d")  # something like 2014-11-01
            filename_owner_name = str(invoice.GetOwner().GetName())
            filename_invoice_id = str(invoice.GetID())
            filename_output = filename_date + "_" + filename_owner_name + "_" + filename_invoice_id + ".tex"

        if output_path:
            filename_output = os.path.join(output_path,
                                           os.path.basename(filename_output))

        print("Writing output", filename_output, ".")
        with open(filename_output, 'w') as f:
            f.write(output)

        if with_ipshell:
            import IPython
            IPython.embed()
Example #36
def interact(mydict=None, argv=None, mybanner=None, loglevel=20):
    global session
    import code, sys, pickle, os, getopt, re
    from .config import conf
    conf.interactive = True
    if loglevel is not None:
        conf.logLevel = loglevel

    the_banner = "Welcome to Scapy (%s)"
    if mybanner is not None:
        the_banner += "\n"
        the_banner += mybanner

    if argv is None:
        argv = sys.argv

    import atexit
    try:
        import rlcompleter, readline
    except ImportError:
        log_loading.info("Can't load Python libreadline or completer")
        READLINE = 0
    else:
        READLINE = 1

        class ScapyCompleter(rlcompleter.Completer):
            def global_matches(self, text):
                matches = []
                n = len(text)
                for lst in [dir(__builtin__), list(session.keys())]:
                    for word in lst:
                        if word[:n] == text and word != "__builtins__":
                            matches.append(word)
                return matches

            def attr_matches(self, text):
                m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
                if not m:
                    return
                expr, attr = m.group(1, 3)
                try:
                    object = eval(expr)
                except:
                    object = eval(expr, session)
                if isinstance(object, Packet) or isinstance(
                        object, Packet_metaclass):
                    words = [x for x in dir(object) if x[0] != "_"]
                    words += [x.name for x in object.fields_desc]
                else:
                    words = dir(object)
                    if hasattr(object, "__class__"):
                        words = words + rlcompleter.get_class_members(
                            object.__class__)
                matches = []
                n = len(attr)
                for word in words:
                    if word[:n] == attr and word != "__builtins__":
                        matches.append("%s.%s" % (expr, word))
                return matches

        readline.set_completer(ScapyCompleter().complete)
        readline.parse_and_bind("C-o: operate-and-get-next")
        readline.parse_and_bind("tab: complete")

    session = None
    session_name = ""
    STARTUP_FILE = DEFAULT_STARTUP_FILE
    PRESTART_FILE = DEFAULT_PRESTART_FILE

    iface = None
    try:
        opts = getopt.getopt(argv[1:], "hs:Cc:Pp:d")
        for opt, parm in opts[0]:
            if opt == "-h":
                _usage()
            elif opt == "-s":
                session_name = parm
            elif opt == "-c":
                STARTUP_FILE = parm
            elif opt == "-C":
                STARTUP_FILE = None
            elif opt == "-p":
                PRESTART_FILE = parm
            elif opt == "-P":
                PRESTART_FILE = None
            elif opt == "-d":
                conf.logLevel = max(1, conf.logLevel - 10)

        if len(opts[1]) > 0:
            raise getopt.GetoptError("Too many parameters : [%s]" %
                                     " ".join(opts[1]))

    except getopt.GetoptError as msg:
        log_loading.error(msg)
        sys.exit(1)

    if PRESTART_FILE:
        _read_config_file(PRESTART_FILE)

    scapy_builtins = __import__("all", globals(), locals(), ".").__dict__
    builtins.__dict__.update(scapy_builtins)
    globkeys = list(scapy_builtins.keys())
    globkeys.append("scapy_session")
    scapy_builtins = None  # XXX replace with "with" statement
    if mydict is not None:
        builtins.__dict__.update(mydict)
        globkeys += list(mydict.keys())

    conf.color_theme = DefaultTheme()
    if STARTUP_FILE:
        _read_config_file(STARTUP_FILE)

    if session_name:
        try:
            os.stat(session_name)
        except OSError:
            log_loading.info("New session [%s]" % session_name)
        else:
            try:
                try:
                    session = pickle.load(gzip.open(session_name, "rb"))
                except IOError:
                    session = pickle.load(open(session_name, "rb"))
                log_loading.info("Using session [%s]" % session_name)
            except EOFError:
                log_loading.error("Error opening session [%s]" % session_name)
            except AttributeError:
                log_loading.error(
                    "Error opening session [%s]. Attribute missing" %
                    session_name)

        if session:
            if "conf" in session:
                conf.configure(session["conf"])
                session["conf"] = conf
        else:
            conf.session = session_name
            session = {"conf": conf}

    else:
        session = {"conf": conf}

    builtins.__dict__["scapy_session"] = session

    if READLINE:
        if conf.histfile:
            try:
                readline.read_history_file(conf.histfile)
            except IOError:
                pass
        atexit.register(scapy_write_history_file, readline)

    atexit.register(scapy_delete_temp_files)

    IPYTHON = False
    if conf.interactive_shell.lower() == "ipython":
        try:
            import IPython
            IPYTHON = True
        except ImportError as e:
            log_loading.warning(
                "IPython not available. Using standard Python shell instead.")
            IPYTHON = False

    if IPYTHON:
        banner = the_banner % (
            conf.version) + " using IPython %s" % IPython.__version__

        # Old way to embed IPython kept for backward compatibility
        try:
            args = ['']  # IPython command line args (will be seen as sys.argv)
            ipshell = IPython.Shell.IPShellEmbed(args, banner=banner)
            ipshell(local_ns=session)
        except AttributeError as e:
            pass

        # In the IPython cookbook, see 'Updating-code-for-use-with-IPython-0.11-and-later'
        IPython.embed(user_ns=session, banner2=banner)

    else:
        code.interact(banner=the_banner % (conf.version),
                      local=session,
                      readfunc=conf.readfunc)

    if conf.session:
        save_session(conf.session, session)

    for k in globkeys:
        try:
            del (builtins.__dict__[k])
        except:
            pass
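
The two code paths above (the pre-0.11 IPShellEmbed attempt followed by IPython.embed) amount to a version-agnostic embedding pattern along these lines (a condensed sketch, not Scapy's exact code):

import IPython
if hasattr(IPython, 'embed'):       # IPython >= 0.11
    IPython.embed(user_ns=session, banner2=banner)
else:                               # legacy IPython < 0.11
    IPython.Shell.IPShellEmbed([''], banner=banner)(local_ns=session)
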
Example #37
def outer_main(barquery=None,
               beerfile=None,
               get_taps=True,
               get_cans=False,
               interactive=False,
               **kwargs):

    if barquery:
        barname, bar_url = get_bar(barquery)
        # beerlst, n_on_tap = get_beers(bar_url)
        # d_beers_beermenus, n_on_tap = get_beers(bar_url)
        d_beermenus = get_beers(bar_url)
        kwargs['d_beermenus'] = d_beermenus

        def is_served_as(beer, *args):
            servingtypes = {
                d['type'].lower()
                for d in d_beermenus[beer]['serving']
            }
            return any(arg in servingtypes for arg in args)

        is_on_tap = lambda beer: is_served_as(beer, 'draft', 'cask', 'crowler',
                                              'growler')
        is_bottled = lambda beer: is_served_as(beer, 'bottle', 'can')

        has_no_servinginfo = lambda beer: not d_beermenus[beer]['serving']

        beerlst = [
            beer for beer in d_beermenus.keys()
            if (is_on_tap(beer) or has_no_servinginfo(beer))
        ]
        beerlst_rest = [
            beer for beer in d_beermenus.keys() if is_bottled(beer)
        ]

        if barname.lower() == 'covenhoven':  # TODO drafts listed as bottle - eventually fix for good
            beerlst = list(d_beermenus.keys())
            beerlst_rest = []

    else:
        barname = beerfile.split('_')[-1]
        beerlst = get_beers_from_file(beerfile)
        # on_draft = get_beers_from_file(beerfile)
        d_beermenus = {}  #; n_on_tap = len(beerlst)

    print('\n what\'s on @ {} ?? \n'.format(barname.upper()))

    if beerfile or (barquery and get_taps):
        # beerlst_taps = beerlst[:n_on_tap]
        beerlst_taps = beerlst
        d_beers1 = alternate_main(beerlst_taps,
                                  with_key=(not get_cans),
                                  **kwargs)

    if barquery and get_cans:
        print('\nCANS & BOTTLES...\n')
        # beerlst_cans = beerlst[n_on_tap:]
        beerlst_cans = beerlst_rest
        d_beers2 = alternate_main(beerlst_cans, with_key=get_cans, **kwargs)

    if interactive:
        from itertools import chain
        for k, v in chain(d_beers1.items(), d_beers2.items()):
            d_beers = d_beermenus[k].update(**v)

        import IPython
        IPython.embed()
Example #38
            log_once("UniformDiscretizer1D: value smaller than min!")
            return 0
        if v > self.maxv:
            log_once("UniformDiscretizer1D: value larger than max!")
            return self.nr_bin - 1
        return int(np.clip((v - self.minv) / self.spacing, 0, self.nr_bin - 1))

    def get_distribution(self, v, smooth_factor=0.05, smooth_radius=2):
        """ return a smoothed one-hot distribution of the sample v.
        """
        b = self.get_bin(v)
        ret = np.zeros((self.nr_bin, ), dtype='float32')
        ret[b] = 1.0
        if v >= self.maxv or v <= self.minv:
            return ret
        try:
            for k in range(1, smooth_radius + 1):
                ret[b + k] = smooth_factor**k
        except IndexError:
            pass
        for k in range(1, min(smooth_radius + 1, b + 1)):
            ret[b - k] = smooth_factor**k
        ret /= ret.sum()
        return ret


if __name__ == '__main__':
    u = UniformDiscretizer1D(-10, 10, 0.12)
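    # Hedged usage sketch (illustrative values): query a bin index and the
    # smoothed one-hot distribution for a sample.
    print(u.get_bin(0.5))
    print(u.get_distribution(0.5))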
    import IPython as IP
    IP.embed(config=IP.terminal.ipapp.load_default_config())
Example No. 39
def main():

    import IPython
    IPython.embed()
Example No. 40
import sys

from whoosh.index import open_dir

import IPython

try:
    ix = open_dir('ex_index')
    print "{} documents in index `ix`".format(ix.doc_count())

    r = ix.reader()

    IPython.embed(banner1="""
Got index reader in `r`.

Play around with the contents of the index!
See http://pythonhosted.org/Whoosh/api/reading.html for help.
""")
except OSError:
    print >> sys.stderr, "Can't find index in 'ex_index'. Create it first!"
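
# A few things one might try in the embedded shell (standard Whoosh IndexReader
# calls; purely illustrative):
#     r.doc_count()              # number of documents in the index
#     list(r.all_terms())[:10]   # first few (fieldname, term) pairs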
Example No. 41
def main():
    global args, best_loss1
    args = parser.parse_args()

    args.distributed = args.world_size > 1

    if args.distributed:
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size)

    # create model
    model = DenseNet121(N_CLASSES).cuda()

    import IPython
    IPython.embed()

    model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.BCELoss().cuda()

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=0.001,
                                 betas=(0.9, 0.999),
                                 eps=1e-08,
                                 weight_decay=0)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_idx = np.genfromtxt(TRAIN_INDEXES, delimiter=',').astype('int64')
    train_dataset = ChestXrayDataSetHDF5(data_dir=HDF5_DATA_DIR,
                                         set_idx=train_idx,
                                         transform=transforms.Compose([
                                             RandomHorizontalFlip(),
                                             transforms.ToTensor(), normalize
                                         ]),
                                         target_transform=None)

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=(train_sampler is None),
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               sampler=train_sampler)

    val_idx = np.genfromtxt(TRAIN_INDEXES, delimiter=',').astype('int64')
    val_dataset = ChestXrayDataSetHDF5(data_dir=HDF5_DATA_DIR,
                                       set_idx=val_idx,
                                       transform=transforms.Compose([
                                           RandomHorizontalFlip(),
                                           transforms.ToTensor(), normalize
                                       ]),
                                       target_transform=None)

    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        loss1 = validate(val_loader, model, criterion)

        # remember best (lowest) validation loss and save checkpoint
        is_best = loss1 < best_loss1
        best_loss1 = min(loss1, best_loss1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }, is_best)
Example No. 42
    def run_epoch_recurrence_one_batch(self,
                                       batch,
                                       is_training=False,
                                       source='agent'):

        if is_training:
            self.acmodel.train()
        else:
            self.acmodel.eval()

        batch_old = batch
        batch = self.transform_demos(batch, source)
        batch.sort(key=len, reverse=True)
        # Constructing flat batch and indices pointing to start of each demonstration
        obss = []
        action_true = []
        action_teacher = []
        done = []
        inds = [0]

        for demo in batch:
            obss.append(demo[0])
            action_true.append(demo[1])
            done.append(demo[2])
            action_teacher.append(demo[3])
            inds.append(inds[-1] + len(demo[0]))
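        # Illustration (hypothetical lengths): demos of lengths [5, 3, 2] yield
        # inds == [0, 5, 8, 10]; the trailing 10 is dropped further below, so
        # inds ends up pointing at the first frame of each demonstration.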

        # (batch size * avg demo length , 3), where 3 is for (state, action, done)
        try:
            obss = np.concatenate(obss)
        except:
            print("?")
            import IPython
            IPython.embed()
        action_true = np.concatenate(action_true)
        assert len(action_true.shape) == 1
        done = np.concatenate(done)
        action_teacher = np.concatenate(action_teacher)
        inds = inds[:-1]
        num_frames = len(obss)

        mask = np.ones([len(obss)], dtype=np.float64)
        try:
            mask[inds] = 0
        except:
            print("???")
            print("BATCH LENGTH", len(batch))
            for demo in batch:
                print("Obs", demo[0].shape)
                print("LEN", len(demo))
            print("INDS", inds, len(inds))
            print("MASK", mask.shape)
            import IPython
            IPython.embed()
        mask = torch.tensor(mask, device=self.device,
                            dtype=torch.float).unsqueeze(1)

        # Observations, true action, values and done for each of the stored demonstrations
        action_true = torch.tensor([action for action in action_true],
                                   device=self.device,
                                   dtype=torch.long)

        # Memory to be stored
        memories = torch.zeros([len(obss), self.acmodel.memory_size],
                               device=self.device)
        episode_ids = np.zeros(len(obss))
        memory = torch.zeros([len(batch), self.acmodel.memory_size],
                             device=self.device)

        preprocessed_first_obs = self.obss_preprocessor(
            obss[inds], self.device)
        instr = self.acmodel.get_instr(preprocessed_first_obs)
        instr_embedding = self.acmodel._get_instr_embedding(instr)

        # Loop terminates when every observation in the flat_batch has been handled
        while True:
            # taking observations and done located at inds
            obs = obss[inds]
            done_step = done[inds]
            preprocessed_obs = self.obss_preprocessor(obs, self.device)

            with torch.no_grad():
                # taking the memory till len(inds), as demos beyond that have already finished
                dist, info = self.acmodel(preprocessed_obs,
                                          memory[:len(inds), :],
                                          instr_embedding[:len(inds)],
                                          self.distill_with_teacher)
                new_memory = info['memory']

            memories[inds, :] = memory[:len(inds), :]
            memory[:len(inds), :] = new_memory
            episode_ids[inds] = range(len(inds))

            # Updating inds, by removing those indices corresponding to which the demonstrations have finished
            inds = inds[:int(len(inds) - sum(done_step))]
            if len(inds) == 0:
                break

            # Incrementing the remaining indices
            inds = [index + 1 for index in inds]

        # Here, the actual backprop up to args.recurrence happens
        final_loss = 0
        per_token_correct = [0, 0, 0, 0, 0, 0, 0]
        per_token_teacher_correct = [0, 0, 0, 0, 0, 0, 0]
        per_token_count = [0, 0, 0, 0, 0, 0, 0]
        per_token_teacher_count = [0, 0, 0, 0, 0, 0, 0]
        per_token_agent_count = [0, 0, 0, 0, 0, 0, 0]
        final_entropy, final_policy_loss, final_value_loss = 0, 0, 0

        indexes = self.starting_indexes(num_frames)
        memory = memories[indexes]
        accuracy = 0
        total_frames = len(indexes) * self.args.recurrence
        accuracy_list = []
        lengths_list = []
        agent_running_count_long = 0
        teacher_running_count_long = 0
        for i in range(self.args.recurrence):
            obs = obss[indexes]
            preprocessed_obs = self.obss_preprocessor(obs, device=self.device)

            action_step = action_true[indexes]

            mask_step = mask[indexes]
            dist, info = self.acmodel(preprocessed_obs, memory * mask_step,
                                      instr_embedding[episode_ids[indexes]],
                                      self.distill_with_teacher)
            memory = info["memory"]

            entropy = dist.entropy().mean()
            policy_loss = -dist.log_prob(action_step).mean()
            loss = policy_loss - self.args.entropy_coef * entropy
            action_pred = dist.probs.max(1, keepdim=False)[1]
            accuracy_list.append(float((action_pred == action_step).sum()))
            lengths_list.append(
                (action_pred.shape, action_step.shape, indexes.shape))
            accuracy += float(
                (action_pred == action_step).sum()) / total_frames
            final_loss += loss
            final_entropy += entropy
            final_policy_loss += policy_loss

            action_step = action_step.detach().cpu().numpy()  # ground-truth action
            action_pred = action_pred.detach().cpu().numpy()  # action we took
            agent_running_count = 0
            teacher_running_count = 0
            for j in range(len(per_token_count)):
                token_indices = np.where(action_step == j)[0]
                count = len(token_indices)
                correct = np.sum(
                    action_step[token_indices] == action_pred[token_indices])
                per_token_correct[j] += correct
                per_token_count[j] += count

                action_teacher_index = action_teacher[indexes]
                assert action_teacher_index.shape == action_pred.shape == action_step.shape, (
                    action_teacher_index.shape, action_pred.shape,
                    action_step.shape)
                teacher_token_indices = np.where(action_teacher_index == j)[0]
                teacher_count = len(teacher_token_indices)
                teacher_correct = np.sum(
                    action_teacher_index[teacher_token_indices] ==
                    action_pred[teacher_token_indices])
                per_token_teacher_correct[j] += teacher_correct
                per_token_teacher_count[j] += teacher_count
                teacher_running_count += teacher_count
                agent_running_count += count
                agent_running_count_long += teacher_count
                teacher_running_count_long += teacher_count
            assert np.max(action_step) < len(per_token_count), (
                np.max(action_step), action_step)
            assert np.min(action_step) >= 0, (np.min(action_step), action_step)
            assert np.max(action_pred) < len(per_token_count), (
                np.max(action_pred), action_pred)
            assert np.min(action_pred) >= 0, (np.min(action_pred), action_pred)
            assert np.max(action_teacher_index) < len(per_token_count), (
                np.max(action_teacher_index), action_teacher_index)
            assert np.min(action_teacher_index) >= 0, (
                np.min(action_teacher_index), action_teacher_index)
            indexes += 1

        final_loss /= self.args.recurrence

        if is_training:
            self.optimizer.zero_grad()
            final_loss.backward()
            self.optimizer.step()

        log = {}
        log["Entropy"] = float(final_entropy / self.args.recurrence)
        log["Loss"] = float(final_policy_loss / self.args.recurrence)
        log["Accuracy"] = float(accuracy)
        assert float(accuracy) <= 1, float(accuracy)
        teacher_numerator = 0
        teacher_denominator = 0
        agent_numerator = 0
        agent_denominator = 0
        for i, (correct, count, teacher_correct, teacher_count) in enumerate(
                zip(per_token_correct, per_token_count,
                    per_token_teacher_correct, per_token_teacher_count)):
            assert correct <= count, (correct, count)
            assert teacher_correct <= teacher_count, (teacher_correct,
                                                      teacher_count)
            if count > 0:
                log[f'Accuracy_{i}'] = correct / count
                agent_numerator += correct
                agent_denominator += count
            if teacher_count > 0:
                log[f'TeacherAccuracy_{i}'] = teacher_correct / teacher_count
                teacher_numerator += teacher_correct
                teacher_denominator += teacher_count

        assert agent_denominator == teacher_denominator, (
            agent_denominator, teacher_denominator, per_token_count,
            per_token_teacher_count)
        assert abs(float(accuracy) -
                   agent_numerator / agent_denominator) < .001, (
                       accuracy, agent_numerator / agent_denominator)
        log["TeacherAccuracy"] = float(teacher_numerator / teacher_denominator)

        return log
Example No. 43
    def optimize_policy(self, itr, all_samples_data, all_samples_data_latent):
        # We collected the rollouts to compute the grads and then the test.
        assert len(all_samples_data) == self.num_grad_updates + 1
        sess = tf.get_default_session()
        if not self.use_maml:
            all_samples_data = [all_samples_data[0]]

        input_list = []
        for step in range(
                len(all_samples_data)):  # these are the gradient steps
            obs_list, action_list, adv_list, noise_list, task_idx_list = [], [], [], [], []
            for i in range(self.meta_batch_size):

                inputs = ext.extract(all_samples_data[step][i], "observations",
                                     "actions", "advantages", "noises",
                                     "task_idxs")
                print(inputs)
                obs_list.append(inputs[0])
                action_list.append(inputs[1])
                adv_list.append(inputs[2])
                noise_list.append(inputs[3])
                task_idx_list.append(inputs[4])
            input_list += obs_list + action_list + adv_list + noise_list + task_idx_list  # [ [obs_0], [act_0], [adv_0], [obs_1], ... ]
            if step == 0:
                adv_list_latent, z_list_latent, task_idx_list_latent = [], [], []
                for i in range(self.meta_batch_size):

                    inputs = ext.extract(all_samples_data_latent[step][i],
                                         "advantages", "noises", "task_idxs")
                    #import ipdb
                    #ipdb.set_trace()
                    means = tf.gather(self.policy.all_params['latent_means'],
                                      inputs[-1])
                    stds = tf.gather(self.policy.all_params['latent_stds'],
                                     inputs[-1])
                    zs = sess.run(means + inputs[-2] * tf.exp(stds))
                    adv_list_latent.append(inputs[0])
                    z_list_latent.append(zs)
                    task_idx_list_latent.append(inputs[2])
                input_list += adv_list_latent + z_list_latent + task_idx_list_latent
            #import ipdb
            #ipdb.set_trace()
            if step == 0:  ##CF not used?
                init_inputs = input_list

        if self.use_maml:
            dist_info_list = []
            for i in range(self.meta_batch_size):
                agent_infos = all_samples_data[
                    self.kl_constrain_step][i]['agent_infos']
                dist_info_list += [
                    agent_infos[k]
                    for k in self.policy.distribution.dist_info_keys
                ]
            input_list += tuple(dist_info_list)
            logger.log("Computing KL before")
            mean_kl_before = self.optimizer.constraint_val(input_list)
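
        # Note: the kl_scheme strings below appear to encode a linear ramp of the
        # KL weight, roughly "<start>step<k>to<cap>": start near <start> and grow
        # toward <cap> as the iteration count increases (interpretation inferred
        # from the branches below; not documented in the original).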

        if self.kl_scheme is None:
            curr_kl_weighting = self.kl_weighting
        elif self.kl_scheme == "0.01step4to0.05":
            curr_kl_weighting = min(0.05, 0.01 + (itr // 10) * 0.01)
        elif self.kl_scheme == "0.01step8to0.1":
            curr_kl_weighting = min(0.1, 0.01 + (itr // 5) * 0.001)

        elif self.kl_scheme == "0.01step8to0.05":
            curr_kl_weighting = min(0.05, 0.01 + (itr // 5) * 0.0005)

        elif self.kl_scheme == "0.01step8to0.2":
            curr_kl_weighting = min(0.2, 0.01 + (itr // 5) * 0.002)

        elif self.kl_scheme == "0.002step100to0.1":
            curr_kl_weighting = min(0.1, 0.002 + (itr // 5) * 0.001)

        elif self.kl_scheme == "0.002step100to0.02":
            curr_kl_weighting = min(0.02, 0.002 + (itr // 5) * 0.0002)

        elif self.kl_scheme == "0.002step100to0.05":
            curr_kl_weighting = min(0.05, 0.002 + (itr // 5) * 0.0005)

        elif self.kl_scheme == "0.01stepcontto0.05":
            curr_kl_weighting = min(0.05, 0.01 + (itr) * 0.001)
        elif self.kl_scheme == "0.01stepcontto0.1":
            curr_kl_weighting = min(0.1, 0.01 + (itr) * 0.001)
        elif self.kl_scheme == "0.01step8to0.3":
            curr_kl_weighting = min(0.3, 0.01 + (itr) * 0.003)
        else:
            print("ERROR: unrecognized kl_scheme: {}".format(self.kl_scheme))
            import IPython
            IPython.embed()
        input_list += ([curr_kl_weighting], )
        logger.log("Computing loss before")
        loss_before = self.optimizer.loss(input_list)
        logger.log("Optimizing")
        self.optimizer.optimize(input_list)
        logger.log("Computing loss after")
        loss_after = self.optimizer.loss(input_list)
        if self.use_maml:
            logger.log("Computing KL after")
            mean_kl = self.optimizer.constraint_val(input_list)
            logger.record_tabular('MeanKLBefore',
                                  mean_kl_before)  # this now won't be 0!
            logger.record_tabular('MeanKL', mean_kl)
        logger.record_tabular('LossBefore', loss_before)
        logger.record_tabular('LossAfter', loss_after)
        logger.record_tabular('dLoss', loss_before - loss_after)
        return dict()
Example No. 44
params.problem_file = '/tmp/lkh/m{0}n{1}task.m-pdtsp'.format(m, n)
params.max_trials = 1
params.runs = 3
params.special = True
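# (max_trials, runs and special correspond to standard LKH-3 solver settings;
#  the lkh.parser / lkh.solver helpers used here are assumed to expose them.)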
lkh.parser.write_tsplib(params.problem_file,
                        graph,
                        params,
                        nodelist=nodelist,
                        demand=demand,
                        capacity=2,
                        depot=0)
tour, info = lkh.solver.lkh_solver(params)

import IPython

IPython.embed(banner1="")
"""
# Reduce the velocity limits
velocity_limits = robot.GetDOFVelocityLimits()
robot.SetDOFVelocityLimits(0.25*velocity_limits)
# Set the viewer camera
Tcam = br.euler.to_transform(*np.deg2rad([-120, 0, 90]))
Tcam[:3,3] = [2.5, 0.25, 2]
viewer.SetCamera(Tcam)
# Find the IK solutions for both arms
Tleft = env.GetKinBody('cube_05').GetTransform()
Tleft[:3,3] += [0, 0, 0.065]
Tright = env.GetKinBody('cube_01').GetTransform()
Tright[:3,3] += [0, 0, 0.065]
# Estimate torso angle
torso_angle = bimanual.estimate_torso_angle(Tleft[:3,3], Tright[:3,3])
"""
Example No. 45
 def embed_ipython(args, context):
     # allow embedding an IPython session in the middle of Scheme code
     import IPython
     IPython.embed()
Example No. 46
def main(_):
    if FLAGS.self_test:
        print('Running self-test.')
        train_data, train_labels = fake_data(256)
        validation_data, validation_labels = fake_data(EVAL_BATCH_SIZE)
        test_data, test_labels = fake_data(EVAL_BATCH_SIZE)
        num_epochs = 1
    else:
        # Get the data.
        train_data_filename = maybe_download('train-images-idx3-ubyte.gz')
        train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')
        test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')
        test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')

        # Extract it into numpy arrays.
        train_data = extract_data(train_data_filename, 60000)
        train_labels = extract_labels(train_labels_filename, 60000)
        test_data = extract_data(test_data_filename, 10000)
        test_labels = extract_labels(test_labels_filename, 10000)

        # Generate a validation set.
        validation_data = train_data[:VALIDATION_SIZE, ...]
        validation_labels = train_labels[:VALIDATION_SIZE]
        train_data = train_data[VALIDATION_SIZE:, ...]
        train_labels = train_labels[VALIDATION_SIZE:]
        num_epochs = NUM_EPOCHS
    train_size = train_labels.shape[0]

    # This is where training samples and labels are fed to the graph.
    # These placeholder nodes will be fed a batch of training data at each
    # training step using the {feed_dict} argument to the Run() call below.
    train_data_node = tf.placeholder(data_type(),
                                     shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE,
                                            NUM_CHANNELS))
    train_labels_node = tf.placeholder(tf.int64, shape=(BATCH_SIZE, ))
    eval_data = tf.placeholder(data_type(),
                               shape=(EVAL_BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE,
                                      NUM_CHANNELS))

    # The variables below hold all the trainable weights. They are passed an
    # initial value which will be assigned when we call:
    # {tf.global_variables_initializer().run()}
    conv1_weights = tf.Variable(
        tf.truncated_normal(
            [5, 5, NUM_CHANNELS, 32],  # 5x5 filter, depth 32.
            stddev=0.1,
            seed=SEED,
            dtype=data_type()))
    conv1_biases = tf.Variable(tf.zeros([32], dtype=data_type()))
    conv2_weights = tf.Variable(
        tf.truncated_normal([5, 5, 32, 64],
                            stddev=0.1,
                            seed=SEED,
                            dtype=data_type()))
    conv2_biases = tf.Variable(tf.constant(0.1, shape=[64], dtype=data_type()))
    fc1_weights = tf.Variable(  # fully connected, depth 512.
        tf.truncated_normal([IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64, 512],
                            stddev=0.1,
                            seed=SEED,
                            dtype=data_type()))
    fc1_biases = tf.Variable(tf.constant(0.1, shape=[512], dtype=data_type()))
    fc2_weights = tf.Variable(
        tf.truncated_normal([512, NUM_LABELS],
                            stddev=0.1,
                            seed=SEED,
                            dtype=data_type()))
    fc2_biases = tf.Variable(
        tf.constant(0.1, shape=[NUM_LABELS], dtype=data_type()))

    # We will replicate the model structure for the training subgraph, as well
    # as the evaluation subgraphs, while sharing the trainable parameters.
    def model(data, train=False):
        """The Model definition."""
        # 2D convolution, with 'SAME' padding (i.e. the output feature map has
        # the same size as the input). Note that {strides} is a 4D array whose
        # shape matches the data layout: [image index, y, x, depth].
        conv = tf.nn.conv2d(data,
                            conv1_weights,
                            strides=[1, 1, 1, 1],
                            padding='SAME')
        # Bias and rectified linear non-linearity.
        relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases))
        # Max pooling. The kernel size spec {ksize} also follows the layout of
        # the data. Here we have a pooling window of 2, and a stride of 2.
        pool = tf.nn.max_pool(relu,
                              ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1],
                              padding='SAME')
        conv = tf.nn.conv2d(pool,
                            conv2_weights,
                            strides=[1, 1, 1, 1],
                            padding='SAME')
        relu = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases))
        pool = tf.nn.max_pool(relu,
                              ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1],
                              padding='SAME')
        # Reshape the feature map cuboid into a 2D matrix to feed it to the
        # fully connected layers.
        pool_shape = pool.get_shape().as_list()
        reshape = tf.reshape(
            pool,
            [pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])
        # Fully connected layer. Note that the '+' operation automatically
        # broadcasts the biases.
        hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
        # Add a 50% dropout during training only. Dropout also scales
        # activations such that no rescaling is needed at evaluation time.
        if train:
            hidden = tf.nn.dropout(hidden, 0.5, seed=SEED)
        return tf.matmul(hidden, fc2_weights) + fc2_biases

    # Training computation: logits + cross-entropy loss.
    logits = model(train_data_node, True)
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=train_labels_node, logits=logits))

    # L2 regularization for the fully connected parameters.
    regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) +
                    tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases))
    # Add the regularization term to the loss.
    loss += 5e-4 * regularizers

    # Optimizer: set up a variable that's incremented once per batch and
    # controls the learning rate decay.
    batch = tf.Variable(0, dtype=data_type())
    # Decay once per epoch, using an exponential schedule starting at 0.01.
    learning_rate = tf.train.exponential_decay(
        0.01,  # Base learning rate.
        batch * BATCH_SIZE,  # Current index into the dataset.
        train_size,  # Decay step.
        0.95,  # Decay rate.
        staircase=True)
    # Use simple momentum for the optimization.
    optimizer = tf.train.MomentumOptimizer(learning_rate,
                                           0.9).minimize(loss,
                                                         global_step=batch)

    # Predictions for the current training minibatch.
    train_prediction = tf.nn.softmax(logits)

    # Predictions for the test and validation, which we'll compute less often.
    eval_logits = model(eval_data)
    eval_prediction = tf.nn.softmax(eval_logits)

    # Small utility function to evaluate a dataset by feeding batches of data to
    # {eval_data} and pulling the results from {eval_predictions}.
    # Saves memory and enables this to run on smaller GPUs.
    def eval_in_batches(data, sess):
        """Get all predictions for a dataset by running it in small batches."""
        size = data.shape[0]
        if size < EVAL_BATCH_SIZE:
            raise ValueError("batch size for evals larger than dataset: %d" %
                             size)
        predictions = numpy.ndarray(shape=(size, NUM_LABELS),
                                    dtype=numpy.float32)
        for begin in xrange(0, size, EVAL_BATCH_SIZE):
            end = begin + EVAL_BATCH_SIZE
            if end <= size:
                predictions[begin:end, :] = sess.run(
                    eval_prediction,
                    feed_dict={eval_data: data[begin:end, ...]})
            else:
                batch_predictions = sess.run(
                    eval_prediction,
                    feed_dict={eval_data: data[-EVAL_BATCH_SIZE:, ...]})
                predictions[begin:, :] = batch_predictions[begin - size:, :]
        return predictions

    # Create a local session to run the training.
    start_time = time.time()
    saver = tf.compat.v1.train.Saver(keep_checkpoint_every_n_hours=2,
                                     max_to_keep=2)
    import IPython
    IPython.embed()
    raise SystemExit('stopping after interactive inspection')
    with tf.Session() as sess:
        # Run all the initializers to prepare the trainable parameters.
        tf.global_variables_initializer().run()
        print('Initialized!')
        # Loop through training steps.
        for step in xrange(int(num_epochs * train_size) // BATCH_SIZE):
            # Compute the offset of the current minibatch in the data.
            # Note that we could use better randomization across epochs.
            offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)
            batch_data = train_data[offset:(offset + BATCH_SIZE), ...]
            batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
            # This dictionary maps the batch data (as a numpy array) to the
            # node in the graph it should be fed to.
            feed_dict = {
                train_data_node: batch_data,
                train_labels_node: batch_labels
            }
            # Run the optimizer to update weights.
            sess.run(optimizer, feed_dict=feed_dict)
            # print some extra information once reach the evaluation frequency
            if step % EVAL_FREQUENCY == 0:
                # fetch some extra nodes' data
                l, lr, predictions = sess.run(
                    [loss, learning_rate, train_prediction],
                    feed_dict=feed_dict)
                elapsed_time = time.time() - start_time
                start_time = time.time()
                print('Step %d (epoch %.2f), %.1f ms' %
                      (step, float(step) * BATCH_SIZE / train_size,
                       1000 * elapsed_time / EVAL_FREQUENCY))
                print('Minibatch loss: %.3f, learning rate: %.6f' % (l, lr))
                print('Minibatch error: %.1f%%' %
                      error_rate(predictions, batch_labels))
                print('Validation error: %.1f%%' % error_rate(
                    eval_in_batches(validation_data, sess), validation_labels))
                sys.stdout.flush()

        saver.save(sess, os.path.join(MODEL_DIR, 'model'))
        # Finally print the result!
        test_error = error_rate(eval_in_batches(test_data, sess), test_labels)
        print('Test error: %.1f%%' % test_error)
        if FLAGS.self_test:
            print('test_error', test_error)
            assert test_error == 0.0, 'expected 0.0 test_error, got %.2f' % (
                test_error, )
Example No. 47
def warp_slice(inp_src: DataSource,
               patch_shape: Union[Tuple[int, ...], np.ndarray],
               M: np.ndarray,
               target_src: Optional[DataSource] = None,
               target_patch_shape: Optional[Union[Tuple[int],
                                                  np.ndarray]] = None,
               target_discrete_ix: Optional[Sequence[int]] = None,
               input_discrete_ix: Optional[Sequence[int]] = None,
               debug: bool = False) -> Tuple[np.ndarray, Optional[np.ndarray]]:
    """
    Cuts a warped slice out of the input image and out of the target_src image.
    Warping is applied by multiplying the original source coordinates with
    the inverse of the homogeneous (forward) transformation matrix ``M``.

    "Source coordinates" (``src_coords``) signify the coordinates of voxels in
    ``inp_src`` and ``target_src`` that are used to compose their respective warped
    versions. The idea here is that not the images themselves, but the
    coordinates from where they are read are warped. This allows for much higher
    efficiency for large image volumes because we don't have to calculate the
    expensive warping transform for the whole image, but only for the voxels
    that we eventually want to use for the new warped image.
    The transformed coordinates usually don't align to the discrete
    voxel grids of the original images (meaning they are not integers), so the
    new voxel values are obtained by linear interpolation.

    Parameters
    ----------
    inp_src
        Input image source (in HDF5)
    patch_shape
        (spatial only) Patch shape ``(D, H, W)``
        (spatial shape of the neural network's input node)
    M
        Forward warping transformation matrix (4x4).
        Must contain translations in source and target_src array.
    target_src
        Optional target source array to be extracted from in the same way.
    target_patch_shape
        Patch size for the ``target_src`` array.
    target_discrete_ix
        List of target channels that contain discrete values.
        By default (``None``), every channel is seen as discrete (this is
        generally the case for classification tasks).
        This information is used to decide what kind of interpolation should
        be used for reading target data:
        - discrete targets are obtained by nearest-neighbor interpolation
        - non-discrete (continuous) targets are linearly interpolated.
    input_discrete_ix
        List of input channels that contain discrete values.
        By default (``None``), no channel is seen as discrete (generally
        inputs are real world images).
        See target_discrete_ix for the effect on input interpolation.
    debug
        If ``True``, enable additional sanity checks to catch warping issues
        early (disabled by default).

    Returns
    -------
    inp
        Warped input image slice
    target
        Warped target_src image slice
        or ``None``, if ``target_src is None``.
    """

    patch_shape = tuple(patch_shape)
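
    # Illustrative sketch of the coordinate mapping described in the docstring
    # (not part of the original routine): a single destination voxel coordinate
    # is pulled back into source space roughly like
    #     dest = np.array([i, j, k, 1.0])   # homogeneous destination index
    #     src = np.linalg.inv(M) @ dest     # inverse of the forward transform M
    #     src = src[:3] / src[3]            # homogeneous divide
    # and the output voxel value is then interpolated from inp_src at `src`.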
    if len(inp_src.shape) == 3:
        n_f = 1
    elif len(inp_src.shape) == 4:
        n_f = inp_src.shape[0]
    else:
        raise ValueError(f'Can\'t handle inp_src shape {inp_src.shape}')

    # Spatial shapes of input and target data sources
    inp_src_shape = np.array(inp_src.shape[-3:])

    M_inv = np.linalg.inv(M.astype(np.float64)).astype(floatX)  # stability...
    dest_corners = make_dest_corners(patch_shape)
    src_corners = np.dot(M_inv, dest_corners.T).T
    if np.any(M[3, :3] != 0):  # homogeneous divide
        src_corners /= src_corners[:, 3][:, None]

    # check corners
    src_corners = src_corners[:, :3]
    lo = np.min(np.floor(src_corners), 0).astype(np.int)
    hi = np.max(np.ceil(src_corners + 1), 0).astype(np.int)
    # compute/transform dense coords
    dest_coords = make_dest_coords(patch_shape)
    src_coords = np.tensordot(dest_coords, M_inv, axes=[[-1], [1]])
    if np.any(M[3, :3] != 0):  # homogeneous divide
        src_coords /= src_coords[..., 3][..., None]
    # cut patch
    src_coords = src_coords[..., :3]

    # TODO: WIP code, integrate this into the warping pipeline with config options
    # Perform elastic deformation on warped coordinates so we don't have
    #  to interpolate twice.
    # For more details, see elektronn3.data.transforms.ElasticTransform
    elastic = False
    if elastic:
        sigma = 4
        alpha = 40
        aniso_factor = 2

        for i in range(3):
            # For each coordinate of dimension i, build a random displacement,
            #  smooth it with sigma and multiply it by alpha
            elastic_displacement = gaussian_filter(
                np.random.rand(*patch_shape) * 2 - 1,
                sigma,
                mode='constant',
                cval=0) * alpha
            # Apply anisotropy correction
            if i == 0 and aniso_factor != 1:
                elastic_displacement /= aniso_factor
            # Apply deformation
            src_coords[..., i] += elastic_displacement
            # Clip out-of-bounds coordinates back to original cube edges to
            #  prevent out-of-bounds reading
            np.clip(src_coords[..., i],
                    lo[i],
                    hi[i] - 1,
                    out=src_coords[..., i])

    if target_src is not None:
        target_src_shape = np.array(target_src.shape[-3:])
        target_patch_shape = tuple(target_patch_shape)
        n_f_t = target_src.shape[0] if target_src.ndim == 4 else 1

        target_src_offset = np.subtract(inp_src_shape, target_src.shape[-3:])
        if np.any(np.mod(target_src_offset, 2)):
            raise ValueError("targets must be centered w.r.t. images")
        target_src_offset //= 2

        target_offset = np.subtract(patch_shape, target_patch_shape)
        if np.any(np.mod(target_offset, 2)):
            raise ValueError("targets must be centered w.r.t. images")
        target_offset //= 2

        src_coords_target = src_coords[
            target_offset[0]:(target_offset[0] + target_patch_shape[0]),
            target_offset[1]:(target_offset[1] + target_patch_shape[1]),
            target_offset[2]:(target_offset[2] + target_patch_shape[2])]
        # shift coords to be w.r.t. to origin of target_src array
        lo_targ = np.floor(
            src_coords_target.min(2).min(1).min(0) - target_src_offset).astype(
                np.int)
        hi_targ = np.ceil(
            src_coords_target.max(2).max(1).max(0) + 1 -
            target_src_offset).astype(np.int)
        if np.any(lo_targ < 0) or np.any(hi_targ >= target_src_shape - 1):
            raise WarpingOOBError("Out of bounds for target_src")

    if np.any(lo < 0) or np.any(hi >= inp_src_shape - 1):
        raise WarpingOOBError("Out of bounds for inp_src")

    # Slice and interpolate input
    # Slice to hi + 1 because interpolation potentially needs this value.
    img_cut = slice_3d(inp_src, lo, hi + 1, dtype=floatX)
    if img_cut.ndim == 3:
        img_cut = img_cut[None]
    inp = np.zeros((n_f, ) + patch_shape, dtype=floatX)
    lo = lo.astype(floatX)

    if debug and np.any(
        (src_coords - lo).max(2).max(1).max(0) >= img_cut.shape[-3:]):
        raise WarpingSanityError(
            f'src_coords check failed (too high).\n{(src_coords - lo).max(2).max(1).max(0), img_cut.shape[-3:]}'
        )
    if debug and np.any((src_coords - lo).min(2).min(1).min(0) < 0):
        raise WarpingSanityError(
            f'src_coords check failed (negative indices).\n{(src_coords - lo).min(2).min(1).min(0)}'
        )

    if input_discrete_ix is None:
        input_discrete_ix = [False for i in range(img_cut.shape[0])]
    else:
        input_discrete_ix = [
            i in input_discrete_ix for i in range(img_cut.shape[0])
        ]

    for k, discr in enumerate(input_discrete_ix):
        (map_coordinates_nearest if discr else map_coordinates_linear)(
            img_cut[k], src_coords, lo, inp[k])

    # Slice and interpolate target
    if target_src is not None:
        # dtype is float as well here because of the static typing of the
        # numba-compiled map_coordinates functions
        # Slice to hi + 1 because interpolation potentially needs this value.
        target_cut = slice_3d(target_src, lo_targ, hi_targ + 1, dtype=floatX)
        if target_cut.ndim == 3:
            target_cut = target_cut[None]
        src_coords_target = np.ascontiguousarray(src_coords_target,
                                                 dtype=floatX)
        target = np.zeros((n_f_t, ) + target_patch_shape, dtype=floatX)
        lo_targ = (lo_targ + target_src_offset).astype(floatX)
        if target_discrete_ix is None:
            target_discrete_ix = [True for i in range(n_f_t)]
        else:
            target_discrete_ix = [
                i in target_discrete_ix for i in range(n_f_t)
            ]

        if debug and np.any(
            (src_coords_target -
             lo_targ).max(2).max(1).max(0) >= target_cut.shape[-3:]):
            raise WarpingSanityError(
                f'src_coords_target check failed (too high).\n{(src_coords_target - lo_targ).max(2).max(1).max(0)}\n{target_cut.shape[-3:]}'
            )
        if debug and np.any(
            (src_coords_target - lo_targ).min(2).min(1).min(0) < 0):
            raise WarpingSanityError(
                f'src_coords_target check failed (negative indices).\n{(src_coords_target - lo_targ).min(2).min(1).min(0)}'
            )

        for k, discr in enumerate(target_discrete_ix):
            if discr:
                map_coordinates_nearest(target_cut[k], src_coords_target,
                                        lo_targ, target[k])

                if debug:
                    unique_cut = set(list(np.unique(target_cut[k])))
                    unique_warp = set(list(np.unique(target[k])))
                    # If new values appear in discrete targets, there is something wrong.
                    # unique_warp can have less values than unique_cut though, for example
                    #  if the warping transform coincidentally slices away all values of a class.
                    if not unique_warp.issubset(unique_cut):
                        print(
                            f'Invalid target encountered:\n\nunique_cut=\n{unique_cut}\n'
                            f'unique_warp=\n{unique_warp}\nM_inv=\n{M_inv}\n'
                            f'src_coords_target - lo_targ=\n{src_coords_target - lo_targ}\n'
                        )
                        # Try dropping to an IPython shell (Won't work with num_workers > 0).
                        import IPython
                        IPython.embed()
                        raise SystemExit

            else:
                map_coordinates_linear(target_cut[k], src_coords_target,
                                       lo_targ, target[k])

    else:
        target = None

    if debug and np.any(np.isnan(inp)):
        raise RuntimeError('Warping is broken: inp contains NaN.')
    if debug and np.any(np.isnan(target)):
        raise RuntimeError('Warping is broken: target contains NaN.')

    return inp, target
Example No. 48
def run_trial(opt):
    oc = svm.OneClassSVM(kernel='rbf', nu=.01, gamma=.01)
    est = knet.Network(opt.arch, learning_rate=opt.lr, epochs=opt.epochs)
    lnr = learner.Learner(est)

    plot_dir = utils.generate_plot_dir('initial', 'experts', vars(opt))
    data_dir = utils.generate_data_dir('initial', 'experts', vars(opt))
    if not os.path.exists(plot_dir):
        os.makedirs(plot_dir)
    if not os.path.exists(data_dir):
        os.makedirs(data_dir)

    opt.plot_dir = plot_dir
    opt.data_dir = data_dir

    opt.num_valid_trajs = max(1, int(.25 * opt.iters))
    opt.samples = 10

    train_trajs = []
    valid_trajs = []

    for i in range(opt.iters):
        print "Iteration: " + str(i)
        states, int_actions, taken_actions, r = statistics.collect_traj(
            opt.env, opt.sup, opt.t)

        lnr.add_data(states, int_actions)

    oc.fit(lnr.X)
    preds = oc.predict(lnr.X)
    train_err = len(preds[preds == -1]) / float(len(preds))
    print "\nTraining error: " + str(train_err)

    lnr.train()

    sup_rewards = np.zeros((20))
    lnr_rewards = np.zeros((20))

    X_valid = []
    X_test = []
    for i in range(20):
        states_valid, int_actions_valid, _, r_valid = statistics.collect_traj(
            opt.env, opt.sup, opt.t, False)
        states_test, int_actions_test, _, r_test = statistics.collect_traj(
            opt.env, lnr, opt.t, False)

        sup_rewards[i] = r_valid
        lnr_rewards[i] = r_test

        X_valid += states_valid
        X_test += states_test

    valid_preds = oc.predict(X_valid)
    valid_err = len(valid_preds[valid_preds == -1]) / float(len(valid_preds))
    print "Validation error: " + str(valid_err)

    test_preds = oc.predict(X_test)
    test_err = len(test_preds[test_preds == -1]) / float(len(test_preds))
    print "Test error: " + str(test_err)

    print "\n\n"

    print "Average sup reward: " + str(np.mean(sup_rewards)) + " +/- " + str(
        scipy.stats.sem(sup_rewards))
    print "Average lnr reward: " + str(np.mean(lnr_rewards)) + " +/- " + str(
        scipy.stats.sem(lnr_rewards))

    print "\n\n"

    def dec(u):
        x = opt.env.get_x()
        s, _, _, _ = opt.env.step(u)
        opt.env.set_x(x)
        return oc.decision_function([s])[0, 0]
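
    # Recovery heuristic used below (as implemented here): when the one-class
    # SVM's decision score for the current state falls below a threshold, take
    # a small action along a finite-difference estimate of the score's gradient
    # with respect to the action, nudging the system back toward states covered
    # by the supervisor's data.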

    rewards = np.zeros((20))
    rec_counts = np.zeros((20))
    X_robust = []
    for i in range(20):

        s = opt.env.reset()
        states = [s]

        for t in range(opt.t):
            score = oc.decision_function([s])[0, 0]
            print "Decision score: " + str(score)
            if score < .1:
                alpha = .1
                a = alpha * utils.finite_diff1(
                    np.zeros(opt.env.action_space.shape), dec)
                IPython.embed()
                rec_counts[i] += 1.0
                s, r, done, _ = opt.env.step(a)

            a = lnr.intended_action(s)
            s, r, done, _ = opt.env.step(a)

            rewards[i] += r
            states.append(s)

            if done:
                break

        X_robust += states

    robust_preds = oc.predict(X_robust)
    robust_err = len(robust_preds[robust_preds == -1]) / float(
        len(robust_preds))
    print "Robust error: " + str(robust_err)

    rec_freq = np.mean(rec_counts / float(opt.t))
    print "Recovery frequency: " + str(rec_freq)

    print "Robust rewards: " + str(np.mean(rewards)) + " +/- " + str(
        scipy.stats.sem(rewards))
Example No. 49
 def run(self):
     IPython.embed(header=__header__, banner1=__content__)
     print "Detaching from the target process"
     self.session.detach()
     return
Example No. 50
def scrape_single_game(sport, browser, debug=False):
  # Since we are separating contests by their start time now, we don't currently try to choose a
  # best contest to scrape. We should certainly upgrade this in the future!
  contest_element_id_regex = re.compile(r"contest_(?P<gameid>\d+)-(?P<tableid>\d+)")
  browser_elem = browser.find_element_by_class_name('contest-list-item')
  match_groups = contest_element_id_regex.match(browser_elem.get_attribute("id"))
  fd_game_id = int(match_groups.group("gameid"))
  fd_table_id = int(match_groups.group("tableid"))
  fd_game_title = browser_elem.find_element_by_class_name("contest-name-text").text
  entry_fee = browser_elem.find_element_by_class_name("entry-fee-cell").text.replace('$','').replace(',','')
  game_time = browser_elem.find_element_by_class_name("startdate-cell").text
  cal = Calendar()
  new_parsed_dt, ret_code = cal.parseDT(game_time)

  # If it is between 9 PM and midnight locally (Pacific), we'll have an issue:
  # FD shows us the game time as e.g. "7 PM" but we'll interpret that as today.
  # Workaround: if the game time is in the past, it's probably tomorrow. Add 1 day.
  # Break if it's not between 9PM and midnight so we know this happened
  if new_parsed_dt < datetime.datetime.now():
    if datetime.datetime.now().hour < 21:
      print "Parsed time of FanDuel game start is in the past, but this isn't a TZ issue!"
      IPython.embed()
    else:
      new_parsed_dt += datetime.timedelta(days=1)
      assert new_parsed_dt > datetime.datetime.now()

  browser_elem.click()

  print 'Scraping this game:'
  print '  ', fd_game_title
  print '  on', new_parsed_dt.isoformat()
  game_entry_url = 'https://www.fanduel.com/games/{game_id}/contests/{game_id}-{table_id}/enter'.format(game_id=fd_game_id, table_id=fd_table_id)

  # Go to the details for the game, to find list of eligible players for this game.
  browser.get(game_entry_url)

  time.sleep(1)

  # Get the salary cap for the game directly
  salary_text = browser.find_element_by_xpath('//*[@id="ui-skeleton"]/div/section/div[2]/div[4]/div[2]/section/header/'
                                              'remaining-salary/div/div[1]/figure').text
  cap = int(salary_text.replace('$','').replace(',',''))

  # Download the master player / salary list
  player_list_link = browser.find_element_by_link_text("Download players list")
  # Sometimes this doesn't work the first time...

  download_filename = get_csv_file(sport, new_parsed_dt, fd_game_id)
  attempts = 1
  while not os.path.exists(download_filename) and attempts <= 10:
    print '...trying to download file (attempt %d)' % attempts
    time.sleep(3)
    player_list_link.click()
    time.sleep(3)
    attempts += 1
  if not os.path.exists(download_filename):
    print "Problem downloading player list -- not saving anything"
    return False
  else:
    print '...success!'

  # Get player lineups
  if sport == 'mlb':
    print "Accessing lineups..."
    lineups_link = browser.find_element_by_link_text("Lineup Info")
    # You can't just click it, it opens in a new tab :( So we visit it with get() and then go back()
    lineup_url = lineups_link.get_attribute('href')
    browser.get(lineup_url)
    lineup_info = parse_lineup_page(browser.page_source)
    browser.back()
    print "  %d / %d lineups submitted (%d players)" % (len(lineup_info.loaded_teams),
                                                        len(lineup_info.loaded_teams) + len(lineup_info.unloaded_teams),
                                                        len(lineup_info.parsed_players))
  else:
    # Should consider grabbing injury / GTD status here
    lineup_info = None
    # Except that doesn't work too well
    # parse_nba_player_list(browser.page_source)


  add_game_info_to_db(sport,
                      fd_game_id,
                      cap,
                      entry_fee,
                      new_parsed_dt,
                      fd_table_id,
                      game_title=fd_game_title,
                      lineup=lineup_info)
  # Return to the main lobby page with back()
  browser.back()
  return True
Example No. 51
        print 'Index error on antenna %i'%ant
        continue
    avg_p2 = np.nanmean(np.absolute(dp2[tup][pol_strs[1]]))
    ratio = avg_p2/avg_p1
    std_p1 = np.nanstd(np.absolute(dp1[tup][pol_strs[0]]))
    std_p2 = np.nanstd(np.absolute(dp2[tup][pol_strs[1]]))
    std = ratio*np.sqrt((std_p1/avg_p1)**2. + (std_p2/avg_p2)**2.)
    x1,y1 = antpos[ant]['top_x'],antpos[ant]['top_y']
    dx,dy = np.abs(x0-x1),np.abs(y0-y1)
    L = np.sqrt(dx**2. + dy**2.)
    
    avg_ratios.append(ratio)
    stdevs.append(std)
    bl_length.append(L)
    #indiv plot
    if opts.mb and ant in badants: continue
    plt.errorbar(L,ratio,yerr=std,fmt='o',ecolor='b',color='b')
    plt.text(L,ratio,str(ant))
    if ant in badants: plt.plot(L,ratio,'kx',ms=10)

if opts.verb: print np.where(np.array(avg_ratios) > np.nanmean(avg_ratios)+2*np.nanstd(avg_ratios))
#format
plt.xlabel('Baseline length [m]')
plt.xlim(0,300)
plt.ylabel(r'$\langle | V_{a,j} | \rangle_{t,\nu}$',size=20)
plt.suptitle('Relative to antenna %i'%anchor_ant,size=15)
if not opts.nobl: plt.show()
plt.close()

import IPython;IPython.embed()
Example No. 52
from exampleapp import app
import IPython

app.testing = True
test_client = app.test_client()

welcome_message = """Welcome to your Flask CLI environment. 
The following variables are available to use:

app           -> Your Flask app instance.
test_client   -> Your Flask app.test_client().
"""

IPython.embed(header=welcome_message)
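
# A couple of illustrative things to try inside the embedded shell (the route
# path is hypothetical; any route registered on `app` would do):
#     resp = test_client.get('/')
#     resp.status_code, resp.data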
Example No. 53
def main(argv):
    parser = ArgumentParser(usage=__doc__.lstrip())
    parser.add_argument("--verbose",
                        "-v",
                        action="count",
                        default=1,
                        help="more verbosity")
    parser.add_argument(
        "--no-build",
        "-n",
        action="store_true",
        default=False,
        help="do not build the project (use system installed version)")
    parser.add_argument("--build-only",
                        "-b",
                        action="store_true",
                        default=False,
                        help="just build, do not run any tests")
    parser.add_argument("--doctests",
                        action="store_true",
                        default=False,
                        help="Run doctests in module")
    parser.add_argument(
        "--coverage",
        action="store_true",
        default=False,
        help=("report coverage of project code. HTML output goes "
              "under build/coverage"))
    parser.add_argument(
        "--gcov",
        action="store_true",
        default=False,
        help=("enable C code coverage via gcov (requires GCC). "
              "gcov output goes to build/**/*.gc*"))
    parser.add_argument("--lcov-html",
                        action="store_true",
                        default=False,
                        help=("produce HTML for C code coverage information "
                              "from a previous run with --gcov. "
                              "HTML output goes to build/lcov/"))
    parser.add_argument("--mode",
                        "-m",
                        default="fast",
                        help="'fast', 'full', or something that could be "
                        "passed to nosetests -A [default: fast]")
    parser.add_argument(
        "--submodule",
        "-s",
        default=None,
        help="Submodule whose tests to run (cluster, constants, ...)")
    parser.add_argument("--pythonpath",
                        "-p",
                        default=None,
                        help="Paths to prepend to PYTHONPATH")
    parser.add_argument("--tests",
                        "-t",
                        action='append',
                        help="Specify tests to run")
    parser.add_argument("--python",
                        action="store_true",
                        help="Start a Python shell with PYTHONPATH set")
    parser.add_argument("--ipython",
                        "-i",
                        action="store_true",
                        help="Start IPython shell with PYTHONPATH set")
    parser.add_argument("--shell",
                        action="store_true",
                        help="Start Unix shell with PYTHONPATH set")
    parser.add_argument("--debug",
                        "-g",
                        action="store_true",
                        help="Debug build")
    parser.add_argument("--parallel",
                        "-j",
                        type=int,
                        default=0,
                        help="Number of parallel jobs during build")
    parser.add_argument("--show-build-log",
                        action="store_true",
                        help="Show build output rather than using a log file")
    parser.add_argument("--bench",
                        action="store_true",
                        help="Run benchmark suite instead of test suite")
    parser.add_argument(
        "--bench-compare",
        action="store",
        metavar="COMMIT",
        help=("Compare benchmark results to COMMIT. "
              "Note that you need to commit your changes first!"))
    parser.add_argument("--raise-warnings",
                        default=None,
                        type=str,
                        choices=('develop', 'release'),
                        help="if 'develop', warnings are treated as errors")
    parser.add_argument("args",
                        metavar="ARGS",
                        default=[],
                        nargs=REMAINDER,
                        help="Arguments to pass to Nose, Python or shell")
    args = parser.parse_args(argv)

    if args.bench_compare:
        args.bench = True
        args.no_build = True  # ASV does the building

    if args.lcov_html:
        # generate C code coverage output
        lcov_generate()
        sys.exit(0)

    if args.pythonpath:
        for p in reversed(args.pythonpath.split(os.pathsep)):
            sys.path.insert(0, p)

    if args.gcov:
        gcov_reset_counters()

    if args.debug and args.bench:
        print("*** Benchmarks should not be run against debug "
              "version; remove -g flag ***")

    if not args.no_build:
        site_dir = build_project(args)
        sys.path.insert(0, site_dir)
        os.environ['PYTHONPATH'] = site_dir

    extra_argv = args.args[:]
    if extra_argv and extra_argv[0] == '--':
        extra_argv = extra_argv[1:]

    if args.python:
        # Debugging issues with warnings is much easier if you can see them
        print("Enabling display of all warnings")
        import warnings
        import types

        warnings.filterwarnings("always")
        if extra_argv:
            # Don't use subprocess, since we don't want to include the
            # current path in PYTHONPATH.
            sys.argv = extra_argv
            with open(extra_argv[0], 'r') as f:
                script = f.read()
            sys.modules['__main__'] = types.ModuleType('__main__')
            ns = dict(__name__='__main__', __file__=extra_argv[0])
            exec_(script, ns)
            sys.exit(0)
        else:
            import code
            code.interact()
            sys.exit(0)

    if args.ipython:
        # Debugging issues with warnings is much easier if you can see them
        print("Enabling display of all warnings and pre-importing numpy as np")
        import warnings
        warnings.filterwarnings("always")
        import IPython
        import numpy as np
        IPython.embed(user_ns={"np": np})
        sys.exit(0)

    if args.shell:
        shell = os.environ.get('SHELL', 'sh')
        print("Spawning a Unix shell...")
        os.execv(shell, [shell] + extra_argv)
        sys.exit(1)

    if args.coverage:
        dst_dir = os.path.join(ROOT_DIR, 'build', 'coverage')
        fn = os.path.join(dst_dir, 'coverage_html.js')
        if os.path.isdir(dst_dir) and os.path.isfile(fn):
            shutil.rmtree(dst_dir)
        extra_argv += ['--cover-html', '--cover-html-dir=' + dst_dir]

    if args.bench:
        # Run ASV
        items = extra_argv
        if args.tests:
            items += args.tests
        if args.submodule:
            items += [args.submodule]

        bench_args = []
        for a in items:
            bench_args.extend(['--bench', a])

        if not args.bench_compare:
            cmd = ['asv', 'run', '-n', '-e', '--python=same'] + bench_args
            os.chdir(os.path.join(ROOT_DIR, 'benchmarks'))
            os.execvp(cmd[0], cmd)
            sys.exit(1)
        else:
            commits = [x.strip() for x in args.bench_compare.split(',')]
            if len(commits) == 1:
                commit_a = commits[0]
                commit_b = 'HEAD'
            elif len(commits) == 2:
                commit_a, commit_b = commits
            else:
                p.error("Too many commits to compare benchmarks for")

            # Check for uncommitted files
            if commit_b == 'HEAD':
                r1 = subprocess.call(
                    ['git', 'diff-index', '--quiet', '--cached', 'HEAD'])
                r2 = subprocess.call(['git', 'diff-files', '--quiet'])
                if r1 != 0 or r2 != 0:
                    print("*" * 80)
                    print("WARNING: you have uncommitted changes --- "
                          "these will NOT be benchmarked!")
                    print("*" * 80)

            # Fix commit ids (HEAD is local to current repo)
            p = subprocess.Popen(['git', 'rev-parse', commit_b],
                                 stdout=subprocess.PIPE)
            out, err = p.communicate()
            commit_b = out.strip().decode('ascii')  # git output is bytes under Python 3

            p = subprocess.Popen(['git', 'rev-parse', commit_a],
                                 stdout=subprocess.PIPE)
            out, err = p.communicate()
            commit_a = out.strip().decode('ascii')

            cmd = [
                'asv', 'continuous', '-e', '-f', '1.05', commit_a, commit_b
            ] + bench_args
            os.chdir(os.path.join(ROOT_DIR, 'benchmarks'))
            os.execvp(cmd[0], cmd)
            sys.exit(1)

    test_dir = os.path.join(ROOT_DIR, 'build', 'test')

    if args.build_only:
        sys.exit(0)
    elif args.submodule:
        modname = PROJECT_MODULE + '.' + args.submodule
        try:
            __import__(modname)
            test = sys.modules[modname].test
        except (ImportError, KeyError, AttributeError):
            print("Cannot run tests for %s" % modname)
            sys.exit(2)
    elif args.tests:

        def fix_test_path(x):
            # fix up test path
            p = x.split(':')
            p[0] = os.path.relpath(os.path.abspath(p[0]), test_dir)
            return ':'.join(p)

        tests = [fix_test_path(x) for x in args.tests]

        def test(*a, **kw):
            extra_argv = kw.pop('extra_argv', ())
            extra_argv = extra_argv + tests[1:]
            kw['extra_argv'] = extra_argv
            from numpy.testing import Tester
            return Tester(tests[0]).test(*a, **kw)
    else:
        __import__(PROJECT_MODULE)
        test = sys.modules[PROJECT_MODULE].test

    # Run the tests under build/test
    try:
        shutil.rmtree(test_dir)
    except OSError:
        pass
    try:
        os.makedirs(test_dir)
    except OSError:
        pass

    cwd = os.getcwd()
    try:
        os.chdir(test_dir)
        result = test(args.mode,
                      verbose=args.verbose,
                      extra_argv=extra_argv,
                      doctests=args.doctests,
                      raise_warnings=args.raise_warnings,
                      coverage=args.coverage)
    finally:
        os.chdir(cwd)

    if result.wasSuccessful():
        sys.exit(0)
    else:
        sys.exit(1)
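# The --bench-compare path above shells out to `git rev-parse` to turn symbolic refs
# (e.g. HEAD) into full SHAs before handing them to asv. A minimal standalone sketch
# of that step; `resolve_commit` is a hypothetical helper, not part of the runner.
import subprocess

def resolve_commit(ref):
    # `git rev-parse <ref>` prints the full commit SHA for a symbolic ref
    out = subprocess.check_output(['git', 'rev-parse', ref])
    return out.strip().decode('ascii')

# e.g. resolve_commit('HEAD') -> '1a2b3c...' in any git checkout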
Example #54
    def forward(self, x):
        sources = list()  # holds sources 1-6, the inputs to loc and conf
        loc = list()  # holds the loc outputs
        conf = list()  # holds the conf outputs

        # run vgg up to conv4_3
        for k in range(23):
            x = self.vgg[k](x)

        # pass the conv4_3 output through L2Norm to create source1 and append it to sources
        source1 = self.L2Norm(x)
        sources.append(source1)

        # run the rest of vgg to create source2 and append it to sources
        for k in range(23, len(self.vgg)):
            x = self.vgg[k](x)

        sources.append(x)

        # run the conv and ReLU layers in extras
        # append sources 3-6 to sources
        for k, v in enumerate(self.extras):
            x = F.relu(v(x), inplace=True)
            if k % 2 == 1:  # after each conv->ReLU->conv->ReLU block, store the result as a source
                sources.append(x)

        # apply the corresponding convolution once to each of sources 1-6
        # zip lets the for loop walk several lists in parallel
        # there are six sources, so the loop runs six times
        for (x, l, c) in zip(sources, self.loc, self.conf):
            # permute reorders the tensor dimensions
            loc.append(l(x).permute(0, 2, 3, 1).contiguous())
            conf.append(c(x).permute(0, 2, 3, 1).contiguous())
            # l(x) and c(x) run the convolutions
            # their outputs have shape [batch_num, 4 * num_aspect_ratios, featuremap height, featuremap width]
            # the number of aspect ratios differs per source, so the dimensions are reordered to keep things uniform
            # permute rearranges them to
            # [batch_num, featuremap height, featuremap width, 4 * num_aspect_ratios]
            # (note)
            # torch.contiguous() re-lays the elements out contiguously in memory;
            # view() is used later and requires the tensor to be contiguous in memory.

        # reshape loc and conf further
        # loc becomes torch.Size([batch_num, 34928])
        # conf becomes torch.Size([batch_num, 183372])
        loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
        conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)

        # reshape loc and conf into their final form
        # loc becomes torch.Size([batch_num, 8732, 4])
        # conf becomes torch.Size([batch_num, 8732, 21])
        loc = loc.view(loc.size(0), -1, 4)
        try:
            conf = conf.view(conf.size(0), -1, self.num_classes)
        except Exception:
            import traceback
            traceback.print_exc()

            import IPython
            IPython.embed()

        # finally, assemble the output
        output = (loc, conf, self.dbox_list)

        if self.phase == "inference":  # 推論時
            # クラス「Detect」のforwardを実行
            # 返り値のサイズは torch.Size([batch_num, 21, 200, 5])
            return self.detect(output[0], output[1], output[2])
        else:  # 学習時
            return output
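# Worked shape example for the permute/contiguous/view steps in forward() above.
# Standalone sketch with illustrative sizes (only torch is assumed); a single source is
# shown here, whereas the real forward concatenates all six sources before the final view.
import torch

batch, n_anchors, fh, fw, n_classes = 2, 4, 38, 38, 21
conf_out = torch.randn(batch, n_anchors * n_classes, fh, fw)  # conv head output [N, C, H, W]

y = conf_out.permute(0, 2, 3, 1).contiguous()  # -> [N, H, W, C]; contiguous() so view() works
y = y.view(y.size(0), -1)                      # -> [N, H*W*C]
y = y.view(y.size(0), -1, n_classes)           # -> [N, H*W*n_anchors, n_classes]
print(y.shape)                                 # torch.Size([2, 5776, 21])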
    def generate_compose_tree(self, treex, latent_canvas_size):
        for i in range(0, treex.num_children):
            treex.children[i] = self.generate_compose_tree(treex.children[i], latent_canvas_size)

        # one hot embedding of a word
        ohe = self.get_code(self.dictionary, treex.word)
        if treex.function == 'combine':
            vis_dist = self.vis_dist(ohe)
            pos_dist = self.pos_dist(ohe)
            if treex.num_children > 0:
                # visual content
                vis_dist_child = treex.children[0].vis_dist
                vis_dist = self.combine(vis_dist, vis_dist_child, 'vis')
                # visual position
                pos_dist_child = treex.children[0].pos_dist
                pos_dist = self.combine(pos_dist, pos_dist_child, 'pos')

            treex.vis_dist = vis_dist
            treex.pos_dist = pos_dist

        elif treex.function == 'describe':
            # blend visual words
            vis_dist = self.vis_dist(ohe)
            pos_dist = self.pos_dist(ohe)
            if treex.num_children > 0:
                # visual content
                vis_dist_child = treex.children[0].vis_dist
                vis_dist = self.describe(vis_dist_child, vis_dist, 'vis')
                # visual position
                pos_dist_child = treex.children[0].pos_dist
                pos_dist = self.describe(pos_dist_child, pos_dist, 'pos')

            treex.pos_dist = pos_dist

            # regress bbox
            treex.pos = np.clip(self.box_vae.generate(prior=treex.pos_dist).data.cpu().numpy().astype(int),
                                int(self.ds),
                                self.im_size).flatten() // self.ds

            #print treex.pos
            #treex.pos = [6, 5]
            if treex.parent is None:
                ones = self.get_ones(torch.Size([1, 1]))
                if not self.bg_bias:
                    bg_vis_dist = [Variable(torch.zeros(latent_canvas_size)).cuda(), \
                                   Variable(torch.zeros(latent_canvas_size)).cuda()]
                else:
                    bg_vis_dist = [self.bias_mean(ones).view(*latent_canvas_size), \
                                   self.bias_var(ones).view(*latent_canvas_size)]


                b = [int(latent_canvas_size[2]) // 2 - treex.pos[0] // 2,
                     int(latent_canvas_size[3]) // 2 - treex.pos[1] // 2, treex.pos[0], treex.pos[1]]

                #b[:2] = [7, 8]
                #b[:2] = [6, 6]
                bg_vis_dist = [self.assign_util(bg_vis_dist[0], b, self.transform(vis_dist[0], treex.pos),
                                                'assign'), \
                               self.assign_util(bg_vis_dist[1], b,
                                                self.transform(vis_dist[1], treex.pos, variance=True),
                                                'assign')]

                vis_dist = bg_vis_dist
                treex.offsets = b
            else:
                # resize vis_dist
                vis_dist = [self.transform(vis_dist[0], treex.pos), \
                            self.transform(vis_dist[1], treex.pos, variance=True)]

            treex.vis_dist = vis_dist

        elif treex.function == 'layout':
            # get pos word as position prior
            treex.pos_dist = self.pos_dist(ohe)
            assert (treex.num_children > 0)

            # get offsets: use gt for training
            l_pos = treex.children[0].pos
            r_pos = treex.children[1].pos

            offsets = np.clip(self.offset_vae.generate(prior=treex.pos_dist).data.cpu().numpy().astype(int), 0,
                              self.im_size).flatten() // self.ds
            countdown = 0
            while not self.check_valid(offsets, l_pos, r_pos, self.im_size // self.ds):
                offsets = np.clip(self.offset_vae.generate(prior=treex.pos_dist).data.cpu().numpy().astype(int), 0,
                                  self.im_size).flatten() // self.ds
                if countdown >= 100:
                    print('Tried proposing more than 100 times.')
                    if self.debug_mode:
                        import IPython
                        IPython.embed()
                    print('Warning! Manually adapt offsets')
                    lat_size = self.im_size // self.ds
                    if offsets[0] + l_pos[0] > lat_size:
                        offsets[0] = lat_size - l_pos[0]
                    if offsets[1] + l_pos[1] > lat_size:
                        offsets[1] = lat_size - l_pos[1]
                    if offsets[2] + r_pos[0] > lat_size:
                        offsets[2] = lat_size - r_pos[0]
                    if offsets[3] + r_pos[1] > lat_size:
                        offsets[3] = lat_size - r_pos[1]

                countdown += 1
            treex.offsets = offsets
            l_offset = offsets[:2]
            r_offset = offsets[2:]

            ######################### constructing latent map ###############################
            # bias filled mean&var
            ones = self.get_ones(torch.Size([1, 1]))
            if not self.bg_bias:
                bg_vis_dist = [Variable(torch.zeros(latent_canvas_size)).cuda(), \
                               Variable(torch.zeros(latent_canvas_size)).cuda()]
            else:
                bg_vis_dist = [self.bias_mean(ones).view(*latent_canvas_size), \
                               self.bias_var(ones).view(*latent_canvas_size)]

            vis_dist = bg_vis_dist
            try:
                # arrange the layout of two children
                vis_dist[0] = self.assign_util(vis_dist[0], list(l_offset) + list(l_pos), treex.children[0].vis_dist[0],
                                               'assign')
                vis_dist[1] = self.assign_util(vis_dist[1], list(l_offset) + list(l_pos), treex.children[0].vis_dist[1],
                                               'assign')

                vis_dist[0] = self.assign_util(vis_dist[0], list(r_offset) + list(r_pos), treex.children[1].vis_dist[0],
                                               'assign')
                vis_dist[1] = self.assign_util(vis_dist[1], list(r_offset) + list(r_pos), treex.children[1].vis_dist[1],
                                               'assign')
            except Exception:
                print("latent distribution doesn't fit size.")
                import IPython
                IPython.embed()

            if treex.parent is not None:
                p = [min(l_offset[0], r_offset[0]), min(l_offset[1], r_offset[1]), \
                     max(l_offset[0] + l_pos[0], r_offset[0] + r_pos[0]),
                     max(l_offset[1] + l_pos[1], r_offset[1] + r_pos[1])]
                treex.pos = [p[2] - p[0], p[3] - p[1]]
                treex.vis_dist = [vis_dist[0][:, :, p[0]:p[2], p[1]:p[3]], \
                                  vis_dist[1][:, :, p[0]:p[2], p[1]:p[3]]]
            else:
                treex.vis_dist = vis_dist

        return treex
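# The layout branch above resamples offsets and, after 100 failed proposals, manually
# pulls each child box back inside the latent grid. A numpy-only sketch of that
# fallback clamp (hypothetical helper name and toy sizes, not taken from the class):
import numpy as np

def clamp_offsets(offsets, l_pos, r_pos, lat_size):
    offsets = np.array(offsets, copy=True)
    offsets[0] = min(offsets[0], lat_size - l_pos[0])  # left child, rows
    offsets[1] = min(offsets[1], lat_size - l_pos[1])  # left child, cols
    offsets[2] = min(offsets[2], lat_size - r_pos[0])  # right child, rows
    offsets[3] = min(offsets[3], lat_size - r_pos[1])  # right child, cols
    return offsets

print(clamp_offsets([14, 3, 2, 12], l_pos=[4, 4], r_pos=[6, 6], lat_size=16))
# both boxes now fit inside the 16x16 latent grid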
Example #56
def main():
    # Runs code in "functions.py" files, provided in configuration directories.
    config.exec_custom_functions()

    # Sets up logging
    if config.debug_mode:
        logging.getLogger().setLevel(logging.DEBUG)
        logging.debug("Setting log level to DEBUG (debug_mode is enabled)")

    # Set up graceful exit for SIGTERM (so finally clauses might have a chance to execute)
    def handle_sigterm(*args):
        logging.warn("Exiting due to SIGTERM")
        sys.exit(0)

    signal.signal(signal.SIGTERM, handle_sigterm)

    # Validates constraints on the configuration data (does not touch database)
    validate_config()

    # Performs database migrations as needed
    migrate()

    # Validates constraints on the configuration data and database data in conjunction
    validate_database_constraints()

    if config.mode == "ipython":
        # If we're running in --ipython mode, STOP here (don't interfere with a server that may
        # be running simultaneously). Launch the IPython shell and wait for user input.
        import IPython
        return IPython.embed()
    elif config.mode == "server":
        # Run ob2 in server mode.
        #
        # First, we clean up our resumable queues by re-enqueuing any half-completed transactions.
        # Then, we reset the state of the local Docker daemon.
        # Then, we start all our worker threads.
        # Finally, the main thread goes to sleep until we receive a signal.

        # Recovers the resumable queue used for the mailer thread (if mailer is enabled)
        if config.mailer_enabled:
            mailer_queue.recover()

        # Recovers the resumable queue used for the GitHub API thread (if GitHub is NOT in read-only
        # mode)
        if not config.github_read_only_mode:
            repomanager_queue.recover()

        # Clears out stray Docker containers and images
        reset_grader()

        # Start background threads for all the apps
        # Warning: Do not try to start more than 1 web thread. The web server is already threaded.
        apps = [(ob2.dockergrader, config.num_workers), (ob2.web, 1)]
        if config.mailer_enabled:
            apps.append((ob2.mailer, 1))
        if not config.github_read_only_mode:
            # The GitHub repo manager thread is only needed if GitHub is NOT in read-only mode
            apps.append((ob2.repomanager, 1))
        for app, num_workers in apps:
            for _ in range(num_workers):
                worker = Thread(target=app.main)
                worker.daemon = True
                worker.start()

        # Wait until we're asked to quit
        while True:
            try:
                signal.pause()
            except (KeyboardInterrupt, SystemExit):
                logging.warn("Shutting down.. Goodbye world.")
                break
            except Exception:
                traceback.print_exc()
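# Minimal sketch of the server-mode pattern used in main() above: daemon threads run
# the app loops while the main thread blocks on signal.pause() until it is interrupted
# (standalone; worker_main is a stand-in for the dockergrader/web/mailer entry points).
import signal
import time
from threading import Thread

def worker_main():
    while True:
        time.sleep(1)  # placeholder work loop

for _ in range(2):
    worker = Thread(target=worker_main)
    worker.daemon = True  # daemon threads exit together with the main thread
    worker.start()

try:
    signal.pause()  # sleep until a signal (e.g. SIGTERM, Ctrl+C) arrives
except (KeyboardInterrupt, SystemExit):
    print("Shutting down.")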
    def forward(self, x, treex, treeindex=None, alpha=1.0, ifmask=False, maskweight=1.0):
        ################################
        ##    input: images, trees    ##
        ################################

        # if multigpu_full, pick the trees by treeindex
        if self.multigpu_full:
            treex_pick = [treex[ele[0]] for ele in treeindex.data.cpu().numpy().astype(int)]
            treex = treex_pick

        if ifmask:
            mask = []
            for i in range(0, len(treex)):
                mask += [self.get_mask_from_tree(treex[i], x[0:1, :, :, :].size())]
            mask = torch.cat(mask, dim=0)

        # encoding the images
        h = self.reader(x)
        # proposal distribution
        latent_mean = self.h_mean(h)
        latent_var = self.h_var(h)

        # losses
        kld_loss, rec_loss, pos_loss = 0, 0, 0

        # forward GNMN
        prior_mean_all = []
        prior_var_all = []
        trees = []
        for i in range(0, len(treex)):  # iterate through every tree of the batch
            trees.append(self.compose_tree(treex[i], self.latent_canvas_size))
            prior_mean_all += [trees[i].vis_dist[0]]
            prior_var_all += [trees[i].vis_dist[1]]
            pos_loss += trees[i].pos_loss
            if np.isnan(trees[i].pos_loss.data.cpu().numpy()):
                print('found NaN pos_loss')
                import IPython
                IPython.embed()

        prior_mean = torch.cat(prior_mean_all, dim=0)
        prior_var = torch.cat(prior_var_all, dim=0)

        prior_mean, prior_var = self.renderer([prior_mean, prior_var])

        # sample z map
        z_map = self.sampler(latent_mean, latent_var)

        # kld loss
        kld_loss = alpha * self.bikld([latent_mean, latent_var], [prior_mean, prior_var]) + \
                   (1 - alpha) * self.bikld([latent_mean.detach(), latent_var.detach()], [prior_mean, prior_var])

        rec = self.writer(z_map)

        if ifmask:
            mask = (mask + maskweight) / (maskweight + 1.0)
            rec_loss = self.pixelrecon_criterion(mask * rec, mask * x)
        else:
            rec_loss = self.pixelrecon_criterion(rec, x)
        rec_loss = rec_loss.sum()

        return rec_loss, kld_loss, pos_loss, rec
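# The masked reconstruction above softens the binary object mask with maskweight:
# object pixels keep weight 1.0, background pixels get maskweight / (maskweight + 1).
# Toy torch-only sketch; F.mse_loss stands in for the unspecified pixelrecon_criterion.
import torch
import torch.nn.functional as F

maskweight = 3.0
mask = torch.tensor([[1.0, 0.0, 1.0]])          # 1 = object region, 0 = background
rec = torch.rand(1, 3)
x = torch.rand(1, 3)

soft_mask = (mask + maskweight) / (maskweight + 1.0)   # -> [[1.0, 0.75, 1.0]]
rec_loss = F.mse_loss(soft_mask * rec, soft_mask * x, reduction='sum')
print(soft_mask, rec_loss.item())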
Example #58
    def run(self, config_dict_file, config_dict):
        config_dict['n_hidden_to3Dpose'] = config_dict.get(
            'n_hidden_to3Dpose', 2)

        if 1:  # load small example data
            import pickle
            data_loader = pickle.load(open('../examples/test_set.pickl', "rb"))
        else:
            data_loader = self.load_data_test(config_dict)
            # save example data
            if 0:
                import pickle
                IPython.embed()
                data_iterator = iter(data_loader)
                data_cach = [next(data_iterator) for i in range(10)]
                data_cach = tuple(data_cach)
                pickle.dump(data_cach, open('../examples/test_set.pickl',
                                            "wb"))

        # load model
        model = self.load_network(config_dict)
        model = model.to(device)

        def tensor_to_npimg(torch_array):
            return np.swapaxes(np.swapaxes(torch_array.numpy(), 0, 2), 0, 1)

        def denormalize(np_array):
            return np_array * np.array(config_dict['img_std']) + np.array(
                config_dict['img_mean'])

        # extract image
        def tensor_to_img(output_tensor):
            output_img = tensor_to_npimg(output_tensor)
            output_img = denormalize(output_img)
            output_img = np.clip(output_img, 0, 1)
            return output_img

        def rotationMatrixXZY(theta, phi, psi):
            Ax = np.matrix([[1, 0, 0], [0, np.cos(theta), -np.sin(theta)],
                            [0, np.sin(theta), np.cos(theta)]])
            Ay = np.matrix([[np.cos(phi), 0, -np.sin(phi)], [0, 1, 0],
                            [np.sin(phi), 0, np.cos(phi)]])
            Az = np.matrix([
                [np.cos(psi), -np.sin(psi), 0],
                [np.sin(psi), np.cos(psi), 0],
                [0, 0, 1],
            ])
            return Az * Ay * Ax

        # get next image
        input_dict, label_dict = None, None
        data_iterator = iter(data_loader)

        def nextImage():
            nonlocal input_dict, label_dict
            input_dict, label_dict = next(data_iterator)
            input_dict['external_rotation_global'] = torch.from_numpy(
                np.eye(3)).float().to(device)

        nextImage()

        # apply model on images
        output_dict = None

        def predict():
            nonlocal output_dict
            model.eval()
            with torch.no_grad():
                input_dict_cuda, label_dict_cuda = utils_data.nestedDictToDevice(
                    (input_dict, label_dict), device=device)
                output_dict_cuda = model(input_dict_cuda)
                output_dict = utils_data.nestedDictToDevice(output_dict_cuda,
                                                            device='cpu')

        predict()

        # init figure
        my_dpi = 400
        fig, ax_blank = plt.subplots(figsize=(5 * 800 / my_dpi,
                                              5 * 300 / my_dpi))
        plt.axis('off')
        # gt skeleton
        ax_gt_skel = fig.add_subplot(111, projection='3d')
        ax_gt_skel.set_position([0.8, 0.0, 0.2, 0.98])
        handle_gt_skel = utils_plt.plot_3Dpose_simple(
            ax_gt_skel,
            label_dict['3D'][0].numpy().reshape([-1, 3]).T,
            bones=utils_skel.bones_h36m,
            linewidth=5,
            plot_handles=None)  # , colormap='Greys')
        ax_gt_skel.invert_zaxis()
        ax_gt_skel.grid(False)
        ax_gt_skel.set_axis_off()
        ax_gt_skel.set_title("GT pose")
        # output skeleton
        ax_pred_skel = fig.add_subplot(111, projection='3d')
        ax_pred_skel.set_position([0.65, 0.0, 0.2, 0.98])
        handle_pred_skel = utils_plt.plot_3Dpose_simple(
            ax_pred_skel,
            label_dict['3D'][0].numpy().reshape([-1, 3]).T,
            bones=utils_skel.bones_h36m,
            linewidth=5,
            plot_handles=None)  # , colormap='Greys')
        ax_pred_skel.invert_zaxis()
        ax_pred_skel.grid(False)
        ax_pred_skel.set_axis_off()
        ax_pred_skel.set_title("Pred. pose")
        # input image
        ax_in_img = plt.axes([-0.16, 0.2, 0.7, 0.7])
        ax_in_img.axis('off')
        im_input = plt.imshow(tensor_to_img(input_dict['img_crop'][0]),
                              animated=True)
        ax_in_img.set_title("Input img")
        # output image
        ax_out_img = plt.axes([0.15, 0.2, 0.7, 0.7])
        ax_out_img.axis('off')
        im_pred = plt.imshow(tensor_to_img(output_dict['img_crop'][0]),
                             animated=True)
        ax_out_img.set_title("Output img")

        # update figure with new data
        def update_figure():
            # images
            im_input.set_array(tensor_to_img(input_dict['img_crop'][0]))
            im_pred.set_array(tensor_to_img(output_dict['img_crop'][0]))
            # gt 3D poses
            gt_pose = label_dict['3D'][0]
            R_cam_2_world = label_dict['extrinsic_rot_inv'][0].numpy()
            R_world_in_cam = la.inv(R_cam_2_world) @ input_dict[
                'external_rotation_global'].cpu().numpy() @ R_cam_2_world
            pose_rotated = R_world_in_cam @ gt_pose.numpy().reshape([-1, 3]).T
            utils_plt.plot_3Dpose_simple(ax_gt_skel,
                                         pose_rotated,
                                         bones=utils_skel.bones_h36m,
                                         plot_handles=handle_gt_skel)
            # prediction 3D poses
            pose_mean = label_dict['pose_mean'][0].numpy()
            pose_std = label_dict['pose_std'][0].numpy()
            pred_pose = (output_dict['3D'][0].numpy().reshape(pose_mean.shape)
                         * pose_std) + pose_mean
            pose_rotated = R_world_in_cam @ pred_pose.reshape([-1, 3]).T
            utils_plt.plot_3Dpose_simple(ax_pred_skel,
                                         pose_rotated,
                                         bones=utils_skel.bones_h36m,
                                         plot_handles=handle_pred_skel)

            # flush drawings
            fig.canvas.draw_idle()

        def update_rotation(event):
            rot = slider_yaw_glob.val
            print("Rotationg ", rot)
            batch_size = input_dict['img_crop'].size()[0]
            input_dict['external_rotation_global'] = torch.from_numpy(
                rotationMatrixXZY(theta=0, phi=0, psi=rot)).float().to(device)
            input_dict['external_rotation_cam'] = torch.from_numpy(
                np.eye(3)
            ).float().to(
                device
            )  # torch.from_numpy(rotationMatrixXZY(theta=0, phi=rot, psi=0)).float().cuda()
            predict()
            update_figure()

        ax_next = plt.axes([0.05, 0.1, 0.15, 0.04])
        button_next = Button(ax_next,
                             'Next image',
                             color='lightgray',
                             hovercolor='0.975')

        def nextButtonPressed(event):
            nextImage()
            predict()
            update_figure()

        button_next.on_clicked(nextButtonPressed)
        ax_yaw_glob = plt.axes([0.25, 0.1, 0.65, 0.015], facecolor='lightgray')
        slider_range = 2 * np.pi
        slider_yaw_glob = Slider(ax_yaw_glob,
                                 'Yaw',
                                 -slider_range,
                                 slider_range,
                                 valinit=0)
        slider_yaw_glob.on_changed(update_rotation)
        plt.show()
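# The update_figure/update_rotation pair above applies a rotation that is defined in
# world coordinates to poses expressed in camera coordinates by conjugating with the
# camera extrinsics. A numpy-only sketch of that change of basis (made-up matrices):
import numpy as np
import numpy.linalg as la

def world_rotation_in_cam(R_cam_2_world, R_world):
    # go to world coordinates, rotate there, come back to camera coordinates
    return la.inv(R_cam_2_world) @ R_world @ R_cam_2_world

theta = np.pi / 2
R_world = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                    [np.sin(theta),  np.cos(theta), 0.0],
                    [0.0, 0.0, 1.0]])
pose_cam = np.random.rand(3, 17)                       # 3 x n_joints, camera coords
pose_rot = world_rotation_in_cam(np.eye(3), R_world) @ pose_cam
print(pose_rot.shape)                                  # (3, 17)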
Example #59
def farthest_point_sampling(pts,
                            k,
                            initial_idx=None,
                            metrics=l2_norm,
                            skip_initial=False,
                            indices_dtype=numpy.int32,
                            distances_dtype=numpy.float32):
    """Batch operation of farthest point sampling

    Code referenced from below link by @Graipher
    https://codereview.stackexchange.com/questions/179561/farthest-point-algorithm-in-python

    Args:
        pts (numpy.ndarray or cupy.ndarray): 2-dim array (num_point, coord_dim)
            or 3-dim array (batch_size, num_point, coord_dim)
            When input is 2-dim array, it is treated as 3-dim array with
            `batch_size=1`.
        k (int): number of points to sample
        initial_idx (int): initial index to start farthest point sampling.
            `None` indicates to sample from random index,
            in this case the returned value is not deterministic.
        metrics (callable): metrics function, indicates how to calc distance.
        skip_initial (bool): If True, initial point is skipped to store as
            farthest point. It stabilizes the function output.
        xp (numpy or cupy):
        indices_dtype (): dtype of output `indices`
        distances_dtype (): dtype of output `distances`

    Returns (tuple): `indices` and `distances`.
        indices (numpy.ndarray or cupy.ndarray): 2-dim array (batch_size, k, )
            indices of sampled farthest points.
            `pts[indices[i, j]]` represents `i-th` batch element of `j-th`
            farthest point.
        distances (numpy.ndarray or cupy.ndarray): 3-dim array
            (batch_size, k, num_point)

    """
    if pts.ndim == 2:
        # insert batch_size axis
        pts = pts[None, ...]
    assert pts.ndim == 3
    xp = cuda.get_array_module(pts)
    batch_size, num_point, coord_dim = pts.shape
    indices = xp.zeros((
        batch_size,
        k,
    ), dtype=indices_dtype)

    # distances[bs, i, j] is distance between i-th farthest point `pts[bs, i]`
    # and j-th input point `pts[bs, j]`.
    distances = xp.zeros((batch_size, k, num_point), dtype=distances_dtype)
    if initial_idx is None:
        indices[:, 0] = xp.random.randint(len(pts))
    else:
        indices[:, 0] = initial_idx

    batch_indices = xp.arange(batch_size)
    farthest_point = pts[batch_indices, indices[:, 0]]
    # minimum distances to the sampled farthest point
    try:
        min_distances = metrics(farthest_point[:, None, :], pts)
    except Exception as e:
        import IPython
        IPython.embed()

    if skip_initial:
        # Override 0-th `indices` by the farthest point of `initial_idx`
        indices[:, 0] = xp.argmax(min_distances, axis=1)
        farthest_point = pts[batch_indices, indices[:, 0]]
        min_distances = metrics(farthest_point[:, None, :], pts)

    distances[:, 0, :] = min_distances
    for i in range(1, k):
        indices[:, i] = xp.argmax(min_distances, axis=1)
        farthest_point = pts[batch_indices, indices[:, i]]
        dist = metrics(farthest_point[:, None, :], pts)
        distances[:, i, :] = dist
        min_distances = xp.minimum(min_distances, dist)
    return indices, distances
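# A possible usage sketch for farthest_point_sampling. It assumes the module-level
# imports used above (numpy, and cuda.get_array_module) are available; the explicit
# metrics callable stands in for the default l2_norm, which is not shown here.
def _sq_l2(farthest, pts):
    # squared Euclidean distance, broadcasting (batch, 1, dim) against (batch, n, dim)
    return ((farthest - pts) ** 2).sum(axis=2)

pts = numpy.random.rand(4, 128, 3).astype(numpy.float32)   # (batch_size, num_point, coord_dim)
indices, distances = farthest_point_sampling(pts, k=16, initial_idx=0, metrics=_sq_l2)
print(indices.shape)    # (4, 16)
print(distances.shape)  # (4, 16, 128)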
    def compose_tree(self, treex, latent_canvas_size):
        for i in range(0, treex.num_children):
            treex.children[i] = self.compose_tree(treex.children[i], latent_canvas_size)

        # one hot embedding of a word
        ohe = self.get_code(self.dictionary, treex.word)

        if treex.function == 'combine':
            vis_dist = self.vis_dist(ohe)
            pos_dist = self.pos_dist(ohe)
            if treex.num_children > 0:
                # visual content
                vis_dist_child = treex.children[0].vis_dist
                vis_dist = self.combine(vis_dist, vis_dist_child, 'vis')
                # visual position
                pos_dist_child = treex.children[0].pos_dist
                pos_dist = self.combine(pos_dist, pos_dist_child, 'pos')

            treex.vis_dist = vis_dist
            treex.pos_dist = pos_dist

        elif treex.function == 'describe':
            # blend visual words
            vis_dist = self.vis_dist(ohe)
            pos_dist = self.pos_dist(ohe)
            if treex.num_children > 0:
                # visual content
                vis_dist_child = treex.children[0].vis_dist
                vis_dist = self.describe(vis_dist_child, vis_dist, 'vis')
                # visual position
                pos_dist_child = treex.children[0].pos_dist
                pos_dist = self.describe(pos_dist_child, pos_dist, 'pos')

            treex.pos_dist = pos_dist

            # regress bbox
            treex.pos = np.maximum(treex.bbox[2:] // self.ds, [1, 1])
            target_box = Variable(torch.from_numpy(np.array(treex.bbox[2:])[np.newaxis, ...].astype(np.float32))).cuda()
            regress_box, kl_box = self.box_vae(target_box, prior=treex.pos_dist)
            treex.pos_loss = self.pos_criterion(regress_box, target_box) + kl_box

            if treex.parent is None:
                ones = self.get_ones(torch.Size([1, 1]))
                if not self.bg_bias:
                    bg_vis_dist = [Variable(torch.zeros(latent_canvas_size)).cuda(), \
                                   Variable(torch.zeros(latent_canvas_size)).cuda()]
                else:
                    bg_vis_dist = [self.bias_mean(ones).view(*latent_canvas_size), \
                                   self.bias_var(ones).view(*latent_canvas_size)]
                b = np.maximum(treex.bbox // self.ds, [0, 0, 1, 1])

                bg_vis_dist = [self.assign_util(bg_vis_dist[0], b, self.transform(vis_dist[0], treex.pos),
                                                'assign'), \
                               self.assign_util(bg_vis_dist[1], b,
                                                self.transform(vis_dist[1], treex.pos, variance=True),
                                                'assign')]
                vis_dist = bg_vis_dist
            else:
                try:
                    # resize vis_dist
                    vis_dist = [self.transform(vis_dist[0], treex.pos), \
                                self.transform(vis_dist[1], treex.pos, variance=True)]
                except Exception:
                    import IPython
                    IPython.embed()

            treex.vis_dist = vis_dist

        elif treex.function == 'layout':
            # get pos word as position prior
            treex.pos_dist = self.pos_dist(ohe)
            assert (treex.num_children > 0)

            # get offsets: use gt for training
            l_pos = treex.children[0].pos
            l_offset = np.maximum(treex.children[0].bbox[:2] // self.ds, [1, 1])

            r_pos = treex.children[1].pos
            r_offset = np.maximum(treex.children[1].bbox[:2] // self.ds, [1, 1])

            # regress offsets
            target_offset = np.append(l_offset * self.ds, r_offset * self.ds).astype(np.float32)
            target_offset = Variable(torch.from_numpy(target_offset[np.newaxis, ...])).cuda()
            regress_offset, kl_offset = self.offset_vae(target_offset, prior=treex.pos_dist)
            treex.pos_loss = self.pos_criterion(regress_offset, target_offset) + kl_offset + treex.children[
                0].pos_loss + \
                             treex.children[1].pos_loss

            ######################### constructing latent map ###############################
            # bias filled mean&var
            ones = self.get_ones(torch.Size([1, 1]))
            if not self.bg_bias:
                vis_dist = [Variable(torch.zeros(latent_canvas_size)).cuda(), \
                            Variable(torch.zeros(latent_canvas_size)).cuda()]
            else:
                vis_dist = [self.bias_mean(ones).view(*latent_canvas_size), \
                            self.bias_var(ones).view(*latent_canvas_size)]

            # arrange the layout of two children
            vis_dist[0] = self.assign_util(vis_dist[0], list(l_offset) + list(l_pos), treex.children[0].vis_dist[0],
                                           'assign')
            vis_dist[1] = self.assign_util(vis_dist[1], list(l_offset) + list(l_pos), treex.children[0].vis_dist[1],
                                           'assign')

            vis_dist[0] = self.assign_util(vis_dist[0], list(r_offset) + list(r_pos), treex.children[1].vis_dist[0],
                                           'assign')
            vis_dist[1] = self.assign_util(vis_dist[1], list(r_offset) + list(r_pos), treex.children[1].vis_dist[1],
                                           'assign')

            # continue layout
            if treex.parent is not None:
                p = [min(l_offset[0], r_offset[0]), min(l_offset[1], r_offset[1]), \
                     max(l_offset[0] + l_pos[0], r_offset[0] + r_pos[0]),
                     max(l_offset[1] + l_pos[1], r_offset[1] + r_pos[1])]
                treex.pos = [p[2] - p[0], p[3] - p[1]]
                treex.vis_dist = [vis_dist[0][:, :, p[0]:p[2], p[1]:p[3]], \
                                  vis_dist[1][:, :, p[0]:p[2], p[1]:p[3]]]
            else:
                treex.vis_dist = vis_dist

        return treex