Example #1
 def testFail0(self):
     with self.assertRaises(TypeError):
         JPackage(1)
Example #2
 def testGetArrayAsObject(self):
     t = JPackage("jpype").array.TestArray()
     v = t.getArrayAsObject()
Example #3
 def __init__(self, jdbc: Jdbc, table: str,
              fstream=None, commit_mode=UPLOAD_MODE_DRYRUN, exit_on_fail=True, **kwargs):
     super(ParameterUploader, self).__init__(jdbc, table, fstream=fstream, commit_mode=commit_mode,
                                             exit_on_fail=exit_on_fail, **kwargs)
     self.sqlDate = JPackage('java').sql.Date
Example #4
 def testCharArrayAsString(self):
     t = JPackage("jpype").array.TestArray()
     v = t.charArray
     self.assertEqual(str(v), 'avcd')
     self.assertEqual(unicode(v), u'avcd')
Example #5
    def testIterateArray(self):
        t = JPackage("jpype").array.TestArray()
        self.assertFalse(isinstance(t, JPackage))

        for i in t.i:
            self.assertNotEqual(i, 0)
Example #6
 def setUp(self):
     self.jpype = JPackage('jpype')
Example #7
 def testDoubleConversion(self):
     f = java.lang.Float.MAX_VALUE * 2
     jpype = JPackage("jpype")
     self.assertTrue(jpype.numeric.NumericTest.doubleIsTwiceMaxFloat(f))
Example #8
def compute_cond_mi_multivariate(data={},
                                 estimator="kraskov1",
                                 normalize=True,
                                 delay=0):
    """infth_mi_multivariate

    Compute the total (scalar) multivariate conditional mutual information
    """

    # init class and instance
    if estimator == 'kraskov1':
        calcClass = JPackage(
            "infodynamics.measures.continuous.kraskov"
        ).ConditionalMutualInfoCalculatorMultiVariateKraskov1
    elif estimator == 'kraskov2':
        calcClass = JPackage(
            "infodynamics.measures.continuous.kraskov"
        ).ConditionalMutualInfoCalculatorMultiVariateKraskov2
    elif estimator == 'kernel':
        calcClass = JPackage(
            "infodynamics.measures.continuous.kernel"
        ).ConditionalMutualInfoCalculatorMultiVariateKernel

    # instantiate
    calc = calcClass()

    # set properties
    calc.setProperty("NORMALISE", str(normalize).lower())
    # calc.setProperty("PROP_TIME_DIFF", str(delay))

    # print "measures_infth: infth_mi_multivariate: calc.timeDiff = %d" % (calc.timeDiff)

    calc.timeDiff = delay

    # print "measures_infth: infth_mi_multivariate: calc.timeDiff = %d" % (calc.timeDiff)

    # prepare data and attributes
    assert 'C' in data, 'No condition passed via data, %s' % (data.keys())
    src, dst, cond = prepare_data_and_attributes(data)
    # src_ = src.copy()
    # src = dst.copy()

    # logger.debug('src = %s, dst = %s, cond = %s', src.shape, dst.shape, cond)

    # pl.hist(src[0], bins=255)
    # pl.show()

    # print "infth_mi_multivariate src/dst shapes", src.shape, dst.shape
    # print "infth_mi_multivariate src/dst dtypes", src.dtype, dst.dtype

    dim_src, dim_dst, dim_cond = src.shape[1], dst.shape[1], cond.shape[1]

    # compute stuff
    # calc.initialise()
    calc.initialise(dim_src, dim_dst, dim_cond)
    calc.setObservations(src, dst, cond)
    # the average global MI between all source channels and all destination channels
    try:
        mimv_avg = calc.computeAverageLocalOfObservations()
    except Exception as e:
        mimv_avg = np.random.uniform(0, 1e-5, (1, 1))  # np.zeros((1,1))
        logger.error(
            "Error occurred in mimv calc, %s. Setting default mimv_avg = %s" %
            (e, mimv_avg))
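A minimal usage sketch for the function above. The 'X'/'Y' keys are hypothetical (only 'C' is required by the assert); it assumes the JVM is already running with the JIDT infodynamics jar on its classpath and that prepare_data_and_attributes accepts this layout.

import numpy as np

# hypothetical data layout: 'X' = source block, 'Y' = destination block,
# 'C' = condition block (required by the assert above)
data = {
    'X': np.random.randn(1000, 2),
    'Y': np.random.randn(1000, 2),
    'C': np.random.randn(1000, 1),
}
cmi = compute_cond_mi_multivariate(data=data, estimator='kraskov1', delay=0)
print('conditional MI:', cmi)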
Example #9
# 
# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS
# ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO
# PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS."
#
# @author Kamin Whitehouse 
#

from jpype import JPackage, JInt
from pytos.util.JavaInheritor import JavaInheritor
import pytos.Comm as Comm
from  copy import *

drip = JPackage("net.tinyos.drip")

def getDripObject(app, motecom=None, channel=None) :
  """This function returns the drip object stored in app that is connected to optional motecom
  with a optional channel.  If motecome and channel are specified but there is no drip object
  with these specs, it creates one"""
  drips = []
  for conn in app.connections :
    if isinstance(conn, Drip) :
#      if motecom == None or conn.motecom == motecom :                       #we need this function in java
#        if channel == None or drip.channel == channel :  #we need this function in java
          drips.append( conn )
  if len(drips) == 0 and motecom != None and channel != None :
      drip = Drip(app, channel, app.motecom)
      app.connections.append(drip)
      drips.append(drip)
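A hedged usage sketch: it assumes app is an initialised pytos application object, that the function returns the drips list as its docstring implies (the return statement is cut off in the snippet above), and the channel value is hypothetical.

# look up (or lazily create) a Drip connection for the app's motecom
drips = getDripObject(app, motecom=app.motecom, channel=0xAB)
if drips:
    drip = drips[0]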
Example #10
class learnerReward(object):
    # infth stuff
    
    # discretization base
    base = 1000
    basehalf = base // 2

    if HAVE_JPYPE:
        # calculation classes
        piCalcClass = JPackage("infodynamics.measures.discrete").PredictiveInformationCalculatorDiscrete
        piCalcD = piCalcClass(base,1)

        # ais
        aisCalcClassD = JPackage("infodynamics.measures.discrete").ActiveInformationCalculatorDiscrete
        aisCalcD = aisCalcClassD(base,1)

        # continuous estimation
        # piCalcClassC = JPackage("infodynamics.measures.continuous.kernel").MutualInfoCalculatorMultiVariateKernel
        piCalcClassC = JPackage("infodynamics.measures.continuous.kraskov").PredictiveInfoCalculatorKraskov
        piCalcC = piCalcClassC();
        # print dir(piCalcC)
        piCalcC.setProperty("NORMALISE", "true"); # Normalise the individual variables
    
        # active information storage
        aisCalcClassC = JPackage("infodynamics.measures.continuous.kraskov").ActiveInfoStorageCalculatorKraskov
        aisCalcC = aisCalcClassC()
        aisCalcC.setProperty("NORMALISE", "false"); # Normalise the individual variables
    
        # FIXME: do a shutdownJVM after being finished, use isJVMStarted?, attachtoJVM
    
    """Learner reward data"""
    def __init__(self, idim=1, odim=1, memlen=1000, coeff_a = 0.2):
        """Init learnerReward
        
        idim: input dimensionality (default: 1) \n
        odim: output dimensionality (default: 1) \n
        memlen: length of memory to be kept, in steps (default: 1000)"""
        
        self.idim = idim
        self.odim = odim
        # print "learnerReward", self.idim, self.odim
        self.len = memlen
        # reward
        self.perf     = np.zeros((self.odim, 1))
        self.perf_lp  = np.zeros((self.odim, 1))
        self.perf_    = np.zeros((self.len, self.odim)) # FIXME: transpose this
        self.perf_lp_ = np.zeros((self.len, self.odim))
        self.coeff_a  = coeff_a
        # self.coeff_a  = 0.05
        self.mdltr    = np.zeros((self.odim, 1))
        
        # print "learnerReward", self.perf.shape
    def discretize(self, x, llim=None, ulim=None):
        if llim is None and ulim is None:
            bins = np.linspace(np.min(x), np.max(x), learnerReward.base-1)
        else:
            bins = np.linspace(llim, ulim, learnerReward.base-1)
        return np.digitize(x, bins)

    def perf_accel(self, err, acc):
        """Simple reward: let body acceleration point into reduced error direction"""
        self.perf[0:self.odim,0] = np.sign(err) * acc
        # self.perf = np.sign(err) * np.sign(acc) * acc**2
        
    def perf_pos(self, err, acc):
        """Simple reward: let body acceleration point into reduced error direction"""
        self.perf = err

    def perf_gauss_double(self, mean=0., sigma=1.0, accel=0.):
        """Double gaussian for forcing output to be in a given range"""
        g1 = gaussian(mean,  sigma, accel) 
        g2 = gaussian(-mean, sigma, accel)
        self.perf = g1 + g2 # .reshape((1,2))

    @dec_compute_infth_soft()
    def perf_pi_discrete(self, x, avg=False):
        if avg:
            # compute average PI
            return learnerReward.piCalcD.computeAverageLocal(x)
        else:
            # compute list of momentary PI estimates
            pi = learnerReward.piCalcD.computeLocal(x)
            # return last element of list
            return list(pi)[-1]

    @dec_compute_infth_soft()
    def perf_ais_discrete(self, x, avg=False):
        if avg:
            # compute average AIS
            return learnerReward.aisCalcD.computeAverageLocal(x)
        else:
            # compute list of momentary AIS estimates
            pi = learnerReward.aisCalcD.computeLocal(x)
            # return last element of list
            return list(pi)[-1]

        
    @dec_compute_infth_soft()
    def perf_pi_continuous(self, x):
        # Use history length 1 (Schreiber k=1), kernel width of 0.5 normalised units
        # learnerReward.piCalcC.initialise(40, 1, 0.5);
        # print "perf_pi_continuous", x
        learnerReward.piCalcC.initialise(100, 1)
        # src = np.atleast_2d(x[0:-1]).T # start to end - 1
        # dst = np.atleast_2d(x[1:]).T # 1 to end
        # learnerReward.piCalcC.setObservations(src, dst)
        # src = np.atleast_2d(x).T # start to end - 1
        # learnerReward.piCalcC.setObservations(src.reshape((src.shape[0],)))
        learnerReward.piCalcC.setObservations(x)
        # print type(src), type(dst)
        # print src.shape, dst.shape
        return learnerReward.piCalcC.computeAverageLocalOfObservations()# * -1

    @dec_compute_infth_soft()
    def perf_ais_continuous(self, x):
        # Use history length 1 (Schreiber k=1), kernel width of 0.5 normalised units
        # learnerReward.piCalcC.initialise(40, 1, 0.5);
        # print "perf_pi_continuous", x
        learnerReward.aisCalcC.initialise(100, 1);
        # src = np.atleast_2d(x[0:-1]).T # start to end - 1
        # dst = np.atleast_2d(x[1:]).T # 1 to end
        # learnerReward.piCalcC.setObservations(src, dst)
        # src = np.atleast_2d(x).T # start to end - 1
        # learnerReward.piCalcC.setObservations(src.reshape((src.shape[0],)))
        learnerReward.aisCalcC.setObservations(x)
        # print type(src), type(dst)
        # print src.shape, dst.shape
        return learnerReward.aisCalcC.computeAverageLocalOfObservations()# * -1
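A brief usage sketch of the reward class above, assuming HAVE_JPYPE is True, the JVM is running with the JIDT jar, and the module-level imports and decorators (np, dec_compute_infth_soft) are available.

import numpy as np

rew = learnerReward(idim=1, odim=1, memlen=1000)
x = np.sin(np.linspace(0, 10 * np.pi, 1000))   # toy 1-D signal
x_d = rew.discretize(x, llim=-1.0, ulim=1.0)   # map onto the discrete base
pi_avg = rew.perf_pi_discrete(x_d, avg=True)   # average predictive information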
Example #11
#   limitations under the License.
#
# *****************************************************************************

import time
import os.path

from jpype import javax, JProxy, JPackage, startJVM, getDefaultJVMPath, \
    shutdownJVM

root = os.path.abspath(os.path.dirname(__file__))
classes = os.path.join(root, "classes")
startJVM(getDefaultJVMPath(), "-ea", "-Djava.class.path={0}".format(classes))

# XML test
Element = JPackage("org").w3c.dom.Element


class ContentHandler(object):
    def characters(self, ch, start, length):
        pass

    def endDocument(self):
        pass

    def endElement(self, namespaceURI, localName, qName):
        pass

    def endPrefixMapping(self, prefix):
        pass
Example #12
    def __init__(self,
                 cursor: Cursor,
                 return_type=tuple,
                 upper_case: bool = True,
                 include_none: bool = False):
        """
        Instantiate a DataTransformer

        @param cursor: Cursor containing query data.
        @param return_type: (optional) return type of the transformation. May be list, tuple (default), dict, or
            OrderedDict (see collections)
        @param upper_case: bool - transform column names to upper case (defaults to True)
        @param include_none: bool - include None values in dictionary return types. Defaults to False
        @return DataTransformer

        @raise ValueError if the cursor has no data
        @raise TypeError on a wrong cursor type, or wrong return type
        """
        global JAVA_STRING

        if not isinstance(cursor, Cursor):
            raise TypeError(
                'Variable for the DataTransformer must be a Cursor. Found: ' +
                type(cursor).__name__)
        elif cursor.description is None:
            raise ValueError(
                'Cannot create a DataTransformer on a cursor without data.')

        expected_types = [list, tuple, dict, OrderedDict]
        if return_type not in expected_types:
            str_types = [str(t).split("'")[1] for t in expected_types]
            raise TypeError(
                'Specified return type must be one of: %s. Found: %s' %
                (', '.join(str_types), type(return_type).__name__))
        self.return_type = return_type
        self.include_none = verified_boolean(include_none)

        upper_case = verified_boolean(upper_case)

        columns = []
        column_types = []
        for x in range(cursor._meta.getColumnCount()):
            name = cursor._meta.getColumnLabel(x + 1)
            if upper_case:
                name = name.upper()
            columns.append(name)
            type_def = cursor.description[x][1]
            if type_def is None:
                column_types.append(COLUMN_TYPE_STRING)
            elif len({'INTEGER', 'DECIMAL', 'NUMERIC'}.intersection(
                    type_def.values)) > 0:
                column_types.append(COLUMN_TYPE_NUMBER)
            elif len({'FLOAT', 'REAL', 'DOUBLE'}.intersection(
                    type_def.values)) > 0:
                column_types.append(COLUMN_TYPE_FLOAT)
            elif 'TIMESTAMP' in type_def.values:
                column_types.append(COLUMN_TYPE_DATE)
            else:
                column_types.append(COLUMN_TYPE_STRING)
        self.columns = tuple(columns)
        self.transformer = column_types
        self.nr_of_columns = len(columns)

        if JAVA_STRING is None:
            # JVM must have started for this
            JAVA_STRING = JPackage('java').lang.String
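A short sketch of how this constructor might be used; cursor is assumed to be a Cursor holding executed query results obtained elsewhere in this package (hypothetical), and JAVA_STRING is lazily bound on first construction.

# cursor: a Cursor with query results (obtained elsewhere; hypothetical here)
transformer = DataTransformer(cursor, return_type=dict,
                              upper_case=True, include_none=False)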
Example #13
 def setUp(self):
     common.JPypeTestCase.setUp(self)
     self.jl = JPackage('java.lang')
Example #14
 def testInvalid(self):
     JL = JPackage("java.lng")
     with self.assertRaisesRegex(AttributeError,
                                 "Java package 'java.lng' is not valid"):
         getattr(JL, "foo")
Example #15
 def testMultipleInterfaces(self):
     j = JPackage("jpype").mro.MultipleInterfaces
     myinstance = j()
Example #16
 def testByteArrayAsString(self):
     t = JPackage("jpype").array.TestArray()
     v = t.byteArray
     assert str(v) == 'avcd'
Example #17
def startJVM(jvm=None, args=None):
    """
    semantically the same like jpype.startJVM, but appends the stallone jar to
    the (given) classpath.
    
    Parameters
    ----------
    jvm : (optional) string 
        Path to jvm (see jpype.getDefaultJVMPath), if none is given a path
        will be choosen via jpype.
    
    args : (optional) list
    list of additional jvm parameters like ['-xms', '64m'] etc.
    
    """
    import os
    import pkg_resources

    global stallone, API

    if not jvm:
        jvm = getDefaultJVMPath()

    if not os.path.exists(jvm):
        raise RuntimeError('jvm path "%s" does not exist!' % jvm)

    if not args:
        args = []

    def append_to_classpath(args):
        """
        args := args for startJVM containing classpath str
        """
        # define classpath separator
        if os.name == 'posix':
            sep = ':'
        else:
            sep = ';'

        stallone_jar_file = pkg_resources.resource_filename(
            'pystallone', stallone_jar)
        if not os.path.exists(stallone_jar_file):
            raise RuntimeError('stallone jar not found! Expected it here: %s' %
                               stallone_jar_file)

        cp_extended = False
        n = len(args) if hasattr(args, '__len__') else 0

        arg_ind = -1
        cp_ind = -1

        # search for classpath definition and extend it, if found.
        for i in range(n):
            cp_ind = args[i].find('-Djava.class.path=')
            # append stallone jar to classpath
            if cp_ind != -1:
                # find end of classpath
                arg_ind = i
                p = args[i][cp_ind:].find(' ')
                end = p if p > 0 else None
                cp_str = args[i][cp_ind:end] + sep + stallone_jar_file
                cp_extended = True
                break

        if not cp_extended:
            args.append('-Djava.class.path=' + stallone_jar_file)
        else:
            args[arg_ind] = cp_str

        return args

    args = append_to_classpath(args)
    _startJVM(jvm, *args)

    stallone = JPackage('stallone')
    API = stallone.api.API
    if type(API).__name__ != 'stallone.api.API$$Static':
        raise RuntimeError('Stallone package initialization borked. '
                           'Check your JAR/classpath!')
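A usage sketch for the wrapper above; the jar path and heap flag are illustrative only, and the bundled stallone jar is appended to -Djava.class.path automatically.

# start the JVM with a user classpath plus an extra heap setting
startJVM(args=['-Djava.class.path=/opt/jars/mylib.jar', '-Xmx512m'])
print(API)  # the stallone.api.API static accessor is now bound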
Example #18
    def testIterateArray(self):
        t = JPackage("jpype").array.TestArray()
        assert not isinstance(t, JPackage)

        for i in t.i:
            assert i != 0
Example #19
def throwByJavaException():
    JPackage('jpype').exc.ExceptionTest.throwIOException()
Example #20
    def testGetSubclass(self):
        t = JPackage("jpype").array.TestArray()
        v = t.getSubClassArray()

        assert isinstance(v[0], str)
Example #21
def compute_conditional_transfer_entropy(src, dst, cond, delay=0, xcond=False):
    """!@brief compute the conditional transfer entropy using JIDT

    params
    src: source variables
    dst: destination variables
    cond: conditioning variables
    delay: delay u between src/dst
    xcond: do cross-conditioning, assuming src and cond are the same vector
    """

    numsrcvars, numdstvars, numcondvars = (src.shape[1], dst.shape[1], cond.shape[1])

    cteCalcClassC = JPackage("infodynamics.measures.continuous.kraskov").ConditionalTransferEntropyCalculatorKraskov
    # teCalcClassC = JPackage("infodynamics.measures.continuous.kernel").TransferEntropyCalculatorKernel
    cteCalcC = cteCalcClassC()
    cteCalcC.setProperty("NORMALISE", "true")
    # k is destination embedding length
    # cteCalcC.setProperty(cteCalcC.K_PROP_NAME, "1")
    # teCalcC.setProperty("k", "100")
    # l is source embedding length
    # cteCalcC.setProperty(cteCalcC.L_PROP_NAME, "1")
    # cteCalcC.setProperty(cteCalcC.DELAY_PROP_NAME, "0")
    # teCalcC.setProperty(teCalcC.PROP_AUTO_EMBED_METHOD, "AUTO_EMBED_METHOD_NONE")
    # print("teCalcClassC", teCalcClassC, "teCalcC", teCalcC)

    # init return container
    measmat  = np.zeros((numdstvars, numsrcvars))

    # init calc params
    k = 1
    k_tau = 1
    l = 1
    l_tau = 1
    cond_emb_len = 1
    cond_emb_tau = 1
    cond_emb_delay = 0

    # loop over all combinations
    for m in range(numdstvars):
        for s in range(numsrcvars):
            # print("m,s", m, s)
            # cteCalcC.initialise(1, 1, 1, 1, 0, [1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0])
            # k, k_tau, l, l_tau, delay,
            # cteCalcC.initialise(1, 1, 1, 1, delay, [1] * numcondvars, [1] * numcondvars, [0] * numcondvars)

            condsl = list(range(numcondvars))  # list so entries can be deleted for xcond
            numcondvars_ = numcondvars
            # cross-condition with src/cond being the same vector, condition on all vector elements besides s
            if xcond:
                del condsl[s]
                numcondvars_ -= 1

            # print "numsrcvars = %d, numdstvars = %d, numcondvars = %d, numcondvars_ = %d" % (numsrcvars, numdstvars, numcondvars, numcondvars_)
            # print "condsl = %s" % (condsl, )

            cond_emb_lens = [cond_emb_len] * numcondvars_
            cond_emb_taus = [cond_emb_tau] * numcondvars_
            cond_emb_delays = [cond_emb_delay] * numcondvars_

            # re-initialise calc
            cteCalcC.initialise(k, k_tau, l, l_tau, delay,
                                cond_emb_lens,
                                cond_emb_taus,
                                cond_emb_delays)
            # load the data
            cteCalcC.setObservations(src[:,s], dst[:,m], cond[:,condsl])
            # compute the measures
            cte = cteCalcC.computeAverageLocalOfObservations()
            # tes = teCalcC.computeSignificance(10)
            # print("cte", cte)
            measmat[m,s] = cte

    return measmat
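A minimal call sketch with toy arrays; it assumes the JVM is already started with the JIDT jar on its classpath.

import numpy as np

T = 2000
src = np.random.randn(T, 2)    # source variables, one per column
dst = np.random.randn(T, 2)    # destination variables
cond = np.random.randn(T, 3)   # conditioning variables
te_mat = compute_conditional_transfer_entropy(src, dst, cond, delay=0)
print(te_mat.shape)            # (numdstvars, numsrcvars)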
Example #22
class LPZRosEH(object):
    modes = {"eh_pi_d": 2, "eh_ais_d": 3, "eh_pi_c_l": 4, "eh_var": 5, "eh_ent_d": 6, "eh_pi_c_avg": 7}

    base = 1000
    basehalf = base // 2
    
    entCalcClassD = JPackage("infodynamics.measures.discrete").EntropyCalculatorDiscrete
    entCalc = entCalcClassD(base)

    piCalcClass = JPackage("infodynamics.measures.discrete").PredictiveInformationCalculatorDiscrete
    # print piCalcClass
    piCalc10 = piCalcClass(base,1)

    aisCalcClassD = JPackage("infodynamics.measures.discrete").ActiveInformationCalculatorDiscrete
    aisCalc = aisCalcClassD(base,1)
        
    piCalcClassC = JPackage("infodynamics.measures.continuous.kernel").MutualInfoCalculatorMultiVariateKernel
    piCalc = piCalcClassC();
    piCalc.setProperty("NORMALISE", "true"); # Normalise the individual variables
    
    def __init__(self, mode="hs"):
        self.init = False
        self.name = "lpzros"
        self.mode = LPZRosEH.modes[mode]
        self.cnt = 0
        ############################################################
        # ros stuff
        rospy.init_node(self.name)
        # pub=rospy.Publisher("/motors", Float64MultiArray, queue_size=1)
        self.pub_motors   = rospy.Publisher("/motors", Float64MultiArray)
        self.pub_res_r    = rospy.Publisher("/reservoir/r", Float64MultiArray)
        self.pub_res_w    = rospy.Publisher("/reservoir/w", Float64MultiArray)
        self.pub_res_perf = rospy.Publisher("/reservoir/perf", Float64MultiArray)
        self.pub_res_perf_lp = rospy.Publisher("/reservoir/perf_lp", Float64MultiArray)
        self.pub_res_mdltr = rospy.Publisher("/reservoir/mdltr", Float64MultiArray)
        self.sub_sensor   = rospy.Subscriber("/sensors", Float64MultiArray, self.cb_sensors)
        # pub=rospy.Publisher("/chatter", Float64MultiArray)
        self.msg          = Float64MultiArray()
        self.msg_res_r    = Float64MultiArray()
        self.msg_res_w    = Float64MultiArray()
        self.msg_res_perf = Float64MultiArray()
        self.msg_res_perf_lp = Float64MultiArray()
        self.msg_res_mdltr = Float64MultiArray()

        # controller
        self.N = 200
        self.p = 0.1
        # self.g = 1.5
        # self.g = 1.2
        self.g = 0.999
        # self.g = 0.001
        # self.g = 0.
        self.alpha = 1.0

        self.wf_amp = 0.0
        # self.wf_amp = 0.005
        # self.wi_amp = 0.01
        # self.wi_amp = 0.1
        # self.wi_amp = 1.0 # barrel
        self.wi_amp = 5.0

        self.idim = 2
        self.odim = 2

        # simulation time / experiment duration
        # self.nsecs = 500 #1440*10
        # self.nsecs = 250
        # self.nsecs = 100
        self.nsecs = 50 #1440*10
        self.dt = 0.005
        self.dt = 0.01
        self.learn_every = 1
        self.test_rate = 0.95
        self.washout_rate = 0.05
        self.simtime = np.arange(0, self.nsecs, self.dt)
        self.simtime_len = len(self.simtime)
        self.simtime2 = np.arange(1*self.nsecs, 2*self.nsecs, self.dt)

        # self.x0 = 0.5*np.random.normal(size=(self.N,1))
        self.x0 = 0.5 * np.random.normal(size=(self.N, 1))
        self.z0 = 0.5 * np.random.normal(size=(1, self.odim))

        # EH stuff
        self.z_t = np.zeros((2,self.simtime_len))
        self.zn_t = np.zeros((2,self.simtime_len)) # noisy readout
        # self.zp_t = np.zeros((2,self.simtime_len))
        self.wo_len_t = np.zeros((2,self.simtime_len))
        self.dw_len_t = np.zeros((2,self.simtime_len))
        self.perf = np.zeros((1,self.odim))
        self.perf_t = np.zeros((2,self.simtime_len))
        self.mdltr = np.zeros((1,2))
        self.mdltr_t = np.zeros((2,self.simtime_len))
        self.r_t = np.zeros(shape=(self.N, self.simtime_len))
        self.zn_lp = np.zeros((1,2))
        self.perf_lp = np.zeros((1,2))
        self.perf_lp_t = np.zeros((2,self.simtime_len))
        # self.coeff_a = 0.2
        # self.coeff_a = 0.1
        self.coeff_a = 0.05
        # self.coeff_a = 0.03
        # self.coeff_a = 0.001
        # self.coeff_a = 0.0001
        # self.eta_init = 0.0001
        # self.eta_init = 0.001 # limit energy in perf
        self.eta_init = 0.0025 # limit energy in perf
        # self.eta_init = 0.01 # limit energy in perf
        self.T = 200000.
        # self.T = 2000.

        # exploration noise
        # self.theta = 1.0
        # self.theta = 1/4.
        self.theta = 0.1
        # self.state noise
        self.theta_state = 0.1
        # self.theta_state = 1e-3

        # leaky integration time constant
        # self.tau
        # self.tau = 0.001
        # self.tau = 0.005
        self.tau = 0.2
        # self.tau = 0.9
        # self.tau = 0.99
        # self.tau = 1.

        ############################################################
        # alternative controller network / reservoir from lib
        # FIXME: use config file
        # FIXME: use input coupling matrix for non-homogenous input scaling
        self.res = Reservoir2(N = self.N, p = self.p, g = self.g, alpha = 1.0, tau = self.tau,
                         input_num = self.idim, output_num = self.odim, input_scale = self.wi_amp,
                         feedback_scale = self.wf_amp, bias_scale = 0., eta_init = self.eta_init,
                         sparse=True)
        self.res.theta = self.theta
        self.res.theta_state = self.theta_state
        self.res.coeff_a = self.coeff_a

        self.res.x = self.x0
        self.res.r = np.tanh(self.res.x)
        self.res.z = self.z0
        self.res.zn = np.atleast_2d(self.z0).T

        # print res.x, res.r, res.z, res.zn

        print('   N: ', self.N)
        print('   g: ', self.g)
        print('   p: ', self.p)
        print('   nsecs: ', self.nsecs)
        print('   learn_every: ', self.learn_every)
        print('   theta: ', self.theta)

        # set point for goal based learning
        self.sp = 0.4

        # explicit memory
        # self.piwin = 100
        # self.piwin = 200
        # self.piwin = 500
        # self.piwin = 1000
        self.piwin = 2000

        self.x_t = np.zeros((self.idim, self.piwin))
        self.z_t = np.zeros((self.odim, self.piwin))

        # self.wgt_lim = 0.5 # barrel
        self.wgt_lim = 0.1
        self.wgt_lim_inv = 1/self.wgt_lim
        
        self.init = True
        print "init done"

    def soft_bound(self):
        # FIXME: for now its actually hard bounded
        # FIXME: modulate self.eta_init for effective soft bounds
        # FIXME: decouple the readouts / investigate coupled vs. uncoupled
        # 1 determine norm
        wo_norm = np.linalg.norm(self.res.wo, 2)
        print "|wo| =", wo_norm
        # 2 scale weights down relatively to some setpoint norm
        if wo_norm > self.wgt_lim:
            self.res.wo /= (wo_norm * self.wgt_lim_inv)
            # 3 slight randomization / single weight flips?
            for ro_idx in range(self.odim):
                if np.random.uniform(0., 1.) > 0.95:
                    numchoice = np.random.randint(0, 5)
                    selidx = np.random.choice(self.N, numchoice, replace=False)
                    print "randomize weights", selidx
                    # self.res.wo[selidx, ro_idx] += np.random.normal(self.res.wo[selidx, ro_idx], 0.001)
                    # reduce weights only
                    self.res.wo[selidx, ro_idx] -= np.random.exponential(0.001, numchoice)
                    
    def cb_sensors(self, msg):
        """lpz sensors callback: receive sensor values, sos algorithm attached"""
        if not self.init: return
        # self.msg.data = []
        self.x_t = np.roll(self.x_t, -1, axis=1) # push back past
        # self.z = np.roll(self.y, 1, axis=1) # push back past

        # update input with new sensor data
        u = np.reshape(np.asarray(msg.data), (self.idim, 1))
        self.x_t[:,-1] = u.reshape((self.idim,))
        # compute network output
        self.res.execute(u)
        # print self.res.z

        # learning
        dw = 0
        # if self.cnt > 0: # self.piwin:
        if self.cnt > self.piwin:
            for sysdim in range(self.idim):
                attachThreadToJVM()
                # x_tmp = self.x_t[sysdim,:]
                # x_tmp = (((self.x_t[sysdim,:] / (2*np.pi)) + 0.5) * 999).astype(int)
                # discrete ENT / PI / AIS / ...
                # FIXME: use digitize and determine bin boundaries from min/max
                x_tmp = (((self.x_t[sysdim,:] / 3.) + 0.5) * 999).astype(int)
                # print "x_tmp", sysdim, x_tmp
                # x_tmp = x_tmp - np.min(x_tmp)
                # x_tmp = ((x_tmp / np.max(x_tmp)) * (LPZRosEH.base-1)).astype(int)
                # print "x_tmp", x_tmp
                # # print "jvm", isThreadAttachedToJVM()
                # pis = LPZRosEH.piCalc10.computeLocal(x_tmp)
                if self.mode == LPZRosEH.modes["eh_ent_d"]: # EH learning, discrete PI
                    # plain entropy
                    pis = LPZRosEH.entCalc.computeLocal(x_tmp)
                    self.perf[0,sysdim] = list(pis)[-1] * -1
                elif self.mode == LPZRosEH.modes["eh_pi_d"]: # EH learning, discrete PI
                    # predictive information
                    # pis = LPZRosEH.piCalc10.computeLocal(x_tmp)
                    # self.perf[0,sysdim] = list(pis)[-1]
                    pis = LPZRosEH.piCalc10.computeAverageLocal(x_tmp)
                    self.perf[0,sysdim] = pis
                elif self.mode == LPZRosEH.modes["eh_ais_d"]: # EH learning, discrete PI
                    # pis = LPZRosEH.aisCalc.computeLocal(x_tmp)
                    # self.perf[0,sysdim] = list(pis)[-1]
                    pis = LPZRosEH.aisCalc.computeAverageLocal(x_tmp)
                    self.perf[0,sysdim] = pis
                elif self.mode == LPZRosEH.modes["eh_pi_c_l"]: # EH learning, discrete PI
                    # local continuous predictive information
                    LPZRosEH.piCalc.initialise(1, 1, 0.5); # Use history length 1 (Schreiber k=1),
                    x_src = np.atleast_2d(self.x_t[sysdim,0:-1]).T
                    x_dst = np.atleast_2d(self.x_t[sysdim,1:]).T
                    LPZRosEH.piCalc.setObservations(x_src, x_dst)
                    pis = LPZRosEH.piCalc.computeLocalOfPreviousObservations()
                    self.perf[0,sysdim] = list(pis)[-1]
                elif self.mode == LPZRosEH.modes["eh_pi_c_avg"]: # EH learning, discrete PI
                    # average continuous predictive information
                    LPZRosEH.piCalc.initialise(1, 1, 0.5); # Use history length 1 (Schreiber k=1),
                    x_src = np.atleast_2d(self.x_t[sysdim,0:-1]).T
                    x_dst = np.atleast_2d(self.x_t[sysdim,1:]).T
                    LPZRosEH.piCalc.setObservations(x_src, x_dst)
                    self.perf[0,sysdim] = LPZRosEH.piCalc.computeAverageLocalOfObservations()
                elif self.mode == LPZRosEH.modes["eh_var"]: # EH learning, discrete PI
                    # variance
                    self.perf[0,sysdim] = np.var(self.x_t[sysdim,:])
                else:
                    self.perf[0,sysdim] = 0.
                    # print "pis", pis
            print "perf", self.perf

            # recent performance
            self.perf_lp = self.perf_lp * (1 - self.coeff_a) + self.perf * self.coeff_a
            
            ############################################################
            # learning
            # FIXME: put that into res / model member function
            for ro_idx in range(self.odim):
                # if gaussian / acc based
                # if perf[0, ro_idx] > (perf_lp[0, ro_idx] + 0.1):
                # for information based
                # FIXME: consider single perf / modulator for all readouts
                if self.perf[0, ro_idx] > self.perf_lp[0, ro_idx]:
                    self.mdltr[0, ro_idx] = 1.
                else:
                    self.mdltr[0, ro_idx] = 0.
            eta = self.eta_init / (1 + (self.cnt/self.T))
            # eta = eta_init
            # dw = eta * (zn_t[0, ti - 20] - zn_lp) * mdltr * r
            # dw = eta * (zn.T - zn_lp) * mdltr * r
            # print "2D dbg" zn.shape, zn_lp.shape
            if True: # self.cnt < self.test_rate * self.simtime_len:
                # dw = eta * (zn.T - zn_lp) * mdltr * r
                # wo += dw
                dw = eta * (self.res.zn.T - self.res.zn_lp.T) * self.mdltr * self.res.r
                # print dw
                self.res.wo += dw
                # FIXME: apply soft bounding on weights or weight decay
                self.soft_bound()
            else:
                dw = np.zeros(self.res.r.shape)
                self.mdltr[0,:] = 0.

                # if np.abs(ip2d.x[ti,0]) > 10. or np.abs(ip2d.x[ti,1]) > 10.:
                #     sys.exit()

        # # check this
        # bins = np.arange(-1, 1.1, 0.1)
        # x_tmp = np.digitize(x[:,0], bins)

        # base = np.max(x_tmp)+1 # 1000
        # basehalf = base/2
        # piCalc10 = piCalcClassD(base,3)
        # aisCalc10 = aisCalcClassD(base,5)

        # pi = list(piCalc10.computeLocal(x_tmp))
        # ais = list(aisCalc10.computeLocal(x_tmp))
                    
        self.msg.data = self.res.zn.flatten().tolist()
        # print self.msg.data
        # print("sending msg", msg)
        self.pub_motors.publish(self.msg)
        self.msg_res_r.data = self.res.r.flatten().tolist()
        self.pub_res_r.publish(self.msg_res_r)
        self.msg_res_w.data = np.linalg.norm(self.res.wo, 2, axis=0)
        self.pub_res_w.publish(self.msg_res_w)
        self.msg_res_perf.data = self.perf.flatten().tolist()
        self.pub_res_perf.publish(self.msg_res_perf)
        self.msg_res_perf_lp.data = self.perf_lp.flatten().tolist()
        self.pub_res_perf_lp.publish(self.msg_res_perf_lp)
        self.msg_res_mdltr.data = self.mdltr.flatten().tolist()
        self.pub_res_mdltr.publish(self.msg_res_mdltr)
        # time.sleep(0.1)
        # if self.cnt > 20:
        #     rospy.signal_shutdown("stop")
        #     sys.exit(0)
        self.cnt += 1
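A hedged entry-point sketch: instantiate the node and let ROS deliver /sensors callbacks. It assumes a running roscore, the lpzrobots sensor/motor topics, and the surrounding imports (rospy, Reservoir2) with the JVM already attached.

if __name__ == '__main__':
    lpzros = LPZRosEH(mode="eh_pi_d")   # EH learning with discrete PI reward
    rospy.spin()                        # process sensor callbacks until shutdown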
Example #23
 def testByteArrayAsString(self):
     t = JPackage("jpype").array.TestArray()
     v = t.byteArray
     self.assertEqual(str(v), 'avcd')
Example #24
# temp_array = file.read().split("\n")
# file_array = []
# for line in temp_array:
# 	temp = line.split(" ")
# 	file_array.append(temp)

# file_rid = open("foo.txt", "wb") #to open file in append mode use a+
# max_retry = 3

nlp_url = "http://0.0.0.0:10001/hello"  # server_nlp.py in NLP folder.
elasticsearch_url = "http://104.199.168.125:9200"
tp_url = "http://0.0.0.0:9989/integrated"  # topic_modeller_ssimplified.
s = requests.Session()  #session objects to send requests.
startJVM(getDefaultJVMPath(), "-ea")

testPkg = JPackage('com.rygbee.simdoc')
SimDocInterface = testPkg.SimDocInterface


def constructNewsfeedDictionary(user_posts):  #default show all posts
    data = []
    for post in user_posts:
        tag_list = ""
        post_dic = {}
        authors = ""
        post_dic["text"] = post["_source"]["raw_text"]
        for author in post["_source"]["authors"]:
            authors = author + " "
        post_dic['authors'] = authors
        post_dic['title'] = str(post["_source"]["title"][0])
        post_dic[
Example #25
    def testGetSubclass(self):
        t = JPackage("jpype").array.TestArray()
        v = t.getSubClassArray()

        self.assertTrue(isinstance(v[0], unicode))
Example #26
try:
    from jpype import startJVM, getDefaultJVMPath, JPackage
except ImportError:
    pass
#------------PUT THE FOLLOWING IN EACH PROGRAM--------------------

classpath = "dist/nats-client.jar:dist/nats-shared.jar:dist/json.jar:dist/rmiio-2.1.2.jar:dist/commons-logging-1.2.jar"

startJVM(getDefaultJVMPath(), "-ea", "-Djava.class.path=%s" % classpath)

# Flight phase value definition
# You can detect and know the flight phase by checking its value
FLIGHT_PHASE_PREDEPARTURE = JPackage(
    'com').osi.util.FlightPhase.FLIGHT_PHASE_PREDEPARTURE
FLIGHT_PHASE_ORIGIN_GATE = JPackage(
    'com').osi.util.FlightPhase.FLIGHT_PHASE_ORIGIN_GATE
FLIGHT_PHASE_PUSHBACK = JPackage(
    'com').osi.util.FlightPhase.FLIGHT_PHASE_PUSHBACK
FLIGHT_PHASE_RAMP_DEPARTING = JPackage(
    'com').osi.util.FlightPhase.FLIGHT_PHASE_RAMP_DEPARTING
FLIGHT_PHASE_TAXI_DEPARTING = JPackage(
    'com').osi.util.FlightPhase.FLIGHT_PHASE_TAXI_DEPARTING
FLIGHT_PHASE_RUNWAY_THRESHOLD_DEPARTING = JPackage(
    'com').osi.util.FlightPhase.FLIGHT_PHASE_RUNWAY_THRESHOLD_DEPARTING
FLIGHT_PHASE_TAKEOFF = JPackage(
    'com').osi.util.FlightPhase.FLIGHT_PHASE_TAKEOFF
FLIGHT_PHASE_CLIMBOUT = JPackage(
    'com').osi.util.FlightPhase.FLIGHT_PHASE_CLIMBOUT
FLIGHT_PHASE_HOLD_IN_DEPARTURE_PATTERN = JPackage(
    'com').osi.util.FlightPhase.FLIGHT_PHASE_HOLD_IN_DEPARTURE_PATTERN
FLIGHT_PHASE_CLIMB_TO_CRUISE_ALTITUDE = JPackage(
Example #27
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 14 12:46:18 2018

@author: hcji
"""

import os
import numpy as np
from jpype import isJVMStarted, startJVM, getDefaultJVMPath, JPackage
import PyFingerprint

if not isJVMStarted():
    cdk_path = os.path.join(PyFingerprint.__path__[0], 'CDK', 'cdk-2.2.jar')
    startJVM(getDefaultJVMPath(), "-ea", "-Djava.class.path=%s" % cdk_path)
    cdk = JPackage('org').openscience.cdk
    

def cdk_parser_smiles(smi):
    sp = cdk.smiles.SmilesParser(cdk.DefaultChemObjectBuilder.getInstance())
    try:
        mol = sp.parseSmiles(smi)
    except Exception:
        raise IOError('invalid smiles input')
    return mol


def cdk_fingerprint(smi, fp_type="daylight", size=1024, depth=6, output='bit'):
    if fp_type == 'maccs':
        nbit = 166
    elif fp_type == 'estate':
Example #28
 def testMro(self):
     C = JPackage('jpype.mro').C
Example #29
def infth_mi_multivariate(data={},
                          estimator="kraskov1",
                          normalize=True,
                          delay=0):
    """infth_mi_multivariate

    Compute the total (scalar) multivariate mutual information
    
    see also playground/infth_feature_relevance
    """
    # print "infth_mi_multivariate estimator = %s" % estimator
    # init class and instance
    if estimator == 'kraskov1':
        mimvCalcClass = JPackage("infodynamics.measures.continuous.kraskov"
                                 ).MutualInfoCalculatorMultiVariateKraskov1
    elif estimator == 'kraskov2':
        mimvCalcClass = JPackage("infodynamics.measures.continuous.kraskov"
                                 ).MutualInfoCalculatorMultiVariateKraskov2
    elif estimator == 'kernel':
        mimvCalcClass = JPackage("infodynamics.measures.continuous.kernel"
                                 ).MutualInfoCalculatorMultiVariateKernel

    # instantiate
    mimvCalc = mimvCalcClass()

    # set properties
    mimvCalc.setProperty("NORMALISE", str(normalize).lower())
    # mimvCalc.setProperty("PROP_TIME_DIFF", str(delay))

    # print "measures_infth: infth_mi_multivariate: mimvCalc.timeDiff = %d" % (mimvCalc.timeDiff)

    mimvCalc.timeDiff = delay

    # print "measures_infth: infth_mi_multivariate: mimvCalc.timeDiff = %d" % (mimvCalc.timeDiff)

    # prepare data and attributes
    src, dst = prepare_data_and_attributes(data)
    # src_ = src.copy()
    # src = dst.copy()

    # pl.hist(src[0], bins=255)
    # pl.show()

    # print "infth_mi_multivariate src/dst shapes", src.shape, dst.shape
    # print "infth_mi_multivariate src/dst dtypes", src.dtype, dst.dtype

    dim_src, dim_dst = src.shape[1], dst.shape[1]

    # compute stuff
    # mimvCalc.initialise()
    mimvCalc.initialise(dim_src, dim_dst)
    mimvCalc.setObservations(src, dst)
    # the average global MI between all source channels and all destination channels
    try:
        mimv_avg = mimvCalc.computeAverageLocalOfObservations()
    except Exception as e:
        mimv_avg = np.random.uniform(0, 1e-5, (1, 1))  # np.zeros((1,1))
        logger.error(
            "Error occured in mimv calc, %s. Setting default mimv_avg = %s" %
            (e, mimv_avg))
    return mimv_avg
Example #30
	def generate(self,
				flight_plan_type,
				origin_airport,
				destination_airport,
				origin_gate,
				destination_gate,
				departure_runway,
				arrival_runway) :
		fp_generated = "" # Reset
				
		self.starting_latitude = "" # Reset
		self.starting_longitude = "" # Reset
		self.selected_approach = ""
		self.selected_sid = ""
		self.selected_star = ""
		
		tmp_departing_surface_plan_string = "" # Reset
		tmp_landing_surface_plan_string = "" # Reset
		
		departing_runway_entry = "" # Reset
		landing_runway_end = "" # Reset
		
		if flight_plan_type == self.FLIGHT_PLAN_TYPE_GATE_TO_GATE :
			if origin_airport is None or origin_airport == "" or destination_airport is None or destination_airport == "":
				print("Please input origin and destination airports")
				quit()
			
			if departure_runway is None or departure_runway == "" or arrival_runway is None or arrival_runway == "":
				print("Please input departure and arrival runways")
				quit()
			
			if origin_gate is None or origin_gate == "" or destination_gate is None or destination_gate == "":
				print("Please input origin and destination gates")
				quit()
		elif flight_plan_type == self.FLIGHT_PLAN_TYPE_RUNWAY_TO_RUNWAY :
			if origin_airport is None or origin_airport == "" or destination_airport is None or destination_airport == "":
				print("Please input origin and destination airports")
				quit()
			
			if departure_runway is None or departure_runway == "" or arrival_runway is None or arrival_runway == "":
				print("Please input departure and arrival runways")
				quit()
		elif flight_plan_type == self.FLIGHT_PLAN_TYPE_CRUISE :
			if origin_airport is None or origin_airport == "" or destination_airport is None or destination_airport == "":
				print("Please input origin and destination airports")
				quit()
		elif flight_plan_type == self.FLIGHT_PLAN_TYPE_CRUISE_TO_GATE :
			if origin_airport is None or origin_airport == "" or destination_airport is None or destination_airport == "":
				print("Please input origin and destination airports")
				quit()
			
			if arrival_runway is None or arrival_runway == "":
				print("Please input arrival runway")
				quit()
			
			if destination_gate is None or destination_gate == "":
				print("Please input destination gate")
		else :
			print("Please input valid flight plan type")
			quit()
		
		# =================================================
		
		self.str_readFlightPlan = self.readFlightPlan(origin_airport, destination_airport)

		if (-1 < self.str_readFlightPlan.index(".")) and (self.str_readFlightPlan.index(".") < self.str_readFlightPlan.rindex(".")) :
			self.enroute_fp = self.str_readFlightPlan[self.str_readFlightPlan.index(".")+1 : self.str_readFlightPlan.rindex(".")]

		if flight_plan_type == self.FLIGHT_PLAN_TYPE_GATE_TO_GATE or flight_plan_type == self.FLIGHT_PLAN_TYPE_RUNWAY_TO_RUNWAY or flight_plan_type==self.FLIGHT_PLAN_TYPE_CRUISE_TO_GATE:
			self.result_terminalProcedure = self.getTerminalProcedures(origin_airport, destination_airport, departure_runway, arrival_runway)
			if len(self.result_terminalProcedure) == 3:
				self.selected_sid = self.result_terminalProcedure[0]
				self.selected_star = self.result_terminalProcedure[1]
				self.selected_approach = self.result_terminalProcedure[2]
			
		if (self.enroute_fp.find("/.") == 0) :
			self.enroute_fp = self.enroute_fp[2:]

		if flight_plan_type == self.FLIGHT_PLAN_TYPE_GATE_TO_GATE :
			tmp_node_seq = -1 # Reset
			tmp_node_seq_first_point = -1 # Reset
			
			# Obtain departing surface plan ---------------
			tmp_departing_surface_plan_string = self.generateDepartureTaxiPlan(origin_airport, departure_runway, origin_gate)
			
			# Obtain landing surface plan -----------------
			tmp_landing_surface_plan_string = self.generateArrivalTaxiPlan(destination_airport, arrival_runway, destination_gate)

		elif flight_plan_type == self.FLIGHT_PLAN_TYPE_RUNWAY_TO_RUNWAY :
			tmp_array_node_data = self.natsSim.airportInterface.getLayout_node_data(origin_airport)
			if not(tmp_array_node_data is None):
				for i in range(0, len(tmp_array_node_data)) :
					if (tmp_array_node_data[i][3] == departure_runway) and (tmp_array_node_data[i][4] == "Entry") :
						self.starting_latitude = str(tmp_array_node_data[i][1])
						self.starting_longitude = str(tmp_array_node_data[i][2])
						break
		
		elif (flight_plan_type == self.FLIGHT_PLAN_TYPE_CRUISE) :
			tmp_first_waypoint = self.enroute_fp[: self.enroute_fp.find(".")]
			tmp_lat_lon = self.natsSim.terminalAreaInterface.getWaypoint_Latitude_Longitude_deg(tmp_first_waypoint)
			if not(tmp_lat_lon is None) :
				self.starting_latitude = str(tmp_lat_lon[0])
				self.starting_longitude = str(tmp_lat_lon[1])
		elif (flight_plan_type == self.FLIGHT_PLAN_TYPE_CRUISE_TO_GATE) :
			# Obtain landing surface plan -----------------
			tmp_landing_surface_plan_string = self.generateArrivalTaxiPlan(destination_airport, arrival_runway, destination_gate)
			tmp_first_waypoint = self.enroute_fp[: self.enroute_fp.find(".")]

			tmp_lat_lon = self.natsSim.terminalAreaInterface.getWaypoint_Latitude_Longitude_deg(tmp_first_waypoint)
			if not(tmp_lat_lon is None) :
				self.starting_latitude = str(tmp_lat_lon[0])
				self.starting_longitude = str(tmp_lat_lon[1])
		
		# =================================================
		# Combine the final returning value
		fp_generated = origin_airport
		
		fp_generated = fp_generated + ".<"
		if not(tmp_departing_surface_plan_string == "") :
			fp_generated = fp_generated + tmp_departing_surface_plan_string
		fp_generated = fp_generated + ">"
		
		if not(departure_runway == "") :
			fp_generated = fp_generated + "." + departure_runway	

		if (flight_plan_type == self.FLIGHT_PLAN_TYPE_GATE_TO_GATE) or (flight_plan_type == self.FLIGHT_PLAN_TYPE_RUNWAY_TO_RUNWAY) :
			if not(self.selected_sid == "") :
				fp_generated = fp_generated + "." + self.selected_sid
		
		fp_generated = fp_generated + "." + self.enroute_fp

		if (flight_plan_type == self.FLIGHT_PLAN_TYPE_GATE_TO_GATE) or (flight_plan_type == self.FLIGHT_PLAN_TYPE_RUNWAY_TO_RUNWAY) or (flight_plan_type == self.FLIGHT_PLAN_TYPE_CRUISE_TO_GATE) :
			if not(self.selected_star == "") :
				fp_generated = fp_generated + "." + self.selected_star
			
			if not(self.selected_approach == "") :
				fp_generated = fp_generated + "." + self.selected_approach
		
		if not(arrival_runway == "") :
			fp_generated = fp_generated + "." + arrival_runway
		
		fp_generated = fp_generated + ".<"
		if not(tmp_landing_surface_plan_string == "") :
			fp_generated = fp_generated + tmp_landing_surface_plan_string
		fp_generated = fp_generated + ">"
		
		fp_generated = fp_generated + "." + destination_airport
		
		clsGeometry = JPackage('com').osi.util.Geometry
		
		if not(self.starting_latitude.strip() == "") and not(self.starting_longitude.strip() == "") :
			# Convert latitude/longitude degree string to degree-minute-second format
			self.starting_latitude = clsGeometry.convertLatLonDeg_to_degMinSecString(self.starting_latitude)
			self.starting_longitude = clsGeometry.convertLatLonDeg_to_degMinSecString(self.starting_longitude)
		
		return (fp_generated, self.starting_latitude, self.starting_longitude)
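A hedged call sketch for the method above; airport, gate, and runway identifiers are hypothetical, and planner is assumed to be an initialised instance of the surrounding flight-plan generator class connected to a NATS simulation.

fp_text, start_lat, start_lon = planner.generate(
    planner.FLIGHT_PLAN_TYPE_GATE_TO_GATE,
    "KSFO", "KPHX",             # origin / destination airports (hypothetical)
    "Gate_F_086", "Gate_10",    # origin / destination gates (hypothetical)
    "RW01R", "RW07L")           # departure / arrival runways (hypothetical)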