Example #1
import jpype
import os.path
from getpass import getpass

# ensure that the uniobjects package is included on the JVM class path
if os.path.isabs(__file__):
    root = os.path.dirname(os.path.dirname(__file__))
else:
    root = os.path.dirname(os.getcwd())
lib = os.path.join(root, 'lib')
ou = os.path.join(lib, 'asjava.zip')
default_jvm = jpype.get_default_jvm_path()
jpype.startJVM(default_jvm, '-Djava.class.path={}'.format(ou))

passwd = getpass()
UniSession = jpype.JPackage('asjava').uniobjects.UniSession
sess = UniSession()
try:
    sess.connect('jssrhl2', 'jmileson', passwd, 'JSS')
    parts = sess.openFile('PARTS')
    data = parts.read('010G.11')
    prov = sess.dynArray(data)
finally:
    sess.disconnect()
    jpype.shutdownJVM()
print('Complete')
Example #2
import os
import jpype

# load the JAR file
print("Setting up the JVM and class path to JARs")
jvmPath = "/usr/lib/jvm/java-6-openjdk/jre/lib/amd64/server/libjvm.so"
# "$HOME" is not expanded inside a plain Python string, so resolve it explicitly
home = os.environ.get("HOME", "")
classPath = "-Djava.class.path="
classPath += home + "/research_linux/SELM_Builder/dist/SELM_Builder.jar"
classPath += ":/usr/lib/jvm/java-6-openjdk/jre/lib/rt.jar"
classPath += ":/usr/lib/jvm/java-6-openjdk/jre/lib/resources.jar"
classPath += ":/usr/share/netbeans/java4/modules/ext/appframework-1.0.3.jar"
classPath += ":/usr/share/netbeans/java4/modules/ext/swing-worker-1.1.jar"

jpype.startJVM(jvmPath, classPath)

# load main class package
print("Loading the application packages");
selm_builder = jpype.JPackage("org.atzberger.application.selm_builder");

application_Main = jpype.JClass("org.atzberger.application.selm_builder.application_Main");

# launches the application
print("Launching the application");
application_Main.launchNoArgs();

# gets instance of the application (not sure how this is determined exactly)
print("Getting application data : WARNING may need to call getApplSharedData() after application finishes launching. ");

Atz_Application_Data_Communication = jpype.JClass("org.atzberger.application.selm_builder.Atz_Application_Data_Communication");
applSharedData = Atz_Application_Data_Communication.getApplSharedData();  # allows for Python to control the application

String = jpype.JClass("java.lang.String");
Example #3
import jpype as jp

jp.startJVM(jp.getDefaultJVMPath(), "-ea")

Test = jp.JPackage('Test').Test

Test.speak("Hello from a custom Java class")
t = Test()
print(t.getString())

jp.shutdownJVM()

Example #4
import jpype
import os.path
from flask import Flask, render_template, request

app = Flask(__name__)

PORT = 5015

jvmPath = jpype.getDefaultJVMPath()
jarpath = os.path.join(os.path.abspath('.'), 'D:\\jar\\')
if not jpype.isJVMStarted():
    jpype.startJVM(jvmPath, '-ea',
                   "-Djava.class.path=%s" % (jarpath + 'AccountingSystem.jar'))
print(jarpath + 'AccountingSystem.jar')
jpype.java.lang.System.out.println("Hello World")
JDClass = jpype.JPackage('program').AccountManager  # the Java class
# jd = JDClass()  # an instance


@app.route("/")
def show_web():
    return render_template('main.html')


@app.route("/user/login", methods=['POST'])
def user_login():
    data = request.form
    return JDClass.user_login(data['username'], data['pwd'])


@app.route("/user/logout", methods=['POST'])
Example #5
def jidt_kraskov(self, source, target, opts):
    """Calculate transfer entropy with JIDT's Kraskov implementation.

    Calculate transfer entropy between a source and a target variable using
    JIDT's implementation of the Kraskov type 1 estimator. Transfer entropy is
    defined as the conditional mutual information between the source's past
    state and the target's current value, conditional on the target's past.

    Past states need to be defined in the opts dictionary, where a past state
    is defined as a uniform embedding with parameters history and tau. The
    history describes the number of samples taken from a variable's past, tau
    describes the embedding delay, i.e., the spacing between every two samples
    from the processes' past.

    References:

    Schreiber, T. (2000). Measuring information transfer. Physical Review
    Letters, 85(2), 461.

    Kraskov, A., Stoegbauer, H., & Grassberger, P. (2004). Estimating mutual
    information. Physical review E, 69(6), 066138.

    Lizier, Joseph T. (2014). JIDT: an information-theoretic toolkit for
    studying the dynamics of complex systems. Front. Robot. AI, 1(11).

    This function is meant to be imported into the set_estimator module and used
    as a method in the Estimator_cmi class.

    Args:
        self : instance of Estimator_cmi
            function is supposed to be used as part of the Estimator_cmi class
        source : numpy array
            realisations of the source variable
        target : numpy array
            realisations of the target variable
        opts : dict [optional]
            sets estimation parameters:

            - 'kraskov_k' - no. nearest neighbours for KNN search (default=4)
            - 'normalise' - z-standardise data (default=False)
            - 'theiler_t' - no. next temporal neighbours ignored in KNN and
              range searches (default='ACT', the autocorr. time of the target)
            - 'noise_level' - random noise added to the data (default=1e-8)
            - 'local_values' - return local TE instead of average TE
              (default=False)
            - 'history_target' - number of samples in the target's past to
              consider (mandatory to provide)
            - 'history_source' - number of samples in the source's past to
              consider (default=same as the target history)
            - 'tau_source' - source's embedding delay (default=1)
            - 'tau_target' - target's embedding delay (default=1)
            - 'source_target_delay' - information transfer delay between source
              and target (default=1)
            - 'debug' - set debug prints from the calculator on (default=False)

    Returns:
        float
            transfer entropy from source to target
        OR
        numpy array of floats
            local transfer entropy if local_values is set

    Note:
        Some technical details: JIDT normalises over realisations, IDTxl
        normalises over raw data once, outside the CMI calculator to save
        computation time. The Theiler window ignores trial boundaries. The
        CMI estimator does add noise to the data as a default. To make analysis
        runs replicable set noise_level to 0.
    """
    if type(opts) is not dict:
        raise TypeError('Opts should be a dictionary.')

    # Get histories.
    try:
        history_target = opts['history_target']
    except KeyError:
        raise RuntimeError('No history was provided for TE estimation.')
    history_source = opts.get('history_source', history_target)
    tau_target = opts.get('tau_target', 1)
    tau_source = opts.get('tau_source', 1)
    delay = opts.get('source_target_delay', 1)
    debug = opts.get('debug', False)

    # Get defaults for estimator options.
    kraskov_k = str(opts.get('kraskov_k', 4))
    normalise = str(opts.get('normalise', False)).lower()
    theiler_t = str(opts.get('theiler_t', 0))  # TODO necessary?
    noise_level = str(opts.get('noise_level', 1e-8))
    local_values = opts.get('local_values', False)

    # Start JAVA virtual machine.
    jarLocation = resource_filename(__name__, 'infodynamics.jar')
    if not jp.isJVMStarted():
        jp.startJVM(jp.getDefaultJVMPath(), '-ea',
                    ('-Djava.class.path=' + jarLocation))
    # Estimate TE.
    calcClass = (jp.JPackage('infodynamics.measures.continuous.kraskov').
                 TransferEntropyCalculatorKraskov)
    calc = calcClass()
    calc.setDebug(debug)
    calc.setProperty('NORMALISE', normalise)
    calc.setProperty('k', kraskov_k)
    calc.setProperty('PROP_KRASKOV_ALG_NUM', str(1))
    calc.setProperty('NOISE_LEVEL_TO_ADD', noise_level)
    calc.setProperty('DYN_CORR_EXCL', theiler_t)
    calc.initialise(history_target, tau_target, history_source, tau_source,
                    delay)
    calc.setObservations(source, target)
    if local_values:
        return np.array(calc.computeLocalOfPreviousObservations())
    else:
        return calc.computeAverageLocalOfObservations()
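# A minimal usage sketch, assuming this function has been bound to an
# Estimator_cmi instance as the docstring describes; the toy data and the name
# `estimator` below are illustrative only.
import numpy as np

n = 1000
source = np.random.randn(n)
target = np.concatenate(([0.0], 0.5 * source[:-1])) + 0.1 * np.random.randn(n)
opts = {'history_target': 1, 'kraskov_k': 4, 'noise_level': 1e-8}
# te = estimator.jidt_kraskov(source, target, opts)  # average TE in nats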
Example #6
File: test_array.py  Project: ofek/jpype
    def testEmptyObjectArray(self):
        '''Test for strange crash reported in bug #1089302'''
        Test2 = jpype.JPackage('jpype.array').Test2
        test = Test2()
        test.test(test.getValue())
Example #7
        for p, v in jd.fields.items():
            print(v)
            print("- - - - - - - - - - - - - - - - - - - - - - - - - - - - -")
    print("=========================================================")


def renderPackage(pkg):
    for i in dir(pkg):
        print("Test", i)
        try:
            p = getattr(pkg, i)
        except Exception:
            continue
        if isinstance(p, _jpype._JPackage):
            renderPackage(p)
            continue
        if isinstance(p, jpype.JClass):
            renderClass(p)


try:
    renderPackage(jpype.JPackage('com.google.gson'))
except org.jpype.javadoc.JavadocException as ex:
    print("Javadoc Error: ", ex.message())
    print(hw.asString(ex.node))
    raise ex
except Exception as ex:
    print("Error in")
    print(hw.asString(current))
    raise ex
Example #8
def load_hanalmot():
    check_jar()
    if not jpype.isJVMStarted():
        init_jvm()

    return jpype.JPackage('net.ingtra.hanalmot').HanalmotJava
Example #9
    def __init__(self):
        if not jpype.isJVMStarted():
            jpype.startJVM(jpype.getDefaultJVMPath(), "-ea", "-Djava.class.path=jasypt-1.7.1.jar")
        self.BasicTextEncryptor = jpype.JPackage('org').jasypt.util.text.BasicTextEncryptor
        self.BasicTextEncryptor_inst = self.BasicTextEncryptor()
        self.BasicTextEncryptor_inst.setPassword("WWW.IAPPPAY.COM_YANGFENG")
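        # A hedged round-trip sketch: jasypt's BasicTextEncryptor exposes
        # encrypt()/decrypt(); the plaintext below is illustrative only.
        cipher_text = self.BasicTextEncryptor_inst.encrypt("hello")
        plain_text = self.BasicTextEncryptor_inst.decrypt(cipher_text)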
Example #10
'''This uses Autoplot Java/Swing GUIs to create URIs'''
from autoplot import *
import jpype

org = javaaddpath('http://autoplot.org/latest/autoplot.jar')

oldURI = 'vap+cdaweb:'

javax = jpype.JPackage('javax')
java = jpype.JPackage('java')
parent = javax.swing.JPanel()
parent.setLayout(java.awt.BorderLayout())

DataSourceEditorPanelUtil = org.autoplot.datasource.DataSourceEditorPanelUtil
p = DataSourceEditorPanelUtil.getDataSourceEditorPanel(parent, oldURI)

WindowManager = org.autoplot.datasource.WindowManager
if WindowManager.OK_OPTION == WindowManager.showConfirmDialog(
        None, p, "Editing URI", WindowManager.OK_CANCEL_OPTION):

    print(p.getURI())
Example #11
def compute_bTE_all_pairs(traj):
    nodes_n = traj.par.topology.initial.nodes_n
    # Generate initial network
    G = network_dynamics.generate_network(traj.par.topology.initial)
    # Get adjacency matrix
    adjacency_matrix = np.array(
        nx.to_numpy_matrix(G, nodelist=np.array(range(0, nodes_n)), dtype=int))
    # Add self-loops
    np.fill_diagonal(adjacency_matrix, 1)
    # Generate initial node coupling
    coupling_matrix = network_dynamics.generate_coupling(
        traj.par.node_coupling.initial, adjacency_matrix)
    # Generate delay
    delay_matrices = network_dynamics.generate_delay(traj.par.delay.initial,
                                                     adjacency_matrix)
    # Generate coefficient matrices
    coefficient_matrices = np.transpose(delay_matrices * coupling_matrix,
                                        (0, 2, 1))
    # Run dynamics
    time_series = network_dynamics.run_dynamics(traj.par.node_dynamics,
                                                coefficient_matrices)
    # Load the time series into a Data object; normalisation depends on the model
    if traj.par.node_dynamics.model == 'boolean_random':
        normalise = False
    elif traj.par.node_dynamics.model == 'AR_gaussian_discrete':
        normalise = True
    else:
        raise ValueError('Unknown node dynamics model: {0}'.format(
            traj.par.node_dynamics.model))
    dat = Data(time_series, dim_order='psr', normalise=normalise)
    data = dat.data

    # Compute empirical bTE between all pairs
    lag = 1
    history_target = traj.par.estimation.history_target
    settings = {}
    settings['source_target_delay'] = lag
    settings['history_source'] = traj.par.estimation.history_source
    if traj.par.node_dynamics.model == 'boolean_random':
        settings['history_target'] = history_target
        est = JidtDiscreteTE(settings)
    elif traj.par.node_dynamics.model == 'AR_gaussian_discrete':
        settings['history_target'] = history_target
        est = JidtGaussianTE(settings)
    bTE_empirical_matrix = np.full((nodes_n, nodes_n), np.NaN)
    for X in range(nodes_n):
        for Y in range(nodes_n):
            if (adjacency_matrix[X, Y] > 0) and (X != Y):
                bTE_empirical_matrix[X, Y] = est.estimate(data[X, :, 0],
                                                          data[Y, :, 0])

    # Add results to the trajectory
    # The wildcard character $ will be replaced by the name of the current run,
    # formatted as `run_XXXXXXXX`
    traj.f_add_result('$.topology.initial',
                      adjacency_matrix=adjacency_matrix,
                      comment='')
    traj.f_add_result('$.node_coupling.initial',
                      coupling_matrix=coupling_matrix,
                      coefficient_matrices=coefficient_matrices,
                      comment='')
    traj.f_add_result('$.delay.initial',
                      delay_matrices=delay_matrices,
                      comment='')
    # traj.f_add_result(
    #     '$.node_dynamics',
    #     time_series=time_series,
    #     comment='')
    traj.f_add_result(PickleResult,
                      '$.bTE',
                      bTE_empirical_matrix=bTE_empirical_matrix,
                      comment='')

    jSystem = jpype.JPackage("java.lang").System
    jSystem.gc()
Example #12
    def render_text(self, outfd, data):
        j_test_path = '-Djava.class.path=/root/JDI-old.jar'
        jpype.startJVM(jpype.getDefaultJVMPath(), j_test_path)
        tasks = self.calculate()

        if len(tasks) > 0:
            task = tasks[0]
        else:
            jpype.shutdownJVM()
            raise Exception("no task or wrong pid")
        libnames = []
        libbases = []
        libs = []
        name_set = set()
        for vma in task.get_proc_maps():
            fname = vma.vm_name(task)
            if fname == "Anonymous Mapping":
                fname = ""
            if len(fname) > 0 and fname not in name_set:
                name_set.add(fname)
                lib = Library()
                lib.base = vma.vm_start
                lib.name = fname
                libnames.append(str(lib.name))
                libbases.append(long(lib.base))
                libs.append(lib)

        for lib in libs:
            if ".so" in lib.name or "java" in lib.name:
                print "base:", lib.base, "name:", lib.name
        threadsId = []
        for thread in task.threads():
            threadsId.append(long(thread.pid))

        self.libnames = libnames
        self.libbases = libbases
        self.threadsId = threadsId
        self.currentTask = task
        self.libs = libs
        self.symbolDict = {}
        self.total_time = 0
        self.rr_time = 0
        symbol = volatility.plugins.linux.java.readelf.read_sym_offset(
            "/usr/local/development/jdk1.7.0_79/jre/lib/amd64/server/libjvm.so"
        )
        self.symbolDict[
            "/usr/local/development/jdk1.7.0_79/jre/lib/amd64/server/libjvm.so"] = symbol
        j_frames = jpype.JPackage('sun.tools.jdi').Frames
        method_dict = {
            'getThreadsId': self.getThreadsId,
            'getLibName': self.getLibName,
            'getLibBase': self.getLibBase,
            'lookUpByName': self.lookUpByName,
            'readBytesFromProcess': self.readBytesFromProcess
        }
        jp = jpype.JProxy(
            'sun.jvm.hotspot.debugger.linux.PythonMethodInterface',
            dict=method_dict)
        j_frames.init(str(task.pid), jp, 1, 'func4')
        print "real read time is", self.rr_time
        print "read time is", self.total_time
        cmd = raw_input()
        while "q" not in cmd:
            res = j_frames.getThread('func4')
            if res == 0:
                print "not contains"
                cmd = raw_input()
                continue
            self.total_time = 0
            self.rr_time = 0
            t1 = time.time()
            j_frames.pythonTest(str(cmd), 'func4')
            t2 = time.time()
            print "real read time is", self.rr_time
            print "read time is", self.total_time
            print "time is", t2 - t1
            cmd = raw_input()
        jpype.shutdownJVM()
Example #13
def get_transfer_entropy(brain_output,
                         delay=1,
                         reciprocal=True,
                         log=False,
                         local=False,
                         binning=True,
                         min_v=0.,
                         max_v=1.):
    """
    Calculate transfer entropy from 2D time series.
    :param brain_output: time series numpy array
    :param delay: lag between source and destination
    :param reciprocal: whether to calculate average TE in both directions
    :param log: whether to print intermediate results
    :param local: whether to calculate local entropy values
    """
    if binning:
        calcClass = jpype.JPackage(
            "infodynamics.measures.discrete").TransferEntropyCalculatorDiscrete
        source = discretize(brain_output[:, 0], BINS, min_v, max_v).tolist()
        destination = discretize(brain_output[:, 1], BINS, min_v,
                                 max_v).tolist()
        calc = calcClass(BINS, 1)
        calc.initialise()
        calc.addObservations(source, destination)
    else:
        calcClass = jpype.JPackage("infodynamics.measures.continuous.kraskov"
                                   ).TransferEntropyCalculatorKraskov
        source = brain_output[:, 0]
        destination = brain_output[:, 1]
        calc = calcClass()
        initialize_calc(calc, delay)
        calc.setObservations(source, destination)

    te_src_dst = calc.computeAverageLocalOfObservations()
    if binning:
        te_src_dst = te_src_dst / np.log2(BINS)

    if log:
        print('te_src_dst: {}'.format(te_src_dst))
    local_te = []
    if local:
        te_src_dst_local = calc.computeLocalOfPreviousObservations()
        local_te.append(te_src_dst_local)
    if not reciprocal:
        return te_src_dst

    calc.initialise()  # Re-initialise leaving the parameters the same
    if binning:
        calc.addObservations(destination, source)
    else:
        calc.setObservations(destination, source)
    te_dst_src = calc.computeAverageLocalOfObservations()
    if binning:
        te_dst_src = te_dst_src / np.log2(BINS)
    avg_te = np.mean([te_src_dst, te_dst_src])
    if log:
        print('te_dst_src: {}'.format(te_dst_src))
    if local:
        te_dst_src_local = calc.computeLocalOfPreviousObservations()
        local_te.append(te_dst_src_local)
        return avg_te, local_te
    return avg_te
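# A hypothetical call, assuming the JVM is already running with
# infodynamics.jar on the class path and that the module-level helpers used
# above (BINS, discretize, initialize_calc) are defined as in the surrounding
# project:
import numpy as np

brain_output = np.random.rand(500, 2)  # two channels, illustrative only
avg_te = get_transfer_entropy(brain_output, delay=1, reciprocal=True)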
Example #14
    def initialize_gt_generation(self, des_dist=5, max_d=50):
        # Create list of tuples containing the surrounding polygon, the baseline and the article id of each textline
        tl_list = []
        for tl in self.textlines:
            try:
                tl_bl = tl.baseline.to_polygon()
                tl_bl.calculate_bounds()
            except AttributeError:
                print(f"Textline with id {tl.id} has no baseline coordinates. Skipping...")
                continue

            tl_surr_poly = None
            try:
                tl_surr_poly = tl.surr_p.to_polygon().get_bounding_box()
            except (AttributeError, TypeError):
                print(f"Textline with id {tl.id} has no surrounding polygon.")

            tl_list.append([tl, tl_surr_poly, tl_bl, tl.get_article_id()])

        # Calculate the interline distance for each baseline
        # calculation of the normed polygons (includes also the calculation of their bounding boxes)
        list_of_normed_polygons = norm_poly_dists([tl[2] for tl in tl_list], des_dist=des_dist)

        # call java code to calculate the interline distances
        java_util = jpype.JPackage("citlab_article_separation.java").Util()

        list_of_normed_polygon_java = []

        for poly in list_of_normed_polygons:
            list_of_normed_polygon_java.append(jpype.java.awt.Polygon(poly.x_points, poly.y_points, poly.n_points))

        list_of_interline_distances_java = java_util.calcInterlineDistances(list_of_normed_polygon_java, des_dist,
                                                                            max_d)
        list_of_interline_distances = list(list_of_interline_distances_java)

        tl_list_copy = copy.deepcopy(tl_list)

        # Update the bounding boxes for the textlines
        for tl_tuple, tl_interdist in zip(tl_list_copy, list_of_interline_distances):
            _, _, tl_bl, _ = tl_tuple

            # bounding rectangle moved up and down
            height_shift = int(tl_interdist)
            tl_bl.bounds.translate(dx=0, dy=-height_shift)
            tl_bl.bounds.height += int(1.1 * height_shift)

        tl_surr_poly_final = []
        has_intersect_surr_polys = [False] * len(tl_list_copy)
        for i in range(len(tl_list_copy)):
            tl1, tl1_surr_poly, tl1_bl, tl1_aid = tl_list_copy[i]

            for j in range(i + 1, len(tl_list_copy)):
                tl2, tl2_surr_poly, tl2_bl, tl2_aid = tl_list_copy[j]

                def baseline_intersection_loop(bl1, bl2):
                    intersect = bl1.bounds.intersection(bl2.bounds)
                    while intersect.width >= 0 and intersect.height >= 0:

                        # TODO: Check if this works (bounding boxes intersect in a horizontal way)
                        if intersect.height == bl1.bounds.height or intersect.height == bl2.bounds.height:
                            width_shift = 1
                            # bl1 lies right of bl2
                            if bl1.bounds.x + bl1.bounds.width > bl2.bounds.x + bl2.bounds.width:
                                bl1.bounds.width -= width_shift
                                bl1.bounds.x += width_shift
                                bl2.bounds.width -= width_shift
                            # bl1 lies left of bl2
                            else:
                                bl1.bounds.width -= width_shift
                                bl2.bounds.x += width_shift
                                bl2.bounds.width -= width_shift

                        elif bl1.bounds.y + bl1.bounds.height > bl2.bounds.y + bl2.bounds.height:
                            height_shift = max(1, int(0.05 * bl1.bounds.height))

                            bl1.bounds.height -= height_shift
                            bl1.bounds.y += height_shift

                        elif bl2.bounds.y + bl2.bounds.height > bl1.bounds.y + bl1.bounds.height:
                            height_shift = max(1, int(0.05 * bl2.bounds.height))

                            bl2.bounds.height -= height_shift
                            bl2.bounds.y += height_shift

                        intersect = bl1.bounds.intersection(bl2.bounds)

                    return bl1

                if tl1_surr_poly is not None and not has_intersect_surr_polys[i]:
                    if tl2_surr_poly is not None and not has_intersect_surr_polys[j]:
                        intersection = tl1_surr_poly.intersection(tl2_surr_poly)
                        has_intersect_surr_polys[j] = (
                            intersection.width >= 0 and intersection.height >= 0)
                    else:
                        intersection = tl1_surr_poly.intersection(tl2_bl.bounds)
                    if not (intersection.width >= 0 and intersection.height >= 0 and tl1_aid != tl2_aid):
                        if j == len(tl_list_copy) - 1:
                            tl_surr_poly_final.append((tl1, tl1_surr_poly, tl1_aid))
                        continue
                    has_intersect_surr_polys[i] = True
                else:
                    if tl2_surr_poly is not None:
                        intersection = tl1_bl.bounds.intersection(tl2_surr_poly)
                        has_intersect_surr_polys[j] = (
                            intersection.width >= 0 and intersection.height >= 0)
                    else:
                        intersection = tl1_bl.bounds.intersection(tl2_bl.bounds)

                if intersection.width >= 0 and intersection.height >= 0 and tl1_aid != tl2_aid:
                    bl = baseline_intersection_loop(tl1_bl, tl2_bl)
                    if j == len(tl_list_copy) - 1:
                        tl_surr_poly_final.append((tl1, bl.bounds, tl1_aid))
                elif j == len(tl_list_copy) - 1:
                    tl_surr_poly_final.append((tl1, tl1_bl.bounds, tl1_aid))

        if len(has_intersect_surr_polys) > 0:
            if has_intersect_surr_polys[-1] or tl_list_copy[-1][1] is None:
                tl_surr_poly_final.append((tl_list_copy[-1][0], tl_list_copy[-1][2].bounds, tl_list_copy[-1][3]))
            else:
                tl_surr_poly_final.append((tl_list_copy[-1][0], tl_list_copy[-1][1], tl_list_copy[-1][3]))

        return tl_surr_poly_final
Example #15
import jpype
import jpype.imports
from jpype.types import *

# Launch the JVM
jpype.startJVM(classpath=['amidst-v4-5-beta3.jar'])
import java.nio.file as file
mc = jpype.JPackage("amidst")
#inst_path = file.FileSystems.getDefault().getPath('/', 'home', 'sandi', '.local', 'share', 'multimc', 'instances', '1.16.1', '.minecraft')
inst = mc.mojangapi.file.MinecraftInstallation.newLocalMinecraftInstallation()
versions = list(inst.readInstalledVersionsAsLauncherProfiles())
prof = next(filter(lambda version: version.getVersionId() == '1.16.1', versions))
mc_interface = mc.mojangapi.minecraftinterface.MinecraftInterfaces.fromLocalProfile(prof)


seed = mc.mojangapi.world.WorldSeed.fromUserInput("hello")
world_type = mc.mojangapi.world.WorldType.DEFAULT
gen_opts = ""
opts = mc.mojangapi.world.WorldOptions(seed, world_type, gen_opts)
world = mc_interface.createWorld(seed.getLong(), world_type, gen_opts)
recognized_version = mc_interface.getRecognisedVersion()
builder = mc.mojangapi.world.versionfeatures.DefaultVersionFeatures.builder(opts, world)
features = builder.create(recognized_version)
biome_list = features.get(mc.mojangapi.world.versionfeatures.FeatureKey.BIOME_LIST)
biome_oracle = mc.mojangapi.world.oracle.BiomeDataOracle(world, recognized_version, biome_list)



from tkinter import *

root = Tk()
Example #16
        tables = get_tables(conn, schema)

        for table in tables:
            print(table)
            columns = get_columns(cursor, table)
            for column in columns:
                print(column)

        conn.close()

    try:
        init_jvm(class_path, max_heap_size)
        import java.lang

        WbManager = jp.JPackage('workbench').WbManager
        WbManager.prepareForEmbedded()

        batch = jp.JPackage('workbench.sql').BatchRunner()
        batch.setBaseDir(data_dir + '/jpype_test/')
        #    batch.setStoreErrors(True)
        #    batch.setErrorScript('error.log')
        #    batch.setShowStatementWithResult(True)
        #    batch.setScriptToRun('wbexport.sql')
        #    batch.execute()
        batch.runScript("WbConnect -url='" + url + "' -password="******";")
        gen_report_str = "WbSchemaReport -file=metadata.xml -schemas=" + schema + " -types=SYNONYM,TABLE,VIEW -includeProcedures=true -includeTriggers=true -writeFullSource=true;"
        batch.runScript(gen_report_str)

        #        batch.showResultSets(True) # TODO: shows the result -> how to get a better display in the console?
        #    batch.setVerboseLogging(True)
Example #17
# teCalc.setObservations(jpype.JArray(jpype.JDouble, 1)(sourceArray2), jpype.JArray(jpype.JDouble, 1)(destArray))
# result2 = teCalc.computeAverageLocalOfObservations()
# print("TE result %.4f nats; expected to be close to 0 nats for these uncorrelated Gaussians" % result2)
"""Continuous Transfer Entropy with automatic embedding (Kraskov)"""

# Examine the heart-breath interaction that Schreiber originally looked at:
datafile = "/Users/katja/javawd/infodynamics-dist-1.4/demos/data/SFI-heartRate_breathVol_bloodOx.txt"
data = readFloatsFile.readFloatsFile(datafile)
# As numpy array:
A = np.array(data)
# Select data points 2350:3550, pulling out the relevant columns:
breathRate = A[2350:3551, 1]
heartRate = A[2350:3551, 0]

# Create a Kraskov TE calculator:
teCalcClass = jpype.JPackage("infodynamics.measures.continuous.kraskov"
                             ).TransferEntropyCalculatorKraskov
teCalc = teCalcClass()

# Set properties for auto-embedding of both source and destination
#  using the Ragwitz criteria:
#  a. Auto-embedding method
teCalc.setProperty(teCalcClass.PROP_AUTO_EMBED_METHOD,
                   teCalcClass.AUTO_EMBED_METHOD_RAGWITZ)
#  b. Search range for embedding dimension (k) and delay (tau)
teCalc.setProperty(teCalcClass.PROP_K_SEARCH_MAX, "6")
teCalc.setProperty(teCalcClass.PROP_TAU_SEARCH_MAX, "6")
# Since we're auto-embedding, no need to supply k, l, k_tau, l_tau here:
teCalc.initialise()
# Compute TE from breath (column 1) to heart (column 0)
teCalc.setObservations(breathRate, heartRate)
teBreathToHeart = teCalc.computeAverageLocalOfObservations()
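# Print the estimate; a short follow-up in the spirit of the JIDT demos (the
# expected value depends on the data window selected above):
print("TE(breath -> heart) = %.4f nats" % teBreathToHeart)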
Example #18
# _*_ coding: utf-8 _*_
'''
Created on 2018-01-29
Test calling another language from Python
@author: hudaqiang
'''
from outer_invoke.java_invoke import JavaInvocation
import jpype
if __name__ == '__main__':

    javaInvocation = JavaInvocation('commons-io-2.4.jar')
    javaInvocation.start_jvm()

    # classes in the java and javax packages can be called directly as package.ClassName
    salt_file = jpype.java.io.File('/Users/hudaqiang/Downloads/resource/salt')
    fileUtils = jpype.JPackage('org').apache.commons.io.FileUtils
    read_lines = fileUtils.readLines(salt_file)
    javaInvocation.jprint(read_lines)

    javaInvocation.shutdown_jvm()
Example #19
class OpenRocketInterface(object):
    def __init__(self):
        jpype.startJVM(jvm, "-Djava.class.path=%s" % classpath)
        openRocketAPIClass = 'net.sf.openrocket.startup.OpenRocketAPI'

        try:
            OpenRocketAPI = jpype.JClass(openRocketAPIClass)
        except jpype.JavaException as ex:
            if ex.message() == "Class " + openRocketAPIClass + " not found":
                print("Caught the runtime exception:\n     ", ex.message())
                print("Ensure that you are using a version of OpenRocket with this class")
            else:
                print("Caught the runtime exception:\n     ", ex.message())
            quit()
        self.apiInstance = OpenRocketAPI()

        # openrocket.simulation.FlightDataType
        # depends on resources.l10n.messages.properties
        fdt = jpype.JPackage('net.sf.openrocket.simulation').FlightDataType
        self.type_dic = {
            'TYPE_TIME': fdt.TYPE_TIME,
            'TYPE_TIME_STEP': fdt.TYPE_TIME_STEP,
            'TYPE_ALTITUDE': fdt.TYPE_ALTITUDE,
            'TYPE_VELOCITY_Z': fdt.TYPE_VELOCITY_Z,
            'TYPE_ACCELERATION_Z': fdt.TYPE_ACCELERATION_Z,
            'TYPE_VELOCITY_TOTAL': fdt.TYPE_VELOCITY_TOTAL,
            'TYPE_ACCELERATION_TOTAL': fdt.TYPE_ACCELERATION_TOTAL,
            'TYPE_POSITION_X': fdt.TYPE_POSITION_X,
            'TYPE_POSITION_Y': fdt.TYPE_POSITION_Y,
            'TYPE_POSITION_XY': fdt.TYPE_POSITION_XY,
            'TYPE_POSITION_DIRECTION': fdt.TYPE_POSITION_DIRECTION,
            'TYPE_VELOCITY_XY': fdt.TYPE_VELOCITY_XY,
            'TYPE_ACCELERATION_XY': fdt.TYPE_ACCELERATION_XY,
            'TYPE_ACCELERATION_X': fdt.TYPE_ACCELERATION_X,
            'TYPE_ACCELERATION_Y': fdt.TYPE_ACCELERATION_Y,
            'TYPE_LATITUDE': fdt.TYPE_LATITUDE,
            'TYPE_LONGITUDE': fdt.TYPE_LONGITUDE,
            'TYPE_GRAVITY': fdt.TYPE_GRAVITY,
            'TYPE_AOA': fdt.TYPE_AOA,
            'TYPE_ROLL_RATE': fdt.TYPE_ROLL_RATE,
            'TYPE_PITCH_RATE': fdt.TYPE_PITCH_RATE,
            'TYPE_YAW_RATE': fdt.TYPE_YAW_RATE,
            'TYPE_MASS': fdt.TYPE_MASS,
            'TYPE_PROPELLANT_MASS': fdt.TYPE_PROPELLANT_MASS,
            'TYPE_LONGITUDINAL_INERTIA': fdt.TYPE_LONGITUDINAL_INERTIA,
            'TYPE_ROTATIONAL_INERTIA': fdt.TYPE_ROTATIONAL_INERTIA,
            'TYPE_CP_LOCATION': fdt.TYPE_CP_LOCATION,
            'TYPE_CG_LOCATION': fdt.TYPE_CG_LOCATION,
            'TYPE_STABILITY': fdt.TYPE_STABILITY,
            'TYPE_MACH_NUMBER': fdt.TYPE_MACH_NUMBER,
            'TYPE_REYNOLDS_NUMBER': fdt.TYPE_REYNOLDS_NUMBER,
            'TYPE_THRUST_FORCE': fdt.TYPE_THRUST_FORCE,
            'TYPE_DRAG_FORCE': fdt.TYPE_DRAG_FORCE,
            'TYPE_DRAG_COEFF': fdt.TYPE_DRAG_COEFF,
            'TYPE_AXIAL_DRAG_COEFF': fdt.TYPE_AXIAL_DRAG_COEFF,
            'TYPE_FRICTION_DRAG_COEFF': fdt.TYPE_FRICTION_DRAG_COEFF,
            'TYPE_PRESSURE_DRAG_COEFF': fdt.TYPE_PRESSURE_DRAG_COEFF,
            'TYPE_BASE_DRAG_COEFF': fdt.TYPE_BASE_DRAG_COEFF,
            'TYPE_NORMAL_FORCE_COEFF': fdt.TYPE_NORMAL_FORCE_COEFF,
            'TYPE_PITCH_MOMENT_COEFF': fdt.TYPE_PITCH_MOMENT_COEFF,
            'TYPE_YAW_MOMENT_COEFF': fdt.TYPE_YAW_MOMENT_COEFF,
            'TYPE_SIDE_FORCE_COEFF': fdt.TYPE_SIDE_FORCE_COEFF,
            'TYPE_ROLL_MOMENT_COEFF': fdt.TYPE_ROLL_MOMENT_COEFF,
            'TYPE_ROLL_FORCING_COEFF': fdt.TYPE_ROLL_FORCING_COEFF,
            'TYPE_ROLL_DAMPING_COEFF': fdt.TYPE_ROLL_DAMPING_COEFF,
            'TYPE_PITCH_DAMPING_MOMENT_COEFF': fdt.TYPE_PITCH_DAMPING_MOMENT_COEFF,
            'TYPE_YAW_DAMPING_MOMENT_COEFF': fdt.TYPE_YAW_DAMPING_MOMENT_COEFF,
            'TYPE_CORIOLIS_ACCELERATION': fdt.TYPE_CORIOLIS_ACCELERATION,
            'TYPE_REFERENCE_LENGTH': fdt.TYPE_REFERENCE_LENGTH,
            'TYPE_REFERENCE_AREA': fdt.TYPE_REFERENCE_AREA,
            'TYPE_ORIENTATION_THETA': fdt.TYPE_ORIENTATION_THETA,
            'TYPE_ORIENTATION_PHI': fdt.TYPE_ORIENTATION_PHI,
            'TYPE_WIND_VELOCITY': fdt.TYPE_WIND_VELOCITY,
            'TYPE_AIR_TEMPERATURE': fdt.TYPE_AIR_TEMPERATURE,
            'TYPE_AIR_PRESSURE': fdt.TYPE_AIR_PRESSURE,
            'TYPE_SPEED_OF_SOUND': fdt.TYPE_SPEED_OF_SOUND,
            'TYPE_COMPUTATION_TIME': fdt.TYPE_COMPUTATION_TIME
        }
Example #20
import jpype

# start the JVM
if not jpype.isJVMStarted():
    jar = "HeidelTime/de.unihd.dbs.heideltime.standalone.jar"
    jpype.startJVM(jpype.getDefaultJVMPath(), "-ea",
                   "-Djava.class.path=%s" % jar)

# get the Java classes we want to use
heideltime_resources = jpype.JPackage(
    "de.unihd.dbs.uima.annotator.heideltime.resources")
heideltime_standalone = jpype.JPackage("de.unihd.dbs.heideltime.standalone")

# constants
LANGUAGES = {
    'english': heideltime_resources.Language.ENGLISH,
    'german': heideltime_resources.Language.GERMAN,
    'dutch': heideltime_resources.Language.DUTCH,
    'italian': heideltime_resources.Language.ITALIAN,
    'spanish': heideltime_resources.Language.SPANISH,
    'arabic': heideltime_resources.Language.ARABIC,
    'french': heideltime_resources.Language.FRENCH,
    'chinese': heideltime_resources.Language.CHINESE,
    'russian': heideltime_resources.Language.RUSSIAN,
    'portuguese': heideltime_resources.Language.PORTUGUESE
}

DOCUMENTS = {
    'narratives': heideltime_standalone.DocumentType.NARRATIVES,
    'news': heideltime_standalone.DocumentType.NEWS,
    'colloquial': heideltime_standalone.DocumentType.COLLOQUIAL,
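# A hedged construction sketch (the DOCUMENTS dict above is truncated in this
# excerpt; the config path is the config.props shipped with the HeidelTime
# standalone distribution, and OutputType/POSTagger sit in the same standalone
# package as DocumentType):
heideltime = heideltime_standalone.HeidelTimeStandalone(
    LANGUAGES['english'],
    DOCUMENTS['news'],
    heideltime_standalone.OutputType.TIMEML,
    'HeidelTime/config.props',
    heideltime_standalone.POSTagger.TREETAGGER)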
Example #21
    try:
        #print('--------startjvm---------')
        jpype.startJVM(jvm, jvm_options)
        #print("JVM path:"+ jpype.getDefaultJVMPath())
        #print('----- running jvm -------------')

    except Exception as e:
        print('====== throw error ======')
        traceback.print_exc()
        jpype.shutdownJVM()

    System = jpype.java.lang.System
    #System = jpype.JClass('java.lang.System')
    System.setProperty("java.security.krb5.conf", "/you_filed_algos/krb5.conf")

    Configuration = jpype.JPackage('org.apache.hadoop.conf').Configuration
    conf = Configuration()
    conf.set("hadoop.security.authentication", "kerberos")

    try:
        UserGroupInformation = jpype.JClass(
            'org.apache.hadoop.security.UserGroupInformation')
        UserGroupInformation.setConfiguration(conf)
        UserGroupInformation.loginUserFromKeytab(
            "sjfw_pbpang", "/you_filed_algos/sjfw_pbpang.keytab")

        conn = jaydebeapi.connect(dirver, url)
        print("* create connection object")

        cur = conn.cursor()
        cur.execute('show databases')
Example #22
def convert_bunch_to_weka_instances(bunch):
    '''Converts a dataset from a scikit-learn Bunch format to a Weka Instances object

    :param bunch: dataset in scikit Bunch format; looks for field 'feature_value_names' for nominal features
    :return: dataset in Weka Instances format
    '''

    util = jp.JPackage("java.util")
    weka_core = jp.JPackage("weka.core")

    list_attrs = util.ArrayList()

    num_samples = bunch.data.shape[0]  # num_samples = 5
    num_features = bunch.data.shape[1]  # numAtt = 3
    num_targets = 1

    # create the features
    for j in range(0, num_features):
        if "feature_value_names" in bunch and len(
                bunch.feature_value_names[j]) > 0:
            att_vals = util.ArrayList()
            for v in bunch.feature_value_names[j]:
                att_vals.add(v)

            # Constructor for nominal attributes and string attributes.
            att = weka_core.Attribute(bunch.feature_names[j], att_vals)

        else:
            # Constructor for a numeric attribute with a particular index.
            att = weka_core.Attribute(bunch.feature_names[j], j)

        list_attrs.add(att)

    num_att = num_features + num_targets
    if len(bunch.target_names) > 0:
        # nominal target
        att_vals = util.ArrayList()
        for v in bunch.target_names:
            att_vals.add(v)
        # Constructor for nominal attributes and string attributes.
        target_att = weka_core.Attribute("nominalClass", att_vals)

    else:
        # numeric target
        target_att = weka_core.Attribute("numericClass", num_att - 1)

    list_attrs.add(target_att)

    inst = weka_core.Instances(bunch.DESCR, list_attrs, num_samples)

    for i in range(0, num_samples):
        my_row = weka_core.DenseInstance(num_att)
        for j in range(0, num_features):
            my_row.setValue(j, bunch.data[i][j])

        my_row.setValue(num_att - 1, jp.JDouble(bunch.target[i]))

        inst.add(jp.JObject(my_row, 'weka.core.Instance'))

    inst.setClassIndex(inst.numAttributes() - 1)

    return inst
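# A quick sanity check, assuming the JVM has already been started with weka.jar
# on the class path (jp is jpype); sklearn's load_iris returns a Bunch carrying
# all of the fields this converter reads (data, target, target_names,
# feature_names, DESCR):
from sklearn.datasets import load_iris

iris_instances = convert_bunch_to_weka_instances(load_iris())
print(iris_instances.numInstances(), iris_instances.numAttributes())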
Example #23
def jidt_discrete(self, source, target, opts):
    """Calculate TE with JIDT's implementation for discrete variables.

    Calculate the transfer entropy between two time series processes.
    Call JIDT via jpype and use the discrete estimator. Transfer entropy is
    defined as the conditional mutual information between the source's past
    state and the target's current value, conditional on the target's past.

    References:

    Schreiber, T. (2000). Measuring information transfer. Physical Review
    Letters, 85(2), 461.

    Lizier, Joseph T. (2014). JIDT: an information-theoretic toolkit for
    studying the dynamics of complex systems. Front. Robot. AI, 1(11).

    This function is meant to be imported into the set_estimator module and used
    as a method in the Estimator_te class.

    Args:
        self : instance of Estimator_te
            function is supposed to be used as part of the Estimator_te class
        source : numpy array (either of integers or doubles to be discretised)
            time series realisations of the first random variable.
            Can be multidimensional (i.e. multivariate) where dimensions of the
            array are realisations x variable dimension
        target : numpy array (either of integers or doubles to be discretised)
            time series realisations of the second random variable.
            Can be multidimensional (i.e. multivariate) where dimensions of the
            array are realisations x variable dimension
        opts : dict
            sets estimation parameters:

            - 'discretise_method' - if and how to discretise incoming
              continuous variables to discrete values.
              'max_ent' means to use a maximum entropy binning
              'equal' means to use equal size bins
              'none' means variables are already discrete (default='none')
            - 'num_discrete_bins' - number of discrete bins for discretisation
               if requested (default=2 for binary). If this is set, parameters
               'alph_source', 'alph_target' and 'alphc' are all set to
               'num_discrete_bins'
            - 'alph_source' - number of discrete bins/levels for source
              (default=2 for binary, or the value set for 'num_discrete_bins')
            - 'alph_target' - number of discrete bins/levels for target
              (default=2 for binary, or the value set for 'num_discrete_bins')
            - 'history_target' - number of samples in the target's past to
              consider (mandatory)
            - 'history_source' - number of samples in the source's past to
              consider (default=same as the target history)
            - 'tau_source' - source's embedding delay (default=1)
            - 'tau_target' - target's embedding delay (default=1)
            - 'source_target_delay' - information transfer delay between source
              and target (default=1)
            - 'debug' - set debug prints from the calculator on (default=False)

    Returns:
        float
            transfer entropy
    """
    if type(opts) is not dict:
        raise TypeError('Opts should be a dictionary.')

    # Get alphabet sizes and check if discretisation is requested
    discretise_method = opts.get('discretise_method', 'none')
    alph_source = int(opts.get('alph_source', 2))
    alph_target = int(opts.get('alph_target', 2))

    if discretise_method == 'none':
        if alph_source < np.unique(source).shape[0]:
            raise RuntimeError('The source\'s alphabet size is smaller than '
                               'the no. unique elements in the source array.')
        if alph_target < np.unique(target).shape[0]:
            raise RuntimeError('The target\'s alphabet size is smaller than '
                               'the no. unique elements in the target array.')
    else:  # get the number of bins if discretisation was requested
        try:
            num_discrete_bins = int(opts['num_discrete_bins'])
            alph_source = num_discrete_bins
            alph_target = num_discrete_bins
        except KeyError:
            pass  # Do nothing and use the default for alph_* set above

    # Get embedding and delay parameters.
    try:
        history_target = opts['history_target']
    except KeyError:
        raise RuntimeError('No history was provided for TE estimation.')
    try:
        history_source = opts['history_source']
    except KeyError:
        history_source = history_target
    tau_target = opts.get('tau_target', 1)
    tau_source = opts.get('tau_source', 1)
    delay = opts.get('source_target_delay', 1)
    debug = opts.get('debug', False)

    # Work out the number of samples and dimensions for each variable, before
    #  we collapse all dimensions down:
    if len(source.shape) > 1:
        # source is multidimensional
        source_dimensions = source.shape[1]
    else:
        # source is unidimensional
        source_dimensions = 1
    if len(target.shape) > 1:
        # target is multidimensional
        target_dimensions = target.shape[1]
    else:
        # target is unidimensional
        target_dimensions = 1

    # Now discretise if required
    if (discretise_method == 'equal'):
        source = utils.discretise(source, alph_source)
        target = utils.discretise(target, alph_target)
    elif (discretise_method == 'max_ent'):
        source = utils.discretise_max_ent(source, alph_source)
        target = utils.discretise_max_ent(target, alph_target)
    else:
        pass  # don't discretise at all, assume data to be discrete

    # Then collapse any multivariate arrays into univariate arrays:
    source = utils.combine_discrete_dimensions(source, alph_source)
    target = utils.combine_discrete_dimensions(target, alph_target)

    # And finally make the TE calculation:
    jarLocation = resource_filename(__name__, 'infodynamics.jar')
    if not jp.isJVMStarted():
        jp.startJVM(jp.getDefaultJVMPath(), '-ea',
                    ('-Djava.class.path=' + jarLocation))
    calcClass = (jp.JPackage(
        'infodynamics.measures.discrete').TransferEntropyCalculatorDiscrete)
    calc = calcClass(
        int(
            max(math.pow(alph_source, source_dimensions),
                math.pow(alph_target, target_dimensions))), history_target,
        tau_target, history_source, tau_source, delay)
    calc.setDebug(debug)
    calc.initialise()
    # Unfortunately no faster way to pass numpy arrays in than this list conversion
    calc.addObservations(
        jp.JArray(jp.JInt, 1)(source.tolist()),
        jp.JArray(jp.JInt, 1)(target.tolist()))
    return calc.computeAverageLocalOfObservations()
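# A minimal usage sketch, assuming this function has been bound to an
# Estimator_te instance as the docstring describes; the toy data, settings and
# the name `estimator` below are illustrative only.
import numpy as np

source = np.random.randint(0, 2, 1000)
target = np.roll(source, 1)  # the target copies the source at a lag of one
opts = {'history_target': 1, 'discretise_method': 'none'}
# te = estimator.jidt_discrete(source, target, opts)  # TE in bits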
Example #24
jp.java.opt.ga.MutationFunction
jp.java.opt.ga.StandardGeneticAlgorithm
jp.java.opt.ga.UniformCrossOver
jp.java.opt.prob.GenericProbabilisticOptimizationProblem
jp.java.opt.prob.MIMIC
jp.java.opt.prob.ProbabilisticOptimizationProblem
jp.java.shared.FixedIterationTrainer
jp.java.opt.example.ContinuousPeaksEvaluationFunction


"""
Commandline parameter(s):
   none
"""

ContinuousPeaksEvaluationFunction = jp.JPackage('opt').example.ContinuousPeaksEvaluationFunction
DiscreteUniformDistribution = jp.JPackage('dist').DiscreteUniformDistribution
DiscreteChangeOneNeighbor = jp.JPackage('opt').DiscreteChangeOneNeighbor
DiscreteChangeOneMutation = jp.JPackage('opt').ga.DiscreteChangeOneMutation
SingleCrossOver = jp.JPackage('opt').ga.SingleCrossOver
DiscreteDependencyTree = jp.JPackage('dist').DiscreteDependencyTree
GenericHillClimbingProblem = jp.JPackage('opt').GenericHillClimbingProblem
GenericGeneticAlgorithmProblem = jp.JPackage('opt').ga.GenericGeneticAlgorithmProblem
GenericProbabilisticOptimizationProblem = jp.JPackage('opt').prob.GenericProbabilisticOptimizationProblem
RandomizedHillClimbing = jp.JPackage('opt').RandomizedHillClimbing
FixedIterationTrainer = jp.JPackage('shared').FixedIterationTrainer
SimulatedAnnealing = jp.JPackage('opt').SimulatedAnnealing
StandardGeneticAlgorithm = jp.JPackage('opt').ga.StandardGeneticAlgorithm
MIMIC = jp.JPackage('opt').prob.MIMIC

Example #25
import jpype

jvmPath = jpype.getDefaultJVMPath()
ext_class_dir = "jars/"
jvmArg = '-Djava.ext.dirs=' + ext_class_dir
jpype.startJVM(jvmPath, jvmArg)

cuengh_tokenizer = jpype.JPackage('org.roeg.cytokenizer').CuenghTokenizer()

vlist = open("tools/sawguq.txt").read().strip().split("\n")
vset = set()
for line in vlist:
    key = line.split(" ")[0]
    klist = cuengh_tokenizer.tokenize(key)
    klist = map(lambda x: x.toString(), klist)
    vset |= set(klist)

print(len(vset))
Example #26
    result_list2d = []

    for i in result_yield:
        result_list2d.append([Levenshtein.distance(key, i[0]), i])

    result_list2d.sort()
    for i in result_list2d:
        yield i[1]


jvmPath = jpype.getDefaultJVMPath()
ext_class_dir = "jars/"
jvmArg = '-Djava.ext.dirs=' + ext_class_dir
jpype.startJVM(jvmPath, jvmArg)

Dict = jpype.JPackage('org.roeg.sawroeg').Dict
cuengh_tokenizer = jpype.JPackage('org.roeg.cytokenizer').CuenghTokenizer()
bouyei_tokenizer = jpype.JPackage('org.roeg.cytokenizer').BouyeiTokenizer()
wordsSimilarity = Dict.wordsSimilarity
isStringChinese = Dict.isStringChinese
languageFilter = Dict.languageFilter

WORD_TIMES = 1000
Max_Ch_Word_Length = 7


def filter_func(s):
    s = re.sub("\\[[^\\]]+\\]", "", s)
    s = re.sub("([^)]*\\)", "", s)
    s = re.sub("<[^>]*>", "", s)
    s = re.sub("\\{[^\\}]*\\}", "", s)
Example #27
    def render_text(self, outfd, data):
        # local_conf = Conf()
        # local_conf.config_no()
        # local_conf.start()

        j_test_path = '-Djava.class.path=/home/kong/JavaMemory/JDI/out/artifacts/JDI/JDI.jar'
        jpype.startJVM(jpype.getDefaultJVMPath(), j_test_path)
        tasks = self.calculate()

        if len(tasks) > 0:
            task = tasks[0]
        else:
            jpype.shutdownJVM()
            raise Exception("no task or wrong pid")

        # configuration
        self.fnames = ['add', 'sub']
        self.vnames = [['x', 'y'], ['x', 'y']]
        self.vtypes = [[4, 4], [4, 4]]

        # ssh
        hostname = '10.108.164.232'
        port = 22
        username = '******'
        password = '******'

        cmd = 'java -jar /home/vm/pyagent.jar ' + str(task.pid)
        ssh_res = ssh_cmd(hostname=hostname, port=port, username=username, password=password, cmd=cmd)
        if 'yes' not in ssh_res:
            print ssh_res
            raise Exception("no task or wrong pid")

        libnames = []
        libbases = []
        libs = []
        name_set = set()
        for vma in task.get_proc_maps():
            fname = vma.vm_name(task)
            if fname == "Anonymous Mapping":
                fname = ""
            if len(fname) > 0 and fname not in name_set:
                name_set.add(fname)
                lib = Library()
                lib.base = vma.vm_start
                lib.name = fname
                libnames.append(str(lib.name))
                libbases.append(long(lib.base))
                libs.append(lib)

        for lib in libs:
            if ".so" in lib.name or "java" in lib.name:
                print "base:", lib.base, "name:", lib.name
        threadsId = []
        for thread in task.threads():
            threadsId.append(long(thread.pid))

        self.libnames = libnames
        self.libbases = libbases
        self.threadsId = threadsId
        self.currentTask = task
        self.libs = libs
        self.symbolDict = {}
        # symbol = volatility.plugins.linux.java.readelf.read_sym_offset("/usr/local/development/jdk1.7.0_79/jre/lib/amd64/server/libjvm.so")
        # self.symbolDict["/usr/local/development/jdk1.7.0_79/jre/lib/amd64/server/libjvm.so"] = symbol
        symbol = volatility.plugins.linux.java.readelf.read_sym_offset(
            "/home/kong/JavaMemory/jdk1.7.0_79/jre/lib/amd64/server/libjvm.so")
        self.symbolDict["/home/vm/jdk1.7.0_79/jre/lib/amd64/server/libjvm.so"] = symbol
        PyDump = jpype.JPackage('sun.tools.python').PyDump
        self.PyDump = PyDump
        method_dict = {
            'getThreadsId': self.getThreadsId,
            'getLibName': self.getLibName,
            'getLibBase': self.getLibBase,
            'lookUpByName': self.lookUpByName,
            'readBytesFromProcess': self.readBytesFromProcess
        }
        jp = jpype.JProxy('sun.jvm.hotspot.debugger.linux.PythonMethodInterface', dict=method_dict)

        # java init
        PyDump.initVM(jp, int(task.pid))
        self.first_fp_list = []
        for index in range(1, 11):
            threadName = 'http-nio-8080-exec-' + str(index)
            first_fp = PyDump.initJavaFirstFPAddress(threadName, True)
            self.first_fp_list.append(first_fp)
            print threadName, first_fp

        # event
        self.event_front_1 = '<xml type="event" name="'
        self.event_front_2 = '">'
        self.event_middle_1 = '<'
        self.event_middle_2 = '>'
        self.event_middle_3 = '</'
        self.event_middle_4 = '>'
        self.event_end = '</xml>\r\n\r\n'

        self.client = None

        # tcpSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # tcpSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # tcpSocket.bind(('', 6666))
        # tcpSocket.listen(5)
        # while True:
        #     try:
        #         print "waiting for connection..."
        #         client, addr = tcpSocket.accept()
        #         print "...connected from:", addr
        #         while True:
        #             time.sleep(1)
        #             for result in self.getEvent(first_fp, fnames, vnames, vtypes):
        #                 if result is not None:
        #                     print result
        #                     client.sendall(result)
        #     except Exception, e:
        #         print repr(e)
        #     finally:
        #         client.close()
        # tcpSocket.close()
        # self.conf = Conf()
        # self.conf.config_c(self.run_command, self.stop_command)
        # self.conf.start()

        count = 10
        while count > 0:
            print "#######################"
            for first_fp in self.first_fp_list:
                inf = self.getEvent(first_fp, self.fnames, self.vnames, self.vtypes, self.client)
            count -= 1

        PyDump.stop()
        jpype.shutdownJVM()
Example #28
    '-Djava.class.path=/Users/jennyhung/MathfreakData/School/OMSCS_ML/Assign2/abagail_py/ABAGAIL/ABAGAIL.jar'
)
jp.java.lang.System.out.println("hello world")
jp.java.func.nn.backprop.BackPropagationNetworkFactory
jp.java.func.nn.backprop.RPROPUpdateRule
jp.java.func.nn.backprop.BatchBackPropagationTrainer
jp.java.shared.SumOfSquaresError
jp.java.shared.DataSet
jp.java.shared.Instance
jp.java.opt.SimulatedAnnealing
jp.java.opt.example.NeuralNetworkOptimizationProblem
jp.java.opt.RandomizedHillClimbing
jp.java.ga.StandardGeneticAlgorithm
jp.java.func.nn.activation.RELU

BackPropagationNetworkFactory = jp.JPackage(
    'func').nn.backprop.BackPropagationNetworkFactory
DataSet = jp.JPackage('shared').DataSet
SumOfSquaresError = jp.JPackage('shared').SumOfSquaresError
NeuralNetworkOptimizationProblem = jp.JPackage(
    'opt').example.NeuralNetworkOptimizationProblem
RandomizedHillClimbing = jp.JPackage('opt').RandomizedHillClimbing
Instance = jp.JPackage('shared').Instance
RELU = jp.JPackage('func').nn.activation.RELU

INPUT_LAYER = 109
OUTPUT_LAYER = 1
TRAINING_ITERATIONS = 2001
OUTFILE = './NN_OUTPUT/RHC_LOG.txt'


def get_all_data():
Example #29
def bayesServer():
    return jp.JPackage("com.bayesserver")
Example #30
logger.info(
    "Attempting to start creating client for the following Web Service: " +
    str(url))

# The webservice is a soap webservice
if servicetype == 'SOAP':

    logger.info("Setting up jpype variables")
    galaxyHomePath = str(os.environ.get('GALAXY_HOME'))
    jar_home = str(os.environ.get(
        'GALAXY_HOME')) + '/tools/WebServiceToolWorkflow_REST_SOAP/engine'
    jarpath = os.path.join(os.path.abspath(jar_home), '')
    logger.info("Starting the JVM")
    jpype.startJVM(jpype.getDefaultJVMPath(), "-Djava.ext.dirs=%s" % jarpath)
    logger.info("JVM is up and running")
    toolCreatorPackage = jpype.JPackage(
        "edu.uga.WSExtension.AddingWebServiceAsTools")
    logger.info("Creating objects to create stand alone and workflow tool")
    standAloneToolCreator = toolCreatorPackage.CreatingWSDLToolAsStandalone(
        url, galaxyHomePath)
    logger.info("Object for stand alone tool has been created successfully")
    workflowToolCreator = toolCreatorPackage.CreatingWSDLToolAsWorkflow(
        url, galaxyHomePath)
    logger.info("Object for workflow tool has been created successfully")
    for operation in operations:
        if operation != '':
            logger.info("Creating tool stand alone and workflow tool for " +
                        str(operation))
            statusOfStandAlone = standAloneToolCreator.createTool(operation)
            logger.info("Stand alone tool created successfully " +
                        str(statusOfStandAlone))
            statusOfWorkflowTool = workflowToolCreator.createTool(operation)