Example #1
    def _get_app_info(self):
        """Get information about the app.
        """

        info = dict()
        info['core_data'] = core_data = _get_core_data()
        info['extensions'] = extensions = self._get_extensions(core_data)
        page_config = self._read_page_config()
        info['disabled'] = page_config.get('disabledExtensions', [])
        info['local_extensions'] = self._get_local_extensions()
        info['linked_packages'] = self._get_linked_packages()
        info['app_extensions'] = app = []
        info['sys_extensions'] = sys = []
        for (name, data) in extensions.items():
            data['is_local'] = name in info['local_extensions']
            if data['location'] == 'app':
                app.append(name)
            else:
                sys.append(name)

        info['uninstalled_core'] = self._get_uninstalled_core_extensions()
        info['version'] = core_data['jupyterlab']['version']
        info['sys_dir'] = self.sys_dir
        info['app_dir'] = self.app_dir

        info['core_extensions'] = core_extensions = _get_core_extensions()

        disabled_core = []
        for key in core_extensions:
            if key in info['disabled']:
                disabled_core.append(key)

        info['disabled_core'] = disabled_core
        return info
Example #2
def plan_traj():
    path_points = [do_trim(vt=100, gamma_deg=90, m_fuel=0.8)]
    path_points.append(do_trim(vt=100, gamma_deg=45, m_fuel=0.4))
    path_points.append(do_trim(vt=100, gamma_deg=0, m_fuel=0))
    lin = linearize()
    sys = []
    ABCDs = []
    gam_ref = [90,45,0]
    h_ref = [0,]
    for i,p in enumerate(path_points):
        x0 = p[0]
        u0 = p[1]
        p0 = p[2]
        A,B,C,D = lin(x0,u0,p0)
        ABCDs.append([A,B,C,D])
        sys.append(control.ss(A,B,C,D))
    
#     A = np.array(ABCDs[0][0])
#     B = np.array(ABCDs[0][1])
#     v1 = np.zeros((14,1))
#     v2 = np.zeros((1,15))
#     v2[0,9] = -1
#     Ai = np.vstack([np.hstack([A,v1]),v2])
#     Bi = np.vstack([B,np.zeros(4)])
#     print(np.shape(Ai))
#     print(np.shape(Bi))
#     q = zeros(15)
#     q[14] = 40
#     q[]
#     K,S,E = control.lqr(Ai,Bi,)
        
    return sys
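For reference, a minimal sketch of the state-space construction used above, with a hypothetical one-state system (python-control's `control.ss`); the A, B, C, D values here are illustrative only:

import control
import numpy as np

# a stable scalar system dx/dt = -x + u, y = x
sys_1 = control.ss(np.array([[-1.0]]),  # A
                   np.array([[1.0]]),   # B
                   np.array([[1.0]]),   # C
                   np.array([[0.0]]))   # D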
Example #3
def argfilter(args):
  switches = []
  sys = []
  for arg in args:
    if arg[0] == '-':
      switches.append(arg)
    else:
      sys.append(arg)
  return switches, " ".join(sys)
Example #4
def argfilter(args):
    switches = []
    sys = []
    for arg in args:
        if arg[0] == '-':
            switches.append(arg)
        else:
            sys.append(arg)
    return switches, " ".join(sys)
Example #5
import os
import subprocess
import sys


def install_module(command, dest_path=None):
    try:
        subprocess.check_call([sys.executable, "-m", "ensurepip"])
        # list.extend() returns None, so build the command by concatenation instead
        exe_cmd = [sys.executable] + command
        if dest_path:
            subprocess.check_call(exe_cmd + ["-t", dest_path])
            sys.path.append(dest_path)  # import paths live in sys.path, not on the sys module
            os.environ["PYTHONPATH"] = dest_path + (";" + os.environ["PYTHONPATH"] if "PYTHONPATH" in os.environ else "")
        else:
            subprocess.check_call(exe_cmd)
    except subprocess.CalledProcessError as e:
        print(e.output)
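A hypothetical call, assuming pip-style interpreter arguments; "./vendor" is an illustrative destination, not a path from the original:

# runs: python -m pip install requests -t ./vendor
install_module(["-m", "pip", "install", "requests"], dest_path="./vendor")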
Example #6
    def getLTLRepresentation(self, mark_players=True, use_next=False, include_inputs=True, include_outputs=True, swap_players=False, exclude=[]):
        """ Returns an LTL formula representing this state.

            If `mark_players` is True, input propositions are prepended with
            "e.", and output propositions are prepended with "s.". (If `swap_players`
            is True, these labels will be reversed [this feature is used by Mopsy])

            If `use_next` is True, all propositions will be modified by a single
            "next()" operator.  `include_inputs` and `include_outputs`, which
            default to True, determine whether input and output propositions
            are included in the formula. """

        # Make a helpful little closure for adding operators to bare props
        def decorate_prop(prop, polarity):
            #### TEMPORARY HACK: REMOVE ME AFTER OTHER COMPONENTS ARE UPDATED!!!
            # Rewrite proposition names to make the old bitvector system work
            # with the new one
            prop = re.sub(r'^([se]\.)region_b(\d+)$', r'\1bit\2', prop)
            prop = re.sub(r'^([se]\.)regionCompleted_b(\d+)$', r'\1sbit\2', prop)
            #################################################################

            if use_next:
                prop = "next({})".format(prop)
            if polarity is False:
                prop = "!"+prop
            return prop

        if swap_players:
            env_label, sys_label = "s.", "e."
        else:
            env_label, sys_label = "e.", "s."

        if include_outputs:
            sys = []
            for p, v in self.getOutputs(expand_domains=True).iteritems():
                if p not in exclude:
                    sys.append(decorate_prop(sys_label+p, v))
            sys_state = ' & '.join(sys)

        if include_inputs:
            env_state = " & ".join((decorate_prop(env_label+p, v) for p, v in \
                                    self.getInputs(expand_domains=True).iteritems()))
        if include_outputs and not include_inputs:
            return sys_state
        elif not include_outputs and include_inputs:
            return env_state
        elif include_outputs and include_inputs:
            return " & ".join([env_state, sys_state])
        else:
            print "please specified either outputs or inputs to print"
Example #7
    def _format(self, n):
        '''Helper function for _makeMoreChannels()'''
        sys = []
        dev = []
        for elem in n.split(';'):
            if ':' in elem:
                parts = elem.split(':')
                sys.append(parts[0])
                dev.append(parts[1])
            else:
                sys.append(elem)
                dev.append('None')

        return sys, dev
Example #8
def get_systematics(file, overrides={}, rename=lambda u: u, samples=False):
    """Reads `file` and returns a list of (uncertainty, type, {sample:
    value}).

    The parameter `overrides` allows specifying a dict of the form
    {uncertainty: value}; values of "x" in the systematics file are
    replaced by the one given in the dict.

    A function passed as `rename` allows to alter the uncertainty name,
    _after_ all other processing has happened.

    The parameter `samples` can be set to `True` to obtain the available
    sample names defined in the systematics file.
    """
    reader = csv.DictReader(open(file))
    reader.fieldnames = map(str.strip, reader.fieldnames)
    sys_samples = reader.fieldnames[2:]

    if samples:
        return sys_samples

    sys = []
    # create a list whose elements are the tuples
    # (uncertainty, type, {sample: value})
    # each `row` is a dict mapping column names to the values in that row
    for row in reader:
        # comment character
        # if the line is not empty
        if len(row) > 0:
            # print row
            # if the row starts with comment #
            if re.match("^#", row["Uncertainty"]):
                continue
            # end if re.match
        # end if len
        unc = row.pop("Uncertainty").strip()
        type = row.pop("Type").strip()
        # `row` is a dictionary with sample names as keys.  Strip spaces
        # from the actual value
        row = dict(map(lambda (k,v): (k, v.strip()), row.items()))
        if unc in overrides:
            row = dict(
                    map(
                        lambda (k,v): (k, overrides[unc] if v == "x" else v),
                        row.items()))
        sys.append((rename(unc), type, row))
    return sys
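A hedged usage sketch of get_systematics: assuming a hypothetical systematics.csv whose header row is "Uncertainty, Type, sampleA, sampleB" and which holds one data row "lumi, lnN, 1.025, x", the call below replaces the "x" cell via `overrides` and applies `rename` last:

systs = get_systematics("systematics.csv",
                        overrides={"lumi": "1.05"},
                        rename=lambda u: u.upper())
# -> [("LUMI", "lnN", {"sampleA": "1.025", "sampleB": "1.05"})]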
Example #10
def split_features(data):
  # print("CRAFTING FEATURES...")
  # print()
  dia = []
  eda = []
  sys = []
  res = []
  for i in range(len(data)):
    if i % 4 == 0: 
      dia.append(data[i])
    elif i % 4 == 1: 
      eda.append(data[i])
    elif i % 4 == 2: 
      sys.append(data[i])
    elif i % 4 == 3: 
      res.append(data[i])
  return dia, eda, sys, res
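The data stream is assumed to interleave the four signals sample by sample, so a quick sanity check on a toy list looks like this (the variable names are illustrative):

dia, eda, sys_bp, res = split_features(list(range(8)))
# dia == [0, 4], eda == [1, 5], sys_bp == [2, 6], res == [3, 7]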
Example #11
def AppendPythonPath():

    import sys
    import Tkinter, Tkconstants, tkFileDialog

    root = Tkinter.Tk()
    root.withdraw()

    options = {}
    options['initialdir'] = '/'
    options['mustexist'] = False
    options['parent'] = root
    options['title'] = 'Choose directory to add to PATH'
    newdir = tkFileDialog.askdirectory(**options)
    sys.path.append(newdir)  # the sys module has no append(); import paths live in sys.path

    print '\nDirectory added to PATH: ' + newdir + '\n'
Example #13
def plan_traj():
    path_points = [do_trim(vt=100, gamma_deg=90, m_fuel=0.8)]
    path_points.append(do_trim(vt=100, gamma_deg=45, m_fuel=0.4))
    path_points.append(do_trim(vt=100, gamma_deg=0, m_fuel=0))
    lin = linearize()
    sys = []
    ABCDs = []
    gam_ref = [90,45,0]
    h_ref = [0,]
    for i,p in enumerate(path_points):
        x0 = p[0]
        u0 = p[1]
        p0 = p[2]
        A,B,C,D = lin(x0,u0,p0)
        ABCDs.append([A,B,C,D])
        sys.append(control.ss(A,B,C,D))
    
    return sys
Example #14
def calc_word_sim(model, eval_file):
    df = pd.read_csv(eval_file, sep=',', header=0) # eval dataset
    col1, col2, score = df.columns.values
    model_vocab = model.vocab.keys()
    ground = []
    sys = []
    for idx, row in df.iterrows():
        if row[col1] in model_vocab and row[col2] in model_vocab:
            ground.append(float(row[score]))
            sys.append(model.similarity(row[col1], row[col2]))

    # compute Spearman's rank correlation coefficient (https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient)
    print sys
    # import pdb;pdb.set_trace()
    corr, p_val = stats.spearmanr(sys, ground)
    logger.info("# of pairs found: %s / %s" % (len(ground), len(df)))
    logger.info("correlation: %s" % corr)
    return corr, p_val
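A minimal illustration of the metric computed above (SciPy's stats.spearmanr returns the rank correlation and a p-value); the toy scores are made up:

from scipy import stats
corr, p_val = stats.spearmanr([0.1, 0.4, 0.9], [0.2, 0.5, 0.7])
print(corr)  # 1.0 -- identical rank order gives perfect correlation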
Example #15
    def _get_app_info(self):
        """Get information about the app.
        """

        info = dict()
        info['core_data'] = core_data = _get_core_data()
        info['extensions'] = extensions = self._get_extensions(core_data)
        page_config = self._read_page_config()
        info['disabled'] = page_config.get('disabledExtensions', [])
        info['local_extensions'] = self._get_local_extensions()
        info['linked_packages'] = self._get_linked_packages()
        info['app_extensions'] = app = []
        info['sys_extensions'] = sys = []
        for (name, data) in extensions.items():
            data['is_local'] = name in info['local_extensions']
            if data['location'] == 'app':
                app.append(name)
            else:
                sys.append(name)

        info['uninstalled_core'] = self._get_uninstalled_core_extensions()

        info['static_data'] = _get_static_data(self.app_dir)
        app_data = info['static_data'] or core_data
        info['version'] = app_data['jupyterlab']['version']
        info['publicUrl'] = app_data['jupyterlab'].get('publicUrl', '')

        info['sys_dir'] = self.sys_dir
        info['app_dir'] = self.app_dir

        info['core_extensions'] = core_extensions = _get_core_extensions()

        disabled_core = []
        for key in core_extensions:
            if key in info['disabled']:
                disabled_core.append(key)

        info['disabled_core'] = disabled_core
        return info
Example #16
import sys
import json
import numpy as np
import progressbar
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import ImageDataGenerator

sys.append("../")
from pyimagesearch.utils.ranked import rank5_accuracy
from dog_vs_cat.configs import dog_vs_cat_configs as configs
from pyimagesearch.preprocessing.croppreprocessor import CropPreprocessor
from pyimagesearch.preprocessing.meanpreprocessor import MeanPreprocessor
from pyimagesearch.preprocessing.simplepreprocessor import SimplePreprocessor
from pyimagesearch.preprocessing.imagetoarraypreprocessor import ImageToArrayPreprocessor

# load RGB mean values
means = json.loads(open(configs.DATASET_MEAN).read())

# load preprocessors
ip = ImageToArrayPreprocessor()
cp = CropPreprocessor(width=227, height=227)
sp = SimplePreprocessor(width=227, height=227)
mp = MeanPreprocessor(rMean=means["R"], gMean=means["G"], bMean=means["B"])


# custom preprocessor class which utilizes custom preprocessors
class CustomPreprocessing:
    def __init__(self, preprocessors=[]):
        self.preprocessors = preprocessors

    def preprocess(self, image):
Example #17
#!/usr/bin/env python
'''
NPR 2015-12-06

http://www.npr.org/2015/12/06/458661972/transform-words-with-an-additional-letter-in-this-weeks-puzzle

Name a state capital. Drop one of its letters. 
The remaining letters can be rearranged to name of another major city 
in the United States. What is it? 
There are two different answers, and you should find both of them.
'''

import sys
sys.path.append('..')
from nprcommontools import sort_string

from nltk.corpus import wordnet as wn, gazetteers
import re

# U.S. States
states = set(gazetteers.words('usstates.txt'))

# capitals and major cities
cities = set(); capitals = set()
for synset in wn.all_synsets():
    d = synset.definition()
    for state in states:
        if state in d and 'city' in d:
            for l in synset.lemma_names():
                if l[0] == l[0].upper():
                    cities.add(l)
Example #18
#!/annoroad/data1/bioinfo/PMO/yangmengcheng/SoftWare/Anaconda3-5.3.1/bin/python
import sys
sys.path.append('../lib')
import bam.bam
from read.Read import *
import tempfile


Example #19
import sys
sys.append("../Py2D")
sys.append("./Py2D")

Example #20
    calc.set(isif=2)

    secban('Elastic tensor by internal routine')

    # Elastic tensor by internal routine
    Cij, Bij = cryst.get_elastic_tensor(n=5, d=0.33)
    print("Cij (GPa):", Cij/GPa)

    calc.clean()

    secban('C11 and C12 by hand')

    # Now let us do it (only c11 and c12) by hand
    sys = []
    for d in linspace(-0.5, 0.5, 6):
        sys.append(cryst.get_cart_deformed_cell(axis=0, size=d))
    r = ParCalculate(sys, cryst.calc)
    ss = []
    for s in r:
        ss.append([s.get_strain(cryst), s.get_stress()])
    # Plot strain-stress relation
    figure(3)

    ss = []
    for p in r:
        ss.append([p.get_strain(cryst), p.get_stress()])
    ss = array(ss)
    lo = min(ss[:, 0, 0])
    hi = max(ss[:, 0, 0])
    mi = (lo + hi) / 2
    wi = (hi - lo) / 2
Example #21
	def plot_core(self):
		f = open(self.path + "cores.txt", "r")
		lines = f.readlines()

		usr = []
		sys = []
		softirq = []
		iowait = []
		idle = []
		
		new_usr = []
		new_sys = []
		new_softirq = []
		new_iowait = []
		new_idle = []

		for i in range(5):
			usr.append([])
			sys.append([])
			softirq.append([])
			iowait.append([])
			idle.append([])
			
		for i in range(5):
			new_usr.append([])
			new_sys.append([])
			new_softirq.append([])
			new_iowait.append([])
			new_idle.append([])

		for i in lines:
			k = i.split()	
			if(len(k) > 1 and k[2] != 'CPU'):
				if(k[2] == 'all'):
					new_usr[4].append(float(k[3]))
					new_sys[4].append(float(k[5]))
					new_softirq[4].append(float(k[8]))	# %soft is column k[8]; k[6] is %iowait
					new_iowait[4].append(float(k[6]))
					new_idle[4].append(float(k[12]))
				else:
					new_usr[int(k[2])].append(float(k[3]))
					new_sys[int(k[2])].append(float(k[5]))
					new_softirq[int(k[2])].append(float(k[8]))	# %soft is column k[8]; k[6] is %iowait
					new_iowait[int(k[2])].append(float(k[6]))
					new_idle[int(k[2])].append(float(k[12]))


		for j in range(5):
			usr[j].append(new_usr[j][0])
			sys[j].append(new_sys[j][0])
			softirq[j].append(new_softirq[j][0])
			iowait[j].append(new_iowait[j][0])
			idle[j].append(new_idle[j][0])
			for i in range(1, len(new_usr[j])):
				x = self.nextCur(new_usr[j][i-1], new_usr[j][i], i)
				usr[j].append(x)
				x = self.nextCur(new_sys[j][i-1], new_sys[j][i], i)
				sys[j].append(x)
				x = self.nextCur(new_softirq[j][i-1], new_softirq[j][i], i)
				softirq[j].append(x)
				x = self.nextCur(new_iowait[j][i-1], new_iowait[j][i], i)
				iowait[j].append(x)
				x = self.nextCur(new_idle[j][i-1], new_idle[j][i], i)
				idle[j].append(x)

		if(self.d['core_choice'] == 'both'):
			plt.title('softirq ' + self.exp)
			plt.plot(range(len(softirq[0])), softirq[0], 'r.')
			plt.plot(range(len(softirq[1])), softirq[1], 'g.')
			plt.plot(range(len(softirq[2])), softirq[2], 'b.')
			plt.plot(range(len(softirq[3])), softirq[3], 'y.')
			#plt.plot(range(len(softirq[4])), softirq[4], 'c.')
			plt.axis([0, len(usr[0]), 0, 100])
			plt.show()
			
			for i in range(4):
				plt.xlabel('seconds')
				#plt.ylabel('Load on core 3')
				#plt.title('CPU ' + str(i) + ' ' + self.exp)
				plt.plot(range(len(iowait[i])), iowait[i], 'r+', label = 'iowait')
				#plt.plot(range(len(iowait[i])), iowait[i], 'b.')
				plt.plot(range(len(idle[i])), idle[i], 'g*', label = 'Idle')

				plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
			
				plt.axis([0, len(usr[i]), 0, 100])
			
				plt.savefig('pictures/plot_' + str(i) + str(self.exp))
				plt.show()
				plt.close()

		elif(self.d['core_choice'] == 'cpu'):
			for i in range(5):
				plt.title('CPU ' + str(i) + ' ' + self.exp)
				plt.plot(range(len(usr[i])), usr[i], 'ro')
				plt.plot(range(len(sys[i])), sys[i], 'b+')
				plt.plot(range(len(softirq[i])), softirq[i], 'k.')
				plt.plot(range(len(iowait[i])), iowait[i], 'rs')
				plt.plot(range(len(idle[i])), idle[i], 'go')

				plt.axis([0, len(usr[i]), 0, 100])
				plt.show()

		else:
			plt.title('usr ' + self.exp)
			plt.plot(range(len(usr[0])), usr[0], 'r+')
			plt.plot(range(len(usr[1])), usr[1], 'g+')
			plt.plot(range(len(usr[2])), usr[2], 'b+')
			plt.plot(range(len(usr[3])), usr[3], 'y+')
			plt.plot(range(len(usr[4])), usr[4], 'c+')
			plt.axis([0, len(usr[0]), 0, 100])
			plt.show()

			plt.title('sys ' + self.exp)
			plt.plot(range(len(sys[0])), sys[0], 'r+')
			plt.plot(range(len(sys[1])), sys[1], 'g+')
			plt.plot(range(len(sys[2])), sys[2], 'b+')
			plt.plot(range(len(sys[3])), sys[3], 'y+')
			plt.plot(range(len(sys[4])), sys[4], 'c+')
			plt.axis([0, len(usr[0]), 0, 100])
			plt.show()
			
			plt.title('softirq ' + self.exp)
			plt.plot(range(len(softirq[0])), softirq[0], 'r+')
			plt.plot(range(len(softirq[1])), softirq[1], 'g+')
			plt.plot(range(len(softirq[2])), softirq[2], 'b+')
			plt.plot(range(len(softirq[3])), softirq[3], 'y+')
			plt.plot(range(len(softirq[4])), softirq[4], 'c+')
			plt.axis([0, len(usr[0]), 0, 100])
			plt.show()

			plt.title('iowait ' + self.exp)
			plt.plot(range(len(iowait[0])), iowait[0], 'r')
			plt.plot(range(len(iowait[1])), iowait[1], 'g')
			plt.plot(range(len(iowait[2])), iowait[2], 'b')
			plt.plot(range(len(iowait[3])), iowait[3], 'y')
			plt.plot(range(len(iowait[4])), iowait[4], 'c')
			plt.axis([0, len(usr[0]), 0, 100])
			plt.show()

			
			plt.title('idle ' + self.exp)
			plt.plot(range(len(idle[0])), idle[0], 'r+')
			plt.plot(range(len(idle[1])), idle[1], 'g+')
			plt.plot(range(len(idle[2])), idle[2], 'b+')
			plt.plot(range(len(idle[3])), idle[3], 'y+')
			plt.plot(range(len(idle[4])), idle[4], 'c+')
			plt.axis([0, len(usr[0]), 0, 100])
			plt.show()
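The column indices used above assume a typical `mpstat -P ALL` report; a hedged sketch of parsing one such line (the sample values are invented):

line = "10:01:01 AM  0  3.1  0.0  1.2  0.5  0.0  0.2  0.0  0.0  0.0  95.0"
k = line.split()
# k[2] -> CPU id, k[3] -> %usr, k[5] -> %sys, k[6] -> %iowait,
# k[8] -> %soft, k[12] -> %idle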
Example #22
        # Scan over deformations

        # Switch to IDOF optimizer
        calc.set(isif=2)

        # Elastic tensor by internal routine
        Cij, Bij = cryst.get_elastic_tensor(n=5, d=0.33)
        print("Cij (GPa):", Cij / units.GPa)

        calc.clean()

        # Now let us do it (only c11 and c12) by hand
        sys = []
        for d in linspace(-0.5, 0.5, 6):
            sys.append(cryst.get_cart_deformed_cell(axis=0, size=d))
        r = ParCalculate(sys, cryst.get_calculator())
        ss = []
        for s in r:
            ss.append([s.get_strain(cryst), s.get_stress()])
        # Plot strain-stress relation
        figure(3)

        ss = []
        for p in r:
            ss.append([p.get_strain(cryst), p.get_stress()])
        ss = array(ss)
        lo = min(ss[:, 0, 0])
        hi = max(ss[:, 0, 0])
        mi = (lo + hi) / 2
        wi = (hi - lo) / 2
Example #23
import os
import sys

try:
    from gitcrawler import collect_details as detail
except ImportError:
    sys.path.append('/media/laxmena/Bazinga/Crawler/GIT-beta')
    from gitcrawler import collect_details as detail

count = 0
crawl_list = []
#Get initial Username from the User
username = detail.get_username()
crawl_list.append(username)

if not os.path.exists('/home/laxmena/gitcrawler'):
    os.mkdir('/home/laxmena/gitcrawler')
for i in crawl_list:
    try:
        file_name = '/home/laxmena/gitcrawler/' + i + '.txt'
        f = open(file_name,'w')
        f.write('Personal Details:\n')
        f.write('-'*50 + '\n')

        print ('Scraping Details of %s' % i)
        for itr in detail.personal_details(i):
            f.write(itr)
            f.write('\n')
        f.write('\n\nAll Repositories:\n')
        f.write('-'*50 + '\n')
        f.write('Repository Name\n')
Example #24
    for pep in nt_all:
        if item == pep[i]:
            pass
        elif item not in codon:
            pass
        elif pep[i] not in codon:
            pass
        elif codon[item] == codon[pep[i]]:
            countsys += 1
        elif codon[item] != codon[pep[i]]:
            countnonsys += 1
        else :
            pass
    print(x)
    x = x + 1 
    sys.append(countsys)
    nonsys.append(countnonsys)
    diff = countnonsys - countsys
    dif.append(diff)
    rat = countnonsys - countsys
    ratio.append(rat)
    countsys = 0
    countnonsys = 0
print(dif)
deviation = np.std(dif)
print(deviation)
zscore = dif/deviation
print(zscore)
zscore = []
for di in dif:
    zscor = di/deviation
Example #25
import os
import sys
sys.path.append(os.path.expanduser('~/Development/uvadev/nlp2/project1'))  # '~' must be expanded explicitly
from flask import Flask, render_template, request, jsonify
from flask_socketio import SocketIO, emit
from flask_cors import CORS, cross_origin
from models.make_vae import make_vae
from app_config import config

model_paths = {
    'vae_vanilla': 'vae_best_mu0_wd1_fb-1.pt',
    'vae_high_freebits': 'vae_best_mu5_wd1_fb8.pt',
    'vae_md_freebits': 'vae_best_mu5_wd1_fv4.pt'
}

loaded_models = {
    key: make_vae(config, True, val)
    for key, val in model_paths.items()
}

app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
CORS(app)


@app.route('/', methods=['GET'])  # Flask route rules must start with '/'
def init():
    return jsonify(list(loaded_models.keys()))  # dict_keys is not a valid response type


@app.route('/predict', methods=['POST'])
def predict():
Example #26
import numpy as np
import pdb
from skimage.measure import label
from utils import segmentation_metrics
import torch
import SimpleITK as sitk
import sys
sys.append("/home/mahaoran/lei_nao/seg_thor")
def get_metrics(segmentation, mask, n_class=4):
    results = np.zeros((n_class, 5))
    overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter()
    hausdorff_distance_filter = sitk.HausdorffDistanceImageFilter()
    for i in range(n_class):
        cur_mask = (mask == i+1).astype(np.int16)
        cur_segmentation = (segmentation == i+1).astype(np.int16)
        segmentation_itk = sitk.GetImageFromArray(cur_segmentation)
        mask_itk = sitk.GetImageFromArray(cur_mask)
        overlap_measures_filter.Execute(segmentation_itk, mask_itk)
        hausdorff_distance_filter.Execute(segmentation_itk, mask_itk)
        results[i, 0] = overlap_measures_filter.GetJaccardCoefficient()
        results[i, 1] = overlap_measures_filter.GetDiceCoefficient()
        results[i, 2] = hausdorff_distance_filter.GetHausdorffDistance()
        results[i, 3] = overlap_measures_filter.GetFalseNegativeError()
        results[i, 4] = overlap_measures_filter.GetFalsePositiveError()
    return np.mean(results, 0)

def multi_scale(images, if_zoom=True, if_flip=True):
    total_images = []
    total_flip_flag = []
    total_zoom_flag = []
Example #27
def load_test_data(
    vref_file_name: str,
    src_file_name: str,
    pred_file_name: str,
    ref_pattern: str,
    output_file_name: str,
    ref_projects: Set[str],
    config: Config,
    books: Set[int],
    by_book: bool,
) -> Tuple[Dict[str, Tuple[List[str], List[List[str]]]], Dict[str, dict]]:
    dataset: Dict[str, Tuple[List[str], List[List[str]]]] = {}
    src_file_path = config.exp_dir / src_file_name
    pred_file_path = config.exp_dir / pred_file_name
    with src_file_path.open("r",
                            encoding="utf-8") as src_file, pred_file_path.open(
                                "r", encoding="utf-8") as pred_file, (
                                    config.exp_dir / output_file_name).open(
                                        "w", encoding="utf-8") as out_file:
        ref_file_paths = list(config.exp_dir.glob(ref_pattern))
        select_rand_ref_line = False
        if len(ref_file_paths) > 1:
            if len(ref_projects) == 0:
                # no refs specified, so randomly select verses from all available train refs to build one ref
                select_rand_ref_line = True
                ref_file_paths = [
                    p for p in ref_file_paths if config.is_train_project(p)
                ]
            else:
                # use specified refs only
                ref_file_paths = [
                    p for p in ref_file_paths
                    if config.is_ref_project(ref_projects, p)
                ]
        ref_files: List[IO] = []
        vref_file: Optional[IO] = None
        vref_file_path = config.exp_dir / vref_file_name
        if len(books) > 0 and vref_file_path.is_file():
            vref_file = vref_file_path.open("r", encoding="utf-8")
        try:
            for ref_file_path in ref_file_paths:
                ref_files.append(ref_file_path.open("r", encoding="utf-8"))
            default_trg_iso = config.default_trg_iso
            for lines in zip(src_file, pred_file, *ref_files):
                if vref_file is not None:
                    vref_line = vref_file.readline().strip()
                    if vref_line != "":
                        vref = VerseRef.from_string(vref_line,
                                                    ORIGINAL_VERSIFICATION)
                        if vref.book_num not in books:
                            continue
                src_line = lines[0].strip()
                pred_line = lines[1].strip()
                detok_pred_line = decode_sp(pred_line)
                iso = default_trg_iso
                if src_line.startswith("<2"):
                    index = src_line.index(">")
                    val = src_line[2:index]
                    if val != "qaa":
                        iso = val
                if iso not in dataset:
                    dataset[iso] = ([], [])
                sys, refs = dataset[iso]
                sys.append(detok_pred_line)
                if select_rand_ref_line:
                    ref_lines: List[str] = [
                        l for l in map(lambda l: l.strip(), lines[2:])
                        if len(l) > 0
                    ]
                    ref_index = random.randint(0, len(ref_lines) - 1)
                    ref_line = ref_lines[ref_index]
                    if len(refs) == 0:
                        refs.append([])
                    refs[0].append(ref_line)
                else:
                    for ref_index in range(len(ref_files)):
                        ref_line = lines[ref_index + 2].strip()
                        if len(refs) == ref_index:
                            refs.append([])
                        refs[ref_index].append(ref_line)
                out_file.write(detok_pred_line + "\n")
            book_dict: Dict[str, dict] = {}
            if by_book:
                book_dict = process_individual_books(
                    src_file_path,
                    pred_file_path,
                    ref_file_paths,
                    vref_file_path,
                    default_trg_iso,
                    select_rand_ref_line,
                    books,
                )
        finally:
            if vref_file is not None:
                vref_file.close()
            for ref_file in ref_files:
                ref_file.close()
    return dataset, book_dict
Example #28
#-*- coding: utf-8 -*-
# This script checks whether the times in OrderQuote.dat are in the correct sequence
import os
import collections
import sys
from subprocess import call
import unittest
import subprocess
import re
import time
import shlex
import shutil
from datetime import datetime

if len(sys.argv) < 2:
    sys.argv.append('/home/newemg/data/OrderQuote.dat')  # default input file for the argument parsing below

arg_names = ['command', 'file']
args = dict(zip(arg_names, sys.argv))
Arg_list = collections.namedtuple('Arg_list', arg_names)
args = Arg_list(*(args.get(arg, None) for arg in arg_names))

class CheckQrderQuoteTimeSeq(unittest.TestCase):
    def setUp(self):
        pass
    def tearDown(self):
        pass

    def getLineInfo(self, line):
        ret=''
        if(line[41:133]=='                                                                                            '):
Example #29
#!/usr/bin/env python
#-*-coding: utf-8-*-
## File name: ub_model/mr/mr_fac.py

import os
import sys
import logging
sys.path.append('../../common')
from mr import Mr
from utils import Utils

class MrFac:
  def __init__(self):
    self.mr = None

  def getmr(self, mrname, productline):
    abspath = self.getmr_abspath(mrname)
    self.mr = Utils.getinstanceffile(abspath, Mr, productline)

    return self.mr

  def getmr_abspath(self, mrname):
    if mrname is not None and not mrname.endswith('.py'):
      mrname = '%s.py' %(mrname)
    
    abspath = ''
    files = os.listdir('.')

    for f in files:
      if f == mrname:
        abspath = os.path.abspath('./%s' %(mrname))
Example #30
#!/usr/bin/env python3
# coding: utf-8
"""
Script to run the DeepSphere experiment with SHREC17 dataset.
"""

import os
import shutil
import sys
sys.path.append('../..')

os.environ[
    "CUDA_VISIBLE_DEVICES"] = "0"  # set to the GPU to use; leave empty to run on the CPU

import numpy as np

from deepsphere import models
from . import hyperparameters

from load_shrec import Shrec17DatasetCache, Shrec17DatasetTF, fix_dataset

Nside = 32
experiment_type = 'CNN'
ename = '_' + experiment_type
datapath = '../../data/shrec17/'  # localisation of the .obj files

if len(sys.argv) > 4:
    Nside = int(sys.argv[1])
    augmentation = int(sys.argv[2])
    experiment = sys.argv[3]
    nfeat = int(sys.argv[4])
Example #31
def txt2json(txtfile):
    # create base dictionary:
    SET_SLOTS = False
    ont = dict.fromkeys(['type','requestable','discourseAct','system_requestable','method','informable'])
    ont['discourseAct'] = [
        "ack", 
        "hello", 
        "none", 
        "repeat", 
        "silence", 
        "thankyou"
    ] 
    ont['method'] = [
        "none", 
        "byconstraints", 
        "byname", 
        "finished", 
        "byalternatives", 
        "restart"
    ]
   
    # step through txt rules
    with open(txtfile+'.txt','r') as txtFILE:
        f = txtFILE.readlines()

        for i in range(len(f)):
            line = f[i]
            if line[0] == '#':
                continue
            if 'task' in line:
                continue
            if 'entity' in line:
                # assume entity comes before type:
                req = re.findall(r"[\w']+", line)
                continue
            if 'type' in line:
                req += re.findall(r"[\w']+", line) 
                for x in remove_from_req:
                    while x in req:
                        req.remove(x)  # this should remove all occurrences.
                print 'requestable slots are (ie all slots)'
                req_final = copy.deepcopy(req)
                ont['requestable'] = req_final
                print ont['requestable']
                ont['informable'] = dict.fromkeys(req)
                # also set the breakup of informable and requestable slots via inform_req_lines - 
                # do this MANUALLY - easier! but gives some confidence/checks:
                sys = []
                for slot in req:
                    ans = raw_input('is slot: '+slot+' SYSTEM REQUESTABLE?')
                    if ans == 'y':
                        sys.append(slot)
                print 'system requestable slots are:'
                print sys
                ont['system_requestable'] = sys
                SET_SLOTS = True
                continue
            if 'method' in line:
                continue
            if 'discourseAct' in line:
                continue
            if SET_SLOTS:
                #for slot in ont['requestable']:
                for slot in req:
                    #print slot
                    #print line
                    #raw_input()
                    if slot in line and ('"' not in line):
                        count = 0
                        while ');' not in f[i+count]:
                            count += 1
                        values = f[i+1:i+count]
                        #raw_input('-- print values now:')
                        #print values
                        #raw_input('check')
                        vals = []
                        for v in values:
                            x = v.split('"')
                            vals.append(x[1])
                        print vals
                        print '^^ are for slot: ', slot
                        raw_input('check values parsed')
                        ont['informable'][slot] = vals
                        req.remove(slot)
                        break
    writeto = open(txtfile+'.json','w')
    json.dump(ont,writeto)
    writeto.close()
Example #32
def list_extensions(app_dir=None, logger=None):
    """List the extensions.
    """
    logger = logger or logging
    app_dir = get_app_dir(app_dir)
    extensions = _get_extensions(app_dir)
    disabled = _get_disabled(app_dir)
    all_linked = _get_linked_packages(app_dir, logger=logger)
    app = []
    sys = []
    linked = []
    errors = dict()

    core_data = _get_core_data()

    # We want to organize by dir.
    sys_path = pjoin(get_app_dir(), 'extensions')
    for (key, value) in extensions.items():
        deps = extensions[key].get('dependencies', dict())
        errors[key] = _validate_compatibility(key, deps, core_data)
        if key in all_linked:
            linked.append(key)
        if value['path'] == sys_path and sys_path != app_dir:
            sys.append(key)
            continue
        app.append(key)

    logger.info('JupyterLab v%s' % __version__)
    logger.info('Known labextensions:')
    if app:
        logger.info('   app dir: %s' % app_dir)
        for item in sorted(app):
            extra = ''
            if item in disabled:
                extra += ' %s' % RED_DISABLED
            else:
                extra += ' %s' % GREEN_ENABLED
            if errors[item]:
                extra += ' %s' % RED_X
            else:
                extra += ' %s' % GREEN_OK
            if item in linked:
                extra += '*'
            logger.info('        %s%s' % (item, extra))
            version = extensions[item]['version']
            if errors[item]:
                msg = _format_compatibility_errors(item, version, errors[item])
                logger.warn(msg + '\n')

    if sys:
        logger.info('   sys dir: %s' % sys_path)
        for item in sorted(sys):
            extra = ''
            if item in disabled:
                extra += ' %s' % RED_DISABLED
            else:
                extra += ' %s' % GREEN_ENABLED
            logger.info('        %s%s' % (item, extra))
            if errors[item]:
                extra += ' %s' % RED_X
            else:
                extra += ' %s' % GREEN_OK
            if item in linked:
                extra += '*'
            logger.info('        %s%s' % (item, extra))
            version = extensions[item]['version']
            if errors[item]:
                msg = _format_compatibility_errors(item, version, errors[item])
                logger.warn(msg + '\n')

    if linked:
        logger.info(
            '* Denotes linked extensions. Use `jupyter labextension listlinked` to see details'
        )

    # Handle uninstalled and disabled core packages
    uninstalled_core = _get_uinstalled_core_extensions(app_dir)
    if uninstalled_core:
        logger.info('\nUninstalled core extensions:')
        [logger.info('    %s' % item) for item in sorted(uninstalled_core)]

    core_extensions = _get_core_extensions()

    disabled_core = []
    for key in core_extensions:
        if key in disabled:
            disabled_core.append(key)

    if disabled_core:
        logger.info('\nDisabled core extensions:')
        [logger.info('    %s' % item) for item in sorted(disabled_core)]
Example #33
def concurrency_breakdown(logdir, cfg, df_mpstat, df_cpu, df_gpu, df_nvsmi, df_bandwidth, features):
    if cfg.verbose:
        print_title('Concurrency Breakdown Analysis')

    total_elapsed_time = {'usr':0, 'sys':0, 'gpu':0, 'iow':0, 'idl':0}
    elapsed_time_ratio = {'usr':0, 'sys':0, 'gpu':0, 'iow':0, 'idl':0}
    total_interval_vector = []
    total_performance_vector = []
    
 
    if len(df_mpstat) == 0:
        print_warning(cfg, 'no mpstat and perf traces!')
        return features

    t_begin = df_mpstat.iloc[0]['timestamp']
    t_end = df_mpstat.iloc[-1]['timestamp']
    t = t_begin
    sample_time = (1 / float(cfg.sys_mon_rate))
    while t < t_end:
        t = t + sample_time
        if cfg.roi_end > 0 and (t < cfg.roi_begin or t > cfg.roi_end):
            continue
        
        window_begin = t - sample_time 
        window_end = t
       
        if len(df_cpu) > 0: 
            if df_cpu.iloc[0].timestamp > window_end:
                continue
            cond1 = (df_cpu['timestamp'] > window_begin)
            cond2 = (df_cpu['timestamp'] <= window_end)
            df_cpu_interval = df_cpu[ cond1 & cond2 ]
        
        num_gpus = len(list(set(df_nvsmi['deviceId'])))
        cond1 = (df_nvsmi['timestamp'] > window_begin)
        cond2 = (df_nvsmi['timestamp'] <= window_end)
        sm = df_nvsmi['event'] == int(0)
        df_nvsmi_interval = df_nvsmi[ cond1 & cond2 & sm ]
        
        cond1 = (df_mpstat['timestamp'] > window_begin)
        cond2 = (df_mpstat['timestamp'] <= window_end)
        df_mpstat_interval = df_mpstat[ cond1 & cond2 ]
         
        cond1 = (df_bandwidth['timestamp'] > window_begin)
        cond2 = (df_bandwidth['timestamp'] <= window_end)
        tx = df_bandwidth['event'] == float(0)
        rx = df_bandwidth['event'] == float(1)
        df_tx_interval = df_bandwidth[ cond1 & cond2 & tx ]
        df_rx_interval = df_bandwidth[ cond1 & cond2 & rx ]

        mp_usr = []
        mp_sys = []
        mp_idl = []
        mp_iow = []

        usr = []
        sys = []
        irq = []     
        
        cpu_max = 0
        cpu_min = 100
        for i in range(len(df_mpstat_interval)):
            ratios = df_mpstat_interval.iloc[i]['name'].split(':')[1].split('|')
            
            #print(ratios)
            mp_usr.append(sample_time*int(ratios[1])/100.0)
            mp_sys.append(sample_time*int(ratios[2])/100.0)
            mp_idl.append(sample_time*int(ratios[3])/100.0)
            mp_iow.append(sample_time*int(ratios[4])/100.0)

            usr.append(int(ratios[1]))
            sys.append(int(ratios[2]))
            irq.append(int(ratios[5]))
                     
            cpu_tmp = int(ratios[1]) + int(ratios[2]) + int(ratios[5])
            if cpu_tmp > cpu_max:
                cpu_max = cpu_tmp
            if cpu_tmp < cpu_min:
                cpu_min = cpu_tmp
        mp_usr = np.asarray(mp_usr)
        mp_sys = np.asarray(mp_sys)
        mp_idl = np.asarray(mp_idl)
        mp_iow = np.asarray(mp_iow)

        usr = np.asarray(usr)
        sys = np.asarray(sys)
        irq = np.asarray(irq)

        elapsed_time = {'usr':0, 'sys':0, 'gpu':0, 'iow':0, 'idl':0} 

        if len(df_mpstat_interval) > 0:
            elapsed_time['usr'] = mp_usr.max()
            elapsed_time['sys'] = mp_sys.max()
            elapsed_time['gpu'] = df_nvsmi_interval['duration'].max() * 0.01 * sample_time
            elapsed_time['iow'] = mp_iow.max()
            #print('gput,usrt = ', elapsed_time['gpu'], elapsed_time['usr']) 
            dominator = max(elapsed_time, key=elapsed_time.get)
            #if elapsed_time['gpu'] > 0.1 :
            #    dominator = 'gpu'
            if elapsed_time[dominator] > sample_time * int(cfg.is_idle_threshold)/100:
                total_elapsed_time[dominator] = total_elapsed_time[dominator] + sample_time
            else:
                total_elapsed_time['idl'] += sample_time

            if num_gpus > 0:
                time_gpu_avg = df_nvsmi_interval['duration'].sum() * 0.01 * sample_time / num_gpus
            else:
                time_gpu_avg = 0 

            interval_vector = [mp_usr.max(),
                               mp_sys.max(),
                               mp_iow.max(),
                               mp_idl.max(),
                               time_gpu_avg,
                               df_tx_interval['bandwidth'].sum(),
                               df_rx_interval['bandwidth'].sum()]                             
            total_interval_vector.append(tuple(interval_vector)) 
            if num_gpus > 0:
                sm_avg = df_nvsmi_interval['duration'].sum() / int(len(list(set(df_nvsmi_interval['deviceId']))))
            else:
                sm_avg = 0
            performance_vector = [window_end,
                                  df_nvsmi_interval['duration'].max(),
                                  sm_avg,
                                  df_nvsmi_interval['duration'].min(),
                                  round((usr.mean() + sys.mean() + irq.mean()), 0),
                                  cpu_max,
                                  cpu_min]
            total_performance_vector.append(tuple(performance_vector))
                                 
    total_all_elapsed_time = sum(total_elapsed_time.values())
    if total_all_elapsed_time > 0 :
        elapsed_time_ratio['usr'] = 100 * total_elapsed_time['usr'] / total_all_elapsed_time 
        elapsed_time_ratio['sys'] = 100 * total_elapsed_time['sys'] / total_all_elapsed_time 
        elapsed_time_ratio['gpu'] = 100 * total_elapsed_time['gpu'] / total_all_elapsed_time 
        elapsed_time_ratio['idl'] = 100 * total_elapsed_time['idl'] / total_all_elapsed_time
        elapsed_time_ratio['iow'] = 100 * total_elapsed_time['iow'] / total_all_elapsed_time 
        if cfg.verbose:
            print('Elapsed Time = %.1lf ' % total_all_elapsed_time)
            print('USR = %.1lf %%' % elapsed_time_ratio['usr'])
            print('SYS = %.1lf %%' % elapsed_time_ratio['sys'])
            if num_gpus > 0:
                print('GPU = %.1lf %%' % elapsed_time_ratio['gpu'])
            print('IDL = %.1lf %%' % elapsed_time_ratio['idl'])
            print('IOW = %.1lf %%' % elapsed_time_ratio['iow'])
        if cfg.spotlight_gpu:
            elapsed_hotspot_time = cfg.roi_end - cfg.roi_begin 
        else:
            elapsed_hotspot_time = 0 
    
        df = pd.DataFrame({ 'name':['elapsed_usr_time_ratio', 'elapsed_sys_time_ratio', 'elapsed_gpu_time_ratio', 
                            'elapsed_iow_time_ratio', 'elapsed_hotspot_time'], 
                            'value':[elapsed_time_ratio['usr'], elapsed_time_ratio['sys'], elapsed_time_ratio['gpu'], 
                            elapsed_time_ratio['iow'], elapsed_hotspot_time ] }, 
                            columns=['name','value'])
    
        features = pd.concat([features, df])
    
    if len(total_performance_vector) > 0:
        performance_table = pd.DataFrame(total_performance_vector, columns = ['time', 'max_gpu_util', 'avg_gpu_util', 'min_gpu_util', 'cpu_util', 'cpu_max', 'cpu_min'])
        performance_table.to_csv('%s/performance.csv' % logdir)
        vector_table = pd.DataFrame(total_interval_vector, columns = ['usr' , 'sys', 'iow', 'idl','gpu', 'net_tx', 'net_rx'])
        pearson = vector_table.corr(method ='pearson').round(2)
        if cfg.verbose:
            print('Correlation Table :')
            print(pearson)
        df = pd.DataFrame({ 'name':['corr_gpu_usr', 'corr_gpu_sys', 'corr_gpu_iow', 'corr_gpu_ntx', 'corr_gpu_nrx'], 'value':[pearson['gpu'].usr, pearson['gpu'].sys, pearson['gpu'].iow, pearson['gpu'].net_tx, pearson['gpu'].net_rx]}, columns=['name','value'])
        features = pd.concat([features, df])
    return features
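A minimal illustration of the correlation step above: pandas' DataFrame.corr computes the pairwise Pearson matrix over the interval vectors (the values below are toy data):

import pandas as pd
vt = pd.DataFrame({'gpu': [0.1, 0.5, 0.9], 'usr': [0.2, 0.4, 0.8]})
print(vt.corr(method='pearson').round(2))  # off-diagonal entries ~0.98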
Example #34
def xml(f,extra_tag=None,p_=10):
  '''This takes a list of directories and creates a PROPhet xml file'''
  root = ET.Element("PROPhet")
  nsystms = ET.Element('nsystem')
  root.append(nsystms)
  systems = ET.Element("systems")
  for i in range(200): shuffle(f)
  N_train = int(0.80*len(f))
  N_val = int(0.90*len(f))
  sys = []
  cnt = 0  # counts systems actually added; files that fail to parse are skipped
  for c,i in enumerate(f):
    if c%p_ == 0 : print(c)
    if c < N_train: t_flag = "train"
    elif c < N_val: t_flag = "val"  # include c == N_train, which would otherwise fall through to 'test'
    else: t_flag = 'test'
    x = QE.Struct()
    try:
      x.XML_Process(i)
    except Exception:  # skip structures that fail to parse
      continue
    system = ET.Element("system",id=str(c + 1))
    train = ET.Element('train')
    train.text = t_flag
    system.append(train)
    lattice = ET.Element('lattice',units='angstrom')
    for j in x.lattice:
      l = ET.Element(j)
      l.text = ' '.join([str(zz) for zz in x.lattice[j]])
      lattice.append(l)
    system.append(lattice)
    atoms = ET.Element('atoms',units='angstrom')
    atm,cell = x.return_params()
    natoms = ET.Element('natoms')
    natoms.text = str(len(x.atoms))
    species = ET.Element('species')
    ntype = len(set([zz.split()[0] for zz in atm.split('\n')[:-1]]))
    species.text = str(ntype)
    atoms.append(natoms)
    atoms.append(species)
    for j in atm.split('\n')[:-1]: 
      atom = ET.Element("atom",specie=j.split()[0])
      atom.text = ' '.join(j.split()[1:4])
      atoms.append(atom)
    system.append(atoms)
    target = ET.Element('target')
    target.text = str(x.energy)
    system.append(target)
    if extra_tag is not None:
      tag = extra_tag[c]['tag']
      val = extra_tag[c]['val']
      if 'other_tags' in list(extra_tag[c].keys()):
        _ = ET.Element(tag,**extra_tag[c]['other_tags'])
      else: 
        _ = ET.Element(tag)
      _.text = val
      system.append(_)
    sys.append(system)
    cnt += 1
    del x
  for i in sys:
    systems.append(i)
  nsystms.text = str(cnt)
  root.append(systems)
  str_ = ET.tostring(root, pretty_print=True).decode('utf-8')  # pretty_print requires lxml.etree imported as ET
  return str_
Example #35
# ----------------------------------------
# VARIABLE DECLARATION

ref = sys.argv[1]		# full protein reference structure...
traj_loc = sys.argv[2]		# pointer to the directories of the systems...
out = sys.argv[3]		# output file name w/ extension

selection = 'protein'
alignment = 'protein and name CA and (resid 20:25 or resid 50:55 or resid 73:75 or resid 90:94 or resid 112:116 or resid 142:147 or resid 165:169 or resid 190:194 or resid 214:218 or resid 236:240 or resid 253:258 or resid 303:307)'

grab_frame = 50 	#grab a frame every 50 frames (100 ps);

flush = sys.stdout.flush

sys = []
sys.append(['AMBER_apo', 21, 150])		#
#sys.append(['AMBER_atp', 51, 100])		#
#sys.append(['AMBER_ssrna', 51, 100])		#
#sys.append(['AMBER_ssrna_atp', 51, 100])	#
#sys.append(['AMBER_ssrna_adp_pi', 51, 100])	#
#sys.append(['AMBER_ssrna_adp', 51, 100])	#
#sys.append(['AMBER_ssrna_pi', 51, 100])		#

# ----------------------------------------
# SUBROUTINES:

def ffprint(string):
        print '%s' %(string)
        flush()

# ----------------------------------------
Example #36
def gpt_sample(args_dict):
    print("\n=============== Argument ===============")
    for key in args_dict:
        print("{}: {}".format(key, args_dict[key]))
    print("========================================")

    np.random.seed(1)
    random.seed(1)

    refs = []
    sys = []

    MEGATRON_OUTPUT_FILENAME = ".megatron_output.json"
    VOCAB_FILEPATH = "./models/gpt2-vocab.json"
    MERGE_FILEPATH = "./models/gpt2-merges.txt"

    if not os.path.exists(MEGATRON_OUTPUT_FILENAME):
        """Megatron program."""
        os.system(" python Megatron-LM/tools/generate_samples_gpt.py \
                    --num-layers {} \
                    --hidden-size {} \
                    --num-attention-heads {} \
                    --seq-length 1024 \
                    --max-position-embeddings 1024 \
                    --micro-batch-size 4 \
                    --global-batch-size 8 \
                    --vocab-file {} \
                    --merge-file {} \
                    --load ./models/megatron-models/345m \
                    --out-seq-length {} \
                    --temperature 1.0 \
                    --genfile {} \
                    --num-samples {} \
                    --top_k {} \
                    --top_p {} \
                    --recompute ".format(
            args_dict['num_layer'],
            args_dict['head_number'] * args_dict['size_per_head'],
            args_dict['head_number'], VOCAB_FILEPATH, MERGE_FILEPATH,
            args_dict['request_input_len'] + args_dict['request_output_len'],
            MEGATRON_OUTPUT_FILENAME, args_dict['request_batch_size'],
            args_dict['sampling_topk'], args_dict['sampling_topp']))

    with open(MEGATRON_OUTPUT_FILENAME, 'r') as megatron_file:
        for i, line in enumerate(megatron_file.readlines()):
            line_j = json.loads(line)
            refs.append(line_j['text'])
            i += 1
            if i == args_dict['request_batch_size']:
                break
    """ megatron finish """

    enc = encoder.get_encoder(VOCAB_FILEPATH, MERGE_FILEPATH)

    with open("../sample/cpp/start_ids.csv", "w") as f:
        for tokens in refs:
            id_list = enc.encode(tokens)
            id_str = "{}\n".format(id_list[:args_dict['request_input_len']])
            id_str = id_str.replace("[", "").replace("]", "")
            f.write(id_str)

    generate_gpt_config(args_dict)
    os.system("rm out")
    os.system(
        "mpirun -n {} --allow-run-as-root ./bin/gpt_sample .tmp.config.ini".
        format(args_dict['gpu_num']))
    tokens_batch = np.loadtxt("out", dtype=np.int32).T

    for tokens in tokens_batch:
        sys.append(enc.decode(tokens))

    bleu = sacrebleu.corpus_bleu(sys, [refs])
    print("[INFO] bleu score: {}".format(bleu.score))
Example #37
def list_extensions(app_dir=None, logger=None):
    """List the extensions.
    """
    logger = logger or logging
    app_dir = get_app_dir(app_dir)
    extensions = _get_extensions(app_dir)
    disabled = _get_disabled(app_dir)
    all_linked = _get_linked_packages(app_dir, logger=logger)
    app = []
    sys = []
    linked = []
    errors = dict()

    core_data = _get_core_data()

    # We want to organize by dir.
    sys_path = pjoin(get_app_dir(), 'extensions')
    for (key, value) in extensions.items():
        deps = extensions[key].get('dependencies', dict())
        errors[key] = _validate_compatibility(key, deps, core_data)
        if key in all_linked:
            linked.append(key)
        if value['path'] == sys_path and sys_path != app_dir:
            sys.append(key)
            continue
        app.append(key)

    logger.info('JupyterLab v%s' % __version__)
    logger.info('Known labextensions:')
    if app:
        logger.info('   app dir: %s' % app_dir)
        for item in sorted(app):
            version = extensions[item]['version']
            extra = ''
            if item in disabled:
                extra += ' %s' % RED_DISABLED
            else:
                extra += ' %s' % GREEN_ENABLED
            if errors[item]:
                extra += ' %s' % RED_X
            else:
                extra += ' %s' % GREEN_OK
            logger.info('        %s@%s%s' % (item, version, extra))
            if errors[item]:
                msg = _format_compatibility_errors(item, version, errors[item])
                logger.warn(msg + '\n')

    if sys:
        logger.info('   sys dir: %s' % sys_path)
        for item in sorted(sys):
            version = extensions[item]['version']
            extra = ''
            if item in disabled:
                extra += ' %s' % RED_DISABLED
            else:
                extra += ' %s' % GREEN_ENABLED
            logger.info('        %s%s' % (item, extra))
            if errors[item]:
                extra += ' %s' % RED_X
            else:
                extra += ' %s' % GREEN_OK
            if item in linked:
                extra += '*'
            logger.info('        %s@%s%s' % (item, version, extra))
            if errors[item]:
                msg = _format_compatibility_errors(item, version, errors[item])
                logger.warn(msg + '\n')

    if linked:
        logger.info('   linked extensions:')
        for item in sorted(linked):
            logger.info('        %s: %s' % (item, all_linked[item]))

    if len(all_linked) > len(linked):
        logger.info('   linked packages:')
        for key in sorted(all_linked.keys()):
            if (key in linked):
                continue
            logger.info('        %s: %s' % (key, all_linked[key]))

    # Handle uninstalled and disabled core packages
    uninstalled_core = _get_uinstalled_core_extensions(app_dir)
    if uninstalled_core:
        logger.info('\nUninstalled core extensions:')
        [logger.info('    %s' % item) for item in sorted(uninstalled_core)]

    core_extensions = _get_core_extensions()

    disabled_core = []
    for key in core_extensions:
        if key in disabled:
            disabled_core.append(key)

    if disabled_core:
        logger.info('\nDisabled core extensions:')
        [logger.info('    %s' % item) for item in sorted(disabled_core)]
Example #38
import importlib
import sys
import unittest

sys.path.append('../')

# a module whose name starts with a digit cannot be imported with a
# plain import statement, so load it via importlib instead
Solution = importlib.import_module('125_valid_palindrome').Solution

class TestSolution(unittest.TestCase):
    def setUp(self):
        self.solution = Solution()

    def test_cases(self):
        pass


if __name__ == '__main__':
    unittest.main()
Example #39
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 28 17:06:19 2017

@author: wq
"""

import tensorflow as tf
import sys
sys.path.append('../function/')
from preprocess import *
from multiTask import *
#%%

Example #40
import sys
import os
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
from plotting_functions import *
from sel_list import *

stdev = np.std
sqrt = np.sqrt
nullfmt = NullFormatter()

file1 = sys.argv[1]
time_offset = int(sys.argv[2])		# units of ns 

sys = []
sys.append(['Apo','steelblue',','])
sys.append(['ATP','cadetblue',','])
sys.append(['ssRNA','turquoise',','])
sys.append(['ssRNA+ATP','forestgreen',','])
sys.append(['ssRNA+ADP+Pi','limegreen',','])
sys.append(['ssRNA+ADP','orangered',','])
sys.append(['ssRNA+Pi','crimson',','])

nSys = len(sys)
nSel = len(sel)

# ----------------------------------------
# MAIN PROGRAM:

datalist1 = np.loadtxt(file1)
nSteps = len(datalist1)/nSys
Example #41
sys.path.insert(0, 'python')
from get_modis import *
fh = fhead.split('20')[0]
files = glob.glob('%s*B01.jp2' % fh)
dates = []
ds = []
de = []
sys = []
eys = []
for i in files:
    date = datetime.datetime.strptime(
        (i.split('%s' % fh)[1]).split('0B01.jp2')[0], "%Y%m%d")
    dates.append(date)
    doy = date.timetuple().tm_yday
    if doy - 8 < 0:
        sys.append(date.year - 1)
        days = (datetime.date(date.year, 1, 1) -
                datetime.date(date.year - 1, 1, 1)).days
        ds.append(doy - 8 + days)
    else:
        sys.append(date.year)
        ds.append(doy - 8)

    if doy + 8 > 366:
        eys.append(date.year + 1)
        days = (datetime.date(date.year + 1, 1, 1) -
                datetime.date(date.year, 1, 1)).days
        de.append(doy + 8 - days)  # end-day list; appending to ds here was a copy-paste slip
    else:
        eys.append(date.year)
        de.append(doy + 8)
Example #42
# -*- coding: utf-8 -*-
"""
Created on Tue Dec  4 14:47:15 2018

@author: theod
"""
import sys
sys.path.append(r'C:\Users\theod\OneDrive\Documents\python army of functions2\bond_price_project\risky_bond_and_CDS')
from first_to_default_swap import price_FtD_swap1
from last_to_default_swap import price_LtD_swap1
from CDS_price import price_CDS
#%%
def test_FtD_swap_price():
    FV1,FV2,c,T,rate,lbd1,lbd2,R1,R2,freq = 100,100,0.02,5,0.05,0.3,0.4,0.4,0.4,1
    print(price_FtD_swap1(FV1,FV2,c,T,rate,lbd1,lbd2,R1,R2,freq))
    print(price_LtD_swap1(FV1,FV2,c,T,rate,lbd1,lbd2,R1,R2,freq))
    print(price_CDS(FV1,c,T,rate,lbd1,R1,freq))
    print(price_CDS(FV2,c,T,rate,lbd2,R2,freq))
test_FtD_swap_price()
#%%
def test_FtD_LtD_CDS_prices():
    FV1,FV2,c,T,rate,lbd1,lbd2,R1,R2,freq = 100,100,0.02,5,0.05,0.3,0.4,0.4,0.4,1
    intensities1 = ([0.5],[0.3,0.3])
    intensities2 = ([0.5],[0.4,0.4])
    from first_to_default_swap import price_FtD_swap2
    from last_to_default_swap import price_LtD_swap2
    from CDS_price2 import price_CDS2
    x1 = price_FtD_swap2(FV1,FV2,c,T,rate,lbd1,lbd2,R1,R2,freq)
    x2 = price_LtD_swap2(FV1,FV2,c,T,rate,lbd1,lbd2,R1,R2,freq)
    x3 = price_CDS2(FV1,c,T,rate,lbd1,R1,freq,option = "clean")
    x4 = price_CDS2(FV2,c,T,rate,lbd2,R2,freq,option = "clean")
Example #43
from find_peaks4 import plot_pks
plot_pks( img, nsigs=4, filt=False, thresh=50, sig_G=1.1, make_sparse=True)
pk,_ = pk_pos( img, nsigs=4, filt=False, thresh=50, sig_G=1.1, make_sparse=True)
pk,_ = pk_pos( img, nsigs=4 thresh=50, sig_G=1.1, make_sparse=True)
pk,_ = pk_pos( img, nsigs=4 ,thresh=50, sig_G=1.1, make_sparse=True)
pk
y,x = list(map(array, list(zip(*pk))))
y
x
for j,i in zip(y,x):
    img[j-3:j+3, i-3:i+3] = 0
    
cla()
imshow( img, vmax=199)
import sys
sys.append("../.asu_tools/lib/python")
sys.path.append("../.asu_tools/lib/python")
from loki.RingData import RingFit
rf = RingFit(img)
#xi,yi = (
img.shape
xi,yi = (1440,1440)
get_ipython().run_line_magic('pinfo', 'rf.fit_circle_fast')
#rf.fit_circle_fast( (xi,yi, )
xi-1137
ri = 303
rf.fit_circle_fast( (xi,yi,ri ), num_fitting_pts=5000, num_high_pix=100, ring_width=100)
x,y,r = array([ 1450.10619788,  1430.22695615,   316.51128542])
from loki.RingData import RadialProfile
rp = RadialProfile( (xi,yi), img.shape)
figure(2);plot( rp.calculate(img))