Example #1
def extract(obj, arr, key):
    """Recursively search for values of key in JSON tree."""
    if isinstance(obj, dict):
        for k, v in obj.items():
            if isinstance(v, (dict, list)):
                extract(v, arr, key)
            elif k == key:
                arr.append(v)
    elif isinstance(obj, list):
        for item in obj:
            extract(item, arr, key)
    return arr
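A quick usage sketch on hypothetical data, showing how the helper flattens every matching value out of a nested structure:

data = {"id": 1, "items": [{"id": 2}, {"meta": {"id": 3}}]}
ids = extract(data, [], "id")
print(ids)  # [1, 2, 3]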
Example #2
File: fx.py Project: sjl421/pyhome
def fetch_currency_page(
        name="美元", url='http://fx.cmbchina.com/Hq/History.aspx?nbr=%s&page=1'):
    url = url % name
    html = requests.get(url, timeout=60).content.decode("utf-8", "ignore")
    last_page = list(
        extract_all('<a href="', 'class="text"',
                    extract('<div class="function">', '<div', html)))[-1]
    last_page_num = int(extract('page=', '"', last_page))
    tr_list = extract_all('<tr>', '</tr>', html)
    for tr in tr_list:
        td_date = extract('<td align="center">', '</td>', tr)
        if td_date:
            td_middle_rate = list(
                extract_all('<td class="numberright">', '</td>', tr))[-1]
            print(td_date, td_middle_rate)
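This snippet leans on `extract` and `extract_all` string-slicing helpers from the surrounding project; their code is not shown here, so the following is a sketch of plausible implementations matching how they are called (signatures assumed):

def extract(begin, end, html):
    # Return the text between the first `begin` marker and the next `end`
    # marker, or None if either marker is missing.
    start = html.find(begin)
    if start < 0:
        return None
    start += len(begin)
    stop = html.find(end, start)
    if stop < 0:
        return None
    return html[start:stop]

def extract_all(begin, end, html):
    # Lazily yield every non-overlapping span between `begin` and `end`.
    pos = 0
    while True:
        start = html.find(begin, pos)
        if start < 0:
            return
        start += len(begin)
        stop = html.find(end, start)
        if stop < 0:
            return
        yield html[start:stop]
        pos = stop + len(end)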
Example #3
def main():
    random.seed(7)

    # AAA: Use a random shuffle to select test/training sets
    print('Extracting twitter data from the database...')
    tm1 = time.time()
    tweets = extract()
    tm2 = time.time()

    print('  time=%0.3fs' % (tm2 - tm1))

    test_set_size = int(TEST_SET_PROPORTION * len(tweets))

    print('Training on %d tweets' % (len(tweets) - test_set_size))

    tm1 = time.time()
    random.shuffle(tweets)

    test_set = tweets[:test_set_size]
    training_set = tweets[test_set_size:]

    classifier = train_nltk(nltk.classify.NaiveBayesClassifier, training_set)
    tm2 = time.time()

    print('  time=%0.3fs' % (tm2 - tm1))

    print('Testing accuracy on %d tweets' % test_set_size)
    tm1 = time.time()
    mat = test_nltk(classifier, test_set)
    tm2 = time.time()

    print(mat.pp(show_percents=True))
    # scale to a percentage so the '%f%%' format reads correctly
    print('%d of %d correct ==> %f%%' % (mat._correct, mat._total,
                                         100.0 * mat._correct / mat._total))
    print('  time=%0.3fs' % (tm2 - tm1))
Example #4
    def _port_stats_reply_handler(self, ev):
        body = ev.msg.body
        '''self.logger.info('datapath port     '
                         'duration-sec duration-nsec tx-bytes '
                         'rx-bytes tx-error rx-error')
        self.logger.info('-------- -------- '
                         '------------ ------------- -------- '
                         '-------- -------- --------')'''

        for stat in sorted(body, key=attrgetter('port_no')):

            datapath = ev.msg.datapath.id
            '''self.logger.info('%8x %8x %12d %13d %8d %8d %8d %8d',
                             datapath, stat.port_no,
                             stat.duration_sec, stat.duration_nsec, stat.tx_bytes, stat.rx_bytes, stat.tx_errors,
                             stat.rx_errors)'''

            extract(datapath, stat, INTERVALO)
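For context, a reply handler like this is normally a method on a Ryu app and is registered with the set_ev_cls decorator; a minimal sketch assuming the standard Ryu imports (class name and log format hypothetical):

from operator import attrgetter

from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, set_ev_cls

class PortMonitor(app_manager.RyuApp):
    # Ryu calls this method whenever a switch answers a port-stats request.
    @set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)
    def _port_stats_reply_handler(self, ev):
        for stat in sorted(ev.msg.body, key=attrgetter('port_no')):
            self.logger.info('port %d: tx_bytes=%d rx_bytes=%d',
                             stat.port_no, stat.tx_bytes, stat.rx_bytes)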
Example #5
    def __init__(self):
        self.io = io()
        self.c = corpus()
        self.ext = extract()
        self.assoc = association()
        self.chi_square = chi_square()

        self.tagdata = self.load_tagset('validation/*.txt')
        self.pairs = self.get_pairs('test_sample/*.txt')
        self.pairs = [item.split('->') for item in self.pairs]
        return
Example #6
def get_result(resultURL, type=True):
    '''get the result data for given students'''
    global responses
    #print('in get result')
    with requests.session() as s:
        s.cookies = jar
        global token_page
        try:
            token_page = s.get(resultURL,
                               headers=headers,
                               cookies=jar,
                               timeout=10)
        except (requests.exceptions.ReadTimeout,
                requests.exceptions.ConnectTimeout):
            responses.append('timeout')
            return 4
        except OSError:
            responses.append('None')
            return 0
        extract(token_page, type)
        responses.append('OK')
        return 6
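A caller sketch (URL hypothetical) branching on the numeric status codes the function returns, 4 for a timeout, 0 for a connection error, and 6 for success:

status = get_result('http://results.example.edu/view?roll=1234')  # hypothetical URL
if status == 4:
    print('timed out, retrying...')
elif status == 6:
    print('fetched:', responses[-1])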
Example #7
def matrify(fileName, target, mode):
    mat = []
    tarOut = []
    for i, location in enumerate(fileName):
        time, pulse, Ts = extract(location)
        N = len(pulse)
        x = np.array(range(N)) * Ts
        fList, tList = package(x, pulse, Ts, mode, target[i], 1.0)
        # use a distinct index so the outer loop variable is not shadowed
        for j, feature in enumerate(fList):
            mat.append(feature)
            tarOut.append(tList[j])

    mat = np.array(mat)
    tarOut = np.array(tarOut)
    #mat = np.concatenate(mat)
    return mat, tarOut
Example #8
def extractfeature(f):
    global MON_SITE_NUM
    fname = f.split('/')[-1].split(".")[0]
    # logger.info('Processing %s...'%f)
    try:
        t = parse(f)
        features = extract(t)
        if '-' in fname:
            label = int(fname.split('-')[0])
        else:
            label = int(MON_SITE_NUM)

        return (features, label)
    except Exception as e:
        print(e)
        return None
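Per-file feature extractors like this are typically fanned out over a process pool; a sketch under that assumption (the glob pattern and pool size are hypothetical):

import glob
from multiprocessing import Pool

if __name__ == '__main__':
    files = glob.glob('traces/*.cell')  # hypothetical trace directory
    with Pool(8) as pool:
        dataset = [r for r in pool.map(extractfeature, files) if r is not None]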
Example #9
def main():
    random.seed(7)

    # AAA: Use a random shuffle to select test/training sets
    print('Extracting twitter data from the database...')
    tm1 = time.time()
    tweets = extract()
    tm2 = time.time()

    print('  time=%0.3fs' % (tm2 - tm1))

    test_set_size = int(TEST_SET_PROPORTION * len(tweets))

    print('Training on %d tweets' % (len(tweets) - test_set_size))

    tm1 = time.time()
    random.shuffle(tweets)

    test_set = tweets[:test_set_size]
    training_set = tweets[test_set_size:]

    nb = NaiveBayes()
    for tweet in training_set:
        toks = ttokenize.tokenize(tweet.text)
        nb.train(toks, tweet.get_majority_vote())

    tm2 = time.time()

    print('  time=%0.3fs' % (tm2 - tm1))

    print('Testing accuracy on %d tweets' % test_set_size)
    tm1 = time.time()

    predictions = []
    references = []
    for tweet in test_set:
        references.append(tweet.get_majority_vote())
        toks = ttokenize.tokenize(tweet.text)
        predictions.append(nb.classify(toks))

    mat = nltk.ConfusionMatrix(references, predictions)
    tm2 = time.time()

    print(mat.pp(show_percents=True))
    # scale to a percentage so the '%f%%' format reads correctly
    print('%d of %d correct ==> %f%%' % (mat._correct, mat._total,
                                         100.0 * mat._correct / mat._total))
    print('  time=%0.3fs' % (tm2 - tm1))
Example #10
def NER_date(input_sen, done):
    ######## Finding Date ########
    date_list = []

    a = extract(input_sen)

    if len(a) > 0 and a not in done:
        date_list.append(a)

    # flatten the list of extracted date lists
    temp = []
    for i in range(len(date_list)):
        for each in date_list[i]:
            temp.append(each)

    return temp
Example #11
def json_extract(obj, key):
    """Recursively fetch values from nested JSON."""
    arr = []

    def extract(obj, arr, key):
        """Recursively search for values of key in JSON tree."""
        if isinstance(obj, dict):
            for k, v in obj.items():
                if isinstance(v, (dict, list)):
                    extract(v, arr, key)
                elif k == key:
                    arr.append(v)
        elif isinstance(obj, list):
            for item in obj:
                extract(item, arr, key)
        return arr
    values = extract(obj, arr, key)
    return values
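A brief usage sketch with hypothetical input:

values = json_extract({"a": {"b": 1}, "b": 2}, "b")
print(values)  # [1, 2]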
Example #12
            "http://wenku.baidu.com/view/",\
            "http://jingyan.baidu.com/article/",\
            "http://www.docin.com/zuowen/view.do?id=",\
            "http://www.babytree.com/ask/detail/",\
            "http://www.babytree.com/learn/article/",\
            "http://www.babytree.com/know/weekly.php?type=",\
            "http://www.haodf.com/wenda/",\
            "http://www.docin.com/p-",\
            "http://www.haodf.com/zhuanjiaguandian/")):
            # parse
            #print "url1\t", url
            data = data[:-1]
            data = decompress(data)
            #print >>sys.stderr, url
            #print >>sys.stderr, data
            result = extract(url, data)
            output(url, result)
        data = ""
        http_start = False
        data_start = False
        store_size = 0
        orig_size = 0
        url = line.strip()
        continue

    if line.startswith("Original-Size:"):
        orig_size = int(line[14:].strip())
        continue
    if line.startswith("Store-Size:"):
        store_size = int(line[11:].strip())
        continue
Example #13
    if args.keywords:
        keywords = args.keywords
    else:
        keywords = []

    if args.file:
        file_a = args.file[0]

        dir = os.environ['HOME'] + '/Files/'
        if file_a not in os.listdir(dir):
            print('file ' + file_a + ' not found in default folder')
            exit()
        else:
            print('Extracting /Files/' + file_a)
            result = extract(keywords, dir + file_a)
            # print(result)
            stripped = strip_metadata(result)
            # print(stripped)
            # post stripped data
            result = post_metadata(stripped)
            if result:
                print('Added file successfully!')
                exit(0)

            print("Couldn't add file...try again")
            exit(-1)
    if args.all:
        result = extract_all(keywords)
        # print(result)
Example #14
filin = open(sys.argv[1], 'r')
rec = AttractRigidbody(sys.argv[2])
lig = AttractRigidbody(sys.argv[3])
s = "%8s%6s%10s%10s%10s%10s%10s"

print s %("transnb","rotnb","enregie","rmsd","irmsd","fnat","num_copie")

for ligne in filin :

	if ligne.startswith("==") :
		liste = []
		liste.append(ligne)
		spl= liste[0].split()
		ener = float(spl[3])
		if ener  < 0:
			templig = extract(sys.argv[1],lig,int(spl[1]),int(spl[2]))

			fn = fnat(rec,lig,templig)
			irm = irmsd(rec,lig,templig)


	elif ligne.startswith("### WEIGHTS BEGIN") :
		dico={}
	elif ligne.startswith("WEIGHT    REGION") :
		liste = []
		liste.append(ligne)
		scopy= liste[0].split()
		dico[int(scopy[4])]=float(scopy[6])
	elif ligne.startswith("### WEIGHTS END") :
		dico_trie = sorted(dico.items(), reverse=True, key=operator.itemgetter(1))
		s = "%8d%6d%10.3f%10.3f%10.3f%10.3f%5d"
Example #15
# 32 initial rows. Add more with the Add button
for i in range(0, 32):
    add_row()

# Creating gui

start_label = tk.Label(frame0, text="Start")
end_label = tk.Label(frame0, text="End")
name_label = tk.Label(frame0, text="Name")

add_button = tk.Button(frame1, text="Add", command=add_row)
pdf_button = tk.Button(frame1, text="PDF", command=select_pdf)
extract_button = tk.Button(frame1,
                           text="Extract",
                           command=lambda: extract(entries, pdf_filename))

pdf_label = tk.Label(frame1, text="PDF")

# Adding gui to grid

start_label.grid(row=1, column=0)
end_label.grid(row=1, column=1)
name_label.grid(row=1, column=2)

add_button.grid(row=0, column=0)
pdf_button.grid(row=0, column=1)
extract_button.grid(row=0, column=2)

pdf_label.grid(row=0, column=3)
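The widgets above assume a root window, the frame0/frame1 containers, and the callbacks were created earlier; a minimal sketch of that surrounding setup (names taken from the code, bodies assumed):

import tkinter as tk

root = tk.Tk()
frame0 = tk.Frame(root)
frame1 = tk.Frame(root)
frame0.pack()
frame1.pack()

entries = []         # rows of (start, end, name) entry widgets
pdf_filename = None  # set by select_pdf()

def add_row():
    pass  # hypothetical: append a row of tk.Entry widgets to `entries`

def select_pdf():
    pass  # hypothetical: open a file dialog and set pdf_filename

# ...widget creation and grid layout from the example go here...

root.mainloop()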
Example #16
def gamess_to_libra(params, ao, E, C, suff):
    ## 
    # Finds the keywords and their patterns and extracts the parameters
    # \param[in] params :  contains input parameters, in dictionary form
    # \param[in,out] ao :  atomic orbital basis at "t" old
    # \param[in,out] E  :  molecular energies at "t" old
    # \param[in,out] C  :  molecular coefficients at "t" old
    # \param[in] suff : The suffix to add to the name of the output files
    # this suffix is now considered to be of a string type - so you can actually encode both the
    # iteration number (MD timestep), the nuclear configuration (e.g. trajectory), and any other
    # related information
    #
    # This function outputs the files for excited electron dynamics
    # in "res" directory.
    # It returns the forces which act on the atoms.
    # Also, it returns new atomic orbitals, molecular energies, and
    # molecular coefficients used for calculating time-averaged
    # molecular energies and Non-Adiabatic Couplings(NACs).
    #
    # Used in: md.py/run_MD

    flag_ao = params["flag_ao"] 

    # 2-nd file - time "t+dt"  new
    label, Q, R, Grad, E2, C2, ao2, tot_ene = extract(params["gms_out"],params["debug_gms_unpack"],flag_ao)

    # calculate overlap matrix of atomic and molecular orbitals
    P11, P22, P12, P21 = overlap(ao,ao2,C,C2,params["basis_option"],flag_ao)

    # calculate transition dipole moment matrices in the MO basis:
    # mu_x = <i|x|j>, mu_y = <i|y|j>, mu_z = <i|z|j>
    # this is done for the "current" state only    
    
    mu = []
    if flag_ao == 1:
        mu_x, mu_y, mu_z = transition_dipole_moments(ao2,C2)
        mu = [mu_x, mu_y, mu_z]

        if params["debug_mu_output"]==1:
            print "mu_x:";    mu_x.show_matrix()
            print "mu_y:";    mu_y.show_matrix()
            print "mu_z:";    mu_z.show_matrix()
 
    if params["debug_densmat_output"]==1:
        print "P11 and P22 matrixes should show orthogonality"
        print "P11 is";    P11.show_matrix()
        print "P22 is";    P22.show_matrix()
        print "P12 and P21 matrixes show overlap of MOs for different molecular geometries "
        print "P12 is";    P12.show_matrix()
        print "P21 is";    P21.show_matrix()


    ### TO DO: In the following section, we need to avoid computing NAC matrices in the full
    # basis. We will need the information on cropping, in order to avoid computations that 
    # we do not need (the results are discarded anyways)
    # calculate molecular energies and Non-Adiabatic Couplings(NACs) on MO basis
    E_mol = average_E(E,E2)
    D_mol = NAC(P12,P21,params["dt_nucl"])

    # reduce the matrix size
    E_mol_red = reduce_matrix(E_mol,params["min_shift"], params["max_shift"],params["HOMO"])
    D_mol_red = reduce_matrix(D_mol,params["min_shift"], params["max_shift"],params["HOMO"])
    ### END TO DO

    if params["print_mo_ham"]==1:
        E_mol.show_matrix(params["mo_ham"] + "full_re_Ham_" + suff)
        D_mol.show_matrix(params["mo_ham"] + "full_im_Ham_" + suff)
        E_mol_red.show_matrix(params["mo_ham"] + "reduced_re_Ham_" + suff)
        D_mol_red.show_matrix(params["mo_ham"] + "reduced_im_Ham_" + suff)

    # store "t+dt"(new) parameters on "t"(old) ones
    for i in range(0,len(ao2)):
        ao[i] = AO(ao2[i])
    E = MATRIX(E2)
    C = MATRIX(C2)

    # Returned data:
    # Grad: Grad[k][i] - i-th projection of the gradient w.r.t. to k-th nucleus (i = 0, 1, 2)
    # data: a dictionary containing transition dipole moments
    # E_mol: the matrix of the 1-el orbital energies in the full space of the orbitals
    # D_mol: the matrix of the NACs computed with 1-el orbitals. Same dimension as E_mol
    # E_mol_red: the matrix of the 1-el orbital energies in the reduced (active) space
    # D_mol_red: the matrix of the NACs computed with 1-el orbital. Same dimension as E_mol_red

    return tot_ene, Grad, mu, E_mol, D_mol, E_mol_red, D_mol_red
Example #17
def extract_model(wowname, dir='', fullpath=False):
    m = Model(wow_data.open(wowname))
    extract(wowname, dir, fullpath)
    extract_model_textures(m, dir, fullpath)
Example #18
from extract import *
from relocalize import *
from window_execute import *
from window_steps import *  # lpd process with plotting and values printed at each step
from betamap import *
from sort_data import *
from plots import *  # plot main results figures for presentations

########################################              EXTRACT DATA           #################################################################
do_extract = False  # renamed: assigning to `extract` would shadow the extract class imported above

if do_extract:
    print('Extracting the data from raw catalogs')
    tonga = extract('RAW_DATA_FILES/ISC_RAWDATA.txt', None, 'ISC', 2005, 1, 1,
                    00, 00, 00, 'USED_DATA_FILES/ISC_fulldata.txt')
    tonga.extract_data()
    tonga.output()
    tonga = extract('RAW_DATA_FILES/NEIC_RAWDATA.txt', None, 'NEIC', 2005, 1,
                    1, 00, 00, 00, 'USED_DATA_FILES/NEIC_fulldata.txt')
    tonga.extract_data()
    tonga.output()
    tonga = extract('RAW_DATA_FILES/CMTSOLUTION_RAWDATA.txt',
                    'RAW_DATA_FILES/PSMECA_RAWDEPTHS.txt', 'CMT', 2005, 1, 1,
                    00, 00, 00, 'USED_DATA_FILES/SURROUNDING_EQ.txt')
    tonga.extract_data()
    tonga.output()

########################################             RELOCALIZE  DATA           #################################################################
reloc = False

if reloc:
Example #19
    if args:
        dir = args[0]
    else:
        dir = "sample"

    # update options with config file

    root_folder = os.path.dirname(os.path.abspath(__file__))
    application_folder = os.path.join(root_folder, dir)
    config_file = os.path.join(application_folder, "app.cfg")

    # make application
    application = app.Application(dir, options)

    if options.extract:
        extract()
    if options.delete:
        delete()

    if options.purge_attachments and not options.delete:
        purge_attachments()

    if options.purge_application:
        purge_application()

    if options.console:
        import code
        application.initialise_database()
        database = application.database
        code.interact(local=locals())
    if options.generate:
Example #20
def extract_model_textures(m, dir='', fullpath=False):
    for t in m.textures:
        extract(t.name, dir, fullpath)
Example #21
def main():

    #RAW FILE NEEDED FROM USER
    filename = input("RAW FILE - ENTER FILE LOCATION: ")

    #TIME BEFORE FALL OCCURS NEEDED FROM USER
    timeActivity = int(input("ENTER WHEN FALL OCCURS (In Seconds): "))

    #SEPARATE FALL AND BEFORE FALL
    seperate(filename, timeActivity)

    #CREATE BEFORE FALL AND AFTER FALL FILES
    for i in range(2):

        #Before Fall
        if(i == 0):

            filemaker("beforeFall.txt", "before_", filename)

        #After Fall
        else:

            filemaker("afterFall.txt", "after_", filename)

    #will create all features files from new filename
    for i in range(11):

        #FOR ALL BEFORE SENSOR FILES
        if(i < 6):

            new = "before_"

            #Write according to sensor
            if(i == 0):

                accBefore = extract(new + "accFile_" + filename)

                xAccBefore, yAccBefore, zAccBefore = accBefore.getAll()


            elif(i == 1):

                gravBefore = extract(new + "gravFile_" + filename)

                xGravBefore, yGravBefore, zGravBefore = gravBefore.getAll()

            elif(i == 2):

                gyroBefore = extract(new + "gyroFile_" + filename)

                xGyroBefore, yGyroBefore, zGyroBefore = gyroBefore.getAll()

            elif(i == 3):

                linearBefore = extract(new + "linearFile_" + filename)

                xLinearBefore, yLinearBefore, zLinearBefore = linearBefore.getAll()

            elif(i == 4):

                magBefore = extract(new + "magFile_" + filename)

                xMagBefore, yMagBefore, zMagBefore = magBefore.getAll()

        #FOR ALL AFTER SENSOR FILES
        else:

            #AFTER FALL
            new = "after_"

            # Write according to sensor
            if(i == 6):

                accAfter = extract(new + "accFile_" + filename)

                xAccAfter, yAccAfter, zAccAfter = accAfter.getAll()

            elif(i == 7):

                gravAfter = extract(new + "gravFile_" + filename)

                xGravAfter, yGravAfter, zGravAfter = gravAfter.getAll()

            elif(i == 8):

                gyroAfter = extract(new + "gyroFile_" + filename)

                xGyroAfter, yGyroAfter, zGyroAfter = gyroAfter.getAll()

            elif(i == 9):

                linearAfter = extract(new + "linearFile_" + filename)

                xLinearAfter, yLinearAfter, zLinearAfter = linearAfter.getAll()

            elif(i == 10):

                magAfter = extract(new + "magFile_" + filename)

                xMagAfter, yMagAfter, zMagAfter = magAfter.getAll()

    #FEATURE FILE TO WRITE FROM ANYWHERE
    # 1. MULTI
    #CREATING TRAINING
    featuresFileAcc = open("featureFileFilteredMultiAcc.txt", 'a')
    featuresFileGrav = open("featureFileFilteredMultiGrav.txt", 'a')
    featuresFileGyro = open("featureFileFilteredMultiGyro.txt", 'a')
    featuresFileLinear = open("featureFileFilteredMultiLinear.txt", 'a')
    featuresFileMag = open("featureFileFilteredMultiMag.txt", 'a')
    #CREATING TEST
    #featuresFileAcc = open("featureFileTestFilteredMultiAcc.txt", 'a')
    #featuresFileGrav = open("featureFileTestFilteredMultiGrav.txt", 'a')
    #featuresFileGyro = open("featureFileTestFilteredMultiGyro.txt", 'a')
    #featuresFileLinear = open("featureFileTestFilteredMultiLinear.txt", 'a')
    #featuresFileMag = open("featureFileTestFilteredMultiMag.txt", 'a')
    # 2. BINARY
    #CREATING TRAINING
    #featuresFileAcc = open("featureFileFilteredBinaryAccPocket.txt", 'a')
    #featuresFileGrav = open("featureFileFilteredBinaryGravPocket.txt", 'a')
    #featuresFileGyro = open("featureFileFilteredBinaryGyroPocket.txt", 'a')
    #featuresFileLinear = open("featureFileFilteredBinaryLinearPocket.txt", 'a')
    #featuresFileMag = open("featureFileFilteredBinaryMagPocket.txt", 'a')
    #CREATING TEST
    #featuresFileAcc = open("featureFileTestFilteredBinaryAccPocket.txt", 'a')
    #featuresFileGrav = open("featureFileTestFilteredBinaryGravPocket.txt", 'a')
    #featuresFileGyro = open("featureFileTestFilteredBinaryGyroPocket.txt", 'a')
    #featuresFileLinear = open("featureFileTestFilteredBinaryLinearPocket.txt", 'a')
    #featuresFileMag = open("featureFileTestFilteredBinaryMagPocket.txt", 'a')
    # 3. OTHERS
    #FINISH CREATING FILES FOR EACH SENSORS AXIS
    #CREATING TEST
    #featuresFileX = open("featureFileTestFilteredXAccMulti.txt", 'a')
    #featuresFileY = open("featureFileTestFilteredYAccMulti.txt", 'a')
    #featuresFileZ = open("featureFileTestFilteredZAccMulti.txt", 'a')
    # CREATING TRAINING
    # featuresFileX = open("featureFileFilteredXAccMulti.txt", 'a')
    # featuresFileY = open("featureFileFilteredYAccMulti.txt", 'a')
    # featuresFileZ = open("featureFileFilteredZAccMulti.txt", 'a')

    #EXTRACT ALL AXIS OR EACH INDIVIDUAL AXIS
    usrExtract = input("\nExtract all Axis (y/N): ")

    #CLASS FOR BEFORE
    cfBefore = input("\nEnter Classifier Before Fall: ")

    # CLASS FOR AFTER
    cfAfter = input("Enter Classifier After Fall: ")

    #IF ALL AXIS INCLUDED
    if(usrExtract.lower() == "y".lower()):

        #For ALL BEFORE FALL
        for i in range(len(xAccBefore)):
            for AccX in range(len(xAccBefore[i])):
                featuresFileAcc.write(xAccBefore[i][AccX] + " ")
            for AccY in range(len(yAccBefore[i])):
                featuresFileAcc.write(yAccBefore[i][AccY] + " ")
            for AccZ in range(len(zAccBefore[i])):
                featuresFileAcc.write(zAccBefore[i][AccZ] + " ")

            featuresFileAcc.write(cfBefore + "\n")

        for i in range(len(xGravBefore)):
            for GravX in range(len(xGravBefore[i])):
                featuresFileGrav.write(xGravBefore[i][GravX] + " ")
            for GravY in range(len(yGravBefore[i])):
                featuresFileGrav.write(yGravBefore[i][GravY] + " ")
            for GravZ in range(len(zGravBefore[i])):
                featuresFileGrav.write(zGravBefore[i][GravZ] + " ")

            featuresFileGrav.write(cfBefore + "\n")

        for i in range(len(xGyroBefore)):
            for GyroX in range(len(xGyroBefore[i])):
                featuresFileGyro.write(xGyroBefore[i][GyroX] + " ")
            for GyroY in range(len(yGyroBefore[i])):
                featuresFileGyro.write(yGyroBefore[i][GyroY] + " ")
            for GyroZ in range(len(zGyroBefore[i])):
                featuresFileGyro.write(zGyroBefore[i][GyroZ] + " ")

            featuresFileGyro.write(cfBefore + "\n")

        for i in range(len(xLinearBefore)):
            for LinearX in range(len(xLinearBefore[i])):
                featuresFileLinear.write(xLinearBefore[i][LinearX] + " ")
            for LinearY in range(len(yLinearBefore[i])):
                featuresFileLinear.write(yLinearBefore[i][LinearY] + " ")
            for LinearZ in range(len(zLinearBefore[i])):
                featuresFileLinear.write(zLinearBefore[i][LinearZ] + " ")

            featuresFileLinear.write(cfBefore + "\n")

        for i in range(len(xMagBefore)):
            for MagX in range(len(xMagBefore[i])):
                featuresFileMag.write(xMagBefore[i][MagX] + " ")
            for MagY in range(len(yMagBefore[i])):
                featuresFileMag.write(yMagBefore[i][MagY] + " ")
            for MagZ in range(len(zMagBefore[i])):
                featuresFileMag.write(zMagBefore[i][MagZ] + " ")

            featuresFileMag.write(cfBefore + "\n")

        # FOR ALL AFTER FALL
        for i in range(len(xAccAfter)):
            for AccX in range(len(xAccAfter[i])):
                featuresFileAcc.write(xAccAfter[i][AccX] + " ")
            for AccY in range(len(yAccAfter[i])):
                featuresFileAcc.write(yAccAfter[i][AccY] + " ")
            for AccZ in range(len(zAccAfter[i])):
                featuresFileAcc.write(zAccAfter[i][AccZ] + " ")

            featuresFileAcc.write(cfAfter + "\n")

        for i in range(len(xGravAfter)):
            for GravX in range(len(xGravAfter[i])):
                featuresFileGrav.write(xGravAfter[i][GravX] + " ")
            for GravY in range(len(yGravAfter[i])):
                featuresFileGrav.write(yGravAfter[i][GravY] + " ")
            for GravZ in range(len(zGravAfter[i])):
                featuresFileGrav.write(zGravAfter[i][GravZ] + " ")

            featuresFileGrav.write(cfAfter + "\n")

        for i in range(len(xGyroAfter)):
            for GyroX in range(len(xGyroAfter[i])):
                featuresFileGyro.write(xGyroAfter[i][GyroX] + " ")
            for GyroY in range(len(yGyroAfter[i])):
                featuresFileGyro.write(yGyroAfter[i][GyroY] + " ")
            for GyroZ in range(len(zGyroAfter[i])):
                featuresFileGyro.write(zGyroAfter[i][GyroZ] + " ")

            featuresFileGyro.write(cfAfter + "\n")

        for i in range(len(xLinearAfter)):
            for LinearX in range(len(xLinearAfter[i])):
                featuresFileLinear.write(xLinearAfter[i][LinearX] + " ")
            for LinearY in range(len(yLinearAfter[i])):
                featuresFileLinear.write(yLinearAfter[i][LinearY] + " ")
            for LinearZ in range(len(zLinearAfter[i])):
                featuresFileLinear.write(zLinearAfter[i][LinearZ] + " ")

            featuresFileLinear.write(cfAfter + "\n")

        for i in range(len(xMagAfter)):
            for MagX in range(len(xMagAfter[i])):
                featuresFileMag.write(xMagAfter[i][MagX] + " ")
            for MagY in range(len(yMagAfter[i])):
                featuresFileMag.write(yMagAfter[i][MagY] + " ")
            for MagZ in range(len(zMagAfter[i])):
                featuresFileMag.write(zMagAfter[i][MagZ] + " ")

            featuresFileMag.write(cfAfter + "\n")

        featuresFileAcc.close()
        featuresFileGrav.close()
        featuresFileGyro.close()
        featuresFileLinear.close()
        featuresFileMag.close()

    # IF ALL AXIS SEPARATE
    # NOTE: this branch writes featuresFileX/Y/Z, which are only defined if the
    # per-axis open() calls in section 3 above are uncommented first.
    elif(usrExtract.lower() == "n"):

        #For ALL BEFORE FALL
        for i in range(len(xAccBefore)):

            #TIMES 3 FOR 3 LISTS
            for j in range(len(xAccBefore[i])):

                featuresFileX.write(xAccBefore[i][j] + " ")

            for k in range(len(yAccBefore[i])):

                featuresFileY.write(yAccBefore[i][k] + " ")

            for m in range(len(zAccBefore[i])):

                featuresFileZ.write(zAccBefore[i][m] + " ")

            featuresFileX.write(cfBefore + "\n")
            featuresFileY.write(cfBefore + "\n")
            featuresFileZ.write(cfBefore + "\n")


        #FOR ALL AFTER FALL
        for i in range(len(xAccAfter)):

            #TIMES 3 FOR 3 LISTS
            for j in range(len(xAccAfter[i])):

                featuresFileX.write(xAccAfter[i][j] + " ")

            for k in range(len(yAccAfter[i])):

                featuresFileY.write(yAccAfter[i][k] + " ")

            for m in range(len(zAccAfter[i])):

                featuresFileZ.write(zAccAfter[i][m] + " ")

            featuresFileX.write(cfAfter + "\n")
            featuresFileY.write(cfAfter + "\n")
            featuresFileZ.write(cfAfter + "\n")

        featuresFileX.close()
        featuresFileY.close()
        featuresFileZ.close()
Example #22
#!/usr/bin/env python
import sys

from optparse import OptionParser
from extract import *
from encode import *

extract = extract()
encode = encode()

# Main function called to begin execution
def main(options):
    if options.extract is True:
        if extract.extractFile(options.inputFile) != -1:
            exit("Extracting complete!")
        else:
            exit("Error upon extraction")
    else:
        encode.encodeFile(options.inputFile, options.metaFile, options.gifFile, options.outputFile)
        exit("Encoding complete!")

# Exits the program
def fail(message):
    if message is not None:
        print(message)

    sys.exit()

parser = OptionParser()
parser.add_option("-e", "--encode", action="store_false", dest="extract", default=False, help="Flag set to encode the input file into destination")
parser.add_option("-g", "--gif", dest="gifFile", metavar="GIF", help="The gif to encode into")
Example #23
def extract_model(wowname, dir='', fullpath=False):
    m = Model(wow_data.open(wowname))
    extract(wowname, dir, fullpath)
    extract_model_textures(m, dir, fullpath)
Example #24
import os
import sys

import numpy as np
from keras.models import load_model
# read_dir() and extract() are provided by the surrounding project

test_pickle = 'pickle/test.npy'
model_path = sys.argv[1]
sample = 'data/sample_submission.csv'
id_name = 'pickle/mfcc_id_name.pickle'
test_dir = sys.argv[2]
out_path = sys.argv[3]

MAX_LEN = 200

if os.path.exists(test_pickle):
    x = np.load(test_pickle)
else:
    x, w2i = read_dir(test_dir)
    x = extract(x)
    np.save(test_pickle, x)

try:
    model = load_model(model_path)
except OSError:
    np.save(out_path, np.load(model_path))
    exit(0)

preds = []
for row in x:
    a = np.zeros([1, 20, MAX_LEN, 1])
    a[0, :, :row.shape[1], 0] = row[:, :MAX_LEN]
    pred = model.predict(a)
    preds.append(pred)
Example #25
def job():
    extract()
    transform()
    load()
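A job like this is usually handed to a scheduler; a minimal sketch assuming the third-party schedule library (the nightly run time is hypothetical):

import time

import schedule  # third-party: pip install schedule

schedule.every().day.at("02:00").do(job)  # run the ETL pipeline nightly

while True:
    schedule.run_pending()
    time.sleep(60)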
Example #26
def gamess_to_libra(params, ao, E, C, suff):
    ## 
    # Finds the keywords and their patterns and extracts the parameters
    # \param[in] params :  contains input parameters, in dictionary form
    # \param[in,out] ao :  atomic orbital basis at "t" old
    # \param[in,out] E  :  molecular energies at "t" old
    # \param[in,out] C  :  molecular coefficients at "t" old
    # \param[in] suff : The suffix to add to the name of the output files
    # this suffix is now considered to be of a string type - so you can actually encode both the
    # iteration number (MD timestep), the nuclear configuration (e.g. trajectory), and any other
    # related information
    #
    # This function outputs the files for excited electron dynamics
    # in "res" directory.
    # It returns the forces which act on the atoms.
    # Also, it returns new atomic orbitals, molecular energies, and
    # molecular coefficients used for calculating time-averaged
    # molecular energies and Non-Adiabatic Couplings(NACs).
    #
    # Used in: md.py/run_MD

    # 2-nd file - time "t+dt"  new
    label, Q, R, Grad, E2, C2, ao2, tot_ene = extract(params["gms_out"],params["debug_gms_unpack"])

    # calculate overlap matrix of atomic and molecular orbitals
    P11, P22, P12, P21 = overlap(ao,ao2,C,C2,params["basis_option"])

    # calculate transition dipole moment matrices in the MO basis:
    # mu_x = <i|x|j>, mu_y = <i|y|j>, mu_z = <i|z|j>
    # this is done for the "current" state only    
    mu_x, mu_y, mu_z = transition_dipole_moments(ao2,C2)
    mu = [mu_x, mu_y, mu_z]

    if params["debug_mu_output"]==1:
        print "mu_x:";    mu_x.show_matrix()
        print "mu_y:";    mu_y.show_matrix()
        print "mu_z:";    mu_z.show_matrix()
 
    if params["debug_densmat_output"]==1:
        print "P11 and P22 matrixes should show orthogonality"
        print "P11 is";    P11.show_matrix()
        print "P22 is";    P22.show_matrix()
        print "P12 and P21 matrixes show overlap of MOs for different molecular geometries "
        print "P12 is";    P12.show_matrix()
        print "P21 is";    P21.show_matrix()


    ### TO DO: In the following section, we need to avoid computing NAC matrices in the full
    # basis. We will need the information on cropping, in order to avoid computations that 
    # we do not need (the results are discarded anyways)
    # calculate molecular energies and Non-Adiabatic Couplings(NACs) on MO basis
    E_mol = average_E(E,E2)
    D_mol = NAC(P12,P21,params["dt_nucl"])

    # reduce the matrix size
    E_mol_red = reduce_matrix(E_mol,params["min_shift"], params["max_shift"],params["HOMO"])
    D_mol_red = reduce_matrix(D_mol,params["min_shift"], params["max_shift"],params["HOMO"])
    ### END TO DO

    if params["print_mo_ham"]==1:
        E_mol.show_matrix(params["mo_ham"] + "full_re_Ham_" + suff)
        D_mol.show_matrix(params["mo_ham"] + "full_im_Ham_" + suff)
        E_mol_red.show_matrix(params["mo_ham"] + "reduced_re_Ham_" + suff)
        D_mol_red.show_matrix(params["mo_ham"] + "reduced_im_Ham_" + suff)

    # store "t+dt"(new) parameters on "t"(old) ones
    for i in range(0,len(ao2)):
        ao[i] = AO(ao2[i])
    E = MATRIX(E2)
    C = MATRIX(C2)

    # Returned data:
    # Grad: Grad[k][i] - i-th projection of the gradient w.r.t. to k-th nucleus (i = 0, 1, 2)
    # data: a dictionary containing transition dipole moments
    # E_mol: the matrix of the 1-el orbital energies in the full space of the orbitals
    # D_mol: the matrix of the NACs computed with 1-el orbitals. Same dimension as E_mol
    # E_mol_red: the matrix of the 1-el orbital energies in the reduced (active) space
    # D_mol_red: the matrix of the NACs computed with 1-el orbital. Same dimension as E_mol_red

    return tot_ene, Grad, mu, E_mol, D_mol, E_mol_red, D_mol_red
Example #27
def extract_model_textures(m, dir='', fullpath=False):
    for t in m.textures:
        extract(t.name, dir, fullpath)