import sys
import threading
import time


def main():
    start_time = time.time()

    fileNames = ['HitchcockData.mat']
    for fileName in fileNames:
        f, data = readMat2(fileName)
    threads = []

    for subject in range(len(data)):
        newfileName = fileName[:-4] + str(subject) + '.dat'
        t = threading.Thread(target=dft_worker, args=(f, data, subject, newfileName))
        threads.append(t)
        t.start()
    # Wait for all workers to finish before reporting the elapsed time.
    for t in threads:
        t.join()
    print("--- total run time: %s seconds ---" % str(time.time() - start_time))
def main():
    start_time = time.time()

    fileNames = ["HitchcockData.mat"]
    for fileName in fileNames:
        try:
            f, data = readMat2(fileName)
        except Exception:
            sys.exit("file not found: " + fileName + ", exiting")
    threads = []

    for subject in range(len(data)):
        newfileName = fileName[:-4] + str(subject) + "sample.dat"
        t = threading.Thread(target=dft_worker, args=(f, data, subject, newfileName))
        threads.append(t)
        t.start()
    # Wait for all workers to finish before reporting the elapsed time.
    for t in threads:
        t.join()
    print("--- total run time: %s seconds ---" % str(time.time() - start_time))
def main():
    start_time = time.time()
    fileNames = ['HitchcockData.mat']
    for fileName in fileNames:
        try:
            f, data = readMat2(fileName)
        except Exception:
            sys.exit('file not found: ' + fileName + ', exiting')
    processes = []
    # result_dict is a large nested dictionary of the form:
    #   key   -- "x,y,z" voxel coordinate
    #   value -- dictionary2
    # dictionary2:
    #   key   -- subject number
    #   value -- array of time series values
    # Warning: this exploits the GIL (global interpreter lock) for thread
    # safety; if the GIL is absent (e.g. under Cython nogil sections), races
    # may occur. The only lock deployed here guards the creation of a new
    # dictionary2 inside the outer dictionary.
    #manager = Manager()
    #result_dict = manager.dict()
    result_dict = {}
    for subject in range(len(data)):
        # newfileName = fileName[:-4] + str(subject) + '.dat'
        # p = Process(target=subject_worker, args=(f, data, subject, result_dict))
        subject_worker(f, data, subject, result_dict)
        # processes.append(p)
        # p.start()
    # Wait for all of them to finish
    # for x in processes:
    #     x.join()
    # Now that all workers have finished, we have the dictionary described
    # above; next, normalize it per subject:
    # result_dict = normalize_columns(result_dict)
    print("--- total run time: %s seconds ---" % str(time.time() - start_time))
    f = open('result.dat','w')
    for key in result_dict.keys():
        f.write(key)
        f.write(';')
        for subject in sorted(result_dict[key].keys()):
            f.write(','.join(map(str,result_dict[key][subject])))

            f.write(',')
        f.write('\n')
    f.close()
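
# subject_worker is likewise not defined here. A minimal sketch consistent
# with the comments above (outer key "x,y,z" -> dictionary2 mapping subject
# number -> time series); iter_voxels and the exact key format are
# hypothetical, not the original implementation:
_dict_lock = threading.Lock()

def subject_worker(f, data, subject, result_dict):
    for (x, y, z), series in iter_voxels(data[subject]):  # iter_voxels: hypothetical helper
        key = '%d,%d,%d' % (x, y, z)
        # As the warning above notes, only the creation of a new dictionary2
        # is locked; the per-subject write relies on the GIL.
        with _dict_lock:
            if key not in result_dict:
                result_dict[key] = {}
        result_dict[key][subject] = series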


# Example no. 4
def tryInsert(session, s, attempt):
    # Execute the statement, retrying on failure; `attempt` counts retries
    # (renamed from `time` to avoid shadowing the time module).
    try:
        session.execute(s)
    except Exception:
        print("failed to insert, retrying: " + s[0:100] + " " + str(attempt))
        tryInsert(session, s, attempt + 1)
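
# Note: the recursion above is unbounded and will hit Python's recursion limit
# if the insert keeps failing. A bounded, iterative variant (a sketch under
# the same session.execute interface) could look like this:
def tryInsertBounded(session, s, max_attempts=5):
    for attempt in range(1, max_attempts + 1):
        try:
            session.execute(s)
            return
        except Exception:
            print("failed to insert, retrying: " + s[0:100] + " " + str(attempt))
    sys.exit("giving up on statement after %d attempts" % max_attempts)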


# Read data from the files listed in the fileNames array.
for fileName in fileNames:
    try:
        f, data = readMat2(fileName)
    except Exception:
        sys.exit("file not found: " + fileName + ", exiting")

try:
    session = getSession(username="******", password="******")
    # default_timeout is in seconds; raising it prevents request timeouts
    # from surfacing as exceptions.
    session.default_timeout = 30
except Exception:
    sys.exit("not able to start db session")

# Process the data.
tempInsert = ""
tempCreate = prepareCreateTable(fileName[:-4] + "total2")
print("creating table: " + tempCreate)
tryInsert(session, tempCreate, 1)
for subject in range(len(data)):