def generateGazeFrames(eyetrackList, dirname="./gaze_frames/", granularity=250):
    """Render one gaze-map PNG per `granularity`-millisecond segment.

    eyetrackList -- list of sample dicts; each must carry "Time" and
                    "ContentID", and the first one also "OriginWidth" /
                    "OriginHeight" for the blank-frame fallback.
    dirname      -- output directory; created if missing.  A trailing "/"
                    is appended when absent (resolves the old TODO).
    granularity  -- segment length in milliseconds.
    """
    # Normalize the directory name so the string concatenations below
    # always produce valid paths.
    if not dirname.endswith("/"):
        dirname += "/"
    if not os.path.isdir(dirname):
        os.mkdir(dirname)

    # Find the latest sample timestamp so we know how many frames to emit.
    maxTime = 0
    for eyetrack in eyetrackList:
        if maxTime < eyetrack["Time"]:
            maxTime = eyetrack["Time"]

    contentID = eyetrackList[0]["ContentID"]
    time = 0
    i = 0
    while time <= maxTime:
        eyetrackSegment = Task1.refineTimeframe(eyetrackList, (time, time + granularity))
        # Hoisted: every branch below writes to the same frame path.
        path = dirname + contentID + "-" + str(i) + ".png"
        if len(eyetrackSegment) > 0:
            Task1.makeMap(eyetrackSegment, path)
        elif time == 0:
            # No data in the very first segment: emit a fully transparent
            # frame so frame numbering starts at 0 regardless.
            img = Image.new("RGBA",
                            (eyetrackList[0]["OriginWidth"],
                             eyetrackList[0]["OriginHeight"]),
                            (0, 0, 0, 0))
            img.save(path, "PNG")
        else:
            # No data in this segment: repeat the previous frame so the
            # assembled movie keeps a constant frame rate.
            print("Skipping segment %s - %s" % (time, time + granularity))
            shutil.copyfile(dirname + contentID + "-" + str(i - 1) + ".png", path)
        time += granularity
        i += 1
def refined_book(book_id='all'):
    """Return stop-word-refined word data for one book or for all four.

    For a single book id ('1'..'4') returns the cached words (or computes,
    stores and returns them).  For 'all'/'ALL' returns a JSON object keyed
    by book id.  Any other id yields the error string.

    NOTE(review): the single-book cache hit returns query["words"] as-is,
    while the other branches return json.dumps(...) — presumably
    query["words"] is already a JSON string (the 'all' branch json.loads
    it); confirm against the writers.
    """
    if book_id == '1' or book_id == '2' or book_id == '3' or book_id == '4':
        # Look up a cached document for this book id.
        query = db.processed_text.find_one({'book_id': book_id})
        if query:  # a document exists for this book id
            if query["words"]:
                # Cache hit: hand back the stored words payload directly.
                words_json = query["words"]
                return words_json
            else:
                # Document exists but has no words yet: compute the refined
                # (stop-word-free) text and update the existing document.
                data = Task1.show_the_list_of_stop_words(book_id)
                updateQueryTask1(book_id, data)
                return json.dumps(data)
        else:
            # No document at all: compute the refined words and insert
            # a fresh document for this book id.
            words_are = Task1.show_the_list_of_stop_words(book_id)
            insertQueryTask1(book_id, words_are)
            return json.dumps(words_are)
    elif book_id == 'ALL' or book_id == 'all':
        # Aggregate the per-book word dictionaries into one mapping
        # keyed by book id.
        my_dict = {}
        list_of_all_books = ['1', '2', '3', '4']
        for book_no in list_of_all_books:
            query = db.processed_text.find_one({'book_id': book_no})
            if query:
                if query["words"]:
                    # Cache hit: stored value is a JSON string — decode it.
                    words_dict = json.loads(query["words"])
                else:
                    # Document present but empty: compute and update in place.
                    words_dict = Task1.show_the_list_of_stop_words(book_no)
                    updateQueryTask1(book_no, words_dict)
            else:
                # No document: compute and insert.
                words_dict = Task1.show_the_list_of_stop_words(book_no)
                insertQueryTask1(book_no, words_dict)
            my_dict[book_no] = words_dict
        return json.dumps(my_dict)
    # Unknown book id.
    return "Books only from 1 to 4"
def test_create_arrival():
    """Smoke-test: enqueue 200 arrival events and dump the queue."""
    queue = Task1.PriorityQueue()
    print('Testing to create 200 arrival events.')
    queue = create_arrival(queue, 200)
    queue.print_all()
    print('Test success.')
    print()
def test_serving():
    """Exercise serving() for three different customer arrival times."""
    queue = Task1.PriorityQueue()
    # (announcement, arrival time in minutes) pairs, run in order.
    scenarios = (
        ('Testing to find the time when the customer is served and leaves if the customer arrives at 5.00pm.', 540),
        ('\nTesting to find the time when the customer is served and leaves if the customer arrives at 12.00pm.', 240),
        ('\nTesting to find the time when the customer is served and leaves if the customer arrives at 9.39am.', 99),
    )
    for announcement, arrival_time in scenarios:
        queue.reset()
        print(announcement)
        queue.add('Arrival', arrival_time)
        queue = serving(queue)
def downloadAndSaveArticles():
    """Fetch alt-text descriptions for the first ten comic links and persist them."""
    comic_links = Task1.getComicLinks()[0:10]
    # Download each linked page and pull the description out of it.
    descs = [getAltAttrFromArticle(link.attr('href')) for link in comic_links]
    # Recreate the table from scratch and store the queried rows.
    with sqlite3.connect('xkcd_db.db') as con:
        cur = con.cursor()
        cur.execute('drop table if exists xkcd')
        cur.execute('create table xkcd (url text, alt text)')
        for desc in descs:
            saveDescAndAltToDatabase(desc[1], desc[0], con, cur)
class Requirements(unittest.TestCase):
    """Spec tests for ColumnNameConversionUtility in both directions."""

    # (column name, expected 0-based column number)
    nameToNumberReqs = (("A", 0), ("Z", 25), ("AA", 26), ("ZZZZ", 475253))
    # (0-based column number, expected column name)
    numberToColumnReqs = ((0, "A"), (25, "Z"), (26, "AA"), (475253, "ZZZZ"))
    tester = Task1.ColumnNameConversionUtility()

    def testNameToNumber(self):
        """columnNameToColumnNumber should give expected results"""
        for columnName, expected in self.nameToNumberReqs:
            self.assertEqual(expected, self.tester.columnNameToColumnNumber(columnName))

    def testNumberToColumnReqs(self):
        """columnNumberToColumnName should give expected results
        which are the inverse of columnNameToColumnNumber"""
        for columnNumber, expected in self.numberToColumnReqs:
            self.assertEqual(expected, self.tester.columnNumberToColumnName(columnNumber))
import Task1
import Task2

# The original repeated Task1.sets() five times verbatim; a loop states
# the intent (populate five times, then read back) without the copy-paste.
for _ in range(5):
    Task1.sets()
print(Task2.gets())
# If you get stuck on one of them, please go on to the rest and then come back to it. ######################################################################################## ######################################################################################## ######################################################################################## # 1) Create an image which shows the heat map of where people are looking from # time = 3 to time = 4 seconds on a black background in dots or a false color temp # gradient. # It should look roughly like this, but without the web page: # http://johnnyholland.org/wp-content/uploads/193383382_cf3b3bd6d0_o.png # Help on drawing: http://quickies.seriot.ch/index.php?id=256 ######################################## Task1.run(sessions, eventList, eyetrackList) ######################################################################################## # 2) Create an image for each 0.25 seconds and output them to a directory, # using FFMPEG or other lib to combine them all together into a movie. # Hint, create all images first then, in a separate step, call FFMPEG # to have it combine them itself. ######################################## Task2.run(sessions, eventList, eyetrackList)
def test_quote_split(self):
    """quote_split should break the quote into its individual words."""
    expected = ['The', 'most', 'beautiful', 'thing', 'we', 'can',
                'experience', 'is', 'the', 'mysterious.', 'It', 'is',
                'the', 'source', 'of', 'all', 'true', 'art', 'and',
                'science.']
    self.assertEqual(Task1.quote_split(quote), expected)
def test_empty_dict_and_grid(self):
    """find_words must return nothing when either the grid or the dictionary is empty."""
    empty_cases = ((self.grid2, self.dictionary2),
                   (self.grid2, self.dictionary),
                   (self.grid, self.dictionary2))
    for grid, dictionary in empty_cases:
        self.assertItemsEqual(Task1.find_words(grid, dictionary), [])
def testInputBounds(self):
    """only integer numbers in the range 0 ... 475253 are accepted"""
    # One value just below and one just above the valid range.
    for out_of_range in (-1, 475254):
        self.assertRaises(Task1.InvalidColumnNumber,
                          Task1.ColumnNameConversionUtility().columnNumberToColumnName,
                          out_of_range)
def main():
    """Interactive dispatcher: read a task number, collect that task's
    inputs from stdin, and delegate to the matching TaskN.starter().

    The feature-model / dimension-reduction / k prompts and the label
    prompt were duplicated across five branches; they now live in the
    private helpers below.  All prompt strings are unchanged.
    """
    task = int(input("Input Task number:"))
    if task == 1:
        feature_model, dimension_reduction, k = _ask_common_inputs()
        Task1.starter(feature_model, dimension_reduction, k)
    elif task == 2:
        choice = input(
            "Do you want to go ahead with task 1 input configurations? yes(y) or no(n) "
        )
        image_id = input("Enter image ID: ")
        m = int(input("Enter m: "))
        # None signals "reuse the task 1 configuration" downstream.
        feature_model = dimension_reduction = k = None
        if choice == "n":
            feature_model, dimension_reduction, k = _ask_common_inputs()
        Task2.starter(feature_model, dimension_reduction, k, image_id, m)
    elif task == 3:
        feature_model, dimension_reduction, k = _ask_common_inputs()
        label = _ask_label()
        Task3.starter(feature_model, dimension_reduction, k, label)
    elif task == 4:
        choice = input(
            "Do you want to go ahead with task 3 input configurations? yes(y) or no(n) "
        )
        image_id = input("Enter image ID: ")
        m = int(input("Enter m: "))
        # None signals "reuse the task 3 configuration" downstream.
        feature_model = dimension_reduction = k = label = None
        if choice == "n":
            feature_model, dimension_reduction, k = _ask_common_inputs()
            label = _ask_label()
        Task4.starter(feature_model, dimension_reduction, k, label, image_id, m)
    elif task == 5:
        feature_model, dimension_reduction, k = _ask_common_inputs()
        label = _ask_label()
        image_id = input("Enter image ID: ")
        Task5.starter(feature_model, dimension_reduction, k, label, image_id)
    elif task == 6:
        subject_id = int(input("Enter subject ID: "))
        Task6.starter(subject_id)
    elif task == 7:
        k = int(input("Enter k: "))
        Task7.starter(k)
    elif task == 8:
        # Prompt casing kept exactly as the original.
        k = int(input("enter k : "))
        Task8.starter(k)
    elif task == 9:
        feature_model, dimension_reduction, k = _ask_common_inputs()
        visualizer = int(input("Enter Visualizer:\n1. Data \t2.Feature "))
        ExtraCredit.starter(feature_model, dimension_reduction, k, visualizer)
    else:
        print("Enter Task number (1-9)")


def _ask_common_inputs():
    """Prompt for feature model, dimension-reduction technique and k."""
    feature_model = int(
        input("Select the Feature Model:\n1. CM\t2. LBP\t3. HOG\t4. SIFT : ")
    )
    dimension_reduction = int(
        input(
            "Select the Dimension Reduction Technique:\n1. PCA\t2. SVD\t3. NMF\t4. LDA : "
        )
    )
    k = int(input("Enter k: "))
    return feature_model, dimension_reduction, k


def _ask_label():
    """Prompt for one of the eight image labels."""
    return int(
        input(
            "Select the label:\n1. Left\t2. Right\t3. Dorsal\t4. Palmar\n5. With accessories\t6. Without accessories\t7. Male\t8. Female: "
        )
    )
def test_1(self):
    """func([1, 2]) should produce [2, 1]."""
    result = Task1.func([1, 2])
    self.assertEqual([2, 1], result)
import Task1
import Task2
import Task3

# Each TaskN module exposes a TaskN class for one problem type:
# Task1 = regression, Task2 = multi-category, Task3 = multi-label.
# Instances are created lazily, in order, and both models are run on each.
for make_task in (Task1.Task1, Task2.Task2, Task3.Task3):
    runner = make_task()
    runner.model_1_run()
    runner.model_2_run()
def task1():
    """Run the decoding step on a fresh Task1 instance."""
    decoder = Task1()
    decoder.decoding()
def testToColumnNumberCase(self):
    """output must be upper case strings"""
    # Fix: the old code constructed a new ColumnNameConversionUtility on
    # every one of the 475254 iterations; one instance suffices.
    # assertEquals is a deprecated alias of assertEqual.
    converter = Task1.ColumnNameConversionUtility()
    for integer in range(0, 475254):
        name = converter.columnNumberToColumnName(integer)
        self.assertEqual(name, name.upper())
def testToColumnNameCase(self):
    """column names should be upper case"""
    # Fix: reuse a single converter instead of constructing three fresh
    # utilities per iteration across 475254 iterations.
    converter = Task1.ColumnNameConversionUtility()
    for integer in range(0, 475254):
        name = converter.columnNumberToColumnName(integer)
        # Upper-case form must be accepted...
        converter.columnNameToColumnNumber(name.upper())
        # ...while the lower-case form must be rejected.
        self.assertRaises(Task1.InvalidColumnName,
                          converter.columnNameToColumnNumber,
                          name.lower())
def testSanity(self):
    """columnNameToColumnNumber(columnNumberToColumnName(n)) == n
    for all n between 0 and 475253, or A and ZZZZ"""
    # Fix: hoist the utility out of the loop — the old code built two new
    # instances per iteration for 475254 iterations.
    converter = Task1.ColumnNameConversionUtility()
    for integer in range(0, 475254):
        name = converter.columnNumberToColumnName(integer)
        self.assertEqual(integer, converter.columnNameToColumnNumber(name))
def test_quote_upper(self):
    """quote_upper should return the quote with every letter capitalized."""
    expected = ("THE MOST BEAUTIFUL THING WE CAN EXPERIENCE IS THE "
                "MYSTERIOUS. IT IS THE SOURCE OF ALL TRUE ART AND SCIENCE.")
    self.assertEqual(Task1.quote_upper(quote), expected)
# Signature-image pipeline: binarize, draw bounding box, mark centroid.
path = "../Images/"
filename = "signature.jpg"

# Opening Image — only proceed if the filename has exactly one extension.
if len(filename.split('.')) == 2:
    image = cv2.imread(path + filename, 0)  # 0 = load as grayscale

    # Task 0: binarize; Binarization also writes the "bin_" image to disk.
    filename = path + "bin_" + filename
    bin_image = Task0.Binarization(image, filename)
    filename = filename.split('/')[-1]  # strip the directory for the next prefix

    # Task 1: bounding box of the signature.
    height, width = bin_image.shape
    filename = path + "box_" + filename
    top, bottom, left, right = Task1.BoundingBox(bin_image, height, width)
    # NOTE(review): cv2.rectangle takes (x, y) corner points; passing
    # (top, left) / (bottom, right) looks like (row, col) ordering —
    # confirm against Task1.BoundingBox's return convention.
    bounding_box_image = cv2.rectangle(bin_image, (top, left), (bottom, right), (0,255,0), 3)
    cv2.imwrite(filename, bounding_box_image)
    B = (left, right, top, bottom)
    filename = filename.split('/')[-1]

    # Task 2: centroid over the full image extent.
    filename = path + "cen_" + filename
    cx, cy = Task2.FindCentroid(bin_image, 0, bin_image.shape[1], 0, bin_image.shape[0])
    # Circle center is (cy, cx) — presumably converting (row, col) back to
    # (x, y); verify against FindCentroid's return order.
    centroid_image = cv2.circle(bounding_box_image, (cy, cx), 10, 200, -1)
    cv2.imwrite(filename, centroid_image)
    C = (cx, cy)
    filename = filename.split('/')[-1]
def test_count_quote(self):
    """count_quote should return 10 for the sample quote."""
    result = Task1.count_quote(quote)
    self.assertEqual(result, 10)
def test_example(self):
    """find_words should locate exactly CAT, CAR and CARD in the sample grid."""
    found = Task1.find_words(self.grid, self.dictionary)
    self.assertItemsEqual(found, ["CAT", "CAR", "CARD"])
filename = "signature.jpg" # Opening Image if len(filename.split('.')) == 2: image = cv2.imread(filename, 0) # Task 0 filename = "bin_" + filename bin_image = Task0.Binarization(image, filename) # cv2.imshow("Binarization", bin_image) # Task 1 width, height = bin_image.shape filename = "box_" + filename top, bottom, left, right, bounding_box_image = Task1.BoundingBox(width, height, bin_image, filename) B = (left, right, top, bottom) # cv2.imshow("Bounding Box", bounding_box_image) # Task 2 filename = "cen_" + filename centroid_image, cx, cy = Task2.FindCentroid(width, height, bin_image, bounding_box_image, filename) C = (cx, cy) # cv2.imshow("Centroid", centroid_image) # Task 3 cx = int(cx) cy = int(cy) filename = "seg_" + filename top_left, bottom_left, top_right, bottom_right, segmented_image = Task3.DivideBoundingBox(centroid_image, top, bottom, left, right, cx, cy, filename)
def test_length_string(self):
    """length_string should report the quote's character count (107)."""
    self.assertEqual(Task1.length_string(quote), 107)
def test_6(self):
    """With Task1.func patched to the txt fixture value, calling it should yield [6, 3, 2]."""
    with patch('Task1.func') as mocked_func:
        mocked_func.return_value = self.get_result_from_txt()
        self.assertEqual([6, 3, 2], Task1.func([1, 2]))
import Task1
import Task2And3

# How many records to request per endpoint.
ITEM_COUNT = {
    'albums': 100,
    'photos': 5000,
    'todos': 200,
    'users': 10,
    'posts': 100,
    'comments': 500,
}

# Task 1: fetch and persist everything listed above.
requester = Task1.MakingHttpRequestsTask(ITEM_COUNT)
requester.make_and_save_requests()

# Tasks 2 and 3 operate on the saved data.
Task2And3.task2()
Task2And3.task3()
def test_2(self):
    """func([1, 2, 3, 4]) should produce [24, 12, 8, 6]."""
    result = Task1.func([1, 2, 3, 4])
    self.assertEqual([24, 12, 8, 6], result)
reader = csv.reader(f)
calls = list(reader)

"""
TASK 2: Which telephone number spent the longest time on the phone
during the period? Don't forget that time spent answering a call is
also time spent on the phone.
Print a message:
"<telephone number> spent the longest time, <total time> seconds, on the phone during September 2016.".
"""

# Steps:
# Get the list of unique numbers from task 1.
ListOfUniqueNo, column_search = Task1.task1()

# Map every unique number to its accumulated call duration in seconds
# (placeholder 0 until task2_Opt runs).
callDuration = 0
map_numbers = dict.fromkeys(ListOfUniqueNo, callDuration)


def task2_Opt(ListOfUniqueNo, column_search):
    """Accumulate row[3] (call duration) into map_numbers for every tracked
    number found in the first `column_search` columns of each record.
    """
    # O(2n): scan the records once per searched column.
    for i in range(column_search):
        for row in calls:
            # Fix: membership test on the dict directly (".keys()" was
            # redundant), and the read/add/update triple collapsed to +=.
            if row[i] in map_numbers:
                map_numbers[row[i]] += int(row[3])
# read command line params for stemmed data file lstemmeddatafile = CACM_STEM_FILE + FILE_EXT if len(input_arguments) > 2: lstemmeddatafile = input_arguments[2] # Tokenize raw text tokenize_raw_data(ldatafilesdir) # Write given queries to a file create_directory(DIR_FOR_OUTPUT_FILES) write_given_queries_to_file( CACM_QUERY_FILE + FILE_EXT, DIR_FOR_OUTPUT_FILES + "/" + FILE_FOR_QUERIES + FILE_EXT) Task1.execute_system(ldatafilesdir) Task2.execute_system(ldatafilesdir) Task3A.execute_system(ldatafilesdir) Task3B.execute_system(lstemmeddatafile) ldictoffolderpaths = {} ldictoffolderpaths[ 1] = DIR_FOR_OUTPUT_FILES + "/" + TASK1_CONST + "/" + DIR_FOR_BM25_OUTPUT ldictoffolderpaths[ 2] = DIR_FOR_OUTPUT_FILES + "/" + TASK1_CONST + "/" + DIR_FOR_TFIDF_OUTPUT ldictoffolderpaths[3] = LUCENE + "/" + LUCENE_RESULTS ldictoffolderpaths[ 4] = DIR_FOR_OUTPUT_FILES + "/" + TASK2_CONST + "/" + DIR_FOR_BM25_OUTPUT ldictoffolderpaths[ 5] = DIR_FOR_OUTPUT_FILES + "/" + TASK2_CONST + "/" + DIR_FOR_TFIDF_OUTPUT ldictoffolderpaths[
def test_count(self):
    """count should report five tokens in the sample sentence."""
    self.assertEqual(Task1.count('People Are Awesome ... !!!'), 5)
def test_loops(self):
    """A grid cell may not be reused, so "ATA" cannot be formed from "AT"."""
    found = Task1.find_words(["AT"], ["ATA"])
    self.assertItemsEqual(found, [])
def test_upper(self):
    """upper should capitalize every letter of the sentence."""
    result = Task1.upper('People Are Awesome ... !!!')
    self.assertEqual(result, 'PEOPLE ARE AWESOME ... !!!')
def evaluate_all_systems(p_dictoffolderpaths, p_run_all_systems=True):
    """Evaluate every retrieval system's results and write CSV reports.

    p_dictoffolderpaths -- system key -> folder of that system's run output.
    p_run_all_systems   -- when True, (re)run all four systems first.

    Writes, per system: precision/recall and P@5/P@20 CSVs; then aggregate
    mean values (MAP, MRR, P@5, P@20) and per-query average precision for
    all systems; finally runs t-tests across models.
    NOTE(review): ldirpath is read from enclosing scope — confirm it is set
    before this function is called.
    """
    if p_run_all_systems:
        Task1.execute_system('')
        Task2.execute_system('')
        Task3A.execute_system('')
        Task3B.execute_system('')
    print "Evaluating all systems..."
    create_directory(ldirpath)
    # Queries keyed by query id, sorted for stable output order.
    lquerydict = get_given_queries_in_dict(CACM_QUERY_FILE + FILE_EXT)
    lquerydict = get_sorted_dict(lquerydict)
    lallsystemsmeanvaluesdict = {}
    lallsystemsavgprecisionvaluesdict = {}
    for lkey, lvalue in p_dictoffolderpaths.iteritems():
        lsystemname = get_system_name(lkey)
        print "Evaluating system: " + lsystemname
        lsystemmeanvaluesdict = {}
        ldictofavgprecisionvalues = {}
        # CSV header rows for the two per-system report files.
        llistofprecisionandrecallvalues = [
            "Query Id,DocId,Rank,Precision,Recall"
        ]
        llistofpatkvalues = ["Query Id,P@5,P@20"]
        evaluate_system(
            lvalue,  # results folder path to evaluate
            lquerydict,  # dictionary containing all queries with query id
            lsystemmeanvaluesdict,  # dictionary to hold the mean values for all systems
            ldictofavgprecisionvalues,  # dictionary to hold avg precision values of systems
            llistofprecisionandrecallvalues,  # results of precision recall values for all queries
            llistofpatkvalues  # list to hold pat5 and pat20 values for all queries for this system
        )  # evaluate_system..
        # Collect this system's results into the cross-system aggregates.
        lallsystemsmeanvaluesdict[lkey] = lsystemmeanvaluesdict
        ldictofavgprecisionvalues = get_sorted_dict(ldictofavgprecisionvalues)
        lallsystemsavgprecisionvaluesdict[lkey] = ldictofavgprecisionvalues
        # Per-system report: precision/recall per ranked document.
        print "Writing Precision and Recall values for system: " + lsystemname
        llfilename = ldirpath + "/" + FILE_FOR_PRECISON_RECALL_RESULTS_OF_SYSTEM + "_" + lsystemname + CSV_FILE_EXT
        create_file(llfilename, '')
        convert_data_from_collection_to_file(llfilename, llistofprecisionandrecallvalues)
        # Per-system report: P@5 and P@20 per query.
        print "Writing P@5 and P@20 values for system: " + lsystemname
        llfilename = ldirpath + "/" + FILE_FOR_PATK_RESULTS_OF_SYSTEM + "_" + lsystemname + CSV_FILE_EXT
        create_file(llfilename, '')
        convert_data_from_collection_to_file(llfilename, llistofpatkvalues)
    # Aggregate report: one MAP/MRR/P@5/P@20 row per system.
    print "Writing mean values to file"
    llistofmeanvalues = ["System,MAP,MRR,P@5,P@20"]
    for lkey, lvalue in lallsystemsmeanvaluesdict.iteritems():
        lsystemname = get_system_name(lkey)
        lstr = lsystemname + "," + str(lvalue[MAP_CONST]) + "," + str(lvalue[MRR_CONST]) + \
            "," + str(lvalue[PAT5_CONST]) + "," + str(lvalue[PAT20_CONST])
        llistofmeanvalues.append(lstr)
    lfilename = ldirpath + "/" + FILE_FOR_ALL_SYSTEMS_MEAN_VALUES + CSV_FILE_EXT
    create_file(lfilename, '')
    convert_data_from_collection_to_file(lfilename, llistofmeanvalues)
    # Aggregate report: average precision per (system, query).
    print "Writing average precision values to file"
    llistavgprecisionresults = ["System,Query Id,Average Precision"]
    for lkey, lvalue in lallsystemsavgprecisionvaluesdict.iteritems():
        lsystemname = get_system_name(lkey)
        for ljkey, ljvalue in lvalue.iteritems():
            lstr = lsystemname + "," + str(ljkey) + "," + str(ljvalue)
            llistavgprecisionresults.append(lstr)
    lfilename = ldirpath + "/" + FILE_FOR_ALL_SYSTEMS_AVG_PRECISION_VALUES + CSV_FILE_EXT
    create_file(lfilename, '')
    convert_data_from_collection_to_file(lfilename, llistavgprecisionresults)
    # Statistical significance testing across models.
    print "Run t-tests for models"
    run_tests_for_models(lallsystemsavgprecisionvaluesdict, len(lquerydict))
def test_lower(self):
    """lower should down-case every letter of the sentence."""
    result = Task1.lower('People Are Awesome ... !!!')
    self.assertEqual(result, 'people are awesome ... !!!')
# This is a sample Python script. import Task1, Task2, Task3, Task4, Task5, Task6, Task7, Task8, Task9, Task10, Task11, Task12 # Press the green button in the gutter to run the script. if __name__ == '__main__': print(" Enter first number : ") inputNumber1 = int(input()) print(" Enter second number : ") inputNumber2 = int(input()) # 1. Получить список всех нечётных чисел на отрезке [a, b]. print("<--------- Task 1 --------->") print(Task1.getOddNumbersFromRange(inputNumber1, inputNumber2)) # 2. Получить список всех чисел Фибоначчи на отрезке [a, b]. print("<--------- Task 2 --------->") print(Task2.cutByRangeFibonacci(inputNumber1, inputNumber2)) # 3. Напишите функцию, которая принимает на вход два параметра: a и b. print("<--------- Task 3 --------->") a = 12 b = "str" print(" out parameters are : {a}, {b}".format(a = a, b = b)) Task3.functionInRequest(a, b) # 4. Напишите функцию, которая принимает на вход три параметра print("<--------- Task 4 --------->") # # leap years c = [1900, 1904, 1908, 1964, 2020] startYear = 1900 finishYear = 2020 print(" our years' range : [ {s}, {f} ]".format(s = startYear, f = finishYear)) print(" exclude list from range : %s " % c ) print(Task4.listOfLeapYearsOnRange(startYear, finishYear, c)) # array for task 5-9 array = [1, "sd", 3, 1.2, 5.6, 0, -1]
def test_split(self):
    """split should break the sentence into its whitespace-separated tokens."""
    result = Task1.split('People Are Awesome ... !!!')
    self.assertEqual(result, ['People', 'Are', 'Awesome', '...', '!!!'])
import data_handler
import matplotlib as mpl
mpl.use('TkAgg')  # backend must be selected before pyplot is imported
import matplotlib.pyplot as plt
import Task1

# Accuracy obtained for each train/test split ratio, in order.
all_accs = []
# Ten letter classes drawn from the handwritten-letters data set.
sample_string = "adgjwryozm"
splits = ["1:34", "1:29", "1:24", "1:19", "1:14", "1:9", "1:4"]
for split in splits:
    print("Current split %s" % split)
    trainX, trainY, testX, testY = data_handler.splitData2TestTrain(
        data_handler.pickDataClass(
            'Handwrittenletters.txt',
            data_handler.letter_2_digit_convert(sample_string)), 39, split)
    all_accs.append(Task1.predict(trainX, trainY, testX, testY, 10))
print(all_accs)

# x positions: one per split ratio.
x = [1, 2, 3, 4, 5, 6, 7]
# Plot the data.
# NOTE(review): plt.plot with a single point per call draws no connecting
# line; presumably the intent was one line across all points — confirm.
for i in range(len(x)):
    plt.scatter(x[i], all_accs[i], color='black', marker='^')
    plt.plot(x[i], all_accs[i], label=i)
plt.legend(loc='lower right', frameon=False)
# Show the plot
plt.show()
def test_strip(self):
    """strip should return 'People Are Awesome ' for the sample input."""
    result = Task1.strip('People Are Awesome ... !!!')
    self.assertEqual(result, 'People Are Awesome ')
# If you get stuck on one of them, please go on to the rest and then come back to it. ######################################################################################## ######################################################################################## ######################################################################################## # 1) Create an image which shows the heat map of where people are looking from # time = 3 to time = 4 seconds on a black background in dots or a false color temp # gradient. # It should look roughly like this, but without the web page: # http://johnnyholland.org/wp-content/uploads/193383382_cf3b3bd6d0_o.png # Help on drawing: http://quickies.seriot.ch/index.php?id=256 ######################################## Task1.run(sessions, eventList, eyetrackList) ######################################################################################## # 2) Create an image for each 0.25 seconds and output them to a directory, # using FFMPEG or other lib to combine them all together into a movie. # Hint, create all images first then, in a separate step, call FFMPEG # to have it combine them itself. ######################################## Task2.run(sessions, eventList, eyetrackList) ######################################################################################## # 3) Identify the central point(s) of gaze over a period of time and # overlay a transparent white dot on the 1-3 areas of interest. # the number should depend on if everyone is looking in a single # place of if they are all looking in different places.
def test3(self):
    """GBP -> EUR conversion of 10 should yield 12.

    Runs only when the module-level currency pair is GBP/EUR, mirroring
    the original guard.  (Test 2 fails: converting from GBP does not work.)
    """
    if Task1.c_type1 == "GBP" and Task1.c_type2 == "EUR":
        converted = Task1.conversion("GBP", "EUR", 10)
        assert(converted == 12)
# import the previous built python files Task1.py, Task2.py and Task3.py import Task1 import Task2 import Task3 import re from collections import Counter # create a dictionary contains each character as the "key" # and a number of occurrence as its "value" occ_dict = {} occ_total = {} # declare a counter for loop control count = 0 Task1.print_structure() # combine the functions imported for Task2.py and Task3.py together, # keep taking and processing the user input until the input is an empty string while True: # for the first loop (count = 0), # assign the return value of Task2.take_input (should just be an input from user) # to "user_input" variable for later use if count == 0: user_input = Task2.take_input() # for later loops (count > 0), break the loop if the user input is empty elif count > 0 and user_input == "": break
def test4(self):
    """EUR -> GBP conversion of 12 should yield 10.

    Runs only when the module-level currency pair is EUR/GBP.
    (Test 3 fails: converting to GBP does not work.)
    """
    if Task1.c_type1 == "EUR" and Task1.c_type2 == "GBP":
        numb1 = 12
        # Fix: the old code passed Task1.numb1 (an attribute that does not
        # necessarily exist) instead of the local amount, discarded the
        # conversion result, and asserted on an undefined name `z`.
        z = Task1.conversion(Task1.c_type1, Task1.c_type2, numb1)
        assert(z == 10)
#!/usr/bin/env python # encoding: utf-8 """ created by me for task2 """ import Task1 Task1.task1_function()
numOfSignComp=int(raw_input("Enter number of Significant Components m:\n")) #numOfBits=1 #numOfSignComp=2 #videoDir=r'F:\ASU_Projects\MIS\mis\Phase3\reducedSizeVideo' #videoFileName='R2' #frameId=0 fullPath = '{0}{2}{1}'.format(videoDir, videoFileName+fileSuffix, slash) tasks=['1a', '1b', '1c', '1d', '2'] a_outFileName='{0}_hist_{1}.hst'.format(videoFileName,numOfBits) b_outFileName = '{0}_blockdct_{1}.bct'.format(videoFileName, numOfBits) c_outFileName = '{0}_blockdwt_{1}.bwt'.format(videoFileName, numOfBits) d_outFileName='{0}_diff_{1}.dhc'.format(videoFileName,numOfBits) Task1.extract_video_portion(fullPath, width, height, numOfBits, a_outFileName,b_outFileName,c_outFileName,d_outFileName) for index, task_name in enumerate(tasks): if task_name == '1a': sorted_similarity=getSimilarFrames(a_outFileName,frameId) visualizeFrames(fullPath, sorted_similarity, frameId,"Task1a") elif task_name == '1b': sorted_similarity=getSimilarFrames(b_outFileName, frameId) visualizeFrames(fullPath, sorted_similarity, frameId, "Task1b") elif task_name == '1c': sorted_similarity=getSimilarFrames(c_outFileName, frameId) visualizeFrames(fullPath, sorted_similarity, frameId, "Task1c") elif task_name == '1d': sorted_similarity=getSimilarFramesForDiffQuantization(d_outFileName,fullPath,frameId,width,height,numOfBits) visualizeFrames(fullPath, sorted_similarity, frameId, "Task1d") elif task_name == '2':