# NOTE(review): the `def` line for this first function is above the visible
# chunk.  The name comes from the commented-out calls below; `x` and `y`
# (grid width/height) were presumably defined in the missing part of the
# body — confirm the real signature against the full file.
def makeDataMap(eyetrackList, x, y):
    """Build a y-row by x-column grid counting gaze samples per pixel.

    Each eyetrack record's "AbsoluteX"/"AbsoluteY" is used as a
    (column, row) index and the matching cell is incremented.
    """
    d = [[0] * x for i in range(y)]
    for eyetrack in eyetrackList:
        ex = eyetrack["AbsoluteX"]
        ey = eyetrack["AbsoluteY"]
        print("X:", ex, "Y:", ey)
        d[ey][ex] += 1
        print("New total:", d[ey][ex])
    return d


def refineTimeframe(eyetrackList, timeframe=(0, 60000)):
    """Return only the eyetrack records whose "Time" lies inside *timeframe*.

    *timeframe* is an inclusive ``(start_ms, end_ms)`` pair; the default
    keeps the first minute (0-60000 ms).
    """
    rtn = []
    for et in eyetrackList:
        time = et["Time"]
        if time >= timeframe[0] and time <= timeframe[1]:
            rtn.append(et)
    return rtn


#print(type(eyetrackList[1]))
#print "Total Eyetrack Events:", len(eyetrackList)
#refinedList = refineTimeframe(eyetrackList, (3000,4000))
#print "Events w/in seconds 3 and 4:", len(refinedList)
#makeDataMap(refinedList)
#print(makeDataMap(eyetrackList))
#pprint(makeDataMap(eyetrackList))

#Task2.run(sessions,eventList,eyetrackList)
Task4.run(sessions, eventList, eyetrackList)
def SplittingRecursive(image, left, right, top, bottom, depth=0):
    """Recursively split *image* about the centroid of the current window.

    Recurses 3 levels deep (4**3 = 64 leaf cells).  At each leaf the cell's
    rectangle, black-to-white transition count, aspect ratio, normalized
    black-pixel density, local centroid and centroid angle are appended to
    the module-level feature lists (rectangles, transitions, ratios,
    normalized, centroids, angles).
    """
    cx, cy, n = Task2.FindCentroid(image, left, right, top, bottom)
    if depth < 3:
        # Split the window into four quadrants about the centroid.
        # (The original comments mislabeled the 2nd and 3rd quadrants:
        # (cx, right, top, cy) is the top-right region, not bottom-left.)
        SplittingRecursive(image, left, cx, top, cy, depth + 1)      # Top Left
        SplittingRecursive(image, cx, right, top, cy, depth + 1)     # Top Right
        SplittingRecursive(image, left, cx, cy, bottom, depth + 1)   # Bottom Left
        SplittingRecursive(image, cx, right, cy, bottom, depth + 1)  # Bottom Right
    else:
        rectangles.append([(left, top), (right, bottom)])
        transitions.append(Task4.B2W_Transitions(image[top:bottom, left:right]))
        ratios.append(Task5.AspectRatio(left, right, top, bottom))
        # Task 6: cell area divided by its black-pixel count.
        size = (bottom - top) * (right - left)
        blacks = Task6.blackPixels(image, left, right, top, bottom)
        try:
            normalized.append(size / blacks)
        except ZeroDivisionError:
            # An all-white cell has no black pixels; record 0 instead.
            normalized.append(0)
        # Centroid expressed in the cell's own coordinate frame.
        cx, cy, n = Task2.FindCentroid(image[top:bottom, left:right], 0,
                                       right - left, 0, bottom - top)
        centroids.append((cx, cy))
        # Task 7: angle (degrees) of the centroid measured from the cell's
        # bottom edge.  NOTE(review): this raises ZeroDivisionError when the
        # centroid coincides with the bottom-right corner — presumably never
        # happens on real data; confirm.
        angle = math.degrees(math.acos(
            (bottom - top - cy)
            / (math.sqrt((right - left - cx) ** 2 + (bottom - top - cy) ** 2))))
        angles.append(angle)
def SplittingRecursive(image, left, right, top, bottom, depth=0):
    """Recursively split *image* about its centroid, 3 levels deep.

    At the leaves (depth == 3) the cell's black-to-white transition count,
    aspect ratio and centroid are appended to per-feature signature.txt
    files under ../Text/, and a rectangle is drawn onto the module-level
    bin_image.

    NOTE(review): this variant splits left/right on ``cy`` and top/bottom on
    ``cx`` — presumably FindCentroid returns (row, col) here; confirm before
    renaming.  The placement of the final ``return`` (every level vs. leaves
    only) is ambiguous in the mangled source; it is kept at function level.
    """
    cx, cy = Task2.FindCentroid(image, left, right, top, bottom)
    # print("(", top, "\t", left, ")\t(", bottom, "\t", right, ")\t", cx, "\t", cy, "\tDepth: ", depth)
    if depth < 3:
        SplittingRecursive(image, left, cy, top, cx, depth + 1)      # Top Left
        SplittingRecursive(image, cy, right, top, cx, depth + 1)     # Bottom Left
        SplittingRecursive(image, left, cy, cx, bottom, depth + 1)   # Top Right
        SplittingRecursive(image, cy, right, cx, bottom, depth + 1)  # Bottom Right
    else:
        t = Task4.B2W_Transitions(image, left, right, top, bottom)
        r = Task5.AspectRatio(left, right, top, bottom)
        filePath = "../Text/"
        # Create each output directory if missing.  (The original only made
        # the subdirectories when the *parent* was missing, which crashed the
        # open() calls whenever ../Text/ existed without them.)
        os.makedirs(filePath + "Transitions/", exist_ok=True)
        os.makedirs(filePath + "Ratios/", exist_ok=True)
        os.makedirs(filePath + "Centroids/", exist_ok=True)
        # Append this cell's features to the per-feature signature files.
        with open(filePath + "Transitions/" + "signature.txt", "a") as f:
            f.write(str(t) + "\n")
        with open(filePath + "Ratios/" + "signature.txt", "a") as f:
            f.write(str(r) + "\n")
        with open(filePath + "Centroids/" + "signature.txt", "a") as f:
            f.write(str(cx) + "," + str(cy) + "\n")
    # NOTE(review): cv2 points are (x, y); passing (top, left) looks swapped
    # but matches this file's row/col convention — confirm.
    return cv2.rectangle(bin_image, (top, left), (bottom, right), (0, 255, 0), 1)
def similar_document(first_book, second_book):
    """Return a sentence describing the similarity between two books.

    Both arguments must be one of the strings '1'..'4'; any other value
    yields the "Books only from 1 to 4" message without calling the scorer.
    """
    valid_books = ('1', '2', '3', '4')
    if first_book in valid_books and second_book in valid_books:
        # Delegate the actual scoring to Task4's sentence-similarity routine.
        percentage = Task4.sentence_similarity(first_book, second_book)
        return ("The similarity between the two documents is ="
                + str(percentage) + " percent")
    return "Books only from 1 to 4"
def similarity_of_all(string='all'):
    """Return a JSON object of pairwise similarity scores for books 1-4.

    Keys look like ``"1 and 4"``; values are the Task4.sentence_similarity
    score as a string.  Any argument other than 'all' (case-insensitive)
    returns "Not a required String".
    """
    if string.lower() != 'all':
        return "Not a required String"
    list_of_sim_matrix = {}
    # First book ascends 1..4 while the second descends 4..1, matching the
    # original while-loop iteration order (dicts preserve insertion order).
    for first in range(1, 5):
        for second in range(4, 0, -1):
            score = Task4.sentence_similarity(str(first), str(second))
            list_of_sim_matrix[str(first) + ' and ' + str(second)] = str(score)
    return json.dumps(list_of_sim_matrix)
# Central points of gaze mean the "clusters" of gaze. If this doesn't make sense, please # ask for clarification or skip it. ######################################## # ... ######################################################################################## # 3b) Create a separate movie showing this. ######################################## ######################################################################################## # 4) Figure out how to overlay the pictures on top of the actual video. The video can # be found here: http://g3.eyetrackshop.com/content/CT13C39DD417BLYWHILLT-SJJ ######################################## Task4.run(sessions, eventList, eyetrackList) ######################################################################################## # 5) Output a quality report in a test file, listing the % of sessions which are: # Complete - attribute session.last_state = "7.COMPLETE" # Usable - attribute eyetrack.quality = "GOOD" # Group eyetrack.quality failure reasons and output % of each ######################################## ######################################################################################## # 6) For each frame, add 4 'area of interest' rectangles which are the 4 quadrants of # the image. On each eye tracking frame, output a % noting in the middle of the frame in text # which says what percent of people who have data during that time period were looking # within the area of interest. ########################################
def main():
    """Run Task4's computation and feed its result to get_pi.

    NOTE(review): the original body also contained a bare
    ``Task4.task4_function`` expression statement — a no-op attribute
    lookup — which has been removed.
    """
    get_pi(Task4.task4_function())
print(Task2.cutByRangeFibonacci(inputNumber1, inputNumber2)) # 3. Напишите функцию, которая принимает на вход два параметра: a и b. print("<--------- Task 3 --------->") a = 12 b = "str" print(" out parameters are : {a}, {b}".format(a = a, b = b)) Task3.functionInRequest(a, b) # 4. Напишите функцию, которая принимает на вход три параметра print("<--------- Task 4 --------->") # # leap years c = [1900, 1904, 1908, 1964, 2020] startYear = 1900 finishYear = 2020 print(" our years' range : [ {s}, {f} ]".format(s = startYear, f = finishYear)) print(" exclude list from range : %s " % c ) print(Task4.listOfLeapYearsOnRange(startYear, finishYear, c)) # array for task 5-9 array = [1, "sd", 3, 1.2, 5.6, 0, -1] # 5. Найти сумму элементов массива print("<--------- Task 5 --------->") print(" given array : %s " % array) print(" array sum : %s " % Task5.array_sum(array)) # # 6. Найти максимальный элемент, значение и индекс. print("<--------- Task 6 --------->") print(" max in array and index : %s " % Task6.array_max(array)) # 7. Найти минимальный элемент, значение и индекс. print("<--------- Task 7 --------->") print(" min in array and index : %s " % Task7.array_min(array)) # 8. Посчитать количество элементов больше нуля. print("<--------- Task 8 --------->") print(" the amount of numbers greater than 0 : %s " % Task8.array_count(array))
# Task 1: bounding box of the character in the binarized image.
top, bottom, left, right, bounding_box_image = Task1.BoundingBox(width, height, bin_image, filename)
B = (left, right, top, bottom)
# cv2.imshow("Bounding Box", bounding_box_image)

# Task 2: centroid of the bounded region.
filename = "cen_" + filename
centroid_image, cx, cy = Task2.FindCentroid(width, height, bin_image, bounding_box_image, filename)
C = (cx, cy)
# cv2.imshow("Centroid", centroid_image)

# Task 3: split the bounding box into four quadrants about the centroid.
cx = int(cx)
cy = int(cy)
filename = "seg_" + filename
top_left, bottom_left, top_right, bottom_right, segmented_image = Task3.DivideBoundingBox(centroid_image, top, bottom, left, right, cx, cy, filename)
cv2.imshow("Top Left", top_left)
cv2.imshow("Bottom Left", bottom_left)
cv2.imshow("Top Right", top_right)
cv2.imshow("Bottom Right", bottom_right)

print("\nNumber of Black to White Transitions")

# Task 4: black-to-white transition count for each quadrant.
TL = Task4.B2W_Transitions(top_left, top_left.shape[0], top_left.shape[1], "Top Left")
BL = Task4.B2W_Transitions(bottom_left, bottom_left.shape[0], bottom_left.shape[1], "Bottom Left")
TR = Task4.B2W_Transitions(top_right, top_right.shape[0], top_right.shape[1], "Top Right")
BR = Task4.B2W_Transitions(bottom_right, bottom_right.shape[0], bottom_right.shape[1], "Bottom Right")
T = (TL, TR, BL, BR)

cv2.waitKey(0)
def _read_feature_model():
    """Prompt for the feature-model choice (1=CM, 2=LBP, 3=HOG, 4=SIFT)."""
    return int(input("Select the Feature Model:\n1. CM\t2. LBP\t3. HOG\t4. SIFT : "))


def _read_dimension_reduction():
    """Prompt for the dimensionality-reduction choice (1=PCA, 2=SVD, 3=NMF, 4=LDA)."""
    return int(
        input("Select the Dimension Reduction Technique:\n1. PCA\t2. SVD\t3. NMF\t4. LDA : ")
    )


def _read_k():
    """Prompt for the latent-semantic count k."""
    return int(input("Enter k: "))


def _read_label():
    """Prompt for the metadata-label choice (1-8)."""
    return int(
        input(
            "Select the label:\n1. Left\t2. Right\t3. Dorsal\t4. Palmar\n5. With accessories\t6. Without accessories\t7. Male\t8. Female: "
        )
    )


def main():
    """Interactive menu: read a task number, gather that task's inputs, dispatch.

    The repeated prompt blocks of the original are factored into the private
    helpers above; prompt text and prompt order are unchanged.
    """
    task = int(input("Input Task number:"))
    if task == 1:
        feature_model = _read_feature_model()
        dimension_reduction = _read_dimension_reduction()
        k = _read_k()
        Task1.starter(feature_model, dimension_reduction, k)
    elif task == 2:
        choice = input(
            "Do you want to go ahead with task 1 input configurations? yes(y) or no(n) "
        )
        image_id = input("Enter image ID: ")
        m = int(input("Enter m: "))
        # None signals Task2 to reuse the configuration saved by task 1.
        feature_model = dimension_reduction = k = None
        if choice == "n":
            feature_model = _read_feature_model()
            dimension_reduction = _read_dimension_reduction()
            k = _read_k()
        Task2.starter(feature_model, dimension_reduction, k, image_id, m)
    elif task == 3:
        feature_model = _read_feature_model()
        dimension_reduction = _read_dimension_reduction()
        k = _read_k()
        label = _read_label()
        Task3.starter(feature_model, dimension_reduction, k, label)
    elif task == 4:
        choice = input(
            "Do you want to go ahead with task 3 input configurations? yes(y) or no(n) "
        )
        image_id = input("Enter image ID: ")
        m = int(input("Enter m: "))
        # None signals Task4 to reuse the configuration saved by task 3.
        feature_model = dimension_reduction = k = label = None
        if choice == "n":
            feature_model = _read_feature_model()
            dimension_reduction = _read_dimension_reduction()
            k = _read_k()
            label = _read_label()
        Task4.starter(feature_model, dimension_reduction, k, label, image_id, m)
    elif task == 5:
        feature_model = _read_feature_model()
        dimension_reduction = _read_dimension_reduction()
        k = _read_k()
        label = _read_label()
        image_id = input("Enter image ID: ")
        Task5.starter(feature_model, dimension_reduction, k, label, image_id)
    elif task == 6:
        subject_id = int(input("Enter subject ID: "))
        Task6.starter(subject_id)
    elif task == 7:
        k = _read_k()
        Task7.starter(k)
    elif task == 8:
        # NOTE: this prompt's wording differed from the others in the original;
        # kept byte-for-byte.
        k = int(input("enter k : "))
        Task8.starter(k)
    elif task == 9:
        feature_model = _read_feature_model()
        dimension_reduction = _read_dimension_reduction()
        k = _read_k()
        visualizer = int(input("Enter Visualizer:\n1. Data \t2.Feature "))
        ExtraCredit.starter(feature_model, dimension_reduction, k, visualizer)
    else:
        print("Enter Task number (1-9)")
# NOTE(review): Python 2 source — note the `print` statement and the
# commented-out `raw_input` below.
from matplotlib import pyplot
import numpy as np
import pickle
from pylab import *
import Utils
import DataSet
import Classify
import PCAModule
import Task1
import Task2
import Task3
import Task4

# Dataset location and where generated plots are written.
data_path = "E:\\EE5907R\\project2\\project2_faces"
plot_save_path = "E:\\EE5907R\\project2\\"


def main():
    # Load the face dataset from the hard-coded path above.
    # data_path = raw_input("Enter path : ")
    DataSet.read_faces(data_path)
    print 2 * "\n*******************************************"


# project sequence
main()
# Task1.task1()
# Task2.task2()
# Task3.task3()
Task4.task4()
def test_linked_file_reading():
    """Smoke test: build a linked list from try1.txt and render it as a string.

    No assertion is made; the test only checks that reading and __str__ do
    not raise.
    """
    linked = Task4.linked_list_file_reading("try1.txt")
    str(linked)
def test_array_file_reading():
    """Smoke test: build an array list from try.txt and render it as a string.

    No assertion is made; the test only checks that reading and __str__ do
    not raise.
    """
    arr = Task4.array_list_file_reading("try.txt")
    str(arr)