Esempio n. 1
0
def VerletAlgorithm(coords, vels, forces, M, delta_t, N):
    """
    Integrate Newton's equations of motion with the velocity-Verlet scheme
    and write every time step's snapshot into an output file.

    Parameters
    ----------
    coords : numpy array
        Particle coordinates from the previous time step.
    vels : numpy array
        Particle velocities from the previous time step.
    forces : numpy array
        Particle forces from the previous time step.
    M : int
        Number of particles.
    delta_t : float
        Time step.
    N : int
        Number of iterations.

    Returns
    -------
    None.

    """
    # NOTE(review): L (box side length) is read from the enclosing scope,
    # not passed as a parameter — confirm it is defined at module level.
    snapshots = [Task2.createFilestring(M, L, coords, vels, "Time step 0")]
    for step in range(1, N):
        coords = calculateNewCoords(coords, vels, forces, M, L, delta_t)
        vels, forces = calculateNewVels(coords, vels, forces, M, L, delta_t)
        snapshots.append(Task2.createFilestring(M, L, coords, vels,
                                                f"Time step {step}"))
        print(f"finished timestep {step}")

    # Joining once avoids repeated string concatenation.
    Task2.writeFile("".join(snapshots), "trajectories.txt")
Esempio n. 2
0
def SplittingRecursive(image, left, right, top, bottom, depth=0):
    """
    Recursively split *image* at the centroid of the current window.

    At depth < 3 the window is quartered at the centroid (cx, cy) and each
    quadrant is processed recursively; at depth == 3 the leaf window's
    features are appended to the module-level lists ``rectangles``,
    ``transitions``, ``ratios``, ``normalized``, ``centroids`` and
    ``angles``.

    Parameters are the window bounds (left/right columns, top/bottom rows)
    and the recursion depth; the function returns None and communicates
    through the global lists.
    """
    cx, cy, n = Task2.FindCentroid(image, left, right, top, bottom)

    if depth < 3:
        # Quarter the window at the centroid: cx splits columns, cy rows.
        # (The original comments mislabeled the quadrants.)
        SplittingRecursive(image, left, cx, top, cy, depth + 1)  # top left
        SplittingRecursive(image, cx, right, top, cy, depth + 1)  # top right
        SplittingRecursive(image, left, cx, cy, bottom, depth + 1)  # bottom left
        SplittingRecursive(image, cx, right, cy, bottom,
                           depth + 1)  # bottom right
    else:
        rectangles.append([(left, top), (right, bottom)])
        transitions.append(Task4.B2W_Transitions(image[top:bottom,
                                                       left:right]))
        ratios.append(Task5.AspectRatio(left, right, top, bottom))

        # Task 6: window area normalised by the black-pixel count.
        size = (bottom - top) * (right - left)
        blacks = Task6.blackPixels(image, left, right, top, bottom)

        try:
            normalized.append(size / blacks)
        except ZeroDivisionError:  # all-white window: no black pixels
            # Narrowed from a bare 'except:' which also hid real bugs.
            normalized.append(0)

        # Centroid in window-local coordinates.
        cx, cy, n = Task2.FindCentroid(image[top:bottom, left:right], 0,
                                       right - left, 0, bottom - top)
        centroids.append((cx, cy))

        # Task 7: angle between the window's bottom-right corner and the
        # local centroid.
        angle = math.degrees(
            math.acos(
                (bottom - top - cy) / (math.sqrt((right - left - cx)**2 +
                                                 (bottom - top - cy)**2))))
        angles.append(angle)
Esempio n. 3
0
def VerletAlgorithm(coords, vels, forces, M, delta_t, N):
    """Run the velocity-Verlet integration for N time steps and dump the
    trajectory of every step to ``trajectories.txt``."""
    output = Task2.createFilestring(M, L, coords, vels, "Time step 0")
    step = 1
    while step < N:
        coords = calculateNewCoords(coords, vels, forces, M, L, delta_t)
        vels, forces = calculateNewVels(coords, vels, forces, M, L, delta_t)
        output = output + Task2.createFilestring(M, L, coords, vels,
                                                 f"Time step {step}")
        print(f"finished timestep {step}")
        step += 1

    Task2.writeFile(output, "trajectories.txt")
Esempio n. 4
0
def SplittingRecursive(image, left, right, top, bottom, depth=0):
	"""
	Recursively split *image* at its centroid and, at depth 3, append the
	leaf-window features (transitions, aspect ratio, centroid) to the
	signature files under ``../Text/``.

	Returns the image with the current window drawn as a rectangle.
	"""
	cx, cy = Task2.FindCentroid(image, left, right, top, bottom)

	if depth < 3:
		# NOTE(review): cy is used to split columns and cx to split rows
		# here — confirm FindCentroid's return order matches this usage.
		# (The original comments also mislabeled the quadrants.)
		SplittingRecursive(image, left, cy, top, cx, depth + 1)		# top left
		SplittingRecursive(image, cy, right, top, cx, depth + 1)	# top right
		SplittingRecursive(image, left, cy, cx, bottom, depth + 1)	# bottom left
		SplittingRecursive(image, cy, right, cx, bottom, depth + 1)	# bottom right
	else:
		t = Task4.B2W_Transitions(image, left, right, top, bottom)
		r = Task5.AspectRatio(left, right, top, bottom)

		filePath = "../Text/"
		# Create each output directory if missing.  exist_ok makes this
		# safe when the base path exists but a subfolder does not (the
		# original only created them when the base path was absent, and
		# then crashed on open()).
		os.makedirs(filePath + "Transitions/", exist_ok=True)
		os.makedirs(filePath + "Ratios/", exist_ok=True)
		os.makedirs(filePath + "Centroids/", exist_ok=True)

		# Append this leaf's features; 'with' guarantees the handles are
		# closed even if a write fails (the originals leaked on error).
		with open(filePath + "Transitions/" + "signature.txt", "a") as f:
			f.write(str(t) + "\n")
		with open(filePath + "Ratios/" + "signature.txt", "a") as f:
			f.write(str(r) + "\n")
		with open(filePath + "Centroids/" + "signature.txt", "a") as f:
			f.write(str(cx) + "," + str(cy) + "\n")

	# NOTE(review): draws on the module-level 'bin_image', not on the
	# 'image' argument, and passes points as (row, col) while cv2 expects
	# (x, y) — confirm intended.
	return cv2.rectangle(bin_image, (top, left), (bottom, right), (0,255,0), 1)
Esempio n. 5
0
def calculateNewVels(coords, vels, forces, M, L, delta_t):
    """
    Calculates the velocities of the current time step.

    Parameters
    ----------
    coords : numpy array
        Array of the particle coordinates from the current time step.
    vels : numpy array
        Array of the particle velocities from the previous time step.
    forces : numpy array
        Array of the particle forces from the current time step.
    M : int
        Number of particles.
    L : float
        Box side length.
    delta_t : float
        Time step.

    Returns
    -------
    newVels : numpy array
        Array of the particle velocities from the current time step.
    newForces : numpy array
        Array of the particle forces for the current coordinates (the
        original docstring wrongly called these velocities).

    """
    newForces = Task2.calculateInitialForces(M, L, coords)
    # Velocity-Verlet update: average the old and new forces over half a
    # step.  Plain array arithmetic allocates the result directly; the
    # original pre-allocated with full_like and wrote through redundant
    # [:][:] slices.
    newVels = vels + 0.5 * (forces + newForces) * delta_t
    return newVels, newForces
Esempio n. 6
0
def main():
    """Entry point: run task 2, read the requested video feature file,
    compute Euclidean distances/similarities and display the frames."""
    global videoFileName

    t2.main()  # Call the main function of task-2

    acceptInput()  # Accept user input (sets videoFileName)
    # Read the header line once and pull out its comma-separated
    # "name: value" fields; the original re-split the same line for every
    # field.  'with' guarantees the file is closed even on error.
    with open(videoFileName, 'r') as input_file:
        frameInfo = input_file.readline()
        fields = [part.split(':')[1].strip(' ') for part in frameInfo.split(',')]
        rows = int(fields[0])
        cols = int(fields[1])
        wavelets = int(fields[2])
        # Compute Euclidean distance and similarities from the rest of the
        # file.
        computeEucleadean(input_file)

    # Output
    showFrames()
Esempio n. 7
0
def main():
    """Interactive driver (Python 2): extract the chosen video feature.

    Prompts for a video path/name, dumps the original video information,
    then loops offering five feature-extraction options until the user
    enters 0.
    """

    # Set sample video location to a default location
    videoFilesPath = raw_input("Enter the path where the video files are: ")
    videoFileName = raw_input("Enter the name of video file: ")
    # create a videoInformationExtractor object for the chosen file
    videoInformationExtractor = VideoInformationExtractor.TestVideoInformationExtractor(
        videoFilesPath + "\\" + videoFileName)
    # Persist the raw video information next to the source file.
    videoInformationExtractor.videoInformation.dump(
        videoFilesPath + "\\" + videoFileName.split(".")[0] + ".original")

    while True:
        # Accept feature number to be extracted from user
        printLine()
        print "Following options are available for feature extraction"
        print "0. Exit"
        print "1. Extract n-bin histogram for 8x8 pixel blocks"
        print "2. Extract n most significant 2-D DCT components for 8x8 pixel blocks"
        print "3. Extract n most significant 2-D DWT components for 8x8 pixel blocks"
        print "4. Extract n-bin histogram for 8x8 pixel difference blocks"
        print "5. Extract m most significant 2-D DWT components per frame"
        # NOTE(review): Python 2 input() evaluates the typed expression —
        # int(raw_input(...)) would be safer against arbitrary input.
        feature = input("Enter option: ")

        if feature == 0:
            return
        if feature == 1:
            # Accept value n from user
            n = input("Enter the value of n: ")
            Task1a.extractFeature(videoInformationExtractor, videoFilesPath,
                                  videoFileName, n)
        if feature == 2:
            # Accept value n from user
            n = input("Enter the value of n: ")
            blockLevel2DDCTExtractor = Task1b.BlockLevel2DDCTExtractor(
                videoInformationExtractor, videoFilesPath, videoFileName, n)
            blockLevel2DDCTExtractor.extractFeature()
        if feature == 3:
            # Accept value n from user
            n = input("Enter the value of n: ")
            blockLevel2DDWTExtractor = Task1c.BlockLevel2DDWTExtractor(
                videoInformationExtractor, videoFilesPath, videoFileName, n)
            blockLevel2DDWTExtractor.extractFeature()
        if feature == 4:
            # Accept value n from user
            n = input("Enter the value of n: ")
            Task1d.extractFeature(videoInformationExtractor, videoFilesPath,
                                  videoFileName, n)
        if feature == 5:
            # Accept value m from user
            m = input("Enter the value of m: ")
            frameLevel2DDWTExtractor = Task2.FrameLevel2DDWTExtractor(
                videoInformationExtractor, videoFilesPath, videoFileName, m)
            frameLevel2DDWTExtractor.extractFeature()
def send_a_word(book_id, word):
    """
    Return "<stem>:<count>" for *word* in the given book.

    The word is stemmed with the English Snowball stemmer and looked up in
    the per-book stemmed-word counts.  The counts are taken from the
    cached database document when present, and otherwise (re)computed via
    Task2.stemming() and persisted.  Book ids outside '1'-'4' yield an
    error string.
    """
    # Guard clause replaces the original chained equality checks.
    if book_id not in ('1', '2', '3', '4'):
        return "Books only from 1 to 4"

    stemmer = SnowballStemmer('english')  # initialise the stemmer
    base_word = stemmer.stem(word)  # stem the incoming word

    # Fetch the cached document for this book, if any.
    query = db.processed_text.find_one({'book_id': book_id})

    if query and query["stemmed_words_count"]:
        # Counts already cached in the database — just decode them.
        stemmed_words_count = json.loads(query["stemmed_words_count"])
    elif query:
        # Document exists but the counts are missing (e.g. after a crash):
        # recompute and update the existing document so the next call hits
        # the cache.
        stemmed_words_count, stemmed_words = Task2.stemming()
        updateQueryTask2(book_id, stemmed_words, stemmed_words_count)
    else:
        # No document for this book yet: compute the counts and insert a
        # fresh document.
        stemmed_words_count, stemmed_words = Task2.stemming()
        insetQueryTask2(book_id, stemmed_words, stemmed_words_count)

    # Single exit point — the original repeated this expression in all
    # three branches.
    return base_word + ":" + str(
        stemmed_words_count.get(base_word, "Word not exists"))
Esempio n. 9
0
 def test_find_unique_classes(self):
     """find_unique_classes returns the distinct class names."""
     expected = {'Mammalia', 'Reptilia', 'Aves'}
     classes = Task2.import_columns('sub_table.csv')['class']
     self.assertEqual(Task2.find_unique_classes(classes), expected)
Esempio n. 10
0
 def test_get_organism_with_longevity_morethan(self):
     """Organisms with longevity above 150 years are the two known ids."""
     longevity = Task2.import_columns('sub_table.csv')['longevity_y']
     result = Task2.get_organism_with_longevity_morethan(longevity, long_y=150)
     self.assertEqual(result, [21255, 21265])
Esempio n. 11
0
def calculateNewVels(coords, vels, forces, M, L, delta_t):
    """Return (newVels, newForces): the velocity-Verlet velocities for the
    current step and the freshly evaluated forces for these coordinates."""
    newForces = Task2.calculateInitialForces(M, L, coords)
    # Average the old and new forces over half a step.  Direct arithmetic
    # replaces the original full_like pre-allocation and the redundant
    # [:][:] slice writes.
    newVels = vels + 0.5 * (forces + newForces) * delta_t
    return newVels, newForces
Esempio n. 12
0
import Task1
import Task2

# Invoke Task1.sets() five times, exactly as the original sequence of
# repeated calls did.
for _ in range(5):
    Task1.sets()

print(Task2.gets())
Esempio n. 13
0
# Stemmed data file: default to the CACM stem file, or take it from the
# third command-line argument when one is given.
lstemmeddatafile = CACM_STEM_FILE + FILE_EXT
if len(input_arguments) > 2:
    lstemmeddatafile = input_arguments[2]

# Tokenize raw text
tokenize_raw_data(ldatafilesdir)

# Write given queries to a file
create_directory(DIR_FOR_OUTPUT_FILES)
write_given_queries_to_file(
    CACM_QUERY_FILE + FILE_EXT,
    DIR_FOR_OUTPUT_FILES + "/" + FILE_FOR_QUERIES + FILE_EXT)

# Run the four retrieval systems.
Task1.execute_system(ldatafilesdir)
Task2.execute_system(ldatafilesdir)
Task3A.execute_system(ldatafilesdir)
Task3B.execute_system(lstemmeddatafile)

# Map run number -> folder holding that run's result files.  A single
# dict literal replaces the original one-assignment-per-key sequence.
ldictoffolderpaths = {
    1: DIR_FOR_OUTPUT_FILES + "/" + TASK1_CONST + "/" + DIR_FOR_BM25_OUTPUT,
    2: DIR_FOR_OUTPUT_FILES + "/" + TASK1_CONST + "/" + DIR_FOR_TFIDF_OUTPUT,
    3: LUCENE + "/" + LUCENE_RESULTS,
    4: DIR_FOR_OUTPUT_FILES + "/" + TASK2_CONST + "/" + DIR_FOR_BM25_OUTPUT,
    5: DIR_FOR_OUTPUT_FILES + "/" + TASK2_CONST + "/" + DIR_FOR_TFIDF_OUTPUT,
    6: DIR_FOR_OUTPUT_FILES + "/" + TASK3A_CONST + "/" + DIR_FOR_BM25_OUTPUT,
}
Esempio n. 14
0
    N : int
        Number of iterations.

    Returns
    -------
    None.

    """
    filestring = Task2.createFilestring(M, L, coords, vels, "Time step 0")
    for k in range(1, N):
        coords = calculateNewCoords(coords, vels, forces, M, L, delta_t)
        vels, forces = calculateNewVels(coords, vels, forces, M, L, delta_t)
        filestring += Task2.createFilestring(M, L, coords, vels,
                                             f"Time step {k}")
        print(f"finished timestep {k}")

    Task2.writeFile(filestring, "trajectories.txt")


if __name__ == "__main__":
    from time import perf_counter
    start = perf_counter()

    path, delta_t, N = readInputArguments(sys.argv)
    M, C, L, coords, vels = readFile(path)

    forces = Task2.calculateInitialForces(M, L, coords)
    VerletAlgorithm(coords, vels, forces, M, delta_t, N)
    end = perf_counter()
    print("execution time: ", end - start)
Esempio n. 15
0
# Task 0: binarise the input image.
bin_image = Task0.Binarization(image, filename)

filename = filename.split('/')[-1]  # keep only the basename
# Task 1: bounding box of the glyph.
height, width = bin_image.shape
filename = path + "box_" + filename
top, bottom, left, right = Task1.BoundingBox(bin_image, height, width)
# NOTE(review): cv2.rectangle takes (x, y) corner points; (top, left) is
# (row, col) — confirm this coordinate order is intended.
bounding_box_image = cv2.rectangle(bin_image, (top, left), (bottom, right), (0,255,0), 3)

cv2.imwrite(filename, bounding_box_image)
B = (left, right, top, bottom)

filename = filename.split('/')[-1]
# Task 2: centroid of the whole binary image.
filename = path + "cen_" + filename
cx, cy = Task2.FindCentroid(bin_image, 0, bin_image.shape[1], 0, bin_image.shape[0])
# NOTE(review): the circle centre is passed as (cy, cx) — verify against
# FindCentroid's return order.
centroid_image = cv2.circle(bounding_box_image, (cy, cx), 10, 200, -1)

cv2.imwrite(filename, centroid_image)
C = (cx, cy)

filename = filename.split('/')[-1]
# Task 3: split the bounding box into four quadrants at the centroid.
filename = path + "seg_" + filename
top_left, bottom_left, top_right, bottom_right, segmented_image = Task3.DivideBoundingBox(centroid_image, top, bottom, left, right, cx, cy)

cv2.imwrite(filename, segmented_image)
filename = filename.split('/')[-1]
CleaningDirectories("../Text/")  # clear previous signature files
# Recursively split and extract features, then show the annotated image.
image = SplittingRecursive(bin_image, left, right, top, bottom, 0)
cv2.imshow("image", image)
Esempio n. 16
0
import Task1
import Task2
import Task3

# Run both models of every task, in order: regression (Task1),
# multi-category (Task2) and multi-label (Task3).
for task_cls in (Task1.Task1, Task2.Task2, Task3.Task3):
    task = task_cls()
    task.model_1_run()
    task.model_2_run()
Esempio n. 17
0
# Task 0: binarise the input image.
filename = "bin_" + filename
bin_image = Task0.Binarization(image, filename)
# cv2.imshow("Binarization", bin_image)

# Task 1: bounding box.
# NOTE(review): numpy .shape is (rows, cols) = (height, width); the
# unpacking below swaps the names — confirm BoundingBox expects this.
width, height = bin_image.shape
filename = "box_" + filename
top, bottom, left, right, bounding_box_image = Task1.BoundingBox(width, height, bin_image, filename)
B = (left, right, top, bottom)
# cv2.imshow("Bounding Box", bounding_box_image)

# Task 2: centroid of the bounded glyph.
filename = "cen_" + filename
centroid_image, cx, cy = Task2.FindCentroid(width, height, bin_image, bounding_box_image, filename)
C = (cx, cy)
# cv2.imshow("Centroid", centroid_image)

# Task 3: quarter the bounding box at the (integer) centroid.
cx = int(cx)
cy = int(cy)
filename = "seg_" + filename
top_left, bottom_left, top_right, bottom_right, segmented_image = Task3.DivideBoundingBox(centroid_image, top, bottom, left, right, cx, cy, filename)

cv2.imshow("Top Left", top_left)
cv2.imshow("Bottom Left", bottom_left)
cv2.imshow("Top Right", top_right)
cv2.imshow("Bottom Right", bottom_right)

print("\nNumber of Black to White Transitions")
Esempio n. 18
0
def _ask_feature_model():
    """Prompt for and return the feature-model choice (1-4)."""
    return int(
        input("Select the Feature Model:\n1. CM\t2. LBP\t3. HOG\t4. SIFT : ")
    )


def _ask_dimension_reduction():
    """Prompt for and return the dimension-reduction technique (1-4)."""
    return int(
        input(
            "Select the Dimension Reduction Technique:\n1. PCA\t2. SVD\t3. NMF\t4. LDA : "
        )
    )


def _ask_k():
    """Prompt for and return the latent-semantics count k."""
    return int(input("Enter k: "))


def _ask_label():
    """Prompt for and return the image-label choice (1-8)."""
    return int(
        input(
            "Select the label:\n1. Left\t2. Right\t3. Dorsal\t4. Palmar\n5. With accessories\t6. Without accessories\t7. Male\t8. Female: "
        )
    )


def main():
    """Dispatch to the requested task's starter after collecting its
    interactive parameters.

    Behavior and prompt strings are unchanged; the prompt blocks that were
    duplicated up to five times are factored into the _ask_* helpers.
    """
    task = int(input("Input Task number:"))

    if task == 1:
        feature_model = _ask_feature_model()
        dimension_reduction = _ask_dimension_reduction()
        k = _ask_k()
        Task1.starter(feature_model, dimension_reduction, k)

    elif task == 2:
        choice = input(
            "Do you want to go ahead with task 1 input configurations? yes(y) or no(n) "
        )
        image_id = input("Enter image ID: ")
        m = int(input("Enter m: "))
        feature_model = dimension_reduction = k = None

        # Only re-ask when the user rejects the task-1 configuration.
        if choice == "n":
            feature_model = _ask_feature_model()
            dimension_reduction = _ask_dimension_reduction()
            k = _ask_k()

        Task2.starter(feature_model, dimension_reduction, k, image_id, m)

    elif task == 3:
        feature_model = _ask_feature_model()
        dimension_reduction = _ask_dimension_reduction()
        k = _ask_k()
        label = _ask_label()
        Task3.starter(feature_model, dimension_reduction, k, label)

    elif task == 4:
        choice = input(
            "Do you want to go ahead with task 3 input configurations? yes(y) or no(n) "
        )
        image_id = input("Enter image ID: ")
        m = int(input("Enter m: "))
        feature_model = dimension_reduction = k = label = None

        # Only re-ask when the user rejects the task-3 configuration.
        if choice == "n":
            feature_model = _ask_feature_model()
            dimension_reduction = _ask_dimension_reduction()
            k = _ask_k()
            label = _ask_label()

        Task4.starter(feature_model, dimension_reduction, k, label, image_id, m)

    elif task == 5:
        feature_model = _ask_feature_model()
        dimension_reduction = _ask_dimension_reduction()
        k = _ask_k()
        label = _ask_label()
        image_id = input("Enter image ID: ")
        Task5.starter(feature_model, dimension_reduction, k, label, image_id)

    elif task == 6:
        subject_id = int(input("Enter subject ID: "))
        Task6.starter(subject_id)

    elif task == 7:
        k = _ask_k()
        Task7.starter(k)

    elif task == 8:
        # The original prompt here differs from the other k prompts
        # ("enter k : "); kept verbatim to preserve behavior.
        k = int(input("enter k : "))
        Task8.starter(k)

    elif task == 9:
        feature_model = _ask_feature_model()
        dimension_reduction = _ask_dimension_reduction()
        k = _ask_k()
        visualizer = int(input("Enter Visualizer:\n1. Data \t2.Feature "))
        ExtraCredit.starter(feature_model, dimension_reduction, k, visualizer)

    else:
        print("Enter Task number (1-9)")
Esempio n. 19
0
 def test_count_no_of_species(self):
     """count_no_of_species returns the expected total for sub_table."""
     species = Task2.import_columns('sub_table.csv')['species']
     self.assertEqual(Task2.count_no_of_species(species), 21322)
def part_of_speech(book_id='all'):
    """
    Return a string of noun/verb statistics for one book or for all books.

    For book_id in '1'..'4': returns the total noun/verb counts plus the
    noun and verb dictionaries for that book, computing them (via
    Task2.stemming and Task3.part_of_speech) and caching them in the
    ``processed_text`` collection when absent.  For 'all'/'ALL': the same
    statistics aggregated over books 1-4 with collections.Counter.  Any
    other book_id returns an error string.
    """
    if book_id == '1' or book_id == '2' or book_id == '3' or book_id == '4':
        query = db.processed_text.find_one(
            {'book_id': book_id})  # fetch the cached document for this book
        if query:  # a document exists for this book
            if query["nouns"]:  # POS results already cached
                # NOTE(review): unlike every other branch this returns the
                # raw stored strings without a "nouns :" label and without
                # json.dumps — confirm the inconsistency is intended.
                return query['total_verbs_nouns'] + " " + query[
                    'nouns'] + "verbs :" + query[
                        "verbs"]  # cached totals, nouns and verbs
            else:  # document exists but nouns/verbs are missing
                if query[
                        "stemmed_words_count"]:  # stem counts cached, POS results not
                    stemmed_words_count = json.loads(
                        query["stemmed_words_count"]
                    )  # decode the cached stem counts
                    stemmed_words = json.loads(
                        query["stemmed_words"]
                    )  # decode the cached stemmed words
                    nouns, verbs = Task3.part_of_speech(
                        stemmed_words, stemmed_words_count
                    )  # tag the cached stems into nouns and verbs
                    total_noun_verbs = {
                        'total_nouns': len(nouns),
                        'total_verbs': len(verbs)
                    }  # summary counts for both categories
                    updateQueryTask3(
                        book_id, nouns, verbs, total_noun_verbs, stemmed_words,
                        stemmed_words_count
                    )  # cache the POS results on the existing document
                    return json.dumps(total_noun_verbs) + json.dumps(
                        nouns
                    ) + "verbs : " + json.dumps(
                        verbs
                    )  # totals, then nouns, then verbs

                else:  # document has neither POS results nor stem counts
                    stemmed_words_count, stemmed_words = Task2.stemming(
                        book_id
                    )  # recompute the stems for this book
                    nouns, verbs = Task3.part_of_speech(
                        stemmed_words, stemmed_words_count
                    )  # tag the stems into nouns and verbs
                    total_noun_verbs = {
                        'total_nouns': len(nouns),
                        'total_verbs': len(verbs)
                    }  # summary counts for both categories
                    updateQueryTask3(
                        book_id, nouns, verbs, total_noun_verbs, stemmed_words,
                        stemmed_words_count
                    )  # cache stems and POS results on the document
                    return json.dumps(
                        total_noun_verbs
                    ) + "nouns :" + json.dumps(nouns) + "verbs : " + json.dumps(
                        verbs
                    )  # totals, then nouns, then verbs
        else:  # no document exists for this book yet
            stemmed_words_count, stemmed_words = Task2.stemming(
                book_id
            )  # compute the stems for this book
            # task3
            nouns, verbs = Task3.part_of_speech(
                stemmed_words, stemmed_words_count
            )  # tag the stems into nouns and verbs
            total_noun_verbs = {
                'total_nouns': len(nouns),
                'total_verbs': len(verbs)
            }  # summary counts for both categories
            insetQueryTask3(
                book_id, nouns, verbs, total_noun_verbs, stemmed_words,
                stemmed_words_count)  # insert a fresh cached document

            return json.dumps(total_noun_verbs) + "nouns:" + json.dumps(
                nouns
            ) + "verbs : " + json.dumps(
                verbs
            )  # totals, then nouns, then verbs
    elif book_id == 'all' or book_id == 'ALL':
        list_of_all_books = ['1', '2', '3', '4']  # every known book id
        my_dict_total_nv = {}  # aggregated total noun/verb counts
        my_dict_total_n = {}  # aggregated per-noun counts over all books
        my_dict_total_v = {}  # aggregated per-verb counts over all books
        for book_no in list_of_all_books:  # process each book in turn
            query = db.processed_text.find_one(
                {'book_id':
                 book_no})  # fetch the cached document for this book
            if query:  # a document exists for this book
                if query["nouns"]:  # POS results already cached
                    nouns = json.loads(
                        query["nouns"]
                    )  # decode the cached nouns
                    verbs = json.loads(
                        query["verbs"]
                    )  # decode the cached verbs
                    total_noun_verbs = json.loads(
                        query['total_verbs_nouns']
                    )  # decode the cached summary counts
                else:  # document exists but POS results are missing
                    if query[
                            "stemmed_words_count"]:  # stem counts cached, POS results not
                        stemmed_words_count = json.loads(
                            query["stemmed_words_count"]
                        )  # decode the cached stem counts
                        stemmed_words = json.loads(
                            query["stemmed_words"]
                        )  # decode the cached stemmed words
                        nouns, verbs = Task3.part_of_speech(
                            stemmed_words, stemmed_words_count
                        )  # tag the cached stems into nouns and verbs
                        total_noun_verbs = {
                            'total_nouns': len(nouns),
                            'total_verbs': len(verbs)
                        }  # summary counts for both categories
                        updateQueryTask3(
                            book_no, nouns, verbs, total_noun_verbs,
                            stemmed_words, stemmed_words_count
                        )  # cache the POS results on the existing document

                    else:  # document has neither POS results nor stem counts
                        stemmed_words_count, stemmed_words = Task2.stemming(
                            book_no
                        )  # recompute the stems for this book
                        nouns, verbs = Task3.part_of_speech(
                            stemmed_words, stemmed_words_count
                        )  # tag the stems into nouns and verbs
                        total_noun_verbs = {
                            'total_nouns': len(nouns),
                            'total_verbs': len(verbs)
                        }  # summary counts for both categories
                        updateQueryTask3(
                            book_no, nouns, verbs, total_noun_verbs,
                            stemmed_words, stemmed_words_count
                        )  # cache stems and POS results on the document

            else:  # no document exists for this book yet

                stemmed_words_count, stemmed_words = Task2.stemming(
                    book_no
                )  # compute the stems for this book
                # task3
                nouns, verbs = Task3.part_of_speech(
                    stemmed_words, stemmed_words_count
                )  # tag the stems into nouns and verbs
                total_noun_verbs = {
                    'total_nouns': len(nouns),
                    'total_verbs': len(verbs)
                }  # summary counts for both categories
                insetQueryTask3(book_no, nouns, verbs, total_noun_verbs,
                                stemmed_words, stemmed_words_count
                                )  # insert a fresh cached document

            # Fold this book's results into the running aggregates;
            # Counter addition sums values for shared keys.
            my_dict_total_nv = Counter(my_dict_total_nv) + Counter(
                total_noun_verbs
            )  # aggregated total noun/verb counts over all books
            my_dict_total_n = Counter(my_dict_total_n) + Counter(
                nouns)  # aggregated noun counts over all books
            my_dict_total_v = Counter(my_dict_total_v) + Counter(
                verbs)  # aggregated verb counts over all books

        return json.dumps(my_dict_total_nv) + "nouns :" + json.dumps(
            my_dict_total_n) + " verbs " + json.dumps(
                my_dict_total_v
            )  # aggregated totals, nouns and verbs over all books
    return "task 3 Books only from 1 to 4"
Esempio n. 21
0
 def test_count_no_of_genus(self):
     """count_no_of_genus returns the expected total for sub_table."""
     genus = Task2.import_columns('sub_table.csv')['genus']
     self.assertEqual(Task2.count_no_of_genus(genus), 4336)
Esempio n. 22
0
#     http://johnnyholland.org/wp-content/uploads/193383382_cf3b3bd6d0_o.png
# Help on drawing: http://quickies.seriot.ch/index.php?id=256
########################################

# 1) Render the gaze data for the given sessions.
Task1.run(sessions, eventList, eyetrackList)



########################################################################################
# 2) Create an image for each 0.25 seconds and output them to a directory,
# using FFMPEG or other lib to combine them all together into a movie.
# Hint: create all images first, then, in a separate step, call FFMPEG
# to have it combine them itself.
########################################

Task2.run(sessions, eventList, eyetrackList)




########################################################################################
# 3) Identify the central point(s) of gaze over a period of time and
# overlay a transparent white dot on the 1-3 areas of interest.
# The number should depend on whether everyone is looking in a single
# place or if they are all looking in different places.
#
# Central points of gaze mean the "clusters" of gaze. If this doesn't make sense, please
# ask for clarification or skip it.
########################################

# ... (task 3 not implemented)
Esempio n. 23
0
def evaluate_all_systems(p_dictoffolderpaths, p_run_all_systems=True):
    """Evaluate every retrieval system's result files against the CACM queries.

    Parameters:
        p_dictoffolderpaths: dict mapping a system key to the folder holding
            that system's retrieval results.
        p_run_all_systems: when True, re-run every system first so fresh
            result files exist before evaluating.

    Side effects: writes one precision/recall CSV and one P@5/P@20 CSV per
    system, plus an all-systems mean-values CSV and an all-systems
    average-precision CSV, then runs t-tests across the models.

    NOTE(review): ldirpath, CACM_QUERY_FILE, FILE_EXT, the FILE_FOR_* names
    and the *_CONST keys are module-level globals defined elsewhere in this
    file -- confirm before moving this function. (Python 2 code: print
    statements and dict.iteritems.)
    """

    # Optionally regenerate every system's result files before evaluating.
    if p_run_all_systems:
        Task1.execute_system('')
        Task2.execute_system('')
        Task3A.execute_system('')
        Task3B.execute_system('')

    print "Evaluating all systems..."

    create_directory(ldirpath)

    # Load the given queries keyed by query id, sorted for stable output.
    lquerydict = get_given_queries_in_dict(CACM_QUERY_FILE + FILE_EXT)
    lquerydict = get_sorted_dict(lquerydict)

    lallsystemsmeanvaluesdict = {}
    lallsystemsavgprecisionvaluesdict = {}
    for lkey, lvalue in p_dictoffolderpaths.iteritems():

        lsystemname = get_system_name(lkey)
        print "Evaluating system: " + lsystemname

        # Fresh per-system accumulators; evaluate_system fills them in place.
        lsystemmeanvaluesdict = {}
        ldictofavgprecisionvalues = {}
        llistofprecisionandrecallvalues = [
            "Query Id,DocId,Rank,Precision,Recall"
        ]
        llistofpatkvalues = ["Query Id,P@5,P@20"]

        evaluate_system(
            lvalue,  # results folder path to evaluate
            lquerydict,  # dictionary containing all queries with query id
            lsystemmeanvaluesdict,  # dictionary to hold the mean values for all systems
            ldictofavgprecisionvalues,  # dictionary to hold avg precision values of systems
            llistofprecisionandrecallvalues,  # results of precision recall values for all queries
            llistofpatkvalues  # list to hold pat5 and pat20 values for all queries for this system
        )  # evaluate_system..

        lallsystemsmeanvaluesdict[lkey] = lsystemmeanvaluesdict

        # ldictofavgprecisionvalues = get_sorted_dict(ldictofavgprecisionvalues)
        lallsystemsavgprecisionvaluesdict[lkey] = ldictofavgprecisionvalues

        # print "Writing Precision and Recall values for system: " + lsystemname
        llfilename = ldirpath + "/" + FILE_FOR_PRECISON_RECALL_RESULTS_OF_SYSTEM + "_" + lsystemname + CSV_FILE_EXT
        create_file(llfilename, '')
        convert_data_from_collection_to_file(llfilename,
                                             llistofprecisionandrecallvalues)

        # print "Writing P@5 and P@20 values for system: " + lsystemname
        llfilename = ldirpath + "/" + FILE_FOR_PATK_RESULTS_OF_SYSTEM + "_" + lsystemname + CSV_FILE_EXT
        create_file(llfilename, '')
        convert_data_from_collection_to_file(llfilename, llistofpatkvalues)

    # One CSV row of MAP/MRR/P@5/P@20 per system.
    # print "Writing mean values to file"
    llistofmeanvalues = ["System,MAP,MRR,P@5,P@20"]
    for lkey, lvalue in lallsystemsmeanvaluesdict.iteritems():
        lsystemname = get_system_name(lkey)
        lstr = lsystemname + "," + str(lvalue[MAP_CONST]) + "," + str(lvalue[MRR_CONST]) + \
               "," + str(lvalue[PAT5_CONST]) + "," + str(lvalue[PAT20_CONST])
        llistofmeanvalues.append(lstr)

    lfilename = ldirpath + "/" + FILE_FOR_ALL_SYSTEMS_MEAN_VALUES + CSV_FILE_EXT
    create_file(lfilename, '')
    convert_data_from_collection_to_file(lfilename, llistofmeanvalues)

    # One CSV row of average precision per (system, query) pair.
    # print "Writing average precision values to file"
    llistavgprecisionresults = ["System,Query Id,Average Precision"]
    for lkey, lvalue in lallsystemsavgprecisionvaluesdict.iteritems():
        lsystemname = get_system_name(lkey)
        for ljkey, ljvalue in lvalue.iteritems():
            lstr = lsystemname + "," + str(ljkey) + "," + str(ljvalue)
            llistavgprecisionresults.append(lstr)

    lfilename = ldirpath + "/" + FILE_FOR_ALL_SYSTEMS_AVG_PRECISION_VALUES + CSV_FILE_EXT
    create_file(lfilename, '')
    convert_data_from_collection_to_file(lfilename, llistavgprecisionresults)

    # print "Run t-tests for models"
    run_tests_for_models(lallsystemsavgprecisionvaluesdict, len(lquerydict))
def stemmed_lemmatized(book_id='all'):
    """Return stemmed-word counts for a single book ('1'-'4') or all books.

    For a single book the counts are served from the ``processed_text``
    collection when cached there; otherwise they are computed with
    ``Task2.stemming`` and written back (insert or update).  For
    ``'all'``/``'ALL'`` the per-book counts are merged via
    ``collections.Counter`` addition and returned as JSON.  Any other id
    yields the string "Books only from 1 to 4".

    NOTE(review): the single-book cache hit returns the stored value as-is
    while every other path returns ``json.dumps(...)`` -- presumably the
    cached value is already a JSON string (the all-books path json.loads
    it); confirm against the writers.
    """
    if book_id in ('1', '2', '3', '4'):
        record = db.processed_text.find_one({'book_id': book_id})
        if record:
            if record["stemmed_words_count"]:
                # Cached in the db: hand the stored value back directly.
                return record["stemmed_words_count"]
            # Document exists but holds no counts yet: compute and update it.
            counts, words = Task2.stemming(book_id)
            updateQueryTask2(book_id, words, counts)
            return json.dumps(counts)
        # No document for this book: compute the counts and insert one.
        counts, words = Task2.stemming(book_id)
        insetQueryTask2(book_id, words, counts)
        return json.dumps(counts)
    if book_id in ('all', 'ALL'):
        merged = {}  # running Counter-sum across all four books
        for book_no in ('1', '2', '3', '4'):
            record = db.processed_text.find_one({'book_id': book_no})
            if record and record["stemmed_words_count"]:
                # Cached value is stored as a JSON string -- parse it first.
                counts = json.loads(record["stemmed_words_count"])
            elif record:
                # Document present but counts missing: recompute and update.
                counts, words = Task2.stemming(book_no)
                updateQueryTask2(book_no, words, counts)
            else:
                # Nothing stored yet: compute and insert a fresh document.
                counts, words = Task2.stemming(book_no)
                insetQueryTask2(book_no, words, counts)
            # Counter addition unions the keys and sums counts of shared keys.
            merged = Counter(merged) + Counter(counts)
        return json.dumps(merged)
    return "Books only from 1 to 4"
Esempio n. 25
0
def run(sessions, eventList, eyetrackList):
    print "Downloading session video for parsing..."
    # download session video
    opener = urllib2.build_opener(urllib2.HTTPHandler)
    request = urllib2.Request("http://g3.eyetrackshop.com/content/" + eyetrackList[0]["ContentID"])
    request.get_method = lambda: "GET"
    url = opener.open(request)
    tempdir = tempfile.tempdir = tempfile.mkdtemp()
    movie = tempfile.NamedTemporaryFile(mode="w+", suffix=".mp4", delete=False)
    movie.write(url.read())
    moviename = movie.name
    movie.close()
    print "I've loaded it into:\n", moviename

    # split apart session video into frame images
    # ffmpeg -i inputfile.avi -r 12 -f image2 CT13C39DD417BLYWHILLT-SJJ-f%4d.jpeg
    print "Using superhuman strength to split movie into images..."
    movieframedir = tempdir + "/movieframes/"
    os.mkdir(movieframedir)
    args = {
            "inputfile":    moviename,
            "framerate":    12,
            "outputfilename":movieframedir + eyetrackList[0]["ContentID"] + "-f%4d.png"
            }
    command = "ffmpeg -i %(inputfile)s -r %(framerate)d -f image2 %(outputfilename)s" % args
    ffmpeg_proc = subprocess.Popen(command, shell=True, bufsize= -1, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    io = ffmpeg_proc.communicate()
    if not io[1] == None: pass  # TODO: Automagically troubleshoot ffmpeg
    print "Success! We have movie frames!\n", movieframedir

    # generate gaze images
    print "Generating Gaze Frames..."
    gazedir = tempdir + "/gaze_frames/"
    granule = 1000 / args["framerate"]
    print granule
    Task2.generateGazeFrames(eyetrackList, dirname=gazedir, granularity=granule)
    print "Done Gazing into the gazes of others. See what I saw here:\n", gazedir

    # add session frame images and gaze images together
    # Image.composite(sessionframe, gazeimage, gazeimage)
    print "Making composite frames..."
    compdir = tempdir + "/composite_frames/"
    os.mkdir(compdir)
    i = 0
    movieframelist = os.listdir(movieframedir)
    gazeframelist = os.listdir(gazedir)
    for gazeframename in gazeframelist:
        movieframe = Image.open(movieframedir + movieframelist[i])
        gazeframe = Image.open(gazedir + gazeframename)
        # compimg = Image.composite(movieframe, gazeframe, gazeframe)
        # compimg.save(compdir + str(i) + ".png", "PNG")
        movieframe.paste(gazeframe, (0, 0), gazeframe)
        movieframe.save(compdir + str(i) + ".png", "PNG")
        i += 1
    print "Done! Find your composite frames here:\n", compdir

    # TODO: recomposite images into video
    # ffmpeg -f image2 -r 4 -i CT13C39DD417BLYWHILLT-SJJ-gaze-comp-%4d.png -vcodec mpeg4 -r 30 CT13C39DD417BLYWHILLT-SJJ-gaze-overlay.mp4
    args = {
            "fps":          12,
            "inputfilesmask":    compdir + "%d.png",
            "outputfilename": tempdir + "/" + eyetrackList[0]["ContentID"] + "-gaze_composite.mp4"
            }

    command = "ffmpeg -f image2 -r %(fps)d -i %(inputfilesmask)s -vcodec mpeg4 -r 30 %(outputfilename)s" % args
    print("Now lets pull all those images into a movie for your gazing enjoyment")
    # TODO: Figure out why making this movie hangs python...
    #    it's worked before. It only hangs sometimes.  Memory issues? I think my machine has memory issues.
    ffmpeg_proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    io = ffmpeg_proc.communicate()
    print(io[0])
    if not io[1] == None: print "Houston, we had a problem. with ffmpeg"  # TODO: Automagically troubleshoot ffmpeg
    else:
Esempio n. 26
0
                        help='pure training')
    parser.add_argument('--test', type=bool, default=False, help='test_phase')
    parser.add_argument('--novel_path', type=str, help='training path')
    #parser.add_argument('--train_path',type=str,default='task2-dataset/base',help='testing path')
    parser.add_argument('--test_path', type=str, help='testing path')
    parser.add_argument('--ensemble',
                        type=bool,
                        default=False,
                        help='ensemble?')
    parser.add_argument('--way', type=bool, default=False, help='ensemble?')
    parser.add_argument('--outfile', type=str, help='outfile_csv')
    args = parser.parse_args()
    print(args)

    import Task2
    model = Task2.Task2(args)
    #print(args.train)
    if args.train:
        model.train()
    elif args.test:
        model.test()
    else:
        model.ensemble()
    '''
	
	import Task2_1

	model = Task2_1.Task2(args)

	if args.way:
		model.train()
Esempio n. 27
0
# This is a sample Python script.
import Task1, Task2, Task3, Task4, Task5, Task6, Task7, Task8, Task9, Task10, Task11, Task12

# Press the green button in the gutter to run the script.
# Bug fix: this block was indented one level for no reason, which is an
# IndentationError at module scope; it must start at column 0.
if __name__ == '__main__':
    print(" Enter first number : ")
    inputNumber1 = int(input())
    print(" Enter second number : ")
    inputNumber2 = int(input())
    # 1. Build the list of all odd numbers in the range [a, b].
    print("<--------- Task 1 --------->")
    print(Task1.getOddNumbersFromRange(inputNumber1, inputNumber2))
    # 2. Build the list of all Fibonacci numbers in the range [a, b].
    print("<--------- Task 2 --------->")
    print(Task2.cutByRangeFibonacci(inputNumber1, inputNumber2))
    # 3. Function that takes two parameters: a and b.
    print("<--------- Task 3 --------->")
    a = 12
    b = "str"
    print(" out parameters are : {a}, {b}".format(a = a, b = b))
    Task3.functionInRequest(a, b)
    # 4. Function that takes three parameters.
    print("<--------- Task 4 --------->")
    # leap years to exclude from the range
    c = [1900, 1904, 1908, 1964, 2020]
    startYear = 1900
    finishYear = 2020
    print(" our years' range : [ {s}, {f} ]".format(s = startYear, f = finishYear))
    print(" exclude list from range : %s " % c )
    print(Task4.listOfLeapYearsOnRange(startYear, finishYear, c))
    # array shared by tasks 5-9
    array = [1, "sd", 3, 1.2, 5.6, 0, -1]
Esempio n. 28
0
 def test_task2_zeroInFib(self):
     """Zero must be excluded from the Fibonacci values on [0, 3]."""
     fib_slice = Task2.cutByRangeFibonacci(0, 3)
     self.assertTrue(0 not in fib_slice)
Esempio n. 29
0
def run(sessions, eventList, eyetrackList):
    print "Downloading session video for parsing..."
    # download session video
    opener = urllib2.build_opener(urllib2.HTTPHandler)
    request = urllib2.Request("http://g3.eyetrackshop.com/content/" +
                              eyetrackList[0]["ContentID"])
    request.get_method = lambda: "GET"
    url = opener.open(request)
    tempdir = tempfile.tempdir = tempfile.mkdtemp()
    movie = tempfile.NamedTemporaryFile(mode="w+", suffix=".mp4", delete=False)
    movie.write(url.read())
    moviename = movie.name
    movie.close()
    print "I've loaded it into:\n", moviename

    # split apart session video into frame images
    # ffmpeg -i inputfile.avi -r 12 -f image2 CT13C39DD417BLYWHILLT-SJJ-f%4d.jpeg
    print "Using superhuman strength to split movie into images..."
    movieframedir = tempdir + "/movieframes/"
    os.mkdir(movieframedir)
    args = {
        "inputfile":
        moviename,
        "framerate":
        12,
        "outputfilename":
        movieframedir + eyetrackList[0]["ContentID"] + "-f%4d.png"
    }
    command = "ffmpeg -i %(inputfile)s -r %(framerate)d -f image2 %(outputfilename)s" % args
    ffmpeg_proc = subprocess.Popen(command,
                                   shell=True,
                                   bufsize=-1,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
    io = ffmpeg_proc.communicate()
    if not io[1] == None: pass  # TODO: Automagically troubleshoot ffmpeg
    print "Success! We have movie frames!\n", movieframedir

    # generate gaze images
    print "Generating Gaze Frames..."
    gazedir = tempdir + "/gaze_frames/"
    granule = 1000 / args["framerate"]
    print granule
    Task2.generateGazeFrames(eyetrackList,
                             dirname=gazedir,
                             granularity=granule)
    print "Done Gazing into the gazes of others. See what I saw here:\n", gazedir

    # add session frame images and gaze images together
    # Image.composite(sessionframe, gazeimage, gazeimage)
    print "Making composite frames..."
    compdir = tempdir + "/composite_frames/"
    os.mkdir(compdir)
    i = 0
    movieframelist = os.listdir(movieframedir)
    gazeframelist = os.listdir(gazedir)
    for gazeframename in gazeframelist:
        movieframe = Image.open(movieframedir + movieframelist[i])
        gazeframe = Image.open(gazedir + gazeframename)
        # compimg = Image.composite(movieframe, gazeframe, gazeframe)
        # compimg.save(compdir + str(i) + ".png", "PNG")
        movieframe.paste(gazeframe, (0, 0), gazeframe)
        movieframe.save(compdir + str(i) + ".png", "PNG")
        i += 1
    print "Done! Find your composite frames here:\n", compdir

    # TODO: recomposite images into video
    # ffmpeg -f image2 -r 4 -i CT13C39DD417BLYWHILLT-SJJ-gaze-comp-%4d.png -vcodec mpeg4 -r 30 CT13C39DD417BLYWHILLT-SJJ-gaze-overlay.mp4
    args = {
        "fps":
        12,
        "inputfilesmask":
        compdir + "%d.png",
        "outputfilename":
        tempdir + "/" + eyetrackList[0]["ContentID"] + "-gaze_composite.mp4"
    }

    command = "ffmpeg -f image2 -r %(fps)d -i %(inputfilesmask)s -vcodec mpeg4 -r 30 %(outputfilename)s" % args
    print(
        "Now lets pull all those images into a movie for your gazing enjoyment"
    )
    # TODO: Figure out why making this movie hangs python...
    #    it's worked before. It only hangs sometimes.  Memory issues? I think my machine has memory issues.
    ffmpeg_proc = subprocess.Popen(command,
                                   shell=True,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
    io = ffmpeg_proc.communicate()
    print(io[0])
    if not io[1] == None:
        print "Houston, we had a problem. with ffmpeg"  # TODO: Automagically troubleshoot ffmpeg
    else:
        print "Success! Your gaze movie is located here:\n", args[
            "outputfilename"]
Esempio n. 30
0
 def test_task2_fibValueForOne(self):
     self.assertTrue(1 in Task2.cutByRangeFibonacci(0, 3))
Esempio n. 31
0
# gradient.
# It should look roughly like this, but without the web page:
#     http://johnnyholland.org/wp-content/uploads/193383382_cf3b3bd6d0_o.png
# Help on drawing: http://quickies.seriot.ch/index.php?id=256
########################################

# NOTE(review): sessions, eventList and eyetrackList are presumably built
# earlier in this script -- confirm upstream before refactoring.
Task1.run(sessions, eventList, eyetrackList)

########################################################################################
# 2) Create an image for each 0.25 seconds and output them to a directory,
# using FFMPEG or other lib to combine them all together into a movie.
# Hint, create all images first then, in a separate step, call FFMPEG
# to have it combine them itself.
########################################

Task2.run(sessions, eventList, eyetrackList)

########################################################################################
# 3) Identify the central point(s) of gaze over a period of time and
# overlay a transparent white dot on the 1-3 areas of interest.
# the number should depend on if everyone is looking in a single
# place of if they are all looking in different places.
#
# Central points of gaze mean the "clusters" of gaze. If this doesn't make sense, please
# ask for clarification or skip it.
########################################

# ...

########################################################################################
# 3b) Create a separate movie showing this.
Esempio n. 32
0
 def test_get_organism_with_body_mass(self):
     """Organisms with an adult body mass of 114 g match the expected ids."""
     table_columns = Task2.import_columns('sub_table.csv')
     matching_ids = Task2.get_organism_with_body_mass(table_columns['adult_body_mass_g'], bm=114)
     self.assertEqual(matching_ids, [1342, 1645, 4309, 7329])
Esempio n. 33
0
occ_total = {}

# declare a counter for loop control
count = 0

Task1.print_structure()

# combine the functions imported for Task2.py and Task3.py together,
# keep taking and processing the user input until the input is an empty string
while True:

    # for the first loop (count = 0),
    # assign the return value of Task2.take_input (should just be an input from user)
    # to "user_input" variable for later use
    if count == 0:
        user_input = Task2.take_input()

    # for later loops (count > 0), break the loop if the user input is empty
    elif count > 0 and user_input == "":
        break

    # keep taking user input if its not empty
    else:
        user_input = Task2.take_input()

    # check if user input is empty string,
    # stop the loop if it is
    if user_input == "":
        break

    # if user input is not empty string,
Esempio n. 34
0
  # NOTE(review): this is the interior of an enclosing function whose header
  # is above this chunk; videoFileName, numOfBits, fullPath, width, height,
  # frameId and numOfSignComp are presumably its parameters/locals -- confirm.
  tasks=['1a', '1b', '1c', '1d', '2']

  # One output file per extraction flavour (histogram, block DCT, block DWT,
  # frame-difference), each name tagged with the bit depth.
  a_outFileName='{0}_hist_{1}.hst'.format(videoFileName,numOfBits)
  b_outFileName = '{0}_blockdct_{1}.bct'.format(videoFileName, numOfBits)
  c_outFileName = '{0}_blockdwt_{1}.bwt'.format(videoFileName, numOfBits)
  d_outFileName='{0}_diff_{1}.dhc'.format(videoFileName,numOfBits)
  Task1.extract_video_portion(fullPath, width, height, numOfBits, a_outFileName,b_outFileName,c_outFileName,d_outFileName)

  # For each requested task, rank frames by similarity to frameId against the
  # matching feature file and display the results.
  for index, task_name in enumerate(tasks):
    if task_name == '1a':
      sorted_similarity=getSimilarFrames(a_outFileName,frameId)
      visualizeFrames(fullPath, sorted_similarity, frameId,"Task1a")
    elif task_name == '1b':
      sorted_similarity=getSimilarFrames(b_outFileName, frameId)
      visualizeFrames(fullPath, sorted_similarity, frameId, "Task1b")
    elif task_name == '1c':
      sorted_similarity=getSimilarFrames(c_outFileName, frameId)
      visualizeFrames(fullPath, sorted_similarity, frameId, "Task1c")
    elif task_name == '1d':
      # Difference encoding needs the raw video again to de-quantize.
      sorted_similarity=getSimilarFramesForDiffQuantization(d_outFileName,fullPath,frameId,width,height,numOfBits)
      visualizeFrames(fullPath, sorted_similarity, frameId, "Task1d")
    elif task_name == '2':
      outFileName = getOutFileName(videoFileName, numOfBits, numOfSignComp, task_name)
      Task2.extract_video(fullPath, numOfSignComp, outFileName)
      sorted_similarity=getSimilarFrames(outFileName, frameId)
      visualizeFrames(fullPath, sorted_similarity, frameId, "Task2")


  # Block until a key is pressed; 'q' closes every OpenCV window.
  c = cv2.waitKey(0)
  if 'q' == chr(c & 255):
    cv2.destroyAllWindows()