Example #1
def worker(ip, t, m, h):
    # t: timeout, h: SNI hostname (cf. Example #8); m mutes misses
    global times, n, passip
    if detect(ip, t, h):
        passip.append(ip)
        print('√   ' + ip)
    else:
        if not m:
            print('x   ' + ip)
    times += 1
    if times == n:
        global output
        print('√   finish  scanned ' + str(times) + ' IPs, found ' +
              str(len(passip)) + ' SNI IPs.')
        if output == 'replace':
            name = time.strftime('%Y-%m-%d-%H-%M-%S',
                                 time.localtime(time.time()))
            output = 'PassIp ' + name + '.txt'
        f = open(output, 'w')
        try:
            for v in passip:
                f.write(v + '\n')
        finally:
            f.close()
            print('bye, results written to ' + output +
                  ', press Enter to exit.')
            input()
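A minimal harness for the worker above, with hypothetical stand-ins for the globals and the `detect` probe it expects (none of this is the original project's code; parameter meanings are inferred from Example #8, where `detect` takes a timeout and an SNI hostname):

import time
from concurrent.futures import ThreadPoolExecutor

times, passip = 0, []     # shared counters the worker mutates
output = 'replace'        # 'replace' => derive a timestamped filename

def detect(ip, t, h):
    """Hypothetical stub; the real probe performs an SNI/TLS check."""
    return ip.endswith('.1')

ips = ['10.0.0.%d' % i for i in range(1, 11)]
n = len(ips)

with ThreadPoolExecutor(max_workers=8) as pool:
    for ip in ips:
        pool.submit(worker, ip, 3, False, 'example.com')

Note that `times += 1` on a shared global is not thread-safe; Example #5 below guards the same bookkeeping with a lock.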
Example #2
def auto(camera, rawCapture, cas_params, stop_event):
    print("In Auto Detection System for PiCamera...")
    g.track_flag = False  # make sure tracking starts disabled
    check_candidates(stop_event)
    for frame in camera.capture_continuous(rawCapture,
                                           format="bgr",
                                           use_video_port=True):
        flag = False
        raw_img = frame.array
        # cv2.imshow("Raw", img)
        # img = preprocess(img)
        # cv2.imshow("Preprocessed", img)
        rects, detected_img = detect(raw_img, cas_params)
        g.img = box(rects, detected_img)
        #    cv2.imshow("Cascaded", img)
        measure(raw_img, rects)

        # if there's no rects found, look around
        # if not rects:
        #     look_around()
        # Check time elapsed, if over 10 sec, invoke spiral search
        # if (time.time()-start) > 10:
        #     spiral_search()
        if g.track_flag:
            track(g.avg_pos)
            g.track_flag = False
            #time.sleep(.1)
        rawCapture.truncate(0)
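A hypothetical way to launch this loop on a Raspberry Pi, assuming the `picamera` package and a shared-state module `g`; `cas_params` is whatever the cascade detector expects and is taken as given here (names come from the snippet, values are illustrative):

import threading
from picamera import PiCamera
from picamera.array import PiRGBArray

camera = PiCamera(resolution=(320, 240), framerate=32)
rawCapture = PiRGBArray(camera, size=(320, 240))
stop_event = threading.Event()

# Run the detection loop in a background thread so the main
# thread stays free to set stop_event.
t = threading.Thread(target=auto,
                     args=(camera, rawCapture, cas_params, stop_event),
                     daemon=True)
t.start()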
Example #3
def sdetect():
    shape = None  # stays None if no contour passes the area filter
    for impath in glob.glob("output/json/*.jpg"):
        img = cv2.imread(impath)
        edges = cv2.Canny(img, 200, 200)
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        hue, sens = 60, 40
        lowergreen = np.array([hue - sens, 50, 50])
        uppergreen = np.array([hue + sens, 255, 255])
        mask = cv2.inRange(hsv, lowergreen, uppergreen)
        # suppress green regions and Canny edges before thresholding
        img = cv2.bitwise_and(img, img, mask=cv2.bitwise_not(mask))
        img = cv2.bitwise_and(img, img, mask=cv2.bitwise_not(edges))
        frame = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        ret, threshed = cv2.threshold(frame, 127, 255, cv2.THRESH_TOZERO)
        img = cv2.bitwise_and(img, img, mask=cv2.bitwise_not(threshed))
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (5, 5), 0)
        ret, thresh = cv2.threshold(blurred, 0, 255, cv2.THRESH_TOZERO)
        # OpenCV 3 returns (image, contours, hierarchy) here;
        # OpenCV 4 returns only (contours, hierarchy).
        _, cnts, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                              cv2.CHAIN_APPROX_SIMPLE)
        n = 0
        cnt = list()
        for c in cnts:
            area = cv2.contourArea(c)
            if area > 100:
                cnt.append(c)
                shape = detect(c)
                n += 1


##        haar = cv2.CascadeClassifier('E://stage2.xml')
##        if haar.detectMultiScale(gray, 1.3, 5):
##            shape="person"
    return shape
Example #4
def upload_page():
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            return render_template('upload.html', msg='No file selected')
        file = request.files['file']
        # if the user does not select a file, the browser may
        # submit an empty part without a filename
        if file.filename == '':
            return render_template('upload.html', msg='No file selected')

        if file and allowed_file(file.filename):
            file_path = os.path.join(os.getcwd() + UPLOAD_FOLDER,
                                     file.filename)
            file.save(file_path)
            print(file.filename)
            # call the OCR function on it
            extracted_text = detect(file_path)
            print(extracted_text)

            # extract the text and display it
            return render_template('upload.html',
                                   msg='Successfully processed',
                                   extracted_text=extracted_text,
                                   img_src='/static/uploads/' + file.filename)
    elif request.method == 'GET':
        return render_template('upload.html')
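The handler above is a Flask view; a minimal, hypothetical skeleton around it (the OCR `detect` call comes from the snippet, while `allowed_file` and the constants are common helpers reimplemented here as a guess, with `UPLOAD_FOLDER` inferred from the `img_src` path):

import os
from flask import Flask, request, render_template

app = Flask(__name__)
UPLOAD_FOLDER = '/static/uploads'
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}

def allowed_file(filename):
    return ('.' in filename
            and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS)

# register the view defined above for GET and POST
app.add_url_rule('/upload', 'upload_page', upload_page,
                 methods=['GET', 'POST'])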
Example #5
def worker(ip, t, m, h):
    global times, n, passip
    ret = detect(ip, t, h)
    with lock:  # serialize updates to the shared counters
        if ret:
            passip.append(ip)
            printx('√   ' + ip, 1)
        times += 1
        printx()
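Unlike Example #1, this version takes a lock before touching the shared counters. The supporting definitions are not shown in the snippet; a hypothetical minimal set might be:

import threading

lock = threading.Lock()
times, passip = 0, []

def printx(msg=None, found=0):
    """Hypothetical progress printer: print hits, else refresh one status line."""
    if msg is not None:
        print(msg)
    else:
        print('\rchecked %d IPs, %d hits' % (times, len(passip)),
              end='', flush=True)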
Example #6
    def run(self):
        print('[*] LIVE Camera')
        for stream in detect():
            if stream[0] == "Busy Camera":
                self.cv_img = cv2.imread("../../img/offline.jpg")
                self.change_pixmap_signal.emit(self.cv_img)
                break
            else:
                self.cv_img = stream[1]
                self.change_pixmap_signal.emit(self.cv_img)
                self.detected_label = stream[0]
                print("DETECTED=", stream[0])
Example #7
def worker(t, h):
    global times, passip, ipQueue
    try:
        ip = ipQueue.get(timeout=5)
    except queue.Empty:  # requires "import queue"; nothing left after the timeout
        pass
    else:
        r = detect(ip, t, h)
        times += 1
        if r:
            passip.append(ip)
            printx('√   ' + ip, 1)
    printx()
Example #8
def worker(port):
    global times, n, passip, stop, output

    while not q.empty():
        ip = q.get_nowait()
        port = '8118' if port is None else str(port)  # keep port a string
        if detect(ip, timeout, hostname, port) and req_test(ip, port):
            passip.append(ip)
            printx('√   ' + ip, 1)
        times += 1
        printx()

        # print('√   finish  scanned ' + str(times) + ' IPs, found ' +
        #       str(len(passip)) + ' SNI IPs.')
        print(times)
        print(passip)
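The `q.empty()` check followed by `q.get_nowait()` can race when several workers share the queue: another thread may drain the last item between the two calls. A common alternative (a sketch, not the original code) catches `queue.Empty` instead:

import queue

def worker(port):
    while True:
        try:
            ip = q.get_nowait()
        except queue.Empty:
            break  # queue drained; let the thread exit
        # ... probe ip exactly as above ...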
Example #9
    def ImageRec(self):

        output_dir, _ = os.path.split(self.file_dir)
        output_file = detect(source=self.file_dir, output=output_dir)

        image = cv2.imread(output_file)
        rows, cols, channels = image.shape
        bytesPerLine = channels * cols  # byte stride of one image row
        cv2.cvtColor(image, cv2.COLOR_BGR2RGB, image)  # convert in place
        QImg = QImage(image.data, cols, rows, bytesPerLine,
                      QImage.Format_RGB888)
        self.ui.lab_home_main_disc.setPixmap(
            QPixmap.fromImage(QImg).scaled(self.ui.lab_home_main_disc.size(),
                                           Qt.KeepAspectRatio,
                                           Qt.SmoothTransformation))


Example #10
def traffic():
    d = driver()
    d.setStatus(mode="speed")
    isfirst = True
    decisions = {1: 'left', 0: 'straight', -1: 'right'}
    dodetect = 0

    while True:
        try:
            img_origin, img = get_img(camera, closed_size)
            cruise_main(d, img, isfirst)
            isfirst = False
            # only run the detectors every detect_interval-th frame
            dodetect = (dodetect + 1) % detect_interval
            if dodetect != 0:
                continue

            zebra_conf = detect(img_origin, zebra_threshold, zebra, zebra_area)
            decision, sign_conf = detect_sign(img_origin, sign_threshold,
                                              right, straight, left, sign_area)
            print('[ Detection ] [ Zebra', zebra_conf, '] [ Sign', sign_conf,
                  ']')

            if zebra_conf > zebra_conf_threshold:
                print("[ Zebra ] [ Confidence", zebra_conf, ']')
                control_open(d, 0, 0, 3)
                img_origin, img = move_away_from_zebra(d, camera,
                                                       time_for_zebra,
                                                       closed_size, zebra_edge,
                                                       zebra_motorMax)

            if sign_conf > sign_conf_threshold:
                print("[ Sign ] [ Confidence", sign_conf, '] [ Decision',
                      decisions[decision], ']')
                if decision:
                    motor = motor_for_turn
                    steer = decision * steer_for_turn
                    control_open(d, motor, steer, time_for_turn)
                else:
                    cruise_main(d, img, False)

        except KeyboardInterrupt:
            break
    d.setStatus(motor=0.0, servo=0.0, dist=0x00, mode="stop")
    d.close()
    del d
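The loop leans on a set of module-level tuning constants that the snippet does not define. Hypothetical values, purely for illustration (the names are real, every number here is invented):

detect_interval = 5          # run the detectors every 5th frame
zebra_conf_threshold = 0.6   # confidence needed to treat a crossing as real
sign_conf_threshold = 0.6    # confidence needed to act on a traffic sign
motor_for_turn = 0.3         # throttle while turning
steer_for_turn = 0.8         # steering magnitude; sign comes from the decision
time_for_turn = 1.5          # seconds to hold the turn
time_for_zebra = 2.0         # seconds to drive clear of the crossing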
Example #11
    def post(self):
        data = parser.parse_args()
        if data['pic'] == "":
            return {'data': '', 'message': 'No file found', 'status': 'error'}
        photo = data['pic']

        if photo:
            filename = 'your_image.png'
            photo.save(os.path.join(UPLOAD_FOLDER, filename))
            # data = "test sentence"
            data = detect('uploads/your_image.png')
            data = make_sentense_from_raw(data)

            return {'data': data, 'status': 'success'}
        return {
            'data': '',
            'message': 'Something went wrong',
            'status': 'error'
        }
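The `post(self)` method and `parser.parse_args()` point at Flask-RESTful; a hypothetical resource skeleton with a file-upload parser (a standard Flask-RESTful pattern, not code from the snippet's project) would be:

from flask import Flask
from flask_restful import Api, Resource, reqparse
import werkzeug

parser = reqparse.RequestParser()
parser.add_argument('pic', type=werkzeug.datastructures.FileStorage,
                    location='files')

app = Flask(__name__)
api = Api(app)

class OcrUpload(Resource):
    def post(self):
        ...  # the body shown above

api.add_resource(OcrUpload, '/ocr')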
Example #12
    def spider(self):

        # we create a default resource for
        # the directory itself
        self.resources.push(Resource({"name": "index", "path": self.folder}))

        # the grid is like an index of the files on the file system
        # and their relationships (hyperlinks):
        # where to find everything and how to initialise
        # the non-static parts of a living document,
        # such as the microservices and REPLs
        for item in self.resources:
            # an item probes its existence - it may no longer exist on the file system
            item.probe()

        for file in self.folder:
            # detect the general kind of file and add it to the resources
            resource = detect(file)
            self.resources.append(resource)
Example #14
def button2():
    choice = usb_or_ip()
    if choice == "B":
        video = "http://" + get_ip() + "/video?dummy=param.mjpg"
        capture1 = cv.VideoCapture(video)
        success1, frame1 = capture1.read()
        cv.imwrite(tempimagepath_cam, frame1)
        cv.imwrite(tempimagepath_detect, frame1[85:450, 200:480])
    else:
        success, frame = capture.read()
        ref, frame = capture.read()  # read twice so the saved frame is fresh
        cv.imwrite(tempimagepath_detect, frame[85:450, 200:480])
        cv.imwrite(tempimagepath_cam, frame)
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg',
                        type=str,
                        default='data/train/yolov3.cfg',
                        help='cfg file path')
    parser.add_argument('--data',
                        type=str,
                        default='cz.data',
                        help='coco.data file path')
    parser.add_argument('--weights',
                        type=str,
                        default='data/yolov3_10000.weights',
                        help='path to weights file')
    parser.add_argument('--images',
                        type=str,
                        default='data/samples',
                        help='path to images')
    parser.add_argument('--img-size',
                        type=int,
                        default=416,
                        help='inference size (pixels)')
    parser.add_argument('--conf-thres',
                        type=float,
                        default=0.5,
                        help='object confidence threshold')
    parser.add_argument('--nms-thres',
                        type=float,
                        default=0.5,
                        help='iou threshold for non-maximum suppression')
    parser.add_argument(
        '--fourcc',
        type=str,
        default='mp4v',
        help='fourcc output video codec (verify ffmpeg support)')
    parser.add_argument('--output',
                        type=str,
                        default='output',
                        help='specifies the output path for images and videos')
    opt = parser.parse_args()

    with torch.no_grad():
        label = detect(opt.cfg,
                       opt.data,
                       opt.weights,
                       images=opt.images,
                       img_size=opt.img_size,
                       conf_thres=opt.conf_thres,
                       nms_thres=opt.nms_thres,
                       fourcc=opt.fourcc,
                       output=opt.output)
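One thing worth knowing about this pattern: because `button2` runs inside a GUI callback, `parser.parse_args()` reads whatever happens to be on `sys.argv`, which belongs to the GUI launcher, not to this parser. A common workaround (sketch) is to parse an empty argument list so only the declared defaults apply:

opt = parser.parse_args([])  # ignore sys.argv; keep the declared defaults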
Example #15
def main():
	d=detect(inputFolder='../data/pilgrim/',outputFolder='../output/pilgrim/')
	print('Loading books and splitting')
	text=d.loadNew()
	books=d.loadCandidates()
	textChunks=d.splitChunks(text)
	
	print('Filtering using Jaccard')
	reducedBooks=d.filterWithJacard(textChunks,books,threshold=0.25)
	with open('../output/pilgrim/reducedBooks.pickle', 'wb') as pickling_on:
		pickle.dump(reducedBooks, pickling_on)

	# print('Text: ',len(text))
	# print('original is',len(books['isaiah']))
	# print('reduced isaiah',len(reducedBooks['isaiah']))

	# print('textChunks: ',len(textChunks))


	print('Syntactic parsing')
	parseTrees,parsedSentences,parseWithoutTokenTrees=d.parseNewBook(textChunks)
	with open('../output/pilgrim/parseTrees.pickle', 'wb') as pickling_on:
		pickle.dump(parseTrees, pickling_on)

	# print('Parse trees',len(parseTrees))

	potentialParseTrees,potentialParsedSentences,potentialParseWithoutTokenTrees=d.parseCandidates(reducedBooks)
	# print(len(parseTrees))
	# print(len(parseTrees['isaiah']))
	with open('../output/pilgrim/potentialParseTrees.pickle', 'wb') as pickling_on:
		pickle.dump(potentialParseTrees, pickling_on)

	# print('Potential Parse Trees isaiah ',len(potentialParseTrees['isaiah']))

	print('Moschitti scoring')
	syntacticScore,syntacticScoreWithoutTokens=d.syntacticScoring(parseTrees,potentialParseTrees,parseWithoutTokenTrees,potentialParseWithoutTokenTrees)
	with open('../output/pilgrim/allScores.pickle', 'wb') as pickling_on:
		pickle.dump(syntacticScore, pickling_on)

	with open('../output/pilgrim/allScores2.pickle', 'wb') as pickling_on:
		pickle.dump(syntacticScoreWithoutTokens, pickling_on)

	print('Semantic scoring')
	semanticScore=d.semanticScoring(text,reducedBooks)

	# print('Semantic Score: ',len(semanticScore))

	print('Extracting longest subsequence')
	lcsScore,lcs=d.longestSubsequenceScoring(text,reducedBooks)

	print('Average scoring')

	scoreTuples=d.aggregateScoring(syntacticScore,semanticScore,lcsScore,lcs,syntacticScoreWithoutTokens)

	# print(len(scoreTuples))


	with open('../output/pilgrim/scoreTuples.pickle', 'wb') as pickling_on:
		pickle.dump(scoreTuples, pickling_on)

	finalTuples,diffTuples=d.finalFiltering(scoreTuples,reducedBooks,0.80)
	if len(finalTuples)>100:
		finalTuples=finalTuples[0:100]
	orderedTuples=d.nounBasedRanking(finalTuples,text,reducedBooks)
	
	with open('../output/pilgrim/orderedTuples.pickle', 'wb') as pickling_on:
		pickle.dump(orderedTuples, pickling_on)

	print('Final results: \n\n\n')

	i=1
	for t in orderedTuples:
		print('Pairing: ',i)
		print('\n')
		print('New Sentence: ',text[t[0]])
		print('\n')
		print('Reference: \n',reducedBooks[t[1]][t[2]])
		print('\n')
		print('Similar Sentence is from: ',t[1])
		print('Syntactic Score: ',t[3])
		print('Syntactic Similarity without tokens: ',t[11])
		print('Semantic Score: ',t[4])
		print('Semantic Score without stopwords: ',t[5])
		print('LCS Length: ',t[9])
		print('LCS: ',t[10])
		print('Jaccard of common nouns: ',t[12])
		print('Jaccard of common verbs: ',t[13])
		print('Jaccard of common adjectives: ',t[14])
		print('Semantic similarity nouns: ',t[6])
		print('Semantic similarity verbs: ',t[7])
		print('\n\n')
		i=i+1

	d.writeOutput(orderedTuples,text,reducedBooks)

	print('\n\n Tuples with large difference in syntactic and semantic value: \n\n\n')

	diffTuples=d.nounBasedRanking(diffTuples,text,reducedBooks)

	with open('../output/pilgrim/diffTuples.pickle', 'wb') as pickling_on:
		pickle.dump(diffTuples, pickling_on)
Example #16
def main(args):  # create the main function with input args
    if args.test_text:  # if test input was given, just echo it
        print(args.test_text)

    # run the detect program with the input and output locations
    detect(args.test_location, args.output_location)
Example #17
def job():
    camera.capture(IMG_PATH)                  # grab a still from the camera
    detect(load_model, infer, 416, IMG_PATH)  # run inference (416 is presumably the input size)
    os.remove(IMG_PATH)                       # clean up the temporary image
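The name `job` follows the convention of the `schedule` library; if that is indeed how it is driven (a guess, the snippet does not show it), the wiring would be roughly:

import time
import schedule

schedule.every(30).seconds.do(job)  # interval is illustrative

while True:
    schedule.run_pending()
    time.sleep(1)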
Example #18
track_flag = False
avg_pos = 0.
candidates = []

vs = PiVideoStream((win_w, win_h), 64).start()
time.sleep(2.0)

start_time = time.time()
monitor_start_time = 0.

while True:
    img = vs.read()
    # cv2.imshow("Raw", img)
    # img = preprocess(img)
    # cv2.imshow("Preprocessed", img)
    rects, img = detect(img, scale_factor, min_neighs, obj_w, obj_h)
    img = box(rects, img)
    #    cv2.imshow("Cascaded", img)
    measure(img, rects, candidates)

    # if there's no rects found, look around
    # if not rects:
    #     look_around()
    # Check time elapsed, if over 10 sec, invoke spiral search
    # if (time.time()-start) > 10:
    #     spiral_search()
    #if (time.time()-start_time >.1):

    if True:  # if time.time() - start_time > 5:
        if candidates:
            avg_pos = mean(candidates)
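This script-level variant of Example #2 pulls frames through a threaded camera reader. Judging by the constructor signature, `PiVideoStream` matches the class of the same name in `imutils`; the imports it would need (an inference, not shown in the snippet) are roughly:

import time
import cv2
from statistics import mean          # used for mean(candidates)
from imutils.video import PiVideoStream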
Example #19
        # move above the target
        val = 'MOVP ' + str(p_hat[0]) + ' ' + str(
            p_hat[1]) + ' 0 ' + str(-p_angle[i]) + ' 0 180\n'
        checkPoint(val)
        s.sendall(val.encode('ascii'))

        # move down to reach the target
        val = 'MOVP ' + str(p_hat[0]) + ' ' + str(
            p_hat[1]) + ' -190 ' + str(-p_angle[i]) + ' 0 180\n'
        checkPoint(val)
        s.sendall(val.encode('ascii'))

        # close the gripper
        s.sendall(close_grip.encode('ascii'))

        face, grabbing, SN = detect(i, s, actual_length_box[i])
        inter_pose_register[i] = SN
        object_size = GetSizeBySN(SN)
        xs.append(object_size[0])
        ys.append(object_size[1])
        zs.append(object_size[2])
        print("face: {} grabbing {}".format(face, grabbing))
        if SN not in [18, 19, 10, 11]:
            traceRoute(s, i, SN, face, grabbing)

        # ==========================================
        # ReCalibrating the centroid of object
        # with the manipulator
        val = 'MOVP ' + str(p_hat[0]) + ' ' + str(
            p_hat[1]) + ' 0 ' + '90 0 180\n'
        checkPoint(val)
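The `MOVP` command strings are built by concatenating `str()` pieces across line breaks, which is easy to get wrong. An equivalent formulation with an f-string (same payload, just more readable) would be:

val = f'MOVP {p_hat[0]} {p_hat[1]} 0 {-p_angle[i]} 0 180\n'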
Example #20
                            split('.')[0] + '.png')
        image_d = os.path.join(args['output'], 'detect' + image.split(os.sep)[-1].\
                            split('.')[0] + '.png')

        img = cv2.imread(image_e)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = removeText(img, args['svm'])

        # Save remove-text-image
        cv2.imwrite(image_p, cv2.cvtColor(img, cv2.COLOR_RGB2BGR))

        # the detect algorithm only works with a grayscale image.
        img_GRAY = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

        # Get bounding boxes
        pairs = detect(img_GRAY)

        # Use matplotlib to display the image and the detected
        # copy-and-paste areas.
        if args['display']:
            fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
            ax.imshow(img)

        # Draw rectangles in image.
        for pair_list in pairs:
            for pair in pair_list:

                # random.random will generate a number between 0 and 1.
                color = (random.random(), random.random(), random.random())

                for x, y, w, h in pair:
Example #21
# ratings, usermap = load_itunes(USE_PRODUCTS)
# breakpoint()  # debug hook; disabled so the script runs end-to-end

data = pd.read_csv('./data/amazon/amazon_network.csv')
gmm_analysis(data)
dataname = 'amazon'
ratings, usermap = load_flipkart(USE_PRODUCTS)
(rating_arr, iat_arr, ids) = process_data(ratings, dataname, USE_PRODUCTS)
(rating_arr, iat_arr) = (np.array(rating_arr), np.array(iat_arr))
# pickle.dump((rating_arr, iat_arr, ids), open('../data/%s/%s_bucketed.pickle' % (dataname, keyword), 'wb'))
# (rating_arr, iat_arr, ids) = pickle.load(open('../data/%s/%s_bucketed.pickle' % (dataname, keyword), 'rb'))

(rating_arr, iat_arr) = (rating_arr[0:5000], iat_arr[0:5000])

# Detect suspicious users given matrices containing ratings and inter-arrival
# times. USE_TIMES is a boolean for whether the inter-arrival times should be
# used. The last parameter is the number of clusters to use.
suspn = detect(rating_arr, iat_arr, USE_TIMES, 2)

# OUTPUT RESULTS TO FILE: it considers the top (NUM_TO_OUTPUT) most suspicious users and stores their user ids, scores, ratings and IATs in separate files.
NUM_TO_OUTPUT = 500  # number of suspicious users to output to file
susp_sorted = np.array([
    (x[0]) for x in sorted(enumerate(suspn), key=itemgetter(1), reverse=True)
])
most_susp = susp_sorted[:NUM_TO_OUTPUT]
with open('./output/%s/top%d%s_ids.txt' % (dataname, NUM_TO_OUTPUT, keyword),
          'w') as outfile:
    with open(
            './output/%s/top%d%s_scores.txt' %
        (dataname, NUM_TO_OUTPUT, keyword), 'w') as out_scores:
        with open(
                './output/%s/top%d%s_ratings.txt' %
            (dataname, NUM_TO_OUTPUT, keyword), 'w') as out_rating:
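The three nested `with open(...)` blocks at the end can be collapsed into one statement, since a single `with` accepts several context managers (sketch; the path variables here are stand-ins for the `%`-formatted paths above):

with open(ids_path, 'w') as outfile, \
     open(scores_path, 'w') as out_scores, \
     open(ratings_path, 'w') as out_rating:
    ...  # write ids, scores and ratings side by side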
Example #22
import cv2
from detect import *


class eyes():
    def __init__(self, img1):

        self.img1 = img1
        self.main = main

    def eye(self):
        image = cv2.imread(self.img1)
        # named eye_cascade so the loop variable ey below cannot shadow it
        eye_cascade = cv2.CascadeClassifier('classifiers/haarcascade_eye.xml')
        imgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        eyes_found = eye_cascade.detectMultiScale(imgray, 1.3, 5)
        eye_image = image  # fall back to the unannotated image if no eyes found
        for (ex, ey, ew, eh) in eyes_found:
            eye_image = cv2.rectangle(image, (ex, ey), (ex + ew, ey + eh),
                                      (255, 0, 0), 2)
        return eye_image


if __name__ == "__main__":
    e = eyes(detect('vasanth.jpg').detectface())
    cv2.imshow('image', e.eye())
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #23
def main():

	# Creating an object to detect sentence level allusions
	d=detect(inputFolder='../data/n1-lim/',outputFolder='../output/n1-lim-sent/',cores=30,language='de')
	
	# loading the data
	print('Loading books and splitting')
	text=d.loadNew()
	books=d.loadCandidates()
	textChunks=d.splitChunks(text)

	# d.extendStopwords(text)
	
	# processing using spacy
	print('spacy')
	spacyTextChunks,spacyBooks,spacyText=d.spacyExtract(textChunks,books)

	# filtering using jaccard
	print('Filtering using Jaccard')
	reducedSpacyBooks,reducedSentences=d.filterWithJacard(spacyTextChunks,spacyBooks,threshold=0.05) #filtering the spacy data structure
	reducedBooks=d.filterOriginalBooks(reducedSentences,books) #filtering the original data structure

	with open('../output/n1-lim-sent/reducedBooks.pickle', 'wb') as pickling_on:
		pickle.dump(reducedBooks, pickling_on)

	# Syntactic parsing of the new text
	print('Syntactic parsing')
	parseTrees,parsedSentences,parseWithoutTokenTrees=d.parseNewBook(textChunks)

	with open('../output/n1-lim-sent/parseTrees.pickle', 'wb') as pickling_on:
		pickle.dump(parseTrees, pickling_on)

	# Syntactic parsing of the potential candidates
	potentialParseTrees,potentialParsedSentences,potentialParseWithoutTokenTrees=d.parseCandidates(reducedBooks)

	with open('../output/n1-lim-sent/potentialParseTrees.pickle', 'wb') as pickling_on:
		pickle.dump(potentialParseTrees, pickling_on)

	# Syntactic scoring using the moschitti score
	print('Moschitti scoring')
	syntacticScore,syntacticScoreWithoutTokens=d.syntacticScoring(parseTrees,potentialParseTrees,parseWithoutTokenTrees,potentialParseWithoutTokenTrees)

	with open('../output/n1-lim-sent/allScores.pickle', 'wb') as pickling_on:
		pickle.dump(syntacticScore, pickling_on)

	# Semantic scoring using word2vec
	print('Semantic scoring')
	semanticScore=d.semanticScoring(spacyText,reducedSpacyBooks,monolingual=True,lang1='english',lang2='english')

	# Extracting the longest common subsequence
	print('Extracting longest subsequence')
	lcsScore,lcs=d.longestSubsequenceScoring(text,reducedBooks)

	# Aggregating the syntactic and semantic scores
	print('Average scoring')
	scoreTuples=d.aggregateScoring(syntacticScore,semanticScore,lcsScore,lcs,syntacticScoreWithoutTokens)

	with open('../output/n1-lim-sent/scoreTuples.pickle', 'wb') as pickling_on:
		pickle.dump(scoreTuples, pickling_on)

	# Extracting a limited number of sentence pairs
	finalTuples,diffTuples=d.finalFiltering(scoreTuples,reducedBooks,0.79)
	if len(finalTuples)>100:
		finalTuples=finalTuples[0:100]
	
	# Sorting the extracted tuples using Noun based ranking
	orderedTuples=d.nounBasedRanking(finalTuples,spacyText,reducedSpacyBooks)
	
	with open('../output/n1-lim-sent/orderedTuples.pickle', 'wb') as pickling_on:
		pickle.dump(orderedTuples, pickling_on)


	# Printing final results on the terminal
	print('Final results: \n\n\n')

	i=1
	for t in orderedTuples:
		print('Pairing: ',i)
		print('\n')
		print('New Sentence: ',text[t[0]])
		print('\n')
		print('Reference: \n',reducedBooks[t[1]][t[2]])
		print('\n')
		print('Similar Sentence is from: ',t[1])
		print('Syntactic Score: ',t[3])
		print('Syntactic Similarity without tokens: ',t[11])
		print('Semantic Score: ',t[4])
		print('Semantic Score without stopwords: ',t[5])
		print('LCS Length: ',t[9])
		print('LCS: ',t[10])
		print('Jaccard of common nouns: ',t[13])
		print('Jaccard of common verbs: ',t[14])
		print('Jaccard of common adjectives: ',t[15])
		print('Semantic similarity nouns: ',t[6])
		print('Semantic similarity verbs: ',t[7])
		print('\n\n')
		i=i+1

	# Writing the output into a file
	d.writeOutput(orderedTuples,text,reducedBooks)

	# Sorting the tuples which had high differences

	diffTuples=d.nounBasedRanking(diffTuples,spacyText,reducedSpacyBooks)

	with open('../output/n1-lim-sent/diffTuples.pickle', 'wb') as pickling_on:
		pickle.dump(diffTuples, pickling_on)