def window():
    global lastFilename
    name, ext = os.path.splitext(request.args.get('type'))
    filenamenew = name + ext
    lastFilename = filenamenew
    status = 1
    if ext == ".pdf":
        # Open the PDF and collect the text of its pages.
        pdfFileObj = open(UPLOAD_FOLDER + "/" + filenamenew, 'rb')
        pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
        # Print the number of pages in the PDF file.
        print(pdfReader.numPages)
        bundle = ""
        # Pages are 0-indexed, so this loop starts from the second page.
        for i in range(1, pdfReader.numPages):
            pageObj = pdfReader.getPage(i)
            bundle += pageObj.extractText()
        pdfFileObj.close()
        # Auto tagging: build the tag list and drop duplicates while
        # preserving order.
        t = AutoTagify()
        t.text = bundle
        e_words = list(dict.fromkeys(t.tag_list()))
    else:
        # Plain-text files are read and tagged directly.
        file = open(UPLOAD_FOLDER + "/" + filenamenew, "r+")
        t = AutoTagify()
        t.text = file.read()
        e_words = list(dict.fromkeys(t.tag_list()))
        file.close()
    # Summarization
    summary = generate_summary(UPLOAD_FOLDER + "/" + filenamenew, 5)
    # Store the filename, tags and summary in the Tag table.
    conn = sqlite3.connect('TAGS.db')
    conn.execute('''INSERT INTO Tag (Filename,Auto_tag,Manual_tag,Summary,status)
                    VALUES (?,?,?,?,?)''',
                 (filenamenew, str(e_words), str([]), str(summary), status))
    # Save (commit) the changes
    conn.commit()
    conn.close()
    return render_template('window.html', F=filenamenew, L=e_words)
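# The INSERT in window() above assumes a Tag table already exists inside
# TAGS.db. The sketch below is only an illustration of a schema that would
# satisfy that statement: the column names come from the INSERT itself,
# while the column types and the helper name _create_tag_table_sketch are
# assumptions, not the project's actual schema.
import sqlite3

def _create_tag_table_sketch(db_path='TAGS.db'):
    conn = sqlite3.connect(db_path)
    conn.execute('''CREATE TABLE IF NOT EXISTS Tag (
                        Filename TEXT,
                        Auto_tag TEXT,
                        Manual_tag TEXT,
                        Summary TEXT,
                        status INTEGER
                    )''')
    conn.commit()
    conn.close()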
def getsummary():
    if request.method == "POST":
        topic = request.form['topic']
        text = request.form["text"]
        # Summarize the submitted text, persist it and show all stored records.
        data = generate_summary(text)
        sd.add_data(text, topic, data)
        all_data = sd.get_all_records()
        return render_template("getchapsum.html", data=data, text=text,
                               all_data=all_data)
    return render_template("getchapsum.html")
def get_metadata(path):
    # Checks if Scanned PDF (Needs to be added)
    text = ""
    curr_page = 0
    with open(path, 'rb') as f:
        pdf = PdfFileReader(f)
        info = pdf.getDocumentInfo()
        number_of_pages = pdf.getNumPages()
        # Concatenate the extracted text of every page.
        while curr_page < number_of_pages:
            page = pdf.getPage(curr_page)
            curr_page += 1
            text += page.extractText()
    # Collect the document information fields plus a generated summary.
    metadata = {}
    metadata['author'] = info.author
    metadata['creator'] = info.creator
    metadata['producer'] = info.producer
    metadata['subject'] = info.subject
    metadata['title'] = info.title
    metadata['numpages'] = number_of_pages
    metadata['summary'] = generate_summary(text)
    return metadata
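# A minimal usage sketch for get_metadata(). The file name 'sample.pdf' is
# hypothetical and only illustrates the expected call; the dict keys printed
# here are the ones built by the function above.
if __name__ == '__main__':
    meta = get_metadata('sample.pdf')
    for key, value in meta.items():
        print(key, ':', value)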
def pipeline(progargs):
    """ Performs sequentially the steps of the pipeline that have been
    requested.

    Args:
        progargs: Program arguments.

    """

    # Magnitudes calculated.
    mag = None

    # No step has run yet.
    anything_done = False

    stars, filters, header_fields = get_pipeline_parameters(progargs)

    # This step organizes the images in directories depending on the type of
    # image: bias, flat or data.
    if progargs.organization_requested or progargs.all_steps_requested:
        logging.info("* Step 1 * Organizing image files in directories.")
        orgfits.organize_files(progargs, stars, header_fields, filters)
        anything_done = True
    else:
        logging.info("* Step 1 * Skipping the organization of image files "
                     "in directories. Not requested.")

    # This step reduces the data images applying the bias and flats.
    if progargs.reduction_requested or progargs.all_steps_requested:
        logging.info("* Step 2 * Reducing images.")
        reduction.reduce_images(progargs)
        anything_done = True
    else:
        logging.info("* Step 2 * Skipping the reduction of images. "
                     "Not requested.")

    # This step finds objects in the images. The result is a list of x,y and
    # AR,DEC coordinates.
    if progargs.astrometry_requested or progargs.all_steps_requested:
        logging.info("* Step 3 * Performing astrometry of the images.")
        astrometry.do_astrometry(progargs, stars, header_fields)
        anything_done = True
    else:
        logging.info("* Step 3 * Skipping astrometry. Not requested.")

    # This step calculates the photometry of the objects detected doing the
    # astrometry.
    if progargs.photometry_requested or progargs.all_steps_requested:
        logging.info("* Step 4 * Performing photometry of the stars.")
        photometry.calculate_photometry(progargs)
        anything_done = True
    else:
        logging.info("* Step 4 * Skipping photometry. Not requested.")

    # This step processes the magnitudes calculated for each object and
    # generates a file that associates each object with all its measures.
    if progargs.magnitudes_requested or progargs.all_steps_requested:
        logging.info("* Step 5 * Calculating magnitudes of stars.")
        mag = magnitude.process_magnitudes(stars, progargs.target_dir,
                                           progargs.light_directory)
        anything_done = True
    else:
        logging.info("* Step 5 * Skipping the calculation of magnitudes of "
                     "stars. Not requested.")

    # This step processes the magnitudes calculated for each object and
    # generates the light curves.
    if progargs.light_curves_requested or progargs.all_steps_requested:
        logging.info("* Step 6 * Generating light curves.")
        curves.generate_curves(stars, progargs.target_dir)
        anything_done = True
    else:
        logging.info("* Step 6 * Skipping the generation of light curves. "
                     "Not requested.")

    # Generates a summary if requested and some task has been indicated.
    if anything_done and progargs.summary_requested:
        summary.generate_summary(progargs, stars, mag)
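# A minimal stand-in for the progargs object that pipeline() expects. The
# attribute names are exactly the ones read inside pipeline() above;
# modelling them as a SimpleNamespace, and the directory values shown, are
# assumptions for illustration only, since the real program presumably
# builds this object from its own argument parser.
from types import SimpleNamespace

progargs_sketch = SimpleNamespace(
    organization_requested=True,
    reduction_requested=True,
    astrometry_requested=False,
    photometry_requested=False,
    magnitudes_requested=False,
    light_curves_requested=False,
    all_steps_requested=False,
    summary_requested=False,
    target_dir='data',         # assumed value, only for illustration
    light_directory='light',   # assumed value, only for illustration
)
# With these flags, pipeline(progargs_sketch) would attempt only the
# organization and reduction steps.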
import speech_recognition as sr
import summary as s

print('hi')
r = sr.Recognizer()
# Record a single phrase from the default microphone.
with sr.Microphone() as source:
    audio = r.listen(source)
print('hi')
try:
    # Transcribe the recording and write the text to gen.txt.
    command = r.recognize_google(audio)
    with open('gen.txt', 'w') as f:
        print(command, file=f)
except (sr.UnknownValueError, sr.RequestError):
    print('Sorry')
s.generate_summary('gen.txt')