예제 #1
0
 def __init__(self, access_token=''):
     """Build the API facade: one sub-client per API section, every one
     sharing the same access token.

     Each sub-client is exposed as an attribute named after its class,
     e.g. ``self.Audio`` or ``self.Wall``.
     """
     sections = (
         Account, Apps, Audio, Auth, Board, Database, Docs, Other, Fave,
         Friends, Gifts, Groups, Likes, Market, Messages, Newsfeed, Notes,
         Notifications, Pages, Photos, Places, Polls, Search, Stats,
         Status, Storage, Users, Utils, Video, Wall, Widgets,
     )
     for section in sections:
         # Attribute name == class name, matching the hand-written
         # assignments this replaces (self.Account = Account(...), etc.).
         setattr(self, section.__name__, section(access_token=access_token))
예제 #2
0
    def __init__(self, strip_number: int, led_number: int):
        """Container for one LED position on a strip.

        Args:
            strip_number: index of the LED strip.
            led_number: index of the LED on that strip.
        """
        self.strip_number = strip_number
        # Bug fix: the original stored this only under the misspelled
        # attribute 'led_nuber'.  Store the correctly spelled name and keep
        # the misspelled one as a backward-compatible alias for any code
        # that already reads it.
        self.led_number = led_number
        self.led_nuber = led_number

        self.led = Led(led_number=led_number, strip_number=strip_number)
        self.notes = Notes()

        # Nothing assigned to this container until explicitly marked.
        self.has_been_assigned = False
예제 #3
0
 def loadNotes(self, FileNotes):
     """Read tab-separated note records from the file *FileNotes*.

     Each line must contain at least four tab-separated fields, which are
     passed positionally to Notes().

     Returns:
         list of Notes objects, one per line of the file.
     """
     notes = []
     # 'with' guarantees the handle is closed even if a line is malformed
     # (the original open()/close() pair leaked the file on exceptions).
     with open(FileNotes, 'r') as file_object:
         for line in file_object:
             fields = line.rstrip('\n').split('\t')
             notes.append(Notes(fields[0], fields[1], fields[2], fields[3]))
     return notes
예제 #4
0
File: music.py — Project: macBdog/midimaster
 def __init__(self, graphics: Graphics, note_render: NoteRender,
              staff: Staff):
     """Set up note rendering and song-playback state for the given staff."""
     self.graphics, self.staff = graphics, staff
     # Positions are derived from the staff layout and shared with the
     # Notes renderer created just below.
     self.note_positions = staff.get_note_positions()
     self.notes = Notes(graphics, note_render, staff, self.note_positions)
     # Playback state: no song loaded yet, 60 BPM default tempo.
     self.song = None
     self.tempo_bpm = 60
     self.ticks_per_beat = Song.SDQNotesPerBeat
     # Per-track backing bookkeeping, populated during playback.
     self.backing_index, self.backing_time = {}, {}
예제 #5
0
def newSeq():
    """Store a posted note sequence for the requesting user and return the
    first note; respond with "error" when the payload is malformed."""
    seq = request.get_json()
    # Guard clause: reject payloads that are not in the expected format.
    if not match(seq):
        return jsonify("error")
    user = request.cookies['username']
    # Idiom fix: seq.split() instead of the unbound-method call
    # str.split(seq); both split on whitespace.
    userDictionary[user] = Notes(seq.split())
    curState = userDictionary[user]
    return jsonify(curState.next_note())
예제 #6
0
def view_notes(username):
    """Print every saved note for *username* with its save timestamp.

    Returns False (after printing a message) when the user has no saved
    notes; otherwise returns None after printing all of them.
    """
    store = Notes(username)
    all_notes = store.read_notes()
    if all_notes is False:
        print('No notes exists for {}'.format(username))
        return False

    stamp_format = "%d %b %Y %I:%M %p"
    for savedtime, note in all_notes.items():
        # Keys are epoch-second strings; render each as local time.
        stamp = time.strftime(stamp_format, time.localtime(int(savedtime)))
        print(stamp, note, sep=' : ')
예제 #7
0
def add_note(username):
    """Prompt for a new note and save it for *username*, evicting the
    oldest note first once the per-user limit has been reached."""
    store = Notes(username)
    existing = store.read_notes()
    # At the cap, make room by dropping the oldest saved note.
    if existing and len(existing) == MAX_NOTES_PER_USER:
        print(
            'You have reached the limit. Your oldest note will be deleted to save new.'
        )
        store.delete_old_note(existing)

    new_note = take_input('Enter note to save ', 'note')
    if store.create_note(new_note):
        print('Note saved successfully.')
    else:
        print('something went wrong... please try again')
예제 #8
0
File: app.py — Project: ArtisticTank/PerNote
def notesController():
    """GET: return the stored note for ?userId=... as JSON.
    POST: create/replace the note for the posted userId and echo it back."""
    if request.method == 'GET':
        userId = request.args.get('userId')
        # Robustness fix: an unknown or missing userId previously raised
        # KeyError (HTTP 500); return an empty JSON object instead.
        if userId not in notes:
            return json.dumps({})
        return json.dumps(notes[userId].__dict__)

    elif request.method == 'POST':
        req_data = request.json
        userId = req_data.get("userId")
        userName = req_data.get("userName")
        createdOn = req_data.get("createdOn")
        title = req_data.get("title")
        note = req_data.get("note")

        print(userId)
        p1 = Notes(userId, userName, createdOn, title, note)
        print(p1)
        notes[userId] = p1
        return json.dumps(notes[userId].__dict__)
예제 #9
0
File: app.py — Project: ArtisticTank/PerNote
from flask import Flask, request, jsonify
from flask_restful import Resource, Api
from notes import Notes
import json
from user import User

app = Flask(__name__)
#api = Api(app)

# In-memory demo fixtures: one hard-coded user and one hard-coded note.
# (Mail fields appear redacted in this copy of the source.)
dummyUser = User("s;dgfh", "this@lol", "thisisshit", "*****@*****.**", "Shitty", "Sartaj")
users = {
    "*****@*****.**" : dummyUser
}

# Note store keyed by userId; pre-seeded with a single entry.
n1 = Notes("12345", "shit", "lol", "this","shit")
notes = {
    "12345" : n1
}

@app.route("/login", methods = ['POST'])
def loginController():
    """Validate posted login credentials against the in-memory user store."""
    if request.method == 'POST':
        req_data = request.json
        mailId = req_data.get("mailId")
        password = req_data.get("password")

        if mailId is None:
            return "Mail Id is empty"

        # Bug fix: users[mailId] raised KeyError for unknown accounts
        # (HTTP 500); .get() lets missing users reach this branch.
        elif users.get(mailId) is None:
            return "Your account does not exist, maybe you need to create an account first"
예제 #10
0
#!/usr/bin/python

import yaml

# Load the configuration up front; presumably the notes module reads it
# after import -- TODO confirm.  'with' closes the handle the original
# bare open() leaked.
with open("config.yml") as config_file:
    config = yaml.safe_load(config_file)

from notes import Notes
notebook = Notes()

import sys

if len(sys.argv) < 2:
  print("usage: delete_note.py <number>")
  sys.exit()

script_name = sys.argv.pop(0)
note_index = int(sys.argv.pop(0))

# Bug fix: the three branches below used Python 2 'print' statements,
# which are syntax errors under Python 3 (the rest of this script
# already uses print() calls).
if note_index == 0:
  print("No note #0 found. Start counting with 1 like a normal person.")
elif note_index < 0:
  print("Please try to stay positive.")
elif note_index > len(notebook.get_notes()):
  print("You fell off the edge. There aren't that many notes.")
else:
  # note_index is already an int; notes are 1-based for the user but
  # 0-based inside the notebook.
  notebook.delete_note(note_index - 1)
예제 #11
0
File: xray.py — Project: seunboi4u/madcow
 def init(self):
     """Create the helper sub-modules, each bound to the shared madcow bot."""
     bot = self.madcow
     helpers = (('learn', Learn), ('staff', Staff), ('company', Company),
                ('realname', Realname), ('notes', Notes))
     for attr, factory in helpers:
         setattr(self, attr, factory(madcow=bot))
예제 #12
0
def main():
    """Serial Brevis pipeline: transcribe a YouTube video, extract
    keywords, summarise, pull keyframes, build paragraphs/headings and
    generate the final notes document (Brevis-Notes.docx/.pdf)."""

    # Transcription and Cleaning
    url = input("Enter the URL = ")

    sec = pafy.new(url).length
    print(f"\nVideo duration in sec = {sec}\n")

    # THRESHOLDS
    # Keyframe sampling interval scales linearly with video length.
    DYNAMIC_INTERVAL = (sec / 60) * 100

    # Only three thresholds actually vary with video length; the original
    # if/elif chain duplicated every constant across all six branches.
    if sec <= 900:       # 0-15 min
        NUM_KEYWORDS, SUMMARY_PERCENT, SENTENCES_PER_PARA = 15, 60, 6
    elif sec <= 1800:    # 15-30 min
        NUM_KEYWORDS, SUMMARY_PERCENT, SENTENCES_PER_PARA = 18, 50, 5
    elif sec <= 2700:    # 30-45 min
        NUM_KEYWORDS, SUMMARY_PERCENT, SENTENCES_PER_PARA = 20, 40, 4
    elif sec <= 3600:    # 45-60 min
        NUM_KEYWORDS, SUMMARY_PERCENT, SENTENCES_PER_PARA = 22, 35, 4
    elif sec <= 7200:    # 1-2 hr
        NUM_KEYWORDS, SUMMARY_PERCENT, SENTENCES_PER_PARA = 25, 30, 4
    else:                # more than 2 hr
        NUM_KEYWORDS, SUMMARY_PERCENT, SENTENCES_PER_PARA = 30, 25, 4

    # Thresholds that are identical for every duration bucket.
    NON_TEXT_LEN = 50
    SIMILAR_DISTANCE = 20
    INTERVAL_KEYFRAMES = DYNAMIC_INTERVAL
    SENTENCE_SIMILARITY = 0.35
    WORDS_PER_PARA = 20
    PERCENT_REDUCE = 0.6

    start = time.perf_counter()

    yt = YoutubeTranscribe(url)
    text = yt.youtube_transcribe()

    # Keywords Extractor
    words = KeywordsExtractor(text, NUM_KEYWORDS)
    keywords = words.ExtractKeywords()
    print(f'\nKeywords:\n {keywords}')

    # Summarization
    summ = Summarizer()
    summary_result = summ.summary(text, SUMMARY_PERCENT)
    print(f'\nSummary:\n {summary_result}')

    # Keyframe Extraction (Output : 'out' folder)
    print("\nExtracting Keyframes\n")
    ip = ImageProcessing(url, keywords)
    ip.img_processing(text_threshold=NON_TEXT_LEN,
                      dis_threshold=SIMILAR_DISTANCE,
                      jump=INTERVAL_KEYFRAMES)

    # Paragraph and Headings (Output : paragraph_headings.txt)
    print("\nGenerating Paragraphs and Headings\n")
    pf = ParaFormation(summary_result)
    list_para = pf.paragraph(similarity_threshold=SENTENCE_SIMILARITY,
                             word_threshold=WORDS_PER_PARA,
                             percent_reduce=PERCENT_REDUCE)
    ph = ParaHeadings(list_para)
    # Called for its side effects (writes paragraph_headings.txt).
    title_para = ph.get_titles_paras(sentence_threshold=SENTENCES_PER_PARA)

    # Final Notes (Includes Web Scraping)
    print("\nGenerating Final Notes\n")
    scraped_results = Scrapper(keywords, 2, 2, 2)
    s = scraped_results.web_scrape()
    notes = Notes(url, s)
    notes.generate_notes()
    print("\nBrevis-Notes.docx and Brevis-Notes.pdf(on Windows) Generated\n")

    # Remove the temporary working directory.
    if os.path.exists('res'):
        shutil.rmtree('res')

    finish = time.perf_counter()
    print(f'Serial: Finished in {round(finish-start, 2)} second(s)')
예제 #13
0
 def notes(self):
     """Open Notes on the text currently selected in the output panel."""
     selected_text = self.output_panel.get('sel.first', 'sel.last')
     Notes(selected_text)
예제 #14
0
File: clue.py — Project: J-Andrieu/PyClue
from gameboard import GameBoard
from notes import Notes
from deck import Deck
from player import Player


def rollDie(sides=6):
    """Return a uniform random die roll in 1..sides (default: six-sided).

    Generalized from the hard-coded six-sided die; calling with no
    arguments behaves exactly as before.  np.random.randint's upper
    bound is exclusive, hence sides + 1.
    """
    return np.random.randint(1, sides + 1)


if __name__ == "__main__":
    pygame.init()

    gameBoard = GameBoard("board_image.png")
    #gameBoard.setShowTiles(True)
    my_notes = Notes((gameBoard.size[0], 0))
    my_player = Player("mrs_peacock.png", (0, 5), gameBoard)
    screen = pygame.display.set_mode(
        (gameBoard.size[0] + my_notes.size[0],
         max(gameBoard.size[1], my_notes.size[1])), pygame.DOUBLEBUF)

    clock = pygame.time.Clock()
    run = True
    num_moves = 1000  #rollDie()
    while run:
        clock.tick(60)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT and num_moves > 0:
예제 #15
0
 def process_notes(self, string):
     """Parse *string* into a Notes object and advance the progress bar.

     Args:
         string: raw notes text to parse (format defined by Notes).
     """
     # Update the UI first so the user sees what the long step is doing.
     self.set_progress_text("Reading notes...")
     self.notes = Notes(string)
     self.add_progress(self.TASK_LENGTH_NOTES)
예제 #16
0
def main():
    """Parallel Brevis pipeline: run keyword extraction and summarisation
    concurrently (level 1), then keyframes, paragraph formation and web
    scraping concurrently (level 2), and generate the final notes."""
    # Parallel
    url = input("Enter the URL = ")

    start = time.perf_counter()

    # Transcription and Cleaning
    yt = YoutubeTranscribe(url)
    text = yt.youtube_transcribe()

    # Level1
    with ThreadPoolExecutor() as executor:
        '''
        Type1:
        level1_results1 = executor.submit(Test(10,20).RecArea)
        print(type(level1_results1))
        print(dir(level1_results1))
        print(level1_results1.result())
        
        Type2:
        level1_results1 = list(executor.map(Test().RecArea,[10],[20]))
        print(level1_results1[0])
        '''

        # Keywords Extractor
        # num_keywords=int(input("Enter number of keywords to be extracted : "))
        num_keywords = 10
        level1_results1 = executor.submit(
            KeywordsExtractor(text, num_keywords).ExtractKeywords)

        # Summarization
        percentage = 40
        # executor.map is lazy; list() forces the summary to finish here.
        level1_results2 = list(
            executor.map(Summarizer().summary, [text], [percentage]))

        print(f"\nKeywords:\n {level1_results1.result()}")
        print(f"\nSummary:\n {level1_results2[0]}")

    # Level2
    with ThreadPoolExecutor() as executor:
        # Keyframe Extraction (Output : 'out' folder)
        print("\nExtracting Keyframes\n")
        level2_results1 = list(
            executor.map(
                ImageProcessing(url, level1_results1.result()).img_processing,
                [50], [20], [1000]))

        # Paragraph and Headings (Output : paragraph_headings.txt)
        print("\nGenerating Paragraphs and Headings\n")
        level2_results2 = executor.submit(
            ParaFormation(level1_results2[0]).paragraph)

        print("\nScraping Web\n")
        level2_results3 = executor.submit(
            Scrapper(level1_results1.result(), 2, 2, 2).web_scrape)

        print(len(os.listdir(os.path.join('res', 'out'))),
              "images extracted in 'out' folder")

    # Headings are built after level 2 so paragraph results are complete.
    ph = ParaHeadings(level2_results2.result())
    title_para = ph.get_titles_paras(sentence_threshold=2)

    # Final Notes
    notes = Notes(url, level2_results3.result())
    notes.generate_notes()
    print("\nBrevis-Notes.docx and Brevis-Notes.pdf(on Windows) Generated\n")

    if os.path.exists('res'):
        shutil.rmtree('res')

    finish = time.perf_counter()

    print(f'Parallel: Finished in {round(finish-start, 2)} second(s)')
예제 #17
0
def add_object(user):
    """Ensure *user* has a Notes entry in the global userDictionary."""
    if user in userDictionary:
        return
    userDictionary[user] = Notes()
예제 #18
0
def gen():
    """Generate Brevis notes for the global video_url, running keyword
    extraction and summarisation in parallel processes, then zip the
    resulting documents into Brevis_Notes.zip (path stored in `path`)."""
    global video_url
    global keywords
    global path
    global json_result
    global text
    global summary_result
    global scrape_json
    global option

    sec = pafy.new(video_url).length
    print(f"\nVideo duration in sec = {sec}\n")

    # THRESHOLDS
    # Keyframe sampling interval scales linearly with video length.
    DYNAMIC_INTERVAL = (sec / 60) * 100

    # Only three thresholds vary with video length; the original if/elif
    # chain duplicated every constant across all six branches.
    if sec <= 900:       # 0-15 min
        NUM_KEYWORDS, SUMMARY_PERCENT, SENTENCES_PER_PARA = 15, 60, 6
    elif sec <= 1800:    # 15-30 min
        NUM_KEYWORDS, SUMMARY_PERCENT, SENTENCES_PER_PARA = 18, 50, 5
    elif sec <= 2700:    # 30-45 min
        NUM_KEYWORDS, SUMMARY_PERCENT, SENTENCES_PER_PARA = 20, 40, 4
    elif sec <= 3600:    # 45-60 min
        NUM_KEYWORDS, SUMMARY_PERCENT, SENTENCES_PER_PARA = 22, 35, 4
    elif sec <= 7200:    # 1-2 hr
        NUM_KEYWORDS, SUMMARY_PERCENT, SENTENCES_PER_PARA = 25, 30, 4
    else:                # more than 2 hr
        NUM_KEYWORDS, SUMMARY_PERCENT, SENTENCES_PER_PARA = 30, 25, 4

    # Thresholds that are identical for every duration bucket.
    NON_TEXT_LEN = 50
    SIMILAR_DISTANCE = 20
    INTERVAL_KEYFRAMES = DYNAMIC_INTERVAL
    SENTENCE_SIMILARITY = 0.35
    WORDS_PER_PARA = 20
    PERCENT_REDUCE = 0.6

    start = time.perf_counter()

    # Run keyword extraction and summarisation in parallel processes.
    key_ext = multiprocessing.Process(target=Process_Extract_Keywords,
                                      args=(video_url, text, NUM_KEYWORDS,
                                            NON_TEXT_LEN, SIMILAR_DISTANCE,
                                            INTERVAL_KEYFRAMES))
    summ_ext = multiprocessing.Process(
        target=Process_Get_Summary,
        args=(text, SUMMARY_PERCENT, SENTENCE_SIMILARITY, WORDS_PER_PARA,
              PERCENT_REDUCE, SENTENCES_PER_PARA))
    key_ext.start()
    summ_ext.start()
    # Wait for both workers before assembling the notes.
    key_ext.join()
    summ_ext.join()

    # Reference scraping is only included for the "Notes+Ref" option.
    if option == "Overview" or option == "Notes":
        scrape_json = {}
    # Generating final notes
    notes = Notes(video_url, scrape_json)
    notes.generate_notes()
    print("\nBrevis-Notes.docx and Brevis-Notes.pdf(on Windows) Generated\n")

    with ZipFile('Brevis_Notes.zip', 'w') as zip:
        print("Writing zip")
        # The PDF is only produced on Windows, so add it conditionally.
        if os.path.exists(os.path.join('res', 'Brevis-Notes.pdf')):
            zip.write(os.path.join('res', 'Brevis-Notes.pdf'),
                      arcname='Brevis-Notes.pdf')
        zip.write(os.path.join('res', 'Brevis-Notes.docx'),
                  arcname='Brevis-Notes.docx')

    path = os.path.abspath("Brevis_Notes.zip")

    # Clean up the temporary working directory.
    if os.path.exists('res'):
        shutil.rmtree('res')

    finish = time.perf_counter()

    print(f'Gen Function: Finished in {round(finish-start, 2)} second(s)')
예제 #19
0
def main():
    """Parallel (multiprocessing) Brevis pipeline: transcribe a YouTube
    video, run keyword extraction and summarisation in worker processes,
    then generate the final notes document."""
    url = input("Enter the URL = ")
    # Thresholds
    sec = pafy.new(url).length
    print(f"\nVideo duration in sec = {sec}\n")

    # THRESHOLDS
    # Keyframe sampling interval scales linearly with video length.
    DYNAMIC_INTERVAL = (sec / 60) * 100

    # Only three thresholds vary with video length; the original if/elif
    # chain duplicated every constant across all six branches.
    if sec <= 900:       # 0-15 min
        NUM_KEYWORDS, SUMMARY_PERCENT, SENTENCES_PER_PARA = 15, 60, 6
    elif sec <= 1800:    # 15-30 min
        NUM_KEYWORDS, SUMMARY_PERCENT, SENTENCES_PER_PARA = 18, 50, 5
    elif sec <= 2700:    # 30-45 min
        NUM_KEYWORDS, SUMMARY_PERCENT, SENTENCES_PER_PARA = 20, 40, 4
    elif sec <= 3600:    # 45-60 min
        NUM_KEYWORDS, SUMMARY_PERCENT, SENTENCES_PER_PARA = 22, 35, 4
    elif sec <= 7200:    # 1-2 hr
        NUM_KEYWORDS, SUMMARY_PERCENT, SENTENCES_PER_PARA = 25, 30, 4
    else:                # more than 2 hr
        NUM_KEYWORDS, SUMMARY_PERCENT, SENTENCES_PER_PARA = 30, 25, 4

    # Thresholds that are identical for every duration bucket.
    NON_TEXT_LEN = 50
    SIMILAR_DISTANCE = 20
    INTERVAL_KEYFRAMES = DYNAMIC_INTERVAL
    SENTENCE_SIMILARITY = 0.35
    WORDS_PER_PARA = 20
    PERCENT_REDUCE = 0.6

    # Starting the timer
    start = time.perf_counter()

    # Transcription and Cleaning
    yt = YoutubeTranscribe(url)
    text = yt.youtube_transcribe()

    # Queue used to receive the scraped links from the keyword process.
    Q = multiprocessing.Queue()
    # Running keywords and summary Processes parallely
    key_ext = multiprocessing.Process(
        target=Process_Extract_Keywords,
        args=(url, text, Q, NUM_KEYWORDS, NON_TEXT_LEN, SIMILAR_DISTANCE,
              INTERVAL_KEYFRAMES))
    summ_ext = multiprocessing.Process(
        target=Process_Get_Summary,
        args=(text, SUMMARY_PERCENT, SENTENCE_SIMILARITY, WORDS_PER_PARA,
              PERCENT_REDUCE, SENTENCES_PER_PARA))
    # Starting both processes simultaneously
    key_ext.start()
    summ_ext.start()
    # Bug fix: drain the queue *before* joining.  Joining a process that
    # still has buffered queue data can deadlock (see the multiprocessing
    # programming guidelines); the original called join() first.
    scraped_res = Q.get()
    key_ext.join()
    summ_ext.join()

    # Generating final notes
    notes = Notes(url, scraped_res)
    notes.generate_notes()
    print("\nBrevis-Notes.docx and Brevis-Notes.pdf(on Windows) Generated\n")

    # Removing the temporary res folder
    if os.path.exists('res'):
        shutil.rmtree('res')

    # Stopping the timer and reporting total runtime
    end = time.perf_counter()
    print(f"Finished in {round(end-start, 3)} second(s)")
예제 #20
0
 def reset_container(self):
     """Reset this container to its freshly constructed, unassigned state."""
     self.has_been_assigned = False
     # Bug fix: the original referenced bare names 'led_number' and
     # 'strip_number', which are undefined in this scope (NameError at
     # runtime).  Use the values stored on the instance; __init__ keeps
     # the LED number under the (misspelled) attribute 'led_nuber'.
     self.led = Led(led_number=self.led_nuber,
                    strip_number=self.strip_number)
     self.notes = Notes()
예제 #21
0
File: api.py — Project: max-kov/todo-rest
from flask import Flask
from flask import jsonify, request
from notes import Notes

# Single shared in-memory note store used by all the route handlers below.
notes = Notes()

# Flask application object that the routes attach to.
api = Flask(__name__)


def not_found_error():
    """Standard (body, 404) response for an unknown task id."""
    message = "This task ID was not found."
    return message, 404


@api.route("/", methods=["GET"])
def get_all_notes():
    return jsonify(notes.get_all())


@api.route("/<int:note_id>", methods=["GET"])
def get_note(note_id: int):
    if notes.valid_id(note_id):
        return jsonify(notes.get(note_id))
    else:
        return not_found_error()


@api.route("/<int:note_id>", methods=["DELETE"])
def delete_note(note_id: int):
    if notes.valid_id(note_id):
        notes.delete(note_id)
        return jsonify(success=True)
예제 #22
0
def gen():
    """Generate Brevis notes for the global video_url according to the
    selected option (Overview / Notes / Notes+Ref) and zip the resulting
    documents into Brevis_Notes.zip (absolute path stored in `path`)."""
    global video_url
    global keywords
    global path
    global json_result
    global text
    global summary_result
    global scrape_json
    global option

    sec = pafy.new(video_url).length
    print(f"\nVideo duration in sec = {sec}\n")

    # THRESHOLDS
    # Keyframe sampling interval scales linearly with video length.
    DYNAMIC_INTERVAL = (sec / 60) * 100

    # Only three thresholds vary with video length; the original if/elif
    # chain duplicated every constant across all six branches.
    if sec <= 900:       # 0-15 min
        NUM_KEYWORDS, SUMMARY_PERCENT, SENTENCES_PER_PARA = 15, 60, 6
    elif sec <= 1800:    # 15-30 min
        NUM_KEYWORDS, SUMMARY_PERCENT, SENTENCES_PER_PARA = 18, 50, 5
    elif sec <= 2700:    # 30-45 min
        NUM_KEYWORDS, SUMMARY_PERCENT, SENTENCES_PER_PARA = 20, 40, 4
    elif sec <= 3600:    # 45-60 min
        NUM_KEYWORDS, SUMMARY_PERCENT, SENTENCES_PER_PARA = 22, 35, 4
    elif sec <= 7200:    # 1-2 hr
        NUM_KEYWORDS, SUMMARY_PERCENT, SENTENCES_PER_PARA = 25, 30, 4
    else:                # more than 2 hr
        NUM_KEYWORDS, SUMMARY_PERCENT, SENTENCES_PER_PARA = 30, 25, 4

    # Thresholds that are identical for every duration bucket.
    NON_TEXT_LEN = 50
    SIMILAR_DISTANCE = 20
    INTERVAL_KEYFRAMES = DYNAMIC_INTERVAL
    SENTENCE_SIMILARITY = 0.35
    WORDS_PER_PARA = 20
    PERCENT_REDUCE = 0.6

    start = time.perf_counter()

    if option == "Overview":
        # Overviews skip keyframe extraction; just make sure the output
        # folder exists for the later steps.  Robustness fix: makedirs
        # with exist_ok also covers 'res' itself being absent, which the
        # original os.mkdir crashed on.
        os.makedirs(os.path.join('res', 'out'), exist_ok=True)

    elif option == "Notes" or option == "Notes+Ref":
        # Keyframe Extraction (Output : 'out' folder)
        print("\nExtracting Keyframes\n")
        ip = ImageProcessing(video_url, keywords)
        ip.img_processing(text_threshold=NON_TEXT_LEN,
                          dis_threshold=SIMILAR_DISTANCE,
                          jump=INTERVAL_KEYFRAMES)

    # Paragraph and Headings (Output : paragraph_headings.txt)
    print("\nGenerating Paragraphs and Headings\n")
    pf = ParaFormation(summary_result)
    list_para = pf.paragraph(similarity_threshold=SENTENCE_SIMILARITY,
                             word_threshold=WORDS_PER_PARA,
                             percent_reduce=PERCENT_REDUCE)
    ph = ParaHeadings(list_para)
    # Called for its side effects (writes paragraph_headings.txt).
    title_para = ph.get_titles_paras(sentence_threshold=SENTENCES_PER_PARA)

    # Final Notes (Includes Web Scraping)
    print("\nGenerating Final Notes\n")

    # Reference scraping is only included for the "Notes+Ref" option.
    if option == "Overview" or option == "Notes":
        scrape_json = {}

    notes = Notes(video_url, scrape_json)
    notes.generate_notes()
    print("\nBrevis-Notes.docx Generated\n")

    with ZipFile('Brevis_Notes.zip', 'w') as zip:
        print("Writing zip")
        # The PDF is only produced on Windows, so add it conditionally.
        if os.path.exists(os.path.join('res', 'Brevis-Notes.pdf')):
            zip.write(os.path.join('res', 'Brevis-Notes.pdf'),
                      arcname='Brevis-Notes.pdf')
        zip.write(os.path.join('res', 'Brevis-Notes.docx'),
                  arcname='Brevis-Notes.docx')

    path = os.path.abspath("Brevis_Notes.zip")

    # Clean up the temporary working directory.
    if os.path.exists('res'):
        shutil.rmtree('res')

    finish = time.perf_counter()

    print(f'Gen Function: Finished in {round(finish-start, 2)} second(s)')