def loadMf(self, filePath):
        """
        Extracts a Toontown Beta multifile.
        """

        extractor = Extractor.Extractor()
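        # write the contents next to the archive, stripping the ".mf" suffix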
        extractor.extract(Filename(filePath), Filename(filePath[:-3]))
Example #2
    def download_single(self, url, extra):

        download_url, source_url = self.find_download_link(url)
        hidden_url = self.find_hidden_url(url)

        if self.resolution == '480':
            download_url = download_url[0][1]
        else:
            try:
                download_url = source_url[1][1]
            except Exception:
                download_url = download_url[0][1]

        show_info = self.info_extractor(extra)
        output = self.check_output(show_info[0])
        Extractor(logger=self.logger,
                  download_url=download_url,
                  backup_url=source_url,
                  hidden_url=hidden_url,
                  output=output,
                  header=self.header,
                  user_agent=self.user_agent,
                  show_info=show_info,
                  settings=self.settings,
                  quiet=self.quiet)
Example #3
 def saisirUrl(self):
     url = input("please enter a url ")
     page = PageCheck(url)
     if page.urlChek() != " ":
         extract = Extractor(url)
         extract.extraction()
         print("the number of tables is {} ".format(
             extract.countTable(url)))
     else:
         print("the url is not valid")
Example #4
class PostHandler(BaseHTTPRequestHandler):

	ex = Extractor.Extractor()
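	# class attribute: a single Extractor instance shared by all handlers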

	def processed(self, data):
		# Parse `data`, update the model, then return the new model
		# data as JSON (Sidd will implement); user_id, question_id and
		# vote are expected to come from parsing `data`.
		self.model.add_vote(user_id, question_id, vote)
		out = self.model.get_graph_data()
		# Parse output into JSON of the right form
		return out
Example #5
 def lister(self):
     # read the list of urls, one per line
     with open("urls.txt", "r") as f:
         fichier_entier = f.read()
     files = fichier_entier.split("\n")
     for file in files:
         page = PageCheck(file)
         url = page.urlChek()
         if url != " ":
             extract = Extractor(url)
             extract.extraction()
             print("the number of tables is {} ".format(
                 extract.countTable(url)))
         else:
             print("the url is not valid")
Example #6
    def extractFile(self, filePath):
        """Extract the downloaded file.

        Args:
            filePath (str): The file path to be extract.

        Return:
            str: The folder where the file was extracted.
        """
        self.__extractor = Extractor.Extractor(self.__fileExt)
        uniqueName = str(uuid.uuid4())
        extractedPath = os.path.join(tempfile.gettempdir(), uniqueName)
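        # getFunction() is assumed to return the extraction routine that
        # matches the file extension passed to Extractor above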
        self.__extractor.getFunction()(self.__fileDownloaded, extractedPath)

        return extractedPath
Example #7
def main():
    #---------Information Extraction---------
    extractor = Extractor()
    OS_Data = extractor.get_os_information()
    Server_Data = extractor.get_server_data()
    Processor_Data = extractor.get_processor_information()
    Processes_data = extractor.get_processes_information()
    Users_Data = extractor.get_users_information()
    #-------------------------------------------

    #------Send Information to the API--------
    dictonary_set = {
        'OS': OS_Data,
        'Proccesor': Processor_Data,
        'Server': Server_Data,
        'Users': Users_Data,
        'Processes': Processes_data
    }

    post_api(config.URL_API, dictonary_set)
Example #8
def genetic_alg(next_gen,
                num_generations=200,
                max_size_gen=1000,
                size_final_gen=100,
                mutations_per_solution_max=50,
                name=""):
    print('start solution:' + str(next_gen[0].get_score()))
    for times in range(num_generations):
        next_gen = next_generation(next_gen, max_size_gen, size_final_gen,
                                   mutations_per_solution_max)
        print('iter :' + str(times) + ', name: ' + name)
        print('best solution:' + str(next_gen[0].get_score()))
        print('worst solution:' +
              str(next_gen[size_final_gen - 1].get_score()))
        print('start solution:' + str(next_gen[0].get_score()))
        # pickle the best solution; binary mode is required by pickle
        with open(name + '.obj', 'wb') as file_write:
            pickle.dump(next_gen[0], file_write)
        ex = Extractor.Extractor(next_gen[0].cars, name)
        ex.write()

    return next_gen[0]
Example #9
from Extractor import *
from Wrapper import *
from Sender import *

if __name__ == '__main__':

    extractor = Extractor()
    wrapper = Wrapper()
    sender = Sender()

    rawData = extractor.get_site()
    dataModel = wrapper.packData()

    print(rawData)
Example #10
import Extractor as ex
import question_generator as gen

# To speed up script, start servers:
##bash runStanfordParserServer.sh
##bash runSSTServer.sh

#Dish sample
#direct_path = "/Users/brandon/Documents/Northwestern Courses/Winter 2019/CS+Law Innovation Lab/Orrick, Harrington, & Sutcliffe/Documents/Dish_Sample.txt"

#Apple Brief
direct_path = '/Users/brandon/Documents/Northwestern Courses/Winter 2019/CS+Law Innovation Lab/Orrick, Harrington, & Sutcliffe/Documents/Test_Text.txt'

with open(direct_path, 'r') as file:
    brief = file.read()

test = ex.Extractor(brief)
qGen = gen.QuestionGenerator()
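# resolve pronouns to their referents so each sentence stands alone
# (silence=1 presumably suppresses parser output)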
test.fix_pronouns(silence=1)
sentences = test.get_sentences()
print(sentences)

for sentence in sentences:
    flashcard = qGen.generate_question(sentence)
    if flashcard:
        #print(type(flashcard), type(flashcard[0]))
        print("Question: {}\n\nAnswer: {}'\n-------------".format(
            flashcard[0]['Q'], flashcard[0]['A']))
Example #11
from Extractor import *
from Threads import *
from Curator import *

from gcamp_extractor import *

arguments = {
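    # dataset-specific tuning for this recording; keys follow
    # gcamp_extractor's Extractor parameters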
    'root': '/Users/stevenban/Documents/Data/20190917/binned',
    'numz': 20,
    'frames': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
    'offset': 0,
    't': 999,
    'gaussian': (25, 4, 3, 1),
    'quantile': 0.99,
    'reg_peak_dist': 40,
    'anisotropy': (6, 1, 1),
    'blob_merge_dist_thresh': 7,
    'mip_movie': True,
    'marker_movie': True,
    'infill': True,
    'save_threads': True,
    'save_timeseries': True,
    'suppress_output': False,
    'regen': False,
}

e = Extractor(**arguments)
e.calc_blob_threads()
e.quantify()
c = Curator(e)
Example #12
'''
@author: Yang
@time: 17-11-14 2:57 PM
'''
'''
extract text lines from a text file

acceleration dataset:
training data: 2000 | test data: 100

underfitting dataset:
training data: 500 | test data: 1000
'''
import Extractor

extractor = Extractor.Extractor(filename='text.txt')

extractor.load_data(trainingNum=2000, testNum=100)
extractor.save_data(target='dataset/text/acceleration/')

extractor.load_data(trainingNum=500, testNum=1000)
extractor.save_data(target='dataset/text/underfitting/')

extractor.charset()

import os

# imageGen.py: generate images for OCR
os.system('python imageGen.py')
Example #13
    def download_show(self, url):

        page = requests.get(url)
        soup = BeautifulSoup(page.content, 'html.parser')
        ep_range = self.ep_range
        links = []

        # collect the unique episode links from the show page
        for link in soup.findAll('a', {'class': 'sonra'}):
            if link['href'] not in links:
                links.append(link['href'])

        if self.exclude is not None:
            excluded = [
                i for e in self.exclude for i in links if re.search(e, i)
            ]
            links = [item for item in links if item not in excluded]

        season = "season-" + self.season

        if self.update == True:
            links = links[0:1]

        if len(ep_range) == 1:
            ep_range = '{0}-{0}'.format(ep_range)

        if ep_range == 'l5' or ep_range == 'L5':  # L5 (last five)
            links = links[:5]
            ep_range = 'All'
            season = 'season-All'

        if self.newest:
            links = links[0:1]
            ep_range = 'All'
            season = 'season-All'

        # narrow the links down to the requested season and episode range
        if season != "season-All" and ep_range != "All":
            episodes = [
                "episode-{0}".format(n) for n in range(int(ep_range[0]),
                                                       int(ep_range[1]) + 1)
            ]
            if season == 'season-1':
                matching = [
                    s for s in links if 'season' not in s or season in s
                ]
            else:
                matching = [s for s in links if season in s]

            matching = [
                s for s in matching for i in episodes
                if i == re.search(r'episode-[0-9]+', s).group(0)
            ]

        elif season != "season-All":
            if season == 'season-1':
                matching = [
                    s for s in links if 'season' not in s or season in s
                ]
            else:
                matching = [s for s in links if season in s]

        elif ep_range != 'All':
            episodes = [
                "episode-{0}".format(n) for n in range(int(ep_range[0]),
                                                       int(ep_range[1]) + 1)
            ]
            matching = [
                s for s in links for i in episodes
                if re.search("{0}-".format(i), s)
            ]

        else:
            matching = links

        if len(matching) < 1:
            matching.reverse()

        if self.threads is not None and self.threads != 0:
            if len(matching) == 1:
                for item in matching:
                    source_url, backup_url = self.find_download_link(item)
                    hidden_url = self.find_hidden_url(item)

                    if self.resolution == '480' or len(source_url[0]) > 2:
                        download_url = source_url[0][1]
                    else:
                        try:
                            download_url = source_url[1][1]
                        except Exception:
                            download_url = source_url[0][1]

                    show_info = self.info_extractor(item)
                    output = self.check_output(show_info[0])

                    Extractor(logger=self.logger,
                              download_url=download_url,
                              backup_url=backup_url,
                              hidden_url=hidden_url,
                              output=output,
                              header=self.header,
                              user_agent=self.user_agent,
                              show_info=show_info,
                              settings=self.settings,
                              quiet=self.quiet)

            else:
                count = 0

                # hand the matching links to self.threads parallel workers,
                # batch by batch
                while True:
                    processes_count = 0
                    processes = []
                    processes_url = []
                    processes_extra = []

                    if int(self.threads) > len(matching):
                        self.threads = 3

                    procs = ProcessParallel(print('', end='\n\n'))

                    for x in range(int(self.threads)):
                        try:
                            item = matching[count]
                            _, extra = self.is_valid(item)
                            processes.append(self.download_single)
                            processes_url.append(item)
                            processes_extra.append(extra)
                            count += 1
                        except Exception as e:
                            if self.logger == 'True':
                                print('Error: {0}'.format(e))

                    for x in processes:
                        procs.append_process(
                            x,
                            url=processes_url[processes_count],
                            extra=processes_extra[processes_count])
                        processes_count += 1

                    if '' in processes_extra:
                        # retry sequentially when a link yields no extra info
                        self.threads = None
                        self.download_show(url)
                        break

                    procs.fork_processes()
                    procs.start_all()
                    procs.join_all()
                    processes_url.clear()
                    processes_extra.clear()
                    processes.clear()
                    self.threads = self.original_thread
                    if count >= len(matching):
                        break

        else:
            for item in matching:
                source_url, backup_url = self.find_download_link(item)
                hidden_url = self.find_hidden_url(item)

                if self.resolution == '480' or len(source_url[0]) > 2:
                    download_url = source_url[0][1]
                else:
                    try:
                        download_url = source_url[1][1]
                    except Exception:
                        download_url = source_url[0][1]

                show_info = self.info_extractor(item)
                output = self.check_output(show_info[0])

                Extractor(logger=self.logger,
                          download_url=download_url,
                          backup_url=backup_url,
                          hidden_url=hidden_url,
                          output=output,
                          header=self.header,
                          user_agent=self.user_agent,
                          show_info=show_info,
                          settings=self.settings,
                          quiet=self.quiet)

            if self.original_thread is not None and self.original_thread != 0:
                self.threads = self.original_thread
Example #14
# pylint: disable=no-member
import time
import cv2
import numpy as np
import Extractor

W = 1920 // 2
H = 1080 // 2

F = 1
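# F is presumably a focal-length scale handed to the Extractor below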

extrac = Extractor.Extractor(F, H, W)


class Process():
    def process_frame(self, img):

        self.img = cv2.resize(img, (W, H))

        # matches is a 2-D array with the normalized, filtered point pairs
        matches = extrac.extract(self.img)

        print("%d matches" % (len(matches)))

        for pt1, pt2 in matches:

            # denormalize the pt1 and pt2 coordinates coming from the
            # filtering step so they can be displayed
            u1, v1 = extrac.denormalize(pt1)
            u2, v2 = extrac.denormalize(pt2)

            # draw a green circle for each keypoint
            # (completed as a sketch; radius and color values are assumed)
            cv2.circle(self.img, (u1, v1), 3, (0, 255, 0))
            cv2.circle(self.img, (u2, v2), 3, (0, 255, 0))
Example #15
			solution = Solution.Solution(cars, rule_out_rides, bonus, steps)
		if genetic:
			solutions = solutions + [solution]

			print('genetic algorithm:')

			solution = Solution.genetic_alg([solution], num_generations=1000, max_size_gen=500, size_final_gen=50, mutations_per_solution_max=50, name=file)

		score_new = solution.get_score()
		suma_total = suma_total + score_new
		print('solution score ' + str(score_new))
		if score_new > score:
			print('NEW SCORE IMPROVEMENT IN FILE: ' + file)
			best_scores[index_file] = score_new
			ex = Extractor.Extractor(solution.cars, file)
			ex.write()
			veces_mejorado = veces_mejorado + 1

		index_file = index_file + 1

	print('score: ' + str(suma_total/1000000.0) + ' M')
	if best_global < suma_total:
		print('You have improved the algorithm!')

	print('times improved: ' + str(veces_mejorado) + '/' + str(index_total))
	print('improvement: ' + str(sum(best_scores)-sum(old_best_scores)))
	suma_total = 0
	index_total = index_total + 1

Example #16
	def process(url):
		e = Extractor.Extractor()
		return e.get_out_data(url)
Example #17
 def __init__(self):
   self.extractor = Extractor()
   self.rootDirectory = ''
   self.movieDb = None
Example #18
    filemode='a')


def buildSequence(frameList):
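    """Run each frame through the CNN extractor and stack the features."""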
    sequence = []
    for image in frameList:
        features = model.extract(image)
        sequence.append(features)
    return np.array(sequence)


proc_csv = pd.read_pickle('dataset_with_file_list.pkl')

logging.info("Starting routine for features extraction with InceptionV3")

model = Extractor()

proc_csv.set_index('palavra', inplace=True)
folder = '/home/fabiana/Desktop/projeto-final-src/Classifier/InceptionV3_Features'

for n in ['5', '10', '15']:
    print('Number of keyframes: ' + n)
    for palavra, frameList in tqdm(proc_csv[f'files_list_{n}'].iteritems(),
                                   total=len(proc_csv)):
        if (len(frameList) < int(n)):
            # print(f"Word {palavra} got less than {n} key frames")
            logging.warning(f"Word {palavra} got less than {n} key frames")

        seq = buildSequence(frameList)
        np.save(f'{folder}/{n}/{palavra}', seq)