示例#1
0
    def test_loading(self):
        """
        Tests that the file dataset is loaded correctly.

        Loads the same JSON file once through FileManager.parse_json_dataframe
        and once through plain pandas.read_json, normalises the reference
        frame to the expected columns/dtypes, and compares the two with
        pandas.testing.assert_frame_equal (raises AssertionError on mismatch).
        """
        # Load through the class under test.
        f = fm.FileManager(self.path_10)
        df_from_method = f.parse_json_dataframe()
        # Build the reference frame from the SAME path as the loader under
        # test (was a hard-coded absolute Windows path, which broke the
        # test on any other machine).
        dtypes = {
            "visitor_uuid": "category",
            "visitor_useragent": "category",
            "visitor_country": "category",
            "subject_doc_id": "category",
            "event_type": "category",
            "event_readtime": "float32",
        }
        df_from_test = pd.read_json(self.path_10, lines=True).loc[:, [
            "visitor_uuid", "visitor_useragent", "visitor_country",
            "subject_doc_id", "event_readtime", "event_type"
        ]]
        # Missing read times are treated as zero, matching the loader.
        df_from_test["event_readtime"] = df_from_test["event_readtime"].fillna(
            0)
        df_from_test = df_from_test.astype(dtypes)
        # Compare left (method under test) and right (reference) dataframes.
        pdt.assert_frame_equal(left=df_from_method, right=df_from_test)
示例#2
0
def create_dot(gen_data, pdf=False, seperate_colors=False, trait=0):
    """Write the genealogy graph to a .dot file, optionally rendering a PDF.

    :param gen_data: genealogy data, stored in the module-global genealogy_data
    :param pdf: when True, render the .dot file to PDF via graphviz and then
        delete the intermediate .dot file
    :param seperate_colors: when True, color nodes by trait instead of
        grouping by generation (parameter name kept for compatibility)
    :param trait: trait index used when seperate_colors is True
    """
    global genealogy_data, dot_file

    genealogy_data = gen_data

    directory = "outputs/dot/"

    # Open the output file and write the graph header.
    dot_file = file_manager.FileManager(directory, get_genealogy_name() + ".dot")
    dot_file.write("graph \"" + get_genealogy_name() + "\"{")

    # Emit the graph configuration.
    config_graph()

    # Color by trait, or group by generation.
    if seperate_colors:
        # BUG FIX: this branch previously referenced the undefined name
        # `traits` (the parameter is `trait`), raising NameError whenever
        # seperate_colors was True.
        create_colors_labels(trait)
        create_colors(trait)
    else:
        create_labels()
        create_generations()

    create_member_attributes()
    create_relations()

    finish()

    if pdf:
        # NOTE(review): shell command built by string concatenation and run
        # under sudo; prefer subprocess.run([...], shell=False). Left as-is
        # pending confirmation of the deployment environment.
        os.system("sudo dot " + directory + get_genealogy_name() + ".dot -Tpdf -o " + directory + get_genealogy_name() + ".pdf")
        os.system("sudo rm " + directory + get_genealogy_name() + ".dot")
示例#3
0
 def test_filelines(self):
     """Verify that the dataset file reports the expected line count."""
     manager = fm.FileManager(self.path_10)
     # NOTE(review): `get_file_lines` is not called with parentheses here,
     # while check_file_format() elsewhere is — presumably it is a
     # property; confirm it is not a method missing a call.
     self.assertEqual(manager.get_file_lines, "10003", "Should be 10003")
示例#4
0
 def test_graph_600k(self):
     """Smoke-test: display the task 6 graph for the 600k dataset."""
     frame = fm.FileManager(self.path_600).parse_json_dataframe()
     processor = prc.DataProcessing(frame)
     processor.run_task_6(self.doc_id_600, self.user_id_600)
示例#5
0
 def test_filename(self):
     """Verify that the loaded file's name is reported correctly."""
     manager = fm.FileManager(self.path_10)
     # NOTE(review): `get_file_name` accessed without parentheses —
     # presumably a property; confirm against FileManager.
     self.assertEqual(manager.get_file_name, "issuu_cw2.json",
                      "Should be issuu_cw2.json")
示例#6
0
 def test_fileformat(self):
     """
     Tests if the file format is recognised as JSON.
     """
     # Create our class objects
     f = fm.FileManager(self.path_10)
     # assertTrue is the idiomatic unittest assertion for a boolean
     # result (was assertEqual(..., True)).
     self.assertTrue(f.check_file_format(), "Should be True for JSON")
示例#7
0
def articles_list(generator):
    """Build the articles-list output for a Pelican generator run."""
    pelican_settings = generator.settings
    html = parser_factory(html_parser.TYPE_HTML, pelican_settings)
    total = len(generator.articles)
    # Resolve the list settings and record the total article count in them.
    list_settings = settings.ArticlesListSettings().get(pelican_settings, total)
    list_settings['articles_total_count'] = total
    # Hand everything to the creator, which writes the output files.
    creator.Creator(file_manager.FileManager()).run(
        generator.articles, list_settings, html)
示例#8
0
 def __init__(self, host, port, identifier="main-", syncServer=None):
     """Set up the server, its file manager, and proxies to peer servers.

     :param host: host name used both for this server and for peer lookups
     :param port: port number shared by this server and its peers
     :param identifier: name prefix registered for this server
     :param syncServer: iterable of peer server names to create Pyro4
         proxies for (default None, meaning no peers)
     """
     self.server = Server(host, port, identifier)
     self.fileManager = file_manager.FileManager(identifier)
     self.lockSync = consistency.Sync()
     self.syncServer = list()
     # BUG FIX: the mutable default argument `syncServer=[]` is replaced
     # with a None sentinel (same observable behavior for all callers).
     for server in (syncServer or []):
         url = "PYRONAME:%s@%s:%d" % (server, host, port)
         self.syncServer.append(Pyro4.Proxy(url))
     self.Sync()
示例#9
0
 def test_format_time(self):
     """Check millisecond-to-d:h:m:s formatting.

     Reference conversion (1234567800 ms == 14d 6h 56m 7s) taken from:
     https://www.convert-me.com/en/convert/time/millisecond/millisecond-to-dhms.html?u=millisecond&v=1%2C234%2C567%2C800
     """
     frame = fm.FileManager(file_path=self.path_100).parse_json_dataframe()
     processor = prc.DataProcessing(frame)
     expected = "14d : 6h : 56m : 7s"
     actual = processor._format_time(1234567800)
     self.assertEqual(actual, expected, "Should be %s" % expected)
示例#10
0
 def __init__(self, n_devices, logger, server=True):
     """Build the simulated network.

     :param n_devices: number of devices in the network
     :param logger: processes the log file
     :param server: when True a central server is present (False = P2P)
     """
     self.logger = logger
     self.server = server
     self.env = simpy.Environment()
     self.file_manager = fm.FileManager(self.logger)
     # Populated by generate_network below.
     self.devices = {}
     self.shared_folders = {}
     self.stats = cs.StatsManager(n_devices, self.devices, self.env, server)
     self.generate_network(n_devices)
示例#11
0
 def test_also_likes_readers_100k(self):
     """Check the relevant-reader set for a doc/visitor in the 100k dataset."""
     frame = fm.FileManager(file_path=self.path_100).parse_json_dataframe()
     processor = prc.DataProcessing(frame)
     # Only one other reader is expected for this document/visitor pair.
     expected = {'4108dc09bfe11a0c'}
     actual = processor.get_relevant_readers(self.doc_id_100, self.user_id_100)
     self.assertEqual(actual, expected, "Should be %s" % expected)
示例#12
0
 def test_also_likes_readers_600k(self):
     """Check the relevant-reader set for a doc/visitor in the 600k dataset."""
     frame = fm.FileManager(self.path_600).parse_json_dataframe()
     processor = prc.DataProcessing(frame)
     # Four readers are expected for this document/visitor pair.
     expected = {
         '383508ea93fd2fd1', '3f64bccfd160557e', '1f891eb0b573e42c',
         '7134a88f8b201d31'
     }
     actual = processor.get_relevant_readers(self.doc_id_600, self.user_id_600)
     self.assertEqual(actual, expected, "Should be %s" % expected)
示例#13
0
def run(torrent):
    """Drive a torrent session until completion or Ctrl-C.

    Verifies any already-downloaded pieces against the torrent's piece
    hashes, wires the file manager and the peer engine together with
    bounded memory channels, and runs both under a trio nursery.

    :param torrent: parsed torrent metadata object
    """
    try:
        # Create the file wrapper; returns piece hashes if the file exists.
        file_wrapper = file_manager.FileWrapper(torrent=torrent)
        existing_hashes = file_wrapper.create_file_or_return_hashes()

        # Mark pieces whose on-disk hash already matches as complete.
        if existing_hashes:
            for index, h in enumerate(existing_hashes):
                piece_info = torrent.piece_info(index)
                if piece_info.sha1hash == h:
                    torrent._complete[
                        index] = True  # TODO remove private property access

        # Bounded channels connecting the engine and the file manager.
        s_complete_pieces, r_complete_pieces = trio.open_memory_channel(
            config.INTERNAL_QUEUE_SIZE)
        s_write_confirmations, r_write_confirmations = trio.open_memory_channel(
            config.INTERNAL_QUEUE_SIZE)
        s_blocks_to_read, r_blocks_to_read = trio.open_memory_channel(
            config.INTERNAL_QUEUE_SIZE)
        s_blocks_for_peers, r_blocks_for_peers = trio.open_memory_channel(
            config.INTERNAL_QUEUE_SIZE)

        file_engine = file_manager.FileManager(
            file_wrapper=file_wrapper,
            pieces_to_write=r_complete_pieces,
            write_confirmations=s_write_confirmations,
            blocks_to_read=r_blocks_to_read,
            blocks_for_peers=s_blocks_for_peers,
        )

        engine = Engine(
            torrent=torrent,
            complete_pieces_to_write=s_complete_pieces,
            write_confirmations=r_write_confirmations,
            blocks_to_read=s_blocks_to_read,
            blocks_for_peers=r_blocks_for_peers,
        )

        # FIX: renamed from `run` so the coroutine no longer shadows this
        # module-level function inside its own body.
        async def _session():
            async with trio.open_nursery() as nursery:
                nursery.start_soon(file_engine.run)
                nursery.start_soon(engine.run)

        trio.run(_session)
    except KeyboardInterrupt:
        print()
        print("Shutting down without cleanup...")
示例#14
0
    def create_dot(self):
        """Create the .dot output file and emit the whole graph into it."""
        # Open the output file and write the graph header line.
        self.dot_file = file_manager.FileManager("outputs", self.name + ".dot")
        self.dot_file.write("graph " + self.name + "{")

        self.generations_labels = []

        # The DOT parameter helper drives most of the emission below.
        self.dot_params = dot_parameters.DOTParameters(self, self.geneology)

        # Emit each graph section in order, then close out the file.
        for emit_section in (
                self.dot_params.config_graph,
                self.create_generations_labels,
                self.create_generations,
                self.create_member_attributes,
                self.create_relations,
                self.finish,
        ):
            emit_section()
示例#15
0
def select_file():
    """Let the user pick a dataset file and load it if it is valid JSON."""
    file_frame.filename = filedialog.askopenfilename(
        initialdir="/dataAnalysis/data", title="Select Dataset",
        filetypes=FILE_TYPES)
    # Guard clauses: bail out as soon as any validation step fails.
    if not file_frame.filename:
        return
    manager = fm.FileManager(file_frame.filename)
    if not manager.check_file_format():
        # Selected file is not JSON.
        messagebox.showerror(title="Bad file format",
                             message="Please load JSON file only.")
        return
    global df
    df = manager.parse_json_dataframe()
    if df.empty:
        messagebox.showerror("Value Error",
                             "The JSON file you are trying to load didn't contain valid dictionaries. Please try again")
        return
    global dataset
    dataset = pr.DataProcessing(df)
    display_file_info(manager)
示例#16
0
 def test_also_likes_documents_100k(self):
     """Check the documents liked by related readers in the 100k dataset.

     The expected value maps each relevant reader to the set of documents
     that reader has read.
     """
     frame = fm.FileManager(file_path=self.path_100).parse_json_dataframe()
     processor = prc.DataProcessing(frame)
     readers = processor.get_relevant_readers(self.doc_id_100,
                                              self.user_id_100)
     actual = processor.get_documents(readers)
     expected = {
         '4108dc09bfe11a0c': {
             '100405170355-00000000ee4bfd24d2ff703b9147dd59',
             '100806162735-00000000115598650cb8b514246272b5',
             '100806172045-0000000081705fbea3553bd0d745b92f',
             '101122221951-00000000a695c340822e61891c8f14cf'
         }
     }
     self.assertEqual(actual, expected, "Should be %s" % expected)
示例#17
0
import file_manager


# Exercise FileManager: print the file, append to it, print it again.
text_file = file_manager.FileManager("tekst")
print(text_file.read_file())
text_file.update_file(" - update test")
print(text_file.read_file())
示例#18
0
def main():
    """Entry point: scrape and download items for every search word.

    Reads search words from words.txt, searches each one with the scrapper,
    and downloads up to 50 not-yet-downloaded items per word into a
    per-word folder.
    """
    # Path to the bundled chromedriver.
    driverPath = os.path.dirname(
        os.path.abspath(__file__)) + '/chromedriver_win32/chromedriver.exe'

    # Collaborating objects.
    objFileManager = fm.FileManager()
    objScrapper = ps.PolyScrapper(driverPath)

    # Words to search, one per line in words.txt (strip trailing newlines).
    searchTexts = [word.rstrip() for word in objFileManager.read_all_words()]

    # Cap on downloads per searched word.
    maxDownloadCountPerItem = 50

    for searchText in searchTexts:
        print("--------------------> Searching:" + searchText)

        # Destination sub-folder for this word.
        objFileManager.create_folder(searchText)

        objScrapper.search(searchText)
        time.sleep(4)

        # Filter the found items.
        objScrapper.filter()
        time.sleep(2)

        # Never try to download more items than the search returned.
        itemCount = int(objScrapper.get_item_count())
        maxItemCount = min(itemCount, maxDownloadCountPerItem)
        print("--------------------> ItemCount:" + str(itemCount))

        # Scroll a few times so enough items are loaded on screen
        # (was four copy-pasted scroll/sleep pairs).
        for _ in range(4):
            objScrapper.scroll_down()
            time.sleep(1)

        # Download the found items one by one (item indices are 1-based).
        for index in range(maxItemCount):
            realIndex = index + 1
            print("Index:" + str(realIndex) + " at " +
                  str(datetime.datetime.now()))

            if objScrapper.click_element(realIndex):
                time.sleep(2)

                if objScrapper.is_page_exist():
                    # Remove non-ascii characters from the item name.
                    name = replace_non_ascii(objScrapper.get_name())

                    # Skip items that were already downloaded.
                    if not objFileManager.has_file(name, searchText):
                        if objScrapper.download():
                            # Poll until the download finishes.
                            while not objFileManager.is_download_finished():
                                time.sleep(0.25)

                            # Move the downloaded file to its folder.
                            objFileManager.create_folder(searchText)
                            objFileManager.cut_and_paste_last_file(
                                searchText, name)
                        else:
                            print("Download failed")
                            objScrapper.return_to_first_tab()
                    else:
                        print("File already exist")
                        objScrapper.return_to_first_tab()
                else:
                    # BUG FIX: this branch previously printed
                    # "File already exist" although the item page was
                    # missing, which made logs misleading.
                    print("Page does not exist")
                    objScrapper.return_to_first_tab()
            time.sleep(0.5)

    objScrapper.quit()
    print("ALL DONE!")
示例#19
0
class Entity:
    """Base class for anything that can fight.

    Class-level attributes act as defaults and are rebound per instance
    when changed. `file_stuff` is one FileManager shared by all entities.
    """

    name = ""
    health = 0
    attack = 0
    defence = 0
    money = 0
    alive = True

    file_stuff = file_manager.FileManager()

    def attack_target(self, thing_being_attacked):
        """Roll a d20 and resolve an attack against the target.

        15+  : critical hit (double attack minus target defence).
        10-14: normal hit (attack minus target defence).
        5-9  : miss.
        1-4  : fumble — the attacker damages themselves.
        """
        roll = self.dice_roll(20)
        print("{} rolled a {}".format(self.name, roll))
        if roll >= 15:
            damage = self.attack * 2 - thing_being_attacked.defence
            thing_being_attacked.set_health(thing_being_attacked.health -
                                            damage)
            print("{} hit doing critical damage of {}".format(
                self.name, damage))
        elif roll >= 10:
            damage = self.attack - thing_being_attacked.defence
            thing_being_attacked.set_health(thing_being_attacked.health -
                                            damage)
            print("{} hit doing damage of {}".format(self.name, damage))
        elif roll >= 5:
            print("{} missed!".format(self.name))
        else:
            # BUG FIX: the fumble branch was nested inside the 5-9 miss
            # branch (`if roll < 5` under `if roll >= 5 ...`) and therefore
            # unreachable; rolls below 5 now hurt the attacker as intended.
            damage = self.attack * 2 - thing_being_attacked.defence
            self.set_health(self.health - damage)
            print("{} missed and hit themselves dealing {} damage".format(
                self.name, damage))

    def run_from(self, thing_attacking):
        """Attempt to flee; on failure take double the attacker's attack.

        :return: True if the escape succeeded, False otherwise
        """
        roll = self.dice_roll(20)
        if roll >= 15:
            print("{} got away from {} safely".format(self.name,
                                                      thing_attacking.name))
            return True
        else:
            damage = thing_attacking.attack * 2
            self.set_health(self.health - damage)
            print("{} failed to get away and got hit doing {} damage".format(
                self.name, damage))
            return False

    def get_health(self):
        """Return the entity's current health."""
        return self.health

    def set_health(self, number):
        """Set the entity's health to `number`."""
        self.health = number

    def dice_roll(self, sides):
        """Return a uniform roll from 1 to `sides` inclusive.

        BUG FIX: `randrange(1, sides)` excluded the top face, so a d20
        could never roll 20; `randint` includes both endpoints.
        """
        return random.randint(1, sides)

    def check_if_alive(self):
        """Report whether the entity is alive; clears `alive` on death.

        :return: True when health is above zero, False otherwise
        """
        if self.health <= 0:
            print("{} DIED!".format(self.name))
            self.alive = False
            return False
        else:
            print("{} health is {}".format(self.name, self.health))
            return True
示例#20
0
    def add(self, a, b):
        print(a + b)

    def difference(self, s, b):
        print(a - b)

    def multiply(self, a, b):
        print(a * b)

    def divide(self, a, b):
        print(a / b)


class ScienceCalculator(Calculator):
    """Calculator extended with exponentiation."""

    def potega(self, a, b):
        """Print a raised to the power b.

        BUG FIX: `a ^ b` is bitwise XOR in Python, not exponentiation;
        the method name (Polish "potega" = power) makes the intent clear,
        so use `**`.
        """
        print(a ** b)


def tyl(tekst):
    print(tekst[::-1])


# Show the file before and after an in-place edit.
before = file_manager.FileManager("pliczek.txt")
print("Przed edycja:\n")
before.read_file()
before.update_file()
before.zamknij()
after = file_manager.FileManager("pliczek.txt")
print("Po edycji:\n")
after.read_file()
示例#21
0
 def __init__(self, host, port, identifier="main-"):
     """Create the wrapped Server plus a FileManager for it.

     :param host: host name the server binds to
     :param port: port number the server listens on
     :param identifier: name prefix registered for this server
     """
     self.server = Server(host, port, identifier)
     self.fileManager = file_manager.FileManager()
示例#22
0
import screen
import file_manager
import store
import entity
import time
import json

# Module-level helpers shared by the whole game script.
# NOTE(review): `file_stuff` and `filer` are two separate FileManager
# instances; only `filer` is visibly used below — confirm whether
# `file_stuff` can be removed.
file_stuff = file_manager.FileManager()
game_is_running = True

filer = file_manager.FileManager()
screen_stuff = screen.Screen()


def opening_screen():
    print("Welcome")
    print()
    print(
        "what do you want to do?\nfor new game type new\nto load game type load\nTo quit type quit"
    )
    running = True
    while running:
        user_input = input()
        if user_input == "new":
            return character_creator()
        elif user_input == "load":
            while True:
                character_name = input("what is your characters name: ")
                file_exists = filer.check_if_file_exists(
                    "json/{}.json".format(character_name))
                if file_exists == True:
示例#23
0
def main():
    """Dispatcher simulation: initialise every module, read the input
    files, and schedule/execute processes one time unit at a time.

    Input files (overridable via argv[1]/argv[2]): a process list
    ('processes.txt', comma-separated ints per line) and a filesystem
    description ('files.txt').
    """

    # Initialise the modules.
    manager = pm.ProcessManager()
    memory = mm.MemoryManager()
    io = iom.IOManager()
    filesystem = fm.FileManager()
    logger = log.Logger()

    # Use the two command-line arguments when given, else the defaults.
    if len(sys.argv) > 2:
        procFile = sys.argv[1]
        arqFile = sys.argv[2]
    else:
        procFile = 'processes.txt'
        arqFile = 'files.txt'

    # Open and read the process file.
    with open(procFile, 'r') as f:
        procs = [[int(x) for x in line.split(',')] for line in f]
        processes = [pm.Process(x).__dict__ for x in procs]

    # Open and read the filesystem description file: first two lines are
    # block and segment counts, then one line per file, then operations.
    with open(arqFile, 'r') as f:
        temp = f.read().splitlines()
        filesystem.qtd_blocos = int(temp[0])
        filesystem.qtd_segmentos = int(temp[1])
        filesystem.arquivos = [
            fm.File(temp[i].replace(' ', '').split(',')).__dict__
            for i in range(2, filesystem.qtd_segmentos + 2)
        ]
        filesystem.operacoes = [
            fm.FileOperation(temp[i].replace(' ', '').split(',')).__dict__
            for i in range(filesystem.qtd_segmentos + 2, len(temp))
        ]

    filesystem.inicia_disco()
    # Sort processes by arrival time.
    manager.fila_principal = list(
        sorted(processes, key=operator.itemgetter('tempo_init')))
    # The quantum is one, so time is handled with a plain counter t.
    t = 0
    while (True):
        # Admit any not-yet-admitted processes whose arrival time is t.
        while (manager.fila_principal):
            if (manager.fila_principal[0]['tempo_init'] == t):
                manager.escalona_processo_geral()
            else:
                break
        # Move user processes from the user queue to the priority queues.
        while (manager.escalona_processo_usuario()):
            pass
        # If nothing is running, pick the next process to dispatch
        # (anything already running would be a real-time process).
        if (not (manager.em_execucao)):
            # Real-time processes get first pick, if any exist.
            for novo_processo in manager.fila_tempo_real:
                # Try to place it in memory, if there is room.
                novo_processo['PID'] = manager.gera_pid()
                offset = memory.salva(novo_processo)
                # Put it into execution.
                if (offset is not None):
                    manager.em_execucao = manager.fila_tempo_real.pop(
                        manager.fila_tempo_real.index(novo_processo))
                    manager.em_execucao['offset'] = offset
                    logger.dispatch(manager.em_execucao)
                    break
                # Roll the PID back when it cannot be stored in memory.
                else:
                    novo_processo['PID'] = None
                    manager.ultimoPID -= 1

            # for/else: runs only when no real-time process was dispatched
            # (the loop above completed without `break`) — then user
            # processes are considered.
            else:
                # Look for a priority-1 process that can run.
                for novo_processo in manager.prioridade_1:
                    # Process not yet in memory (never executed).
                    if novo_processo['offset'] is None:
                        # Check whether its IO can be allocated first.
                        novo_processo['PID'] = manager.gera_pid()
                        if (io.aloca(novo_processo)):
                            offset = memory.salva(novo_processo)
                            novo_processo['offset'] = offset
                            if offset is not None:
                                logger.dispatch(novo_processo)
                    offset = novo_processo['offset']
                    # If the process can run, load it onto the CPU.
                    if (offset is not None):
                        manager.em_execucao = manager.prioridade_1.pop(
                            manager.prioridade_1.index(novo_processo))
                        break
                    else:
                        novo_processo['PID'] = None
                        manager.ultimoPID -= 1

                # No priority-1 process could be dispatched (none pending,
                # or no memory/IO resources): try priority 2.
                else:
                    for novo_processo in manager.prioridade_2:
                        # Process not yet in memory.
                        if novo_processo['offset'] is None:
                            # Check whether its IO can be allocated first.
                            novo_processo['PID'] = manager.gera_pid()
                            if (io.aloca(novo_processo)):
                                offset = memory.salva(novo_processo)
                                novo_processo['offset'] = offset
                                if offset is not None:
                                    logger.dispatch(novo_processo)
                        offset = novo_processo['offset']
                        # If the process can run, load it onto the CPU.
                        if (offset is not None):
                            manager.em_execucao = manager.prioridade_2.pop(
                                manager.prioridade_2.index(novo_processo))
                            break
                        else:
                            novo_processo['PID'] = None
                            manager.ultimoPID -= 1

                    # No priority-1 or -2 process could be dispatched:
                    # finally try priority 3.
                    else:
                        for novo_processo in manager.prioridade_3:
                            # Process not yet in memory.
                            if novo_processo['offset'] is None:
                                # Check whether its IO can be allocated.
                                novo_processo['PID'] = manager.gera_pid()
                                if (io.aloca(novo_processo)):
                                    offset = memory.salva(novo_processo)
                                    novo_processo['offset'] = offset
                                    if offset is not None:
                                        logger.dispatch(novo_processo)
                            offset = novo_processo['offset']
                            # If the process can run, load it onto the CPU.
                            if (offset is not None):
                                manager.em_execucao = manager.prioridade_3.pop(
                                    manager.prioridade_3.index(novo_processo))
                                break
                            else:
                                novo_processo['PID'] = None
                                manager.ultimoPID -= 1
            if (manager.acabou()):
                # Exit condition: every queue is empty and all processes
                # have already arrived.
                break
        # Execute the current process.
        if (manager.em_execucao):
            # Decrement remaining time and count the executed instruction.
            manager.em_execucao['tempo_processador'] -= 1
            manager.em_execucao['execucoes'] += 1
            # Log the execution step.
            logger.executa(manager.em_execucao)
            # After execution: when the process's time is up, apply its
            # file operations and release memory and IO resources.
            if manager.em_execucao['tempo_processador'] == 0:
                filesystem.opera_processo(manager.em_execucao)
                io.libera(manager.em_execucao)
                memory.mata(manager.em_execucao)
                manager.em_execucao = {}
            # The quantum is one, so user processes (priority > 0) leave
            # the CPU every tick and rejoin their priority queue.
            elif manager.em_execucao['prioridade'] > 0:
                if manager.em_execucao['prioridade'] == 1:
                    manager.prioridade_1.append(manager.em_execucao)
                elif manager.em_execucao['prioridade'] == 2:
                    manager.prioridade_2.append(manager.em_execucao)
                elif manager.em_execucao['prioridade'] == 3:
                    manager.prioridade_3.append(manager.em_execucao)
                manager.em_execucao = {}
        # Advance one time unit.
        t += 1

    # Print the filesystem report.
    logger.disco(filesystem)
示例#24
0
def _load_dataset(path):
    """Load the JSON dataset at `path` into a DataProcessing instance.

    :param path: path to the line-delimited JSON dataset
    :return: a DataProcessing wrapper, or None when the parsed frame is empty
    """
    df = fm.FileManager(path).parse_json_dataframe()
    if df.empty:
        return None
    return pr.DataProcessing(df)


def run(args):
    """Dispatch the command-line task selected by ``args.task``.

    :param args: parsed argparse namespace with ``task``, ``file`` and,
        for the relevant tasks, ``docid`` and ``userid``
    :return: "No conditions set" when ``args.task`` matches no known task,
        otherwise None
    """
    print("")  # Leave a gap
    print("Starting task %s" % args.task)
    # Each branch previously repeated the FileManager / parse / empty-check
    # boilerplate; it now lives in _load_dataset. Task 6 gains the same
    # empty-frame guard for consistency with the other tasks.
    if args.task == "2a":
        dataset = _load_dataset(args.file)
        if dataset is not None:
            dataset.histogram_country(args.docid)
    elif args.task == "2b":
        dataset = _load_dataset(args.file)
        if dataset is not None:
            dataset.histogram_continent(args.docid)
    elif args.task == "3a":
        dataset = _load_dataset(args.file)
        if dataset is not None:
            dataset.histogram_browsers_a()
    elif args.task == "3b":
        dataset = _load_dataset(args.file)
        if dataset is not None:
            dataset.histogram_browsers_b()
    elif args.task == "4":
        dataset = _load_dataset(args.file)
        if dataset is not None:
            output = dataset.visitor_readtimes()
            print("Reader(s):        |  Total readtime(s): ")
            print("------------------------------------------------")
            for k, v in output.items():
                print('%s  |  %s' % (k, v))
    elif args.task == "5":
        dataset = _load_dataset(args.file)
        if dataset is not None:
            readers, output = dataset.run_task_5(args.docid, args.userid)
            print("Relevant readers for the document:")
            print("Reader(s)  ")
            print("-----------")
            for reader in readers:
                print("%s      |" % reader[-4:])
            print("")
            print("Top 10 most read (also-like) documents: ")
            print("Document(s)  |   Times Read")
            print("----------------------------")
            for documents, count in output.items():
                # Mark the queried document itself with (*).
                if documents[-4:] == args.docid[-4:]:
                    print("%s (*)     |   %s" % (documents[-4:], count))
                else:
                    print("%s         |   %s" % (documents[-4:], count))
            print("Where (*) is the input document.")
    elif args.task == "6":
        dataset = _load_dataset(args.file)
        if dataset is not None:
            dataset.run_task_6(args.docid, args.userid)
    else:
        return "No conditions set"
示例#25
0
import config_parser
import file_manager

if __name__ == "__main__":
    # Read the configuration, then archive desktop files to the
    # configured destination folder.
    parser = config_parser.ConfigParser()
    manager = file_manager.FileManager()

    destination = parser.get_destination()
    manager.archive_desktop_files(destination)
示例#26
0
    def run(self):
        """Serve client requests until ``self.main.server_running`` clears.

        Protocol: each connection sends a command token followed by
        length-prefixed values. Supported commands:
        - GET_MEME: password hash -> streams a random file path + contents
        - MOVE_FILE: password hash, source, destination -> moves a file
        """
        self.logger_function('Server started')
        while self.main.server_running:
            try:
                connection, address = self.socket.accept()
            except OSError:
                # accept() timed out or the socket was closed; loop back
                # to re-check the running flag (was a bare `except:` that
                # also swallowed KeyboardInterrupt).
                continue

            self.logger_function('Received connection from ' + str(address))
            connection.settimeout(1)
            data = connection.recv(65535)
            configuration_password_hash = self.configuration_menu.get_parameter(
                'password_hash')

            if data.startswith(b'GET_MEME'):
                self.logger_function('Meme was requested')
                password_hash, offset = ServerThread.parse_length_value_string(
                    data, len('GET_MEME'))
                if not password_hash == configuration_password_hash.encode():
                    connection.send(b'WRONG_PASSWORD')
                    self.logger_function('Wrong password')
                else:
                    file_mngr = file_manager.FileManager()
                    folder_list = json.loads(
                        self.configuration_menu.get_parameter('folders'))
                    extension_list = json.loads(
                        self.configuration_menu.get_parameter('extensions'))
                    try:
                        file_mngr.open_random_file(folder_list, extension_list)
                        path = file_mngr.get_file_path().encode()
                        path_length = len(path)
                        # Path is length-prefixed (2 bytes, big-endian),
                        # then the file is streamed in chunks.
                        connection.send(path_length.to_bytes(2, 'big') + path)
                        data = file_mngr.read()
                        while data:
                            connection.send(data)
                            data = file_mngr.read()
                        file_mngr.close_file()
                        self.logger_function('Sent meme')
                    except Exception:
                        # Best-effort: no readable file found (narrowed
                        # from a bare `except:`).
                        self.logger_function('Didn\'t find any memes to send')
            elif data.startswith(b'MOVE_FILE'):
                self.logger_function('File moving was requested')
                password_hash, offset = ServerThread.parse_length_value_string(
                    data, len('MOVE_FILE'))
                if not password_hash == configuration_password_hash.encode():
                    # BUG FIX: previously sent the str 'WRONG_PASSWORD',
                    # which raises TypeError on a socket; send bytes like
                    # the GET_MEME branch does.
                    connection.send(b'WRONG_PASSWORD')
                    self.logger_function('Wrong password')
                else:
                    source, offset = ServerThread.parse_length_value_string(
                        data, offset)
                    destination, offset = ServerThread.parse_length_value_string(
                        data, offset)
                    new_path = file_manager.FileManager.move_file(
                        source.decode("utf-8"), destination.decode("utf-8"))
                    if new_path:
                        connection.send(new_path.encode())
                        self.logger_function('Moved successfully')
                    else:
                        connection.send(b'NOK')
                        self.logger_function('Moving failed')
            connection.close()
        self.socket.close()
        self.logger_function('Server stopped')
示例#27
0
def run():
    """Run ``trial_number`` random phases, tracking the best (highest) total
    cascade cost seen so far, checking two conjectured cost-bound properties,
    and printing per-trial progress.

    Relies on module-level ``k``, ``l``, ``choose_mode`` and ``trial_number``,
    plus the project modules ``file_manager`` and ``random_phase``.
    """
    files = file_manager.FileManager("best_examples")
    best_score = files.get_or_create_file(k, l, choose_mode)

    scores = []
    moves = []
    bucket_count = int(np.log2(k))
    cascade_repartition = np.zeros(bucket_count)
    for trial in range(trial_number):
        trial_repartition = np.zeros(bucket_count)
        started_at = time.time()
        phase = random_phase.RandomPhase(k, l, choose_mode)
        phase.start_phase()
        total_cost = sum(phase.cascade_costs)

        # Keep only the histories whose total cost matches the best seen.
        if total_cost > best_score:
            best_score = total_cost
            files.redo_file(best_score, phase.history)
            scores.clear()
            scores.append(phase.cascade_costs)
            moves.clear()
            moves.append(phase.g.get_moves())
        elif total_cost == best_score:
            files.update_file(phase.history)
            scores.append(phase.cascade_costs)
            moves.append(phase.g.get_moves())

        # Property 1: after each cascade, cost <= l*s*log(s), s = revealed edges.
        # NOTE(review): the slice [:idx] excludes cascade idx while the bound
        # uses idx + 1 -- looks like an off-by-one (property 2 below uses
        # [:idx + 1]); confirm intent before changing.
        for idx in range(0, len(phase.cascade_costs)):
            if sum(phase.cascade_costs[:idx]) > l * (idx + 1) * np.log2(idx + 1):
                files.store_counter_example("best_examples",
                                            "false lslog(s) property ",
                                            phase.history,
                                            phase.cascade_costs, idx)
                print("counter example!! false lslog(s) property")
                break

        # Property 2: after each cascade, cost <= max(2, l*s*log(s)).
        for idx in range(0, len(phase.cascade_costs)):
            bound = max(2, l * (idx + 1) * np.log2(idx + 1))
            if sum(phase.cascade_costs[:idx + 1]) > bound:
                files.store_counter_example("best_examples",
                                            "false max(2, lslog(s) property) ",
                                            phase.history,
                                            phase.cascade_costs, idx)
                print("counter example!! false max(2, lslog(s) property")
                break

        # Histogram of this trial's cascades by log2 cost bucket, then keep
        # the per-bucket maximum across all trials.
        # NOTE(review): int(cost / l) == 0 would feed log2(0) -- presumably
        # every cascade costs at least l; confirm.
        for cost in phase.cascade_costs:
            trial_repartition[int(np.log2(int(cost / l)))] += 1
        cascade_repartition = np.maximum(cascade_repartition, trial_repartition)

        # Rough ETA extrapolated from this single trial's duration.
        elapsed = time.time() - started_at
        remaining = (trial_number - trial) * elapsed
        hours = remaining // 3600
        minutes = (remaining - hours * 3600) // 60
        print(trial * 100 / trial_number, "% rem. time:", int(hours), "h",
              int(minutes), "m score:", total_cost, "best :", best_score)
        print("cas. repart.: ", cascade_repartition)

    print("best score : ", best_score)
示例#28
0
 def getCreator(self):
     """Return a Creator wired to a FileManager whose ``save_file`` is
     stubbed to always report success, so tests never touch the disk."""
     manager = file_manager.FileManager()
     manager.save_file = MagicMock(return_value=True)
     return creator.Creator(manager)
示例#29
0
        elif command == b'\x0D':  # RS422: Begin to use the rover line for extended period of time
            files_transferring = True
            files_transfer_thread = threading.Thread(
                target=rover.transfer_sample)
            files_transfer_thread.start()
        elif command == b'\x0E':
            rover.clock_sync(packet)

        elif command == b'\xF0':
            debug.pi_tune()


if __name__ == "__main__":
    if DEBUG_MODE:
        fm = file_manager.FileManager(DEBUG_SD_PATH, DEBUG_FLASH_PATH)
    else:
        fm = file_manager.FileManager(SD_PATH, FLASH_PATH)

    # Set up our serial connection to the rover
    rover_serial = oasis_serial.OasisSerial("/dev/ttyS1",
                                            debug_mode=DEBUG_MODE,
                                            debug_tx_port=ROVER_TX_PORT,
                                            debug_rx_port=ROVER_RX_PORT,
                                            rx_print_prefix="BBB RX] ")
    # Set up a packet manager to process the OSPP packets we send and recieve
    packet_manager = ospp.PacketManager(rover_serial)

    tlc_serial = oasis_serial.OasisSerial("/dev/ttyS2",
                                          debug_mode=DEBUG_MODE,
                                          debug_tx_port=TLC_TX_PORT,
示例#30
0
# Exercise script for the helpers defined earlier in this file.
# NOTE(review): list1, lists, info, delete, convert, reverse, Calculator,
# ScienceCalculator and file_manager are defined above this chunk -- the
# behavior notes below are inferred from names/arguments; confirm against
# their definitions.
list2 = [1, 2, 3, 4, 5, 6, 7]
lists(list1, list2)

# 2 -- presumably transforms letter case of the given sentence
print(info("WIELKIE niewielkie"))

# 3 -- presumably removes occurrences of "a" from the sentence
print(delete("Ala ma kota", "a"))

# 4 -- temperature-style conversions: a scale letter plus a numeric value
print(convert("K", 33))
print(convert("F", 43))
print(convert("R", 43))

# 5 -- basic calculator: add two numbers
calc1 = Calculator()
print(calc1.add(3, 4))

# 6 -- extended calculator; square(4, 2) presumably computes a power
calc2 = ScienceCalculator()
print(calc2.square(4, 2))

# 7 -- return value not captured, so reverse presumably prints its result
reverse("to kajak")

# 8 -- read the file, append a line, then re-read to show the update
manager1 = file_manager.FileManager("zad8.txt")
manager1.read_file()
manager1.update_file("\nAdded text")
manager1.read_file()