Example #1
 def __init__(self):
     manager = Manager()
     
     self.flow_to_state_map = manager.dict()
     self.flow_to_state_map.clear()
     self.trigger = manager.Value('i', 0)
     self.comp = manager.Value('i', 0) # sequential = 0, parallel = 1 
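A note on the typecode argument seen throughout these examples: SyncManager.Value stores the typecode ('i' here) but does not enforce it, which is why later examples pass strings such as 'done' or 'ssd', or even classes, in that position. A minimal, self-contained sketch (not from the example above) of sharing such a Value between processes:

from multiprocessing import Manager, Process

def bump(counter, lock):
    # Reads and writes travel through the manager process; the lock keeps
    # the read-modify-write atomic across workers.
    with lock:
        counter.value += 1

if __name__ == '__main__':
    manager = Manager()
    counter = manager.Value('i', 0)
    lock = manager.Lock()
    workers = [Process(target=bump, args=(counter, lock)) for _ in range(4)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    print(counter.value)  # 4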
Example #2
class DataManager:
    """
    Class to interact with the Data visualizer
    @author Frederic Abraham
    """
    def __init__(self, data_names: List, pull_rate: int):
        self.manager = Manager()
        self.done = self.manager.Value("done", True)
        self.time_step = self.manager.Value("timestep", 0)
        self.line_dict = self.manager.dict(
            {data_name: 0
             for data_name in data_names})
        self.p = Process(target=run,
                         args=(
                             self.done,
                             self.time_step,
                             self.line_dict,
                         ))
        self.p.start()

    def update_time_step(self, new_time_step):
        self.time_step.value = new_time_step

    def update_value(self, key, value):
        self.line_dict[key] = value

    def stop(self):
        self.done.value = False
        self.p.join()
        self.p.close()
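The run target handed to Process is not shown above; a plausible shape for it, assuming it simply polls the shared flag until stop() flips it (the body is illustrative, not the source's):

import time

def run(done, time_step, line_dict):
    # DataManager.stop() sets done.value to False, which ends this loop.
    while done.value:
        print(time_step.value, dict(line_dict))  # dict() snapshots the proxy
        time.sleep(0.5)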
Example #3
def main(argv):

    task_files = [t.replace('.py', '') for t in os.listdir(task.TASKS_PATH)
                  if t != '__init__.py' and t.endswith('.py')]

    if len(FLAGS.tasks) > 0:
        for t in FLAGS.tasks:
            if t not in task_files:
                raise ValueError('Task %s not recognised!' % t)
        task_files = FLAGS.tasks

    tasks = [task_file_to_task_class(t) for t in task_files]

    manager = Manager()

    result_dict = manager.dict()
    file_lock = manager.Lock()

    task_index = manager.Value('i', 0)
    variation_count = manager.Value('i', 0)
    lock = manager.Lock()

    check_and_make(FLAGS.save_path)

    processes = [Process(
        target=run, args=(
            i, lock, task_index, variation_count, result_dict, file_lock,
            tasks))
        for i in range(FLAGS.processes)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()

    print('Data collection done!')
    for i in range(FLAGS.processes):
        print(result_dict[i])
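The shared task_index plus lock handed to every worker suggests the usual claim-next-item pattern. A simplified, self-contained sketch of such a run (dropping variation_count and file_lock; not the source's implementation):

from multiprocessing import Manager, Process

def run(i, lock, task_index, result_dict, tasks):
    done = []
    while True:
        with lock:
            my_index = task_index.value  # claim the next unprocessed task
            if my_index >= len(tasks):
                break
            task_index.value += 1
        done.append(tasks[my_index].upper())  # stand-in for the real work
    result_dict[i] = done

if __name__ == '__main__':
    manager = Manager()
    result_dict = manager.dict()
    task_index = manager.Value('i', 0)
    lock = manager.Lock()
    tasks = ['reach', 'push', 'stack', 'slide']
    processes = [Process(target=run,
                         args=(i, lock, task_index, result_dict, tasks))
                 for i in range(2)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()
    print(dict(result_dict))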
Example #4
def alinear(frame, fragmento, r):
    size = 2  # 2x2 grid of quadrants
    manager = Manager()
    ssdd = manager.Value('ssd', 0)
    find = manager.Value('find', False)
    mejor_x_inicial = manager.Value('mejor_x_inicial', 0)
    mejor_y_inicial = manager.Value('mejor_y_inicial', 0)
    procesos=[]
    for cuadrante_filas in range(size):
        for cuadrante_columnas in range(size):
            p = Process(target=cuarto1,
                        args=(frame, fragmento, size, cuadrante_filas,
                              cuadrante_columnas, find, ssdd,
                              mejor_x_inicial, mejor_y_inicial),
                        daemon=True)
            p.start()
            
            procesos.append(p)
    for p in procesos:
        p.join()
        
    print(f'{ssdd.value},{mejor_x_inicial.value},{mejor_y_inicial.value}')

    movimiento_x = r[1] - mejor_x_inicial.value
    movimiento_y = r[0] - mejor_y_inicial.value

    print(movimiento_x)
    print(movimiento_y)
    translation_matrix = np.float32(
        [[1, 0, movimiento_y], [0, 1, movimiento_x]])
    num_rows, num_cols = frame.shape[:2]
    img_translation = cv2.warpAffine(
        frame, translation_matrix, (num_cols, num_rows))
    return img_translation
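One detail this example highlights: the proxies returned by Manager.Value do not implement arithmetic, so expressions must unwrap them with .value, as the movimiento_x and movimiento_y lines do. A two-line demonstration:

from multiprocessing import Manager

if __name__ == '__main__':
    manager = Manager()
    v = manager.Value('i', 3)
    # print(10 - v)      # TypeError: ValueProxy has no arithmetic operators
    print(10 - v.value)  # 7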
Example #5
def trainerLearnScoreParallel(lrLearner, svmLearner, knnLearner):
    manager = Manager()

    lrScore = manager.Value('d', 0.0)
    svmScore = manager.Value('d', 0.0)
    knnScore = manager.Value('d', 0.0)

    temp = manager.Namespace()
    temp.learner = lrLearner
    lrLearner = temp

    temp = manager.Namespace()
    temp.learner = svmLearner
    svmLearner = temp

    temp = manager.Namespace()
    temp.learner = knnLearner
    knnLearner = temp

    lrP = Process(target=trainerLearnScore, args=(lrLearner, 'LogReg', finalFeatures, finalAnswers, testFeatures, testAnswers, lrScore))
    svmP = Process(target=trainerLearnScore, args=(svmLearner, 'SVM', finalFeatures, finalAnswers, testFeatures, testAnswers, svmScore))
    knnP = Process(target=trainerLearnScore, args=(knnLearner, 'kNN', finalFeatures, finalAnswers, testFeatures, testAnswers, knnScore))

    lrP.start()
    svmP.start()
    knnP.start()

    lrP.join()
    svmP.join()
    knnP.join()

    lrLearner = lrLearner.learner
    svmLearner = svmLearner.learner
    knnLearner = knnLearner.learner
    return (lrLearner, svmLearner, knnLearner, lrScore, svmScore, knnScore)
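The Namespace wrapping is what lets the child processes hand trained learners back to the parent: fetching ns.learner returns a pickled copy, so in-place mutation in the child is invisible until the child reassigns the attribute. A self-contained sketch of that round trip:

from multiprocessing import Manager, Process

def train(ns):
    model = ns.learner       # a pickled copy arrives in the child
    model.append('trained')  # mutating the copy alone would be lost...
    ns.learner = model       # ...so write it back through the proxy

if __name__ == '__main__':
    manager = Manager()
    ns = manager.Namespace()
    ns.learner = ['untrained']
    p = Process(target=train, args=(ns,))
    p.start()
    p.join()
    print(ns.learner)  # ['untrained', 'trained']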
Example #6
    def __init__(self, dialogs, out_serialized_dataset_dir,
                 tokenizer_name_or_path, n_workers, max_n_tokens,
                 max_n_utterances):
        self._dialogs = dialogs
        self._out_serialized_dataset_dir = Path(out_serialized_dataset_dir)
        self._tokenizer_name_or_path = tokenizer_name_or_path
        self._n_workers = n_workers
        self._max_n_tokens = max_n_tokens
        self._max_n_utterances = max_n_utterances

        self._out_serialized_dataset_dir.mkdir(exist_ok=False, parents=True)
        self._data_file_path = self._out_serialized_dataset_dir / 'data.bin'
        self._offsets_file_path = self._out_serialized_dataset_dir / 'offsets.bin'
        self._sample_lengths_file_path = self._out_serialized_dataset_dir / 'sample_lengths.bin'
        self._response_lengths_file_path = self._out_serialized_dataset_dir / 'response_lengths.bin'
        self._meta_file_path = self._out_serialized_dataset_dir / 'meta.json'
        self._tokenizer_params_file_path = self._out_serialized_dataset_dir / 'tokenizer_params.json'

        sync_manager = Manager()
        self._lock = sync_manager.Lock()
        self._prev_offset = sync_manager.Value('i', 0)
        self._n_samples = sync_manager.Value('i', 0)
        self._dtype_code = sync_manager.Value('i', -1)

        self._tokenizer = DialogsTokenizer(
            self._tokenizer_name_or_path,
            max_n_tokens=self._max_n_tokens,
            max_n_utterances=self._max_n_utterances)
Example #7
def controller_failure_unit_test():
    s = ["1001"]
    s1 = ["1002"]
    clear_config(s)
    clear_config(s1)
    manager1 = Manager()
    manager2 = Manager()
    failure1 = manager1.Value('i', 0)
    failed_list1 = manager1.list([])

    failure2 = manager2.Value('i', 0)
    failed_list2 = manager2.list([])
    processes = []
    process2 = mp.Process(target=controller_failure_detection, args=(s, '1', failure1, failed_list1,))
    processes.append(process2)
    process4 = mp.Process(target=controller_failure_detection, args=(s, '2', failure2, failed_list2,))
    processes.append(process4)
    for p in processes:
        p.start()
        print('STARTING:', p, p.is_alive())
    r = random.randint(1, 10)
    time.sleep(r)
    print('terminated')
    t1 = time.time()
    logging.debug('controller failed at: %s', t1)
    processes[0].terminate()
    # Exit the completed processes
    for p in processes:
        p.join()
        print('JOINED:', p, p.is_alive())
Example #8
def showerrormessage(messagetext):
    idproc = os.fork()
    if (idproc == 0):
        import signal

        def message(messagetext, winclosed):
            import gi
            if not hasattr(sys, 'argv'):
                sys.argv = ['']
            gi.require_version('Gtk', '3.0')
            from gi.repository import Gtk, Gdk, GObject

            def init():
                class Dialog(Gtk.Dialog):
                    def __init__(self):
                        Gtk.Window.__init__(self, title="Error")
                        self.set_default_icon_from_file("resources/icon.svg")
                        self.set_default_size(150, 100)
                        self.add_button("_OK", Gtk.ResponseType.OK)
                        self.connect("response", self.on_response)
                        self.set_keep_above(True)
                        hboxdialogerror = Gtk.Box(
                            orientation=Gtk.Orientation.HORIZONTAL, spacing=3)
                        imageerror = (Gtk.Image.new_from_icon_name(
                            "dialog-error", Gtk.IconSize.DIALOG))
                        label = Gtk.Label("" + str(messagetext.value))
                        hboxdialogerror.pack_start(imageerror, False, False, 0)
                        hboxdialogerror.pack_start(label, True, True, 0)
                        box = self.get_content_area()
                        box.add(hboxdialogerror)
                        self.show_all()

                    def on_response(self, dialog, response):
                        winclosed.value = "True"
                        dialog.close()
                        dialog.destroy()

                win = Dialog()
                win.show_all()
                win.connect("delete-event", Gtk.main_quit)
                Gtk.main()
                win.close()
                win.destroy()

            init()
            quit()

        managerc = Manager()
        internaltextvalue = managerc.Value(c_char_p, "" + str(messagetext))
        winclosed = managerc.Value(c_char_p, "")
        p2 = Process(target=message, args=(internaltextvalue, winclosed))
        p2.start()
        print "Error opened"
        while (str(winclosed.value) != "True"):
            time.sleep(0.1)
        print "Error closed"
        p2.terminate()
        os.kill(os.getpid(), signal.SIGKILL)
    print "return"
Example #9
        def __init__(self, parent, zip_it, user_id, total, update, bot):
            self.parent = parent
            self.zip_it = zip_it
            self.user_id = user_id
            self.total = total
            self.update = update
            self.bot = bot

            manager = Manager()
            self.current_size = manager.Value('i', 0)
            self.xth_zip = manager.Value('i', 1)
            self.next_zip = manager.dict()
            self.lock = manager.Lock()

            self.logger = logging.getLogger(self.__class__.__name__)
Example #10
def py_parallel_demo12():
    from multiprocessing import Process, Manager

    def my_update(lock, shareValue, shareList, shareDict):
        with lock:
            print('[%s] my_update is running ...' % os.getpid())
            shareValue.value += 1
            for i in range(len(shareList)):
                shareList[i] += 1
            shareDict['key1'] += 1
            shareDict['key2'] += 2
            time.sleep(1)

    manager = Manager()
    shareValue = manager.Value('i', 1)
    shareList = manager.list(range(5))
    shareDict = manager.dict({'key1': 1, 'key2': 2})

    lock = manager.Lock()
    procs = [
        Process(target=my_update,
                args=(lock, shareValue, shareList, shareDict))
        for _ in range(10)
    ]
    for p in procs:
        p.start()
    for p in procs:
        p.join()

    print('[%s] main is running ...' % os.getpid())
    print('share value:', shareValue.value)
    print('share list:', shareList)
    print('share dict:', shareDict)
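The with lock: above is not decorative: shareDict['key1'] += 1 on a manager proxy is a read followed by a write, two separate round trips to the manager process, so concurrent increments can overwrite each other. A small sketch (not from the source) showing the lost updates when the lock is dropped:

from multiprocessing import Manager, Process

def unsafe_bump(d):
    for _ in range(1000):
        d['n'] += 1  # get + set: not atomic without a lock

if __name__ == '__main__':
    manager = Manager()
    d = manager.dict({'n': 0})
    procs = [Process(target=unsafe_bump, args=(d,)) for _ in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(d['n'])  # typically well below 4000: updates were lost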
Example #11
async def aio_resolve(subdomain_list, process_num, coroutine_num):
    """
    Asynchronously resolve subdomain A records

    :param list subdomain_list: subdomains to resolve
    :param int process_num: number of resolver processes
    :param int coroutine_num: number of coroutines per resolver process
    :return: resolution results
    """
    m = Manager()
    done_obj = m.Value('done', 0)  # a value that can be shared across processes
    loop = asyncio.get_event_loop()
    loop.run_in_executor(None, resolve_progress_func,
                         done_obj, len(subdomain_list))
    wrapped_resolve_func = functools.partial(do_resolve, done_obj)
    result_list = list()
    # On macOS a multiprocessing queue cannot hold more than 2**15 - 1 = 32767 items
    # https://stackoverflow.com/questions/5900985/multiprocessing-queue-maxsize-limit-is-32767
    if sys.platform == 'darwin':
        split_subdomain_list = utils.split_list(subdomain_list, 32767)
        for current_subdomain_list in split_subdomain_list:
            async with aiomp.Pool(processes=process_num,
                                  childconcurrency=coroutine_num) as pool:
                result = await pool.map(wrapped_resolve_func,
                                        current_subdomain_list)
                result_list.extend(result)
        return result_list
    async with aiomp.Pool(processes=process_num,
                          childconcurrency=coroutine_num) as pool:
        result_list = await pool.map(wrapped_resolve_func, subdomain_list)
        return result_list
Example #12
def main(sys_args):
    """Function that is called by the command line"""
    # Parses the arguments
    input_directory, output_directory, bit_8 = parse_arguments(sys_args[1:])
    print('Input directory: %s' % input_directory)
    print('Output directory: %s' % output_directory)
    print('bit_8: %s' % bit_8)

    input_files, output_files = curate_files(input_directory, output_directory,
                                             bit_8)
    print('Found {} files for processing'.format(len(input_files)))

    m = Manager()
    prev_time = m.Value('d', time.time())
    prev_time_lock = m.Lock()
    process_file_partial = partial(process_file, len(input_files), bit_8,
                                   prev_time, prev_time_lock)

    # Process using multiple cores.
    num_workers = cpu_count() - 1 or 1
    with Pool(num_workers) as pool:
        results = pool.starmap(process_file_partial,
                               [(in_file, output_files[index], index)
                                for index, in_file in enumerate(input_files)],
                               chunksize=1)
    print('Done!')
    print('Completed converting {} files'.format(sum(1 for x in results if x)))
Example #13
    def __init__(self,
                 manager: Manager,
                 total_count: int,
                 print_step: Optional[int] = None):
        """
    Constructs the counter with the given arguments

    :param manager: the manager has to be instanciated outside for shared
    resources
    :param total_count: the total number of elements to process
    :param print_step: the number of step between each print, initialized
    to sensible default if not provided
    """
        self._lock = manager.Lock()
        self._value = manager.Value('i', 0)
        self._total_count = total_count
        self._start_time = time.time()
        if print_step:
            self._print_step = print_step
        else:
            # A tenth of the nearest (floored) power of 10 of the total count:
            # - if count is 456, the print step will be 10
            # - if count is 55698, the print step will be 1000
            if total_count < 10:
                self._print_step = 1
            else:
                self._print_step = int(
                    10**(math.floor(math.log10(total_count))) / 10)
Example #14
def create_proc_channel(instance_cls, cleanroom_args=None):
    mgr = Manager()
    in_queue = mgr.Queue(maxsize=1)
    out_queue = mgr.Queue(maxsize=1)
    state = mgr.Value('b', 1)
    lock = mgr.Lock()  # pylint: disable=no-member

    if cleanroom_args is None:
        args = ()
        kwargs = {}
    else:
        args = cleanroom_args.args
        kwargs = cleanroom_args.kwargs

    proc = CleanroomProcess(instance_cls, args, kwargs, in_queue, out_queue)
    proc.daemon = True
    proc.start()
    logger.debug('create_proc_channel: proc=%s started.', proc)

    while not proc.is_alive():
        logger.debug('create_proc_channel: proc=%s not alive, waiting...',
                     proc)
        time.sleep(0.01)

    logger.debug('create_proc_channel: proc=%s is alive.', proc)
    return proc, in_queue, out_queue, state, lock
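The two maxsize=1 queues give the channel a strict request/response rhythm: at most one message in flight each way, with the returned lock left to callers for serializing access. CleanroomProcess itself is not shown; a self-contained sketch of the same pattern:

from multiprocessing import Manager, Process

def serve(in_queue, out_queue):
    # One request in, one response out, until a None sentinel arrives.
    while True:
        msg = in_queue.get()
        if msg is None:
            break
        out_queue.put(msg * 2)

if __name__ == '__main__':
    mgr = Manager()
    in_q, out_q = mgr.Queue(maxsize=1), mgr.Queue(maxsize=1)
    proc = Process(target=serve, args=(in_q, out_q), daemon=True)
    proc.start()
    in_q.put(21)
    print(out_q.get())  # 42
    in_q.put(None)
    proc.join()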
Example #15
    def getMultiSetsWithTO(self, longLen=None, maxSingle=None, maxDouble=None, okRate=0.5, cutStart=150, cutEnd=280):
        """
        Set a time limit on how long the loop may run
        """
        manager = Manager()

        allSet = manager.dict()
        checkStop = manager.Value('I', True)

        paras = {
            'checkStop': checkStop,
            'allSets': allSet,
            'longLen': longLen,
            'maxSingleLen': maxSingle,
            'maxDoubleLen': maxDouble,
            'okRate': okRate,
            'cutStart': cutStart,
            'cutEnd': cutEnd
        }

        p = Process(
            target=self.getMultiSetsNS,
            kwargs=paras)

        p.start()
        p.join(timeout=10)
        p.terminate()

        if p.exitcode is None:
            # Still running when the timeout hit: signal the loop to stop.
            checkStop.value = False
        return dict(allSet)
Example #16
def main_thread():
    name = ""
    memory = []
    TestMotion.start_in_bg()
    manager = Manager()
    command = manager.Value(c_wchar_p, "None")
    process = Process(target=run, args=(command, ))
    process.start()
    while True:
        if name != "n/a" and name != "":
            print(name)
        try:
            res = req.get("http://localhost:8080/getMessage")
            if res is not None and res.status_code == 200:
                data = res.json()
                command.value = data['message']
                mouth.speak_aloud("from website ")
                memory.append(command.value)
                respond(command.value)
        except Exception:
            pass  # website not up
        try:
            if command.value == "None":
                raise Exception
            jsonData = json.loads(command.value)
            if 'atlas' in jsonData['text'] or 'alice' in jsonData['text']:
                print(command.value)
                memory.append(command.value)
                respond(command.value)
            sleep(.1)
        except Exception:
            sleep(.1)
Example #17
    def run_multiprocesses_likelihood(self):
        lik = 0.0
        workers = []
        workers_no = self.configuration.num_threads
        corpusSplitlist = self.split_average_data(workers_no)

        likmanager = Manager()
        ManagerReturn_corpusSplitlist = []
        ManagerReturn_corpusSplitlist_lik = []
        for dataSplit in corpusSplitlist:
            likreturn_dataSplit = likmanager.list()
            likreturn_dataSplit_likvalue = likmanager.Value("", 0.0)
            worker = Process(target=self.splitlikelihood,
                             args=(dataSplit, likreturn_dataSplit,
                                   likreturn_dataSplit_likvalue))
            worker.start()
            workers.append(worker)
            ManagerReturn_corpusSplitlist.append(likreturn_dataSplit)
            ManagerReturn_corpusSplitlist_lik.append(
                likreturn_dataSplit_likvalue)
        for w in workers:
            w.join()

        # compute all the likelihood for the splits:
        for v in ManagerReturn_corpusSplitlist_lik:
            lik += v.value
        # update all the docs into corpus, since we compute the doc distribution in likelihood()
        self.corpus.clear()
        for dataSplit in ManagerReturn_corpusSplitlist:
            for doc in dataSplit:
                self.corpus.append(doc)

        return lik
Example #18
def main():
    # manage shared variables between process
    manager = Manager()
    shared_my_user_id = manager.Value(str, "")
    shared_my_user_name = manager.Value(str, "")

    p1 = Process(target=itchat_main,
                 args=(shared_my_user_id, shared_my_user_name))
    p1.start()

    time.sleep(5)
    p2 = Process(target=auto_message_main,
                 args=(shared_my_user_id, shared_my_user_name))
    p2.start()

    time.sleep(5)
Example #19
 def __init__(self, parent, settings):
     self.proc = None
     self.s = None
     self.__class__.settings = settings
     self.img = ImageTk.PhotoImage(Image.open(Utils.getIcon("help.png")))
     manager = Manager()
     self.exiting = manager.Value('i', 0)
Example #20
    def create_initial_population(self):
        """Create members of the first population randomly."""

        for _ in range(self.pop_size):
            individual = Particle(self.chromosome_size, self.fitness_function)
            if not self.pool:
                individual.calculate_fitness()
            self.add_individual_to_pop(individual)

        if self.pool:
            p = Pool(self.pool_size)
            manager = Manager()
            lock = manager.Lock()
            counter = manager.Value('i', 0)

            def pool_function(inside_lock, inside_counter, inside_member):
                # Capture the GPU index while still holding the lock so
                # another worker's increment cannot slip in between.
                with inside_lock:
                    inside_counter.value += 1
                    gpu_id = inside_counter.value % 4

                return inside_member.calculate_fitness(gpu=gpu_id)

            func = partial(pool_function, lock, counter)
            fitness_values = p.map(func, self.current_population[:])

            for value, member in zip(fitness_values,
                                     self.current_population[:]):
                member.fitness = value

            p.terminate()
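Note that Pool.map must pickle its callable, and the standard pickler cannot serialize a function defined inside a method, so a partial over a nested pool_function generally raises "Can't pickle local object". The portable arrangement is a module-level function; a self-contained sketch (the fitness computation is a stand-in):

from functools import partial
from multiprocessing import Manager, Pool

def pool_function(lock, counter, member):
    # Module level, so Pool.map can pickle it.
    with lock:
        counter.value += 1
        gpu_id = counter.value % 4  # round-robin GPU assignment, as above
    return member * member + gpu_id  # stand-in for calculate_fitness(gpu=...)

if __name__ == '__main__':
    manager = Manager()
    lock, counter = manager.Lock(), manager.Value('i', 0)
    with Pool(4) as p:
        print(p.map(partial(pool_function, lock, counter), range(8)))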
Example #21
    def run(self):
        """Run the preprocessing pipeline

        This method runs the preprocessing pipeline for given samples
        by spinning up processes provided while initializing the class
        """
        rundir = Path(self.checkpoint_dir)
        if not rundir.resolve().exists():
            os.makedirs(rundir)
        if self.overwrite_checkpoints:
            shutil.rmtree(rundir / 'galaxies', ignore_errors=True)
        os.makedirs(rundir / 'galaxies', exist_ok=True)
        self.logger.debug('Created a directory called {} to save lupton-rgb images'
                          .format(rundir / 'galaxies'))
        guid = CKPT_GUID
        if self.overwrite_checkpoints:
            CheckPoint.remove_ckpt(self.checkpoint_dir, guid)
        if not CheckPoint.checkpoint_exists(self.checkpoint_dir, guid):
            checkpoint = CheckPoint(self.checkpoint_dir, RedShiftCheckPointObject, guid)
            checkpoint.save_checkpoint()

        process_pool = PreProcess.get_process_pool(self.num_processes)
        manager = Manager()
        counter = manager.Value('i', 0)
        checkpoint_objects = manager.Queue()
        for x in self.process_blocks:
            process_pool.apply_async(self._run_preprocess_for_one_block,
                                     kwds={'ckpt_info': x,
                                           'counter': counter,
                                           'ckpt_objs': checkpoint_objects},
                                     callback=self._on_process_complete,
                                     error_callback=self._on_process_fail)
        process_pool.close()
        process_pool.join()
Example #22
def crawl(dataset_path, scenes, subsequence_length, num_workers=1):
    pool = Pool(num_workers)
    manager = Manager()

    count = len(scenes)
    progress = manager.Value('i', 0)

    samples = []

    if subsequence_length == 2:
        for scene_samples in pool.imap_unordered(
                partial(crawl_subprocess_short,
                        dataset_path=dataset_path,
                        count=count,
                        progress=progress), scenes):
            samples.extend(scene_samples)

    else:
        for scene_samples in pool.imap_unordered(
                partial(crawl_subprocess_long,
                        dataset_path=dataset_path,
                        count=count,
                        progress=progress,
                        subsequence_length=subsequence_length), scenes):
            samples.extend(scene_samples)

    pool.close()
    pool.join()
    random.shuffle(samples)

    return samples
Example #23
class FunctionalityTestCase(unittest.TestCase):
    def setUp(self) -> None:
        self.manager = Manager()
        self.queue = self.manager.Queue()
        self.active = self.manager.Value('b', False)
        self.worker_manager = WorkerManager(MockSenderFactory(), 6, self.queue,
                                            self.active)

    def test_distribute_empty(self):
        self.active.value = True
        process = Process(target=self.worker_manager.distribute, args=([], ))
        process.start()
        process.join()
        self.assertEqual(self.active.value, False)

    def test_distribute_normal(self):
        self.active.value = True
        process = Process(
            target=self.worker_manager.distribute,
            args=(['foo.txt', 'bar.txt', 'baz.txt', 'quuux.txt',
                   'quuz.txt'], ))
        process.start()
        for i in range(6):
            progress = self.queue.get()
            self.assertNotEqual(progress.done + progress.error, 0)
        process.join()
        self.assertEqual(self.active.value, False)
Example #24
def main(argv=None):  # pylint: disable=unused-argument
    if (len(sys.argv) != 3):
        print("<port> <no of workers> required")
        sys.exit()
    global s
    global port
    global MAX_WORKERS
    port = int(sys.argv[1])
    MAX_WORKERS = int(sys.argv[2])
    global gradients_q
    global global_var_vals
    gradients_q = Queue()
    manager = Manager()
    global_var_vals = manager.Value(c_char_p, "")

    for i in range(MAX_WORKERS):
        process_port = port + i + 1
        p = Process(target=handleWorker,
                    args=(process_port, gradients_q, global_var_vals))
        p.daemon = True
        p.start()

    cifar10.maybe_download_and_extract()
    if tf.gfile.Exists(FLAGS.train_dir):
        tf.gfile.DeleteRecursively(FLAGS.train_dir)
    tf.gfile.MakeDirs(FLAGS.train_dir)
    total_start_time = time.time()
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    print("Connecting to port : ", port, " and no of workers: ", MAX_WORKERS)
    s.bind((TCP_IP, port))
    s.listen(1)
    train()
    print("--- %s seconds ---" % (time.time() - total_start_time))
Example #25
def main(filename):
    manager = Manager()
    res = manager.Value('d', 0)

    nama = input("Masukkan Nama: ")
    ipk = float(input("Masukkan IPK saat ini: "))
    penghasilan = int(input("Masukkan Penghasilan Orang Tua : "))
    jarak = float(
        input("Masukkan Jarak dari rumah anda ke Kampus/Sekolah (Km): "))

    p = Process(target=fungsi_keanggotaan,
                args=(
                    ipk,
                    penghasilan,
                    jarak,
                    res,
                ))
    p.start()
    p.join()

    write_csv(filename, nama, ipk, penghasilan, jarak, res.value)

    repeat = input("Apakah anda ingin menginput data lagi?(y/n) : ")
    print()
    while repeat == "y":
        main(filename)
    menu(filename)
Example #26
def batch_download(bucket, file_paths, root, num_workers=10, retry=10):
    with Pool(num_workers) as p:
        m = Manager()
        counter = m.Value('i', 0)
        lock = m.Lock()
        download_ = functools.partial(download, bucket, root, retry, counter, lock)
        p.map(download_, file_paths)
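download itself is not shown; the partial fixes its first five parameters, so its signature must be download(bucket, root, retry, counter, lock, file_path). A sketch of a matching worker, assuming an S3-style bucket object with a download_file(key, filename) method (the body is illustrative):

import os

def download(bucket, root, retry, counter, lock, file_path):
    # The five bound arguments come first, then the mapped file_path.
    for _ in range(retry):
        try:
            bucket.download_file(file_path, os.path.join(root, file_path))
            break
        except Exception:
            continue
    with lock:
        counter.value += 1  # shared progress count across workers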
Example #27
def mine_msg(msg: bytes, zeros: int, timestamp: bytes):
    """
    Mines a message so that it passes the node's PoW
    :param msg: Message to mine
    :param zeros: PoW difficulty
    :param timestamp: b"" if the message has no timestamp, otherwise the timestamp
    :return: Mined message
    """
    m = Manager()
    var_return = m.Value(bytes, b"")

    def helper(msg, zeros, var_return, timestamp: bytes) -> None:
        h = b"1"
        counter = 0
        if not timestamp:
            msg = [msg]
        else:
            msg = msg.split(timestamp, 1)
        returneo = bytes(str(counter), "ascii") + b" " + timestamp.join(msg)
        while not check_msg_pow(returneo, zeros):
            returneo = bytes(str(counter), "ascii") + b" " + bytes(
                str(int(time())), "ascii").join(msg)
            counter += 1
        var_return.value = returneo

    p = Process(target=helper, args=(msg, zeros, var_return, timestamp))
    # We must free the CPU so I/O requests keep being served; hence a Process
    # (a separate OS process takes pressure off the threads' execution).
    p.start()
    p.join()
    return var_return.value
Example #28
    def __init__(self,
                 fn,
                 producer_count=None,
                 consumer_count=None,
                 callback=None,
                 batch=True,
                 counter=None,
                 **shared):
        """
        init producer/consumer task
        Args:
            fn: consumer called func(data, counter, q_size, *args, **shared_vars)
            producer_count: producer process count, default: 1
            consumer_count: consumer process count, default: cpu_count - 1
            callback: callback func after f calling completed
            batch: if True, `task.put(todo_list)` 'todo_list' will be do all at once in batches;
                    False, todo_list will be do one by one
            counter: process shared counter, need custom imp in <fn>
            **shared: process shared object data
        """
        cpus = cpu_count()
        if producer_count is None or not 1 <= producer_count <= cpus:
            producer_count = 1
        if consumer_count is None or not 1 <= consumer_count <= cpus:
            consumer_count = cpus - 1

        print('producer_count=%s consumer_count=%s' % (producer_count,
                                                       consumer_count))

        self._callback = callback
        self.batch = batch
        manager = Manager()
        self.q = manager.Queue()
        self.lock = manager.Lock()
        self.event = manager.Event()
        self._counter = manager.Value('counter', counter or 0)
        self._shared = {
            var_name: manager.Value(var_name, var_value)
            for var_name, var_value in shared.items()
        }
        self.producerProcessList = [Producer() for _ in range(producer_count)]
        self.consumerProcessList = [
            Consumer(fn=fn) for _ in range(consumer_count)
        ]
        self.pool = ProcessPool(consumer_count + producer_count)
Example #29
    class Launcher:
        def __init__(self, images):
            self.images = images
            self.manager = Manager()
            self.input_queue = self.manager.Queue()
            self.result_queue = self.manager.Queue()
            self.stop_flag = self.manager.Value('i', 0)
            self.time_worker_run = self.manager.list()
            self.start_time = None

            self.start()

        def start(self):

            self.start_time = datetime.timestamp(datetime.now())

            if self.images:
                for image in self.images:
                    self.input_queue.put(image)

            contractors = []

            for i in range(MAX_CONTRACTORS):

                contractor = Contractor(self.images, self.stop_flag,
                                        self.input_queue, self.result_queue,
                                        API_KEY, SECRET_KEY,
                                        self.time_worker_run)
                contractor.start()
                contractors.append(contractor)

            for contractor in contractors:
                contractor.join()

            print("\n All contractors were finished. Cleaning up")

        def get_results(self):

            results = {}

            while not self.result_queue.empty():
                task = self.result_queue.get(False)
                results.update(task)

            lapse_time = datetime.timestamp(datetime.now()) - self.start_time
            average_time_per_worker = sum(self.time_worker_run) / len(
                self.time_worker_run)

            _time = "seconds"

            if lapse_time > 60:
                lapse_time = lapse_time / 60
                _time = "minute/s"

            print("\n--- average time(sec) per worker: %s" %
                  average_time_per_worker)
            print("--- lapse time in %s %s \n" % (lapse_time, _time))

            return results
Example #30
 def __init__(self):
     manager = Manager()
     self.pcloud = PyCloud(PCLOUD_USER, PCLOUD_PASS)
     self.putio = putiopy.Client(PUTIO_KEY)
     self.download_list = manager.list()
     self.upload_list = manager.list()
     self.files_left = manager.Value(1, 0)
     self.destination = None