Example #1
    def run(self):
        '''
            ****************
            -2: Error
            -1: Timeout
            0 : Wrong Answer
            1 : Passed
            ****************
        '''
        if self.Error:
            return [[-1, -2, str(self.ee)]]
        ans = thread.Queue()
        job = []
        for case in range(len(self.input)):
            # sent[case] = False
            # print(self.input[case])
            # print(self.expected[case])
            T = thread.Process(target=self.run_helper,
                               args=(self.input[case], self.expected[case],
                                     case, ans))
            job.append(T)
            T.start()
            T.join(self.timeLimit)
            if T.is_alive():
                print('case%d: Timeout' % (case + 1))
                # self.update_result(-1) # -1 for timeout
                ans.put([case, -1])
                T.terminate()
                T.join()
        return [ans.get() for _ in job]
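
The pattern above runs each test case in its own process, waits at most timeLimit seconds, and records a sentinel result on timeout. A minimal standalone sketch of the same pattern, assuming the stdlib multiprocessing module in place of the thread alias (slow_square and the 1-second limit are made up for illustration):

import multiprocessing as mp

def slow_square(x, out):
    out.put(x * x)  # stand-in for run_helper

if __name__ == '__main__':
    results = mp.Queue()
    for case, x in enumerate([2, 3]):
        p = mp.Process(target=slow_square, args=(x, results))
        p.start()
        p.join(1.0)            # per-case time limit in seconds
        if p.is_alive():       # still running past the limit: timeout
            results.put([case, -1])
            p.terminate()      # kill the worker
            p.join()           # reap it so no zombie is left
    print([results.get() for _ in range(2)])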
Example #2
    def __init__(self,
                 batch_generator,
                 batch_size=1,
                 ordered=False,
                 multiprocess=True,
                 n_producers=4,
                 max_queue_size=None):
        self.generator = batch_generator
        self.ordered = ordered
        self.multiprocess = multiprocess
        self.n_producers = n_producers
        self.batch_size = batch_size

        if max_queue_size is None:
            self.max_queue_size = n_producers * 3
        else:
            self.max_queue_size = max_queue_size

        if self.multiprocess:
            self.job_queue = multip.Queue()
        else:
            self.job_queue = Queue.Queue()

        self.last_retrieved_job = 0
        self.last_added_job = 0
        self.started = False
Example #3
    def __init__(self, server_ip=None, conf=None, http_endpoint='brainiak',
                 server_port=21216, rmq_port=5672):
        assert server_ip is not None, 'server_ip required'
        assert conf is not None, 'conf required'

        self.server_ip = server_ip
        self.server_port = server_port

        self.server_address = 'http://%s:%d' % \
            (self.server_ip, self.server_port)
        self.server_address = os.path.join(self.server_address, http_endpoint)
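        # note: os.path.join only happens to work for URLs on POSIX; on
        # Windows it would insert backslashes, so plain string formatting
        # (or urllib.parse) is the safer way to build this URL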

        self.rmq_port = rmq_port
        self.conf = conf
        self.connected = False
        self.name = 'rtcloud'
        self.queue_work_name = '%s_work' % self.name
        self.queue_result_name = '%s_result' % self.name

        self.conf['name'] = self.name
        self.conf['queue_work_name'] = self.queue_work_name
        self.conf['queue_result_name'] = self.queue_result_name

        if 'extensions' not in self.conf:
            self.conf['extensions'] = ['.nii.gz']

        # TODO: it'd be nice to have multiple queues, but not totally clear
        # based on quick inspection how we might select on multiple queues
        self.display_queue = mp.Queue()

        return
Example #4
    def start(self):

        max_queue_size = 1 if self.ordered else self.max_queue_size // 2

        self.queue = multip.Queue(
            maxsize=max_queue_size) if self.multiprocess else Queue.Queue(
                maxsize=self.max_queue_size)

        # Shared counter used to keep values in the completed queue in order
        self.last_completed_job = multip.Value('i', -1)
        self.exit = multip.Event()

        if self.multiprocess and self.ordered:
            self.cache_queue = Queue.Queue(maxsize=self.max_queue_size)

            def batcher(queue, cache_queue):
                while not self.exit.is_set():
                    job_index, item = queue.get()
                    cache_queue.put((job_index, item))

                    time.sleep(0.0001)  # to be sure..

            # Queues in Python are NOT first-in-first-out across processes in
            # a multiprocessing setting, so we use a separate thread to put
            # the items back in order synchronously
            p = Thread(target=batcher,
                       args=(self.queue, self.cache_queue),
                       name='Synchronous batcher worker')
            p.daemon = True
            p.start()

        else:
            self.cache_queue = self.queue

        # Start worker processes or threads
        for i in xrange(self.n_producers):
            name = "ContinuousParallelBatchIterator worker {0}".format(i)

            if self.multiprocess:
                p = multip.Process(target=_produce_helper,
                                   args=(i, self.generator, self.job_queue,
                                         self.queue, self.last_completed_job,
                                         self.ordered, self.exit),
                                   name=name)
            else:
                p = Thread(target=_produce_helper,
                           args=(i, self.generator, self.job_queue, self.queue,
                                 self.last_completed_job, self.ordered,
                                 self.exit),
                           name=name)

            # Make the process a daemon so the main process can exit without waiting for these to finish
            p.daemon = True
            p.start()

        self.started = True
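
The batcher thread above exists because items from several producer processes reach the consumer in nondeterministic order. A minimal standalone sketch of the same reordering idea, buffering (job_index, item) pairs in a heap and releasing them strictly by index (all names here are illustrative, not from the source):

import heapq
import multiprocessing as mp

def reorder(queue, n_items):
    # Buffer out-of-order (job_index, item) pairs and yield them in
    # job-index order, much as last_completed_job tracks order above.
    heap, next_index = [], 0
    for _ in range(n_items):
        heapq.heappush(heap, queue.get())
        while heap and heap[0][0] == next_index:
            yield heapq.heappop(heap)
            next_index += 1

if __name__ == '__main__':
    q = mp.Queue()
    for pair in [(2, 'c'), (0, 'a'), (1, 'b')]:  # arrives out of order
        q.put(pair)
    print(list(reorder(q, 3)))  # [(0, 'a'), (1, 'b'), (2, 'c')]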
Example #5
    def __init__(self,
                 configfile='config.ini',
                 logfile='dopq.log',
                 debug=False):

        # init logging
        self.logger = log.init_log(logfile)

        # get settings from config
        if not os.path.isfile(configfile):
            self.write_default_config(configfile)

        # init member variables
        self.starttime = None
        self.client = docker.from_env()
        self.debug = debug
        self.configfile = configfile
        self.logfile = logfile
        self.config = self.parse_config(configfile)
        self.paths = self.config['paths']
        self.history_file = 'history.dill'
        self.container_list_file = 'container_list.dill'
        self.running_containers_file = 'running_containers.dill'
        self.container_list = []
        self.running_containers = []
        self.history = []
        self.mapping = self.restore('all')

        # init helper processes and classes
        self.queue = mp.Queue()
        self.gpu_handler = gh.GPUHandler()
        self.provider = provider.Provider(self.config, self.queue)

        # build all non-existent directories, except the network container share
        keys = list(self.paths.keys())
        for key in keys:
            if key != 'network_containers':
                if not os.path.isdir(self.paths[key]):
                    os.makedirs(self.paths[key])

        # initialize process variable and termination flag
        super(DopQ, self).__init__()

        # initialize interface as a thread (so that members of the queue are accessible by the interface)
        self.thread = threading.Thread(target=self.run_queue)
Example #6
def test_provider():
    from pathos.helpers import mp
    import os
    import shutil

    test_config = {
        'paths': {'local_containers': 'test/dest/',
                  'network_containers': 'test/src/',
                  'unzip': 'test/unzipped/',
                  'log': 'test/log/',
                  'history': './',
                  'failed': 'test/failed/'},
        'docker': {'mounts': ['/home/kazuki/:/blub'],
                   'auto_remove': False,
                   'mem_limit': '32gb',
                   'network_mode': 'host',
                   'logging_interval': 30},
        'builder': {'sleep': 10,
                    'load': '.tar',
                    'build': '.zip'},
        'fetcher': {'remove_invalid': False,
                    'sleep': 10,
                    'min_space': 0.01,
                    'executors': 'ilja'}}

    backup_dir = 'test/backup'
    testfiles = os.listdir(backup_dir)
    testfiles = [tf for tf in testfiles if '.zip' in tf]
    print(testfiles)

    for testfile in testfiles:
        testfile = os.path.join(backup_dir, testfile)
        shutil.copy(testfile, test_config['paths']['network_containers'])

    q = mp.Queue()
    p = Provider(test_config, q)
    p.start()
    while p.status == 'running':
        try:
            image = q.get()
            print(image)
        except BaseException:
            p.stop()
Example #7
    # Set the logging level
    if options.debug: DEBUG = True
    if options.protocol: DEBUG_PROTOCOL = True
    if options.quiet: QUIET = True

    #if DEBUG:
    #    event = multiprocess.Event()
    #    test_yescryptr16()

    # They want a daemon, give them a daemon
    if options.background:
        import os

        if os.fork() or os.fork(): sys.exit()
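        # (classic double fork: the parent and the first child each get a
        # nonzero child PID and exit, leaving only the grandchild running
        # in the background)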
    queue_in = multiprocess.Queue()
    queue_out = multiprocess.Queue()
    event = multiprocess.Event()
    manager = multiprocess.Manager()
    requests = manager.dict()
    processes = {}

    # Heigh-ho, heigh-ho, it's off to work we go...
    if options.url:
        miner = Miner(options.url, username, password, thread_count, algorithm=options.algo)
        if options.proxy:
            # so PyInstaller can find its add-data files
            if getattr(sys, 'frozen', False):
                _dir = sys._MEIPASS
            else:
                _dir = ''
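
The frozen check at the end is the usual way to locate bundled data files under PyInstaller: in a frozen app sys.frozen is set and sys._MEIPASS points at the directory where --add-data files are unpacked. A minimal sketch of the same idea as a helper (resource_dir is a hypothetical name, not from the source):

import os
import sys

def resource_dir():
    # Inside a PyInstaller bundle, files added with --add-data are
    # unpacked under sys._MEIPASS; otherwise use the source directory.
    if getattr(sys, 'frozen', False):
        return sys._MEIPASS
    return os.path.dirname(os.path.abspath(__file__))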
Example #8
    def run_full_exp_parallel_smp(self, save=True):

        if 'pathos' in sys.modules:

            n_cpus = os.cpu_count()

            remaining_runs = deepcopy(self.n_runs)

            while not self.finished:

                round_start_time = time.time()
                n_round_runs = np.min([n_cpus, remaining_runs])
                processes = [0] * n_round_runs
                # queue = pathos_multiprocess.Queue()
                queues = [0] * n_round_runs
                for queue_no in range(len(queues)):
                    queues[queue_no] = pathos_multiprocess.Queue()

                for proc_no in range(n_round_runs):
                    bayes_optimiser = deepcopy(
                        self.bayes_opt_configs[self.current_config_no])
                    seed = int(torch.randint(1, int(2**32 - 1), (1, )))
                    processes[proc_no] = pathos_multiprocess.Process(
                        target=self.run_rep_parallel,
                        args=(bayes_optimiser, queues[proc_no],
                              self.current_config_no, self.current_rep, seed))
                    processes[proc_no].start()

                    if self.current_rep < self.repetitions - 1:
                        self.current_rep += 1

                    elif self.current_config_no < self.n_configs - 1:
                        self.current_rep = 0
                        self.current_config_no += 1

                    else:
                        self.finished = True

                jobs_running = True
                procs_status = [5] * len(processes)
                last_waiting_n = 10e10
                while jobs_running:
                    for proc_no, process in enumerate(processes):
                        process.join(timeout=1)
                        if process.is_alive():
                            procs_status[proc_no] = 1
                        else:
                            procs_status[proc_no] = 0

                    for queue in queues:
                        while not queue.empty():
                            message = queue.get()
                            config_ind, rep_ind, vals, best_vals = message
                            # config_ind, rep_ind, best_vals = message

                            # self.bayes_opts[config_ind][rep_ind] = b_opt
                            self.vals[config_ind][rep_ind] = deepcopy(vals)
                            self.best_vals[config_ind][rep_ind] = deepcopy(
                                best_vals)

                    waiting_n = np.count_nonzero(procs_status)
                    if last_waiting_n != waiting_n:
                        current_time = time.time()
                        elapsed_time = (current_time - round_start_time) / 60.0
                        print(
                            f"Waited for {elapsed_time} minutes in this round, "
                            f"for {waiting_n} processes out of {len(processes)}",
                            flush=True)
                        last_waiting_n = deepcopy(waiting_n)

                    if np.sum(procs_status) < 1:
                        jobs_running = False

                remaining_runs -= n_round_runs

                self.print_status()

                if save:
                    self.save_experiment()

        else:
            print(
                "Could not run experiment in parallel because pathos is not imported. "
                "This is probably because it isn't installed.")