Example #1
from multiprocessing import Pool
from redis import StrictRedis
from telegram import ChatAction

# Verto, Message, Resources and markup_options are assumed to come from
# the surrounding project.
def loadData(bot, update):
    c = StrictRedis()
    query = update.callback_query
    chat_id = query.message.chat_id
    regNumber = c.hget(chat_id, 'umsReg')
    umsPass = c.hget(chat_id, 'umsPass')
    count = 0
    bot.send_chat_action(chat_id=chat_id, action=ChatAction.TYPING)
    try:
        homeUms = Verto(regNumber, umsPass)
        homeData = homeUms.initiater()
        if homeData == 'Incorrect Credentials':
            bot.edit_message_text(text='Incorrect Credentials',
                                  chat_id=chat_id,
                                  message_id=query.message.message_id)
            raise ValueError("Something Went Wrong")
    except ValueError:
        return
    else:

        def msg():
            msgUms = Message(homeData)
            msgList = msgUms.initiater()
            no_msg = 0
            for i, item in enumerate(msgList):
                if item != '':
                    no_msg += 1
                    item = '|'.join(item)
                    c.hset(chat_id, 'msg' + str(i), item)
            c.hset(chat_id, '#msg', str(no_msg))

        def course():
            courseRes = Resources(homeUms.session)
            courseList = courseRes.course_codes()
            c.hset(chat_id, '#course', str(len(courseList)))
            for i, item in enumerate(courseList):
                c.hset(chat_id, 'c{}'.format(i), item)

        msg()
        # Spawn one extra worker process via the pool for the course data.
        pool = Pool(4)
        p1 = pool.Process(target=course)
        p1.start()
        p1.join()
        bot.edit_message_text(text="All set! Now, what do you want to know about?",
                              chat_id=chat_id,
                              message_id=query.message.message_id,
                              reply_markup=markup_options)

    return
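Note: pool.Process(target=...) as used throughout these examples is an instance method only through Python 3.7; since Python 3.8, Pool.Process is a staticmethod that requires an explicit context, which is why later examples fall back to ctx.Process. A minimal version-portable sketch of the same spawn-one-extra-process idea (the work function is hypothetical):

from multiprocessing import Pool, get_context

def work():
    print("running in a child process")

if __name__ == "__main__":
    pool = Pool(4)
    ctx = get_context()  # the same default context the pool uses
    try:
        p = pool.Process(target=work)   # works on Python <= 3.7
    except TypeError:
        p = ctx.Process(target=work)    # Python >= 3.8: go through a context
    p.start()
    p.join()
    pool.close()
    pool.join()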
Example #2
from multiprocessing import Pool, Queue, cpu_count, get_context
from os import getpid
import time

# QueueWorker and print_color are assumed to come from the surrounding project.
def EID_Attenuation_Sim(simDataFile, nThread):
    # Read simulation jobs
    with open(simDataFile, "r") as f:
        trials = f.readlines()
    nSimTrials = len(trials)
    # Submit simulations
    if nThread > cpu_count():
        nThread = cpu_count()
    if nSimTrials < nThread:
        nThread = nSimTrials
    # Start a new parallel pool
    print("Starting parallel pool with {0} threads".format(nThread))
    ctx = get_context("fork")
    pool = Pool(processes=nThread)
    max_queue_size = min(2 * nThread, nSimTrials)
    work_queue = Queue(maxsize=max_queue_size)
    jobs = []
    remaining_jobs = nSimTrials
    # Kick off worker processes
    for _ in range(nThread):
        # Start a new worker process
        try:
            # Pool.Process(self, ...) is an instance method on Python <= 3.7
            p = pool.Process(target=QueueWorker, args=(work_queue, ))
        except Exception:
            if ctx is not None:
                # On Python >= 3.8 Pool.Process is a staticmethod expecting a
                # context, so fall back to the "fork" context directly
                p = ctx.Process(target=QueueWorker, args=(work_queue, ))
        p.start()
        jobs.append(p)

    for it in range(nSimTrials):
        # Fill in work queue
        work_queue.put((trials[it].split(), remaining_jobs),
                       block=True,
                       timeout=None)
        remaining_jobs -= 1
        print_color(
            f"[MasterNode-{getpid()}]: Adding new job {trials[it].split()[3]}, "
            f"remaining jobs {remaining_jobs}",
            color="green",
        )
        # Unfortunately we need to wait briefly before putting more data on
        # the queue, because it takes some time for an item to be ingested.
        time.sleep(0.1)

    # Wait for all worker processes to finish
    for job in jobs:
        job.join()
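QueueWorker is not shown in this example; a minimal sketch of a compatible worker, assuming each queue item is a (job_args, remaining_jobs) tuple and that a queue which stays empty means the work is done:

import queue
from os import getpid

def QueueWorker(work_queue):
    # Hypothetical worker: drain the queue until it stays empty.
    while True:
        try:
            job_args, remaining = work_queue.get(block=True, timeout=5)
        except queue.Empty:
            break  # assume no more jobs are coming
        print(f"[Worker-{getpid()}]: running job {job_args}, {remaining} left")
        # ... run the actual simulation for job_args here ...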
Example #3
import multiprocessing
from multiprocessing import Event, Pool, Queue

# handle_service is assumed to come from the surrounding project.
def evaluate_command(command, attributes):
    manager = multiprocessing.Manager()
    return_list = manager.list()

    pool = Pool(processes=1)

    main_queue = Queue()
    main_event = Event()
    main_event.set()
    proc = pool.Process(target=handle_service,
                        args=(main_queue, main_event, command, attributes,
                              return_list))
    proc.daemon = True
    proc.start()
    proc.join()
    print("from main: " + str(return_list))
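handle_service is external to this example; judging from the call site, a compatible stub could look like this (the body is hypothetical):

def handle_service(main_queue, main_event, command, attributes, return_list):
    # Hypothetical handler: run only while the event is set and report the
    # result back through the manager-backed list.
    if main_event.is_set():
        result = f"{command}({attributes})"
        main_queue.put(result)
        return_list.append(result)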
Example #4
import multiprocessing
from multiprocessing import Event, Pool, Queue

# The function named by command is assumed to be defined in the
# surrounding project.
def main(command, value):
    manager = multiprocessing.Manager()
    return_list = manager.list()

    pool = Pool(processes=1)

    value = 'test' + str(command)
    main_queue = Queue()
    main_event = Event()
    main_event.set()
    # eval(command) looks up the target function by name; this is unsafe
    # with untrusted input (see the dispatch-table sketch below)
    proc = pool.Process(target=eval(command),
                        args=(main_queue, main_event, value, return_list))
    proc.daemon = True
    proc.start()
    proc.join()
    print("from main: " + str(return_list))
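Since eval(command) will execute arbitrary input, a common safer alternative is an explicit dispatch table; a sketch with a hypothetical echo handler:

# Map known command names to functions instead of evaluating strings.
def echo(main_queue, main_event, value, return_list):
    return_list.append(value)  # placeholder handler body

HANDLERS = {"echo": echo}

def resolve(command):
    try:
        return HANDLERS[command]
    except KeyError:
        raise ValueError(f"unknown command: {command}")

# usage: proc = pool.Process(target=resolve(command), args=(...))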
Example #5
import signal
import time
from multiprocessing import Event, Pool, Queue

# echo_forever is assumed to come from the surrounding project.
def main():
    try:
        limit = 256
        # Ignore SIGINT while the pool forks its workers so Ctrl-C reaches
        # only the parent, then restore the previous handler
        sigint = signal.signal(signal.SIGINT, signal.SIG_IGN)
        pool = Pool(processes=limit)
        signal.signal(signal.SIGINT, sigint)
        eid_proc = {}
        main_queue = Queue()
        main_event = Event()
        main_event.set()
        for eid in range(limit):
            proc = pool.Process(target=echo_forever,
                                args=(main_queue, main_event))
            proc.daemon = True
            proc.start()
            eid_proc[eid] = proc
        while True:
            alive = len(
                [_proc for _proc in eid_proc.values() if _proc.is_alive()])
            print('{}/{} alive'.format(alive, limit))
            if alive == 0:
                print('All of the client processes are dead. Exiting loop...')
                break
            else:
                time.sleep(1)
    except (KeyboardInterrupt, Exception):
        main_event.clear()
        for proc in eid_proc.values():
            proc.terminate()
    finally:
        res = []
        while not main_queue.empty():
            res.append(main_queue.get())
        cdelta = [_max['cdelta'] for _max in res]
        delta = [_max['delta'] for _max in res]
        if cdelta:
            cdelta = max(cdelta)
        else:
            cdelta = "unknown"
        if delta:
            delta = max(delta)
        else:
            delta = "unknown"
        time.sleep(1)
        print("Max time to establish: {}".format(cdelta))
        print("Max time   echo reply: {}".format(delta))
        print(cdelta, delta)
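The finally block above expects each queue item to be a dict with 'cdelta' and 'delta' keys; a stub of echo_forever consistent with that protocol (the timing logic is a placeholder):

import time

def echo_forever(main_queue, main_event):
    # Hypothetical client: loop until the parent clears the event, then
    # report the worst-case timings observed.
    cdelta = delta = 0.0
    while main_event.is_set():
        start = time.monotonic()
        # ... establish a connection and exchange one echo here ...
        delta = max(delta, time.monotonic() - start)
        time.sleep(0.1)
    main_queue.put({'cdelta': cdelta, 'delta': delta})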
Example #6
    # The exposed_ prefix follows the RPyC service convention, so remote
    # clients can call this method as root.echo(...). The multiprocessing
    # imports and echo are assumed to come from the enclosing module.
    def exposed_echo(self, message, value):
        manager = multiprocessing.Manager()
        return_list = manager.list()
        if message == "Echo":
            print("received EchoService - forwarding to executing server")
            #sigint = signal.signal(signal.SIGINT, signal.SIG_IGN)
            pool = Pool(processes=1)
            #signal.signal(signal.SIGINT, sigint)
            server_res = []
            main_queue = Queue()
            main_event = Event()
            main_event.set()
            proc = pool.Process(target=echo,
                                args=(main_queue, main_event, value,
                                      return_list))
            proc.daemon = True

            proc.start()
            proc.join()
            print(return_list)
            return str(return_list[0])
        else:
            return "Parameter Problem"
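Assuming this method lives on an RPyC service (as the exposed_ prefix suggests), a client call might look like this; the host and port are hypothetical:

import rpyc

conn = rpyc.connect("localhost", 18861)
print(conn.root.echo("Echo", "hello"))  # exposed_echo is reachable as root.echo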
Example #7
import pickle as pkl
from itertools import product
from multiprocessing import JoinableQueue, Pool, get_context
from os import getpid, makedirs
from os.path import exists

import numpy as np
from numpy import linspace

# QueueWorker, print_color, loadParams, loadMothData, RealTarget, Distractor
# and sim_jobs_path are assumed to come from the surrounding project.
def SimulationMainQueue(dataFiles, nThread=1):
    if not isinstance(dataFiles, list):
        dataFiles = [dataFiles]
    paramList = []
    simParamList = []
    nSimJobsList = []
    for dat in dataFiles:
        # Load parameters
        paramList.append(loadParams(dat))
        # Enumerate every combination of the conditions (Cartesian product)
        simParamList.append(
            list(
                product(
                    paramList[-1]["SNR"],
                    paramList[-1]["procNoiseSigma"],
                    paramList[-1]["pLSigmaAmp"],
                    paramList[-1]["Sigma"],
                    paramList[-1]["objAmp"],
                    paramList[-1]["dt"],
                    paramList[-1]["wControl"],
                    paramList[-1]["randSeed"],
                )
            )
        )
        nSimJobsList.append(len(simParamList[-1]))

    nTrials = len(nSimJobsList)
    # additional wiggle attenuation sims
    with open("./SimParameters/SimJobList.txt", "r") as fp:
        attenuation_sim_trials = fp.readlines()
        attenuation_sim_trials.sort()
    nAttenuationSimTrials = len(attenuation_sim_trials)
    nRegularSimJobs = sum(nSimJobsList)
    nTotalJobs = nRegularSimJobs + nAttenuationSimTrials
    # Limit pool size when job size is smaller than total available threads
    if nTotalJobs < nThread:
        nThread = nTotalJobs
    # Start a new parallel pool
    print("Starting parallel pool with {0} threads".format(nThread))
    ctx = get_context("fork")
    pool = Pool(processes=nThread)
    work_queue = JoinableQueue(maxsize=nTotalJobs)
    jobs = []
    remaining_jobs = nTotalJobs
    job_id = 1

    for it in range(nAttenuationSimTrials):
        job_path = f'{sim_jobs_path}/eih_sim_job_{job_id}.pkl'
        with open(job_path, 'wb') as fp:
            pkl.dump(attenuation_sim_trials[it].split(), fp, pkl.HIGHEST_PROTOCOL)
        # Fill in work queue
        work_queue.put(job_path, block=True, timeout=None)
        job_id += 1
        remaining_jobs -= 1
        print_color(
            f"[MasterNode-{getpid()}]: Adding new job {attenuation_sim_trials[it].split()[3]}",
            color="green",
        )

    # Kick off worker processes
    for _ in range(nThread):
        # Start a new worker process
        try:
            # Pool.Process(self, ...) is an instance method on Python <= 3.7
            p = pool.Process(target=QueueWorker, args=(work_queue,))
        except Exception:
            if ctx is not None:
                # On Python >= 3.8 Pool.Process is a staticmethod expecting a
                # context, so fall back to the "fork" context directly
                p = ctx.Process(target=QueueWorker, args=(work_queue,))
        p.start()
        jobs.append(p)

    for trial in range(nTrials):
        # Parse parameters
        param = paramList[trial]
        simParam = simParamList[trial]
        nJobs = nSimJobsList[trial]
        eidParam = param["eidParam"]
        ergParam = param["ergParam"]
        filename = param["filename"]
        # Check if saveDir exists, create the folder if not
        if not exists(eidParam.saveDir):
            print(f"Save folder {eidParam.saveDir} does not exist, creating...")
            makedirs(eidParam.saveDir, exist_ok=True)
        ergParam.time = None
        ergParam.eidTime = None
        for it in range(nJobs):
            eidParam.SNR = simParam[it][0]
            eidParam.procNoiseSigma = simParam[it][1]
            eidParam.pLSigmaAmp = simParam[it][2]
            eidParam.pLSigmaAmpBayesian = simParam[it][2]
            eidParam.pLSigmaAmpEID = simParam[it][2]
            eidParam.Sigma = simParam[it][3]
            eidParam.objAmp = simParam[it][4]
            ergParam.dt = simParam[it][5]
            eidParam.UpdateDeltaT(simParam[it][5])
            if eidParam.simType == "IF":
                eidParam.maxIter = round(eidParam.maxT / ergParam.dt)
            ergParam.wControl = simParam[it][6]
            eidParam.randSeed = simParam[it][7]
            eidParam.filename = (
                filename.replace("SNR", "SNR-" + str(eidParam.SNR))
                .replace("wC", "wC-" + str(ergParam.wControl))
                .replace("RandSeed", "RandSeed-" + str(eidParam.randSeed))
            )
            # Do the extra initialization here to speed up.
            if isinstance(eidParam.rawTraj, str) and "moth" in eidParam.rawTraj:
                eidParam.rawTraj = np.array(
                    loadMothData(
                        target="M300lux", trialID=0, nrmGain=eidParam.objAmp
                    )[1]
                )
            ergParam.time = linspace(0.0, ergParam.timeHorizon, ergParam.tRes)
            ergParam.eidTime = linspace(0.0, ergParam.timeHorizon, eidParam.res)
            # initialize multiple targets tracking
            if eidParam.multiTargetTracking == "dual":
                eidParam.multiTargetTracking = True
                eidParam.otherTargets = [RealTarget(eidParam.multiTargetInitialPos)]
            elif eidParam.multiTargetTracking == "distractor":
                eidParam.multiTargetTracking = True
                eidParam.otherTargets = [Distractor(eidParam.multiTargetInitialPos)]

            # Fill in work queue if it's not full
            job_path = f'{sim_jobs_path}/eih_sim_job_{job_id}.pkl'
            with open(job_path, 'wb') as fp:
                pkl.dump((ergParam, eidParam, False), fp, pkl.HIGHEST_PROTOCOL)
            work_queue.put(job_path, block=True, timeout=None)
            job_id += 1
            remaining_jobs -= 1
            print_color(
                f"[MasterNode-{getpid()}]: Adding new job {eidParam.filename}",
                color="green",
            )

    # Wait for all queued work and worker processes to finish
    work_queue.join()
    for job in jobs:
        job.join()
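Because this variant uses a JoinableQueue, the worker must call task_done() for every item it takes, or work_queue.join() will block forever. A sketch of a compatible QueueWorker, assuming each item is a path to a pickled job as in the example above:

import pickle as pkl
import queue

def QueueWorker(work_queue):
    # Hypothetical worker for the JoinableQueue variant.
    while True:
        try:
            job_path = work_queue.get(block=True, timeout=5)
        except queue.Empty:
            break  # assume the queue has been fully drained
        try:
            with open(job_path, 'rb') as fp:
                job = pkl.load(fp)
            # ... run the simulation described by job here ...
        finally:
            work_queue.task_done()  # required, or work_queue.join() hangs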