Example #1
File: main.py, Project: Winterpuma/bmstu_MR
def main():
    client_generator = Generator(EvenDistribution(8, 12))

    first_queue = []
    second_queue = []

    operators = [
        Operator(first_queue, EvenDistribution(15, 25)),  # the most productive
        Operator(first_queue, EvenDistribution(30, 50)),
        Operator(second_queue, EvenDistribution(20, 60))  # the least productive
    ]

    processors = [
        Processor(first_queue, EvenDistribution(15, 15)),  # exactly 15 minutes
        Processor(second_queue, EvenDistribution(30, 30))  # exactly 30 minutes
    ]

    total_requests = 300

    t_start = time()
    res = modeling(client_generator, operators, processors, total_requests)

    print('time seconds', time() - t_start)
    for key in res.keys():
        print(key, res[key])

    print('lost', res['lost'] / total_requests)
Example #2
File: main.py, Project: koret2090/Modeling
def main():
    clientGenerator = Generator(UniformDistribution(8, 12))

    firstQueue = []
    secondQueue = []

    operators = [
        Operator(firstQueue, UniformDistribution(15, 25)),
        Operator(firstQueue, UniformDistribution(30, 50)),
        Operator(secondQueue, UniformDistribution(20, 60))
    ]

    processors = [
        Processor(firstQueue, UniformDistribution(15, 15)),
        Processor(secondQueue, UniformDistribution(30, 30))
    ]

    totalRequests = 3000

    tStart = time()
    res = modeling(clientGenerator, operators, processors, totalRequests)

    print('time (secs)', time() - tStart)
    for key in res.keys():
        print(key, res[key])

    print('lost', res['lost'] / totalRequests)
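Examples #1 and #2 are the same call-center simulation; the Generator, Operator, Processor classes and the modeling driver they call are not part of the excerpts. Below is a minimal, self-contained sketch of what such a uniform-distribution model could look like. Every name and behaviour in it is an assumption made for illustration, and the processors that drain the queues are omitted for brevity.

import random

# A simplified stand-in for the pieces used by Examples #1 and #2; the real
# project code is not shown in the excerpts, so everything here is assumed.

class UniformDistribution:
    """Uniform random arrival/service time in [a, b] minutes."""
    def __init__(self, a, b):
        self.a, self.b = a, b

    def generate(self):
        return random.uniform(self.a, self.b)


class Operator:
    """Serves one request at a time and stays busy until it finishes."""
    def __init__(self, distribution):
        self.distribution = distribution
        self.busy_until = 0.0

    def is_free(self, now):
        return now >= self.busy_until

    def take(self, now):
        self.busy_until = now + self.distribution.generate()


def modeling(arrival_dist, operators, total_requests):
    """Count requests that arrive while every operator is busy."""
    now, lost = 0.0, 0
    for _ in range(total_requests):
        now += arrival_dist.generate()
        free = [op for op in operators if op.is_free(now)]
        if free:
            free[0].take(now)   # hand the request to the first free operator
        else:
            lost += 1           # no operator available: the request is lost
    return {'lost': lost}


if __name__ == '__main__':
    operators = [Operator(UniformDistribution(15, 25)),
                 Operator(UniformDistribution(30, 50)),
                 Operator(UniformDistribution(20, 60))]
    res = modeling(UniformDistribution(8, 12), operators, 300)
    print('lost', res['lost'] / 300)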
Example #3
def main():
    # parse arguments
    args = cli()
    # setup processor and visualizer
    processor = Processor(model=args['model'])
    visualizer = Visualizer()

    # fetch input
    print('image arg', args['image'])
    # img = cv2.imread('inputs/{}'.format(args['image']))
    input_image_paths = []
    folder_path = args['image']
    if os.path.isdir(folder_path):
        ls = os.listdir(folder_path)
        for file_name in sorted(ls, key=lambda x: str(x.split('.jpg')[0])):
            input_image_paths.append(os.path.join(folder_path, file_name))
    for input_image_path in input_image_paths:
        img = cv2.imread(input_image_path)

        # inference
        output = processor.detect(img)

        # final results
        boxes, confs, classes = processor.post_process(output, conf_thres=0.3, iou_thres=0.4, origin_w=img.shape[1], origin_h=img.shape[0])
        visualizer.draw_results(img, boxes, confs, classes)
Example #4
def system_init():
    processor_tmp = Processor()
    resourcer_tmp = Resourcer()

    #for x in processor_tmp.get_running_list():
    #   print(x + " ", end='')
    return processor_tmp, resourcer_tmp
Example #5
 def __init_models(self):
     for file in os.listdir(self.models_folder):
         if file.endswith(".json"):
             processor = Processor(sliding_window_frame_size=20,
                                   stddev_threshold=0.1,
                                   risk_iterations=5,
                                   minimum_training_size=900,
                                   starting_eval_size=5,
                                   save_trained_model=True,
                                   save_path='./')
             processor.load_processor(self.models_folder + '/' + file, [{
                 'Learner':
                 AutoEncoderNNWeakLearner(cols_shape=24),
                 'Extractor':
                 DataReaderExtractor(),
                 'Stream':
                 DataReaderStream(
                     data_folder='C:/Users/ofiri/Desktop/Tests/B/test'),
                 'Sanitizer':
                 None
             }, {
                 'Learner':
                 AutoEncoderNNWeakLearner(cols_shape=6),
                 'Extractor':
                 DataReaderExtractor(),
                 'Stream':
                 DataReaderStream(
                     data_folder='C:/Users/ofiri/Desktop/Tests/A/test'),
                 'Sanitizer':
                 None
             }])
             self.processes.append(processor)
Example #6
def system_init():
    processor_temp = Processor()
    resource_temp = Resource()
    processor_temp.create_process('init', 0)
    for x in processor_temp.get_running_list():
        print(x + " ", end='')
    return processor_temp, resource_temp
Example #7
File: Tests.py, Project: sipb/simple-mit
def run_tests():
    global test_result
    TEST_ENV = {
            'stopped': False,
            'test': True,
            'send': False,
            }

    TEST_ENV['sender'] = TestSender()
    TEST_ENV['enumerator'] = Enumerator(TEST_ENV)
    TEST_ENV['parser'] = Parser(TEST_ENV)
    TEST_ENV['screener'] = Screener(TEST_ENV)
    TEST_ENV['logger'] = open('/dev/null', 'w')
    TEST_ENV['processor'] = processor = Processor(TEST_ENV)

    num_failures = 0
    for (rcpttos, expected) in tests:
        test_result = {}
        processor.process_message({
            'peer': None,
            'mailfrom': 'mailfrom',
            'rcpttos': rcpttos,
            'data': 'message',
            })
        print('testing sending to', rcpttos, '...')
        for key in expected:
            assert key in test_result
            if type(test_result[key]) == type([]):
                assert set(test_result[key]) == set(expected[key])
            else:
                assert test_result[key] == expected[key]
        for key in test_result:
            assert key in expected
Example #8
def main():
    # parse arguments
    args = cli()

    # setup processor and visualizer
    processor = Processor(model=args['model'])
    visualizer = Visualizer()

    # fetch input
    print('image arg', args['image'])
    img = cv2.imread('inputs/{}'.format(args['image']))

    # inference
    output = processor.detect(img)
    img = cv2.resize(img, (640, 640))

    # object visualization
    object_grids = processor.extract_object_grids(output)
    visualizer.draw_object_grid(img, object_grids, 0.1)

    # class visualization
    class_grids = processor.extract_class_grids(output)
    visualizer.draw_class_grid(img, class_grids, 0.01)

    # bounding box visualization
    boxes = processor.extract_boxes(output)
    visualizer.draw_boxes(img, boxes)

    # final results
    boxes, confs, classes = processor.post_process(output)
    visualizer.draw_results(img, boxes, confs, classes)
Example #9
 def add_processor(self, id, output_dist):
     """
     Método de clase
     Se agrega un procesador con un identificador y distribución de salidad, señalada por parámetros
     """
     processor = Processor(id, output_dist)
     self._processors_list.append(processor)
Example #10
    def test_limpiar_mensaje_debe_eliminar_letras_iguales_consecutivas(self):
        pObj = Processor()
        responseTrue = pObj.limpiar_mensaje("holaaaaa")
        responseFalse = pObj.limpiar_mensaje("holaaaaasss")

        self.assertEqual("hola", responseTrue)
        self.assertNotEqual("hola", responseFalse)
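The assertions above only pin down the observable behaviour of limpiar_mensaje; the Processor implementation itself is not shown. A plausible sketch that satisfies both assertions, assuming the method simply collapses consecutive repeated letters:

# Hypothetical stand-in for Processor.limpiar_mensaje, inferred from the test above:
# keep a character only when it differs from the previous one.
def limpiar_mensaje(texto):
    resultado = []
    for letra in texto:
        if not resultado or resultado[-1] != letra:
            resultado.append(letra)
    return "".join(resultado)

assert limpiar_mensaje("holaaaaa") == "hola"
assert limpiar_mensaje("holaaaaasss") == "holas"   # hence the assertNotEqual("hola", ...)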
Example #11
def process(transaction):
    """Dropbox entry point.

    @param transaction, the transaction object
    """

    # Get path to containing folder
    # __file__ does not work (reliably) in Jython
    dbPath = "../core-plugins/microscopy/4/dss/drop-boxes/MicroscopyDropbox"

    # Path to the logs subfolder
    logPath = os.path.join(dbPath, "logs")

    # Make sure the logs subfolder exist
    if not os.path.exists(logPath):
        os.makedirs(logPath)

    # Path for the log file
    logFile = os.path.join(logPath, "log.txt")

    # Set up logging
    logging.basicConfig(filename=logFile,
                        level=logging.DEBUG,
                        format='%(asctime)-15s %(levelname)s: %(message)s')
    logger = logging.getLogger("Microscopy")

    # Create a Processor
    processor = Processor(transaction, logger)

    # Run
    processor.run()
Example #12
File: Pipeline.py, Project: ximu7/MIBCIProj
 def __init__(self, main_cfg):
     # self.ns_reader = NSDataReader()
     self.ns_reader = NSDataReaderRandom()
     self.is_online = main_cfg.is_online
     self.cue = CueInterface(main_cfg)
     # self.cue = VRInterface(main_cfg)
     self.stim = Stimulator(main_cfg.stim_cfg)
     self.save_data_path = main_cfg.subject.get_date_dir()
     self.filename = 'online' if main_cfg.is_online else 'acquire'
     self.stim_cfg = main_cfg.stim_cfg
     self.stim.subscribe(BCIEvent.change_stim, self.cue.handle_stim)
     self.stim.subscribe(BCIEvent.change_stim, main_cfg.exo.handle_stim)
     self.cue.subscribe(BCIEvent.gaze_focus, self.stim.get_gaze)
     self.cue.subscribe(BCIEvent.cue_disconnect, self.stim.stop_stim)
     self.stim.subscribe(BCIEvent.save_data, self.save_data)
     if self.is_online:
         self.processor = Processor(main_cfg)
         self.processor.subscribe(BCIEvent.readns_header,
                                  self.ns_reader.get_head_settings)
         self.processor.subscribe(BCIEvent.readns,
                                  self.ns_reader.get_ns_signal)
         self.stim.subscribe(BCIEvent.change_stim,
                             self.processor.handle_stim)
         self.processor.subscribe(BCIEvent.online_progressbar,
                                  self.cue.send_progress)
         self.processor.subscribe(BCIEvent.online_ctrl,
                                  self.cue.online_feedback)
         self.processor.subscribe(BCIEvent.online_ctrl,
                                  main_cfg.exo.online_feedback)
         self.stim.subscribe(BCIEvent.save_data, self.processor.save_log)
Example #13
    def proc(self, config: dict, record_dir: str, danmu_path: str,
             current_state, state_change_time) -> None:
        p = Processor(config, record_dir, danmu_path)
        p.run()

        if config.get('spec', {}).get('uploader', {}).get('record', {}).get(
                'upload_record', False) or config.get('spec', {}).get(
                    'uploader', {}).get('clips', {}).get(
                        'upload_clips', False):
            current_state.value = int(utils.state.UPLOADING_TO_BILIBILI)
            state_change_time.value = time.time()
            d = None  # upload result; stays None if the upload below fails
            try:
                u = Uploader(p.outputs_dir, p.splits_dir, config)
                d = u.upload(p.global_start)
            except Exception as e:
                current_state.value = int(utils.state.ERROR)
                state_change_time.value = time.time()

            if d is None:
                current_state.value = int(utils.state.ERROR)
                state_change_time.value = time.time()
            else:
                if not config.get('spec', {}).get('uploader', {}).get(
                        'record',
                    {}).get('keep_record_after_upload', True) and d.get(
                        "record",
                        None) is not None and not config.get('root', {}).get(
                            'uploader', {}).get('upload_by_edit', False):
                    rc = BiliVideoChecker(d['record']['bvid'], p.splits_dir,
                                          config)
                    rc.start()
                if not config.get('spec', {}).get('uploader', {}).get(
                        'clips',
                    {}).get('keep_clips_after_upload', True) and d.get(
                        "clips",
                        None) is not None and not config.get('root', {}).get(
                            'uploader', {}).get('upload_by_edit', False):
                    cc = BiliVideoChecker(d['clips']['bvid'], p.outputs_dir,
                                          config)
                    cc.start()

        if config.get('root', {}).get('enable_baiduyun', False) and config.get(
                'spec', {}).get('backup', False):
            current_state.value = int(utils.state.UPLOADING_TO_BAIDUYUN)
            state_change_time.value = time.time()
            try:
                from bypy import ByPy
                bp = ByPy()
                bp.upload(p.merged_file_path)
            except Exception as e:
                logging.error('Error when uploading to Baiduyun:' + str(e) +
                              traceback.format_exc())
                current_state.value = int(utils.state.ERROR)
                state_change_time.value = time.time()
                return

        if current_state.value != int(utils.state.LIVE_STARTED):
            current_state.value = int(utils.state.WAITING_FOR_LIVE_START)
            state_change_time.value = time.time()
Example #14
def get_processor(f):
    processor = Processor(int(f.readline()))
    while True:
        line = f.readline().split()
        if len(line) == 0:
            break
        processor.insert_processor_mode(*map(float, line))
    return processor
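get_processor only hints at the file layout it expects: an integer on the first line for the Processor constructor, then one whitespace-separated row of floats per call to insert_processor_mode, terminated by a blank line or end of file. A hypothetical input illustrating that assumption; the meaning of the values is not part of the excerpt.

import io

# Hypothetical input for get_processor(); only the layout is inferred from the reads above.
sample = io.StringIO(
    "2\n"          # first line: an integer passed to Processor(...)
    "0.8 1.2\n"    # one processor mode per line, floats fed to insert_processor_mode
    "0.5 2.0\n"
    "\n"           # blank line ends the loop
)
# processor = get_processor(sample)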
Example #15
def process_snapshot(snapshot: Snapshot):
    processor = Processor(snapshot, base_data_dir)
    processor.read_tiff()
    processor.refine_bbox()
    usage_stat = psutil.virtual_memory()
    print("Available = ",
          math.ceil(usage_stat.available / 1048576 * 100) / 100, " MBs")
    return snapshot
Example #16
    def test_letras_repetidas_verifica_si_el_texto_tienen_letras_consecutivas_repetidas(
        self, ):
        pObj = Processor()
        response1 = pObj.letras_consecutivas_repetidas("holaaaaa")
        response2 = pObj.letras_consecutivas_repetidas("hola")

        self.assertTrue(response1)
        self.assertFalse(response2)
Example #17
    def test_generar_mensaje_corectamente(self, ):
        P_Obj = Processor()
        response = P_Obj.generar_mensaje([
            "11 14 38",
            "CeseAlFuego",
            "CoranACubierto",
            "XXcaaamakkCCessseAAllFueeegooDLLKmmNNN",
        ])

        self.assertEqual("Si\nNo", response)
Example #18
 def __init__(self, number, mainMemory, UIManager, connectionBus):
     threading.Thread.__init__(self)
     self.chip = number
     self.mainMemory = mainMemory
     self.procesors = []
     self.UIManager = UIManager
     for i in range(2):
         self.procesors.append(Processor(i, self))
         self.procesors[i].start()
     self.L2 = L2(self)
     self.connectionBus = connectionBus
Example #19
	def __init__(self, samp_T_us, cpi_samps, savefile, emulate=False):
		"""
		PURPOSE: creates a new Speed_Gun
		ARGS:
			samp_T_us (float): sampling period in microseconds
			cpi_samps (int): number of samples in one CPI
			savefile (str): the file to save to
			emulate (bool): if True loads pre-recorded data, if False runs for
				real
		RETURNS: new instance of a Speed_Gun
		NOTES:
		"""
		#Save arguments
		self.samp_T_us = samp_T_us
		self.fs = 1e6 / samp_T_us
		self.cpi_samps = cpi_samps
		self.emulate = emulate
		self.savefile = savefile

		#Setup app
		self.app = QtWidgets.QApplication(sys.argv)
		self.main_win = QtWidgets.QMainWindow()
		self.ui = Ui_MainWindow()
		self.ui.setupUi(self.main_win)

		#Initialize UI values
		self.init_ui_values()

		#Connect buttons
		self.ui.run_button.clicked.connect(self.run_button_clicked)
		self.ui.stop_button.clicked.connect(self.stop_button_clicked)
		self.ui.vel_radbutton.toggled.connect(self.rad_button_toggled)
		self.ui.raw_sig_radbutton.toggled.connect(self.rad_button_toggled)

		#Setup queues
		self.record_q = queue.Queue()
		self.res_q = queue.Queue()
		self.save_q = queue.Queue()

		#Setup other modules
		#Setup recorder.replayer
		if emulate:
			self.recorder = Replayer("C:\\Users\\rga0230\\Documents\\School\\EE-137\\EE-137-Doppler-Radar\\data\\car.mat", [self.record_q, self.save_q], ts_us=samp_T_us, chunk_size=cpi_samps)
		else:
			self.recorder = Chunked_Arduino_ADC(samp_T_us, cpi_samps, [self.record_q, self.save_q])
		#Setup processor
		self.proc = Processor(samp_T_us, cpi_samps, self.record_q, self.res_q)
		#Setup saver
		self.saver = Chunk_Saver(savefile, samp_T_us, cpi_samps, self.save_q)

		#Setup variables for our update thread
		self.update_thread = threading.Thread(target = self.update_thread_run)
		self.update_keep_going = threading.Event()
		self.update_keep_going.set()
Example #20
def fetch(instructions_file):
    processor = Processor()
    for instructions in instructions_file:
        if not instructions.strip():
            return
        binary_string = ""
        for bits in instructions:
            if bits != "\n":
                binary_string += bits
        decoder = Decoder(binary_string)
        processor.set_instruction(decoder)
        processor.start_processor()
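fetch() expects an iterable of lines, each holding one binary-encoded instruction, with a blank line ending the run; the word width and encoding are defined by the Decoder class, which is not shown. A hypothetical driver under those assumptions, with placeholder instruction words:

# Hypothetical driver for fetch(); the instruction words below are placeholders,
# since the real encoding is defined by the Decoder class, not by this excerpt.
instructions_file = [
    "00000000000000000000000000000000\n",  # placeholder word, width assumed
    "00000000000000000000000000000001\n",  # placeholder word, width assumed
    "\n",                                  # blank line stops processing
]
# fetch(instructions_file)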
Example #21
 def test_single_instr (self):
     prog = "I ADDI R1 R1 8"
     memory = Memory ()
     memory.loadProgramDebugFromText (prog)
     processor = Processor (memory, 0)
     disablePrint ()
     processor.start ()
     enablePrint ()
     cpi = processor.getCPI ()
     r1_content = processor.register_file [1]
     self.assertEqual (cpi, 5)
     self.assertEqual (r1_content, 8)
Example #22
    def __init__(self, numOfLayer):
        self.num = numOfLayer
        self.parent = []
        self.children = []
        self.handled = []
        self.Indexer = Indexer()
        self.Processor = Processor()
        self.Porter = PorterStemmer()
        self.db = []

        link = "http://www.cse.ust.hk/"
        self.parent.append(link)
Example #23
    def __init__(self, input=None, output=None, modules=None):
        pp = Processor()

        modules = [name.lower() for name in modules]  # normalize module names to lowercase

        for name in modules:
            if name in Modules.modules:
                module = Modules.modules[name]()
                pp.register(module)

        pp.input(input)
        pp.process()
        pp.output(output)
Example #24
    def test_independant_instrs_dummy (self):
        prog = """I ADDI R1 R1 1
R ADD  R3 R3 R2
"""
        memory = Memory ()
        memory.loadProgramDebugFromText (prog)
        processor = Processor (memory, 0)
        disablePrint ()
        processor.start ()
        enablePrint ()
        cpi = processor.getCPI ()
        # CPI should be 3 as there is no stall
        # for the second instruction.
        self.assertEqual (cpi, 3)
Example #25
def process(transaction):
    """Dropbox entry point.

    @param transaction, the transaction object
    """

    #
    # Run registration
    #
    prefix = "LSR_FORTESSA"
    version = 2
    logDir = "../core-plugins/flow/4/dss/drop-boxes/BDLSRFortessaDropbox/logs"

    processor = Processor(transaction, prefix, version, logDir)
    processor.run()
Example #26
def process(transaction):
    """Dropbox entry point.

    @param transaction, the transaction object
    """

    #
    # Run registration
    #
    prefix = "MOFLO_XDP"
    version = 2
    logDir = "../core-plugins/flow/4/dss/drop-boxes/BCMoFloXDPDropbox/logs"

    processor = Processor(transaction, prefix, version, logDir)
    processor.run()
Example #27
def process(transaction):
    """Dropbox entry point.

    @param transaction, the transaction object
    """

    #
    # Run registration
    #
    prefix = "INFLUX"
    version = 2
    logDir = "../core-plugins/flow/3/dss/drop-boxes/BDInfluxDropbox/logs"

    processor = Processor(transaction, prefix, version, logDir)
    processor.run()
Example #28
    def test_validar_datos_instrucciones_genera_exception_cuando_en_linea_1_parametro_1_no_coincida_con_texto_linea_2(
        self, ):
        try:
            PObj = Processor()
            response = PObj.validar_datos([
                "5 14 80",
                "CeseAlFuego",
                "CoranACubierto",
                "XXcaaamakkCCessseAAllFueeegooCoranACuuubiiiertoooDLLKmmNNN",
            ])

        except ValueError as e:
            # print(e)
            pass
        else:
            raise ValueError(
                "==>  test_validar_datos_instrucciones_genera_exception_cuando_en_linea_1_parametro_1_no_coincida_con_texto_linea_2"
            )
Example #29
    def test_generar_mensaje_genera_un_exception_por_recibir_dos_mensajes_posibles(
        self, ):
        try:
            PObj = Processor()
            response = PObj.generar_mensaje([
                "11 14 80",
                "CeseAlFuego",
                "CoranACubierto",
                "XXcaaamakkCCessseAAllFueeegooCoranACuuubiiiertoooDLLKmmNNN",
            ])

        except ValueError as e:
            # print(e)
            pass
        else:
            raise ValueError(
                "==>  test_generar_mensaje_genera_un_exception_por_recibir_dos_mensajes_posibles"
            )
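Both tests above reimplement an expected-exception check by hand with try/except/else. unittest already provides this via the assertRaises context manager; a sketch of the same check in that form, with a hypothetical test name and the inputs copied from the test above:

    # Equivalent expected-exception check using unittest's assertRaises context manager.
    def test_generar_mensaje_lanza_value_error_con_dos_mensajes_posibles(self):
        with self.assertRaises(ValueError):
            Processor().generar_mensaje([
                "11 14 80",
                "CeseAlFuego",
                "CoranACubierto",
                "XXcaaamakkCCessseAAllFueeegooCoranACuuubiiiertoooDLLKmmNNN",
            ])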
Example #30
def proc(config: dict, record_dir: str, danmu_path: str) -> None:
    p = Processor(config, record_dir, danmu_path)
    p.run()
    u = Uploader(p.outputs_dir, p.splits_dir, config)
    d = u.upload(p.global_start)
    if not config['spec']['uploader']['record'][
            'keep_record_after_upload'] and d.get("record", None) is not None:
        rc = BiliVideoChecker(d['record']['bvid'], p.splits_dir, config)
        rc_process = Process(target=rc.check)
        rc_process.start()
    if not config['spec']['uploader']['clips'][
            'keep_clips_after_upload'] and d.get("clips", None) is not None:
        cc = BiliVideoChecker(d['clips']['bvid'], p.outputs_dir, config)
        cc_process = Process(target=cc.check)
        cc_process.start()
    if config['root']['enable_baiduyun'] and config['spec']['backup']:
        from bypy import ByPy
        bp = ByPy()
        bp.upload(p.merged_file_path)