Example #1
def main():
    mp.set_start_method('spawn')
    q = mp.Queue()
    p = mp.Process(target=foo, args=(q,))
    p.start()
    print(q.get())
    p.join()
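
The excerpt above omits the import and the worker function; filled in, the same spawn-and-Queue pattern runs as a standalone script (a sketch; the body of foo is assumed):

import multiprocessing as mp


def foo(q):
    q.put('hello')   # assumed worker body: send a value back to the parent


def main():
    mp.set_start_method('spawn')   # choose the start method once, up front
    q = mp.Queue()
    p = mp.Process(target=foo, args=(q,))
    p.start()
    print(q.get())                 # blocks until the child puts something
    p.join()


if __name__ == '__main__':         # required for 'spawn' on Windows/macOS
    main()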
Example #2
def main():

    global SEM

    if args.forkserver:
        try:
            multiprocessing.set_start_method('forkserver')
        except AttributeError as e:
            # set_start_method is not present in older versions
            print('Not using multiprocessing start_method forkserver')
            print(e)

    if args.pidfile:
        if getpid(args.pidfile) and is_running(getpid(args.pidfile)):
            raise AlreadyRunning('daemon with pidfile {}'.format(args.pidfile))

        with open(args.pidfile, 'w') as fh:
            fh.write(str(os.getpid()))

    SEM = Semaphore(args.nprocs)
    pid = os.getpid()

    def graceful_exit(*_):
        # for some reason the child processes will also execute this function
        # so check that I am actually the parent before executing all of this
        if os.getpid() == pid:
            log.info('\nterminating {} processes'.format(len(PROCESSES)))
            for p in PROCESSES.values():
                p.terminate()
        sys.exit()

    signal.signal(signal.SIGINT, graceful_exit)
    signal.signal(signal.SIGTERM, graceful_exit)
    daemon()
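
Note: the "for some reason" in the comment above has a concrete cause. With the default fork start method on Linux, children created after signal.signal() inherit the handler, and Ctrl-C delivers SIGINT to the whole foreground process group, so every process runs it. A minimal sketch of that effect and the parent-PID guard (the worker body is hypothetical):

import multiprocessing as mp
import os
import signal
import sys
import time


def worker():
    time.sleep(60)   # placeholder child workload


if __name__ == '__main__':
    parent_pid = os.getpid()

    def graceful_exit(*_):
        # Both the parent and its forked children end up here on Ctrl-C;
        # only the parent should clean up the others.
        if os.getpid() == parent_pid:
            print('parent: terminating children')
        sys.exit()

    signal.signal(signal.SIGINT, graceful_exit)
    children = [mp.Process(target=worker) for _ in range(2)]
    for c in children:
        c.start()
    for c in children:
        c.join()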
Example #3
def run(args):
    # create dummy environment to be able to create model
    env = gym.make(args.environment)
    assert isinstance(env.observation_space, Box)
    assert isinstance(env.action_space, Discrete)
    print("Observation space:", env.observation_space)
    print("Action space:", env.action_space)

    # create main model
    model = create_model(env, args)
    model.summary()
    env.close()

    # for better compatibility with Theano and Tensorflow
    multiprocessing.set_start_method('spawn')

    # create shared buffer for sharing weights
    blob = pickle.dumps(model.get_weights(), pickle.HIGHEST_PROTOCOL)
    shared_buffer = Array('c', len(blob))
    shared_buffer.raw = blob

    # force runner processes to use cpu
    os.environ["CUDA_VISIBLE_DEVICES"] = ""

    # create fifos and threads for all runners
    fifos = []
    for i in range(args.num_runners):
        fifo = Queue(args.queue_length)
        fifos.append(fifo)
        process = Process(target=runner, args=(shared_buffer, fifo, args))
        process.start()

    # start trainer in main thread
    trainer(model, fifos, shared_buffer, args)
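
The shared_buffer trick above works because Array('c', n) returns a synchronized char array whose .raw attribute exposes the underlying bytes, so the runner processes can unpickle the weights from it. A stripped-down sketch of just that mechanism (the reader worker is illustrative, not from the excerpt):

import pickle
from multiprocessing import Array, Process


def reader(shared_buffer):
    # Rebuild the object from the shared raw bytes in the child process.
    weights = pickle.loads(shared_buffer.raw)
    print('child sees', weights)


if __name__ == '__main__':
    blob = pickle.dumps([1.0, 2.0, 3.0], pickle.HIGHEST_PROTOCOL)
    shared = Array('c', len(blob))   # synchronized ctypes char array
    shared.raw = blob                # copy the pickled bytes into shared memory
    p = Process(target=reader, args=(shared,))
    p.start()
    p.join()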
Example #4
File: main.py Project: freenas/freenas
 def __init__(
     self, loop_debug=False, loop_monitor=True, overlay_dirs=None, debug_level=None,
     log_handler=None, startup_seq_path=None,
 ):
     super().__init__(overlay_dirs)
     self.logger = logger.Logger('middlewared', debug_level).getLogger()
     self.crash_reporting = logger.CrashReporting()
     self.crash_reporting_semaphore = asyncio.Semaphore(value=2)
     self.loop_debug = loop_debug
     self.loop_monitor = loop_monitor
     self.debug_level = debug_level
     self.log_handler = log_handler
     self.startup_seq = 0
     self.startup_seq_path = startup_seq_path
     self.app = None
     self.__loop = None
     self.__thread_id = threading.get_ident()
     # Spawn new processes for ProcessPool instead of forking
     multiprocessing.set_start_method('spawn')
     self.__threadpool = concurrent.futures.ThreadPoolExecutor(
         initializer=lambda: set_thread_name('threadpool_ws'),
         max_workers=10,
     )
     self.__init_procpool()
     self.__wsclients = {}
     self.__events = Events()
     self.__event_sources = {}
     self.__event_subs = defaultdict(list)
     self.__hooks = defaultdict(list)
     self.__server_threads = []
     self.__init_services()
     self.__console_io = False if os.path.exists(self.CONSOLE_ONCE_PATH) else None
     self.jobs = JobsQueue(self)
Example #5
    def compute_topological_summary(self):
        """
        For each outlier in self.outliers, generate cleaned_points. Then
        construct GeometricComplex(cleaned_points) and compute its
        persistent 0-th homology.

        We save the 0-persistence pairs in the _list_
        self.persistence_pairs.

        persistence_pairs[outlier] contains a dictionary with
        self-explanatory keys:
        x_filtration_H0
        x_inv_filtration_H0
        y_filtration_H0
        y_inv_filtration_H0

        values are arrays of persistence pairs
        """

        mproc.set_start_method('forkserver')
        arguments = [i for i in range(len(self.outliers))]
        with mproc.Pool(processes=int(mproc.cpu_count()/2)) as pool:
            results = pool.map(self.single_outlier, arguments)

        self.extrema = [x[0] for x in results]
        self.persistence_pairs = [x[1] for x in results]
Example #6
    def __init__(self, p_max_items_by_queue=50000, p_forkserver=False, p_log_every=10000):
        """Class creation"""
        logger = logging.getLogger('swallow')

        if p_forkserver:
            mp.set_start_method('forkserver')

        self.readers = None
        self.writer = None
        self.writer_store_args = None
        self.process = None
        self.process_args = None
        if p_max_items_by_queue is None:
            self.in_queue = JoinableQueue()
            self.out_queue = JoinableQueue()
        else:
            if sys.platform.lower() == 'darwin':
                logger.warning("Running Swallow on macOS: the number of items per queue is limited to 32767.")
                p_max_items_by_queue = 32767
            self.in_queue = JoinableQueue(p_max_items_by_queue)
            self.out_queue = JoinableQueue(p_max_items_by_queue)

        self.counters = {
            'nb_items_processed': Value('i', 0),
            'nb_items_error': Value('i', 0),
            'nb_items_scanned': Value('i', 0),
            'nb_items_stored': Value('i', 0),
            'whole_storage_time': Value('f', 0),
            'bulk_storage_time': Value('f', 0),
            'whole_process_time': Value('f', 0),
            'real_process_time': Value('f', 0),
            'idle_process_time': Value('f', 0),
            'scan_time': Value('f', 0),
            'log_every': p_log_every
        }
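
Note: the 32767 cap in the macOS branch above matches SEM_VALUE_MAX on macOS, which bounds how large a maxsize multiprocessing queues can enforce there. In isolation the clamping logic looks like this (make_queue is a hypothetical helper, not part of the class above):

import sys
from multiprocessing import JoinableQueue

MACOS_SEM_VALUE_MAX = 32767  # POSIX semaphore ceiling on macOS


def make_queue(maxsize=None):
    """Hypothetical helper mirroring the constructor logic above."""
    if maxsize is None:
        return JoinableQueue()
    if sys.platform.lower() == 'darwin':
        maxsize = min(maxsize, MACOS_SEM_VALUE_MAX)
    return JoinableQueue(maxsize)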
Example #7
def crack(target_hash, salt, seed_str, num_proc):
    mp.set_start_method('spawn')
    
    #total_count = Counter(0)
    #pool = mp.Pool(initializer=init, initargs=(total_count,), processes=num_proc)
    
    #part_crack_helper = partial(crack_helper, target_hash=target_hash, salt=salt)
    
    try:
        #pool.map(part_crack_helper, product(seed_str, repeat=4))
        #pool.close()
        #pool.join()

        processes = []
        shift_interval = int(len(seed_str) / num_proc)
        pt_len = 6
        for i in range(0, num_proc):
            seed_str = ''.join(shift_array(list(seed_str), shift_interval))
            p = mp.Process(target=crack_helper_proc, args=(product(seed_str, repeat=pt_len), target_hash, salt))
            p.start()
            print("pid: %d started using seed str: %s" % (p.pid, seed_str))
            processes.append(p)

        for p in processes:
            p.join()

    except KeyboardInterrupt:
        #pool.terminate()
        #pool.join()

        for p in processes:
            p.terminate()
            p.join()
Example #8
File: conftest.py Project: asi1024/chainer
def scope_session():
    if int(platform.python_version_tuple()[0]) >= 3:
        multiprocessing.set_start_method('forkserver')
        p = multiprocessing.Process(target=dummy_func)
        p.start()
        p.join()
    yield
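
The fixture decorator is not shown in the excerpt; in a conftest.py a warm-up like this is typically registered as a session-scoped, autouse fixture, roughly as follows (the decorator arguments are an assumption, not the chainer source):

import multiprocessing
import platform

import pytest


def dummy_func():
    pass


@pytest.fixture(scope='session', autouse=True)   # assumed registration
def scope_session():
    if int(platform.python_version_tuple()[0]) >= 3:
        # Start the forkserver once, before tests import heavy modules.
        multiprocessing.set_start_method('forkserver')
        p = multiprocessing.Process(target=dummy_func)
        p.start()
        p.join()
    yield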
Example #9
    def test_transmission(self):
        import multiprocessing as mp
        if hasattr(mp, 'set_start_method'):
            mp.set_start_method('spawn')

        iq = mp.Queue()
        oq = mp.Queue()
        portName = 'TestVirtualPorts.%i' % time.time()
        senderProc = mp.Process(target=SenderProc, args=(oq, iq, portName))
        senderProc.start()

        # handshake
        self.assertEqual(iq.get(), 'init')  # virtual midi port is now open

        # Supposedly you can't just open a virtual port by name from
        # within the same proc or proc group? Anyway opening by index
        # works.
        device = RtMidiIn()
        for i in range(device.getPortCount()):
            if device.getPortName(i) == portName:
                device.openPort(i)
                break
        self.assertTrue(device.isPortOpen())

        # collect messages and print progress
        self.messages = []
        self.last_s = ''

        def put(m):
            self.messages.append(m)
            if len(self.messages) % 10 == 0:
                sys.stdout.write('\b' * len(self.last_s))  # backspace
                self.last_s = '%s: Received message %i / 32640' % (__name__, len(self.messages))
                sys.stdout.write(self.last_s)
                # sys.stdout.write('.')
                sys.stdout.flush()

        oq.put('start')
        for i in range(128):
            for j in range(1, 128):
                msg = device.getMessage(1000)
                self.assertTrue(msg is not None)
                self.assertTrue(msg.isNoteOn())
                self.assertEqual(msg.getNoteNumber(), i)
                self.assertEqual(msg.getVelocity(), j)
                put(msg)
                oq.put('next')

        for i in range(128):
            for j in range(128):
                msg = device.getMessage(1000)
                self.assertTrue(msg is not None)
                self.assertTrue(msg.isController())
                self.assertEqual(msg.getControllerNumber(), i)
                self.assertEqual(msg.getControllerValue(), j)
                put(msg)
                oq.put('next')

        self.assertEqual(len(self.messages), 32640)
        oq.put('done')
Example #10
    def __init__(self,
                 project_dir=None,
                 max_training_processes=1,
                 response_log=None,
                 emulation_mode=None,
                 remote_storage=None,
                 component_builder=None,
                 model_server=None,
                 wait_time_between_pulls=None):
        self._training_processes = max(max_training_processes, 1)
        self._current_training_processes = 0
        self.responses = self._create_query_logger(response_log)
        self.project_dir = config.make_path_absolute(project_dir)
        self.emulator = self._create_emulator(emulation_mode)
        self.remote_storage = remote_storage
        self.model_server = model_server
        self.wait_time_between_pulls = wait_time_between_pulls

        if component_builder:
            self.component_builder = component_builder
        else:
            self.component_builder = ComponentBuilder(use_cache=True)

        self.project_store = self._create_project_store(project_dir)

        # tensorflow sessions are not fork-safe,
        # and training processes have to be spawned instead of forked. See
        # https://github.com/tensorflow/tensorflow/issues/5448#issuecomment
        # -258934405
        multiprocessing.set_start_method('spawn', force=True)

        self.pool = ProcessPool(self._training_processes)
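
set_start_method('spawn', force=True) changes the start method for the whole process; when only one pool needs spawned workers, multiprocessing.get_context offers a scoped alternative (a sketch of that alternative, not the project code above):

import multiprocessing as mp


def _train(job_id):
    return job_id * 2   # placeholder training job


if __name__ == '__main__':
    # The 'spawn' context is confined to this pool; the global
    # start method is left untouched.
    ctx = mp.get_context('spawn')
    with ctx.Pool(processes=2) as pool:
        print(pool.map(_train, range(4)))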
Example #11
    def __init__(self, p_max_items_by_queue=50000, p_forkserver=False, p_log_every=10000):
        """Class creation"""
        if p_forkserver:
            mp.set_start_method('forkserver')

        self.readers = None
        self.writer = None
        self.writer_store_args = None
        self.process = None
        self.process_args = None
        if p_max_items_by_queue is None:
            self.in_queue = JoinableQueue()
            self.out_queue = JoinableQueue()
        else:
            self.in_queue = JoinableQueue(p_max_items_by_queue)
            self.out_queue = JoinableQueue(p_max_items_by_queue)

        self.counters = {
            'nb_items_processed': Value('i', 0),
            'nb_items_error': Value('i', 0),
            'nb_items_scanned': Value('i', 0),
            'nb_items_stored': Value('i', 0),
            'whole_storage_time': Value('f', 0),
            'bulk_storage_time': Value('f', 0),
            'whole_process_time': Value('f', 0),
            'real_process_time': Value('f', 0),
            'idle_process_time': Value('f', 0),
            'scan_time': Value('f', 0),
            'log_every': p_log_every
        }
Example #12
File: core.py Project: N6UDP/cslbot
def init(confdir="/etc/cslbot"):
    """The bot's main entry point.

    | Initialize the bot and start processing messages.
    """
    multiprocessing.set_start_method('spawn')

    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--debug', help='Enable debug logging.', action='store_true')
    args = parser.parse_args()
    loglevel = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(level=loglevel)

    bot = IrcBot(confdir)

    try:
        bot.start()
    except KeyboardInterrupt:
        # KeyboardInterrupt means someone tried to ^C, so shut down the bot
        bot.disconnect('Bot received a Ctrl-C')
        bot.shutdown_mp()
        sys.exit(0)
    except Exception as ex:
        bot.shutdown_mp(False)
        logging.error("The bot died! %s" % ex)
        output = "".join(traceback.format_exc()).strip()
        for line in output.split('\n'):
            logging.error(line)
        sys.exit(1)
Example #13
def run():

    # ask user for difficulty
    q_app = QtWidgets.QApplication([])
    q_widget = QtWidgets.QWidget()
    dialog = QtWidgets.QMessageBox(q_widget)
    dialog.addButton('Easy', QtWidgets.QMessageBox.ActionRole)
    dialog.addButton('Medium', QtWidgets.QMessageBox.ActionRole)
    dialog.addButton('Hard', QtWidgets.QMessageBox.ActionRole)
    dialog.addButton('Impossible', QtWidgets.QMessageBox.ActionRole)
    dialog.setText('Choose difficulty:')
    ret = dialog.exec_()

    easy, medium, hard, impossible = range(4)
    sim_time = None
    if ret == easy:
        sim_time = 1
    elif ret == medium:
        sim_time = 3
    elif ret == hard:
        sim_time = 5
    elif ret == impossible:
        sim_time = 8

    mp.set_start_method('spawn')
    gui_process = mp.Process(target=start_client.main)
    gui_process.start()

    run_game.main(BlackAgent='human', WhiteAgent='monte_carlo',
                  sim_time=sim_time, gui=True)
Example #14
 def wrapper(*args, **kwargs):
     out = func(*args, **kwargs)
     if sys.version_info > (3, 4):
         mp.set_start_method('spawn', force=True)
         func(*args, **kwargs)
         mp.set_start_method('fork', force=True)
     return out
Example #15
 def wrapper(*args, **kwargs):
     if sys.version_info > (3, 4) and os.name != 'nt':
         mp.set_start_method('spawn', force=True)
         out = func(*args, **kwargs)
         mp.set_start_method('fork', force=True)
     else:
         out = func(*args, **kwargs)
     return out
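
Examples #14 and #15 show only the inner wrapper; wired up as a full decorator the pattern looks roughly like this (with_spawn is an illustrative name, and functools.wraps is an addition not present in the excerpts):

import functools
import multiprocessing as mp
import os
import sys


def with_spawn(func):
    """Run func once under the 'spawn' start method, then restore 'fork'."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if sys.version_info > (3, 4) and os.name != 'nt':
            mp.set_start_method('spawn', force=True)
            out = func(*args, **kwargs)
            mp.set_start_method('fork', force=True)
        else:
            out = func(*args, **kwargs)
        return out
    return wrapper


@with_spawn
def smoke_test():
    with mp.Pool(2) as pool:
        return pool.map(abs, [-1, -2])


if __name__ == '__main__':
    print(smoke_test())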
Example #16
File: gui.py Project: maoge/crate-cactus
def main():
    set_start_method('forkserver')
    root = tk.Tk()
    app = GuiApplication(root, title='Crate Cactus')
    app.pack(side='top', fill='both', expand=True)
    try:
        root.mainloop()
    except KeyboardInterrupt:
        root.quit()
        root.destroy()
Example #17
File: QtTestCase.py Project: jopohl/urh
    def setUpClass(cls):
        import multiprocessing as mp
        try:
            mp.set_start_method("spawn")
        except RuntimeError:
            pass
        assert mp.get_start_method() == "spawn"

        write_settings()
        cls.app = QApplication([cls.__name__])
Example #18
def see_bin_data():
    import multiprocessing as mp
    mp.set_start_method('spawn')
    
    for i in range(10):
        p = mp.Process(target = bin_list)
        p.start()
        p.join()
        
    for i in range(10):
        p = mp.Process(target = bin_dict)
        p.start()
        p.join()
Example #19
    def runController(self):

        from light_controller import PiLights

        class MyPiLightsManager(BaseManager): pass

        mp.set_start_method('spawn')
       
        MyPiLightsManager.register('PiLights', PiLights)
        manager = MyPiLightsManager()
        manager.start()

        self.piLights = manager.PiLights()
        process = mp.Process(target=mainLoop, args=(piClass,), name="PiLights")
Example #20
File: __main__.py Project: nicfit/mishmash
def main(args):
    import multiprocessing
    global _FORK_METHOD_SET

    if not _FORK_METHOD_SET:
        try:
            multiprocessing.set_start_method("fork")
            _FORK_METHOD_SET = True
        except RuntimeError as ex:
            log.warning("multiprocessing.set_start_method: " + str(ex))

    if not hasattr(args, "command_func") or not args.command_func:
        # No command was given.
        args.app.arg_parser.print_help()
        return 1

    # In the case fileConfig undid the command line, which has precedence.
    args.applyLoggingOpts(args.log_levels, args.log_files)

    if args.db_url:
        args.config.set(MAIN_SECT, SA_KEY, args.db_url)
        # Don't want commands and such to use this, so reset.
        args.db_url = None
    elif "MISHMASH_DBURL" in os.environ:
        log.verbose("Using environment MISHMASH_DBURL over configuration: {}"
                    .format(os.environ["MISHMASH_DBURL"]))
        args.config.set(MAIN_SECT, SA_KEY, os.environ["MISHMASH_DBURL"])

    # Run command
    try:
        retval = args.command_func(args, args.config) or 0
    except (KeyboardInterrupt, PromptExit):
        # PromptExit raised when CTRL+D during prompt, or prompts disabled
        retval = 0
    except (sql_exceptions.ArgumentError,):
        _pErr("Database error")
        retval = 1
    except (sql_exceptions.OperationalError,) as db_err:
        print(str(db_err), file=sys.stderr)
        retval = 1
    except CommandError as cmd_err:
        print(str(cmd_err), file=sys.stderr)
        retval = cmd_err.exit_status
    except Exception as ex:
        log.exception(ex)
        _pErr("General error")
        retval = 2

    return retval
Example #21
def main():
    # We set the sub processes start method to spawn because it solves
    # deadlocks when a library cannot handle being used on two sides of a
    # forked process. This happens on modern Macs with the Accelerate library
    # for instance. Unfortunately this only comes with python≥3.4. People using
    # older versions should upgrade if they encounter deadlocks.
    if sys.version_info[:2] >= (3, 4):
        mp.set_start_method('spawn')
    else:
        print("Could not set the multiprocessing start method to spawn. If "
              "you encounter a deadlock, please upgrade to Python≥3.4.")

    parser = argparse.ArgumentParser()

    parser.add_argument('-c', '--conf-file', dest='config_file',
                        help="Alternative configuration file to use.")

    subparsers = parser.add_subparsers(help="List of commands")

    init_parser = subparsers.add_parser('init', help=init.__doc__)
    init_parser.set_defaults(parser='init')

    genconf_parser = subparsers.add_parser('genconf', help=genconf.__doc__)
    genconf_parser.set_defaults(parser='genconf')

    check_parser = subparsers.add_parser('check', help=check.__doc__)
    check_parser.set_defaults(parser='check')

    run_parser = subparsers.add_parser('run', help=run.__doc__)
    run_parser.set_defaults(parser='run')

    if len(sys.argv) == 1:
        parser.print_usage()
    else:
        args = parser.parse_args()

        if args.config_file:
            config = Configuration(args.config_file)
        else:
            config = Configuration()

        if args.parser == 'init':
            init(config)
        elif args.parser == 'genconf':
            genconf(config)
        elif args.parser == 'check':
            check(config)
        elif args.parser == 'run':
            run(config)
Example #22
def porttest(start,end):
    mp.set_start_method('spawn')
    task_queue=Queue()
    done_task=Queue()
    print("-------------------Port testing-----------------")
    print("Localhost port: 127.0.0.1")
    print("Scanning ports..", start, " to ", end)
    print("Parallel processing started.")
    for port in range(start,end):
        p1=mp.Process(target=TCP_scan, args=('127.0.0.1',port,))
        p1.start()
        p1.join()
        p2=mp.Process(target=UDP_scan, args=('127.0.0.1',port,))
        p2.start()
        p2.join()
Example #23
def porttestremote(remoteip, start,end):
    mp.set_start_method('spawn')
    task_queue = Queue()
    done_task = Queue()
    print("-------------------Port testing-----------------")
    print("IP: ", remoteip)
    print("Scanning ports..", start, " to ", end)
    print("Parallel processing started.")
    for port in range(start, end):
        p1 = mp.Process(target=TCP_scan, args=(remoteip, port,))
        p1.start()
        p1.join()
        p2 = mp.Process(target=UDP_scan, args=(remoteip, port,))
        p2.start()
        p2.join()
Example #24
    def execute(self):
        # noinspection PyBroadException
        try:
            multiprocessing.set_start_method('spawn')
        except:
            debug('Using default process start method')

        # the module tracer holds content over the entire module
        tracer = ImportTracer.ModuleTracer(self.file_spec)
        q = multiprocessing.Queue()
        p = Process(target=ImportTracer.module_trace, args=(self.module_spec, self.file_spec, tracer, q))
        p.start()
        patch = q.get()
        p.join()
        return patch
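
The bare except above swallows every error; for a valid method name, set_start_method only raises RuntimeError when the start method has already been fixed, so the guard can be written more narrowly (a sketch):

import multiprocessing

try:
    multiprocessing.set_start_method('spawn')
except RuntimeError:
    # The start method was already set elsewhere; keep the active one.
    pass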
Example #25
def main():

    if sys.version_info[:2] >= (3, 4):
        mp.set_start_method('spawn')
    else:
        print("Could not set the multiprocessing start method to spawn. If "
              "you encounter a deadlock, please upgrade to Python≥3.4.")

    parser = argparse.ArgumentParser()

    parser.add_argument('-c', '--conf-file', dest='config_file',
                        help="Alternative configuration file to use.")

    subparsers = parser.add_subparsers(help="List of commands")

    pdf_parser = subparsers.add_parser('pdf', help=pdf.__doc__)
    pdf_parser.set_defaults(parser='pdf')

    chi2_parser = subparsers.add_parser('chi2', help=chi2.__doc__)
    chi2_parser.set_defaults(parser='chi2')

    sed_parser = subparsers.add_parser('sed', help=sed.__doc__)
    sed_parser.add_argument('--type', default='mJy')
    sed_parser.add_argument('--nologo', action="store_true")
    sed_parser.set_defaults(parser='sed')

    sed_parser = subparsers.add_parser('mock', help=mock.__doc__)
    sed_parser.add_argument('--nologo', action="store_true")
    sed_parser.set_defaults(parser='mock')

    args = parser.parse_args()

    if args.config_file:
        config = Configuration(args.config_file)
    else:
        config = Configuration()

    if len(sys.argv) == 1:
        parser.print_usage()
    else:
        if args.parser == 'chi2':
            chi2(config)
        elif args.parser == 'pdf':
            pdf(config)
        elif args.parser == 'sed':
            sed(config, args.type, args.nologo)
        elif args.parser == 'mock':
            mock(config, args.nologo)
Example #26
File: common.py Project: DistAlgo/distalgo
def initialize_runtime_options(options=None):
    """Sets and sanitizes runtime options.

    'options' should be a dict-like object containing mappings from options
    names to corresponding values.

    """
    import multiprocessing

    from . import compiler

    global GlobalOptions

    if not GlobalOptions:
        GlobalOptions = dict()
    if options:
        GlobalOptions.update(options)

    # Parse '--substitute-classes' and '--substitute-modules':
    GlobalOptions['substitute_classes'] = \
                            _parse_items(GlobalOptions.get('substitute_classes'))
    GlobalOptions['substitute_modules'] = \
                            _parse_items(GlobalOptions.get('substitute_modules'))

    if GlobalOptions.get('nodename') is None:
        GlobalOptions['nodename'] = ''

    _set_hostname()

    # Configure multiprocessing package to use chosen semantics:
    startmeth = GlobalOptions.get('start_method')
    if startmeth != multiprocessing.get_start_method(allow_none=True):
        multiprocessing.set_start_method(startmeth)

    # Convert 'compiler_flags' to a namespace object that can be passed directly
    # to the compiler:
    GlobalOptions['compiler_args'] \
        = compiler.ui.parse_compiler_args(
            GlobalOptions.get('compiler_flags', '').split())

    # Make sure the directory for storing trace files exists:
    if GlobalOptions.get('record_trace'):
        if 'logdir' not in GlobalOptions:
            raise ConfigurationError(
                "'record_trace' enabled without setting 'logdir'")
        os.makedirs(GlobalOptions['logdir'], exist_ok=True)
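
The comparison against get_start_method(allow_none=True) before calling set_start_method is what avoids a RuntimeError when the requested method is already active. The same guard in isolation (ensure_start_method is a hypothetical helper, not part of the DistAlgo code above):

import multiprocessing


def ensure_start_method(method):
    """Set the start method only if it is not already the active one."""
    current = multiprocessing.get_start_method(allow_none=True)
    if method is not None and method != current:
        multiprocessing.set_start_method(method)


if __name__ == '__main__':
    ensure_start_method('spawn')
    ensure_start_method('spawn')   # no-op: already active, no RuntimeError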
Example #27
File: spawn.py Project: as133/scikit-learn
def prepare(data):
    '''
    Try to get current process ready to unpickle process object
    '''
    if 'name' in data:
        process.current_process().name = data['name']

    if 'authkey' in data:
        process.current_process().authkey = data['authkey']

    if 'log_to_stderr' in data and data['log_to_stderr']:
        util.log_to_stderr()

    if 'log_level' in data:
        util.get_logger().setLevel(data['log_level'])

    if 'log_fmt' in data:
        import logging
        util.get_logger().handlers[0].setFormatter(
            logging.Formatter(data['log_fmt'])
        )

    if 'sys_path' in data:
        sys.path = data['sys_path']

    if 'sys_argv' in data:
        sys.argv = data['sys_argv']

    if 'dir' in data:
        os.chdir(data['dir'])

    if 'orig_dir' in data:
        process.ORIGINAL_DIR = data['orig_dir']

    if hasattr(mp, 'set_start_method'):
        mp.set_start_method('loky', force=True)

    if 'tracker_pid' in data:
        from . import semaphore_tracker
        semaphore_tracker._semaphore_tracker._pid = data['tracker_pid']

    if 'init_main_from_name' in data:
        _fixup_main_from_name(data['init_main_from_name'])
    elif 'init_main_from_path' in data:
        _fixup_main_from_path(data['init_main_from_path'])
Example #28
def main():
    # configure multiprocessing
    mp.set_start_method('spawn')
    # Configure the Arduino port: check for an Arduino's presence
    arduino_ports = list(serial.tools.list_ports.grep('/dev/ttyACM'))
    if arduino_ports:
        print("[]-->Arduino detected at", arduino_ports)
        port = re.findall(r'/dev/ttyACM.', str(arduino_ports))
        l = str(port).strip("[']")
        print("[]-->Selecting port", l)
        ser = serial.Serial('%s' % l, 9600)

        print("[]-->READY")
        looper = Process(target=play)
        looper.start()
    else:
        print("[]-->There doesn't seem to be an Arduino present. Check the connection?")
Example #29
File: transql.py Project: xf7027/transql
def pool_init():

	p_sql_select = transql_conf.SQL_SELECT
	p_sql_insert = transql_conf.SQL_INSERT
	p_sql_s_i = []	

	for a,b in zip(p_sql_select,p_sql_insert):
		p_sql_s_i.append(a+b)

#	C_FLAG = input("continue?  Y/y: ")
#	if C_FLAG not in ('y','Y'):
#		sys.exit(0)

	freeze_support()
	set_start_method('fork')
	with MyPool(4) as p:
		result = p.map(select_sql_execute,p_sql_s_i)
	print("call pool_init() result: ",result)
	return 0
Example #30
 def __init__(self, loop_monitor=True, overlay_dirs=None, debug_level=None):
     self.logger = logger.Logger('middlewared', debug_level).getLogger()
     self.crash_reporting = logger.CrashReporting()
     self.loop_monitor = loop_monitor
     self.overlay_dirs = overlay_dirs or []
     self.__loop = None
     self.__thread_id = threading.get_ident()
     # Spawn new processes for ProcessPool instead of forking
     multiprocessing.set_start_method('spawn')
     self.__procpool = ProcessPoolExecutor(max_workers=2)
     self.__threadpool = concurrent.futures.ThreadPoolExecutor(max_workers=10)
     self.jobs = JobsQueue(self)
     self.__schemas = {}
     self.__services = {}
     self.__wsclients = {}
     self.__event_sources = {}
     self.__event_subs = defaultdict(list)
     self.__hooks = defaultdict(list)
     self.__server_threads = []
     self.__init_services()
Example #31
File: main.py Project: pupil-labs/pupil
                elif "notify.meta.should_doc" in topic:
                    cmd_push.notify({
                        "subject": "meta.doc",
                        "actor": "launcher",
                        "doc": launcher.__doc__,
                    })
                elif "notify.launcher_process.should_stop" in topic:
                    if parsed_args.app == "capture":
                        cmd_push.notify(
                            {"subject": "world_process.should_stop"})
                    elif parsed_args.app == "service":
                        cmd_push.notify(
                            {"subject": "service_process.should_stop"})
                    elif parsed_args.app == "player":
                        cmd_push.notify(
                            {"subject": "player_process.should_stop"})

            else:
                if not active_children():
                    break

        for p in active_children():
            p.join()


if __name__ == "__main__":
    freeze_support()
    if platform.system() == "Darwin":
        set_start_method("spawn")
    launcher()
Example #32
import os
import sys
from argparse import ArgumentParser
from multiprocessing import set_start_method

import requests

from logger import logger

set_start_method("fork")

BASE_URL = 'http://172.23.126.166/builds/latestbuilds/couchbase-server'

CHECKPOINT_DIR = '/home/'

MAX_MISSING = 3

RELEASES = {
    'spock': '5.0.0',
    'vulcan': '5.5.6',
    'alice': '6.0.4',
    'mad-hatter': '6.5.0',
    'cheshire-cat': '7.0.0',
    'neo': '7.1.0',
    'morpheus': '7.2.0'
}


def read_latest(release: str) -> int:
    checkpoint = os.path.join(CHECKPOINT_DIR, release)
    with open(checkpoint) as f:
Example #33
    args = parser.parse_args()

    model_num = args.model_num
    agents_num = args.agents_num
    device = args.device

    print("model_num:", model_num, "agents_num:", agents_num, "pool size:",
          args.pool, "device:", device)

    experiment_name = "ddqn_to_td3_discrete_vary_transfer_reward_overview_only_10_best_filtered_models_4000_evals_" + \
                      str(agents_num) + "_agents_num_" + str(model_num) + "_model_num"

    env_name = "CartPole"

    if args.pool is not None:
        mp.set_start_method('spawn')  # due to cuda
        pool = mp.Pool(args.pool)
    else:
        pool = None

    if args.mode is not None:
        run_vary_hp(mode=args.mode,
                    experiment_name=experiment_name,
                    model_num=model_num,
                    agents_num=agents_num,
                    model_dir=model_dir,
                    custom_load_envs_and_config=load_envs_and_config,
                    custom_train_test_agents=train_test_agents,
                    env_name=env_name,
                    pool=pool,
                    device=device,
Example #34
def main():
    if sys.version_info < (3, 4):
        print("You need at least Python 3.4 for this application!")
        sys.exit(1)

    if sys.platform == "win32":
        urh_dir = os.path.dirname(os.path.realpath(__file__)) if not os.path.islink(__file__) \
            else os.path.dirname(os.path.realpath(os.readlink(__file__)))
        assert os.path.isdir(urh_dir)

        dll_dir = os.path.realpath(os.path.join(urh_dir, "dev", "native", "lib", "win"))
        print("Using DLLs from:", dll_dir)
        os.environ['PATH'] = dll_dir + ';' + os.environ['PATH']

    t = time.time()
    if GENERATE_UI and not hasattr(sys, 'frozen'):
        try:
            urh_dir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "..", ".."))
            sys.path.append(urh_dir)
            sys.path.append(os.path.join(urh_dir, "src"))

            import generate_ui

            generate_ui.gen()

            print("Time for generating UI: %.2f seconds" % (time.time() - t))
        except (ImportError, FileNotFoundError):
            print("Will not regenerate UI, because script can't be found. This is okay in release.")

    urh_exe = sys.executable if hasattr(sys, 'frozen') else sys.argv[0]
    urh_exe = os.readlink(urh_exe) if os.path.islink(urh_exe) else urh_exe

    urh_dir = os.path.join(os.path.dirname(os.path.realpath(urh_exe)), "..", "..")
    prefix = os.path.abspath(os.path.normpath(urh_dir))

    src_dir = os.path.join(prefix, "src")
    if os.path.exists(src_dir) and not prefix.startswith("/usr") and not re.match(r"(?i)c:\\program", prefix):
        # Started locally, not installed
        print("Adding {0} to pythonpath. This is only important when running URH from source.".format(src_dir))
        sys.path.insert(0, src_dir)

    try:
        import urh.cythonext.signalFunctions
        import urh.cythonext.path_creator
        import urh.cythonext.util
    except ImportError:
        print("Could not find C++ extensions, trying to build them.")
        old_dir = os.curdir
        os.chdir(os.path.join(src_dir, "urh", "cythonext"))

        from urh.cythonext import build
        build.main()

        os.chdir(old_dir)

    from urh.controller.MainController import MainController
    from urh import constants

    if constants.SETTINGS.value("theme_index", 0, int) > 0:
        os.environ['QT_QPA_PLATFORMTHEME'] = 'fusion'

    app = QApplication(sys.argv)
    app.setWindowIcon(QIcon(":/icons/data/icons/appicon.png"))

    # noinspection PyUnresolvedReferences
    import urh.ui.xtra_icons_rc  # Use oxy theme always
    QIcon.setThemeName("oxy")

    constants.SETTINGS.setValue("default_theme", app.style().objectName())

    if constants.SETTINGS.value("theme_index", 0, int) > 0:
        app.setStyle(QStyleFactory.create("Fusion"))

        if constants.SETTINGS.value("theme_index", 0, int) == 2:
            palette = QPalette()
            background_color = QColor(56, 60, 74)
            text_color = QColor(211, 218, 227).lighter()
            palette.setColor(QPalette.Window, background_color)
            palette.setColor(QPalette.WindowText, text_color)
            palette.setColor(QPalette.Base, background_color)
            palette.setColor(QPalette.AlternateBase, background_color)
            palette.setColor(QPalette.ToolTipBase, background_color)
            palette.setColor(QPalette.ToolTipText, text_color)
            palette.setColor(QPalette.Text, text_color)

            palette.setColor(QPalette.Button, background_color)
            palette.setColor(QPalette.ButtonText, text_color)

            palette.setColor(QPalette.BrightText, Qt.red)
            palette.setColor(QPalette.Disabled, QPalette.Text, Qt.darkGray)
            palette.setColor(QPalette.Disabled, QPalette.ButtonText, Qt.darkGray)

            palette.setColor(QPalette.Highlight, QColor(200, 50, 0))
            palette.setColor(QPalette.HighlightedText, text_color)
            app.setPalette(palette)

    main_window = MainController()

    if sys.platform == "darwin":
        menu_bar = main_window.menuBar()
        menu_bar.setNativeMenuBar(False)
        import multiprocessing as mp
        mp.set_start_method("spawn")  # prevent errors with forking in native RTL-SDR backend

    main_window.showMaximized()
    # main_window.setFixedSize(1920, 1080 - 30)  # Youtube

    # use system colors for painting
    widget = QWidget()
    bgcolor = widget.palette().color(QPalette.Background)
    fgcolor = widget.palette().color(QPalette.Foreground)
    selection_color = widget.palette().color(QPalette.Highlight)
    constants.BGCOLOR = bgcolor
    constants.LINECOLOR = fgcolor
    constants.SELECTION_COLOR = selection_color
    constants.SEND_INDICATOR_COLOR = selection_color

    if "autoclose" in sys.argv[1:]:
        # Autoclose after 1 second, this is useful for automated testing
        timer = QTimer()
        timer.timeout.connect(app.quit)
        timer.start(1000)

    os._exit(app.exec_())  # sys.exit() is not enough on Windows and will result in crash on exit
Example #35
File: __init__.py Project: maoyeh/stytra
    def __init__(self, recording=None, exec=True, app=None, **kwargs):
        # Check if exist a default config file in the home (user) directory:
        mp.set_start_method("spawn", force=True)
        inum = kwargs.get("instance_number", -1)
        if inum >= 0:
            default_config_file = Path.home(
            ) / "stytra_setup_config_{}.json".format(inum)
        else:
            default_config_file = Path.home() / "stytra_setup_config.json"
        if default_config_file.is_file():
            config = json.load(open(str(default_config_file)))
        else:
            config = dict()

        # Get rest of configuration parameters from the protocol:
        try:
            extra_config = kwargs["protocol"].stytra_config
            recursive_update(config, extra_config)
        except AttributeError:
            pass

        recursive_update(config, kwargs)  # Use also keyword arguments

        if config.get("scope_triggering", None) == "zmq":
            # Automatically use zmqTrigger if zmq is specified
            from stytra.triggering import ZmqTrigger

            config["scope_triggering"] = ZmqTrigger(port="5555")

        if app is None:
            app = QApplication([])
            app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
            if app.devicePixelRatio() > 1:
                app.setAttribute(Qt.AA_UseHighDpiPixmaps)
        class_kwargs = dict(app=app)
        class_kwargs.update(config)

        base = VisualExperiment

        if "camera" in class_kwargs.keys():
            base = CameraVisualExperiment
            if "tracking" in class_kwargs.keys():
                base = TrackingExperiment
                if not class_kwargs["tracking"].get("embedded", True):
                    class_kwargs["calibrator"] = CircleCalibrator()

            if recording:
                base = VideoRecordingExperiment

        # Stytra logo
        app_icon = QIcon()
        for size in [32, 64, 128, 256]:
            app_icon.addFile(
                pkg_resources.resource_filename(__name__,
                                                "/icons/{}.png".format(size)),
                QSize(size, size),
            )
        app.setWindowIcon(app_icon)

        pg.setConfigOptions(imageAxisOrder="row-major")

        self.exp = base(**class_kwargs)

        self.exp.start_experiment()
        if exec:
            app.exec_()
Example #36
import multiprocessing as mp


def foo(q):
    q.put('hello')


if __name__ == '__main__':
    mp.set_start_method('spawn')  # avoid overusing this
    q = mp.Queue()
    p = mp.Process(target=foo, args=(q, ))
    p.start()
    print(q.get())
    p.join()
Example #37
                           parent=self)
        else:
            directory = tk.filedialog.askdirectory(initialdir=os.getcwd())
            if directory != '':
                if not os.path.isdir('lab4output'):
                    os.mkdir('lab4output')
                os.chdir('lab4output')
                tkmb.showinfo("Save",
                              "File saved in " + os.getcwd(),
                              parent=self)
                processData(self.E.getData, self.LB.curselection(), self.index)
                os.chdir('..')


if __name__ == '__main__':
    mp.set_start_method('spawn')  #to support spawning of child process
    win = MainWin()  #create a main Window
    win.mainloop()
'''
PART C (Comparing Threads and Process):
Data Sets are tested according to the order (Northern CA)
Data sets       Threads         Process
    1           1.01 s          2.04 s
    2           1.17 s          2.28 s
    3           1.42 s          2.74 s
    4           1.07 s          2.22 s
    5           1.68 s          2.87 s
    
From the table above, we can see that threading is faster than processes.
Threads run in the same memory space while processes use separate memory. In threading,
the tasks run concurrently, which may affect the run time. Also because they are in the
Example #38
# Test script on part of the data: python3 learning_SGD.py train.csv test.csv
# Usage on all data : python3 learning_SGD.py LLCP2015.csv_preprocessing.csv_transformed_train.csv LLCP2015.csv_preprocessing.csv_transformed_test.csv

# Train model on training set with cross-validation using grid search for best parameters.
# Evaluate model - Bias/variance: Validation curve, learning curve.
# Evaluate model - quantify quality of evaluations: Precision, recall, f1 score, PR curve, ROC curve.

import multiprocessing

multiprocessing.set_start_method('forkserver')

import pandas as pd
import numpy as np
import sys
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn import model_selection
import time
from sklearn.linear_model import SGDClassifier
from sklearn.feature_selection import RFE
import pickle


def get_X_Y_from_csv(csv_name):
    df = pd.read_csv(csv_name)
    array = df.values
    X = array[:, 1:]
    Y = array[:, 0]
    return X, Y

Example #39
import multiprocessing as multiprocess, sys


def foo():
    print("123")


# Because "if __name__ == '__main__'" is missing this will not work
# correctly on Windows.  However, we should get a RuntimeError rather
# than the Windows equivalent of a fork bomb.

if len(sys.argv) > 1:
    multiprocess.set_start_method(sys.argv[1])
else:
    multiprocess.set_start_method('spawn')

p = multiprocess.Process(target=foo)
p.start()
p.join()
sys.exit(p.exitcode)
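
For contrast with the comment above, the Windows-safe variant of the same script simply moves process creation under the __main__ guard (a sketch):

import multiprocessing as multiprocess
import sys


def foo():
    print("123")


if __name__ == '__main__':
    method = sys.argv[1] if len(sys.argv) > 1 else 'spawn'
    multiprocess.set_start_method(method)
    p = multiprocess.Process(target=foo)
    p.start()
    p.join()
    sys.exit(p.exitcode)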
Example #40
                                ipc_pub_url,
                                ipc_sub_url,
                                ipc_push_url,
                                user_dir,
                                app_version,
                            )).start()
                elif "notify.circle_detector_process.should_start" in topic:
                    Process(target=circle_detector,
                            name='circle_detector',
                            args=(ipc_push_url, n['pair_url'],
                                  n['source_path'])).start()
                elif "notify.meta.should_doc" in topic:
                    cmd_push.notify({
                        'subject': 'meta.doc',
                        'actor': 'launcher',
                        'doc': launcher.__doc__
                    })
            else:
                if not active_children():
                    break

        for p in active_children():
            p.join()


if __name__ == '__main__':
    freeze_support()
    if platform.system() == 'Darwin':
        set_start_method('spawn')
    launcher()
Example #41
    model.add(Dense(
        5  #num of hidden units
        ,
        input_shape=(data.shape[1], )))  #num of features in input layer
    model.add(Activation('sigmoid'))

    model.add(Dense(1))  #number of nodes in output layer
    model.add(Activation('sigmoid'))

    model.compile(loss='mse', optimizer=keras.optimizers.Adam())
    #------------------------------
    model.fit(data, target, epochs=5000, verbose=1)
    model.save("model_for_%s.hdf5" % index)
    #------------------------------
    #finally, close sessions
    session.close()
    keras.backend.clear_session()


#-----------------------------
#main program

multiprocessing.set_start_method('spawn', force=True)

df = pd.read_csv("dataset.csv")

my_tuple = [(i, df[df['index'] == i]) for i in range(0, 20)]

with Pool(10) as pool:
    pool.starmap(train, my_tuple)
Example #42
def gpu_stump(T_A, m, T_B=None, ignore_trivial=True, device_id=0):
    """
    Compute the matrix profile with GPU-STOMP

    This is a convenience wrapper around the Numba `cuda.jit` `_gpu_stump` function
    which computes the matrix profile according to GPU-STOMP.

    Parameters
    ----------
    T_A : ndarray
        The time series or sequence for which to compute the matrix profile

    m : int
        Window size

    T_B : (optional) ndarray
        The time series or sequence that contains your query subsequences
        of interest. Default is `None`, which corresponds to a self-join.

    ignore_trivial : bool
        Set to `True` if this is a self-join. Otherwise, for AB-join, set this
        to `False`. Default is `True`.

    device_id : int or list
        The (GPU) device number to use. The default value is `0`. A list of
        valid device ids (int) may also be provided for parallel GPU-STUMP
        computation. A list of all valid device ids can be obtained by
        executing `[device.id for device in cuda.list_devices()]`.

    Returns
    -------
    out : ndarray
        The first column consists of the matrix profile, the second column
        consists of the matrix profile indices, the third column consists of
        the left matrix profile indices, and the fourth column consists of
        the right matrix profile indices.

    Notes
    -----
    `DOI: 10.1109/ICDM.2016.0085 \
    <https://www.cs.ucr.edu/~eamonn/STOMP_GPU_final_submission_camera_ready.pdf>`__

    See Table II, Figure 5, and Figure 6

    Time series T_B will be annotated with the distance location
    (or index) of all its subsequences in another time series, T_A.

    Return: For every subsequence, Q, in T_B, you will get a distance
    and index for the closest subsequence in T_A. Thus, the array
    returned will have length T_B.shape[0]-m+1. Additionally, the
    left and right matrix profiles are also returned.

    Note: Unlike in the Table II where T_A.shape is expected to be equal
    to T_B.shape, this implementation is generalized so that the shapes of
    T_A and T_B can be different. In the case where T_A.shape == T_B.shape,
    then our algorithm reduces down to the same algorithm found in Table II.

    Additionally, unlike STAMP where the exclusion zone is m/2, the default
    exclusion zone for STOMP is m/4 (See Definition 3 and Figure 3).

    For self-joins, set `ignore_trivial = True` in order to avoid the
    trivial match.

    Note that left and right matrix profiles are only available for self-joins.
    """
    if T_B is None:  # Self join!
        T_B = T_A
        ignore_trivial = True

    # Swap T_A and T_B for GPU implementation
    # This keeps the API identical to and compatible with `stumpy.stump`
    tmp_T = T_A
    T_A = T_B
    T_B = tmp_T

    T_A, M_T, Σ_T = core.preprocess(T_A, m)
    T_B, μ_Q, σ_Q = core.preprocess(T_B, m)

    if T_A.ndim != 1:  # pragma: no cover
        raise ValueError(
            f"T_A is {T_A.ndim}-dimensional and must be 1-dimensional. "
            "For multidimensional STUMP use `stumpy.mstump` or `stumpy.mstumped`"
        )

    if T_B.ndim != 1:  # pragma: no cover
        raise ValueError(
            f"T_B is {T_B.ndim}-dimensional and must be 1-dimensional. "
            "For multidimensional STUMP use `stumpy.mstump` or `stumpy.mstumped`"
        )

    core.check_dtype(T_A)
    core.check_dtype(T_B)

    core.check_window_size(m)

    if ignore_trivial is False and core.are_arrays_equal(
            T_A, T_B):  # pragma: no cover
        logger.warning("Arrays T_A, T_B are equal, which implies a self-join.")
        logger.warning("Try setting `ignore_trivial = True`.")

    if ignore_trivial and core.are_arrays_equal(
            T_A, T_B) is False:  # pragma: no cover
        logger.warning(
            "Arrays T_A, T_B are not equal, which implies an AB-join.")
        logger.warning("Try setting `ignore_trivial = False`.")

    n = T_B.shape[0]
    k = T_A.shape[0] - m + 1
    l = n - m + 1
    excl_zone = int(np.ceil(m / 4))  # See Definition 3 and Figure 3

    T_A_fname = core.array_to_temp_file(T_A)
    T_B_fname = core.array_to_temp_file(T_B)
    M_T_fname = core.array_to_temp_file(M_T)
    Σ_T_fname = core.array_to_temp_file(Σ_T)
    μ_Q_fname = core.array_to_temp_file(μ_Q)
    σ_Q_fname = core.array_to_temp_file(σ_Q)

    out = np.empty((k, 4), dtype=object)

    if isinstance(device_id, int):
        device_ids = [device_id]
    else:
        device_ids = device_id

    profile = [None] * len(device_ids)
    indices = [None] * len(device_ids)

    for _id in device_ids:
        with cuda.gpus[_id]:
            if (cuda.current_context().__class__.__name__ !=
                    "FakeCUDAContext"):  # pragma: no cover
                cuda.current_context().deallocations.clear()

    step = 1 + l // len(device_ids)

    # Start process pool for multi-GPU request
    if len(device_ids) > 1:  # pragma: no cover
        mp.set_start_method("spawn", force=True)
        p = mp.Pool(processes=len(device_ids))
        results = [None] * len(device_ids)

    QT_fnames = []
    QT_first_fnames = []

    for idx, start in enumerate(range(0, l, step)):
        stop = min(l, start + step)

        QT, QT_first = _get_QT(start, T_A, T_B, m)
        QT_fname = core.array_to_temp_file(QT)
        QT_first_fname = core.array_to_temp_file(QT_first)
        QT_fnames.append(QT_fname)
        QT_first_fnames.append(QT_first_fname)

        if len(device_ids
               ) > 1 and idx < len(device_ids) - 1:  # pragma: no cover
            # Spawn and execute in child process for multi-GPU request
            results[idx] = p.apply_async(
                _gpu_stump,
                (
                    T_A_fname,
                    T_B_fname,
                    m,
                    stop,
                    excl_zone,
                    M_T_fname,
                    Σ_T_fname,
                    QT_fname,
                    QT_first_fname,
                    μ_Q_fname,
                    σ_Q_fname,
                    k,
                    ignore_trivial,
                    start + 1,
                    device_ids[idx],
                ),
            )
        else:
            # Execute last chunk in parent process
            # Only parent process is executed when a single GPU is requested
            profile[idx], indices[idx] = _gpu_stump(
                T_A_fname,
                T_B_fname,
                m,
                stop,
                excl_zone,
                M_T_fname,
                Σ_T_fname,
                QT_fname,
                QT_first_fname,
                μ_Q_fname,
                σ_Q_fname,
                k,
                ignore_trivial,
                start + 1,
                device_ids[idx],
            )

    # Clean up process pool for multi-GPU request
    if len(device_ids) > 1:  # pragma: no cover
        p.close()
        p.join()

        # Collect results from spawned child processes if they exist
        for idx, result in enumerate(results):
            if result is not None:
                profile[idx], indices[idx] = result.get()

    os.remove(T_A_fname)
    os.remove(T_B_fname)
    os.remove(M_T_fname)
    os.remove(Σ_T_fname)
    os.remove(μ_Q_fname)
    os.remove(σ_Q_fname)
    for QT_fname in QT_fnames:
        os.remove(QT_fname)
    for QT_first_fname in QT_first_fnames:
        os.remove(QT_first_fname)

    for idx in range(len(device_ids)):
        profile_fname = profile[idx]
        indices_fname = indices[idx]
        profile[idx] = np.load(profile_fname, allow_pickle=False)
        indices[idx] = np.load(indices_fname, allow_pickle=False)
        os.remove(profile_fname)
        os.remove(indices_fname)

    for i in range(1, len(device_ids)):
        # Update all matrix profiles and matrix profile indices
        # (global, left, right) and store in profile[0] and indices[0]
        for col in range(profile[0].shape[1]):  # pragma: no cover
            cond = profile[0][:, col] < profile[i][:, col]
            profile[0][:, col] = np.where(cond, profile[0][:, col],
                                          profile[i][:, col])
            indices[0][:, col] = np.where(cond, indices[0][:, col],
                                          indices[i][:, col])

    out[:, 0] = profile[0][:, 0]
    out[:, 1:4] = indices[0][:, :]

    threshold = 10e-6
    if core.are_distances_too_small(out[:, 0],
                                    threshold=threshold):  # pragma: no cover
        logger.warning(
            f"A large number of values are smaller than {threshold}.")
        logger.warning("For a self-join, try setting `ignore_trivial = True`.")

    return out
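
A hypothetical usage sketch, assuming the function is exposed as stumpy.gpu_stump and a CUDA-capable GPU is available:

import numpy as np
import stumpy

if __name__ == '__main__':
    T = np.random.rand(1000).cumsum()        # a random-walk time series
    m = 50                                   # window size
    out = stumpy.gpu_stump(T, m)             # self-join on GPU 0
    # out = stumpy.gpu_stump(T, m, device_id=[0, 1])  # multi-GPU spawn path
    print(out[:, 0].astype(float).min())     # smallest matrix profile value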
Example #43
        "--confidence-threshold",
        type=float,
        default=0.5,
        help="Minimum score for instance predictions to be shown",
    )
    parser.add_argument(
        "--opts",
        help="Modify config options using the command-line 'KEY VALUE' pairs",
        default=[],
        nargs=argparse.REMAINDER,
    )
    return parser


if __name__ == "__main__":
    mp.set_start_method("spawn", force=True)
    args = get_parser().parse_args()
    setup_logger(name="fvcore")
    logger = setup_logger()
    logger.info("Arguments: " + str(args))

    cfg = setup_cfg(args)

    demo = VisualizationDemo(cfg)

    if args.input:
        if len(args.input) == 1:
            args.input = glob.glob(os.path.expanduser(args.input[0]))
            assert args.input, "The input path(s) was not found"
        for path in tqdm.tqdm(args.input, disable=not args.output):
            # use PIL, to be consistent with evaluation
Example #44
    def closeGUI(self):
        self.sendClose('gui')
        self.proc_GUI.join()
        print('close GUI')
        self.closeAPEbtn.setEnabled(True)
        self.startAPEbtn.setEnabled(False)
        self.closeGUIbtn.setEnabled(False)
        self.startGUIbtn.setEnabled(True)

    def closeLauncher(self):
        self.node.close()
        self.closeAPEbtn.setEnabled(False)
        self.startAPEbtn.setEnabled(False)
        self.closeGUIbtn.setEnabled(False)
        self.startGUIbtn.setEnabled(False)


if __name__ == '__main__':
    # this is important to avoid forking in UNIX operating systems
    multiprocessing.set_start_method('spawn')
    # make a QApplication to run pyqt5
    app = QApplication(sys.argv)
    # instance of the new class
    startUp = StartUp()
    # target is the GUI from the class
    startUp.node.target = startUp
    # shows the GUI
    startUp.show()
    # exiting function
    sys.exit(app.exec_())
Example #45
def main():
    # Check if GPU is available
    # (ImageNet example does not support CPU execution)
    if not chainer.cuda.available:
        raise RuntimeError("ImageNet requires GPU support.")

    archs = {
        'alex': alex.Alex,
        'googlenet': googlenet.GoogLeNet,
        'googlenetbn': googlenetbn.GoogLeNetBN,
        'nin': nin.NIN,
        'resnet50': resnet50.ResNet50,
    }

    parser = argparse.ArgumentParser(
        description='Learning convnet from ILSVRC2012 dataset')
    parser.add_argument('train', help='Path to training image-label list file')
    parser.add_argument('val', help='Path to validation image-label list file')
    parser.add_argument('--arch',
                        '-a',
                        choices=archs.keys(),
                        default='nin',
                        help='Convnet architecture')
    parser.add_argument('--batchsize',
                        '-B',
                        type=int,
                        default=32,
                        help='Learning minibatch size')
    parser.add_argument('--epoch',
                        '-E',
                        type=int,
                        default=10,
                        help='Number of epochs to train')
    parser.add_argument('--initmodel',
                        help='Initialize the model from given file')
    parser.add_argument('--loaderjob',
                        '-j',
                        type=int,
                        help='Number of parallel data loading processes')
    parser.add_argument('--mean',
                        '-m',
                        default='mean.npy',
                        help='Mean file (computed by compute_mean.py)')
    parser.add_argument('--resume',
                        '-r',
                        default='',
                        help='Initialize the trainer from given file')
    parser.add_argument('--out',
                        '-o',
                        default='result',
                        help='Output directory')
    parser.add_argument('--root',
                        '-R',
                        default='.',
                        help='Root directory path of image files')
    parser.add_argument('--val_batchsize',
                        '-b',
                        type=int,
                        default=250,
                        help='Validation minibatch size')
    parser.add_argument('--test', action='store_true')
    parser.add_argument('--communicator', default='hierarchical')
    parser.set_defaults(test=False)
    args = parser.parse_args()

    # Prepare ChainerMN communicator.
    comm = chainermn.create_communicator(args.communicator)
    device = comm.intra_rank

    if comm.mpi_comm.rank == 0:
        print('==========================================')
        print('Num process (COMM_WORLD): {}'.format(comm.size))
        print('Using {} communicator'.format(args.communicator))
        print('Using {} arch'.format(args.arch))
        print('Num Minibatch-size: {}'.format(args.batchsize))
        print('Num epoch: {}'.format(args.epoch))
        print('==========================================')

    model = archs[args.arch]()
    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)

    chainer.cuda.get_device(device).use()  # Make the GPU current
    model.to_gpu()

    # Split and distribute the dataset. Only worker 0 loads the whole dataset.
    # Datasets of worker 0 are evenly split and distributed to all workers.
    mean = np.load(args.mean)
    if comm.rank == 0:
        train = PreprocessedDataset(args.train, args.root, mean, model.insize)
        val = PreprocessedDataset(args.val, args.root, mean, model.insize,
                                  False)
    else:
        train = None
        val = None
    train = chainermn.scatter_dataset(train, comm, shuffle=True)
    val = chainermn.scatter_dataset(val, comm)

    # We need to change the start method of the multiprocessing module if we
    # are using InfiniBand and MultiprocessIterator, because processes often
    # crash when calling fork while InfiniBand is in use.
    # (c.f., https://www.open-mpi.org/faq/?category=tuning#fork-warning )
    multiprocessing.set_start_method('forkserver')
    train_iter = chainer.iterators.MultiprocessIterator(
        train, args.batchsize, n_processes=args.loaderjob)
    val_iter = chainer.iterators.MultiprocessIterator(
        val, args.val_batchsize, repeat=False, n_processes=args.loaderjob)

    # Create a multi node optimizer from a standard Chainer optimizer.
    optimizer = chainermn.create_multi_node_optimizer(
        chainer.optimizers.MomentumSGD(lr=0.01, momentum=0.9), comm)
    optimizer.setup(model)

    # Set up a trainer
    updater = training.StandardUpdater(train_iter, optimizer, device=device)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.out)

    val_interval = (10, 'iteration') if args.test else (1, 'epoch')
    log_interval = (10, 'iteration') if args.test else (1, 'epoch')

    # Create a multi node evaluator from an evaluator.
    evaluator = TestModeEvaluator(val_iter, model, device=device)
    evaluator = chainermn.create_multi_node_evaluator(evaluator, comm)
    trainer.extend(evaluator, trigger=val_interval)

    # Some display and output extensions are necessary only for one worker.
    # (Otherwise, there would just be repeated outputs.)
    if comm.rank == 0:
        trainer.extend(extensions.dump_graph('main/loss'))
        trainer.extend(extensions.LogReport(trigger=log_interval))
        trainer.extend(extensions.observe_lr(), trigger=log_interval)
        trainer.extend(extensions.PrintReport([
            'epoch', 'iteration', 'main/loss', 'validation/main/loss',
            'main/accuracy', 'validation/main/accuracy', 'lr'
        ]),
                       trigger=log_interval)
        trainer.extend(extensions.ProgressBar(update_interval=10))

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    trainer.run()
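A side note on the forkserver call above: set_start_method() mutates process-global state and can only be called once. A minimal, hedged sketch (not part of the ChainerMN example; POSIX-only, since forkserver is unavailable on Windows) of using a dedicated context instead, which leaves the global default untouched:

import multiprocessing


def child_task(name):
    # runs in a forkserver-started child process
    print(f"hello from {name}")


if __name__ == '__main__':
    # get_context() returns an object exposing the multiprocessing API bound
    # to one start method, without changing the process-wide default.
    ctx = multiprocessing.get_context('forkserver')
    worker = ctx.Process(target=child_task, args=('forkserver child',))
    worker.start()
    worker.join()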
Example #46
File: __main__.py Project: qxcv/mtil
def main(
        demos,
        add_preproc,
        seed,
        sampler_batch_B,
        sampler_batch_T,
        disc_batch_size,
        out_dir,
        run_name,
        gpu_idx,
        disc_up_per_iter,
        total_n_steps,
        log_interval_steps,
        cpu_list,
        snapshot_gap,
        load_policy,
        bc_loss,
        omit_noop,
        disc_replay_mult,
        disc_aug,
        ppo_aug,
        disc_use_bn,
        disc_net_attn,
        disc_use_sn,
        disc_gp_weight,
        disc_al,
        disc_al_dim,
        disc_al_nsamples,
        disc_ae_pretrain_iters,
        wgan,
        transfer_variants,
        transfer_disc_loss_weight,
        transfer_pol_loss_weight,
        transfer_disc_anneal,
        transfer_pol_batch_weight,
        danger_debug_reward_weight,
        danger_override_env_name,
        # new sweep hyperparams:
        disc_lr,
        disc_use_act,
        disc_all_frames,
        ppo_lr,
        ppo_gamma,
        ppo_lambda,
        ppo_ent,
        ppo_adv_clip,
        ppo_norm_adv,
        ppo_use_bn,
        ppo_minibatches,
        ppo_epochs):
    # set up seeds & devices
    # TODO: also seed child envs, when rlpyt supports it
    set_seeds(seed)
    # 'spawn' is necessary to use GL envs in subprocesses. For whatever reason
    # they don't play nice after a fork. (But what about set_seeds() in
    # subprocesses? May need to hack CpuSampler and GpuSampler.)
    mp.set_start_method('spawn')
    use_gpu = gpu_idx is not None and torch.cuda.is_available()
    dev = torch.device(["cpu", f"cuda:{gpu_idx}"][use_gpu])
    if cpu_list is None:
        cpu_list = sample_cpu_list()
    # FIXME: I suspect current solution will set torch_num_threads suboptimally
    affinity = dict(cuda_idx=gpu_idx if use_gpu else None,
                    workers_cpus=cpu_list)
    print(f"Using device {dev}, seed {seed}, affinity {affinity}")

    # register original envs
    import magical
    magical.register_envs()

    if danger_override_env_name:
        raise NotImplementedError(
            "haven't re-implemeneted env name override for multi-task GAIL")

    demos_metas_dict = get_demos_meta(demo_paths=demos,
                                      omit_noop=omit_noop,
                                      transfer_variants=transfer_variants,
                                      preproc_name=add_preproc)
    dataset_mt = demos_metas_dict['dataset_mt']
    variant_groups = demos_metas_dict['variant_groups']
    env_metas = demos_metas_dict['env_metas']
    task_ids_and_demo_env_names = demos_metas_dict[
        'task_ids_and_demo_env_names']
    task_var_weights = {
        (task, variant): 1.0 if variant == 0 else transfer_pol_batch_weight
        for task, variant in variant_groups.env_name_by_task_variant
    }
    sampler, sampler_batch_B = make_mux_sampler(
        variant_groups=variant_groups,
        task_var_weights=task_var_weights,
        env_metas=env_metas,
        use_gpu=use_gpu,
        num_demo_sources=0,  # not important for now
        batch_B=sampler_batch_B,
        batch_T=sampler_batch_T)

    policy_kwargs = {
        'use_bn': ppo_use_bn,
        'env_ids_and_names': task_ids_and_demo_env_names,
        **get_policy_spec_magical(env_metas),
    }
    policy_ctor = MultiHeadPolicyNet
    ppo_agent = CategoricalPgAgent(ModelCls=MuxTaskModelWrapper,
                                   model_kwargs=dict(
                                       model_ctor=policy_ctor,
                                       model_kwargs=policy_kwargs))

    print("Setting up discriminator/reward model")
    disc_fc_dim = 256
    disc_final_feats_dim = disc_al_dim if disc_al else disc_fc_dim
    discriminator_mt = MILBenchDiscriminatorMT(
        task_ids_and_names=task_ids_and_demo_env_names,
        in_chans=policy_kwargs['in_chans'],
        act_dim=policy_kwargs['n_actions'],
        use_all_chans=disc_all_frames,
        use_actions=disc_use_act,
        # can supply any argument that goes to MILBenchFeatureNetwork (e.g.
        # dropout, use_bn, width, etc.)
        attention=disc_net_attn,
        use_bn=disc_use_bn,
        use_sn=disc_use_sn,
        fc_dim=disc_fc_dim,
        final_feats_dim=disc_final_feats_dim,
    ).to(dev)

    if (not transfer_variants
            and (transfer_disc_loss_weight or transfer_pol_loss_weight)):
        print("No xfer variants supplied, setting xfer disc loss term to zero")
        transfer_disc_loss_weight = 0.0
        transfer_pol_loss_weight = 0.0
    if transfer_pol_loss_weight > 0:
        assert transfer_disc_loss_weight > 0
    if transfer_variants and transfer_disc_loss_weight:
        xfer_adv_module = BinaryDomainLossModule(
            discriminator_mt.ret_feats_dim).to(dev)
    else:
        xfer_adv_module = None

    reward_model_mt = RewardModel(
        discriminator_mt,
        xfer_adv_module,
        transfer_pol_loss_weight,
        # In apprenticeship learning we can just pass
        # the model outputs straight through, just
        # like in WGAN.
        use_wgan=wgan or disc_al).to(dev)
    reward_evaluator_mt = RewardEvaluatorMT(
        task_ids_and_names=task_ids_and_demo_env_names,
        reward_model=reward_model_mt,
        obs_dims=3,
        batch_size=disc_batch_size,
        normalise=True,
        # I think I had rewards in [0,0.01] in
        # the PPO run that I got to run with a
        # manually-defined reward.
        target_std=0.01)

    ppo_hyperparams = dict(
        learning_rate=ppo_lr,
        discount=ppo_gamma,
        entropy_loss_coeff=ppo_ent,  # was working at 0.003 and 0.001
        gae_lambda=ppo_lambda,
        ratio_clip=ppo_adv_clip,
        minibatches=ppo_minibatches,
        epochs=ppo_epochs,
        value_loss_coeff=1.0,
        clip_grad_norm=1.0,
        normalize_advantage=ppo_norm_adv,
    )
    if bc_loss:
        # TODO: make this batch size configurable
        ppo_loader_mt = make_loader_mt(
            dataset_mt, max(16, min(64, sampler_batch_T * sampler_batch_B)))
    else:
        ppo_loader_mt = None

    # FIXME: abstract code for constructing augmentation model from presets
    try:
        ppo_aug_opts = MILBenchAugmentations.PRESETS[ppo_aug]
    except KeyError:
        raise ValueError(f"unsupported augmentation mode '{ppo_aug}'")
    if ppo_aug_opts:
        print("Policy augmentations:", ", ".join(ppo_aug_opts))
        ppo_aug_model = MILBenchAugmentations(
            **{k: True
               for k in ppo_aug_opts}).to(dev)
    else:
        print("No policy augmentations")
        ppo_aug_model = None

    ppo_algo = BCCustomRewardPPO(bc_loss_coeff=bc_loss,
                                 expert_traj_loader=ppo_loader_mt,
                                 true_reward_weight=danger_debug_reward_weight,
                                 aug_model=ppo_aug_model,
                                 **ppo_hyperparams)
    ppo_algo.set_reward_evaluator(reward_evaluator_mt)

    print("Setting up optimiser")
    try:
        aug_opts = MILBenchAugmentations.PRESETS[disc_aug]
    except KeyError:
        raise ValueError(f"unsupported augmentation mode '{disc_aug}'")
    if aug_opts:
        print("Discriminator augmentations:", ", ".join(aug_opts))
        aug_model = MILBenchAugmentations(**{k: True for k in aug_opts}) \
            .to(dev)
    else:
        print("No discriminator augmentations")
        aug_model = None
    gail_optim = GAILOptimiser(
        dataset_mt=dataset_mt,
        discrim_model=discriminator_mt,
        buffer_num_samples=max(
            disc_batch_size,
            disc_replay_mult * sampler_batch_T * sampler_batch_B),
        batch_size=disc_batch_size,
        updates_per_itr=disc_up_per_iter,
        gp_weight=disc_gp_weight,
        dev=dev,
        aug_model=aug_model,
        lr=disc_lr,
        xfer_adv_weight=transfer_disc_loss_weight,
        xfer_adv_anneal=transfer_disc_anneal,
        xfer_adv_module=xfer_adv_module,
        final_layer_only_mode=disc_al,
        final_layer_only_mode_n_samples=disc_al_nsamples,
        use_wgan=wgan)

    if disc_ae_pretrain_iters:
        # FIXME(sam): pass n_acts, obs_chans, lr to AETrainer
        ae_trainer = AETrainer(discriminator=discriminator_mt,
                               disc_out_size=disc_final_feats_dim,
                               data_batch_iter=gail_optim.expert_batch_iter,
                               dev=dev)

    print("Setting up RL algorithm")
    # signature for arg: reward_model(obs_tensor, act_tensor) -> rewards
    runner = GAILMinibatchRl(
        seed=seed,
        gail_optim=gail_optim,
        variant_groups=variant_groups,
        algo=ppo_algo,
        agent=ppo_agent,
        sampler=sampler,
        # n_steps controls total number of environment steps we take
        n_steps=total_n_steps,
        # log_interval_steps controls how many environment steps we take
        # between making log outputs (doing N environment steps takes roughly
        # the same amount of time no matter what batch_B, batch_T, etc. are, so
        # this gives us a fairly constant interval between log outputs)
        log_interval_steps=log_interval_steps,
        affinity=affinity)

    # TODO: factor out this callback
    def init_policy_cb(runner):
        """Callback which gets called once after Runner startup to save an
        initial policy model, and optionally load saved parameters."""
        # get state of the newly-initialised model
        wrapped_model = runner.algo.agent.model
        assert wrapped_model is not None, "has ppo_agent been initialised?"
        unwrapped_model = wrapped_model.model

        if load_policy:
            print(f"Loading policy from '{load_policy}'")
            saved_model = load_state_dict_or_model(load_policy)
            saved_dict = saved_model.state_dict()
            unwrapped_model.load_state_dict(saved_dict)

        real_state = unwrapped_model.state_dict()

        # make a clone model so we can pickle it, and copy across weights
        policy_copy_mt = policy_ctor(**policy_kwargs).to('cpu')
        policy_copy_mt.load_state_dict(real_state)

        # save it here
        init_pol_snapshot_path = os.path.join(logger.get_snapshot_dir(),
                                              'full_model.pt')
        torch.save(policy_copy_mt, init_pol_snapshot_path)

    print("Training!")
    n_uniq_envs = variant_groups.num_tasks
    log_params = {
        'add_preproc': add_preproc,
        'seed': seed,
        'sampler_batch_T': sampler_batch_T,
        'sampler_batch_B': sampler_batch_B,
        'disc_batch_size': disc_batch_size,
        'disc_up_per_iter': disc_up_per_iter,
        'total_n_steps': total_n_steps,
        'bc_loss': bc_loss,
        'omit_noop': omit_noop,
        'disc_aug': disc_aug,
        'danger_debug_reward_weight': danger_debug_reward_weight,
        'disc_lr': disc_lr,
        'disc_use_act': disc_use_act,
        'disc_all_frames': disc_all_frames,
        'disc_net_attn': disc_net_attn,
        'disc_use_bn': disc_use_bn,
        'ppo_lr': ppo_lr,
        'ppo_gamma': ppo_gamma,
        'ppo_lambda': ppo_lambda,
        'ppo_ent': ppo_ent,
        'ppo_adv_clip': ppo_adv_clip,
        'ppo_norm_adv': ppo_norm_adv,
        'transfer_variants': transfer_variants,
        'transfer_pol_batch_weight': transfer_pol_batch_weight,
        'transfer_pol_loss_weight': transfer_pol_loss_weight,
        'transfer_disc_loss_weight': transfer_disc_loss_weight,
        'transfer_disc_anneal': transfer_disc_anneal,
        'ndemos': len(demos),
        'n_uniq_envs': n_uniq_envs,
    }
    with make_logger_ctx(out_dir,
                         "mtgail",
                         f"mt{n_uniq_envs}",
                         run_name,
                         snapshot_gap=snapshot_gap,
                         log_params=log_params):
        torch.save(
            discriminator_mt,
            os.path.join(logger.get_snapshot_dir(), 'full_discrim_model.pt'))

        if disc_ae_pretrain_iters:
            # FIXME(sam): come up with a better solution for creating these
            # montages (can I do it regularly? Should I put them somewhere
            # other than the snapshot dir?).
            ae_trainer.make_montage(
                os.path.join(logger.get_snapshot_dir(), 'ae-before.png'))
            ae_trainer.do_full_training(disc_ae_pretrain_iters)
            ae_trainer.make_montage(
                os.path.join(logger.get_snapshot_dir(), 'ae-after.png'))

        # note that periodic snapshots get saved by GAILMiniBatchRl, thanks to
        # the overridden get_itr_snapshot() method
        runner.train(cb_startup=init_policy_cb)
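The comment near the top of this example asks how set_seeds() should interact with subprocesses under 'spawn'. Because spawned workers re-import the module instead of inheriting the parent's memory, any seeding has to happen explicitly inside each child. A minimal, hedged sketch of that idea with toy names (this is not rlpyt's or mtil's actual API):

import multiprocessing as mp
import random


def worker(rank, base_seed):
    # each spawned child seeds its own RNGs; nothing is inherited from the parent
    random.seed(base_seed + rank)
    print(f"worker {rank} drew {random.random():.4f}")


if __name__ == '__main__':
    mp.set_start_method('spawn')
    procs = [mp.Process(target=worker, args=(rank, 42)) for rank in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()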
Example #47
def run(args=None, ntasks=None):
    if args is None:
        args = Args().parse_args()
    if isinstance(args, dict):
        args = Args(**args).parse_args()

    # Automatically determine ntasks when it is not supplied
    if ntasks is None:
        try:
            devices = os.environ['CUDA_VISIBLE_DEVICES']
            ntasks = len(devices.split(','))
        except Exception:
            try:
                ntasks = int(os.popen("nvidia-smi -L | wc -l").read())
            except Exception:
                ntasks = 2

    args.is_distributed = True
    # Temporary workaround for a PyTorch DataLoader bug: it leaks semaphores
    os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning,ignore::UserWarning'

    # Make this process the head of a process group.
    os.setpgrp()

    # Most important line in this file. CUDA fails horribly if we use the default 'fork' start method.
    multiprocessing.set_start_method('forkserver')

    processses = []
    for i in range(ntasks):
        p = multiprocessing.Process(target=work, args=[(i, ntasks, args)])
        p.start()

        if args.strace:
            # Additional monitoring process
            subprocess.Popen(["strace", "-tt" , 
                "-o", f"{args.exp_dir}/strace_{i}.log", 
                "-e", "trace=write", "-s256", 
                "-p", f"{p.pid}"])

        processses.append(p)

    def terminate(signum, frame):
        # Try terminate first
        print("terminating child processes")
        sys.stdout.flush()
        for i, p in enumerate(processses):
            if p.is_alive():
                p.terminate()

        # Wait a little
        for i in range(20):
            if any(p.is_alive() for p in processses):
                sleep(0.1)

        # If they are still alive after that, kill -9 them
        for i, p in enumerate(processses):
            if p.is_alive():
                print(f"Sending SIGKILL to process {i}")
                sys.stdout.flush()
                os.kill(p.pid, signal.SIGKILL)

        print("exiting")
        sys.stdout.flush()
        sys.exit(0)


    if args.auto_requeue:
        def forward_usr1_signal(signum, frame):
            print(f"Received USR1 signal in spawn_dist", flush=True)
            for i, p in enumerate(processses):
                if p.is_alive():
                    os.kill(p.pid, signal.SIGUSR1)
        
        def forward_term_signal(signum, frame):
            print(f"Received SIGTERM signal in spawn_dist", flush=True)
            for i, p in enumerate(processses):
                if p.is_alive():
                    os.kill(p.pid, signal.SIGTERM)

        # For requeueing we need to ignore SIGTERM and forward USR1
        signal.signal(signal.SIGUSR1, forward_usr1_signal)
        signal.signal(signal.SIGTERM, forward_term_signal)
        signal.signal(signal.SIGINT, terminate)

    else:
        signal.signal(signal.SIGINT, terminate)
        signal.signal(signal.SIGTERM, terminate)

    while True:
        sleep(0.5)
        if any(not p.is_alive() for p in processses):
            print("Detected an exited process, so exiting main")
            terminate(None, None)

    print("DONE")
Example #48
def main():
    """Application entry point.
    """
    if hasattr(multiprocessing, 'set_start_method'):
        # Avoid using 'fork': safely forking a multithreaded process is problematic
        multiprocessing.set_start_method('spawn')

    parser = argparse.ArgumentParser(usage="%(prog)s [options]", description=pibooth.__doc__)

    parser.add_argument('--version', action='version', version=pibooth.__version__,
                        help=u"show program's version number and exit")

    parser.add_argument("--config", action='store_true',
                        help=u"edit the current configuration and exit")

    parser.add_argument("--translate", action='store_true',
                        help=u"edit the GUI translations and exit")

    parser.add_argument("--reset", action='store_true',
                        help=u"restore the default configuration/translations and exit")

    parser.add_argument("--fonts", action='store_true',
                        help=u"display all available fonts and exit")

    parser.add_argument("--nolog", action='store_true', default=False,
                        help=u"don't save console output in a file (avoid filling the /tmp directory)")

    group = parser.add_mutually_exclusive_group()
    group.add_argument("-v", "--verbose", dest='logging', action='store_const', const=logging.DEBUG,
                       help=u"report more information about operations", default=logging.INFO)
    group.add_argument("-q", "--quiet", dest='logging', action='store_const', const=logging.WARNING,
                       help=u"report only errors and warnings", default=logging.INFO)

    options, _args = parser.parse_known_args()

    if not options.nolog:
        filename = osp.join(tempfile.gettempdir(), 'pibooth.log')
    else:
        filename = None
    configure_logging(options.logging, '[ %(levelname)-8s] %(name)-18s: %(message)s', filename=filename)

    plugin_manager = create_plugin_manager()

    # Load the configuration and languages
    config = PiConfigParser("~/.config/pibooth/pibooth.cfg", plugin_manager)
    language.init(config.join_path("translations.cfg"), options.reset)

    # Register plugins
    custom_paths = [p for p in config.gettuple('GENERAL', 'plugins', 'path') if p]
    load_plugins(plugin_manager, *custom_paths)
    LOGGER.info("Installed plugins: %s", ", ".join(list_plugin_names(plugin_manager)))

    # Update configuration with plugins ones
    plugin_manager.hook.pibooth_configure(cfg=config)

    # Ensure config files are present in case of first pibooth launch
    if not options.reset:
        if not osp.isfile(config.filename):
            config.save(default=True)
        plugin_manager.hook.pibooth_reset(cfg=config, hard=False)

    if options.config:
        LOGGER.info("Editing the pibooth configuration...")
        config.edit()
    elif options.translate:
        LOGGER.info("Editing the GUI translations...")
        language.edit()
    elif options.fonts:
        LOGGER.info("Listing all fonts available...")
        print_columns_words(get_available_fonts(), 3)
    elif options.reset:
        config.save(default=True)
        plugin_manager.hook.pibooth_reset(cfg=config, hard=True)
    else:
        LOGGER.info("Starting the photo booth application %s", GPIO_INFO)
        app = PiApplication(config, plugin_manager)
        app.main_loop()
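The comment about avoiding 'fork' also implies the usual caveat of 'spawn': the child re-imports the entry module, so anything that starts processes must sit behind the __main__ guard. A tiny, hedged standalone illustration, unrelated to pibooth's code:

import multiprocessing


def greet(name):
    print(f"hello from {name}")


if __name__ == '__main__':
    # without this guard, a spawned child would re-run the module's
    # top-level code and try to start processes of its own
    multiprocessing.set_start_method('spawn')
    p = multiprocessing.Process(target=greet, args=('child',))
    p.start()
    p.join()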
Example #49
            os.remove('BEST_WS')

            best_asd_path = os.path.join(
                config.DATA_PATH,
                'Trained_Models',
                'Complex_Fully_Connected_WGAN_LPF_W_Adjusted',
                time_stamp() + '|WS' + '|BC:' + str(batch_size) + '|g_eta:' + str(g_eta) + '|d_eta:' + str(d_eta) + '|n_critic:' + str(n_critic) + '|clip_value:' + str(clip_value)
            )
            torch.save(self.state_dict(), best_asd_path)
        except:
            pass

        return


if __name__ == '__main__':
    set_start_method('spawn')   # To make the dynamic reporter work

    for eta in [0.0001, 0.00001, 0.001]:
        for i in range(5):
            report_path = os.path.join(
                config.DATA_PATH,
                'Training_Reports',
                'Complex_Fully_Connected_WGAN_LPF_W_Adjusted',
                time_stamp() + '|eta:' + str(eta) + '|n_critic:' + str(5) + '|clip_value:' + str(0.01) + '.png'
            )
            init_dynamic_report(3, report_path)
            gan = Complex_Fully_Connected_WGAN_LPF_W_Adjusted(dim)
            gan.train(train_set, 10, 40, eta, eta, 5, 0.01, True)
            stop_dynamic_report()
Example #50
    elif module5:
        for k in tilenames:
            #MODULE 5
            t_mod5 = time.time()
            options = kwargs.get('module5', {})
            m5.manager(k, **options)
            t_mod5 = (time.time() - t_mod5) / 60
            print("MOD5 TIME = %imin                                        " %
                  (int(t_mod5)))


#---------------------------------------------------------------------------------------------------#
if (__name__ == '__main__'):
    #MULTIPROCESSING INITIALIZATION
    freeze_support()  #needed for windows
    set_start_method('spawn')  # because the VSCode debugger (ptvsd) is not fork-safe

    #READ COMMAND ARGUMENTS
    parser = argparse.ArgumentParser()

    parser.add_argument('-c', '--config', required=True, metavar='config.ini')
    parser.add_argument('-m1',
                        '--module1',
                        action='store_true',
                        help="run module 1")
    parser.add_argument('-m2',
                        '--module2',
                        action='store_true',
                        help="run module 2")
    parser.add_argument('-m3',
                        '--module3',
Example #51
from vbmc4vsphere import exception, log, utils
from vbmc4vsphere.vbmc import VirtualBMC

LOG = log.get_logger()

# BMC status
RUNNING = "running"
DOWN = "down"
ERROR = "error"

DEFAULT_SECTION = "VirtualBMC"

CONF = vbmc_config.get_config()

# Always default to 'fork' multiprocessing
multiprocessing.set_start_method("fork")


class VirtualBMCManager(object):

    VBMC_OPTIONS = [
        "username",
        "password",
        "address",
        "port",
        "fakemac",
        "vm_name",
        "viserver",
        "viserver_username",
        "viserver_password",
        "active",
Example #52
                        dict(color='red',
                             text='Your code probably took too long.\n'),
                        dict(color='red',
                             text='Maybe you have an infinite loop?\n'),
                    ],
                    output='The process died.',
                )
        return result


user_processes = defaultdict(UserProcess)

app = flask.Flask(__name__)

try:
    multiprocessing.set_start_method("spawn")
except RuntimeError:
    # noinspection PyArgumentList
    assert multiprocessing.get_start_method() == "spawn"


def monitor_processes():
    history = deque([], MONITOR.NUM_MEASUREMENTS)
    while True:
        sleep(MONITOR.SLEEP_TIME)
        percent = psutil.virtual_memory().percent
        history.append(percent)
        log.info(f"Recent memory usage: {history}")
        log.info(f"Number of user processes: {len(user_processes)}")
        if (len(history) == history.maxlen and min(history) > MONITOR.THRESHOLD
                and len(user_processes) > MONITOR.MIN_PROCESSES):
    :py:class:`~bids.layout.BIDSLayout`, etc.)

"""
import os
from multiprocessing import set_start_method

# Disable NiPype etelemetry always
_disable_et = bool(
    os.getenv("NO_ET") is not None or os.getenv("NIPYPE_NO_ET") is not None)
os.environ["NIPYPE_NO_ET"] = "1"
os.environ["NO_ET"] = "1"

CONFIG_FILENAME = "fmriprep.toml"

try:
    set_start_method("forkserver")
except RuntimeError:
    pass  # context has been already set
finally:
    # Defer all custom import for after initializing the forkserver and
    # ignoring the most annoying warnings
    import sys
    import random
    from uuid import uuid4
    from time import strftime

    from pathlib import Path
    from nipype import __version__ as _nipype_ver
    from templateflow import __version__ as _tf_ver
    from . import __version__
def optimize_lake_problem(use_original_R_metrics=False, demo=True):
    """Analysis of the Lake Problem.

    (1) Runs a multi-objective robust optimisation of the Lake Problem
        using both standard and custom robustness metrics;
    (2) analyses the effects of different sets of scenarios on the
        robustness values and robustness rankings;
    (3) plots these effects;
    (4) analyses the effects of different robustness metrics on the
        robustness values and robustness rankings; and
    (5) plots these effects.
    """
    filepath = './robust_results.h5'

    robustness_functions = (get_original_R_metrics() if use_original_R_metrics
                            else get_custom_R_metrics_for_workbench())

    lake_model = get_lake_model()

    if not os.path.exists(filepath):
        n_scenarios = 10 if demo else 200  # for demo purposes only, should in practice be higher
        scenarios = sample_uncertainties(lake_model, n_scenarios)
        nfe = 1000 if demo else 50000  # number of function evaluations

        # Needed on Linux-based machines
        multiprocessing.set_start_method('spawn', True)

        # Run optimisation
        with MultiprocessingEvaluator(lake_model) as evaluator:
            robust_results = evaluator.robust_optimize(
                robustness_functions,
                scenarios,
                nfe=nfe,
                population_size=(10 if demo else 50),
                epsilons=[
                    0.1,
                ] * len(robustness_functions))
        print(robust_results)

    robust_results = pd.read_hdf(filepath, key='df')

    # Results are performance in each timestep, followed by robustness
    # we only care about the robustness, so we get that
    col_names = robust_results.columns.values.tolist()
    col_names = col_names[-len(robustness_functions):]

    # Plot the robustness results
    sns.pairplot(robust_results, vars=col_names, diag_kind='kde')
    # plt.show()

    # Extract the decision alternatives from the results
    decision_alternatives = robust_results.iloc[:, :-4].values
    decision_alternatives = [
        Policy(
            idx, **{
                str(idx): value
                for idx, value in enumerate(
                    decision_alternatives[idx].tolist())
            }) for idx in range(decision_alternatives.shape[0])
    ]

    # Find the influence of scenarios. Here we are creating 5
    # sets of 100 scenarios each, all using the same sampling
    # method.
    scenarios_per_set = 100
    n_sets = 5
    n_scenarios = scenarios_per_set * n_sets
    scenarios = sample_uncertainties(lake_model, n_scenarios)

    # Simulate optimal solutions across all scenarios
    with MultiprocessingEvaluator(lake_model) as evaluator:
        results = evaluator.perform_experiments(scenarios=scenarios,
                                                policies=decision_alternatives)
    # We will just look at the vulnerability ('max_P') for this example
    f = np.reshape(results[1]['max_P'], newshape=(-1, n_scenarios))
    # Split the results into the different sets of scenarios
    split_f = np.split(f, n_sets, axis=1)
    # Calculate robustness for each set of scenarios
    # Note that each split_f[set_idx] is a 2D array, with each row being
    # a decision alternative, and each column a scenario
    R_metric = get_custom_R_metrics()[0]
    R = [R_metric(split_f[set_idx]) for set_idx in range(n_sets)]
    R = np.transpose(R)

    # Calculate similarity in robustness from different scenario sets
    delta, tau = analysis.scenarios_similarity(R)
    # Plot the deltas using a helper function
    analysis.delta_plot(delta)
    # Plot the Kendall's tau-b values using a helper function
    analysis.tau_plot(tau)

    # We now want to test the effects of different robustness metrics,
    # across all of the 100 scenarios. We first define a few new
    # robustness metrics (in addition to our original R metric for
    # the vulnerability). For this example we use some classic metrics
    R_metrics = [
        R_metric,  # The original robustness metric
        functools.partial(metrics.maximax, maximise=False),
        functools.partial(metrics.laplace, maximise=False),
        functools.partial(metrics.minimax_regret, maximise=False),
        functools.partial(metrics.percentile_kurtosis, maximise=False)
    ]

    # Calculate robustness for each robustness metric
    R = np.transpose([R_metric(f) for R_metric in R_metrics])

    # Calculate similarity in robustness from different robustness metrics
    tau = analysis.R_metric_similarity(R)
    # Plot the Kendall's tau-b values using a helper function
    analysis.tau_plot(tau)
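In the optimisation block above, the second argument to set_start_method('spawn', True) is force=True, which overrides a start method that has already been fixed instead of raising RuntimeError. A small, hedged standalone sketch of that behaviour (POSIX-only, because it first selects 'fork'):

import multiprocessing

if __name__ == '__main__':
    multiprocessing.set_start_method('fork')
    try:
        multiprocessing.set_start_method('spawn')               # raises: already set
    except RuntimeError:
        multiprocessing.set_start_method('spawn', force=True)   # succeeds
    print(multiprocessing.get_start_method())                   # prints 'spawn'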
Example #55
                            2,
                        )
                        del d

                del t, bbox

        cv2.imshow("Object Tracking", frame)
        cv2.imwrite("./tracked/{}.png".format(tally), frame)
        tally += 1
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            raise KeyboardInterrupt


if __name__ == "__main__":
    multiprocessing.set_start_method("spawn", force=True)
    cap_queue = multiprocessing.Queue(1)
    processes = []
    processes.append(multiprocessing.Process(target=main, args=(cap_queue, )))
    processes.append(
        multiprocessing.Process(target=cap_worker, args=(cap_queue, )))

    try:
        for process in processes:
            process.start()
        for process in processes:
            process.join()
    except KeyboardInterrupt:
        for process in processes:
            process.terminate()
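The cap_queue above is created with multiprocessing.Queue(1), so at most one frame is in flight and the capture worker blocks instead of letting frames pile up. A tiny, hedged producer/consumer sketch of that bounded-queue behaviour with toy functions (not the tracking code):

import multiprocessing


def producer(q):
    for frame_id in range(5):
        q.put(frame_id)  # blocks while the queue already holds an item
    q.put(None)          # sentinel telling the consumer to stop


def consumer(q):
    while True:
        item = q.get()
        if item is None:
            break
        print(f"processing frame {item}")


if __name__ == "__main__":
    multiprocessing.set_start_method("spawn", force=True)
    q = multiprocessing.Queue(1)  # bounded: at most one item in flight
    procs = [multiprocessing.Process(target=producer, args=(q,)),
             multiprocessing.Process(target=consumer, args=(q,))]
    for p in procs:
        p.start()
    for p in procs:
        p.join()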
def multi_import(*argv):
    """
.. function:: multi_import ( *argv )

   Import CSV files using multiprocessing

   :param argv: list of command lines

   """

    usage_message = '''
    A master script to manage uploading of a single data file as multiple input files. Multi-import
    will optionally split a single file (specified by the --single argument) or upload an
    already split list of files passed in on the command line.
    Each file is uploaded by a separate pymongoimport subprocess.
    '''

    parser = argparse.ArgumentParser(usage=usage_message)
    parser = add_standard_args(parser)
    parser.add_argument("--poolsize",
                        type=int,
                        default=multiprocessing.cpu_count(),
                        help="The number of parallel processes to run")
    parser.add_argument(
        "--forkmethod",
        choices=["spawn", "fork", "forkserver"],
        default="spawn",
        help="The method used to create subprocesses (default: 'spawn')"
    )

    args = parser.parse_args(*argv)

    multiprocessing.set_start_method(args.forkmethod)

    log = Logger("multi_import").log()

    Logger.add_file_handler("multi_import")
    Logger.add_stream_handler("multi_import")

    child_args = sys.argv[1:]
    children = OrderedDict()

    log.info("filenames:%s", args.filenames)
    if len(args.filenames) == 0:
        log.info("no input files")
        sys.exit(0)

    if args.poolsize:
        poolsize = args.poolsize
        child_args = strip_arg(child_args, "--poolsize", True)

    if args.restart:
        log.info("Ignoring --drop overridden by --restart")
    elif args.drop:
        client = pymongo.MongoClient(args.host)
        log.info("Dropping database : %s", args.database)
        client.drop_database(args.database)
        child_args = strip_arg(child_args, args.drop)

    if args.audit:
        audit = Audit(client)
        batch_ID = audit.start_batch({"command": sys.argv})
    else:
        audit = None
        batch_ID = None

    start = time.time()

    process_count = 0
    log.info("Poolsize:{}".format(poolsize))

    log.info("Fork using:'%s'", args.forkmethod)
    if args.forkmethod == "fork":
        subprocess = SubProcess(log=log,
                                audit=audit,
                                batch_ID=batch_ID,
                                args=args)
    else:  # "spawn" and "forkserver" use the same setup
        subprocess = SubProcess(log=None,
                                audit=audit,
                                batch_ID=batch_ID,
                                args=args)

    subprocess.setup_log_handlers()

    try:

        #
        # Should use a Pool here but Pools need top level functions which is
        # ugly.
        #
        proc_list = []

        for arg_list in chunker(args.filenames,
                                poolsize):  # blocks of poolsize
            proc_list = []
            for i in arg_list:
                if os.path.isfile(i):
                    log.info(f"Processing:'{i}'")
                    proc = Process(target=subprocess.run, args=(i, ), name=i)
                    proc.start()
                    proc_list.append(proc)
                else:
                    log.warning("No such file: '{i}' ignoring")

            for proc in proc_list:
                proc.join()

    except KeyboardInterrupt:
        log.info("Keyboard interrupt...")
        for proc in proc_list:
            log.info("terminating process: '%s'", proc.name)
            proc.terminate()

    finish = time.time()

    log.info("Total elapsed time:%f" % (finish - start))
Example #57
from data import get_train_dataflow
from eval import EvalCallback
from modeling.generalized_rcnn import ResNetC4Model, ResNetFPNModel

try:
    import horovod.tensorflow as hvd
except ImportError:
    pass

if __name__ == '__main__':
    # "spawn/forkserver" is safer than the default "fork" method and
    # produce more deterministic behavior & memory saving
    # However its limitation is you cannot pass a lambda function to subprocesses.
    import multiprocessing as mp
    try:
        mp.set_start_method('spawn', force=True)
    except RuntimeError:
        print('Damn that runtime error!')
        pass
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--load',
        help=
        'load a model to start training from. Can overwrite BACKBONE.WEIGHTS')
    parser.add_argument('--logdir',
                        help='log directory',
                        default='train_log/maskrcnn')
    parser.add_argument(
        '--config',
        help="A list of KEY=VALUE to overwrite those defined in config.py",
        nargs='+')
Example #58
from multiprocessing import Process, Queue
import multiprocessing as mp
from game import main_game
from game_sim import main_game_sim
from monitoring import main_monitor

if __name__ == "__main__":
    mp.set_start_method("spawn")

    queue = Queue()

    p1 = Process(target=main_game, args=[queue])
    p1.start()

    # p1 = Process(target=main_game_sim, args=[queue])
    # p1.start()

    # p2 = Process(target=main_monitor, args=[queue])
    # p2.start()

    p1.join()
    # p2.join()
Example #59
                                     satur.during_training(
                                         satur_training_dir, True, dig),
                                     skip=5)).reg(
                                         tnr.OnEpochCaller.create_every(
                                             tnr.save_model(trained_net_dir),
                                             skip=5))
     #.reg(pca3d_throughtrain.PCAThroughTrain(pca_throughtrain_dir, layer_names, True, layer_indices=plot_layers))
     .reg(tnr.OnFinishCaller(lambda *args, **kwargs: dig.join())).reg(
         tnr.ZipDirOnFinish(dtt_training_dir)).reg(
             tnr.ZipDirOnFinish(pca_training_dir)).reg(
                 tnr.ZipDirOnFinish(pca3d_training_dir)).reg(
                     tnr.ZipDirOnFinish(pr_training_dir)).reg(
                         tnr.ZipDirOnFinish(svm_training_dir)).reg(
                             tnr.ZipDirOnFinish(satur_training_dir)).reg(
                                 tnr.ZipDirOnFinish(trained_net_dir)).reg(
                                     tnr.CopyLogOnFinish(logpath)))

    trainer.train(network)
    dig.archive_raw_inputs(os.path.join(SAVEDIR, 'digestor_raw.zip'))


if __name__ == '__main__':
    import multiprocessing as mp
    try:
        mp.set_start_method('spawn')
    except RuntimeError:
        print(
            'failed to set multiprocessing spawn method; this happens on windows'
        )

    main()
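An alternative to wrapping set_start_method('spawn') in try/except, shown here as a hedged sketch: ask multiprocessing whether a start method has already been fixed before trying to set one.

import multiprocessing as mp

if __name__ == '__main__':
    # get_start_method(allow_none=True) returns None if no start method
    # has been fixed yet, so we only set it when we are first
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')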
Example #60
def gpu_aamp(T_A, m, T_B=None, ignore_trivial=True, device_id=0):
    """
    Compute the non-normalized (i.e., without z-normalization) matrix profile with one
    or more GPU devices

    This is a convenience wrapper around the Numba `cuda.jit` `_gpu_aamp` function
    which computes the non-normalized matrix profile according to modified version
    GPU-STOMP.

    Parameters
    ----------
    T_A : numpy.ndarray
        The time series or sequence for which to compute the matrix profile

    m : int
        Window size

    T_B : numpy.ndarray, default None
        The time series or sequence that contain your query subsequences
        of interest. Default is `None` which corresponds to a self-join.

    ignore_trivial : bool, default True
        Set to `True` if this is a self-join. Otherwise, for AB-join, set this
        to `False`. Default is `True`.

    device_id : int or list, default 0
        The (GPU) device number to use. The default value is `0`. A list of
        valid device ids (int) may also be provided for parallel GPU-STUMP
        computation. A list of all valid device ids can be obtained by
        executing `[device.id for device in numba.cuda.list_devices()]`.

    Returns
    -------
    out : numpy.ndarray
        The first column consists of the matrix profile, the second column
        consists of the matrix profile indices, the third column consists of
        the left matrix profile indices, and the fourth column consists of
        the right matrix profile indices.

    Notes
    -----
    `arXiv:1901.05708 \
    <https://arxiv.org/pdf/1901.05708.pdf>`__

    See Algorithm 1

    Note that we have extended this algorithm for AB-joins as well.

    `DOI: 10.1109/ICDM.2016.0085 \
    <https://www.cs.ucr.edu/~eamonn/STOMP_GPU_final_submission_camera_ready.pdf>`__

    See Table II, Figure 5, and Figure 6
    """
    if T_B is None:  # Self join!
        T_B = T_A
        ignore_trivial = True

    T_A, T_A_subseq_isfinite = core.preprocess_non_normalized(T_A, m)
    T_B, T_B_subseq_isfinite = core.preprocess_non_normalized(T_B, m)

    T_A_subseq_squared = np.sum(core.rolling_window(T_A * T_A, m), axis=1)
    T_B_subseq_squared = np.sum(core.rolling_window(T_B * T_B, m), axis=1)

    if T_A.ndim != 1:  # pragma: no cover
        raise ValueError(
            f"T_A is {T_A.ndim}-dimensional and must be 1-dimensional. "
            "For multidimensional STUMP use `stumpy.mstump` or `stumpy.mstumped`"
        )

    if T_B.ndim != 1:  # pragma: no cover
        raise ValueError(
            f"T_B is {T_B.ndim}-dimensional and must be 1-dimensional. "
            "For multidimensional STUMP use `stumpy.mstump` or `stumpy.mstumped`"
        )

    core.check_window_size(m, max_size=min(T_A.shape[0], T_B.shape[0]))

    if ignore_trivial is False and core.are_arrays_equal(
            T_A, T_B):  # pragma: no cover
        logger.warning("Arrays T_A, T_B are equal, which implies a self-join.")
        logger.warning("Try setting `ignore_trivial = True`.")

    if ignore_trivial and core.are_arrays_equal(
            T_A, T_B) is False:  # pragma: no cover
        logger.warning(
            "Arrays T_A, T_B are not equal, which implies an AB-join.")
        logger.warning("Try setting `ignore_trivial = False`.")

    n = T_B.shape[0]
    k = T_A.shape[0] - m + 1
    l = n - m + 1
    excl_zone = int(np.ceil(
        m / config.STUMPY_EXCL_ZONE_DENOM))  # See Definition 3 and Figure 3

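    # The inputs are round-tripped through temporary files below, presumably so
    # that spawned child processes can reload them by filename rather than
    # receiving large pickled arrays as arguments.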
    T_A_fname = core.array_to_temp_file(T_A)
    T_B_fname = core.array_to_temp_file(T_B)
    T_A_subseq_isfinite_fname = core.array_to_temp_file(T_A_subseq_isfinite)
    T_B_subseq_isfinite_fname = core.array_to_temp_file(T_B_subseq_isfinite)
    T_A_subseq_squared_fname = core.array_to_temp_file(T_A_subseq_squared)
    T_B_subseq_squared_fname = core.array_to_temp_file(T_B_subseq_squared)

    out = np.empty((k, 4), dtype=object)

    if isinstance(device_id, int):
        device_ids = [device_id]
    else:
        device_ids = device_id

    profile = [None] * len(device_ids)
    indices = [None] * len(device_ids)

    for _id in device_ids:
        with cuda.gpus[_id]:
            if (cuda.current_context().__class__.__name__ !=
                    "FakeCUDAContext"):  # pragma: no cover
                cuda.current_context().deallocations.clear()

    step = 1 + l // len(device_ids)

    # Start process pool for multi-GPU request
    if len(device_ids) > 1:  # pragma: no cover
        mp.set_start_method("spawn", force=True)
        p = mp.Pool(processes=len(device_ids))
        results = [None] * len(device_ids)

    QT_fnames = []
    QT_first_fnames = []

    for idx, start in enumerate(range(0, l, step)):
        stop = min(l, start + step)

        QT, QT_first = core._get_QT(start, T_A, T_B, m)
        QT_fname = core.array_to_temp_file(QT)
        QT_first_fname = core.array_to_temp_file(QT_first)
        QT_fnames.append(QT_fname)
        QT_first_fnames.append(QT_first_fname)

        if len(device_ids
               ) > 1 and idx < len(device_ids) - 1:  # pragma: no cover
            # Spawn and execute in child process for multi-GPU request
            results[idx] = p.apply_async(
                _gpu_aamp,
                (
                    T_A_fname,
                    T_B_fname,
                    m,
                    stop,
                    excl_zone,
                    T_A_subseq_isfinite_fname,
                    T_B_subseq_isfinite_fname,
                    T_A_subseq_squared_fname,
                    T_B_subseq_squared_fname,
                    QT_fname,
                    QT_first_fname,
                    k,
                    ignore_trivial,
                    start + 1,
                    device_ids[idx],
                ),
            )
        else:
            # Execute last chunk in parent process
            # Only parent process is executed when a single GPU is requested
            profile[idx], indices[idx] = _gpu_aamp(
                T_A_fname,
                T_B_fname,
                m,
                stop,
                excl_zone,
                T_A_subseq_isfinite_fname,
                T_B_subseq_isfinite_fname,
                T_A_subseq_squared_fname,
                T_B_subseq_squared_fname,
                QT_fname,
                QT_first_fname,
                k,
                ignore_trivial,
                start + 1,
                device_ids[idx],
            )

    # Clean up process pool for multi-GPU request
    if len(device_ids) > 1:  # pragma: no cover
        p.close()
        p.join()

        # Collect results from spawned child processes if they exist
        for idx, result in enumerate(results):
            if result is not None:
                profile[idx], indices[idx] = result.get()

    os.remove(T_A_fname)
    os.remove(T_B_fname)
    os.remove(T_A_subseq_isfinite_fname)
    os.remove(T_B_subseq_isfinite_fname)
    os.remove(T_A_subseq_squared_fname)
    os.remove(T_B_subseq_squared_fname)
    for QT_fname in QT_fnames:
        os.remove(QT_fname)
    for QT_first_fname in QT_first_fnames:
        os.remove(QT_first_fname)

    for idx in range(len(device_ids)):
        profile_fname = profile[idx]
        indices_fname = indices[idx]
        profile[idx] = np.load(profile_fname, allow_pickle=False)
        indices[idx] = np.load(indices_fname, allow_pickle=False)
        os.remove(profile_fname)
        os.remove(indices_fname)

    for i in range(1, len(device_ids)):
        # Update all matrix profiles and matrix profile indices
        # (global, left, right) and store in profile[0] and indices[0]
        for col in range(profile[0].shape[1]):  # pragma: no cover
            cond = profile[0][:, col] < profile[i][:, col]
            profile[0][:, col] = np.where(cond, profile[0][:, col],
                                          profile[i][:, col])
            indices[0][:, col] = np.where(cond, indices[0][:, col],
                                          indices[i][:, col])

    out[:, 0] = profile[0][:, 0]
    out[:, 1:4] = indices[0][:, :]

    threshold = 10e-6
    if core.are_distances_too_small(out[:, 0],
                                    threshold=threshold):  # pragma: no cover
        logger.warning(
            f"A large number of values are smaller than {threshold}.")
        logger.warning("For a self-join, try setting `ignore_trivial = True`.")

    return out
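To isolate the dispatch pattern used above: all but the last chunk are submitted to spawned pool workers via apply_async, the last chunk runs in the parent process, and the child results are collected afterwards. A minimal, hedged sketch with a toy work function (not stumpy code):

import multiprocessing as mp


def work(chunk_id):
    return chunk_id * chunk_id


if __name__ == "__main__":
    mp.set_start_method("spawn", force=True)
    chunks = list(range(4))
    results = [None] * len(chunks)
    with mp.Pool(processes=len(chunks) - 1) as pool:
        async_results = [pool.apply_async(work, (c,)) for c in chunks[:-1]]
        results[-1] = work(chunks[-1])         # last chunk in the parent
        for i, r in enumerate(async_results):  # collect child results
            results[i] = r.get()
    print(results)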