Example #1
    def __del__(self):
        atexit.unregister(self.__del__)

        if self.procs:
            self.__log.info('shutting down all processes')
            @coroutine
            def shutdown(remote, procs):
                try:
                    yield from asyncio.wait_for(remote.shutdown(), timeout=5)
                except asyncio.TimeoutError:
                    self.__log.warn("can't shutdown %s properly, killing it", set(procs))
                    for p in procs:
                        p.terminate()

            future = asyncio.gather(*[shutdown(remote, proc)
                        for remote,proc in zip(self.remotes.values(), self.procs.values())], 
                        return_exceptions=True)
            asyncio.get_event_loop().run_until_complete(future)
            for r in future:
                if r:
                    self.__log.warn('%r when shutting down', r)

            self.procs.clear()
            self.remotes.clear()
        os.system("rm -rf {!r}".format(self.path))
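A note on the pattern above: unregistering `self.__del__` from inside `__del__` only pays off if the object registered that same bound method earlier, so that cleanup runs exactly once whether it is triggered explicitly or by the atexit machinery (and note that registering a bound method keeps the instance alive until exit, cf. the comment in Example #26). A minimal sketch, assuming registration happens in `__init__`:

import atexit

class Pool:
    def __init__(self):
        self.procs = {"worker": object()}  # stand-in for real processes
        atexit.register(self.__del__)      # guarantee cleanup at exit

    def __del__(self):
        # Bound methods compare equal, so this cancels the handler
        # registered in __init__; clearing self.procs makes any later
        # call (e.g. from garbage collection) a no-op, as in Example #1.
        atexit.unregister(self.__del__)
        if self.procs:
            print("shutting down all processes")
            self.procs.clear()

pool = Pool()
pool.__del__()  # explicit cleanup; neither atexit nor GC repeats it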
Example #2
def get_logger():
    '''
    Returns logger used by multiprocessing
    '''
    global _logger
    import logging, atexit

    logging._acquireLock()
    try:
        if not _logger:

            _logger = logging.getLogger(LOGGER_NAME)
            _logger.propagate = 0
            logging.addLevelName(SUBDEBUG, 'SUBDEBUG')
            logging.addLevelName(SUBWARNING, 'SUBWARNING')

            # XXX multiprocessing should cleanup before logging
            if hasattr(atexit, 'unregister'):
                atexit.unregister(_exit_function)
                atexit.register(_exit_function)
            else:
                atexit._exithandlers.remove((_exit_function, (), {}))
                atexit._exithandlers.append((_exit_function, (), {}))

    finally:
        logging._releaseLock()

    return _logger
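The unregister/register pair in the middle of this function is an ordering trick rather than real cleanup: atexit runs handlers last-in, first-out, so removing `_exit_function` and immediately re-adding it moves it to the front of the exit sequence, ahead of the handler the logging module registered at import time. A small sketch of the effect, with illustrative function names:

import atexit

def flush_logs():
    print("flush_logs")

def stop_workers():
    print("stop_workers")

atexit.register(stop_workers)
atexit.register(flush_logs)
# LIFO order would run flush_logs first; re-registering stop_workers
# moves it to the front of the exit sequence instead:
atexit.unregister(stop_workers)
atexit.register(stop_workers)
# At interpreter exit this prints: stop_workers, then flush_logs.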
Example #3
 def __del__(self):
     if platform.system() == "Windows":
         self.close()
         try:
             atexit.unregister(self.close)
         except:
             pass
Example #4
def get_logger():
    """
    Returns logger used by multiprocessing
    """
    global _logger
    import logging

    logging._acquireLock()
    try:
        if not _logger:

            _logger = logging.getLogger(LOGGER_NAME)
            _logger.propagate = 0

            # XXX multiprocessing should cleanup before logging
            if hasattr(atexit, "unregister"):
                atexit.unregister(_exit_function)
                atexit.register(_exit_function)
            else:
                atexit._exithandlers.remove((_exit_function, (), {}))
                atexit._exithandlers.append((_exit_function, (), {}))

    finally:
        logging._releaseLock()

    return _logger
Example #5
	def run(self, *args, **kw):
		if not self.cluster.initialized():
			self.cluster.encoding = 'utf-8'
			self.cluster.init(
				user = '******',
				encoding = self.cluster.encoding,
				logfile = None,
			)
			sys.stderr.write('*')
			try:
				atexit.register(self.cluster.drop)
				self.configure_cluster()
				self.cluster.start(logfile = sys.stdout)
				self.cluster.wait_until_started()
				self.initialize_database()
			except Exception:
				self.cluster.drop()
				atexit.unregister(self.cluster.drop)
				raise
		if not self.cluster.running():
			self.cluster.start()
			self.cluster.wait_until_started()

		db = self.connection()
		with db:
			self.db = db
			try:
				return super().run(*args, **kw)
			finally:
				self.db = None
Example #6
def flac2mp3(folders, concurrency):
    '''Convert all files in folders to mp3'''
    import atexit
    import concurrent.futures as cf
    from concurrent.futures.thread import _python_exit
    from pydub import AudioSegment
    flac_files = list(lib.find_files(folders, ['flac']))

    pbar = None
    if not config.quiet:
        pbar = click.progressbar(length=len(flac_files), label='Converting musics')

    def convert(flac_path):
        logger.debug('Converting %s', flac_path)
        flac_audio = AudioSegment.from_file(flac_path, "flac")
        mp3_path = flac_path.replace('.flac', '.mp3')
        if not config.dry:
            flac_audio.export(mp3_path, format="mp3")
        else:
            logger.info("[DRY-RUN] Exporting from %s to %s", flac_path, mp3_path)
        if pbar:
            pbar.update(1)
    # Permit CTRL+C to work as intended
    atexit.unregister(_python_exit)  # pylint: disable=protected-access
    with cf.ThreadPoolExecutor(max_workers=concurrency) as executor:
        executor.shutdown = lambda wait: None
        futures = [executor.submit(convert, flac_path) for flac_path in flac_files]
        cf.wait(futures)
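`_python_exit` here is the private hook `concurrent.futures.thread` registers so that interpreter shutdown joins all worker threads; dropping it is what lets CTRL+C interrupt the pool. A hedged sketch of just that trick (the same one the websocket client in Example #41 uses):

import atexit
import concurrent.futures
from concurrent.futures import thread as cf_thread

# Private API: on Python <= 3.8 the executor module registers this hook
# with atexit; removing it stops shutdown from blocking on worker threads.
# On 3.9+ the hook moved to threading._register_atexit, so this line
# becomes a harmless no-op.
atexit.unregister(cf_thread._python_exit)

with concurrent.futures.ThreadPoolExecutor() as pool:
    list(pool.map(print, range(3)))  # consume the lazy iterator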
Example #7
def verify(TESTMODE):
    if not os.path.exists(SCRIPT_PARENT_DIR + "/shelllocker.conf"):
        print(ansi_colors.YELLOW + "shelllocker.conf not found" + ansi_colors.RESET)
        print(ansi_colors.YELLOW + "Run with '-s' flag to generate the file" + ansi_colors.RESET)
        atexit.unregister(invalidate)
        exit()
    f = open(os.path.dirname(SCRIPT_PATH) + "/shelllocker.conf" , 'r')
    DATA = f.read().replace(" " , "")
    f.close()
    # Check header:
    if DATA[0:34] != "7368656c6c6c6f636b6572686561646572":
        print(ansi_colors.RED + "Invalid configuration file!" + ansi_colors.RESET)
        print(ansi_colors.RED + "Fatal internal error occured" + ansi_colors.RESET)
        exit()
    DATA = DATA[34:]
    # First byte = length of username
    USER_LEN = int(DATA[0:2])
    DATA = DATA[2:]
    USERNAME = "".join(DATA[i:i+1] for i in range(0, USER_LEN * 2))
    PASSWORD = DATA[USER_LEN * 2:]

    user = binascii.hexlify(getpass.getuser().encode('utf-8')).decode()
    if user != USERNAME:
        print(ansi_colors.RED + "User not recognized!" + ansi_colors.RESET)
        invalidate()
    response = str(getpass.getpass("shell_auth: "))
    m = hashlib.md5()
    m.update(response.encode('utf-8'))
    if m.hexdigest() == PASSWORD:
        atexit.unregister(invalidate)
        exit()
    else:
        print("Incorrect.")
        blankshell(TESTMODE)
Example #8
def stop_bitwrk_client():
    global CLIENT_PROC
    if not bitwrk_client_alive():
        return
    p, CLIENT_PROC = CLIENT_PROC, None
    p.terminate()
    atexit.unregister(_exithandler)
Example #9
def __del__():
    for x in globals():
        if type(globals()[x])==type(__builtins__):
            if hasattr(globals()[x], "__del__"):
                globals()[x].__del__()
    for x in exit_handlers:
        atexit.unregister(x)
Example #10
	def run(self):
		self.isRunning = True
		readBuffer = ""
		while (self.isRunning):
			try:
				if (self.s != None):
					temp = self.s.recv(1024).decode("UTF-8")
					if (temp == ""):
						self.isRunning = False
						self.out("IRC client has stopped running...")
					else:
						readBuffer += temp
						temp = readBuffer.split("\n")
						readBuffer = temp.pop()
						for line in temp:
							self.handleTokens(self.makeTokens(line))
			except Exception:
				traceback.print_tb(sys.exc_info()[2])
		try:
			self.s.shutdown(socket.SHUT_RDWR)
		except Exception:
			traceback.print_tb(sys.exc_info()[2])
		sleep(0.5)

		self.out("Closing socket...")
		self.s.close()

		atexit.unregister(self.quit)
Example #11
def main(argv):
    p = argparse.ArgumentParser(description='Install DC/OS on OVH Cloud')
    p.add_argument('--url',       help='URL to dcos_generate_config.sh',
                   default='https://downloads.dcos.io/dcos/EarlyAccess/dcos_generate_config.sh')
    p.add_argument('--project',     help='OVH Cloud Project Name', required=True)
    p.add_argument('--flavor',      help='OVH Cloud Machine Type (default hg-15)', default='hg-15')
    p.add_argument('--image',       help='OVH Cloud OS Image (default Centos 7)', default='Centos 7')
    p.add_argument('--ssh-key',     help='OVH Cloud SSH Key Name', required=True)
    p.add_argument('--security',    help='Security mode (default permissive)', default='permissive')
    p.add_argument('--ssh-user',    help='SSH Username (default centos)', default='centos')
    p.add_argument('--ssh-port',    help='SSH Port (default 22)', default=22, type=int)
    p.add_argument('--region',      help='OVH Cloud Region (default SBG1)', default='SBG1')
    p.add_argument('--name',        help='OVH Cloud VM Instance Name(s)', default='Test')
    p.add_argument('--docker-size', help='Docker Disk Size in GiB (default 10G)', default=10, type=int)
    p.add_argument('--masters',     help='Number of Master Instances (default 1)', default=1, type=int)
    p.add_argument('--agents',      help='Number of Agent Instances (default 1)', default=1, type=int)
    p.add_argument('--pub-agents',  help='Number of Public Agent Instances (default 0)', default=0, type=int)
    p.add_argument('--no-cleanup',  help="Don't clean up Instances on EXIT", dest='cleanup', action='store_false',
                   default=True)
    p.add_argument('--no-error-cleanup', help="Don't clean up Instances on ERROR", dest='errclnup',
                   action='store_false', default=True)
    args = p.parse_args(argv)

    dcos = DCOSInstall(args, OVHInstances(args))
    dcos.deploy()

    if args.cleanup:
        input('Press Enter to DESTROY all instances...')
        if not args.errclnup:
            dcos.oi.cleanup()
    else:
        if args.errclnup:
            atexit.unregister(dcos.oi.cleanup)
    sys.exit(0)
Example #12
 def __del__(self):
     if self.windows:
         self.close()
         try:
             atexit.unregister(self.close)
         except:
             pass   
Example #13
    def close(self, flush=False):
        with self._global_lock:
            if self._dbm_shutdown.is_set():
                return
            else:
                self._dbm_shutdown.set()

            if hasattr(atexit, 'unregister'):
                atexit.unregister(self.close)
            else:
                try:
                    atexit._exithandlers.remove((self.close, (), {}))
                except ValueError:
                    pass

            if self.schema:
                # release all the failed sources waiting for restart
                self._event_queue.put(('localhost', (ShutdownException(), )))
                # release all the sources
                for target, source in self.sources.cache.items():
                    source.close(flush)
                # close the database
                self.schema.commit()
                self.schema.close()
                # shutdown the _dbm_thread
                self._event_queue.put(('localhost', (DBMExitException(), )))
                self._dbm_thread.join()
Example #14
 def atexit(self):
     atexit.unregister(self.atexit)
     for c in self._cli:
         try:
             c.shutdown()
         except:
             pass
Example #15
    def dump(cls):
        """Output all recorded metrics"""
        with cls.lock:
            if not cls.instances: return
            atexit.unregister(cls.dump)

            for self in cls.instances.values():
                self.fh.close()
Example #16
def stop_worker():
    global WORKER_PROC
    if not worker_alive():
        return
    print("Terminating worker")
    p, WORKER_PROC = WORKER_PROC, None
    p.terminate()
    atexit.unregister(_exithandler)
Example #17
    def stop_frame_grabber(self):
        if self.frame_grabber_process is None:
            return None

        self.frame_grabber_process.kill()
        self.frame_grabber_process = None

        atexit.unregister(self._handle_signal)
Example #18
 def _cleanup_atexit(self):
     if hasattr(atexit, 'unregister'):
         atexit.unregister(self.close)
     else:
         try:
             atexit._exithandlers.remove((self.close, (), {}))
         except ValueError:
             pass
Example #19
 def terminate(self):
     _logger().debug('terminating process')
     if self._process:
         self._process.terminate()
         self._process.kill()
     try:
         atexit.unregister(self.terminate)
     except ValueError:
         _logger().warn('failed to unregister atexit callback')
Example #20
def _cleanup(cov):
    if cov is not None:
        cov.stop()
        cov.save()
        cov._auto_save = False  # prevent autosaving from cov._atexit in case the interpreter lacks atexit.unregister
        try:
            atexit.unregister(cov._atexit)
        except Exception:
            pass
Example #21
    def test_bound_methods(self):
        l = []
        atexit.register(l.append, 5)
        atexit._run_exitfuncs()
        self.assertEqual(l, [5])

        atexit.unregister(l.append)
        atexit._run_exitfuncs()
        self.assertEqual(l, [5])
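This test (it mirrors the one in CPython's own atexit test suite) pins down why unregistering bound methods works: every attribute access builds a new method object, but they compare equal, and `atexit.unregister` matches handlers by equality, not identity. In short:

import atexit

nums = []
# Each attribute access builds a fresh bound-method object, but they
# compare equal, and unregister() matches handlers by ==:
assert nums.append == nums.append
atexit.register(nums.append, 5)
atexit.unregister(nums.append)  # found and removed despite being a
                                # different object than was registered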
Example #22
	def terminateProcess(self):
		self.log("Terminating nethack...")
		
		self.autoRestart = False
		
		self.terminalEmulatedProcess.terminate()
		self.terminalEmulatedProcess.logger = None
		
		atexit.unregister(self.terminateProcess)
Example #23
 def __del__(self):
     if hasattr(self, '_sdStream'):
         if not travisCI:
             self._sdStream.stop()
         del self._sdStream
     if hasattr(sys, 'stdout'):
         sys.stdout.flush()
     if PY3:
         atexit.unregister(self.__del__)
Example #24
    def cleanup(self):
        if not self.needs_cleanup:
            return

        self.needs_cleanup = False
        atexit.unregister(self._cleanup_fn)
        self.animation.cleanup()
        for c in self.controls:
            c.cleanup()
        self.layout.cleanup_drivers()
Example #25
    def close(self):
        '''Closes the database to avoid pickling writes to file'''

        if six.PY3 and self._forcesave:
            # unregister undefined in PY2
            atexit.unregister(self.save)

        self.path = None
        self.data = None
        self.registered = False
Example #26
 def run(self):
     try:
         while self.running:
             self.loop()
     # got to unregister the stop() method, otherwise that reference
     # to this thread object will stay around until interpreter exit, and thus
     # any resources held by the thread (i.e. ISM_Buffer-backed arrays) won't
     # be automatically released.
     finally:
         atexit.unregister(self.stop)
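The comment in this example is worth a concrete demonstration: registering a bound method hands atexit a strong reference to the instance, so the object (and anything it holds) cannot be collected until the handler is removed. A minimal sketch using a weak reference to observe the lifetime, assuming CPython, where reference counting frees the instance as soon as the registry entry goes:

import atexit
import gc
import weakref

class Worker:
    def stop(self):
        print("stopped")

w = Worker()
ref = weakref.ref(w)
atexit.register(w.stop)  # the bound method holds a strong ref to w
del w
gc.collect()
assert ref() is not None  # kept alive by atexit's registry

atexit.unregister(ref().stop)  # an equal bound method; handler removed
gc.collect()
assert ref() is None  # nothing references the instance any more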
Example #27
 def __init__(self, cachedir=None):
     self.cachedir = cachedir if cachedir is not None else os.path.dirname(__file__)
     self.cachefile = os.path.join(self.cachedir, 'cache.pickle')
     if os.path.exists(self.cachefile):
         print('loading {}'.format(self.cachefile))
         self._cache = pickle.load(open(self.cachefile, 'rb'))
     else:
         self._cache = dict()
     # TODO this is causing multiple runs on imp.reload
     atexit.unregister(self.save_cache)
     atexit.register(self.save_cache)
Example #28
    def close(self):
        if not self._is_open:
            return

        atexit.unregister(self.close)

        self._pidfile.release()
        self._pidfile = None

        self._close_sys_streams()

        self._is_open = False
Example #29
def unregister(func):
    """remove func from the list of functions that are registered
    doesn't do anything if func is not found

    func = function to be unregistered
    """
    if hasattr(atexit, "unregister"):
        atexit.unregister(func)
    else:
        handler_entries = [e for e in _exithandlers if e[0] == func]
        for e in handler_entries:
            _exithandlers.remove(e)
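Note that the Python 2 branch scans by the first tuple element: entries in `atexit._exithandlers` have the shape `(func, args, kwargs)`, so this removes every registration of `func` regardless of its arguments, while the exact-tuple removal in Examples #2, #13, and #18 only matches handlers registered with no arguments. A usage sketch for the shim (assuming `_exithandlers` was bound to `atexit._exithandlers` at import time on Python 2):

import atexit

def say_goodbye():
    print("goodbye")

atexit.register(say_goodbye)
unregister(say_goodbye)  # gone on Python 2 and 3 alike
# interpreter exit now prints nothing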
Example #30
File: run.py Project: ihptru/aamms
def exit():
    sys.stderr.write("Exiting ... ")
    parser.exit(True, True)
    sys.stderr.write("Ok \n")
    sys.stderr.write("Exiting server ... ")
    global p
    if p!=None:
        print("QUIT")
        p.wait()
    atexit.unregister(exit)
    global exitEvent
    exitEvent.set()
    sys.stderr.write("Done\n")
Example #31
    # PSI_SCRATCH must be set before this call!
    content = psi4.process_input(content)

# Handle Verbose
if args["verbose"]:
    psi4.core.print_out('\nParsed Psithon:')
    psi4.core.print_out(content)
    psi4.core.print_out('-' * 75)

# Handle Messy
_clean_functions = [psi4.core.clean, psi4.extras.clean_numpy_files]
if args["messy"]:

    if sys.version_info >= (3, 0):
        for func in _clean_functions:
            atexit.unregister(func)
    else:
        for handler in atexit._exithandlers:
            for func in _clean_functions:
                if handler[0] == func:
                    atexit._exithandlers.remove(handler)

# Register exit printing, failure GOTO coffee ELSE beer
atexit.register(psi4.extras.exit_printing, start_time)

# Run the program!
try:
    exec(content)
    psi4.extras._success_flag_ = True

# Capture _any_ python error message
Example #32
 def _handle_interrupt(self, signum, frame):
     log.warn("%s \nEnvironment: Caught interrupt" % self.short_id)
     atexit.unregister(self.close)
     self.close()
     exit(1)
Example #33
    parser.add_argument("--debug",
                        help="Verbose debug logging",
                        action="store_true")
    parser.add_argument("--logfile", help="Log file to log to")
    parser.add_argument(
        "--rpccookiefile",
        help="Cookie file for Bitcoin Core RPC creds",
        default="~/.bitcoin/.cookie",
    )
    args = parser.parse_args()

    # Set logging level
    loglevel = logging.DEBUG if args.debug else logging.INFO
    LOG.setLevel(loglevel)

    # Set the log file
    if args.logfile is not None:
        LOG.removeHandler(file_handler)
        logfile_handler = logging.FileHandler(filename=args.logfile)
        LOG.addHandler(logfile_handler)

    try:
        while True:
            w = Watcher(args.url, args.userpass, args.rpccookiefile)
            atexit.register(w.close)
            w.get_stratum_work()
            atexit.unregister(w.close)
    except KeyboardInterrupt:
        # When receiving a keyboard interrupt, do nothing and let atexit clean things up
        pass
Example #34
 def __del__(self):
     atexit.unregister(self._executor.shutdown)
Example #35
 def close(self, *args, **kwargs):
     self.clean_docker()
     self.clean_docker_network()
     out = super(DockerFactory, self).close(*args, **kwargs)
     atexit.unregister(self.run_cleanup_cmd)
     return out
Example #36
def main(args=None):

    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)
    setup_logging(options)

    if options.max_obj_size < 1024:
        # This warning should never be converted to an exception
        log.warning(
            'Warning: maximum object sizes less than 1 MiB will seriously degrade '
            'performance.',
            extra={'force_log': True})

    plain_backend = get_backend(options, raw=True)
    atexit.register(plain_backend.close)

    log.info(
        "Before using S3QL, make sure to read the user's guide, especially\n"
        "the 'Important Rules to Avoid Loosing Data' section.")

    if isinstance(plain_backend,
                  s3.Backend) and '.' in plain_backend.bucket_name:
        log.warning(
            '***Warning*** S3 Buckets with names containing dots cannot be '
            'accessed using SSL!')
        log.warning(
            '(cf. https://forums.aws.amazon.com/thread.jspa?threadID=130560)')

    if 's3ql_metadata' in plain_backend:
        if not options.force:
            raise QuietError(
                "Found existing file system! Use --force to overwrite")

        log.info('Purging existing file system data..')
        plain_backend.clear()
        log.info(
            'Please note that the new file system may appear inconsistent\n'
            'for a while until the removals have propagated through the backend.'
        )

    if not options.plain:
        if sys.stdin.isatty():
            wrap_pw = getpass("Enter encryption password: "******"Confirm encryption password: "******"Passwords don't match.")
        else:
            wrap_pw = sys.stdin.readline().rstrip()
        wrap_pw = wrap_pw.encode('utf-8')

        # Generate data encryption passphrase
        log.info('Generating random encryption key...')
        fh = open('/dev/random', "rb", 0)  # No buffering
        data_pw = fh.read(32)
        fh.close()

        backend = ComprencBackend(wrap_pw, ('lzma', 2), plain_backend)
        backend['s3ql_passphrase'] = data_pw
        backend['s3ql_passphrase_bak1'] = data_pw
        backend['s3ql_passphrase_bak2'] = data_pw
        backend['s3ql_passphrase_bak3'] = data_pw
    else:
        data_pw = None

    backend = ComprencBackend(data_pw, ('lzma', 2), plain_backend)
    atexit.unregister(plain_backend.close)
    atexit.register(backend.close)

    # Setup database
    cachepath = get_backend_cachedir(options.storage_url, options.cachedir)

    # There can't be a corresponding backend, so we can safely delete
    # these files.
    if os.path.exists(cachepath + '.db'):
        os.unlink(cachepath + '.db')
    if os.path.exists(cachepath + '-cache'):
        shutil.rmtree(cachepath + '-cache')

    log.info('Creating metadata tables...')
    db = Connection(cachepath + '.db')
    create_tables(db)
    init_tables(db)

    param = dict()
    param['revision'] = CURRENT_FS_REV
    param['seq_no'] = 1
    param['label'] = options.label
    param['max_obj_size'] = options.max_obj_size * 1024
    param['needs_fsck'] = False
    param['inode_gen'] = 0
    param['max_inode'] = db.get_val('SELECT MAX(id) FROM inodes')
    param['last_fsck'] = time.time()
    param['last-modified'] = time.time()

    log.info('Dumping metadata...')
    dump_and_upload_metadata(backend, db, param)
    backend.store('s3ql_seq_no_%d' % param['seq_no'], b'Empty')
    with open(cachepath + '.params', 'wb') as fh:
        fh.write(freeze_basic_mapping(param))

    if data_pw is not None:
        print(
            'Please store the following master key in a safe location. It allows ',
            'decryption of the S3QL file system in case the storage objects holding ',
            'this information get corrupted:',
            '---BEGIN MASTER KEY---',
            ' '.join(split_by_n(b64encode(data_pw).decode(), 4)),
            '---END MASTER KEY---',
            sep='\n')
Example #37
def load_modc(path, from_format=None):
    """Load models from a .modc file for inference

    Parameters
    ----------
    path: str or path-like
        Path to a .modc file
    from_format: str
        If set to None, the first available format in
        :func:`BaseModel.all_formats` is used. If set to
        a key in :func:`BaseModel.all_formats`, then this
        format will take precedence and an error will
        be raised if loading with this format fails.

    Returns
    -------
    model: list of dclab.rtdc_dataset.feat_anc_ml.ml_model.BaseModel
        Models that can be used for inference via `model.predict`
    """
    # unpack everything
    t_dir = pathlib.Path(tempfile.mkdtemp(prefix="modc_load_"))
    cleanup = atexit.register(lambda: shutil.rmtree(t_dir, ignore_errors=True))
    shutil.unpack_archive(path, t_dir, format="zip")

    # Get the metadata
    with (t_dir / "index.json").open("r") as fd:
        meta = json.load(fd)

    assert meta["model count"] == len(meta["models"])
    supported_formats = BaseModel.all_formats()
    dc_models = []
    for model_dict in meta["models"]:
        mpath = t_dir / model_dict["path"]

        formats = list(model_dict["formats"].keys())
        if from_format:
            formats = [from_format] + formats

        for fmt in formats:
            if fmt in supported_formats:
                cls = supported_formats[fmt]["class"]
                load = cls.load_bare_model
                try:
                    bare_model = load(mpath / model_dict["formats"][fmt])
                except BaseException:
                    if from_format and fmt == from_format:
                        # user requested this format explicitly
                        raise
                else:
                    # load `bare_model` into BaseModel
                    model = cls(
                        bare_model=bare_model,
                        inputs=model_dict["input features"],
                        outputs=model_dict["output features"],
                        info=model_dict,
                    )
                    break
            elif from_format and fmt == from_format:
                raise ValueError("The format specified via `from_format` " +
                                 " '{}' is not supported!".format(fmt))
        else:
            raise ValueError("No compatible model file format found!")
        dc_models.append(model)

    # We are nice and do the cleanup before exit
    cleanup()
    atexit.unregister(cleanup)

    return dc_models
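The register-early, unregister-on-success shape above is a general safety net: the temporary directory survives only as long as loading might still fail. Note that `atexit.register` returns its argument, which is what makes `cleanup` both callable and unregisterable; a stripped-down sketch of the same idea (names are illustrative):

import atexit
import shutil
import tempfile

tmp_dir = tempfile.mkdtemp(prefix="sketch_")
# register() returns the callable, so keeping the result gives a handle
# that can be called directly and unregistered later; the lambda also
# makes the unregister precise, since it compares equal only to itself:
cleanup = atexit.register(lambda: shutil.rmtree(tmp_dir, ignore_errors=True))

# ... work with tmp_dir; if anything raises, atexit still removes it ...

cleanup()                   # be nice and clean up before exit
atexit.unregister(cleanup)  # don't run it again at interpreter exit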
Example #38
 def on_signal():
     asyncio.ensure_future(self._async_cleanup_kernel())
     atexit.unregister(self._cleanup_kernel)
Example #39
    def _terminate(self):
        """
        This method actually terminates the started daemon.
        """
        # We completely override the parent class method because we're not using the
        # self._terminal property, it's a systemd service
        if self._process is None:  # pragma: no cover
            if TYPE_CHECKING:
                # Make mypy happy
                assert self._terminal_result
            return self._terminal_result  # pylint: disable=access-member-before-definition

        atexit.unregister(self.terminate)
        log.info("Stopping %s", self.factory)
        pid = self.pid
        # Collect any child processes information before terminating the process
        with contextlib.suppress(psutil.NoSuchProcess):
            for child in psutil.Process(pid).children(recursive=True):
                if child not in self._children:  # pylint: disable=access-member-before-definition
                    self._children.append(child)  # pylint: disable=access-member-before-definition

        if self._process.is_running():  # pragma: no cover
            cmdline = self._process.cmdline()
        else:
            # The main pid is not longer alive, try to get the cmdline from systemd
            ret = self._internal_run("systemctl", "show", "-p", "ExecStart",
                                     self.get_service_name())
            cmdline = ret.stdout.split("argv[]=")[-1].split(
                ";")[0].strip().split()

        # Tell systemd to stop the service
        self._internal_run("systemctl", "stop", self.get_service_name())

        if self._process.is_running():  # pragma: no cover
            cmdline = self._process.cmdline()
            try:
                self._process.wait()
            except psutil.TimeoutExpired:
                self._process.terminate()
                try:
                    self._process.wait()
                except psutil.TimeoutExpired:
                    pass

        exitcode = self._process.wait() or 0

        # Dereference the internal _process attribute
        self._process = None
        # Lets log and kill any child processes left behind, including the main subprocess
        # if it failed to properly stop
        terminate_process(
            pid=pid,
            kill_children=True,
            children=self._children,  # pylint: disable=access-member-before-definition
            slow_stop=self.factory.slow_stop,
        )

        if self._terminal_stdout is not None:
            self._terminal_stdout.close()  # pylint: disable=access-member-before-definition
        if self._terminal_stderr is not None:
            self._terminal_stderr.close()  # pylint: disable=access-member-before-definition
        stdout = ""
        ret = self._internal_run("journalctl", "--no-pager", "-u",
                                 self.get_service_name())
        stderr = ret.stdout
        try:
            self._terminal_result = ProcessResult(returncode=exitcode,
                                                  stdout=stdout,
                                                  stderr=stderr,
                                                  cmdline=cmdline)
            log.info("%s %s", self.factory.__class__.__name__,
                     self._terminal_result)
            return self._terminal_result
        finally:
            self._terminal = None
            self._terminal_stdout = None
            self._terminal_stderr = None
            self._terminal_timeout = None
            self._children = []
Example #40
 def close(self):
     if not self._closed:
         if six.PY3:
             atexit.unregister(self.close)
         self._closed = True
Example #41
                response = await websocket.recv()
                print(response)
            else:
                await asyncio.sleep(1)


def shutdown(executor: ThreadPoolExecutor):
    for task in asyncio.all_tasks():
        if not task.done():
            task.cancel()
    executor.shutdown(wait=False)


def main(url: str):
    loop = asyncio.get_event_loop()
    try:
        executor = ThreadPoolExecutor()
        loop.add_signal_handler(signal.SIGINT, shutdown, executor)
        loop.run_until_complete(client(loop, executor, url))
    except CancelledError:
        pass
    finally:
        loop.close()


if __name__ == "__main__":
    # See: https://gist.github.com/yeraydiazdiaz/b8c059c6dcfaf3255c65806de39175a7
    atexit.unregister(thread._python_exit)  # pylint: disable=protected-access

    main(sys.argv[1])
Example #42
def main() -> None:
    # Check if at least one argument was passed
    if len(sys.argv) < 2:
        print_usage()
        exit(1)

    try:
        config: dict[str, Any] = parse_config(find_config_file())

    except ConfigError as e:
        print(f"Error: {e}")
        exit(1)

    # Remove last char from downloads folder if it's '/'
    if config["downloads"]["folder"][-1:] == '/':
        config["downloads"]["folder"] = config["downloads"]["folder"][:-1]

    download_path: str
    download_to_temp: bool

    # Set download path to temp folder if the "zip" setting is used
    if config["downloads"]["zip"]:
        download_path = TEMP_FOLDER_PATH
        download_to_temp = True

    else:
        download_path = config["downloads"]["folder"]
        download_to_temp = False

    try:
        playlist_downloader = PlaylistDownloader(config["ffmpeg"]["path"],
                                                 config["ffmpeg"]["args"])

    except FFmpegNotFound as e:
        print(f"Error: {e}")
        exit(1)

    for url in sys.argv[1:]:
        try:
            print(f"\nScraping {url}...")
            movie_data: MovieData = iSubRip.find_m3u8_playlist(
                url, config["downloads"]["user-agent"])
            print(f"Found movie: {movie_data.name}\n")

            if movie_data.playlist is None:
                print(
                    f"Error: Main m3u8 playlist could not be found / downloaded."
                )
                continue

            current_download_path: str

            # Create temp folder
            if download_to_temp:
                current_download_path = os.path.join(
                    download_path, f"{format_title(movie_data.name)}.iT.WEB")
                os.makedirs(current_download_path, exist_ok=True)
                atexit.register(shutil.rmtree, current_download_path)

            else:
                current_download_path = download_path

            downloaded_subtitles: list = []

            for subtitles in iSubRip.find_matching_subtitles(
                    movie_data.playlist, config["downloads"]["filter"]):
                print(
                    f"Downloading \"{subtitles.language_name}\" ({subtitles.language_code}) subtitles..."
                )
                file_name = format_file_name(movie_data.name,
                                             subtitles.language_code,
                                             subtitles.subtitles_type)

                # Download subtitles
                downloaded_subtitles.append(
                    playlist_downloader.download_subtitles(
                        subtitles.playlist_url, current_download_path,
                        file_name, config["downloads"]["format"]))

            if download_to_temp:
                if len(downloaded_subtitles) == 1:
                    shutil.move(downloaded_subtitles[0],
                                config["downloads"]["folder"])

                elif len(downloaded_subtitles) > 1:
                    # Create zip archive
                    print(f"Creating zip archive...")
                    archive_name = f"{format_title(movie_data.name)}.iT.WEB.zip"
                    archive_path = os.path.join(current_download_path,
                                                archive_name)

                    zf = zipfile.ZipFile(archive_path,
                                         compression=zipfile.ZIP_DEFLATED,
                                         mode='w')

                    for file in downloaded_subtitles:
                        zf.write(file, os.path.basename(file))

                    zf.close()
                    shutil.move(archive_path, config["downloads"]["folder"])

                # Remove current temp dir
                shutil.rmtree(current_download_path)
                atexit.unregister(shutil.rmtree)

            print(
                f"\n{len(downloaded_subtitles)} matching subtitles for \"{movie_data.name}\" were found and downloaded to \"{os.path.abspath(config['downloads']['folder'])}\"."
            )

        except Exception as e:
            print(f"Error: {e}\nSkipping...")
            continue
Example #43
 def close(self):
     atexit.unregister(self.cleanup)
     self.cleanup(in_background=True)
Example #44
 def disable_watchdog(self):
     atexit.unregister(self.watchdog_alert)
Example #45
def coldcard_test_suite(simulator, rpc, userpass, interface):
    try:
        os.unlink('coldcard-emulator.stdout')
    except FileNotFoundError:
        pass
    coldcard_log = open('coldcard-emulator.stdout', 'a')
    # Start the Coldcard simulator
    coldcard_proc = subprocess.Popen(
        ['python3', os.path.basename(simulator), '--ms'],
        cwd=os.path.dirname(simulator),
        stdout=coldcard_log,
        preexec_fn=os.setsid)
    # Wait for simulator to be up
    while True:
        try:
            enum_res = process_commands(['enumerate'])
            found = False
            for dev in enum_res:
                if dev['type'] == 'coldcard' and 'error' not in dev:
                    found = True
                    break
            if found:
                break
        except Exception:
            pass
        time.sleep(0.5)
    # Cleanup

    def cleanup_simulator():
        if coldcard_proc.poll() is None:
            os.killpg(os.getpgid(coldcard_proc.pid), signal.SIGTERM)
            os.waitpid(os.getpgid(coldcard_proc.pid), 0)
        coldcard_log.close()

    atexit.register(cleanup_simulator)

    # Coldcard specific management command tests
    class TestColdcardManCommands(DeviceTestCase):
        def test_setup(self):
            result = self.do_command(self.dev_args + ['-i', 'setup'])
            self.assertIn('error', result)
            self.assertIn('code', result)
            self.assertEqual(result['error'],
                             'The Coldcard does not support software setup')
            self.assertEqual(result['code'], -9)

        def test_wipe(self):
            result = self.do_command(self.dev_args + ['wipe'])
            self.assertIn('error', result)
            self.assertIn('code', result)
            self.assertEqual(
                result['error'],
                'The Coldcard does not support wiping via software')
            self.assertEqual(result['code'], -9)

        def test_restore(self):
            result = self.do_command(self.dev_args + ['-i', 'restore'])
            self.assertIn('error', result)
            self.assertIn('code', result)
            self.assertEqual(
                result['error'],
                'The Coldcard does not support restoring via software')
            self.assertEqual(result['code'], -9)

        def test_backup(self):
            result = self.do_command(self.dev_args + ['backup'])
            self.assertTrue(result['success'])
            for filename in glob.glob("backup-*.7z"):
                os.remove(filename)

        def test_pin(self):
            result = self.do_command(self.dev_args + ['promptpin'])
            self.assertIn('error', result)
            self.assertIn('code', result)
            self.assertEqual(
                result['error'],
                'The Coldcard does not need a PIN sent from the host')
            self.assertEqual(result['code'], -9)

            result = self.do_command(self.dev_args + ['sendpin', '1234'])
            self.assertIn('error', result)
            self.assertIn('code', result)
            self.assertEqual(
                result['error'],
                'The Coldcard does not need a PIN sent from the host')
            self.assertEqual(result['code'], -9)

    class TestColdcardGetXpub(DeviceTestCase):
        def test_getxpub(self):
            result = self.do_command(self.dev_args +
                                     ['--expert', 'getxpub', 'm/44h/0h/0h/3'])
            self.assertEqual(
                result['xpub'],
                'tpubDFHiBJDeNvqPWNJbzzxqDVXmJZoNn2GEtoVcFhMjXipQiorGUmps3e5ieDGbRrBPTFTh9TXEKJCwbAGW9uZnfrVPbMxxbFohuFzfT6VThty'
            )
            self.assertTrue(result['testnet'])
            self.assertFalse(result['private'])
            self.assertEqual(result['depth'], 4)
            self.assertEqual(result['parent_fingerprint'], 'bc123c3e')
            self.assertEqual(result['child_num'], 3)
            self.assertEqual(
                result['chaincode'],
                '806b26507824f73bc331494afe122f428ef30dde80b2c1ce025d2d03aff411e7'
            )
            self.assertEqual(
                result['pubkey'],
                '0368000bdff5e0b71421c37b8514de8acd4d98ba9908d183d9da56d02ca4fcfd08'
            )

    # Generic device tests
    suite = unittest.TestSuite()
    suite.addTest(
        DeviceTestCase.parameterize(TestColdcardManCommands,
                                    rpc,
                                    userpass,
                                    'coldcard',
                                    'coldcard',
                                    '/tmp/ckcc-simulator.sock',
                                    '0f056943',
                                    '',
                                    interface=interface))
    suite.addTest(
        DeviceTestCase.parameterize(
            TestColdcardGetXpub,
            rpc,
            userpass,
            'coldcard',
            'coldcard',
            '/tmp/ckcc-simulator.sock',
            '0f056943',
            'tpubDDpWvmUrPZrhSPmUzCMBHffvC3HyMAPnWDSAQNBTnj1iZeJa7BZQEttFiP4DS4GCcXQHezdXhn86Hj6LHX5EDstXPWrMaSneRWM8yUf6NFd',
            interface=interface))
    suite.addTest(
        DeviceTestCase.parameterize(
            TestDeviceConnect,
            rpc,
            userpass,
            'coldcard',
            'coldcard',
            '/tmp/ckcc-simulator.sock',
            '0f056943',
            'tpubDDpWvmUrPZrhSPmUzCMBHffvC3HyMAPnWDSAQNBTnj1iZeJa7BZQEttFiP4DS4GCcXQHezdXhn86Hj6LHX5EDstXPWrMaSneRWM8yUf6NFd',
            interface=interface))
    suite.addTest(
        DeviceTestCase.parameterize(
            TestDeviceConnect,
            rpc,
            userpass,
            'coldcard_simulator',
            'coldcard',
            '/tmp/ckcc-simulator.sock',
            '0f056943',
            'tpubDDpWvmUrPZrhSPmUzCMBHffvC3HyMAPnWDSAQNBTnj1iZeJa7BZQEttFiP4DS4GCcXQHezdXhn86Hj6LHX5EDstXPWrMaSneRWM8yUf6NFd',
            interface=interface))
    suite.addTest(
        DeviceTestCase.parameterize(
            TestGetDescriptors,
            rpc,
            userpass,
            'coldcard',
            'coldcard',
            '/tmp/ckcc-simulator.sock',
            '0f056943',
            'tpubDDpWvmUrPZrhSPmUzCMBHffvC3HyMAPnWDSAQNBTnj1iZeJa7BZQEttFiP4DS4GCcXQHezdXhn86Hj6LHX5EDstXPWrMaSneRWM8yUf6NFd',
            interface=interface))
    suite.addTest(
        DeviceTestCase.parameterize(
            TestGetKeypool,
            rpc,
            userpass,
            'coldcard',
            'coldcard',
            '/tmp/ckcc-simulator.sock',
            '0f056943',
            'tpubDDpWvmUrPZrhSPmUzCMBHffvC3HyMAPnWDSAQNBTnj1iZeJa7BZQEttFiP4DS4GCcXQHezdXhn86Hj6LHX5EDstXPWrMaSneRWM8yUf6NFd',
            interface=interface))
    suite.addTest(
        DeviceTestCase.parameterize(
            TestDisplayAddress,
            rpc,
            userpass,
            'coldcard',
            'coldcard',
            '/tmp/ckcc-simulator.sock',
            '0f056943',
            'tpubDDpWvmUrPZrhSPmUzCMBHffvC3HyMAPnWDSAQNBTnj1iZeJa7BZQEttFiP4DS4GCcXQHezdXhn86Hj6LHX5EDstXPWrMaSneRWM8yUf6NFd',
            interface=interface))
    suite.addTest(
        DeviceTestCase.parameterize(
            TestSignMessage,
            rpc,
            userpass,
            'coldcard',
            'coldcard',
            '/tmp/ckcc-simulator.sock',
            '0f056943',
            'tpubDDpWvmUrPZrhSPmUzCMBHffvC3HyMAPnWDSAQNBTnj1iZeJa7BZQEttFiP4DS4GCcXQHezdXhn86Hj6LHX5EDstXPWrMaSneRWM8yUf6NFd',
            interface=interface))
    suite.addTest(
        DeviceTestCase.parameterize(
            TestSignTx,
            rpc,
            userpass,
            'coldcard',
            'coldcard',
            '/tmp/ckcc-simulator.sock',
            '0f056943',
            'tpubDDpWvmUrPZrhSPmUzCMBHffvC3HyMAPnWDSAQNBTnj1iZeJa7BZQEttFiP4DS4GCcXQHezdXhn86Hj6LHX5EDstXPWrMaSneRWM8yUf6NFd',
            interface=interface))

    result = unittest.TextTestRunner(stream=sys.stdout, verbosity=2).run(suite)
    cleanup_simulator()
    atexit.unregister(cleanup_simulator)
    return result.wasSuccessful()
Example #46
def post_worker_init(worker):
    atexit.unregister(_exit_function)
Example #47
# Check environment variables
secret_namespace = os.getenv('SECRET_NAMESPACE')
if not secret_namespace:
    sys.exit('ERROR: variable SECRET_NAMESPACE is required.')

secret_name = os.getenv('SECRET_NAME')
if not secret_name:
    sys.exit('ERROR: variable SECRET_NAME is required.')

# The callback function to check if the secret event received has secret data
def secret_event_callback(event):
    try:
        secret_data = event['raw_object'].get('data')
        if not secret_data:
            return False   
        return True
    except Exception as e:
        sys.exit("Error occured while processing secret event: " + str(e))

# Checking if the kubernetes secret has secret data
timeout = int(os.getenv('TIMEOUT')) if os.getenv('TIMEOUT') else 300
api_instance = client.CoreV1Api()
watch_kubernetes_secret(api_instance, secret_namespace, secret_name, timeout, secret_event_callback)
print("The secret data was retrieved successfully.")

# Exit with success
EXIT_CODE = 0
atexit.unregister(save_results)
atexit.register(save_results, RESULTS_TAR_FILENAME, RESULTS_DIR, RESULT_FILE, DONE_FILE, EXIT_CODE, test_cases)
Example #48
 def unregister_shutdown(self):
     atexit.unregister(_client_shutdown_hook)
Example #49
def jade_test_suite(emulator, rpc, userpass, interface):

    # Jade specific disabled command tests
    class TestJadeDisabledCommands(DeviceTestCase):
        def test_pin(self):
            result = self.do_command(self.dev_args + ['promptpin'])
            self.assertIn('error', result)
            self.assertIn('code', result)
            self.assertEqual(result['error'], 'Blockstream Jade does not need a PIN sent from the host')
            self.assertEqual(result['code'], -9)

            result = self.do_command(self.dev_args + ['sendpin', '1234'])
            self.assertIn('error', result)
            self.assertIn('code', result)
            self.assertEqual(result['error'], 'Blockstream Jade does not need a PIN sent from the host')
            self.assertEqual(result['code'], -9)

        def test_setup(self):
            result = self.do_command(self.dev_args + ['-i', 'setup'])
            self.assertIn('error', result)
            self.assertIn('code', result)
            self.assertEqual(result['error'], 'Blockstream Jade does not support software setup')
            self.assertEqual(result['code'], -9)

        def test_wipe(self):
            result = self.do_command(self.dev_args + ['wipe'])
            self.assertIn('error', result)
            self.assertIn('code', result)
            self.assertEqual(result['error'], 'Blockstream Jade does not support wiping via software')
            self.assertEqual(result['code'], -9)

        def test_restore(self):
            result = self.do_command(self.dev_args + ['-i', 'restore'])
            self.assertIn('error', result)
            self.assertIn('code', result)
            self.assertEqual(result['error'], 'Blockstream Jade does not support restoring via software')
            self.assertEqual(result['code'], -9)

        def test_backup(self):
            result = self.do_command(self.dev_args + ['backup'])
            self.assertIn('error', result)
            self.assertIn('code', result)
            self.assertEqual(result['error'], 'Blockstream Jade does not support creating a backup via software')
            self.assertEqual(result['code'], -9)

    class TestJadeGetXpub(DeviceTestCase):
        def setUp(self):
            self.dev_args.remove("--chain")
            self.dev_args.remove("test")

        def test_getexpertxpub(self):
            result = self.do_command(self.dev_args + ['--expert', 'getxpub', 'm/44h/0h/0h/3'])
            self.assertEqual(result['xpub'], 'xpub6EZPQwwr93eGRt5uAN8fqNpLWtgoWM4Cn96Y7XERhRBaXus5FjuTpgGBWuvuAXp1PhYBfp7h7C7HPyuRvCyyc6wBAK7PC1Z1JVEGBnrZUXi')
            self.assertFalse(result['testnet'])
            self.assertFalse(result['private'])
            self.assertEqual(result['depth'], 4)
            self.assertEqual(result['parent_fingerprint'], '8b878d56')
            self.assertEqual(result['child_num'], 3)
            self.assertEqual(result['chaincode'], '7f6e61a651f74388da6a741be38aaf223e849ab5a677220dee113c34c51028b3')
            self.assertEqual(result['pubkey'], '03f99c7114dd0418434585410e11648ec202817dcba5551d7a5ab1d3f93a2aad2e')

    # Because Jade has some restrictions about what multisigs it supports, we run
    # explicit multisig-address tests, rather than using the standard/provided ones.
    class TestJadeGetMultisigAddresses(DeviceTestCase):
        # NOTE: These ones are present in Jade's own unit tests
        # Jade test case: test_data/multisig_reg_ss_p2sh.json
        def test_getp2sh(self):
            descriptor_param = '--desc=sh(multi(2,[1273da33/44/0h/0h]tpubDDCNstnPhbdd4vwbw5UWK3vRQSF1WXQkvBHpNXpKJAkwFYjwu735EH3GVf53qwbWimzewDUv68MUmRDgYtQ1AU8FRCPkazfuaBp7LaEaohG/3/1/11/12,[e3ebcc79/3h/1h/1]tpubDDExQpZg2tziZ7ACSBCYsY3rYxAZtTRBgWwioRLYqgNBguH6rMHN1D8epTxUQUB5kM5nxkEtr2SNic6PJLPubcGMR6S2fmDZTzL9dHpU7ka/1/3/4/5))'
            result = self.do_command(self.dev_args + ['displayaddress', descriptor_param])
            self.assertEqual(result['address'], '2N2K6xGHeEYNaKEHZSBVy33g2GfUdFwJr2A')

        # Jade test case: test_data/multisig_reg_ss_wsh_sorted.json
        def test_get_sorted_p2wsh(self):
            descriptor_param = '--desc=wsh(sortedmulti(2,[1273da33/44/0h/0h]tpubDDCNstnPhbdd4vwbw5UWK3vRQSF1WXQkvBHpNXpKJAkwFYjwu735EH3GVf53qwbWimzewDUv68MUmRDgYtQ1AU8FRCPkazfuaBp7LaEaohG/3/1/0/1,[e3ebcc79/3h/1h/1]tpubDDExQpZg2tziZ7ACSBCYsY3rYxAZtTRBgWwioRLYqgNBguH6rMHN1D8epTxUQUB5kM5nxkEtr2SNic6PJLPubcGMR6S2fmDZTzL9dHpU7ka/1/0/16))'
            result = self.do_command(self.dev_args + ['displayaddress', descriptor_param])
            self.assertEqual(result['address'], 'tb1qx474um8lr97sn46wz9v90u6qs49ttrhdze2cfxa5x4tajvl62avshcqval')

        # Jade test case: test_data/multisig_reg_ss_matches_ga_2of3.json
        def test_getp2shwsh(self):
            descriptor_param = '--desc=sh(wsh(multi(2,[b5281696/3]tpubECMbgHMZm4QymrFbuEntaWbSsaaKvJVpoR6rhVwxGUrT9qf6WLfC88piWYVsQDV86PUKx3QsXYiCqugWX1oz8ekucUdFUdupuNCgjf17EYK/13,[e3ebcc79/3h/2h/1]tpubDD8fpYqWy6DEvbqdj9CVWptA3gd3xqarNN6wCAjfDs1sFnd8zfb9SeDzRAXA3S4eeeYvo2sz6mbyS3KaXuDe5PcWy94PqShTpBjiJN198A6/13,[1273da33/1]tpubD8PzcLmm1rVeUpEjmd2kQD6a9DXy6dwVVgE14mrh1zc6L98nmNqmDaApAbEcbrJ1iPBpo2gnEniSpVXHgovU8ecWwfHVP113DK2bWEMPpEs/13)))'
            result = self.do_command(self.dev_args + ['displayaddress', descriptor_param])
            self.assertEqual(result['address'], '2NFgE1k4EpyiBTN4SBkGamFU9E1DwLLajLo')

        # NOTE: these ones are used by the sign-tx tests - using them here gets
        # them 'registered' on the Jade hw - This is not mandatory but means in
        # sign-tx we should be using them to auto-validate the change outputs.
        SIGN_TX_MULTI_DESCRIPTOR = 'multi(2,[1273da33/48h/1h/2h/0h]tpubDE5KhdeLh956ERiopzHRskaJ3huWXLUPKiQUSkR3R3nTsr4SQfVVU6DbA9E66BZYwTk87hwE7wn1175WqBzMsbkFErGt3ATJm2xaisCPUmn/0/1,[1273da33/48h/1h/0h/0h]tpubDEAjmvwVDj4aNW8D1KX39VmMW1ZUX8BNgVEyD6tUVshZYCJQvbp9LSqvihiJa4tGZUisf6XpyZHg76dDBxNZLHTf6xYwgbio4Xnj6i21JgN/0/1,[1273da33/48h/1h/1h/0h]tpubDERHGgfviqDnoRSykG1YBBfhFbgNPuTeWvjwJBXM36d5wzFwkQpWFXHC76XW99hMgd1NkR6A3rRHM93Njqdx2X3KoUebekokUPsAvmeC4NE/0/1)'
        SIGN_TX_SORTEDMULTI_DESCRIPTOR = f'sorted{SIGN_TX_MULTI_DESCRIPTOR}'

        def test_get_signing_p2sh(self):
            descriptor_param = f'--desc=sh({self.SIGN_TX_MULTI_DESCRIPTOR})'
            result = self.do_command(self.dev_args + ['displayaddress', descriptor_param])
            self.assertEqual(result['address'], '2N4v6yct4NHy7FLex1wgwksLG97dXSTEs9x', result)

            descriptor_param = f'--desc=sh({self.SIGN_TX_SORTEDMULTI_DESCRIPTOR})'
            result = self.do_command(self.dev_args + ['displayaddress', descriptor_param])
            self.assertEqual(result['address'], '2MwZVj4Sbjn3ewD87ZoDgWrKFNjav4uPJm9', result)

        def test_get_signing_p2wsh(self):
            descriptor_param = f'--desc=wsh({self.SIGN_TX_MULTI_DESCRIPTOR})'
            result = self.do_command(self.dev_args + ['displayaddress', descriptor_param])
            self.assertEqual(result['address'], 'tb1qxjxvxk69yedt49u2djh8mu9lsmw6tf4n2pwuqend5tjlqrumuq2skh7qzc', result)

            descriptor_param = f'--desc=wsh({self.SIGN_TX_SORTEDMULTI_DESCRIPTOR})'
            result = self.do_command(self.dev_args + ['displayaddress', descriptor_param])
            self.assertEqual(result['address'], 'tb1qeuh6d9eg28aqy5ckrqxn868saapcr6kg968prm6999gclkr4ewqsv22prt', result)

        def test_get_signing_p2shwsh(self):
            descriptor_param = f'--desc=sh(wsh({self.SIGN_TX_MULTI_DESCRIPTOR}))'
            result = self.do_command(self.dev_args + ['displayaddress', descriptor_param])
            self.assertEqual(result['address'], '2N6wdpQsBUvT3nAp8hGaXEFpfvzerqaHFVC', result)

            descriptor_param = f'--desc=sh(wsh({self.SIGN_TX_SORTEDMULTI_DESCRIPTOR}))'
            result = self.do_command(self.dev_args + ['displayaddress', descriptor_param])
            self.assertEqual(result['address'], '2NAXBEePa5ebo1zTDrtQ9C21QDkkamwczfQ', result)

    full_type = 'jade'
    device_model = JADE_MODEL
    path = JADE_PATH
    master_xpub = 'xpub6CYWf8Kf1MXHij4KJjjtkNgQxJufSmAoyrmGuiGWvjXHSpak638GrmgWZqiem339nuHf2xuCmEVmmnXDmskEjB7QdZGW2HdiBUnoEAwV1q2'
    fingerprint = '1273da33'
    dev_emulator = JadeEmulator(emulator)
    dev_emulator.start()
    atexit.register(dev_emulator.stop)

    signtx_cases = [
        (["legacy"], True, True, True),
        (["segwit"], True, True, True),
        (["legacy", "segwit"], True, True, True),
    ]

    # Generic Device tests
    suite = unittest.TestSuite()
    suite.addTest(DeviceTestCase.parameterize(TestJadeDisabledCommands, rpc, userpass, device_model, full_type, path, fingerprint, master_xpub, interface=interface))
    suite.addTest(DeviceTestCase.parameterize(TestDeviceConnect, rpc, userpass, device_model, full_type, path, fingerprint, master_xpub, interface=interface))
    suite.addTest(DeviceTestCase.parameterize(TestJadeGetXpub, rpc, userpass, device_model, full_type, path, fingerprint, master_xpub, interface=interface))
    suite.addTest(DeviceTestCase.parameterize(TestGetDescriptors, rpc, userpass, device_model, full_type, path, fingerprint, master_xpub, interface=interface))
    suite.addTest(DeviceTestCase.parameterize(TestGetKeypool, rpc, userpass, device_model, full_type, path, fingerprint, master_xpub, interface=interface))
    suite.addTest(DeviceTestCase.parameterize(TestDisplayAddress, rpc, userpass, device_model, full_type, path, fingerprint, master_xpub, interface=interface))
    suite.addTest(DeviceTestCase.parameterize(TestJadeGetMultisigAddresses, rpc, userpass, device_model, full_type, path, fingerprint, master_xpub, interface=interface))
    suite.addTest(DeviceTestCase.parameterize(TestSignMessage, rpc, userpass, device_model, full_type, path, fingerprint, master_xpub, interface=interface))
    suite.addTest(DeviceTestCase.parameterize(TestSignTx, rpc, userpass, device_model, full_type, path, fingerprint, master_xpub, interface=interface, signtx_cases=signtx_cases))

    result = unittest.TextTestRunner(stream=sys.stdout, verbosity=2).run(suite)
    dev_emulator.stop()
    atexit.unregister(dev_emulator.stop)
    return result.wasSuccessful()
Example #50
 def shutdown(self):
     """Shut down the span processors added to the tracer."""
     self._active_span_processor.shutdown()
     if self._atexit_handler is not None:
         atexit.unregister(self._atexit_handler)
         self._atexit_handler = None
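Storing the handler at registration time, as this OpenTelemetry-style shutdown does, keeps the unregistration exact and prevents the handler from firing a second time at exit. A minimal sketch of the pattern (class and method names are illustrative):

import atexit

class Tracer:
    def __init__(self):
        # register() returns the callable, so the handle can be kept:
        self._atexit_handler = atexit.register(self.shutdown)

    def shutdown(self):
        print("flushing span processors")
        if self._atexit_handler is not None:
            atexit.unregister(self._atexit_handler)
            self._atexit_handler = None

t = Tracer()
t.shutdown()  # explicit shutdown; nothing runs again at interpreter exit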
Example #51
 def release_on_exit(self, do):
     if do:
         atexit.register(self.release)
     else:
         atexit.unregister(self.release)
     self._do_release_on_exit = do
Example #52
    Cleanup before system exit

    """
    global _shutdown
    _shutdown = True
    items = list(_threads_queues.items())
    for t, q in items:
        q.put(NULL_ENTRY)
    for t, q in items:
        t.join()

# change default cleanup


atexit.unregister(_python_exit)
atexit.register(python_exit)

########################################################################################################################
#                                               Worker implementation                                                  #
########################################################################################################################


def _worker(executor_reference, work_queue):
    """

    Worker

    :param executor_reference: executor function
    :type executor_reference: callable
    :param work_queue: work queue
Example #53
 def shutdown(data, addr, sock):
     atexit.unregister(_client_shutdown_hook)
     sock.close()
     exit()
Example #54
 def __exit__(self, exc_type, exc_val, exc_tb):
     atexit.unregister(self.on_exit)
     self.on_exit()
Example #55
async def start(zmq_endpoint: str, snapshot: int, modules_config: dict):
    """
    Starts the STIX-Shifter Threat Bus app.
    @param zmq_endpoint The ZMQ management endpoint of Threat Bus ('host:port')
    @param snapshot An integer value to request n days of historical IoC items
    @param modules_config User-provided configuration for STIX-Shifter modules
    """
    global logger, async_tasks, p2p_topic
    # needs to be created inside the same eventloop where it is used
    logger.debug(f"Calling Threat Bus management endpoint {zmq_endpoint}")
    reply = subscribe(zmq_endpoint, "stix2/indicator", snapshot)
    if not reply_is_success(reply):
        logger.error("Subscription failed")
        return
    pub_port = reply.get("pub_port", None)
    sub_port = reply.get("sub_port", None)
    topic = reply.get("topic", None)
    if not pub_port or not sub_port or not topic:
        logger.error("Subscription failed")
        return
    zmq_host = zmq_endpoint.split(":")[0]
    pub_endpoint = f"{zmq_host}:{pub_port}"
    sub_endpoint = f"{zmq_host}:{sub_port}"

    logger.info(f"Subscription successful. New p2p_topic: {topic}")
    if p2p_topic:
        # The 'start' function is called as result of a restart
        # Unsubscribe the old topic as soon as we get a working connection
        logger.info("Cleaning up old p2p_topic subscription ...")
        unsubscribe(zmq_endpoint, p2p_topic)
        atexit.unregister(unsubscribe)
    p2p_topic = topic
    atexit.register(unsubscribe, zmq_endpoint, topic)

    # Set task exception handler.
    loop = asyncio.get_event_loop()

    def exception_handler(loop, context):
        logger.error(f"Error in async task: {context}")

    loop.set_exception_handler(exception_handler)

    # Start a heartbeat task so we notice when the Threat Bus host goes away.
    async_tasks.append(
        asyncio.create_task(heartbeat(zmq_endpoint, p2p_topic, interval=5))
    )

    # Create queues (channels) for passing indicators and sightings
    # asynchronously between the routines that make up this app.
    indicator_queue = asyncio.Queue()
    sightings_queue = asyncio.Queue()

    # Start a receive task to retrieve real-time updates from Threat Bus.
    async_tasks.append(
        asyncio.create_task(receive(pub_endpoint, p2p_topic, indicator_queue))
    )
    async_tasks.append(
        asyncio.create_task(
            process_indicators(indicator_queue, sightings_queue, modules_config)
        )
    )

    # Start a publisher task to send back sightings to Threat Bus.
    async_tasks.append(
        asyncio.create_task(report_sightings(sub_endpoint, sightings_queue))
    )

    loop = asyncio.get_event_loop()
    for s in [signal.SIGHUP, signal.SIGTERM, signal.SIGINT]:
        loop.add_signal_handler(s, lambda: asyncio.create_task(stop_signal()))
    return await asyncio.gather(*async_tasks)
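The atexit.unregister(unsubscribe) call above removes every registration of that callable regardless of the arguments it was registered with, so re-registering with the new endpoint and topic is enough. A minimal sketch of that argument-agnostic behaviour:

import atexit

def unsubscribe(endpoint, topic):
    print(f"unsubscribing from {topic} at {endpoint}")

atexit.register(unsubscribe, "localhost:1234", "old-topic")

# reconnect: arguments don't matter to unregister, only the callable does
atexit.unregister(unsubscribe)
atexit.register(unsubscribe, "localhost:1234", "new-topic")
# at exit, only the "new-topic" call runs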
Example #56
0
                        required=True,
                        help='codec ip address')
    parser.add_argument('-u',
                        dest='username',
                        required=True,
                        help='codec API username')
    parser.add_argument('-p',
                        dest='password',
                        required=True,
                        help='codec API password')

    in_args = parser.parse_args()
    print("args: {}".format(in_args))

    # do not switch the LEDs off
    atexit.unregister(unicornhat._clean_shutdown)

    color_widgets = ['red', 'green', 'blue']
    red, green, blue = (0, 0, 0)
    update_color_widgets = False
    for arg in in_args.widget_value:
        widget_id, value = arg.split('=')

        if widget_id == 'red':
            red = int(value)
            update_color_widgets = True
        elif widget_id == 'green':
            green = int(value)
            update_color_widgets = True
        elif widget_id == 'blue':
            blue = int(value)
Example #57
0
def setup(
    town: str,
    fps: int = 20,
    server_timestop: float = 20.0,
    client_timeout: float = 20.0,
    num_max_restarts: int = 5,
) -> Tuple[carla.Client, carla.World, int, subprocess.Popen]:  # pylint: disable=no-member
    """Returns the `CARLA` `server`, `client` and `world`.

  Args:
    town: The `CARLA` town identifier.
    fps: The frequency (in Hz) of the simulation.
    server_timestop: The time interval between spawning the server
      and resuming program.
    client_timeout: The time interval before stopping
      the search for the carla server.
    num_max_restarts: Number of attempts to connect to the server.

  Returns:
    client: The `CARLA` client.
    world: The `CARLA` world.
    frame: The synchronous simulation time step ID.
    server: The `CARLA` server.
  """
    assert town in ("Town01", "Town02", "Town03", "Town04", "Town05")

    # The attempts counter.
    attempts = 0

    while attempts < num_max_restarts:
        logging.debug(
            "{} out of {} attempts to setup the CARLA simulator".format(
                attempts + 1, num_max_restarts))

        # Random assignment of port.
        port = np.random.randint(2000, 3000)

        # Start CARLA server.
        env = os.environ.copy()
        env["SDL_VIDEODRIVER"] = "offscreen"
        env["SDL_HINT_CUDA_DEVICE"] = "0"
        logging.debug("Inits a CARLA server at port={}".format(port))
        server = subprocess.Popen(
            [
                os.path.join(os.environ.get("CARLA_ROOT"), "CarlaUE4.sh"),
                "-carla-rpc-port={}".format(port),
                "-quality-level=Epic",
            ],
            stdout=None,
            stderr=subprocess.STDOUT,
            preexec_fn=os.setsid,
            env=env,
        )
        atexit.register(os.killpg, server.pid, signal.SIGKILL)
        time.sleep(server_timestop)

        # Connect client.
        logging.debug("Connects a CARLA client at port={}".format(port))
        try:
            client = carla.Client("localhost", port)  # pylint: disable=no-member
            client.set_timeout(client_timeout)
            client.load_world(map_name=town)
            world = client.get_world()
            world.set_weather(carla.WeatherParameters.ClearNoon)  # pylint: disable=no-member
            frame = world.apply_settings(
                carla.WorldSettings(  # pylint: disable=no-member
                    no_rendering_mode=False,
                    synchronous_mode=True,
                    fixed_delta_seconds=1.0 / fps,
                ))
            logging.debug("Server version: {}".format(
                client.get_server_version()))
            logging.debug("Client version: {}".format(
                client.get_client_version()))
            return client, world, frame, server
        except RuntimeError as msg:
            logging.debug(msg)
            attempts += 1
            logging.debug("Stopping CARLA server at port={}".format(port))
            os.killpg(server.pid, signal.SIGKILL)
            atexit.unregister(os.killpg)  # matches the atexit.register(os.killpg, ...) above

    logging.debug("Failed to connect to CARLA after {} attempts".format(
        num_max_restarts))
    sys.exit()
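Note the unregister call in the retry loop: it must name os.killpg itself, since a freshly created lambda can never compare equal to the callable registered earlier and would leave the kill handler armed. A sketch of the pitfall, with a hypothetical handler:

import atexit

def kill(pid):
    print(f"killing process group {pid}")

atexit.register(kill, 1234)

atexit.unregister(lambda: kill(1234))  # no-op: a new lambda never equals the registered callable
atexit.unregister(kill)                # removes the registration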
Example #58
0
 def _close_connection(self):
     """ Close connection to the database. """
     _LOGGER.info("Closing database")
     atexit.unregister(self._close_connection)
     self.conn.close()
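Unregistering inside the handler itself, as above, makes the close idempotent: a manual call removes the exit hook so the connection is never closed twice. A sketch of the same self-disarming pattern using sqlite3:

import atexit
import sqlite3

class Store:
    def __init__(self, path):
        self.conn = sqlite3.connect(path)
        atexit.register(self._close_connection)

    def _close_connection(self):
        # self-disarm first, so a manual close does not run again at exit
        atexit.unregister(self._close_connection)
        self.conn.close()

store = Store(":memory:")
store._close_connection()  # closes now; nothing left to run at exit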
Example #59
0
def NanoModeler(LIG1_FILE=None, CAP1=[], LIG1_C=0, LIG1_S=0, LIG1_FRAC=1.0, MORPHOLOGY="random", RSEED=None, STRIPES=1, LIG2_FILE=None, CAP2=[], LIG2_C=0, LIG2_S=0, FRCMOD=None, CORE=None, ELONGATED=False):
    VAR = {
    "LIG1_FILE" : LIG1_FILE,   #ByteArray of the mol2 of ligand1
    "CAP1" : CAP1,		    #List of atom numbers (as in the mol2, likely to start in 1) to remove
    "LIG1_C"    : LIG1_C,    #Signed integer, atom number of carbon atom in LIG1_FILE used as anchor
    "LIG1_S"    : LIG1_S,            #Signed integer, atom number in the original mol2 file of ligand 1 corresponding to the anchoring S atom
    "LIG1_FRAC" : LIG1_FRAC,          #Float, fraction of ligand1 to place (0-1.0)
    "MORPHOLOGY" : MORPHOLOGY,    #String, morphology to distribute ligands1 and 2. random, janus, and stripe are allowed
    "RSEED" : RSEED,              #Integer, random seed for random morphology
    "STRIPES" : STRIPES,              #Signed integer, number of stripes for stripe morphology. It will start (bottom up with ligand 1)

    "LIG2_FILE" : LIG2_FILE,   #ByteArray of the mol2 of ligand2
    "CAP2" : CAP2,               #List of atom numbers (as in the mol2, likely to start in 1) to remove
    "LIG2_C"    : LIG2_C,    #Signed integer, atom number of carbon atom in LIG2_FILE used as anchor
    "LIG2_S"    : LIG2_S,            #Signed integer, atom number in the original mol2 file of ligand 1 corresponding to the anchoring S atom

    "FRCMOD"    : FRCMOD,       #ByteArray of the frcmod provided by the user
    "CORE" : CORE,    #ByteArray of the pdb of the core
    "ELONGATED" : ELONGATED    #If set to true, the first carbon of the cores will be ignored and placed 1.8A away from the S atoms along the O-S axis
    }

    log = "WELCOME TO NANOMODELER\n\n"
    log += "Importing sys library...\n"
    import sys
    log += "Importing numpy library...\n"
    import numpy as np
    log += "Importing random library...\n"
    import random
    log += "Importing math library...\n"
    import math
    log += "Importing scipy library...\n"
    from scipy.spatial import distance
    log += "Importing sklearn library...\n"
    from sklearn.decomposition import PCA
    log += "Importing os library...\n"
    import os
    log += "Importing shutil library...\n"
    import shutil
    log += "Importing tempfile...\n"
    import tempfile
    log += "Importing zipfile...\n"
    import zipfile
    log += "Importing atexit library...\n"
    import atexit
    log += "Importing re library...\n"
    import re
    log += "Importing pickle library...\n"
    import pickle
    log += "Importing subprocess library...\n"
    import subprocess
    log += "Importing signal library...\n"
    import signal
    log += "Importing traceback library...\n"
    import traceback
    log += "Importing datetime library...\n"
    import datetime
    log += "Importing acpype.py...\n"
    from DEPENDENCIES.acpype import MolTopol
    log += "Importing NP_builder dependency...\n"
    from DEPENDENCIES.NP_builder import init_lig_mol2, init_core_pdb, get_ligand_pill, assign_morph, get_stones, coat_NP, print_NP_pdb
    log += "Importing staples dependency...\n"
    from DEPENDENCIES.staples import load_gro, load_top, get_ndxs, make_blocks, write_bonds, write_angles, write_topology
    log += "Importing default function dependency...\n"
    from DEPENDENCIES.defaults import check_VAR, check_mol2, check_frcmod, write_leap
    log += "Importing transformations dependency...\n"
    from  DEPENDENCIES.transformations import affine_matrix_from_points, vector_norm, quaternion_matrix
    log += "Importing subunits dependency...\n"
    import DEPENDENCIES.subunits
    log += "Importing rewrite_mol2 dependency...\n"
    from DEPENDENCIES.rewrite_mol2 import rewrite_mol2
    log += "Importing cleanup dependency...\n\n"
    from DEPENDENCIES.cleanup import cleanup_error

    log += "Creating temporary folder...\n"
    TMP = tempfile.mkdtemp(dir="./")
    atexit.register(cleanup_error, TMP, log)    #From this point, cleanup_error will be executed after NanoModeler finishes (or crashes)

    log += "Checking input options...\n"
    check_VAR(VAR)      #Checks validity of values in the variables
    
    #Based on the morphology, number of stripes, and fraction of ligand1, it is decided if there are one or two ligands
    if (VAR["MORPHOLOGY"] == "stripe"):
        if VAR["STRIPES"] == 1:
            two_lig = False
        else:
            two_lig = True
    else:
        two_lig = (VAR["LIG1_FRAC"] < 1.0)
    
    log += "Imported options:\n"
    for i in VAR:
        if (i == "LIG1_FILE" or i == "LIG2_FILE" or i == "CORE" or i == "FRCMOD") and VAR[i]:
            if VAR[i]:
                log += "\t{:<20}{:>20}\n".format(i, str(VAR[i].name))
        else:
            log += "\t{:<20}{:>20}\n".format(i, str(VAR[i]))
    
    log += "\nOne ligand was found...\n"
    log += "Checking ligand1 mol2 file...\n"
    LIG1_MOL2 = VAR["LIG1_FILE"].readlines()        #Read lines and eliminates the \n character from them
    LIG1_MOL2 = [s.replace("\n", "") for s in LIG1_MOL2]
    log = check_mol2(LIG1_MOL2, log)
    log += "Rewriting ligand1 file...\n"
    VAR["LIG1_C"], log = rewrite_mol2(LIG1_MOL2, VAR["CAP1"], VAR["LIG1_S"], VAR["LIG1_C"], TMP+"/LIG1.mol2", VAR["ELONGATED"], log)        #Writes new mol2 without capping, S atom at the end, and properly numbered

    if two_lig:
        log += "Two ligands were found...\n"
        log += "Checking ligand2 mol2 file...\n"
        LIG2_MOL2 = VAR["LIG2_FILE"].readlines()        #Read lines and eliminates the \n character from them
        LIG2_MOL2 = [s.replace("\n", "") for s in LIG2_MOL2]
        log = check_mol2(LIG2_MOL2, log)
        log += "Rewriting ligand2 file...\n"
        VAR["LIG2_C"], log = rewrite_mol2(LIG2_MOL2, VAR["CAP2"], VAR["LIG2_S"], VAR["LIG2_C"], TMP+"/LIG2.mol2", VAR["ELONGATED"], log)        #Writes new mol2 without capping, S atom at the end, and properly numbered
    ##############################NP_builder########################

    log += "Initializing core...\n"
    CORE_PDB = VAR["CORE"].readlines()          #Read lines and eliminates the \n character from them
    CORE_PDB = [s.replace("\n", "") for s in CORE_PDB]
    xyz_core, names_core, res_core = init_core_pdb(CORE_PDB, VAR["ELONGATED"])      #Extracts xyz coordinates, names, and residues of the core

    log += "Initializing ligand1...\n"
    xyz_lig1, names_lig1, res_lig1 = init_lig_mol2(TMP+"/LIG1.mol2", VAR["LIG1_S"], VAR["LIG1_C"])      #Reads rewritten mol2 file and puts the C atom in the origin
    if two_lig:
        log += "Initializing ligand2...\n"
        xyz_lig2, names_lig2, res_lig2 = init_lig_mol2(TMP+"/LIG2.mol2", VAR["LIG2_S"], VAR["LIG2_C"])      #Reads rewritten mol2 file and puts the C atom in the origin
    else:
        xyz_lig2, names_lig2, res_lig2 = [], [], []

    log += "Running PCA for ligand1...\n"
    xyz_pillars1, log = get_ligand_pill(xyz_lig1, VAR["LIG1_C"], VAR["LIG1_S"], log)        #Gives the coordinates of the C atom and the projections of 2 pseudo random atoms into PCA1
    if two_lig:
        log += "Running PCA for ligand2...\n"
        xyz_pillars2, log = get_ligand_pill(xyz_lig2, VAR["LIG2_C"], VAR["LIG2_S"], log)        #Gives the coordinates of the C atom and the projections of 2 pseudo random atoms into PCA1
    else:
        xyz_pillars2 = []

    N_S = len(names_core[names_core=='ST'])

    log +="Assigning morphology...\n"
    xyz_anchors1, xyz_anchors2, log = assign_morph(xyz_core, names_core, VAR["LIG1_FRAC"], VAR["RSEED"], VAR["MORPHOLOGY"], VAR["STRIPES"], log)        #Gives the coordinates of the C atoms in the core corresponding to each ligand depending on the morphology specified

    xyz_stones1 = get_stones(xyz_core, names_core, xyz_anchors1, xyz_pillars1, VAR["LIG1_S"])       #Gives a 3D array with the xyz coordinates for all the stones of all the anchors of ligand 1
    if two_lig:
        xyz_stones2 = get_stones(xyz_core, names_core, xyz_anchors2, xyz_pillars2, VAR["LIG2_S"])       #Gives a 3D array with the xyz coordinates for all the stones of all the anchors of ligand 2
    else:
        xyz_stones2 = []

    log += "Coating nanoparticle...\n"  #Merges xyz coordinates and names of the core and the ligands into one coated NP
    xyz_coated_NP, names_coated_NP, res_coated_NP, log = coat_NP(xyz_core, names_core, xyz_lig1, names_lig1, xyz_pillars1, xyz_stones1, xyz_lig2, names_lig2, xyz_pillars2, xyz_stones2, res_lig1, res_lig2, VAR["LIG1_S"], VAR["LIG2_S"], VAR["ELONGATED"], log)

    log += "Writing pdb of the coated nanoparticle...\n"
    print_NP_pdb(xyz_coated_NP, names_coated_NP, res_coated_NP, xyz_anchors1, xyz_anchors2, xyz_lig1, xyz_lig2, TMP+"/NP.pdb")      #Writes pdb of the nanoparticle

    ###########################Parameters (parmchk2, tleap, acpype)#####################################

    log += "Running parmchk2 for ligand1...\n"
    os.system("parmchk2 -i {}/LIG1.mol2 -f mol2 -o {}/LIG1.frcmod -a y".format(TMP, TMP))       #Generated frcmod of the ligand (includes parameters with the S atom)
    log += "Checking parameters for ligand1...\n"
    log = check_frcmod(TMP+"/LIG1.frcmod", log)         #If parameters could not be assigned, they are shown in the log file
    if two_lig:
        log += "Running parmchk2 for ligand2...\n"
        os.system("parmchk2 -i {}/LIG2.mol2 -f mol2 -o {}/LIG2.frcmod -a y".format(TMP, TMP))       #Generated frcmod of the ligand (includes parameters with the S atom)
        log += "Checking parameters for ligand2...\n"
        log = check_frcmod(TMP+"/LIG2.frcmod", log)     #If parameters could not be assigned, they are shown in the log file

    log += "Writing tleap input file...\n"
    write_leap(VAR, TMP, two_lig)           #Writes file to be run by tleap
    log += "Running tleap...\n"
    os.system("tleap -sf {}/TLeap.in > {}/TLeap.log".format(TMP, TMP))

    log += "Running acpype...\n"
    acpype_system = MolTopol(acFileXyz = "{}/NP.inpcrd".format(TMP), acFileTop = "{}/NP.prmtop".format(TMP),
                      debug = False, basename = "{}/NP".format(TMP),
                      verbose = False,  gmx45 = True,
                      disam = False,     direct = False,
                      is_sorted = False, chiral = False)
    acpype_system.writeGromacsTopolFiles(amb2gmx = True)

    ##############################Staples########################

    log += "Reading gro file of the coated nanoparticle...\n"
    xyz_sys, names_sys = load_gro(TMP+"/NP.gro")        #Reads gro file without velocities

    log += "Reading top file of the unlinked nanoparticle...\n"
    types_sys, res_sys = load_top(TMP+"/NP.top")        #Saves types and residue name of all atoms in the system

    log += "Looking for anchoring carbon atoms for ligand1...\n"
    ndx_C1 = get_ndxs(xyz_sys, names_sys, types_sys, res_sys, res_lig1, xyz_anchors1)     #Get indexes of all the C atoms
    if two_lig:
        log += "Looking for anchoring carbon atoms for ligand2...\n"
        ndx_C2 = get_ndxs(xyz_sys, names_sys, types_sys, res_sys, res_lig2, xyz_anchors2)      #Get indexes of all the C atoms
    else:
        ndx_C2 = []
    blocks = make_blocks(xyz_sys, names_sys, names_core, res_core, ndx_C1)      #Makes list of blocks consisting of 1C, 1S and 2Au atoms
    if two_lig:
        blocks = blocks + make_blocks(xyz_sys, names_sys, names_core, res_core, ndx_C2)     #Appends list of blocks consisting of 1C, 1S and 2Au atoms

    #Manually assigns the intra- and inter-blocks parameters
    log += "Writing bonds parameters...\n"
    write_bonds(blocks, TMP+"/bonds.top", xyz_sys, names_sys)
    log += "Writing angles parameters...\n"
    write_angles(blocks, TMP+"/angles.top", xyz_sys, names_sys)
    log += "Writing final topology file...\n"
    write_topology(TMP+"/NP.top", TMP+"/bonds.top", TMP+"/angles.top")
    #############################################################
    
    atexit.unregister(cleanup_error)        #The run succeeded; from here on cleanup_error will no longer fire at exit

    log += "Compressing final files...\n"

    copy = ["LIG1.mol2", "NP.pdb", "NP.top", "NP.gro"]      #List of files to compress to output
    if two_lig:
        copy.append("LIG2.mol2")

    zip_path = TMP + ".zip"
    zip_tmp = zipfile.ZipFile(zip_path, "w")
    for i in copy:
        zip_tmp.write("{}/{}".format(TMP, i))       #Saves the list of output files in zip file
    zip_tmp.close()
    zf = open(zip_path, 'rb')       #Opens and reads the saved zip file
    zip_data = zf.read()
    zf.close()

    log += "Cleaning...\n"
    bye = ["ANTECHAMBER.FRCMOD", "leap.log", "md.mdp", "em.mdp", zip_path]      #List of files of delete at the end of the run (including the zip file)
    for i in bye:
        os.remove(i)
    #shutil.rmtree(TMP)     #Deletes the temporary folder

    log += "\"Senoras y senores, bienvenidos al party, agarren a su pareja (de la cintura) y preparense porque lo que viene no esta facil, no esta facil no.\"\n\tIvy Queen.\n"
    log += "NanoModeler terminated normally. Que gracias.\n"
    print(log)
    return (1, log, zip_data)
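NanoModeler uses atexit as an error trap: cleanup_error is armed right after the temporary folder is created and disarmed just before the normal cleanup path, so it fires only if the run dies in between. A minimal sketch of that pattern, with a hypothetical cleanup function:

import atexit
import shutil
import tempfile

def cleanup_error(tmp):
    shutil.rmtree(tmp, ignore_errors=True)
    print("run crashed: removed", tmp)

TMP = tempfile.mkdtemp()
atexit.register(cleanup_error, TMP)  # armed: fires if we never reach the end

# ... work that may raise ...

atexit.unregister(cleanup_error)     # success: disarm the error trap
shutil.rmtree(TMP)                   # normal cleanup path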
Example #60
0
    def __call__(self, event):  # noqa: D102
        data = event[0]

        if isinstance(data, JobQueued):
            self._queued_count += 1

        elif isinstance(data, JobStarted):
            job = event[1]
            assert job not in self._running
            self._running[job] = {'start_time': time.monotonic()}

        elif isinstance(data, JobProgress):
            job = event[1]
            self._running[job]['progress'] = [data.progress]

        elif isinstance(data, StdoutLine):
            line = data.line
            if isinstance(line, bytes):
                line = line.decode(errors='replace')
            match = self._progress_pattern.match(line)
            if not match:
                return
            job = event[1]
            progress = self._running[job].setdefault('progress', [])  # keep parsed progress on the job record
            while len(progress) > 1:
                progress.pop()
            progress.append(match.group(1).lstrip() + '%')

        elif isinstance(data, JobEnded):
            job = event[1]
            self._ended[job] = self._running[job]
            self._ended[job]['end_time'] = time.monotonic()
            self._ended[job]['rc'] = data.rc
            del self._running[job]

        elif isinstance(data, TimerEvent):
            now = time.monotonic()
            blocks = []

            # runtime in seconds
            duration_string = format_duration(
                now - self._start_time, fixed_decimal_points=1)
            blocks.append('[{duration_string}]'.format_map(locals()))

            # number of completed jobs / number of jobs
            blocks.append(
                '[%d/%d complete]' %
                (len(self._ended), self._queued_count))

            # number of failed jobs if not zero
            failed_jobs = [
                j for j, d in self._ended.items()
                if d['rc'] and d['rc'] != SIGINT_RESULT]
            if failed_jobs:
                blocks.append('[%d failed]' % len(failed_jobs))

            # number of ongoing jobs if greater than one
            if len(self._running) > 1:
                blocks.append('[%d ongoing]' % len(self._running))

            # job identifier, label and time for ongoing jobs
            for job, d in self._running.items():
                msg = job.task.context.pkg.name
                if 'progress' in d:
                    msg += ':%s' % ' '.join(d['progress'])
                duration_string = format_duration(
                    now - d['start_time'], fixed_decimal_points=1)
                blocks.append(
                    '[{msg} - {duration_string}]'.format_map(locals()))

            # determine blocks which fit into terminal width
            max_width = shutil.get_terminal_size().columns
            for i in reversed(range(len(blocks))):
                msg = ' '.join(blocks[0:i + 1])
                # append dots when skipping at least one block
                if i < len(blocks) - 1:
                    msg += ' ...'
                if len(msg) < max_width:
                    break
            else:
                return

            print(msg, end='\r')
            self._last_status_line_length = len(msg)

        elif isinstance(data, EventReactorShutdown):
            self._clear_last_status_line()
            atexit.unregister(self._clear_last_status_line)
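The EventReactorShutdown branch clears the status line and drops the atexit fallback, guaranteeing the line is cleared exactly once whichever path runs first. A sketch of that once-only idea:

import atexit

class StatusLine:
    def __init__(self):
        self._last_len = 0
        atexit.register(self._clear_last_status_line)  # fallback path

    def show(self, msg):
        print(msg, end='\r')
        self._last_len = len(msg)

    def _clear_last_status_line(self):
        print(' ' * self._last_len, end='\r')

    def on_shutdown(self):
        self._clear_last_status_line()
        atexit.unregister(self._clear_last_status_line)  # already cleared once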