def _get_state(self):
    """Query docker for the containers attached to this manager's network.

    Runs ``docker ps`` with a CSV-style format string and parses each
    output line into a ``_StateEntry``.

    Returns:
        list of _StateEntry: one entry per container, with the network
        prefix stripped from the container name.

    Raises:
        ManagementError: if the docker command fails, or if the docker
            executable cannot be found (ENOENT).
    """
    args = [
        'docker', 'ps', '--no-trunc', '--format',
        '{{.Names}},{{.ID}},{{.Status}},'
        '{{.Command}}',
        '--filter', 'network={}'.format(self._prefix)]
    try:
        output = subprocess.check_output(args)
    except subprocess.CalledProcessError as e:
        raise ManagementError(str(e))
    except OSError as e:
        if e.errno == 2:
            raise ManagementError("{}:{}".format(str(e), args[0]))
        # BUG FIX: other OSErrors previously fell through with `output`
        # unbound, producing a confusing NameError below; re-raise them.
        raise

    entries = []
    for line in output.decode().split('\n'):
        if len(line) < 1:
            continue
        parts = line.split(',')
        entries.append(_StateEntry(
            name=parts[0].replace(self._prefix + '-', ''),
            identifier=parts[1],
            status=parts[2],
            command=parts[3]))
    return entries
def _get_state(self):
    """Scan the process table for running validator processes.

    Parses ``ps -ef`` output and builds a ``_StateEntry`` for each line
    that mentions "validator".

    Returns:
        list of _StateEntry: one entry per validator process found.

    Raises:
        ManagementError: if ``ps`` fails, or if the ``ps`` executable
            cannot be found (ENOENT).
    """
    sep = re.compile(r"[\s]+")

    # Retrieves list of all running validators
    cmd = ['ps', '-ef']
    try:
        output = subprocess.check_output(cmd)
    except subprocess.CalledProcessError as e:
        raise ManagementError(str(e))
    except OSError as e:
        if e.errno == 2:
            raise ManagementError("{}".format(str(e)))
        # BUG FIX: other OSErrors previously fell through with `output`
        # unbound, producing a confusing NameError below; re-raise them.
        raise

    entries = []
    for line in output.decode().split('\n'):
        if "validator" in line and not len(line) < 1:
            parts = sep.split(line)
            # NOTE(review): assumes column 12 of the ps line ends with the
            # two-digit node number (e.g. a port or url argument) — fragile;
            # verify against the validator command line it matches.
            entries.append(
                _StateEntry(
                    name="validator-0{}".format(parts[12][-2:]),
                    pid=parts[1],
                    status='Up'))  # If the process exists, it is up
    return entries
def stop(self, node_name):
    """Stop and remove the docker containers belonging to a node.

    Stops and removes the validator container, its processor containers,
    and (for the first node) the rest_api container, then removes the
    per-validator compose network.

    Args:
        node_name (str): the node to stop, e.g. 'validator-000'.

    Raises:
        ManagementError: if any docker command fails.
    """
    state = self._load_state()
    node_num = node_name[len('validator-'):]

    # only first node has a rest_api process associated with it
    if node_num == '000':
        processes = state['Processors'] + ['rest_api'] + ['validator']
    else:
        processes = state['Processors'] + ['validator']

    containers = [
        '-'.join([self._prefix, proc, node_num])
        for proc in processes
    ]

    for c_name in containers:
        args = ['docker', 'stop', c_name]
        LOGGER.debug('stopping %s: %s', c_name, ' '.join(args))
        self._run_and_log(args)

        args = ['docker', 'rm', c_name]
        LOGGER.debug('stopping %s: %s', c_name, ' '.join(args))
        self._run_and_log(args)

        if 'validator' in c_name:
            # Remove the compose-generated network for this validator.
            network = c_name.replace('-', '') + '_default'
            self._run_and_log(['docker', 'network', 'rm', network])

def _run_and_log(self, args):
    """Run a command, raising ManagementError on failure and logging
    every non-empty line of its decoded output at debug level.

    BUG FIX: the original 'docker network rm' branch iterated raw bytes
    from splitlines(), logging b'...' reprs; output is now decoded
    consistently for all three commands.
    """
    try:
        output = subprocess.check_output(args)
    except subprocess.CalledProcessError as e:
        raise ManagementError(str(e))
    for line in output.decode().split('\n'):
        if len(line) < 1:
            continue
        LOGGER.debug("command output: %s", str(line))
def _is_bridge_network_started(self):
    """Return True if the bridge network for this prefix already exists.

    Uses ``docker network ls -q`` filtered by network name; a non-empty
    result means the network has been created.

    Raises:
        ManagementError: if the docker command fails.
    """
    try:
        # BUG FIX: the filter key must be lowercase 'name' — docker
        # rejects 'NAME' as an invalid filter, which made this call
        # fail unconditionally.
        network_ls_args = ['docker', 'network', 'ls',
                           '--filter', 'name={}'.format(self._prefix),
                           '-q']
        network_output = subprocess.check_output(
            network_ls_args).splitlines()
        return len(network_output) > 0
    except subprocess.CalledProcessError as e:
        raise ManagementError(str(e))
def _start_bridge_network(self):
    """Create the shared docker bridge network for this cluster.

    The network is named after the manager's prefix; each line of the
    docker client's output is logged at info level.

    Raises:
        ManagementError: if the docker command fails.
    """
    create_cmd = [
        'docker', 'network', 'create', '-d', 'bridge', self._prefix]
    try:
        result = subprocess.check_output(create_cmd)
        for out_line in result.splitlines():
            LOGGER.info(out_line)
    except subprocess.CalledProcessError as e:
        raise ManagementError(str(e))
def _find_peers(self):
    """Discover peer validator endpoints on the bridge network.

    Inspects the bridge network and collects the IPv4 address of every
    attached container (addresses are reported as 'a.b.c.d/16', so the
    '/16' suffix doubles as the separator).

    Returns:
        list of str: 'tcp://<ip>:8800' endpoints, one per container.

    Raises:
        ManagementError: if the docker command fails.
    """
    peers = []
    args = ['docker', 'inspect', self._prefix,
            '--format',
            '{{range .Containers }}{{.IPv4Address}}{{end}}']
    try:
        output = subprocess.check_output(args)
        # BUG FIX: decode before splitting — splitting the raw bytes and
        # calling str() on each piece produced URLs like
        # "tcp://b'10.0.0.2':8800" under Python 3.
        peers = output.decode().split("/16")
    except subprocess.CalledProcessError as e:
        raise ManagementError(str(e))
    # len(p) > 4 drops the empty/whitespace tail after the last '/16'.
    return ['tcp://' + p + ':8800' for p in peers if len(p) > 4]
def _get_executable_script(script_name): ''' Searches PATH environmental variable to find the information needed to execute a script. Args: script_name: the name of the 'executable' script Returns: ret_val (list<str>): A list containing the python executable, and the full path to the script. Includes sys.executable, because certain operating systems cannot execute scripts directly. ''' ret_val = None if 'PATH' not in os.environ: raise ManagementError('no PATH environmental variable') search_path = os.environ['PATH'] for directory in search_path.split(os.pathsep): if os.path.exists(os.path.join(directory, script_name)): ret_val = os.path.join(directory, script_name) break if ret_val is not None: ret_val = [sys.executable, ret_val] else: raise ManagementError("could not locate %s" % (script_name)) return ret_val
def start(self, node_config):
    """Start a validator node and its processors as docker containers.

    Builds a docker-compose dictionary for the validator, its
    transaction processors, and (for 'validator-000' only) the rest_api
    container, writes it to a temp compose file, and runs the compose
    start command.  The first node started also creates the shared
    bridge network.

    Args:
        node_config: configuration object providing node_name,
            http_port, and genesis.

    Raises:
        ManagementError: if the network/compose commands fail or the
            executable cannot be found (ENOENT).
    """
    node_name = node_config.node_name
    http_port = node_config.http_port

    # The first time a node is started, it should start a bridge
    # network. Subsequent nodes should wait until the network
    # has successfully been started.
    with self._lock:
        if not self._is_bridge_network_started():
            self._start_bridge_network()

    compose_file = os.path.join(
        tempfile.mkdtemp(), 'docker-compose.yaml')

    start_args = self._construct_start_args(node_name, compose_file)

    LOGGER.debug('starting %s: %s', node_name,
                 self._join_args(start_args))

    peers = self._find_peers()

    # Genesis nodes create the genesis block before starting; all nodes
    # generate keys first.  The '{}' slot receives the --peers option.
    if node_config.genesis:
        entrypoint = 'bash -c "./bin/sawtooth admin keygen && \
            ./bin/sawtooth admin genesis && \
            ./bin/validator {} -v"'
    else:
        entrypoint = 'bash -c "./bin/sawtooth admin keygen && \
            ./bin/validator {} -v"'

    if len(peers) > 0:
        entrypoint = entrypoint.format('--peers ' + " ".join(peers))
    else:
        entrypoint = entrypoint.format('')

    compose_dict = {
        'version': '2',
        'services': {
            'validator': {
                'image': 'sawtooth-dev-validator',
                'expose': ['40000', '8800'],
                'networks': {self._prefix: {},
                             'default': {'aliases': [node_name]}},
                'volumes': ['/project:/project'],
                'container_name': self._prefix + '-' + node_name,
                'entrypoint': entrypoint
            }
        },
        'networks': {self._prefix: {'external': True}}
    }

    state = self._load_state()

    # add the processors
    node_num = node_name[len('validator-'):]
    processors = state['Processors']
    for proc in processors:
        compose_dict['services'][proc] = {
            'image': 'sawtooth-dev-{}'.format(proc),
            'expose': ['40000'],
            'links': ['validator'],
            'volumes': ['/project:/project'],
            'container_name': '-'.join([self._prefix, proc, node_num]),
            'entrypoint': 'bin/{} tcp://{}:40000'.format(proc, node_name)
        }

    # start the rest_api for the first node only
    if node_num == '000':
        compose_dict['services']['rest_api'] = {
            'image': 'sawtooth-dev-rest_api',
            'expose': ['40000', '8080'],
            'links': ['validator'],
            'volumes': ['/project:/project'],
            'container_name': '-'.join(
                [self._prefix, 'rest_api', node_num]),
            'entrypoint':
                './bin/rest_api --stream-url tcp://{}:40000'.format(
                    node_name),
            'ports': ['8080:8080']
        }

    # add the host:container port mapping for validator
    http_port = http_port + 31200
    compose_dict['services']['validator']['ports'] = \
        [str(http_port) + ":" + str(40000)]

    # BUG FIX: use a context manager so the compose file handle is
    # closed even if yaml.dump raises (the original leaked the open
    # file object).
    with open(compose_file, mode='w') as compose_fd:
        yaml.dump(compose_dict, compose_fd)

    try:
        output = subprocess.check_output(start_args)
    except subprocess.CalledProcessError:
        raise ManagementError(
            'Possibly unbuilt processors: {}'.format(processors))
    except OSError as e:
        if e.errno == 2:
            raise ManagementError("{}".format(str(e)))
        else:
            raise e

    for line in output.decode().split('\n'):
        if len(line) < 1:
            continue
        LOGGER.debug("command output: %s", str(line))