def mount_volumes(self):
    self.setup_mountpoint()
    for mount in self.mount_list:
        Path.create(self.mountpoint + mount.mountpoint)
        Command.run(
            ['mount', mount.device, self.mountpoint + mount.mountpoint]
        )
def __init__(self, parent):
    super().__init__(parent)
    self.mSettings = QSettings()
    self.mSaveBeforeExecute = False
    self.mCommands = QList()

    # Load saveBeforeExecute option
    s = self.mSettings.value("saveBeforeExecute", True)
    self.mSaveBeforeExecute = bool(s)

    # Load command list
    variant = self.mSettings.value("commandList")
    commands = variant
    if commands is None:
        commands = []
    for commandVariant in commands:
        self.mCommands.append(Command.fromQVariant(commandVariant))

    # Add default commands the first time the app has booted up.
    # This is useful on its own and helps demonstrate how to use the commands.
    addPrefStr = "addedDefaultCommands"
    addedCommands = self.mSettings.value(addPrefStr, False)
    if not addedCommands:
        # Disable default commands by default so the user gets an informative
        # warning when clicking the command button for the first time
        command = Command(False)
        if sys.platform == 'linux':
            command.command = "gedit %mapfile"
        elif sys.platform == 'darwin':
            command.command = "open -t %mapfile"
        if not command.command.isEmpty():
            command.name = self.tr("Open in text editor")
            self.mCommands.push_back(command)
            self.commit()
        self.mSettings.setValue(addPrefStr, True)
def start(self):
    print('Starting Activator...')
    password = getpass.getpass("Please enter the password for the Java KeyStore: ")
    # workaround: use the stage task as the start command doesn't work with HTTPS for now...
    Command.execute('{0} stage'.format(self.bin), self.code)
    Command.execute(
        '{0} -Dhttp.port=9001 -Dhttps.port=9000 -Dhttps.keyStore={1} -Dhttps.keyStorePassword={2} &'
        .format(self.app, self.keystore, password),
        redirect=os.path.join(self.parent, 'logs', 'activator.log'))
def __del__(self):
    if self.volume_group:
        log.info('Cleaning up %s instance', type(self).__name__)
        if self.is_mounted():
            all_volumes_umounted = True
            for mount in reversed(self.mount_list):
                umounted_successfully = False
                for busy in [1, 2, 3]:
                    try:
                        Command.run(['umount', mount.device])
                        umounted_successfully = True
                        break
                    except Exception:
                        log.warning(
                            '%d umount of %s failed, try again in 1sec',
                            busy, mount.device
                        )
                        time.sleep(1)
                if not umounted_successfully:
                    all_volumes_umounted = False
                    log.warning(
                        '%s still busy at %s',
                        self.mountpoint + mount.mountpoint,
                        type(self).__name__
                    )
            if all_volumes_umounted:
                Path.wipe(self.mountpoint)
        try:
            Command.run(['vgchange', '-an', self.volume_group])
        except Exception:
            log.warning(
                'volume group %s still busy', self.volume_group
            )
def __init__(self):
    self.stopped = False
    self.command = None
    self.was_running = None
    if MysqlService.is_running():
        MysqlService.stop()
        was_running = True
    else:
        was_running = False
    self.orig_varrun_mode = stat.S_IMODE(os.stat(self.PATH_VARRUN).st_mode)
    os.chmod(self.PATH_VARRUN, 0o750)
    command = Command(self.COMMAND)

    def cb():
        if MysqlService.is_running():
            continue_waiting = False
        else:
            continue_waiting = True
        return continue_waiting

    command.wait(timeout=10, callback=cb)
    if not command.running or not MysqlService.is_running():
        command.terminate()
        if was_running:
            MysqlService.start()
        raise Error(("%s failed to start\n" % self.COMMAND) + command.output)
    self.command = command
    self.was_running = was_running
def setUp(self):
    self.workspace = 'test_commit'
    Command.cmd_init(self.workspace)
    self.path = '1.txt'
    self.content = '1\n'
    write_to_file(self.path, self.content)
    Command.cmd_add(self.path)
def test_commit_once(self):
    Command.cmd_commit('first ci')
    commit = Commit(sha1=Branch().head_commit)
    self.assertIsNone(commit.parent_sha1)
    tree = Tree(sha1=commit.tree)
    objects = tree.parse_objects()
    self.assertEqual(objects[self.path]['sha1'], Blob(self.content).sha1)
def init(self):
    # config options
    self.config = {}
    self.loadConfig()
    self.running = False
    # initialize the event object used for sleeping
    self.event = threading.Event()
    if self.config['persistence'] and os.path.exists(self.config['file_name']):
        try:
            Logger.info('loading database from %s' % self.config['file_name'])
            self.load()
        except Exception as e:
            Logger.critical(str(e))
            self.setup()
    # register our 'save' commands
    Command.register(self.saveCmd, 'db.save', 0, 'db.save')
    # treat events
    Event.register('core.reload', self.reloadEvent)
    Event.register('core.shutdown', self.shutdownEvent)
def create_image_format(self):
    self.temp_image_dir = mkdtemp(
        dir=self.target_dir
    )
    diskname = ''.join(
        [
            self.target_dir, '/',
            self.xml_state.xml_data.get_name(),
            '.raw'
        ]
    )
    Command.run(
        ['cp', diskname, self.temp_image_dir + '/disk.raw']
    )
    if self.tag:
        with open(self.temp_image_dir + '/manifest.json', 'w') as manifest:
            manifest.write('{"licenses":["%s"]}' % self.tag)
    archive_name = self.get_target_name_for_format('gce')
    # delete the '.gz' suffix from the name. The suffix is appended by
    # the archive creation method depending on the creation type.
    archive_name = archive_name.replace('.gz', '')
    archive = ArchiveTar(
        self.target_dir + '/' + archive_name
    )
    archive.create_gnu_gzip_compressed(
        self.temp_image_dir
    )
def create_on_file(self, filename, label=None):
    # there is no label which could be set for clicfs,
    # thus this parameter is not used
    self.container_dir = mkdtemp()
    clicfs_container_filesystem = self.container_dir + '/fsdata.ext4'
    loop_provider = LoopDevice(
        clicfs_container_filesystem,
        self.__get_container_filesystem_size_mbytes()
    )
    loop_provider.create()
    filesystem = FileSystemExt4(
        loop_provider, self.root_dir
    )
    filesystem.create_on_device()
    filesystem.sync_data()
    Command.run(
        ['resize2fs', '-f', loop_provider.get_device(), '-M']
    )
    # force cleanup and umount of container filesystem
    # before mkclicfs is called
    del filesystem
    Command.run(
        ['mkclicfs', clicfs_container_filesystem, filename]
    )
def wipe(self):
    """
    Zap (destroy) any GPT and MBR data structures if present.
    For DASD disks create a new VTOC table
    """
    if 'dasd' in self.table_type:
        log.debug('Initialize DASD disk with new VTOC table')
        fdasd_input = NamedTemporaryFile()
        with open(fdasd_input.name, 'w') as vtoc:
            vtoc.write('y\n\nw\nq\n')
        bash_command = ' '.join(
            [
                'cat', fdasd_input.name, '|',
                'fdasd', '-f', self.storage_provider.get_device()
            ]
        )
        Command.run(
            ['bash', '-c', bash_command]
        )
    else:
        log.debug('Initialize %s disk', self.table_type)
        Command.run(
            [
                'sgdisk', '--zap-all', self.storage_provider.get_device()
            ]
        )
def get_format(self):
    for zipper in self.supported_zipper:
        try:
            Command.run([zipper, '-l', self.source_filename])
            return zipper
        except Exception:
            pass
def _receive_cmd(self, conn):
    buf = conn.receive_data_line()
    if buf is None:
        # connection closed
        raise Error(1, 'Connection closed.')
    cmd = Command()
    cmd.parse(buf)
    return cmd
def testAf2(self):
    path1 = r'./resource/3.3.2-Ringing_Timer_Stopped_Due_To_Call_Answered.dmf'
    path2 = r'./resource/3.3.3-Ringing_Timer_Stopped_Due_To_Call_Ignored.dmf'
    cmd1 = Command(path1, 0, 's')
    cmd2 = Command(path2, 0, 's')
    self.assertTrue(cmd1.sameAf2(cmd2))
def setUp(self):
    self.workspace = 'test_diff'
    Command.cmd_init(self.workspace)
    self.old_content = '''
The Way that can be told of is not the eternal Way;
The name that can be named is not the eternal name.
The Nameless is the origin of Heaven and Earth;
The Named is the mother of all things.
Therefore let there always be non-being, so we may see their subtlety,
And let there always be being, so we may see their outcome.
The two are the same,
But after they are produced, they have different names.
'''
    self.new_content = '''
The Nameless is the origin of Heaven and Earth;
The named is the mother of all things.
Therefore let there always be non-being, so we may see their subtlety,
And let there always be being, so we may see their outcome.
The two are the same,
But after they are produced, they have different names.
They both may be called deep and profound.
Deeper and more profound,
The door of all subtleties!
'''
    self.file_list = [('1.txt', self.old_content), ('2.txt', self.old_content)]
    for path, content in self.file_list:
        write_to_file(path, content)
        Command.cmd_add(path)
def dump_reload_package_database(self, version=45):
    db_load_for_version = {
        45: 'db45_load',
        48: 'db48_load'
    }
    if version not in db_load_for_version:
        raise KiwiRpmDatabaseReloadError(
            'Dump reload for rpm DB version: %s not supported' % version
        )
    if not self.database_consistent():
        reload_db_files = [
            '/var/lib/rpm/Name',
            '/var/lib/rpm/Packages'
        ]
        for db_file in reload_db_files:
            root_db_file = self.root_dir + db_file
            root_db_file_backup = root_db_file + '.bak'
            Command.run([
                'db_dump', '-f', root_db_file_backup, root_db_file
            ])
            Command.run(['rm', '-f', root_db_file])
            Command.run([
                db_load_for_version[version],
                '-f', root_db_file_backup, root_db_file
            ])
            Command.run(['rm', '-f', root_db_file_backup])
        Command.run([
            'chroot', self.root_dir, 'rpm', '--rebuilddb'
        ])
def process_delete_requests(self, force=False):
    delete_items = []
    for delete_item in self.__delete_items():
        try:
            Command.run(['chroot', self.root_dir, 'rpm', '-q', delete_item])
            delete_items.append(delete_item)
        except Exception:
            # ignore packages which are not installed
            pass
    if not delete_items:
        raise KiwiRequestError(
            'None of the requested packages to delete are installed'
        )
    if force:
        force_options = ['--nodeps', '--allmatches', '--noscripts']
        return Command.call(
            [
                'chroot', self.root_dir, 'rpm', '-e'
            ] + force_options + delete_items,
            self.command_env
        )
    else:
        chroot_zypper_args = self.root_bind.move_to_root(
            self.zypper_args
        )
        return Command.call(
            ['chroot', self.root_dir, 'zypper'] + chroot_zypper_args + [
                'remove', '-u', '--force-resolution'
            ] + delete_items,
            self.command_env
        )
def test_livestream(self):
    from time import sleep, time
    from command import Command
    # p = Popen('rtmpdump -v -r rtmp://192.168.1.106:1935/live/test --resume -m 5', shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    # output, err = p.communicate()
    # end = time() + 5
    # while time() < end:
    #     sleep(0.001)
    # log(output)
    cmd = "ffprobe -v quiet -print_format json -show_format -show_streams rtmp://192.168.1.106/live/test"
    cmd = "rtmpdump -m 5 -v -r rtmp://192.168.1.106:1935/live/test -o test.flv --resume"
    # rtmpdump -r rtmp://192.168.1.106:1935/live/test --resume
    # output = subprocess.call("rtmpdump -m 5 -v -r rtmp://192.168.1.106:1935/live/test -o test.flv --resume", shell=True, stdout=None)
    # print output
    command = Command(r"rtmpdump -v -r rtmp://192.168.1.106:1935/live/test -o test.flv --resume")
    command.run(timeout=10)
    print("done")
    # log()
def rememberHandler(msg):
    """Handle the remembering process"""
    prevMessage = findPreviousChatMessage(msg.Id, msg.Chat)
    c = Command("remember", prevMessage.FromDisplayName, prevMessage.Body)
    msg.Chat.SendMessage("remembering '{0}: {1}'".format(
        prevMessage.FromDisplayName, prevMessage.Body))
    c.remember()
def add_repo(self, name, uri, repo_type="rpm-md", prio=None):
    repo_file = self.shared_zypper_dir["reposd-dir"] + "/" + name + ".repo"
    self.repo_names.append(name + ".repo")
    if "iso-mount" in uri:
        # iso mount point is a tmpdir, thus different each time we build
        Path.wipe(repo_file)
    if not os.path.exists(repo_file):
        Command.run(
            ["zypper"] + self.zypper_args + [
                "--root", self.root_dir,
                "addrepo", "-f",
                "--type", self.__translate_repo_type(repo_type),
                "--keep-packages",
                uri, name,
            ],
            self.command_env,
        )
    if prio:
        Command.run(
            ["zypper"] + self.zypper_args + [
                "--root", self.root_dir,
                "modifyrepo", "-p", format(prio), name
            ],
            self.command_env,
        )
def __del__(self):
    try:
        for mount in reversed(self.mount_stack):
            Command.run(['umount', mount])
            Path.remove(mount)
    except Exception:
        pass
def test_rm_no_cached(self):
    entries = Repository().index.entries
    self.assertIn(self.path, entries)
    Command.cmd_rm(self.path, False)
    entries = Repository().index.entries
    self.assertNotIn(self.path, entries)
    self.assertFalse(os.path.exists(self.path))
def test_status_untracked_files(self):
    path, content = ('1.txt', '1\n')
    write_to_file(path, content)
    repo = Repository()
    untracked_files = repo.get_untracked_files()
    self.assertEqual(untracked_files, ['1.txt'])
    Command.cmd_status()
def import_overlay_files(
    self, follow_links=False, preserve_owner_group=False
):
    """
    copy overlay files from the image description to the image root
    tree. Supported are a root/ directory or a root.tar.gz tarball.
    The root/ directory takes precedence over the tarball
    """
    overlay_directory = self.description_dir + '/root/'
    overlay_archive = self.description_dir + '/root.tar.gz'
    if os.path.exists(overlay_directory):
        log.info('Copying user defined files to image tree')
        rsync_options = [
            '-r', '-p', '-t', '-D', '-H', '-X', '-A', '--one-file-system'
        ]
        if follow_links:
            rsync_options.append('--copy-links')
        else:
            rsync_options.append('--links')
        if preserve_owner_group:
            rsync_options.append('-o')
            rsync_options.append('-g')
        Command.run(
            ['rsync'] + rsync_options + [
                overlay_directory, self.root_dir
            ]
        )
    elif os.path.exists(overlay_archive):
        log.info('Extracting user defined files from archive to image tree')
        archive = ArchiveTar(overlay_archive)
        archive.extract(self.root_dir)
def create_degraded_raid(self, raid_level):
    """
    create a raid array in degraded mode with one device missing.
    This only works in the raid levels 0(striping) and 1(mirroring)
    """
    if raid_level not in self.raid_level_map:
        raise KiwiRaidSetupError(
            'Only raid levels 0(striping) and 1(mirroring) are supported'
        )
    raid_device = None
    for raid_id in range(9):
        raid_device = '/dev/md' + format(raid_id)
        if os.path.exists(raid_device):
            raid_device = None
        else:
            break
    if not raid_device:
        raise KiwiRaidSetupError(
            'Could not find free raid device in range md0-8'
        )
    log.info(
        'Creating raid array in %s mode as %s', raid_level, raid_device
    )
    Command.run(
        [
            'mdadm', '--create', '--run', raid_device,
            '--level', self.raid_level_map[raid_level],
            '--raid-disks', '2',
            self.storage_provider.get_device(), 'missing'
        ]
    )
    self.raid_device = raid_device
def create_gnu_gzip_compressed(self, source_dir, exclude=None):
    Command.run(
        [
            'tar', '-C', source_dir, '--format=gnu', '-cSz',
            '-f', self.filename + '.gz'
        ] + self.__get_archive_items(source_dir, exclude)
    )
def install(self):
    """
    install bootloader on self.device
    """
    log.info('Installing grub2 on disk %s', self.device)
    device_map_file = NamedTemporaryFile()
    with open(device_map_file.name, 'w') as device_map:
        device_map.write('(hd0) %s\n' % self.device)
    # The following copy action is only needed because grub2-probe
    # is not able to resolve the canonical path of the boot directory
    # if it lives on e.g a tmpfs. However building an image in a tmpfs
    # is done pretty often to increase the build performance. In order
    # to make grub happy we have to copy out the boot data with the
    # hope that /boot of the build host system is not on a filesystem
    # which causes grub2-probe to fail again
    self.temporary_boot_dir = mkdtemp(prefix='kiwi_bootloader.')
    Command.run(
        ['cp', '-a', self.root_dir + '/boot/', self.temporary_boot_dir]
    )
    Command.run(
        [
            'grub2-bios-setup', '-f',
            '-d', self.temporary_boot_dir + '/boot/grub2/i386-pc',
            '-m', device_map_file.name,
            self.device
        ]
    )
def setUp(self): self.workspace = "test_rm" Command.cmd_init(self.workspace) self.path = "1.txt" content = "1\n" write_to_file(self.path, content) Command.cmd_add(self.path)
def __create_live_iso_kernel_and_initrd(self):
    boot_path = self.media_dir + '/boot/x86_64/loader'
    Path.create(boot_path)
    kernel = Kernel(self.boot_image_task.boot_root_directory)
    if kernel.get_kernel():
        kernel.copy_kernel(boot_path, '/linux')
    else:
        raise KiwiLiveBootImageError(
            'No kernel in boot image tree %s found' %
            self.boot_image_task.boot_root_directory
        )
    if self.machine and self.machine.get_domain() == 'dom0':
        if kernel.get_xen_hypervisor():
            kernel.copy_xen_hypervisor(boot_path, '/xen.gz')
        else:
            raise KiwiLiveBootImageError(
                'No hypervisor in boot image tree %s found' %
                self.boot_image_task.boot_root_directory
            )
    self.boot_image_task.create_initrd(self.mbrid)
    Command.run(
        [
            'mv', self.boot_image_task.initrd_filename,
            boot_path + '/initrd'
        ]
    )
def __init__(self):
    tree = []
    # this command needs two levels, base and a yes/no
    base = Node(['weather', 'forecast'], self.base)
    affirmFull = Node(['yes', 'ok'], self.affirmFull)
    daffirmFull = Node(['no'], self.daffirmFull)
    tree = [[base], [affirmFull, daffirmFull]]
    Command.__init__(self, tree)
def __init__(self, ifName, newValue):
    Command.__init__(self)
    self.ifName = ifName
    self.newValue = newValue
    self.oldValue = str(self.ifc.getNetmask(ifName))
import sys

# add search paths for the classes
sys.path.append('./classes/import')
sys.path.append('./classes/commands')

# import the classes
from importData import ImportData
from command import Command

# import the data from a file
i = ImportData('./data/input/testDb.txt')
i.importFromTxt()

# create an object for handling commands
cmd = Command(i.getDbs())

# a command
req = {"command": "find", "table": "sales"}

# execute the command
cmd.doCommand(req)

req = {
    "command": "remove",
    "table": "sales",
    "condition": {
        "=": ['city', 'London']
    }
}
cmd.doCommand(req)
def DeployExe(request):
    global username, role, clientip
    username = request.user.username
    try:
        role = request.user.userprofile.role
    except Exception:
        role = 'none'
    clientip = request.META['REMOTE_ADDR']
    if request.is_websocket():
        for postdata in request.websocket:
            logger.info('%s is requesting. %s execution parameters: %s' % (
                clientip, request.get_full_path(), postdata))
            # logger.info(type(postdata))
            data = json.loads(postdata)
            ### step one ###
            info_one = {}
            info_one['step'] = 'one'
            request.websocket.send(json.dumps(info_one))
            time.sleep(1)
            ### final step ###
            info_final = {}
            info_final['step'] = 'final'
            info_final['minion_all'] = len(data['minion_id'])
            info_final['minion_count'] = 0
            # set timeout for specific module
            if data['module'] == 'init':
                timeout = 600
            elif data['module'] == 'tomcat':
                timeout = 1200
            elif data['module'] == 'php':
                timeout = 1800
            else:
                timeout = 300
            # execute deploy module
            for minion_id in data['minion_id']:
                info_final['minion_id'] = minion_id
                info_final['module'] = data['module']
                info_final['project'] = data['project']
                info_final['result'] = ""
                request.websocket.send(json.dumps(info_final))
                logger.info('deployment parameters: %s' % info_final)
                info_final['minion_count'] += 1
                if data['module'] == 'tomcat':
                    commandexe = Command(
                        'WTT_100_109', 'cmd.run',
                        '/srv/shell/install_tomcat.sh %s %s' % (minion_id, data['project']),
                        'glob', timeout=timeout)
                    info_final['result'] = commandexe.CmdRun()['WTT_100_109']
                    logger.info("%s deployment finished." % data['project'])
                else:
                    commandexe = Command(
                        minion_id, 'state.sls', data['module'],
                        'glob', timeout=timeout)
                    info_final['result'] = commandexe.StateSls()[minion_id]
                    logger.info("%s deployment finished." % data['module'])
                request.websocket.send(json.dumps(info_final))
def pushImage(dockerImageTagList, sshHost, sshIdentityFile, sshPort, primeImages, registryPort):
    # Setup remote docker registry
    print("Setting up secure private registry... ")
    registryCommandResult = Command("ssh", [
        "-i", sshIdentityFile,
        "-p", sshPort,
        "-o", "StrictHostKeyChecking=no",
        "-o", "UserKnownHostsFile=/dev/null",
        sshHost,
        "sh -l -c \"docker run -d -v /etc/docker-push-ssh/registry:/var/lib/registry " +
        "--name docker-push-ssh-registry -p 127.0.0.1:{0}:5000 registry\"".format(registryPort)
    ]).execute()

    if registryCommandResult.failed():
        print("ERROR")
        print(registryCommandResult.stdout)
        print(registryCommandResult.stderr)
        return False

    try:
        # Establish ssh tunnel
        print("Establishing SSH Tunnel...")
        sshTunnelCommandResult = Command("docker", [
            "run", "-d",
            "--name", "docker-push-ssh-tunnel",
            "-p", "127.0.0.1:5000:5000",
            "-v", "{0}:/etc/ssh_key_file".format(sshIdentityFile),
            "brthornbury/docker-alpine-ssh",
            "ssh", "-N",
            "-L", "*:5000:localhost:{0}".format(registryPort),
            "-i", "/etc/ssh_key_file",
            "-o", "StrictHostKeyChecking=no",
            "-o", "UserKnownHostsFile=/dev/null",
            "-p", sshPort,
            sshHost
        ]).environment_dict(os.environ).execute()

        if sshTunnelCommandResult.failed():
            print("ERROR")
            print(sshTunnelCommandResult.stdout)
            print(sshTunnelCommandResult.stderr)
            return False

        print("Waiting for SSH Tunnel Initialization...")
        if not waitForSshTunnelInit():
            print("ERROR")
            print("SSH Tunnel failed to initialize.")
            logsCmd = Command("docker", ["logs", "docker-push-ssh-tunnel"]).environment_dict(os.environ).execute()
            print(logsCmd.stdout, logsCmd.stderr)
            return False

        print("Priming Registry with base images...")
        for primeImage in (primeImages or []):
            print("Priming base image ({0})".format(primeImage))
            primingCommand = Command("ssh", [
                "-i", sshIdentityFile,
                "-p", sshPort,
                "-o", "StrictHostKeyChecking=no",
                "-o", "UserKnownHostsFile=/dev/null",
                sshHost,
                "sh -l -c \"docker pull {0}".format(primeImage) +
                " && docker tag {0} localhost:{1}/{0} && docker push localhost:{1}/{0}\"".format(primeImage, registryPort)
            ]).execute()

            if primingCommand.failed():
                print("ERROR")
                print(primingCommand.stdout)
                print(primingCommand.stderr)
                return False

        print("Tagging image(s) for push...")
        for dockerImageTag in dockerImageTagList:
            tagCommandResult = Command("docker", [
                "tag",
                dockerImageTag,
                "localhost:5000/{0}".format(dockerImageTag)
            ]).environment_dict(os.environ).execute()

            if tagCommandResult.failed():
                print("ERROR")
                print(tagCommandResult.stdout)
                print(tagCommandResult.stderr)
                return False

        print("Pushing Image(s) from local host...")
        for dockerImageTag in dockerImageTagList:
            pushDockerImageCommandResult = Command("docker", [
                "push",
                "localhost:5000/{0}".format(dockerImageTag)
            ]).environment_dict(os.environ).execute()

            if pushDockerImageCommandResult.failed():
                print("ERROR")
                print(pushDockerImageCommandResult.stdout)
                print(pushDockerImageCommandResult.stderr)
                print("Error Pushing Image: Ensure localhost:5000 is added to your insecure registries.")
                print("More Details (OS X): "
                      "https://stackoverflow.com/questions/32808215/where-to-set-the-insecure-registry-flag-on-mac-os")
                return False

            print("Pushed Image {0} Successfully...".format(dockerImageTag))

        print("Pulling and Retagging Image on remote host...")
        for dockerImageTag in dockerImageTagList:
            pullDockerImageCommandResult = Command("ssh", [
                "-i", sshIdentityFile,
                "-p", sshPort,
                "-o", "StrictHostKeyChecking=no",
                "-o", "UserKnownHostsFile=/dev/null",
                sshHost,
                "sh -l -c \"docker pull " +
                "localhost:{1}/{0}".format(dockerImageTag, registryPort) +
                " && docker tag localhost:{1}/{0} {0}\"".format(dockerImageTag, registryPort)
            ]).execute()

            if pullDockerImageCommandResult.failed():
                print("ERROR")
                print(pullDockerImageCommandResult.stdout)
                print(pullDockerImageCommandResult.stderr)
                return False

            print("Pulled Image {0} Successfully...".format(dockerImageTag))
    finally:
        print("Cleaning up...")
        Command("ssh", [
            "-i", sshIdentityFile,
            "-p", sshPort,
            "-o", "StrictHostKeyChecking=no",
            "-o", "UserKnownHostsFile=/dev/null",
            sshHost,
            "sh -l -c \"docker rm -f docker-push-ssh-registry\""
        ]).execute()
        Command("docker", [
            "rm", "-f", "docker-push-ssh-tunnel"
        ]).environment_dict(os.environ).execute()
        for dockerImageTag in dockerImageTagList:
            Command("docker", [
                "image", "rm",
                "localhost:5000/{0}".format(dockerImageTag)
            ]).environment_dict(os.environ).execute()

    return True
import Queue
from threading import Thread

from command import Command
from configuration import ClonerConfiguration
from goreplay import GoReplayCommand

configuration = ClonerConfiguration("./").load()
goReplayCommand = GoReplayCommand(configuration, "/home/banan/goreplay").build_string()
goReplay = Command(goReplayCommand, shell=True)
results = Queue.Queue()


def execute(goReplay, results):
    results.put(goReplay.execute())


def stop(goReplay):
    return goReplay.interrupt()


if __name__ == '__main__':
    t = None
    try:
        t = Thread(target=execute, args=[goReplay, results])
        t.setName("GoReplay")
        t.setDaemon(True)
        t.start()
    finally:
        print("==== Good bye !")
def run_client(self, client_name, write_config=False):
    self._client = Client(self, client_name)
    ret = []
    if write_config:
        msg = 'Writing config file for client {}'.format(client_name)
    else:
        msg = 'Starting image for client {} on port {}'.format(
            client_name, self.client.port)
    if write_config:
        command = 'sudo docker run --rm '
    else:
        if self.debug:
            command = 'sudo docker run --rm -it '
        else:
            command = 'sudo docker run -d '
    if self.client.get_image('aeroo'):
        command += '--link aeroo:aeroo '
    # open port for wdb
    if self.debug:
        command += '-p 1984:1984 '
    # expose the ports only if we do not have nginx
    if not self.nginx:
        command += '-p {}:8069 '.format(self.client.port)
        command += '-p 8072:8072 '
    command += self._add_normal_mountings()
    if self.debug:
        command += self._add_debug_mountings(self.client.numeric_ver)
    command += '--link pg-{}:db '.format(self.client.name)
    if not (self.debug or write_config):
        command += '--restart=always '
    command += '--name {} '.format(self.client.name)
    if write_config:
        command += self.set_config_environment()
    else:
        command += '-e ODOO_CONF=/dev/null '
    # if we are in debug mode, add it to the image name
    if self.debug:
        command += '-e SERVER_MODE=test '
        command += '{}.debug '.format(self.client.get_image('odoo').name)
    else:
        command += '-e SERVER_MODE= '
        command += '{} '.format(self.client.get_image('odoo').name)
    if not self.debug:
        command += '--logfile=/var/log/odoo/odoo.log '
    else:
        command += '--logfile=/dev/stdout '
    if write_config:
        command += '--stop-after-init '
    cmd = Command(
        self,
        command=command,
        usr_msg=msg,
    )
    ret.append(cmd)

    ##################################################################
    # Launching nginx proxy if needed
    ##################################################################
    if self.nginx:
        msg = 'Starting nginx reverse proxy'
        image = self.client.get_image('nginx')
        nginx_dir = self.client.nginx_dir
        command = 'sudo docker run -d '
        command += '-v {}conf:/etc/nginx/conf.d:ro '.format(nginx_dir)
        command += '-v {}cert:/etc/letsencrypt/live/certificadositio '.format(
            nginx_dir)
        command += '-v {}log:/var/log/nginx/ '.format(nginx_dir)
        command += '-p 80:80 '
        command += '-p 443:443 '
        command += '--name={} '.format(image.short_name)
        command += '--link {}:odoo '.format(client_name)
        command += '--restart=always '
        command += image.name
        cmd = Command(
            self,
            command=command,
            usr_msg=msg,
        )
        ret.append(cmd)
    return ret
def install(self, client_name):
    """ Client installation """
    self._client = Client(self, client_name)
    ret = []
    ##################################################################
    # Create base dir with sudo
    ##################################################################
    msg = 'Installing client {}'.format(client_name)
    cmd = MakedirCommand(
        self,
        command='sudo mkdir {}'.format(BASE_DIR),
        args=BASE_DIR,
        usr_msg=msg)
    ret.append(cmd)
    ##################################################################
    # change ownership of base dir
    ##################################################################
    username = pwd.getpwuid(os.getuid()).pw_name
    cmd = Command(
        self,
        command='sudo chown {}:{} {}'.format(username, username, BASE_DIR))
    ret.append(cmd)
    ##################################################################
    # create all client hierarchy
    ##################################################################
    for w_dir in ['postgresql', 'config', 'data_dir', 'backup_dir', 'log',
                  'sources']:
        r_dir = '{}{}'.format(self.client.base_dir, w_dir)
        cmd = MakedirCommand(
            self,
            command='mkdir -p {}'.format(r_dir),
            args='{}'.format(r_dir))
        ret.append(cmd)
    ##################################################################
    # create dir for common sources
    ##################################################################
    # r_dir = '{}'.format(self.client.sources_com)
    # cmd = MakedirCommand(
    #     self,
    #     command='mkdir -p {}'.format(r_dir),
    #     args='{}'.format(r_dir)
    # )
    # ret.append(cmd)
    ##################################################################
    # create dirs for extracting sources, only for debug
    ##################################################################
    if self.debug:
        # we do not extract dist-local-packages
        for w_dir in ['dist-packages', 'extra-addons']:
            r_dir = '{}{}'.format(self.client.version_dir, w_dir)
            cmd = MakedirCommand(
                self,
                command='mkdir -p {}'.format(r_dir),
                args='{}'.format(r_dir))
            ret.append(cmd)
    ##################################################################
    # change o+w for those dirs
    ##################################################################
    if self.debug:
        # we do not extract dist-local-packages
        for w_dir in ['dist-packages', 'extra-addons']:
            r_dir = '{}{}'.format(self.client.version_dir, w_dir)
            cmd = Command(self, command='chmod o+w {}'.format(r_dir))
            ret.append(cmd)
    ##################################################################
    # change o+w for config, data, log and backup_dir
    ##################################################################
    for w_dir in ['config', 'data_dir', 'log', 'backup_dir']:
        r_dir = '{}{}'.format(self.client.base_dir, w_dir)
        cmd = Command(self, command='chmod o+w {}'.format(r_dir))
        ret.append(cmd)
    ##################################################################
    # create dirs for nginx if needed
    ##################################################################
    if self.nginx:
        for w_dir in ['cert', 'conf', 'log']:
            r_dir = '{}{}'.format(BASE_DIR, 'nginx/' + w_dir)
            cmd = MakedirCommand(
                self,
                command='mkdir -p {}'.format(r_dir),
                args='{}'.format(r_dir))
            ret.append(cmd)
    ##################################################################
    # create nginx.conf template if needed. Do not overwrite
    ##################################################################
    if self.nginx:
        r_dir = '{}{}'.format(BASE_DIR, 'nginx/conf/')
        cmd = CreateNginxTemplate(
            self,
            command='{}nginx.conf'.format(r_dir),
            args='{}nginx.conf'.format(r_dir),
            usr_msg='Generating nginx.conf template')
        ret.append(cmd)
    ##################################################################
    # create dirs for postfix
    ##################################################################
    if self.postfix:
        r_dir = '{}{}'.format(BASE_DIR, 'postfix')
        cmd = MakedirCommand(
            self,
            command='mkdir -p {}'.format(r_dir),
            args='{}'.format(r_dir))
        ret.append(cmd)
    ##################################################################
    # Extracting sources from image if debug enabled
    ##################################################################
    if self.debug:
        # we do not extract dist-local-packages
        for module in ['dist-packages', 'extra-addons']:
            msg = 'Extracting {} from image {}.debug'.format(
                module, self.client.get_image('odoo').name)
            command = 'sudo docker run -it --rm '
            command += '--entrypoint=/extract_{}.sh '.format(module)
            command += '-v {}{}/:/mnt/{} '.format(
                self.client.version_dir, module, module)
            command += '{}.debug '.format(self.client.get_image('odoo').name)
            cmd = ExtractSourcesCommand(
                self,
                command=command,
                args='{}{}'.format(self.client.version_dir, module),
                usr_msg=msg,
            )
            ret.append(cmd)
    ##################################################################
    # Clone or update repos as needed
    ##################################################################
    ret += self._process_repos()
    return ret
def stop(self):
    print('Shutting down MongoDB...')
    Command.execute('pkill mongod')
def test_str(self):
    cmd = Command(["foo", "bar"])
    self.assertEqual("foo bar", str(cmd))
def dump(self):
    print('Dumping database...')
    Command.execute(
        '{0} --db healthdata --out {1}'.format(
            os.path.join(self.bin, 'mongodump'),
            os.path.join(self.parent, 'dump', 'mongodb')),
        self.parent)
def test_getoutput(self):
    cmd = Command(['/bin/ls', '/etc/passwd'])
    cmd.execute()
    self.assertEqual(['/etc/passwd\n'], cmd.getoutput())
def start(self):
    print('Starting MongoDB...')
    Command.execute(
        '{0} --config {1}'.format(
            os.path.join(self.bin, 'mongod'),
            os.path.join(self.base, 'mongod.conf')),
        self.parent)
def test_subst_append_exclsubst(self):
    cmd = Command(['foo', 'ARG', 'bar'],
                  args_subst={"ARG": "blah"},
                  args_append=["1", "2"],
                  excl_subst=True)
    self.assertEqual(['foo', 'blah', 'bar'], cmd.cmd)
def test_retcode(self):
    cmd = Command(["/bin/false"])
    cmd.execute()
    self.assertNotEqual(0, cmd.getretcode())
    self.assertEqual(Command.FINISHED, cmd.getstate())
    cmd = Command(["/bin/true"])
    cmd.execute()
    self.assertEqual(0, cmd.getretcode())
    self.assertEqual(Command.FINISHED, cmd.getstate())
def __init__(self):
    define.command = Command()
def test_execute_nonexistent(self):
    cmd = Command(['/baaah', '/etc/passwd'])
    cmd.execute()
    self.assertEqual(None, cmd.getretcode())
    self.assertEqual(Command.ERRORED, cmd.getstate())
from command import Command
from file_handler import FileHandler
from validator import Validator
from view import View
from db import DatabaseHandler
import pickle
import sys

# hasitha
try:
    database_name = sys.argv[1]
except IndexError:
    database_name = "db"

try:
    database = pickle.load(open(database_name + ".p", "rb"))
except FileNotFoundError:
    database = DatabaseHandler(Validator(), database_name)
    database.load()
    pickle.dump(database, open(database_name + ".p", "wb"))

cli = Command(FileHandler(Validator()), database, View())
cli.cmdloop()
def test_subst_append_default(self):
    cmd = Command(['foo', 'ARG', 'bar'],
                  args_subst={"ARG": "blah"},
                  args_append=["1", "2"])
    self.assertEqual(['foo', 'blah', 'bar', '1', '2'], cmd.cmd)
def monitorOutputFile(finalMessage):
    """Display the output file on the console."""
    global monitorOutputCommand
    monitorOutputCommand = Command('tail -f ' + outputFilename, ansiEscape)
    monitorOutputCommand.run(expectedString=finalMessage, silent=False)
def __init__(self, name):
    self.name = name
    self.cmd = Command()
def do_GET(self):
    # Serve URLs; the POST method is not used in html files.
    # URLs are at root level.
    # Rather than a long if-else chain, dictionary indexing
    # from path to function might be better?
    try:
        self.protocol_version = "HTTP/1.1"
        logger.debug("\nPath: %s", self.path)
        for header, value in self.headers.items():
            logger.debug("%s: %s", header, value)
        if self.path == '/':
            self.serve_index()
        elif self.path == '/reboot':
            self.redirect_to_home()
            command = Command(Command.CMD_REBOOT)
            WebInterfaceHandler.cmd_q.put(command)
            # Redirect before queuing command so that browser doesn't get
            # stuck at this command.
            """
            if command.wait():
                self.redirect_to_home()
            else:
                self.send_error(_HTTP_STATUS_CODE_REQUEST_TIMEOUT)
            """
        elif self.path == '/poweroff':
            self.redirect_to_home()
            command = Command(Command.CMD_SHUTDOWN)
            WebInterfaceHandler.cmd_q.put(command)
            """
            if command.wait():
                self.redirect_to_home()
            else:
                self.send_error(_HTTP_STATUS_CODE_REQUEST_TIMEOUT)
            """
        elif self.path == '/view-records':
            self.serve_view_records()
        elif '/get-record' in self.path:
            # This URL will have parameters: ?f=<name>
            # convert them into key value pairs.
            kv = self.parse_get_params()
            if 'f' in kv:
                self.serve_record(kv['f'][0], True)
            else:
                self.send_error(_HTTP_STATUS_CODE_BAD_REQUEST)
        elif '/play-record' in self.path:
            # This URL will have parameters: ?f=<name>
            # convert them into key value pairs.
            kv = self.parse_get_params()
            if 'f' in kv:
                self.serve_record(kv['f'][0], False)
            else:
                self.send_error(_HTTP_STATUS_CODE_BAD_REQUEST)
        elif self.path == '/rotate':
            command = Command(Command.CMD_ROTATE)
            WebInterfaceHandler.cmd_q.put(command)
            if command.wait():
                self.redirect_to_home()
            else:
                self.send_error(_HTTP_STATUS_CODE_REQUEST_TIMEOUT)
        elif self.path == '/livesnap':
            self.serve_snap()
        elif self.path == '/stop':
            command = Command(Command.CMD_STOP_REC)
            WebInterfaceHandler.cmd_q.put(command)
            if command.wait():
                self.redirect_to_home()
            else:
                self.send_error(_HTTP_STATUS_CODE_REQUEST_TIMEOUT)
        elif self.path == '/start':
            command = Command(Command.CMD_START_REC)
            WebInterfaceHandler.cmd_q.put(command)
            if command.wait():
                self.redirect_to_home()
            else:
                self.send_error(_HTTP_STATUS_CODE_REQUEST_TIMEOUT)
        elif self.path.endswith(config.RECORD_FORMAT_EXTENSION):
            self.serve_record(self.path.lstrip("/"), False)
        else:
            msg = "Requested URI: '" + self.path + "' not found."
            self.send_error(_HTTP_STATUS_CODE_NOT_FOUND, explain=msg)
        return
    except Exception as e:
        logger.error(traceback.format_exc())
        logger.error(e)
#
# commands.py
# Contains routing functions needed for eamuse functionality
#

from command import Command

from CroBot.features.eamuse import embeds
from CroBot.features.eamuse import maintenance as maint

eamuse_command = Command('!eamuse')


@eamuse_command.register('maintenance')
async def maintenance(client, message):
    """
    maintenance:
    Returns the times of maintenance in 4 time zones (JST, EST/EDT, CST/CDT, PST/PDT).
    :param client: Not used, sent by default from commands
    :param message: The message to reply to
    :return: N/A
    """
    await message.channel.send(embed=embeds.maintenance(
        await maint.check_dst(),
        await maint.calculate_maintenance_times()))
class QuarkusApplication(object):

    cmd = Command()
    image_name_with_tag = "quay.io/pmacik/using-spring-data-jqa-quarkus:latest"
    openshift = Openshift()

    name = ""
    namespace = ""
    deployment_name_pattern = "{name}-\\w+-deployment"

    def __init__(self, name, namespace):
        self.name = name
        self.namespace = namespace

    def install(self):
        knative_service_output = self.openshift.create_knative_service(
            self.name, self.namespace, self.image_name_with_tag)
        output = re.search(
            r'.*service.serving.knative.dev/%s\s(created|configured|unchanged)' % self.name,
            knative_service_output)
        assert output is not None, f"Knative serving is not created as the result is {knative_service_output}"
        return True

    def get_pod_name_running(self, pod_name_pattern, wait=False):
        if wait:
            pod_name = self.openshift.wait_for_pod(
                self.format_pattern(pod_name_pattern), self.namespace, timeout=500)
        else:
            pod_name = self.openshift.search_pod_in_namespace(
                self.format_pattern(pod_name_pattern), self.namespace)
        return pod_name

    def is_imported(self, wait=False, interval=5, timeout=600):
        deployment_name = self.openshift.get_deployment_name_in_namespace(
            self.format_pattern(self.deployment_name_pattern),
            self.namespace, wait=wait, timeout=timeout)
        if deployment_name is None:
            return False
        else:
            deployment_replicas = self.openshift.get_resource_info_by_jsonpath(
                "deployment", deployment_name, self.namespace, "{.status.replicas}")
            assert deployment_replicas.isnumeric(), \
                f"Number of replicas of deployment '{deployment_name}' should be " \
                f"a numerical value, but is actually: '{deployment_replicas}'"
            assert int(str(deployment_replicas)) > 0, \
                f"Number of replicas of deployment '{deployment_name}' should be " \
                f"greater than 0, but is actually: '{deployment_replicas}'."
            return True

    def get_response_from_api(self, endpoint, wait=False, interval=5, timeout=300):
        route_url = self.openshift.get_knative_route_host(self.name, self.namespace)
        if route_url is None:
            return None
        url = f"{route_url}{endpoint}"
        if wait:
            start = 0
            while (start + interval) <= timeout:
                db_name = requests.get(url)
                if db_name.status_code == 200:
                    return db_name.text
                time.sleep(interval)
                start += interval
        else:
            db_name = requests.get(url)
            if db_name.status_code == 200:
                return db_name.text
        return None

    def get_observed_generation(self):
        deployment_name = self.openshift.get_deployment_name_in_namespace(
            self.format_pattern(self.deployment_name_pattern), self.namespace)
        return self.openshift.get_resource_info_by_jsonpath(
            "deployment", deployment_name, self.namespace,
            "{.status.observedGeneration}")

    def format_pattern(self, pattern):
        return pattern.format(name=self.name)

    def get_redeployed_rev_name(self, old_rev_name, interval=5, timeout=300):
        start = 0
        while (start + interval) <= timeout:
            revisions = self.openshift.get_revisions(self.namespace)
            for rev in revisions:
                if rev != old_rev_name and re.match(self.name, rev) is not None:
                    new_revision = self.openshift.get_last_revision_status(
                        rev, self.namespace)
                    if new_revision == 'True':
                        return rev
            time.sleep(interval)
            start += interval
        return None

    def get_rev_name_redeployed_by_generation(self, old_generation, interval=5, timeout=300):
        start = 0
        while (start + interval) <= timeout:
            current_generation = self.get_generation()
            revisions = self.openshift.get_revisions(self.namespace)
            for rev in revisions:
                if (current_generation > old_generation) and (
                        re.match(self.name, rev) is not None):
                    new_revision = self.openshift.get_last_revision_status(
                        rev, self.namespace)
                    if new_revision == 'True':
                        return rev
            time.sleep(interval)
            start += interval
        return None

    def get_generation(self):
        deployment_name = self.openshift.get_deployment_name_in_namespace(
            self.format_pattern(self.deployment_name_pattern), self.namespace)
        return self.openshift.get_resource_info_by_jsonpath(
            "deployment", deployment_name, self.namespace,
            "{.metadata.generation}")

    def get_deployment_with_intermediate_secret(self, intermediate_secret_name):
        return self.openshift.get_deployment_with_intermediate_secret_of_given_pattern(
            intermediate_secret_name,
            self.format_pattern(self.deployment_name_pattern),
            self.namespace, wait=True, timeout=120)
        protosTargetDirectory + os.sep + protoFileName)
    worldsCount = generateWorldsList(groupName, worldsFilename)
    firstSimulation = findFirstWorldFilename(worldsFilename)
    if not os.path.exists(firstSimulation):
        continue
    resetIndexFile(indexFilename)
    # Here is an example to run Webots in gdb and display the stack
    # when it crashes. This is particularly useful for debugging on
    # the Jenkins server:
    # command = Command('gdb -ex run --args ' + webotsFullPath + '-bin ' +
    #                   firstSimulation + ' --mode=fast --no-rendering --minimize')
    # command.run(silent=False)
    command = Command(webotsFullPath + ' ' + firstSimulation + ' ' + webotsArguments)
    # redirect stdout and stderr to files
    command.runTest(timeout=10 * 60)  # 10 minutes
    if command.isTimeout or command.returncode != 0:
        if command.isTimeout:
            failures += 1
            appendToOutputFile('FAILURE: Webots has been terminated ' +
                               'by the test suite script\n')
        else:
            failures += 1
            appendToOutputFile(
                'FAILURE: Webots exits abnormally with this error code: ' +
                str(command.returncode) + '\n')
        testFailed = True
def __volume_group_in_use_on_host_system(self, volume_group_name):
    vgs_call = Command.run(['vgs', '--noheadings', '-o', 'vg_name'])
    for host_volume_group_name in vgs_call.output.split('\n'):
        if volume_group_name in host_volume_group_name:
            return True
    return False
def __init__(self, flow, cmd_name, params):
    Command.__init__(self, flow, cmd_name, params)
def __init__(self, partname, unit, idx, skiplist=None):
    self._unitstr = '-' + unit
    self._idx = idx
    self._partname = partname
    self._cmd = Command()
    # use None as the default to avoid a shared mutable default argument
    self._skiplist = skiplist if skiplist is not None else []
from command import Command
from player import Player
import map
import utils

# Startup by initializing the world
player = Player(map.start)

# Display the introduction to the story
utils.type_slow(['You wake up itchy.', 'You have no idea how you got here.'])
print()

# Loop until the end condition is met
while player.is_alive():
    # Describe the current place
    player.get_place().describe()
    # prompt for command
    command = Command(player, "> ")
    # execute command
    try:
        command.execute()
    except Exception:
        print('Something went wrong')
    # whitespace between turns
    print()

# game over
utils.type_slow(['Game Over', 'Thanks for playing!'])
def init_ticket(echo="echo"): "" from command import Command destroy_ticket() Command('kinit -f -r 90d -l 1d -c $ticketpath').run(echo=echo)