def extract_apk(apk_path, label):
    # Unpack an APK with apktool, walk the generated smali, then clean up.
    tmp_path = os.path.join(py_path, tmp_sufix)
    myapk = apk_info(apk_path, tmp_path)
    # TODO: the output path is fixed to [this py file]/../tmp/[apkname].out;
    # consider taking it from argv instead. (This local is currently unused;
    # the command below uses myapk.output_path.)
    output_path = os.path.join(tmp_path, myapk.base) + ".out"
    # Decode the APK with apktool.
    command = "apktool d {} -o {}".format(apk_path, myapk.output_path)
    exec_command(command, print_command)
    # Parse the smali and save it in a suitable form (matrix?).
    if os.path.exists(myapk.walk):
        myapk.iter_all_smali(myapk.walk)
    else:
        # No smali directory: generate smali from the .class files first.
        smali_path = myapk.gen_smali_from_class(myapk.exec_dir, myapk.base)
        myapk.iter_all_smali(smali_path)
    print(api_list)
    print(len(api_list))
    # Remove the data generated by apktool.
    command = "rm -rf " + myapk.output_path
    exec_command(command, print_command)
def gen_userfs(mkfs, fstype, output_path, userfs_dirs_dict):
    userfs = os.path.join(output_path, 'userfs')
    userfs_etc = os.path.join(output_path, 'userfs/etc')
    if make_userfs_dir(userfs):
        return 0
    if make_userfs_dir(userfs_etc):
        return 0
    move_rootfs_to_userfs(output_path)
    log = os.path.join(output_path, 'build.log')
    tee_into_userfs(output_path, userfs)
    # value_list is [target subdir, ignore list for copy, strip command].
    for path_part, value_list in userfs_dirs_dict.items():
        source_path = os.path.join(output_path, path_part)
        target_path = os.path.join(userfs, value_list[0])
        strip_cmd = value_list[2]
        if os.path.exists(source_path):
            if not os.path.exists(target_path):
                makedirs(target_path)
            ignore_list = value_list[1]
            copy(source_path, target_path, ignore_list)
            check_strip(target_path, strip_cmd, log)
    change_userfs_filemode(userfs)
    if fstype == "vfat":
        # vfat images need an explicit size argument
        # (52428800 = 50 * 1024 * 1024).
        cmd = [mkfs, userfs, fstype, '52428800']
    else:
        cmd = [mkfs, userfs, fstype]
    exec_command(cmd, log_path=log)
    return 0
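# A minimal usage sketch for gen_userfs (hypothetical values, inferred from
# the value_list indexing above): each key is a sub-path under output_path,
# and each value is [target subdir inside userfs, ignore list passed to
# copy(), strip command string].
#
# userfs_dirs_dict = {
#     'libs/usr': ['usr/lib', ['*.a'], 'arm-none-eabi-strip'],
#     'bin':      ['bin',     [],      ''],   # empty string: no stripping
# }
# gen_userfs('/path/to/mkfs_tool', 'vfat', '/path/to/out', userfs_dirs_dict)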
def runMain():
    if utils.inputMatch([]):
        utils.print_header('deep')
        utils.print_option("full", "cluster->restore->download deep eval")
        utils.print_option("restore-eval", "restore deep models evaluations")
        utils.print_option("upload-eval", "tar and upload deep models evaluations")
        utils.print_option("restore-model", "restore deep models (download from bucket and untar)")
        utils.print_option("eval-tomaster", "copy evals from slaves to master")
        utils.print_option("eval-frommaster", "copy evals from master to slaves")
        utils.print_option("eval-permanent", "copy evals from slaves to master (permanent)")
        utils.print_option("eval-sync", "eval-tomaster->eval-frommaster")
        utils.print_header('')
    if utils.inputMatch(['full']):
        full()
    if utils.inputMatch(['restore-eval']):
        restoreDeepEval()
    if utils.inputMatch(['restore-model']):
        restoreDeepModels()
    if utils.inputMatch(['eval-tomaster']):
        syncEvalToMaster()
    if utils.inputMatch(['eval-frommaster']):
        syncEvalFromMaster()
    if utils.inputMatch(['upload-eval']):
        uploadDeepEval()
    if utils.inputMatch(['eval-permanent']):
        permanentEvalToMaster()
    if utils.inputMatch(['upload-subdirs'], doPartial=True):
        host = sys.argv[2]
        source = sys.argv[3]
        prefix = sys.argv[4]
        suffix = sys.argv[5]
        # List the sub-directories of `source` on the remote host, copy the
        # listing back locally, and build the upload command from it.
        utils.ssh(cluster.ipGetAny(host),
                  'cd {0};ls -1 -d */ > listdir.txt'.format(source))
        utils.exec_command('gcloud compute copy-files ' + host + ':' +
                           os.path.join(source, 'listdir.txt') + ' .')
        with open("listdir.txt", "r") as myfile:
            data = myfile.readlines()
        utils.exec_command('rm listdir.txt')
        data = [os.path.dirname(d.rstrip()) for d in data]
        command = getUploadCommand(source, data, prefix, suffix)
        print command
        utils.ssh(cluster.ipGetAny(host), command)
def check_strip(path, strip_cmd, log):
    # Run the strip command on every regular file under `path`.
    if strip_cmd == "":
        return
    strip_cmd_list = strip_cmd.split(" ")
    for dirpath, _, files in os.walk(path):
        for file in files:
            # os.walk already yields dirpath rooted at `path`, so do not
            # join `path` in again.
            full_path = os.path.join(dirpath, file)
            if os.path.isfile(full_path):
                cmd = strip_cmd_list + [full_path]
                exec_command(cmd, log_path=log)
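# Usage sketch for check_strip (hypothetical toolchain name): the strip
# command is a single space-separated string, so flags can be embedded and
# each target file is appended as the last argument.
#
# check_strip('/path/to/out/rootfs/bin',
#             'arm-linux-musleabi-strip --strip-debug',
#             '/path/to/out/build.log')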
def launchUi():
    # Open the master's web UIs (ports 8080 and 4040) in Firefox.
    (listnodes, nodes) = cluster.instanceListAll()
    mastername = listnodes['master'][0]
    master = nodes[mastername]
    masterIp = master['externalIp']
    utils.exec_command('firefox --new-window')
    command = 'firefox --new-tab ' + \
        masterIp + ':8080 --new-tab ' + masterIp + ':4040'
    utils.exec_command(command)
def node_erase(self, port, command, image_path):
    if port in self.serials_locks:
        with self.serials_locks[port]:
            res = utils.exec_command(port, command, image_path)
            if res == 0:
                # Erase succeeded: drop the port from the tracked serials.
                with self.lock_serials:
                    self.serials = [
                        _serial for _serial in self.serials
                        if _serial.port != port
                    ]
    else:
        # New serial connected. Erase memory!
        res = utils.exec_command(port, command, image_path)
    return res
def _submit_jobs(commands):
    for command in commands:
        out, err, code = utils.exec_command(command)
        if code != 0:
            logger.error(
                "Error when submitting job with %s (exit code %d): "
                "stdout: %s, stderr: %s",
                command, code, out, err)
def get_volume_profile_info(self, volName, cluster_id):
    ret_val = {}
    brickName = ''
    profile_info = {}
    # Retry up to three times; gluster can fail transiently under load.
    for trial_cnt in xrange(0, 3):
        profile_cmd_op, profile_err = tendrl_glusterfs_utils.exec_command(
            "gluster volume profile %s info --xml" % volName)
        if profile_err:
            time.sleep(5)
            if trial_cnt == 2:
                collectd.error(
                    'Failed to fetch profile info. The error is: %s' %
                    (profile_err))
                return ret_val
            continue
        else:
            break
    try:
        profile_info = self._parseVolumeProfileInfo(
            ElementTree.fromstring(profile_cmd_op))
        return profile_info
    except (AttributeError, KeyError, ValueError, ElementTree.ParseError):
        collectd.error(
            'Failed to collect iops details of brick %s in volume %s of '
            'cluster %s. The profile info is %s. Error %s' %
            (brickName, volName, cluster_id, str(profile_info),
             traceback.format_exc()))
        return ret_val
def node_flash(self, port, command, image_path):
    with self.serials_locks[port]:
        res = utils.exec_command(port, command, image_path)
        if res == 0:
            # Flash succeeded: reopen the serial and track it again.
            ser = serial.Serial(port=port, baudrate=115200, timeout=0)
            self.serials.append(ser)
    return res
def get_last_commit_date(repo_name, temp_dir):
    # Return the author timestamp (%at, Unix epoch seconds) of the most
    # recent non-merge commit in the repository.
    repo_path = join(temp_dir, repo_name)
    success, out, err = exec_command([
        'bash', '-c',
        'cd {} && git log --format=%at --no-merges -n 1'.format(repo_path)
    ])
    return (success, out, err)
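# Usage sketch (assumes exec_command returns git's stdout as a string):
# `git log --format=%at` prints the author timestamp in Unix epoch seconds,
# so the output can be converted to a datetime directly.
#
# success, out, err = get_last_commit_date('myrepo', '/tmp/repos')
# if success:
#     from datetime import datetime
#     last_commit = datetime.utcfromtimestamp(int(out.strip()))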
def get_volume_heal_info(vol):
    ret_val = []
    for trial_cnt in xrange(0, 3):
        vol_heal_op, vol_heal_err = \
            tendrl_glusterfs_utils.exec_command(
                "gluster volume heal %s statistics" % vol['name']
            )
        if vol_heal_err:
            time.sleep(5)
            if trial_cnt == 2:
                collectd.error('Failed to fetch volume heal statistics. '
                               'The error is: %s' % (vol_heal_err))
                return ret_val
            continue
        else:
            break
    try:
        vol_heal_info = _parse_self_heal_stats(vol_heal_op)
        # Attach the brick path to each heal entry by matching brick_index
        # against the volume's sub-volume layout.
        for idx, brick_heal_info in enumerate(vol_heal_info):
            for sub_vol_id, sub_vol in vol['bricks'].iteritems():
                for brick_idx, sub_vol_brick in enumerate(sub_vol):
                    if (sub_vol_brick['brick_index'] ==
                            brick_heal_info['brick_index']):
                        vol_heal_info[idx]['brick_path'] = \
                            sub_vol_brick['path']
                        ret_val.append(vol_heal_info[idx])
        return ret_val
    except (AttributeError, KeyError, ValueError):
        collectd.error('Failed to collect volume heal statistics. Error %s' %
                       (traceback.format_exc()))
        return ret_val
def node_reset(self, port, command, image_path):
    with self.serials_locks[port]:
        # Stop tracking the port while the reset runs.
        with self.lock_serials:
            self.serials = [
                _serial for _serial in self.serials
                if _serial.port != port
            ]
        res = utils.exec_command(port, command, image_path)
        if res == 0:
            # Reset succeeded: reopen the serial and track it again.
            ser = serial.Serial(port=port, baudrate=115200, timeout=0)
            self.serials.append(ser)
    return res
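# A minimal sketch of the state the node_* methods above assume (hypothetical
# initializer, not shown in the original): one threading.Lock per serial port
# guarding access to that port, plus a global lock guarding the shared
# `serials` list.
import threading
import serial

class NodeManager(object):
    def __init__(self, ports):
        self.serials_locks = {port: threading.Lock() for port in ports}
        self.lock_serials = threading.Lock()
        self.serials = [serial.Serial(port=p, baudrate=115200, timeout=0)
                        for p in ports]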
def gen_smali_from_class(self, path, base_name):
    # Build smali from .class files: pack them into a jar, convert the jar
    # to dex, then disassemble the dex to smali.
    print("path: ", path)
    jar_pos = os.path.join(path, base_name)
    command = "jar cf {} {}".format(jar_pos + ".jar",
                                    os.path.join(path, "*.class"))
    exec_command(command, print_command)
    command = "{} -f {} -o {}".format(
        os.path.join(dex2jar_path, j2dex_command),
        jar_pos + ".jar",
        jar_pos + "-jar2dex.dex")
    exec_command(command, print_command)
    command = "{} {} -o {}".format(
        os.path.join(dex2jar_path, dex2smali_command),
        jar_pos + "-jar2dex.dex",
        jar_pos + "-jar2dex.out")
    exec_command(command, print_command)
    # jar_pos already contains `path`, so do not join it in again.
    return jar_pos + "-jar2dex.out"
def get_brick_source_and_mount(self, brick_path):
    # "source" and "mount point" correspond to the "Filesystem" and
    # "Mounted on" fields of df output. The command below prints both
    # for a given path, e.g.:
    #   /dev/mapper/tendrlMyBrick4_vg-tendrlMyBrick4_lv
    #   /tendrl_gluster_bricks/MyBrick4_mount
    command = "df --output=source,target " + brick_path
    out, err = gluster_utils.exec_command(command)
    if err:
        return None
    mount_source, mount_point = out.split("\n")[-2].split()
    # Return both values, as the function name promises (the original
    # returned only mount_point, leaving mount_source unused).
    return mount_source, mount_point
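# For reference, `df --output=source,target <path>` prints a header row
# followed by one data row, which is why the code takes line [-2] after
# splitting on "\n" (df output ends with a trailing newline):
#
#   Filesystem                                       Mounted on
#   /dev/mapper/tendrlMyBrick4_vg-tendrlMyBrick4_lv  /tendrl_gluster_bricks/MyBrick4_mount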
def get_posted_ids(cls):
    # Read the last 50 lines of the posted-ids file and return the numeric
    # ids (the part of each line before the first ":").
    if not os.path.isfile(POSTED_IDS_PATH):
        print "no file %s" % POSTED_IDS_PATH
        return []
    command = 'tail -n 50 %s' % POSTED_IDS_PATH
    lines = exec_command(command, output_as='array')
    result = []
    for line in lines:
        line = line.strip()
        if line and ":" in line:
            post_id = line.split(":")[0]
            if post_id and post_id.isdigit():
                result.append(int(post_id))
    print "posted ids: {0}".format(result)
    return result
def gen_rootfs(mkfs, fstype, output_path, rootfs_dirs_dict, kernel):
    mv_usr_libs(output_path)
    rootfs = os.path.join(output_path, 'rootfs')
    rootfs_tar = os.path.join(output_path, 'rootfs.tar')
    if not os.path.exists(rootfs):
        print('rootfs dir does not exist in {}'.format(rootfs))
        return 0
    log = os.path.join(output_path, 'build.log')
    # rootfs_dirs_dict has the same shape as userfs_dirs_dict in gen_userfs:
    # value_list is [target subdir, ignore list for copy, strip command].
    for path_part, value_list in rootfs_dirs_dict.items():
        source_path = os.path.join(output_path, path_part)
        target_path = os.path.join(rootfs, value_list[0])
        strip_cmd = value_list[2]
        if os.path.exists(source_path):
            if not os.path.exists(target_path):
                makedirs(target_path)
            ignore_list = value_list[1]
            copy(source_path, target_path, ignore_list)
            check_strip(target_path, strip_cmd, log)
    if kernel == "linux":
        change_rootfs_filemode_linux(rootfs)
        cmd = [mkfs, rootfs, "jffs2"]
        exec_command(cmd, log_path=log)
    if kernel == "liteos_a":
        remove_file_in_rootfs(output_path)
        change_rootfs_filemode(rootfs)
        cmd = [mkfs, rootfs, fstype]
        exec_command(cmd, log_path=log)
        make_rootfs_tar(rootfs_tar, rootfs)
    if os.path.exists(rootfs):
        # 511 == 0o777: open up permissions so the staging tree can be
        # removed even if it contains read-only files.
        chmod_files_mode(rootfs, 511, 511)
        shutil.rmtree(rootfs)
    return 0
def count_jobs(self):
    """Returns how many jobs with self.jobs_name are currently queued or
    running."""
    try:
        out, err, code = utils.exec_command("qstat")
    except OSError:
        raise RuntimeError("Couldn't run qstat, is it installed?")
    if code:
        raise RuntimeError(
            "qstat failed with code %d: stdout: %s, stderr: %s" %
            (code, out, err))
    lines_with_jobname = [
        l for l in out.splitlines() if self.jobs_name in l
    ]
    return len(lines_with_jobname)
def run_command(args, config):
    general_config = config.get('general')
    account_config = config.get(args.account)
    if account_config is None:
        show_error("Account configuration '%s' not found" % args.account)
        sys.exit(1)
    use_tls = bool(account_config.get('remote.use_tls'))
    address = account_config.get('remote.host')
    port = int(account_config.get('remote.port'))
    sieve = ManageSieveClient(address, port, use_tls=use_tls)
    sieve.connect()
    username = account_config.get('remote.user')
    auth_mech = account_config.get('remote.auth', '')
    auth_name = account_config.get('remote.auth_name', None)
    general_password = general_config.get('password')
    password_command = account_config.get('remote.password_command')
    if general_password:
        # Use the password submitted via stdin if present.
        password = general_password
    elif password_command:
        # Otherwise try to execute the password command.
        password = exec_command(password_command)
    else:
        # Fall back to the password from the config file.
        password = account_config.get('remote.password')
    if not auth_mech:
        res = sieve.login(auth_mech, username, password)
    elif auth_mech == 'LOGIN':
        res = sieve.authenticate(auth_mech, username, password)
    else:
        res = sieve.authenticate(auth_mech, auth_name, username, password)
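# A hypothetical account section run_command would read (key names taken
# from the lookups above; the `pass` invocation is only an illustration of
# remote.password_command):
#
#   [myaccount]
#   remote.host = mail.example.com
#   remote.port = 4190
#   remote.use_tls = true
#   remote.user = alice
#   remote.auth = PLAIN
#   remote.password_command = pass show mail/alice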
def get_volume_heal_info_split_brain_stats(vol, integration_id, etcd_client):
    for trial_cnt in xrange(0, 3):
        vol_heal_op, vol_heal_err = \
            tendrl_glusterfs_utils.exec_command(
                "gluster volume heal %s info split-brain "
                "--nolog --xml" % vol['name']
            )
        if vol_heal_err:
            time.sleep(5)
            if trial_cnt == 2:
                collectd.error(
                    'Failed to fetch volume heal info split-brain. '
                    'The error is: %s' % (vol_heal_err)
                )
                return {}
            continue
        else:
            break
    try:
        vol_heal_info = _parse_heal_info_stats(
            ElementTree.fromstring(vol_heal_op),
            integration_id,
            etcd_client
        )
        return vol_heal_info
    except (AttributeError, KeyError, ValueError, ElementTree.ParseError):
        # ElementTree raises a parse error on heal-info command timeouts
        # and with older versions of glusterd.
        collectd.error(
            'Failed to collect volume heal info split-brain. Error %s' %
            (traceback.format_exc())
        )
        return {}
def get_volume_heal_info_stats(vol):
    for trial_cnt in xrange(0, 3):
        vol_heal_op, vol_heal_err = \
            tendrl_glusterfs_utils.exec_command(
                "gluster volume heal %s info --xml" % vol['name']
            )
        if vol_heal_err:
            time.sleep(5)
            if trial_cnt == 2:
                collectd.error('Failed to fetch volume heal info. '
                               'The error is: %s' % (vol_heal_err))
                return {}
            continue
        else:
            break
    try:
        vol_heal_info = _parse_heal_info_stats(
            ElementTree.fromstring(vol_heal_op))
        return vol_heal_info
    except (AttributeError, KeyError, ValueError):
        collectd.error('Failed to collect volume heal info. Error %s' %
                       (traceback.format_exc()))
        return {}
def diskLocalDataCreate(instance_name):
    command = 'gcloud compute disks create ' + \
        diskLocalDataName(instance_name) + ' --size ' + disklocalSize
    utils.exec_command(command)
def networkOpen():
    command = "gcloud compute firewall-rules create tcprule --allow tcp:8080"
    utils.exec_command(command)
def tunnel():
    """Tunnel the connection through ssh (SOCKS proxy on local port 8080)."""
    utils.exec_command(
        'ssh -vv -ND 8080 {0}@{1}'.format(username, ipGetMaster()))
def compile(self, config):
    cmd_list = config.get_cmd(self.gn_path, self.ninja_path)
    for cmd in cmd_list:
        exec_command(cmd, log_path=config.log_path, shell=True)
def diskLocalDataDelete(instance_name):
    command = 'gcloud compute -q disks delete ' + \
        diskLocalDataName(instance_name)
    utils.exec_command(command)
def install_apps(self):
    # Inspired by mintinstall:
    # https://github.com/linuxmint/mintinstall/blob/master/usr/bin/mint-synaptic-install
    # On systems without synaptic we try to retrieve the package manager's
    # install command and run it normally. What can be done with the
    # graphical installers of other platforms remains to be seen.
    if self.TO_INSTALL:
        if os.path.isfile(_SYNAPTIC_PATH):
            ret = utils.synaptic_install(self.TO_INSTALL)
            if ret == 0:
                self.display_dialog("All packages were installed successfully.")
        else:
            pacman = utils.get_package_manager(packman=postinstaller._PACKMAN)
            if not pacman:
                print "ERROR: what is your platform and your package manager? Please e-mail the developer. No packages will be installed."
            else:
                cmd = [pacman,
                       ' '.join(['%s' % pac for pac in self.TO_INSTALL])]
                # TODO: replace gksudo with a sudo-based command.
                comnd = Popen(['gksudo', ' '.join(cmd)],
                              stdout=PIPE, stderr=PIPE)
                ret = comnd.wait()

def execute_commands(self):
    ERROR = False
    for cmd in self.TO_EXEC:
        self.expander.set_label("Running " + cmd)
        returnCode, stdout, stderr = utils.exec_command(cmd)
        if returnCode == 1:
            ERROR = True
            print 'error while executing', cmd
    if ERROR:
        # TODO: better would be to collect the failures and show a dialog
        # with an expander displaying the stderr output.
        self.expander.set_label("Some commands failed")
    else:
        if len(self.TO_EXEC):
            # Quick confirmation dialog.
            self.display_dialog("All commands were executed successfully.")
    if self.DO_UPGRADE:
        ret = utils.do_upgrade()
        if ret != 0:
            self.display_dialog("The upgrade could not be performed.")