Example #1
	def install_packages(self):

		# Copy all required rpms to inside the jail
		package_dir = self.config.jail_dir + os.sep + "jailbuilder"
		os.mkdir(package_dir)

		# write a new manifest file 
		#(can't use manifest file on rpm < 4)
		#jail_manifest = open(self.config.jail_dir + os.sep + "jailbuilder" + os.sep + "manifest", 'w')
		rpm_list = ""
		for rpm in self.required_rpms:
			rpm_path = self.available_rpms[rpm].full_path
			shutil.copy(rpm_path, package_dir)
			#jail_manifest.write("jailbuilder" + os.sep + os.path.basename(rpm_path) + "\n")
			rpm_list = rpm_list + " jailbuilder" + os.sep + os.path.basename(rpm_path)
		#jail_manifest.close()

		# Is this location ever going to be different for different distros?
		shutil.rmtree(self.config.jail_dir + os.sep + "var/lib/rpm")
		distutils.dir_util.mkpath(self.config.jail_dir + os.sep + "var/lib/rpm")

		# Add chroot path to environment for redhat based systems
		os.environ['PATH'] = os.environ['PATH'] + ":/usr/sbin"

		# Find out whether to use biarch switch or not
		(status, host_arch) = utils.launch_process("uname -m")

		if jail_config.bi_arch_switch.has_key(host_arch) and jail_config.bi_arch_switch[host_arch].has_key(self.arch):
			switch_cmd = jail_config.bi_arch_switch[host_arch][self.arch]
		else:
			switch_cmd = ""
	
		# Reinitialize the rpm database with the jail's version of rpm	
		command = "%s chroot %s env %s rpm --initdb" % (switch_cmd, self.config.jail_dir, self.config.environment)
		print command
		(status, output) = utils.launch_process(command)
		print "Status: %d" % status
		print "Output: " + output
		if status:
			sys.exit(1)

		# Reinstall the rpms from inside the jail		
		# manifest files don't work on rpm 3 and below...
		#command = "chroot %s env %s rpm --force -U %s" % (self.config.jail_dir, self.environment, "jailbuilder" + os.sep + "manifest")

		# But, this method may be a problem because of the length of the arguments
		command = "%s chroot %s env %s rpm --force -U %s" % (switch_cmd, self.config.jail_dir, self.config.environment, rpm_list)
		print command
		(status, output) = utils.launch_process(command)
		print "Status: %d" % status
		print "Output: " + output
Example #2
	def initialize_jail(self):

		# Blow away the directory
		# Unmount the possible proc dir just in case
		utils.launch_process("umount %s" % self.config.jail_dir + os.sep + "proc")
		print "Removing jail target dir..."
		shutil.rmtree(self.config.jail_dir)
		
		os.makedirs(self.config.jail_dir + os.sep + "var/lib/rpm") # Needed for rpm version 3
		command = """rpm --root %s --initdb""" % self.config.jail_dir
		print command
		(status, output) = utils.launch_process(command)
		if status:
			print "Error initializing the rpm database inside the jail"
			sys.exit(1)
Example #3
	def copy_to(self, src, dest, compress=True, my_logger=""):
		"""Args: src (list), dest, Returns: (exit_code, output)."""

		if not my_logger:
			my_logger = self.logger

		# this is the original 'scp' mode
		# fixed: You can't mput files outside the current local dir (going to have to chdir to each dir and issue separate smbclient commands)
		# Kinda ugly, but it works!
		current_dir = os.getcwd()
		command = ""
		for file in src:
			dir = os.path.dirname(file)
			filename = os.path.basename(file)

			if dir: dir_cmd = "cd %s;" % dir
			else: dir_cmd = ""

			command += "%s smbclient //%s/%s -A %s -U %s -D %s -c 'prompt; recurse; mput %s' ; cd %s ;" % (dir_cmd, self.hostname, self.SMB_SHARE, config.smb_passfile, self.username, dest, filename, current_dir)

		# This is for 'tar' mode:
		# (But doesn't have compression, only useful if not using tar over ssh)
		#command = "%s -spc %s | smbclient //%s/%s -A %s -U %s -D %s -Trqx -" % (self.local_tar_path, src, self.host, self.SMB_SHARE, config.smb_passfile, self.user, dest)


		return utils.launch_process(command, my_logger=my_logger)
Example #4
	def load_package_info(self):
		(status, output) = utils.launch_process("""rpm -qp --queryformat "____NAME\n%{NAME}\n____ARCH\n%{ARCH}\n____FILELIST\n[%{FILENAMES}\n]" --queryformat "____REQUIRES\n" --requires --queryformat "____PROVIDES\n" --provides """ + self.full_path, print_output=0)

		#print output

		marker = ""
		for line in output.split("\n"):
			line = line.strip()

			# Ignore rpm warnings...
			if line.count("warning:"):
				pass
			# If this is a marker, set the marker
			elif Package.marker.search(line):
				marker = line
			else:
				if marker == "____NAME":
					self.name = line.strip()
				elif marker == "____ARCH":
					self.arch = line.strip()
				elif marker == "____REQUIRES":
					# Ignore 'rpmlib(' requirements (don't know how to find out what the rpm binary provides)
					#  If the rpm binary cannot resolve them, something will fail later anyway
					if not Package.rpmlib_req.search(line):
						line = Package.remove_version_req(line)
						self.requires.append(line)
				elif marker == "____PROVIDES":
					line = Package.remove_version_req(line)
					self.provides.append(line)
				elif marker == "____FILELIST":
					self.provides.append(line)
				else:
					print "Unknown marker tag: " + marker
					sys.exit(1)
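
The marker-driven parsing above can be captured in a small standalone helper. This is an illustrative sketch, not part of the original Package class; the ____ marker convention simply mirrors the query format used above:

import re

# Sketch: group rpm query output lines under their ____MARKER headings.
def parse_marker_output(output, marker_re=re.compile(r"^____[A-Z]+$")):
	sections = {}
	marker = None
	for line in output.split("\n"):
		line = line.strip()
		if not line or line.count("warning:"):
			continue
		if marker_re.search(line):
			marker = line
			sections.setdefault(marker, [])
		elif marker is not None:
			sections[marker].append(line)
	return sections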
Example #5
    def copy_from(self, src, dest, compress=True, my_logger=""):
        """Args: (src (list), dest) Returns: (exit_code, output).

                Optional args: compress=0 or 1
                # tar mode handles symbolic links and preserving time stamps, unlike scp.
                #  I guess I could also use zip/unzip... oh well (What does this mean?)
                # Note: tar mode appends src path of file to dest (just the way tar works)"""

        if not my_logger:
            my_logger = self.logger

        if compress:
            # CompressionLevel only works for protocol 1... (bummer)
            compress_option = "z"
        else:
            compress_option = ""

        files = ""

        # TODO: this method doesn't work (but it's not used)
        tar_dir_options = ""
        #tar_dir_options = "-C %s" % os.path.dirname(self.root_dir)

        # Note: the -f - option to the remote tar is required for solaris tar, otherwise it tries to read from a tape
        command = "cd %s; ssh %s %s@%s ' %s %s -%spcf - %s ' | %s -%spvxf - " % (
            dest, self.options, self.username, self.hostname,
            self.remote_tar_path, tar_dir_options, compress_option, files,
            self.local_tar_path, compress_tar_option)
        if debug: print "* Executing: " + command

        return utils.launch_process(command, my_logger=my_logger)
Example #6
    def copy_to(self, src, dest, compress=True, my_logger=""):
        """Args: src (list), dest, Returns: (exit_code, output)."""

        if not my_logger:
            my_logger = self.logger

        # this is the original 'scp' mode
        # fixed: You can't mput files outside the current local dir (going to have to chdir to each dir and issue separate smbclient commands)
        # Kinda ugly, but it works!
        current_dir = os.getcwd()
        command = ""
        for file in src:
            dir = os.path.dirname(file)
            filename = os.path.basename(file)

            if dir: dir_cmd = "cd %s;" % dir
            else: dir_cmd = ""

            command += "%s smbclient //%s/%s -A %s -U %s -D %s -c 'prompt; recurse; mput %s' ; cd %s ;" % (
                dir_cmd, self.hostname, self.SMB_SHARE, config.smb_passfile,
                self.username, dest, filename, current_dir)

        # This is for 'tar' mode:
        # (But doesn't have compression, only useful if not using tar over ssh)
        #command = "%s -spc %s | smbclient //%s/%s -A %s -U %s -D %s -Trqx -" % (self.local_tar_path, src, self.host, self.SMB_SHARE, config.smb_passfile, self.user, dest)

        return utils.launch_process(command, my_logger=my_logger)
Example #7
	def copy_from(self, src, dest, compress=True, my_logger=""):
		"""Args: (src (list), dest) Returns: (exit_code, output).

		Optional args: compress=0 or 1
		# tar mode handles symbolic links and preserving time stamps, unlike scp.
		#  I guess I could also use zip/unzip... oh well (What does this mean?)
		# Note: tar mode appends src path of file to dest (just the way tar works)"""

		if not my_logger:
			my_logger = self.logger

		if compress:
			# CompressionLevel only works for protocol 1... (bummer)
			compress_option = "z"
		else:
			compress_option = ""

		files = ""

		# TODO: this method doesn't work (but it's not used)
		tar_dir_options = ""
		#tar_dir_options = "-C %s" % os.path.dirname(self.root_dir)

		# Note: the -f - option to the remote tar is required for solaris tar, otherwise it tries to read from a tape
		command = "cd %s; ssh %s %s@%s ' %s %s -%spcf - %s ' | %s -%spvxf - " % (dest, self.options, self.username, self.hostname, self.remote_tar_path, tar_dir_options, compress_option, files, self.local_tar_path, compress_option)
		if debug: print "* Executing: " + command

		return utils.launch_process(command, my_logger=my_logger)
Example #8
	def latest_path_revision(self, path, revision=0):
		"""given a svn dir path, what's the latest revision for that url at a given revision.

		path can either be a string or sequence of strings
		"""

		# Convert to list
		if path.__class__ == str:
			path = [ path ]

		versions = []

		rev_arg = ""
		if revision: rev_arg = "-r " + str(revision)

		for item in path:
			dirname = os.path.dirname(item)
			module = os.path.basename(item)

			command = '%s svn %s ls %s/%s %s -v' % ( self.svn_env, self.svn_options, self.root , dirname, rev_arg)
			self.debug_print("Command: " + command)

			# Cache output for this command, should lessen load from svn server
			#  Only check if we have a revision
			if revision and self.latest_path_revision_cache.has_key(command):
				self.debug_print("CACHE:hit!")
				(code, output) = self.latest_path_revision_cache[command]

				# find hit and put it at the end of the list
				self.cache_lru.append(self.cache_lru.pop(self.cache_lru.index(command)))
			else:
				self.debug_print("CACHE:miss...")
				self.regulator()

				code, output = utils.launch_process(command, print_output=0, output_timeout=output_timeout)

				self.latest_path_revision_cache[command] = (code, output)
				self.cache_lru.append(command)

				# Cache cleanup, so we don't use up all memory since this is a long running process
				if len(self.cache_lru) > self.cache_max_size:
					self.debug_print("Removing old item from cache")
					self.latest_path_revision_cache.pop(self.cache_lru.pop(0))

			for line in output.split('\n'):
				list = line.split()

				# Catch network/ssh errors
				try: 
					version = int(list[0])
				except:
					return 0

				tmp_module = os.path.dirname(list.pop())

				if tmp_module == module:
					versions += [ version ]

		versions.sort()
		return versions.pop()
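
The caching above is a hand-rolled LRU: a dict for the entries plus a list kept in least- to most-recently-used order. The same pattern as a self-contained sketch (illustrative only, not an API of this codebase):

class SimpleLRU:
	"""Sketch of the dict-plus-list LRU bookkeeping used above."""
	def __init__(self, max_size=100):
		self.cache = {}
		self.lru = []  # oldest first, most recently used last
		self.max_size = max_size

	def get(self, key):
		# On a hit, move the key to the end, as latest_path_revision does
		self.lru.append(self.lru.pop(self.lru.index(key)))
		return self.cache[key]

	def put(self, key, value):
		self.cache[key] = value
		self.lru.append(key)
		if len(self.lru) > self.max_size:
			self.cache.pop(self.lru.pop(0))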
Example #9
File: memory.py Project: zdzhjx/see
def process_memory_snapshot(snapshot_path, profile, plugin):
    process = launch_process('volatility',
                             '--profile=%s' % profile,
                             '--filename=%s' % snapshot_path,
                             plugin)
    file_name = '%s_%s.log' % (snapshot_path.split('.')[0], plugin)

    collect_process_output(process, file_name)
Example #10
	def execute_command(self, command, my_logger=""):

		if not my_logger:
			my_logger = self.logger

		# TODO: execute this under a new pgid so only it gets killed, not us all
		# check http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
		# Or, put that code in the utils.launch_process routine, which would make it more robust
		return utils.launch_process(command, my_logger=my_logger)
Example #11
    def execute_command(self, command, my_logger=""):

        if not my_logger:
            my_logger = self.logger

        # TODO: execute this under a new pgid so only it gets killed, not us all
        # check http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
        # Or, put that code in the utils.launch_process routine, which would make it more robust
        return utils.launch_process(command, my_logger=my_logger)
Example #12
	def execute_command(self, command, my_logger=""):

		if not my_logger:
			my_logger = self.logger

		# Escape these so they get interpolated by the remote machine
		command = command.replace('$', '\$')

		# Put single quotes around the command so the whole command gets passed over ssh
		# (must use single quotes here since the command may have double quotes; the shell allows double quotes inside single quotes without escaping)
		command = "ssh %s %s@%s '%s'" % (self.options, self.username, self.hostname, command)
		if debug: print "* Executing: " + command
		return utils.launch_process(command, my_logger=my_logger)
Example #13
    def start_trace_handler(self, event):
        folder_path = self.configuration['results_folder']

        self.logger.debug("Event %s: starting network tracing.", event)

        create_folder(folder_path)
        self.pcap_path = os.path.join(folder_path, "%s.pcap" % self.identifier)
        self.tracer_process = launch_process(
            TSHARK, '-w', self.pcap_path,
            '-i', self.context.network.bridgeName())
        self.context.trigger("network_tracing_started", path=self.pcap_path)

        self.logger.info("Network tracing started.")
Example #14
    def start_trace_handler(self, event):
        folder_path = self.configuration['results_folder']

        self.logger.debug("Event %s: starting network tracing.", event)

        create_folder(folder_path)
        self.pcap_path = os.path.join(folder_path, "%s.pcap" % self.identifier)
        self.tracer_process = launch_process(TSHARK, '-w', self.pcap_path,
                                             '-i',
                                             self.context.network.bridgeName())
        self.context.trigger("network_tracing_started", path=self.pcap_path)

        self.logger.info("Network tracing started.")
Example #15
    def execute_command(self, command, my_logger=""):

        if not my_logger:
            my_logger = self.logger

        # Escape these so they get interpolated by the remote machine
        command = command.replace('$', '\$')

        # Put single quotes around the command so the whole command gets passed over ssh
        # (must use single quotes here since the command may have double quotes; the shell allows double quotes inside single quotes without escaping)
        command = "ssh %s %s@%s '%s'" % (self.options, self.username,
                                         self.hostname, command)
        if debug: print "* Executing: " + command
        return utils.launch_process(command, my_logger=my_logger)
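
Escaping only '$' still lets an embedded single quote terminate the quoted remote command. A hedged alternative sketch using the standard library (pipes.quote on Python 2; shlex.quote is the Python 3 equivalent):

import pipes

# Sketch: quote the whole command once instead of escaping characters by hand.
def quote_for_ssh(command):
    return pipes.quote(command)

# e.g. "ssh %s %s@%s %s" % (options, username, hostname, quote_for_ssh(command))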
Example #16
def clean_dir(dir):

	os.chdir(dir)

	# Get svn:ignore for this dir
	code, files = utils.launch_process('svn propget svn:ignore .', print_output=0)

	# If there's some valid data
	if not code:
		for f in files.split():
			if os.path.isdir(f):
				utils.launch_process('rm -Rf ' + f, print_command=1)
			else:
				utils.launch_process('rm -f ' + f, print_command=1)


		# Remove the ignored files

		for d in os.listdir('.'):
			if os.path.isdir(d):
				clean_dir(d)

	os.chdir('..')
Example #17
def clean_dir(dir):

    os.chdir(dir)

    # Get svn:ignore for this dir
    code, files = utils.launch_process('svn propget svn:ignore .',
                                       print_output=0)

    # If there's some valid data
    if not code:
        for f in files.split():
            if os.path.isdir(f):
                utils.launch_process('rm -Rf ' + f, print_command=1)
            else:
                utils.launch_process('rm -f ' + f, print_command=1)

        # Remove the ignored files

        for d in os.listdir('.'):
            if os.path.isdir(d):
                clean_dir(d)

    os.chdir('..')
Example #18
def snapshot_to_checkpoint(volume, snapshot, folder_path):
    """Turns a QEMU internal snapshot into a QCOW file."""
    create_folder(folder_path)

    name = snapshot.getName()
    path = os.path.join(folder_path, '%s.qcow2' % name)

    process = launch_process(QEMU_IMG, "convert", "-f", "qcow2", "-o",
                             "backing_file=%s" % volume_backing_path(volume),
                             "-O", "qcow2", "-s", name,
                             volume_path(volume), path)
    collect_process_output(process)

    return path
Example #19
	def collect_required_packages(self):
		# Have two data structures: current requires, and currently supplied provides (these structures deal with rpm names, not filenames)
		#  Start adding new packages to get rid of the requires list

		print "Required packages:"

		# Add initial deps
		missing_packs = []
		for req_rpm in self.orig_required_rpms:
			if not self.add_package(req_rpm):
				missing_packs.append(req_rpm)

		if missing_packs:
			print "ERROR!: missing requested packages: %s" % (" ".join(missing_packs))
			sys.exit(1)

		#print "Current requires:"
		#for i in self.requires:
		#	print i


		# Solve remaining deps
		while(len(self.requires)):
			# remove requires that are provides by our current list of packages
			for req in self.requires.keys():
				if self.provides.has_key(req):
					# remove from requires
					self.requires.pop(req)

			# Add a package for each of the remaining requires
			for req in self.requires.keys():
				if self.provide_map.has_key(req):
					self.add_package(self.provide_map[req])
				else:
					print "ERROR!: need requirement '%s' but do not have a package to satisfy it!" % req
					print "\tmake sure you have the correct arch types in valid_arch in your jail config"
					print "Current Distro Hint:"
					(status, output) = utils.launch_process("""rpm -q --whatprovides '%s' """ % req)
					print output
					sys.exit(1)

			#print self.requires
			#print "***provides***"
			#print self.provides

		# When you make it here, you've got all your deps!
		self.required_rpms.sort()
		print self.required_rpms
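
The loop above is a worklist-style transitive closure over the requires/provides maps. Condensed into a standalone sketch (all names are illustrative, and add_package is assumed to extend provides as it does in the class above):

def close_over_requires(requires, provides, provide_map, add_package):
	# Sketch: iterate until every requirement is satisfied by a chosen package
	while requires:
		for req in requires.keys():
			if req in provides:
				requires.pop(req)  # already satisfied
		for req in requires.keys():
			if req not in provide_map:
				raise RuntimeError("no package satisfies '%s'" % req)
			add_package(provide_map[req])  # expected to grow provides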
Example #20
def commandline():

    if len(sys.argv) < 2:
        usage()
        sys.exit(1)

    # Options for launch process
    try:
        opts, command = getopt.getopt(sys.argv[1:], "", [
            "terminate_reg=", "output_timeout=", "max_output_size=",
            "kill_process_group", "env=", "working_dir=", "interruptable="
        ])
    except getopt.GetoptError:
        usage()
        sys.exit(1)

    # Get args to pass to function
    args = {}
    for option, value in opts:
        if option == "--terminate_reg":
            args['terminate_reg'] = value
        if option == "--output_timeout":
            args['output_timeout'] = int(value)
        if option == "--kill_process_group":
            args['kill_process_group'] = 1
        if option == "--max_output_size":
            args['max_output_size'] = float(value)
        if option == "--env":
            for e in value.split(','):
                k, v = e.split('=')
                os.environ[k] = v
        if option == "--working_dir":
            try:
                os.chdir(value)
            except:
                print "Failed to chdir: " + value
                print "Exiting..."
                sys.exit(1)
        if option == "--interruptable":
            args['interruptable'] = value

    command = " ".join(command)
    code, output = utils.launch_process(command, **args)
    sys.exit(code)
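
A hypothetical invocation of this wrapper (the script name and option values are made up for illustration), showing how the flags map onto launch_process keyword arguments:

# Hypothetical usage:
#   ./launch.py --output_timeout=60 --env=CC=gcc,LANG=C --working_dir=/tmp make all
# is roughly equivalent to:
#   os.environ['CC'] = 'gcc'; os.environ['LANG'] = 'C'; os.chdir('/tmp')
#   utils.launch_process("make all", output_timeout=60)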
Example #21
	def copy_from(self, src, dest, compress=True, my_logger=""):
		"""Args: (src (list), dest) Returns: (exit_code, output)."""

		if not my_logger:
			my_logger = self.logger

		# fixed: You can't mput files outside the current local dir (going to have to chdir to each dir and issue separate smbclient commands)
		# Kinda ugly, but it works!
		current_dir = os.getcwd()
		command = ""
		for file in src:
			dir = os.path.dirname(file)
			if not dir: dir = "."
			filename = os.path.basename(file)

			command += "cd %s; smbclient //%s/%s -A %s -U %s -D %s -c 'prompt; recurse; mget %s' ; cd %s ;" % (dest, self.hostname, self.SMB_SHARE, config.smb_passfile, self.username, dir, filename, current_dir)


		return utils.launch_process(command, my_logger=my_logger)
Example #22
	def execute_function(self, func_name, var_to_echo=""):

		tmp_script = tempfile.mktemp()

		# Kind of a pain to maintain... any var you want to use in get_destroot bash function must be listed here
		my_script = open(tmp_script, 'w')
		my_script.write("DISTRO=%s\n" % self.package_env.info['distro'])
		my_script.write("ARCH=%s\n" % self.package_env.info['arch'])
		my_script.write("USE_ZIP_PKG=%s\n" % self.package_env.get_info_var('USE_ZIP_PKG') )
		my_script.write("HEAD_or_RELEASE=%s\n" % self.HEAD_or_RELEASE )
		my_script.write(self.info[func_name])
		if var_to_echo: my_script.write("echo ${%s}\n" % var_to_echo)
		my_script.close()

		(code, output) = utils.launch_process("sh %s" % tmp_script, print_output=0)

		os.unlink(tmp_script)

		return output
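
tempfile.mktemp only reserves a name, so another process can claim the path before the open. A sketch of the same write-then-run flow using tempfile.mkstemp, which creates and opens the file atomically:

import os
import tempfile

# Sketch: safer temp-script creation than tempfile.mktemp
def write_temp_script(body):
	fd, path = tempfile.mkstemp()
	script = os.fdopen(fd, 'w')
	script.write(body)
	script.close()
	return path  # caller runs "sh <path>" and unlinks it afterwards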
Example #23
	def latest_tree_revision(self):
		"""Get the last commit version.
		"""

		self.regulator()
		code, output = utils.launch_process('%s svn %s ls %s -v' % ( self.svn_env, self.svn_options, self.root ), print_output=0, output_timeout=output_timeout )

		versions = []
		for line in output.split('\n'):
			list = line.split()
			# Catch network/ssh errors
			try:
				versions.append(int(list[0]))
			except:
				return 0

		versions.sort()

		return versions.pop()
Example #24
	def copy_to(self, src, dest, compress=True, my_logger=""):
		"""Args: src (list of strings), dest, Returns: (exit_code, output).
		Optional args: compress=True or False"""

		if not my_logger:
			my_logger = self.logger

		src = " ".join(src)

		if compress:
			compress_option = "z"
			# This was causing problems with copy to the windows machine... hmm... ? (ssh problem?  Would explain the s390 problems)
			#compress_option = "j"
		else:
			compress_option = ""

		# Note: the -f - option to the remote tar is required for solaris tar, otherwise it tries to read from a tape
		command = "%s -%spc %s | ssh %s %s@%s 'cd %s ; %s -%spvxf - ' " % (self.local_tar_path, compress_option, src, self.options, self.username, self.hostname, dest, self.remote_tar_path, compress_option )
		if debug: print "* Executing: " + command
		return utils.launch_process(command, my_logger=my_logger)
Example #25
    def copy_from(self, src, dest, compress=True, my_logger=""):
        """Args: (src (list), dest) Returns: (exit_code, output)."""

        if not my_logger:
            my_logger = self.logger

        # fixed: You can't mput files outside the current local dir (going to have to chdir to each dir and issue separate smbclient commands)
        # Kinda ugly, but it works!
        current_dir = os.getcwd()
        command = ""
        for file in src:
            dir = os.path.dirname(file)
            if not dir: dir = "."
            filename = os.path.basename(file)

            command += "cd %s; smbclient //%s/%s -A %s -U %s -D %s -c 'prompt; recurse; mget %s' ; cd %s ;" % (
                dest, self.hostname, self.SMB_SHARE, config.smb_passfile,
                self.username, dir, filename, current_dir)

        return utils.launch_process(command, my_logger=my_logger)
Example #26
def commandline():

	if len(sys.argv) < 2:
		usage()
		sys.exit(1)

	# Options for launch process
	try:
		opts, command = getopt.getopt(sys.argv[1:], "", [ "terminate_reg=", "output_timeout=", "max_output_size=", "kill_process_group", "env=", "working_dir=", "interruptable=" ])
	except getopt.GetoptError:
		usage()
		sys.exit(1)

	# Get args to pass to function
	args = {}
	for option, value in opts:
		if option == "--terminate_reg":
			args['terminate_reg'] = value
		if option == "--output_timeout":
			args['output_timeout'] = int(value)
		if option == "--kill_process_group":
			args['kill_process_group'] = 1
		if option == "--max_output_size":
			args['max_output_size'] = float(value)
		if option == "--env":
			for e in value.split(','):
				k, v = e.split('=')
				os.environ[k] = v
		if option == "--working_dir":
			try:
				os.chdir(value)
			except:
				print "Failed to chdir: " + value
				print "Exiting..."
				sys.exit(1)
		if option == "--interruptable":
			args['interruptable'] = value

	command = " ".join(command)
	code, output = utils.launch_process(command, **args)
	sys.exit(code)
Example #27
	def copy_to(self, src, dest, compress=True, my_logger=""):
		"""Args: src (list of strings), dest, Returns: (exit_code, output).

		Optional args: compress=0 or 1 (but doesn't actually do anything in ssh protocol 2... oh well)
		Note, in scp mode, recursive is on by default"""

		if not my_logger:
			my_logger = self.logger

		src = " ".join(src)

		if compress:
			# CompressionLevel only works for protocol 1... (bummer)
			compress_option = ' -o "Compression yes" -o "CompressionLevel 9" '
		else:
			compress_option = ""

		command = "scp -r %s %s %s@%s:%s" % (self.options + compress_option, src, self.username, self.hostname, dest)
		if debug: print "* Executing: " + command

		return utils.launch_process(command, my_logger=my_logger)
Example #28
    def execute_function(self, func_name, var_to_echo=""):

        tmp_script = tempfile.mktemp()

        # Kind of a pain to maintain... any var you want to use in get_destroot bash function must be listed here
        my_script = open(tmp_script, 'w')
        my_script.write("DISTRO=%s\n" % self.package_env.info['distro'])
        my_script.write("ARCH=%s\n" % self.package_env.info['arch'])
        my_script.write("USE_ZIP_PKG=%s\n" %
                        self.package_env.get_info_var('USE_ZIP_PKG'))
        my_script.write("HEAD_or_RELEASE=%s\n" % self.HEAD_or_RELEASE)
        my_script.write(self.info[func_name])
        if var_to_echo: my_script.write("echo ${%s}\n" % var_to_echo)
        my_script.close()

        (code, output) = utils.launch_process("sh %s" % tmp_script,
                                              print_output=0)

        os.unlink(tmp_script)

        return output
Example #29
    def latest_tree_revision(self):
        """Get the last commit version.
		"""

        self.regulator()
        code, output = utils.launch_process(
            '%s svn %s ls %s -v' % (self.svn_env, self.svn_options, self.root),
            print_output=0,
            output_timeout=output_timeout)

        versions = []
        for line in output.split('\n'):
            list = line.split()
            # Catch network/ssh errors
            try:
                versions.append(int(list[0]))
            except:
                return 0

        versions.sort()

        return versions.pop()
Example #30
	def bootstrap_install(self):

		# Generate a manifest file (list of rpm files)
		manifest_filename = tempfile.mktemp()
		manifest = open(manifest_filename, 'w')

		for rpm in self.required_rpms:
			path = self.available_rpms[rpm].full_path
			manifest.write(path + "\n")
		manifest.close()

		# This will work (using a manifest filename) as long as you're using rpm version 4 and above on the host machine
		command = """rpm --nodeps --root %s -i %s""" % (self.config.jail_dir, manifest_filename)
		print command
		(status, output) = utils.launch_process(command)
		print output
		if status:
			print "Error installing rpms inside the jail!!!"
			print "***Usually this is ok for now***"

		# Cleanup...
		os.unlink(manifest_filename)
Example #31
	def copy_from(self, src, dest, compress=True, my_logger=""):
		"""Args: (src (list), dest) Returns: (exit_code, output).

		Optional args: compress=0 or 1, mode=tar or scp.
		#  I guess I could also use zip/unzip... oh well (??)"""

		if not my_logger:
			my_logger = self.logger

		if compress:
			# CompressionLevel only works for protocol 1... (bummer)
			compress_option = ' -o "Compression yes" -o "CompressionLevel 9" '
		else:
			compress_option = ""

		files = ""

		for file in src:
			files += " %s@%s:%s " % (self.username, self.hostname, file)
		command = "scp -r %s %s %s" % (self.options + compress_option, files, dest)
		if debug: print "* Executing: " + command

		return utils.launch_process(command, my_logger=my_logger)
Example #32
    def copy_to(self, src, dest, compress=True, my_logger=""):
        """Args: src (list of strings), dest, Returns: (exit_code, output).

		Optional args: compress=0 or 1 (but doesn't actually do anything in ssh protocol 2... oh well
		Note, in scp mode, recursive is on by default"""

        if not my_logger:
            my_logger = self.logger

        src = " ".join(src)

        if compress:
            # CompressionLevel only works for protocol 1... (bummer)
            compress_option = ' -o "Compression yes" -o "CompressionLevel 9" '
        else:
            compress_option = ""

        command = "scp -r %s %s %s@%s:%s" % (self.options + compress_option,
                                             src, self.username, self.hostname,
                                             dest)
        if debug: print "* Executing: " + command

        return utils.launch_process(command, my_logger=my_logger)
Example #33
    def copy_to(self, src, dest, compress=True, my_logger=""):
        """Args: src (list of strings), dest, Returns: (exit_code, output).
		Optional args: compress=True of False"""

        if not my_logger:
            my_logger = self.logger

        src = " ".join(src)

        if compress:
            compress_option = "z"
            # This was causing problems with copy to the windows machine... hmm... ? (ssh problem?  Would explain the s390 problems)
            #compress_option = "j"
        else:
            compress_option = ""

        # Note: the -f - option to the remote tar is required for solaris tar, otherwise it tries to read from a tape
        command = "%s -%spc %s | ssh %s %s@%s 'cd %s ; %s -%spvxf - ' " % (
            self.local_tar_path, compress_option, src, self.options,
            self.username, self.hostname, dest, self.remote_tar_path,
            compress_option)
        if debug: print "* Executing: " + command
        return utils.launch_process(command, my_logger=my_logger)
Example #34
    def copy_from(self, src, dest, compress=True, my_logger=""):
        """Args: (src (list), dest) Returns: (exit_code, output).

		Optional args: compress=0 or 1, mode=tar or scp.
		#  I guess I could also use zip/unzip... oh well (??)"""

        if not my_logger:
            my_logger = self.logger

        if compress:
            # CompressionLevel only works for protocol 1... (bummer)
            compress_option = ' -o "Compression yes" -o "CompressionLevel 9" '
        else:
            compress_option = ""

        files = ""

        for file in src:
            files += " %s@%s:%s " % (self.username, self.hostname, file)
        command = "scp -r %s %s %s" % (self.options + compress_option, files,
                                       dest)
        if debug: print "* Executing: " + command

        return utils.launch_process(command, my_logger=my_logger)
Example #35
#!/usr/bin/env python

import sys

sys.path += ['..']

import utils

utils.launch_process('./timeout_script.sh', output_timeout=3)
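
utils.launch_process itself is not shown in these examples. Purely to illustrate what an output_timeout can mean, here is a minimal, assumption-laden sketch that kills a child process once it has been silent too long (Unix-only, reading a byte at a time for simplicity):

import select
import subprocess

def run_with_output_timeout(command, output_timeout):
    # Sketch only: kill the child if it writes nothing for output_timeout seconds
    proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
    output = ""
    while True:
        ready, _, _ = select.select([proc.stdout], [], [], output_timeout)
        if not ready:
            proc.kill()  # silent too long
            proc.wait()
            return (-1, output)
        chunk = proc.stdout.read(1)
        if not chunk:  # EOF: the child closed stdout
            break
        output += chunk
    return (proc.wait(), output)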

Example #36
def process_memory_snapshot(snapshot_path, profile, plugin):
    process = launch_process('volatility', '--profile=%s' % profile,
                             '--filename=%s' % snapshot_path, plugin)
    file_name = '%s_%s.log' % (snapshot_path.split('.')[0], plugin)

    collect_process_output(process, file_name)
Example #37
				topdir = "monodevelop"

			_dir = os.path.join(dir.replace('[[version]]', _version),'*')
			candidates = glob.glob(_dir)
			latest = utils.version_sort(candidates).pop()
			#installer_dirs.append(latest)
			cwd = os.getcwd()

			splitter = os.sep + _version + os.sep
			(prefix, sync_dir) = latest.split(splitter)
			os.chdir(prefix)
			
			print "Syncing: %s" % os.path.join(_version,sync_dir)
			cmd ='rsync -avzR -e ssh %s %s/%s' % (os.path.join(_version,sync_dir), dest, topdir)
			#print cmd
			status, output = utils.launch_process(cmd)

			os.chdir(cwd)
		except Exception, e:
			print "******* ERROR syncing: " + dir
			print "\tarchive_version = %s" % _version
			print "\tException = " +  str(e)
			print "\tSkipping..."

# mirror OBS repos
url_prefix = 'download-' + bundle_obj.info['bundle_urlname']
obs_repos = utils.get_dict_var('obs_repos', bundle_obj.info)
if not skip_obs_repos:
	for obs_repo in obs_repos:
		repo_name = string.split(obs_repo, "/")[-2]
		print "Downloading %s..." % (repo_name)
Example #38
#!/usr/bin/env python

import utils

print_output = 0

print utils.launch_process('ls /tmp', capture_stderr=1, print_output=print_output)

print utils.launch_process('ls /junk', capture_stderr=1, print_output=print_output)

print utils.launch_process('ls /tmp', capture_stderr=0, print_output=print_output)

print utils.launch_process('ls /junk', capture_stderr=0, print_output=print_output)

print utils.launch_process('./a.out', capture_stderr=1, print_output=print_output)
Example #39
            _dir = os.path.join(dir.replace('[[version]]', _version), '*')
            candidates = glob.glob(_dir)
            latest = utils.version_sort(candidates).pop()
            #installer_dirs.append(latest)
            cwd = os.getcwd()

            splitter = os.sep + _version + os.sep
            (prefix, sync_dir) = latest.split(splitter)
            os.chdir(prefix)

            print "Syncing: %s" % os.path.join(_version, sync_dir)
            cmd = 'rsync -avzR -e ssh %s %s/%s' % (os.path.join(
                _version, sync_dir), dest, topdir)
            #print cmd
            status, output = utils.launch_process(cmd)

            os.chdir(cwd)
        except Exception, e:
            print "******* ERROR syncing: " + dir
            print "\tarchive_version = %s" % _version
            print "\tException = " + str(e)
            print "\tSkipping..."

# mirror OBS repos
url_prefix = 'download-' + bundle_obj.info['bundle_urlname']
obs_repos = utils.get_dict_var('obs_repos', bundle_obj.info)
if not skip_obs_repos:
    for obs_repo in obs_repos:
        repo_name = string.split(obs_repo, "/")[-2]
        print "Downloading %s..." % (repo_name)
Example #40
    def run(self):

        sync_log.log("sync thread starting...\n")

        while not self.cancelled():

            self.load_info()

            # Must base these dirs off 'trunk/release'
            dirs = []

            # Add tarball_map
            dirs += ['packaging/tarball_map']

            # Add config
            dirs += ['pyutils/config.py']

            #sync_log.log(" *** Gathering dirs ***\n")

            # Gather dirs to synchronize
            for i in ['HEAD', 'RELEASE']:
                i_dir = config.build_info_dir + os.sep + i
                if os.path.exists(i_dir):
                    for distro in os.listdir(i_dir):
                        distro_dir = i_dir + os.sep + distro
                        if os.path.exists(distro_dir):
                            for component in os.listdir(distro_dir):
                                # Get the last 'num_builds' number of elements from the list
                                versions = build.get_versions(
                                    i, distro,
                                    component)[-self.sync_num_builds:]
                                for j in versions:
                                    dirs.append(
                                        os.path.join('monobuild/www/builds', i,
                                                     distro, component, j))

                # Grab latest num_builds for tarball log files as well
                tarball_path = os.path.join(config.build_info_dir, '..',
                                            'tarball_logs', i)
                if os.path.exists(tarball_path):
                    for component in os.listdir(tarball_path):
                        component_dir = tarball_path + os.sep + component
                        versions = utils.version_sort(
                            os.listdir(component_dir))
                        for j in versions[-self.sync_num_builds:]:
                            dirs.append(
                                os.path.join('monobuild/www/tarball_logs', i,
                                             component, j))

            # construct a dirs string up to the max arg length, and run rsync for each of those blocks (what's the real maximum?)
            while len(dirs):

                dir_string = ""
                counter = 0
                for i in dirs:
                    # +1 is for the space char
                    if len(i) + 1 + len(dir_string) < self.sync_max_arg_len:
                        dir_string += " %s" % i
                    else:
                        break
                    counter += 1

                # Remove counter elements from dirs
                dirs = dirs[counter:]

                #sync_log.log(" *** Syncing ***\n")
                #  For some reason the --delete option crashes when running the second time to go-mono.com and mono.ximian.com ... ?
                # rsync all files over, and don't include the builds... just logs and info.xml
                command = 'cd %s; rsync -avzR -e "ssh %s" --exclude "files/downloads" --exclude "files/*.tar.*" --exclude "files/steps/*/*.tar.*" %s %s:%s' % (
                    config.release_repo_root, config.ssh_options, dir_string,
                    self.sync_host, self.sync_target_dir)
                #sync_log.log(command + "\n")
                status, output = utils.launch_process(command,
                                                      print_output=0,
                                                      output_timeout=600)

                #sync_log.log(output)
                if status:
                    sync_log.log("Error running rsync: " + output)

            #sync_log.log(" *** sync Sleeping ***\n")
            time.sleep(self.sync_sleep_time)

        sync_log.log("sync thread shutting down...\n")
Example #41
    def run(self):

        tarball_log.log("Tarball creator starting...\n")

        while not self.cancelled():

            start_time = utils.get_time()

            # routinely check for updates (sleep every so often)
            for pack_name, pack_obj in self.pack_objs.iteritems():

                # We are reloading pack_objs, but this for loop won't notice it until we enter it again
                if self.cancelled():
                    continue

                # get latest version from the tree
                latest_tree_rev = self.src_repo.latest_tree_revision()
                # print "Latest tree rev: %d (%s)" % (latest_tree_rev, pack_name)

                if not latest_tree_rev:
                    tarball_log.log("Error getting latest tree rev, trying later... (%s)\n" % pack_name)

                    # Restart for loop over...
                    break

                # Only do for the last couple of commits, rather than constantly updating a base revision
                if latest_tree_rev <= self.num_sequential:
                    starting_rev = 1
                else:
                    starting_rev = latest_tree_rev - self.num_sequential

                # If we're not building each and every checkin, only build the latest
                if not self.sequential:
                    starting_rev = latest_tree_rev

                # Pretty much do every commit (for binary search on regressions) (should be adjustable)
                #  The + 1 is so that the latest tree revision will be checked (range func does not include the last number in the sequence)
                for i in range(starting_rev, latest_tree_rev + 1):

                    latest_for_package = self.src_repo.latest_path_revision(pack_obj.info["HEAD_PATH"], revision=i)
                    if not latest_for_package:
                        tarball_log.log("Error getting revision %d, trying later... (%s)\n" % (i, pack_name))
                        # Skip to next pack...
                        break

                    if not self.distfiles.contains("HEAD", pack_name, str(latest_for_package)) and not self.cancelled():
                        command = "cd %s; ./mktarball --snapshot %s %d" % (
                            config.packaging_dir,
                            pack_name,
                            latest_for_package,
                        )
                        tarball_log.log("Executing: %s\n" % (command))

                        # TODO: the system needs to be smarter about reinstalling the same rpms over and over...

                        # This will show console output, but not write to the log
                        #  Log will be for brief info, and the console will watch what's currently going on
                        # (For some reason my signal gets ignored if I'm using os.system... seems to work with popen)
                        (code, output) = utils.launch_process(command, print_output=0)
                        tarball_log.log("Exit code: %d (%s)\n" % (code, pack_name))

                        # handle jail busy errors (exit code of 2)
                        if code == 2:
                            tarball_log.log("Jail busy, retrying later... (%s)\n" % pack_name)

                        # handle svn timeouts
                        elif code == utils.KILLED_EXIT_CODE:
                            tarball_log.log("svn commands killed, retrying later... (%s)\n" % pack_name)

                        # Handle failed tarballs...
                        elif code:
                            tarball_log.log("Tarball creation failed...(%s)\n" % pack_name)

                            # Send out the log with the tarball, or at least a link... ?
                            link = "http://mono.ximian.com/monobuild/tarball_logs/HEAD/%s/%d.log" % (
                                pack_name,
                                latest_for_package,
                            )
                            utils.send_mail(
                                "*****@*****.**",
                                "*****@*****.**",
                                "mktarball failed (%s %d)" % (pack_name, latest_for_package),
                                "mktarball has failed for package %s revision %d\n\n%s"
                                % (pack_name, latest_for_package, link),
                            )

            time_duration = utils.time_duration_asc(start_time, utils.get_time()) * 60

            # Only sleep if this loop was shorter than max_poll_interval
            #  and if we do sleep, discount the time_duration
            if not self.cancelled() and time_duration < self.max_poll_interval:
                # tarball_log.log("Sleeping for %d seconds...\n" % (self.max_poll_interval - time_duration) )
                time.sleep(self.max_poll_interval - time_duration)

        # Exiting because we've been cancelled
        tarball_log.log("Tarball creator shutting down...\n")
Example #42
    def run(self):

        sync_log.log("sync thread starting...\n")

        while not self.cancelled():

            self.load_info()

            # Must base these dirs off 'trunk/release'
            dirs = []

            # Add tarball_map
            dirs += ["packaging/tarball_map"]

            # Add config
            dirs += ["pyutils/config.py"]

            # sync_log.log(" *** Gathering dirs ***\n")

            # Gather dirs to synchronize
            for i in ["HEAD", "RELEASE"]:
                i_dir = config.build_info_dir + os.sep + i
                if os.path.exists(i_dir):
                    for distro in os.listdir(i_dir):
                        distro_dir = i_dir + os.sep + distro
                        if os.path.exists(distro_dir):
                            for component in os.listdir(distro_dir):
                                # Get the last 'num_builds' number of elements from the list
                                versions = build.get_versions(i, distro, component)[-self.sync_num_builds :]
                                for j in versions:
                                    dirs.append(os.path.join("monobuild/www/builds", i, distro, component, j))

                # Grab latest num_builds for tarball log files as well
                tarball_path = os.path.join(config.build_info_dir, "..", "tarball_logs", i)
                if os.path.exists(tarball_path):
                    for component in os.listdir(tarball_path):
                        component_dir = tarball_path + os.sep + component
                        versions = utils.version_sort(os.listdir(component_dir))
                        for j in versions[-self.sync_num_builds :]:
                            dirs.append(os.path.join("monobuild/www/tarball_logs", i, component, j))

            # construct a dirs string up to the max arg length, and run rsync for each of those blocks (what's the real maximum?)
            while len(dirs):

                dir_string = ""
                counter = 0
                for i in dirs:
                    # +1 is for the space char
                    if len(i) + 1 + len(dir_string) < self.sync_max_arg_len:
                        dir_string += " %s" % i
                    else:
                        break
                    counter += 1

                # Remove counter elements from dirs
                dirs = dirs[counter:]

                # sync_log.log(" *** Syncing ***\n")
                #  For some reason the --delete option crashes when running the second time to go-mono.com and mono.ximian.com ... ?
                # rsync all files over, and don't include the builds... just logs and info.xml
                command = (
                    'cd %s; rsync -avzR -e "ssh %s" --exclude "files/downloads" --exclude "files/*.tar.*" --exclude "files/steps/*/*.tar.*" %s %s:%s'
                    % (config.release_repo_root, config.ssh_options, dir_string, self.sync_host, self.sync_target_dir)
                )
                # sync_log.log(command + "\n")
                status, output = utils.launch_process(command, print_output=0, output_timeout=600)

                # sync_log.log(output)
                if status:
                    sync_log.log("Error running rsync: " + output)

                    # sync_log.log(" *** sync Sleeping ***\n")
            time.sleep(self.sync_sleep_time)

        sync_log.log("sync thread shutting down...\n")
Example #43
    def run(self):

        distro = self.distro
        scheduler_log.log("%s:\tStarting scheduler\n" % (distro))

        while not self.cancelled():

            packages_to_build = []
            for pack_def in config.sd_latest_build_packages:
                pack_obj = packaging.package("", pack_def)
                if pack_obj.valid_build_platform(distro):
                    packages_to_build.append(pack_def)

            num_started_builds = 0
            start_time = utils.get_time()

            # Build each package for this jail
            for package_name in packages_to_build:

                # Skip builds so we can exit
                if self.cancelled():
                    continue

                # Check to see what the latest tarball is
                # The src_file_repo class is not threadsafe, so provide a mutex here
                tarball_lock.acquire()
                try:
                    tarball_filename = tarballs.get_latest_tarball("HEAD", package_name)
                except:
                    # catch this in case the filename is being edited by hand
                    tarball_filename = ""
                tarball_lock.release()

                if not tarball_filename:
                    # scheduler_log.log("%s:\t*** Error getting latest tarball (%s) (Probably doesn't exist...)!!!\n" % (distro, package_name) )
                    pass

                else:

                    # print "Latest tarball: " + tarball_filename

                    # Get version
                    version, ext = version_re.search(tarball_filename).groups()

                    info = datastore.build_info("HEAD", distro, package_name, version)

                    # Build if the build doesn't exist already
                    if not info.exists:
                        command = "cd %s; ./build --suppress_output %s %s %s" % (
                            config.packaging_dir,
                            distro,
                            package_name,
                            version,
                        )
                        scheduler_log.log("%s:\t%s\n" % (distro, command))

                        num_started_builds += 1
                        # TODO: hmm... is this not blocking?  Seems this code continues before being able to run tests?
                        (code, output) = utils.launch_process(command, print_output=0)
                        # Testing...
                        # code = 2

                        # Is the jail busy?  if so, just repeat this loop (and select a new tarball if a newer one exists)
                        # Hmm... this really shouldn't happen, as much at least
                        if code == 2:
                            # scheduler_log.log("%s:\tJail is busy or offline... will retry again (%s)\n" % (distro, package_name) )
                            num_started_builds -= 1

                        if code == 5:
                            scheduler_log.log(
                                "%s:\tbuild info is missing, but packages exist... ?? will retry again (%s)\n"
                                % (distro, package_name)
                            )
                            num_started_builds -= 1
                    else:
                        # scheduler_log.log("%s:\tSkipping existing build (%s, %s)\n" % (distro, package_name, version) )
                        pass

            time_duration = utils.time_duration_asc(start_time, utils.get_time()) * 60
            if num_started_builds == 0 and time_duration < config.sd_wakeup_interval:
                # scheduler_log.log("%s:\tSleeping %d seconds...\n" % (distro, config.sd_wakeup_interval - time_duration) )
                time.sleep(config.sd_wakeup_interval - time_duration)

        # Exiting because we've been removed from the configuration
        scheduler_log.log("%s:\tExiting upon user request...\n" % distro)
Example #44
    def run(self):

        tarball_log.log("Tarball creator starting...\n")

        while not self.cancelled():

            start_time = utils.get_time()

            # routinely check for updates (sleep every so often)
            for pack_name, pack_obj in self.pack_objs.iteritems():

                # We are reloading pack_objs, but this for loop won't notice it until we enter it again
                if self.cancelled(): continue

                # get latest version from the tree
                latest_tree_rev = self.src_repo.latest_tree_revision()
                #print "Latest tree rev: %d (%s)" % (latest_tree_rev, pack_name)

                if not latest_tree_rev:
                    tarball_log.log(
                        "Error getting latest tree rev, trying later... (%s)\n"
                        % pack_name)

                    # Restart for loop over...
                    break

                # Only do for the last couple of commits, rather than constantly updating a base revision
                if latest_tree_rev <= self.num_sequential:
                    starting_rev = 1
                else:
                    starting_rev = latest_tree_rev - self.num_sequential

                # If we're not building each and every checkin, only build the latest
                if not self.sequential:
                    starting_rev = latest_tree_rev

                # Pretty much do every commit (for binary search on regressions) (should be adjustable)
                #  The + 1 is so that the latest tree revision will be checked (range func does not include the last number in the sequence)
                for i in range(starting_rev, latest_tree_rev + 1):

                    latest_for_package = self.src_repo.latest_path_revision(
                        pack_obj.info['HEAD_PATH'], revision=i)
                    if not latest_for_package:
                        tarball_log.log(
                            "Error getting revision %d, trying later... (%s)\n"
                            % (i, pack_name))
                        # Skip to next pack...
                        break

                    if not self.distfiles.contains(
                            'HEAD', pack_name,
                            str(latest_for_package)) and not self.cancelled():
                        command = "cd %s; ./mktarball --snapshot %s %d" % (
                            config.packaging_dir, pack_name,
                            latest_for_package)
                        tarball_log.log("Executing: %s\n" % (command))

                        # TODO: the system needs to be smarter about reinstalling the same rpms over and over...

                        # This will show console output, but not write to the log
                        #  Log will be for brief info, and the console will watch what's currently going on
                        # (For some reason my signal gets ignored if I'm using os.system... seems to work with popen)
                        (code, output) = utils.launch_process(command,
                                                              print_output=0)
                        tarball_log.log("Exit code: %d (%s)\n" %
                                        (code, pack_name))

                        # handle jail busy errors (exit code of 2)
                        if code == 2:
                            tarball_log.log(
                                "Jail busy, retrying later... (%s)\n" %
                                pack_name)

                        # handle svn timeouts
                        elif code == utils.KILLED_EXIT_CODE:
                            tarball_log.log(
                                "svn commands killed, retrying later... (%s)\n"
                                % pack_name)

                        # Handle failed tarballs...
                        elif code:
                            tarball_log.log(
                                "Tarball creation failed...(%s)\n" % pack_name)

                            # Send out the log with the tarball, or at least a link... ?
                            link = "http://mono.ximian.com/monobuild/tarball_logs/HEAD/%s/%d.log" % (
                                pack_name, latest_for_package)
                            utils.send_mail(
                                '*****@*****.**', '*****@*****.**',
                                'mktarball failed (%s %d)' %
                                (pack_name, latest_for_package),
                                "mktarball has failed for package %s revision %d\n\n%s"
                                % (pack_name, latest_for_package, link))

            time_duration = utils.time_duration_asc(start_time,
                                                    utils.get_time()) * 60

            # Only sleep if this loop was shorter than max_poll_interval
            #  and if we do sleep, discount the time_duration
            if not self.cancelled() and time_duration < self.max_poll_interval:
                #tarball_log.log("Sleeping for %d seconds...\n" % (self.max_poll_interval - time_duration) )
                time.sleep(self.max_poll_interval - time_duration)

        # Exiting because we've been cancelled
        tarball_log.log("Tarball creator shutting down...\n")
Example #45
	# Get svn:ignore for this dir
	code, files = utils.launch_process('svn propget svn:ignore .', print_output=0)

	# If there's some valid data
	if not code:
		for f in files.split():
			if os.path.isdir(f):
				utils.launch_process('rm -Rf ' + f, print_command=1)
			else:
				utils.launch_process('rm -f ' + f, print_command=1)


		# Remove the ignored files

		for d in os.listdir('.'):
			if os.path.isdir(d):
				clean_dir(d)

	os.chdir('..')


# Clean up a repo
clean_dir(dir)

# Don't think I want to do this yet...  (keep local changes)
#utils.launch_process("cd %s; svn -R revert ." % dir)

utils.launch_process("cd %s; svn update" % dir)

Example #46
    def start_processing_handler(self, event):
        self.logger.debug("Event %s: start analysis of %s.",
                          event, self.pcap_path)
        self.analysis_process = launch_process(
            TSHARK, '-r', self.pcap_path,
            '-T', self.configuration.get('log_format', 'text'))
Example #47
0
    def run(self):

        distro = self.distro
        scheduler_log.log("%s:\tStarting scheduler\n" % (distro))

        while not self.cancelled():

            packages_to_build = []
            for pack_def in config.sd_latest_build_packages:
                pack_obj = packaging.package("", pack_def)
                if pack_obj.valid_build_platform(distro):
                    packages_to_build.append(pack_def)

            num_started_builds = 0
            start_time = utils.get_time()

            # Build each package for this jail
            for package_name in packages_to_build:

                # Skip builds so we can exit
                if self.cancelled(): continue

                # Check to see what the latest tarball is
                # The src_file_repo class is not threadsafe, so provide a mutex here
                tarball_lock.acquire()
                try:
                    tarball_filename = tarballs.get_latest_tarball(
                        "HEAD", package_name)
                except:
                    # catch this in case the filename is being edited by hand
                    tarball_filename = ""
                tarball_lock.release()

                if not tarball_filename:
                    #scheduler_log.log("%s:\t*** Error getting latest tarball (%s) (Probably doesn't exist...)!!!\n" % (distro, package_name) )
                    pass

                else:

                    #print "Latest tarball: " + tarball_filename

                    # Get version
                    version, ext = version_re.search(tarball_filename).groups()

                    info = datastore.build_info("HEAD", distro, package_name,
                                                version)

                    # Build if the build doesn't exist already
                    if not info.exists:
                        command = "cd %s; ./build --suppress_output %s %s %s" % (
                            config.packaging_dir, distro, package_name,
                            version)
                        scheduler_log.log("%s:\t%s\n" % (distro, command))

                        num_started_builds += 1
                        # TODO: hmm... is this not blocking?  This code seems to continue before the tests can run.
                        (code, output) = utils.launch_process(command,
                                                              print_output=0)
                        # Testing...
                        #code = 2

                        # Is the jail busy?  if so, just repeat this loop (and select a new tarball if a newer one exists)
                        # Hmm... this really shouldn't happen much, at least
                        if code == 2:
                            #scheduler_log.log("%s:\tJail is busy or offline... will retry again (%s)\n" % (distro, package_name) )
                            num_started_builds -= 1

                        elif code == 5:
                            scheduler_log.log(
                                "%s:\tbuild info is missing, but packages exist... ?? will retry again (%s)\n"
                                % (distro, package_name))
                            num_started_builds -= 1
                    else:
                        #scheduler_log.log("%s:\tSkipping existing build (%s, %s)\n" % (distro, package_name, version) )
                        pass

            time_duration = utils.time_duration_asc(start_time,
                                                    utils.get_time()) * 60
            if num_started_builds == 0 and time_duration < config.sd_wakeup_interval:
                #scheduler_log.log("%s:\tSleeping %d seconds...\n" % (distro, config.sd_wakeup_interval - time_duration) )
                time.sleep(config.sd_wakeup_interval - time_duration)

        # Exiting because we've been removed from the configuration
        scheduler_log.log("%s:\tExiting upon user request...\n" % distro)
Example #48
0
    def latest_path_revision(self, path, revision=0):
        """given a svn dir path, what's the latest revision for that url at a given revision.

		path can either be a string or sequence of strings
		"""

        # Convert to list
        if isinstance(path, str):
            path = [path]

        versions = []

        rev_arg = ""
        if revision: rev_arg = "-r " + str(revision)

        for item in path:
            dirname = os.path.dirname(item)
            module = os.path.basename(item)

            command = '%s svn %s ls %s/%s %s -v' % (
                self.svn_env, self.svn_options, self.root, dirname, rev_arg)
            self.debug_print("Command: " + command)

            # Cache output for this command, should lessen load from svn server
            #  Only check if we have a revision
            if revision and self.latest_path_revision_cache.has_key(command):
                self.debug_print("CACHE:hit!")
                (code, output) = self.latest_path_revision_cache[command]

                # find hit and put it at the end of the list
                self.cache_lru.append(
                    self.cache_lru.pop(self.cache_lru.index(command)))
            else:
                self.debug_print("CACHE:miss...")
                self.regulator()

                code, output = utils.launch_process(
                    command, print_output=0, output_timeout=output_timeout)

                self.latest_path_revision_cache[command] = (code, output)
                self.cache_lru.append(command)

                # Cache cleanup, so we don't use up all memory, since this is a long-running process
                if len(self.cache_lru) > self.cache_max_size:
                    self.debug_print("Removing old item from cache")
                    self.latest_path_revision_cache.pop(self.cache_lru.pop(0))

            for line in output.split('\n'):
                fields = line.split()

                # Skip blank lines so the trailing newline doesn't trip the error check below
                if not fields:
                    continue

                # Catch network/ssh errors
                try:
                    version = int(fields[0])
                except:
                    return 0

                tmp_module = os.path.dirname(fields.pop())

                if tmp_module == module:
                    versions += [version]

        # Mirror the error convention above: return 0 if nothing matched
        if not versions:
            return 0
        versions.sort()
        return versions.pop()
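
The cache handling above keeps a plain dict of results plus a separate
cache_lru list that records use order for eviction.  The same bookkeeping
can be packaged as a small helper; this is an equivalent sketch built on
collections.OrderedDict, not the code the class actually uses:

from collections import OrderedDict

class LRUCache(object):
    """Dict of entries plus least-recently-used eviction order."""

    def __init__(self, max_size=100):
        self.max_size = max_size
        self.entries = OrderedDict()

    def get(self, key):
        # Pop and re-insert so the freshest entry is always last
        value = self.entries.pop(key)
        self.entries[key] = value
        return value

    def put(self, key, value):
        self.entries[key] = value
        if len(self.entries) > self.max_size:
            # Evict the least recently used entry (the oldest one)
            self.entries.popitem(last=False)

Callers would check "key in cache.entries" before calling get(), mirroring
the has_key test in the method above.
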
Example #49
0
import os

import utils  # assumption: the build system's process-launching helper used throughout


def clean_dir(dir):
    os.chdir(dir)

    # Get svn:ignore for this dir
    code, files = utils.launch_process('svn propget svn:ignore .',
                                       print_output=0)

    # If there's some valid data
    if not code:
        for f in files.split():
            if os.path.isdir(f):
                utils.launch_process('rm -Rf ' + f, print_command=1)
            else:
                utils.launch_process('rm -f ' + f, print_command=1)

        # Recurse into subdirectories to clean their ignored files too
        for d in os.listdir('.'):
            if os.path.isdir(d):
                clean_dir(d)

    os.chdir('..')


# Clean up a repo ('dir' is assumed to be set earlier, e.g. parsed from the command line)
clean_dir(dir)

# Don't think I want to do this yet...  (keep local changes)
#utils.launch_process("cd %s; svn -R revert ." % dir)

utils.launch_process("cd %s; svn update" % dir)
Example #50
0
    def start_processing_handler(self, event):
        self.logger.debug("Event %s: start analysis of %s.", event,
                          self.pcap_path)
        self.analysis_process = launch_process(
            TSHARK, '-r', self.pcap_path, '-T',
            self.configuration.get('log_format', 'text'))
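
Note that this launch_process differs from utils.launch_process in the other
examples: it takes an argument vector rather than a shell string, and it must
return immediately with a process handle, since the handler stores it in
self.analysis_process for later inspection.  A minimal sketch of that
contract (TSHARK is assumed to be the path to the tshark binary):

import subprocess

TSHARK = 'tshark'  # assumption: tshark is on the PATH

def launch_process(*args):
    """Start a child process without blocking and return the Popen handle."""
    return subprocess.Popen(list(args),
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)

The caller can later poll(), wait() on, or terminate() the returned handle.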