Example #1
1
def submit_job(command, params, test_run=False):
    """Submit an SGE array job that runs *command* once per entry in *params*.

    Args:
        command: script path handed to the cluster-side interpreter; also
            used as the job name (-N).
        params: sequence of per-task parameters; its length sets the array
            task range (-t 1-N).
        test_run: when True, request a 30-minute runtime instead of 6 hours.

    NOTE(review): `clear_directories`, `params_path` and `json` are not
    defined in this block — presumably module-level; confirm before reuse.
    Python 2 code: writes str (not bytes) to the child's stdin.
    """
    from subprocess import Popen, PIPE

    # Start each submission from clean input/output capture directories.
    clear_directories('inputs', 'stdout', 'stderr')

    # Persist the per-task parameters where the remote tasks can read them.
    with open(params_path, 'w') as file:
        json.dump(params, file)

    qsub_command = (
            'qsub',
            '-cwd',
            '-S', '/bin/sh',
            '-o', 'stdout',
            '-e', 'stderr',
            '-l', 'h_rt=6:00:00' if not test_run else 'h_rt=0:30:00',
            '-l', 'mem_free=1G',
            '-l', 'arch=linux-x64',
            '-l', 'netapp=1G',
            '-t', '1-{0}'.format(len(params)),
            '-N', command,
    )

    # Feed the actual job script to qsub via stdin.
    process = Popen(qsub_command, stdin=PIPE)
    process.stdin.write('module load imp-fast;')
    process.stdin.write('PYTHONPATH=.:$PYTHONPATH;')
    process.stdin.write('/netapp/home/kale/.local/bin/python2.7 ' + command)
    process.stdin.close()
    process.wait()
Example #2
0
def exec_in_shell(cmd):
	"""Run *cmd* through the shell and return its decoded stdout.

	stdout/stderr are captured via temporary files.  If stderr looks like a
	Python failure (contains "traceback"/"error"/"exception"), stdout is
	printed and an Exception carrying stderr is raised; otherwise a "."
	progress dot is printed.  (Python 2 syntax: print statement and
	old-style raise.)
	"""
	# using Popen instead of os.system - as recommended by python docs
	from subprocess import Popen
	import tempfile

	with tempfile.TemporaryFile() as stdout:
		with tempfile.TemporaryFile() as stderr:
			p = Popen(cmd, shell=True, stdout=stdout, stderr=stderr)
			p.wait()

			# Rewind and read what the child wrote; decode only when
			# non-empty (empty reads keep the bytes type).
			stdout.seek(0)
			out = stdout.read()
			if out: out = out.decode('utf-8')

			stderr.seek(0)
			err = stderr.read()
			if err: err = err.decode('utf-8')

	# Heuristic: these keywords in stderr are treated as a fatal failure.
	if err and any((kw in err.lower() for kw in ["traceback", "error", "exception"])):
		print out
		raise Exception, err
	else:
		print "."

	return out
 def _orientation(self, image):
     """Apply EXIF-orientation corrections to *image*'s thumbnail options.

     With GraphicsMagick the EXIF orientation tag is probed explicitly and
     translated into flip/flop/rotate options; ImageMagick handles it via
     its auto-orient option.  Returns the (mutated) image dict.
     """
     if settings.THUMBNAIL_CONVERT.endswith('gm convert'):
         # GraphicsMagick: query the EXIF orientation tag ourselves.
         probe = settings.THUMBNAIL_IDENTIFY.split()
         probe += ['-format', '%[exif:orientation]', image['source']]
         proc = Popen(probe, stdout=PIPE)
         proc.wait()
         raw = proc.stdout.read().strip()
         if raw:
             # Each EXIF orientation code maps to the option(s) undoing it.
             fixups = {
                 2: {'flop': None},
                 3: {'rotate': '180'},
                 4: {'flip': None},
                 5: {'rotate': '90', 'flop': None},
                 6: {'rotate': '90'},
                 7: {'rotate': '-90', 'flop': None},
                 8: {'rotate': '-90'},
             }
             image['options'].update(fixups.get(int(raw), {}))
     else:
         # ImageMagick also corrects the orientation exif data for
         # destination
         image['options']['auto-orient'] = None
     return image
Example #4
0
class PopenWrapperClass(object):
    """Context-manager/iterator wrapper around ``subprocess.Popen``.

    The command is launched immediately through the shell with stdout
    captured; iterating the wrapper or entering it as a context yields the
    child's stdout stream.
    """

    def __init__(self, command):
        """Launch *command* via the shell, capturing its stdout."""
        self.command = command
        self.pop_ = Popen(self.command, shell=True, stdout=PIPE)

    def __iter__(self):
        """Iterate over the child's raw stdout lines."""
        return self.pop_.stdout

    def __enter__(self):
        """Expose the child's stdout stream as the context value."""
        return self.pop_.stdout

    def __exit__(self, exc_type, exc_value, traceback):
        """Delegate cleanup to Popen's own __exit__ when it exists."""
        popen_exit = getattr(self.pop_, '__exit__', None)
        if popen_exit is not None:
            return popen_exit(exc_type, exc_value, traceback)
        # Older Popen without context-manager support: reap the child and
        # report whether an exception was in flight (False propagates it).
        self.pop_.wait()
        return not (exc_type or exc_value or traceback)
Example #5
0
def display( screen ):
    """Render *screen* to a temporary PPM file and open it in a viewer.

    Tries ImageMagick's ``display`` first and falls back to ``imdisplay``
    (the Windows viewer) when the first executable cannot be started.

    Bug fix: the fallback previously caught ``WindowsError``, a name that
    does not exist on non-Windows platforms — a missing ``display`` binary
    there raised ``NameError`` instead of triggering the fallback.
    ``OSError`` is what Popen raises for a missing executable everywhere
    (and ``WindowsError`` is an alias of it on Windows).
    """
    ppm_name = 'pic.ppm'
    save_ppm( screen, ppm_name )
    try:
        Popen.wait(Popen( ['display', ppm_name], stdin=PIPE, stdout = PIPE ))
    except OSError:
        Popen.wait(Popen( ['imdisplay', ppm_name], stdin=PIPE, stdout = PIPE ))
Example #6
0
    def get_cmdline(self, proc):
        """Return the full command line of psutil process *proc* as a string.

        On Windows, psutil's cmdline() is unreliable (notably when reading a
        64-bit process from a 32-bit python), so the WMIC command is shelled
        out instead; elsewhere psutil's cmdline() tokens are joined.

        Bug fix: the Windows fallback returned ``p.exe()`` where ``p`` was
        never defined (guaranteed NameError); it now uses the ``proc``
        argument.
        """
        if mozinfo.os == "win":
            # The psutil.cmdline() implementation on Windows is pretty busted,
            # in particular it doesn't handle getting the command line of a
            # 64-bit process from a 32-bit python process very well.
            #
            # Instead we just shell out the WMIC command which works rather
            # well.
            cmd = "WMIC path win32_process where handle='%d' get Commandline" % (proc.pid)
            process = Popen(cmd.split(), stdout=PIPE)
            (output, err) = process.communicate()
            process.wait()

            # The output of WMIC is something like:
            #   Commandline
            #
            #
            #   path/to/exe --args etc

            buf = StringIO.StringIO(output)
            buf.readline()  # header
            for line in buf:
                if line.strip():
                    return line.strip()

            # If all else fails, just return the executable path.
            return proc.exe()
        else:
            return " ".join(proc.cmdline())
Example #7
0
    def run(self, copy_to_current_on_exit=False):
        """
        Write the input file to the scratch directory, run packmol and return
        the packed molecule.

        Args:
            copy_to_current_on_exit (bool): Whether or not to copy the packmol
                input/output files from the scratch directory to the current
                directory.

        Returns:
                Molecule object, or None when packmol produced no output file.

        Fix: the previous code called ``p.wait()`` before ``communicate()``;
        with both stdout and stderr piped this can deadlock once a pipe
        buffer fills, and ``communicate()`` already waits for the process.
        The input file handle is now also closed deterministically.
        """
        scratch = tempfile.gettempdir()
        with ScratchDir(scratch, copy_to_current_on_exit=copy_to_current_on_exit) as scratch_dir:
            self._write_input(input_dir=scratch_dir)
            packmol_bin = ['packmol']
            with open(os.path.join(scratch_dir, self.input_file), 'r') as packmol_input:
                p = Popen(packmol_bin, stdin=packmol_input, stdout=PIPE, stderr=PIPE)
                # communicate() drains both pipes and waits for exit.
                (stdout, stderr) = p.communicate()
            output_file = os.path.join(scratch_dir, self.control_params["output"])
            if os.path.isfile(output_file):
                packed_mol = BabelMolAdaptor.from_file(output_file)
                print("packed molecule written to {}".format(
                    self.control_params["output"]))
                return packed_mol.pymatgen_mol
            else:
                print("Packmol execution failed")
                print(stdout, stderr)
                return None
    def process_IN_MODIFY(self, event):
        """Inotify hook: rebuild when a watched .rst/.py file is modified.

        Runs ``self.cmd`` through the shell; on failure, shows a desktop
        notification (when pynotify is available) and appends the output to
        notify.log.  Every run's exit code is appended to applet-notify.log.
        (Python 2: uses the ``file()`` builtin.)
        """
        # We have explicitely registered for this kind of event.
        filename = event.name.split('.')
        file_ext = filename[-1]

        if file_ext == 'rst' or file_ext == 'py': 
            retVal = Popen(self.cmd, shell=True, stderr=PIPE, stdout=PIPE)
            retVal.wait()

            if retVal.returncode != 0:
                # Non-zero exit: surface the captured (stdout, stderr) pair.
                notice = retVal.communicate()
                if PYNOTIFY:
                    n = pynotify.Notification("Failure!", notice)
                    n.show()
                    
                f_handle = file('notify.log', 'a')
                f_handle.write(repr(notice) + '\n')
                f_handle.flush()
                f_handle.close()
                        
                
            
            # Record the exit code of every triggered run.
            f_handle = file('applet-notify.log', 'a')
            f_handle.write(repr(retVal.returncode) + '\n')
            f_handle.flush()
            f_handle.close()
            self.last_call = time.time()
Example #9
0
def mongo(host='localhost', output='/opt/canopsis/var/backups'):
	"""Dump the MongoDB instance on *host* and zip the dump into *output*."""
	logger = logging.getLogger()
	logger.debug('Mongo Backup start')
	logger.debug('Host  : %s' % host)
	logger.debug('Output: %s' % output)

	archive_name = 'backup_mongodb'

	logger.debug('Create temp dir')
	tmp_dir = mkdtemp(prefix='/opt/canopsis/tmp/')
	os.makedirs('%s/%s' % (tmp_dir, archive_name))

	logger.debug('Create output dir if not exists')
	if not os.path.exists(output):
		os.makedirs(output)

	# Dump into the temp dir; the archive is built from it afterwards.
	logger.debug('Launch mongodump')
	mongodump_cmd = '/opt/canopsis/bin/mongodump --host %s --out %s/%s' % (host, tmp_dir, archive_name)
	logger.debug('Command: %s' % mongodump_cmd)
	dumper = Popen(mongodump_cmd, shell=True)
	dumper.wait()

	logger.debug('Create archive into %s' % output)
	shutil.make_archive('%s/%s' % (output, archive_name), 'zip', tmp_dir)

	logger.debug('Remove temp dir')
	shutil.rmtree(tmp_dir)

	logger.debug('Mongo Backup finished')
Example #10
0
def config(output='/opt/canopsis/var/backups'):
	"""Archive /opt/canopsis/etc (plus an installed-package list) as a zip.

	Args:
		output: directory receiving ``backup_config.zip`` (created when
			missing).

	Fixes: the previous code called ``wait()`` before reading a PIPE'd
	stdout, which can deadlock once the pipe buffer fills —
	``communicate()`` drains and waits safely.  The package list is now
	written with a context manager and a single ``write()``
	(``writelines`` on a str iterated it character by character).
	"""
	logger = logging.getLogger()
	logger.debug('Config Backup start')
	logger.debug('Output: %s' % output)
	
	logger.debug('Create output dir if not exists')
	if not os.path.exists(output):
		os.makedirs(output)

	logger.debug('Create temp dir')
	archive_name = 'backup_config'
	tmp_dir = mkdtemp(prefix='/opt/canopsis/tmp/')

	logger.debug('Create file with installed packages')
	export_output = Popen('pkgmgr export', shell=True, stdout=PIPE)
	packages, _ = export_output.communicate()
	with open('/opt/canopsis/etc/.packages', 'w') as f:
		f.write(packages)

	logger.debug('Copy config files into tmp folder')
	shutil.copytree('/opt/canopsis/etc', '%s/%s' % (tmp_dir, archive_name))
	
	logger.debug('Create archive into %s' % output)
	logger.debug('Archive name: %s' % archive_name)
	shutil.make_archive('%s/%s' % (output, archive_name),
						'zip',
						tmp_dir)

	logger.debug('Remove temp dir')
	shutil.rmtree(tmp_dir)

	logger.debug('Config Backup finished')
Example #11
0
class process(object):
  """Thin Popen wrapper that records errors raised while waiting.

  (Python 2 only: uses ``except Exception, err`` syntax.)
  """
  # Underlying Popen object.
  _proc = None
  # Exception captured by _call(), if any.
  _error = None
  # Collected output groups (filled elsewhere; starts empty).
  _groups = None
  # Whether the process was flagged as timed out.
  _timed_out = None

  def __init__(self, cmd, group=None):
    """Start *cmd* (string commands are shlex-split) with stderr merged
    into stdout; optionally register with a process *group*."""
    if StringType == type(cmd):
      cmd = split(cmd)

    self._proc = Popen(cmd, stderr=STDOUT, stdout=PIPE, shell=False)

    self._error = None
    self._groups = []
    self._timed_out = False

    if group is not None:
      group.add_process(self)

  def _call(self):
    """Wait for the child, stashing any exception in self._error."""
    # annoyingly, KeyboardInterrupts are transported to threads, while most
    # other Exceptions aren't in python
    try:
      self._proc.wait()
    except Exception, err:
      self._error = err
Example #12
0
def exec_interpreter(code, interp='python'):
    """Pipe *code* into *interp* and collect its output.

    Returns a ``(stdout, stderr, returncode)`` tuple.  When the interpreter
    cannot be started or written to, the return code is 666 and stderr
    carries a diagnostic message.

    .. TODO::
       * Use non-blocking poll instead of wait.
       * Timeout killing
    """
    try:
        proc = Popen((interp,), stdin=PIPE, stdout=PIPE, stderr=PIPE)
    except Exception as exc:
        return ('',
                'Cannot start interpreter({0}) pipe. Got exception: {1}'.format(interp, str(exc)),
                666)
    try:
        proc.stdin.write(code)
    except Exception as exc:
        return ('',
                'Cannot write to interpreter({0}) pipe. Got exception: {1}'.format(interp, str(exc)),
                666)
    proc.stdin.close()
    proc.wait()
    return (proc.stdout.read(), proc.stderr.read(), proc.returncode)
Example #13
0
def encode(source, dest):
    """Transcode *source* to *dest* using the configured encoder command."""
    log.info(u'Started encoding {0}'.format(util.displayable_path(source)))

    # Substitute $source/$dest into each (utf-8 encoded) command token.
    args = [
        Template(token.encode('utf-8')).substitute({
            'source':   source,
            'dest':     dest
        })
        for token in get_command()
    ]

    proc = Popen(args, close_fds=True, stderr=DEVNULL)
    proc.wait()

    if proc.returncode != 0:
        # Something went wrong (probably Ctrl+C), remove temporary files
        log.info(u'Encoding {0} failed. Cleaning up...'
                 .format(util.displayable_path(source)))
        util.remove(dest)
        util.prune_dirs(os.path.dirname(dest))
        return

    log.info(u'Finished encoding {0}'.format(util.displayable_path(source)))
Example #14
0
 def run(self):
     """Run the forward and reverse mgiza alignments, overlapping them."""
     # Launch both directions before waiting so the two runs overlap.
     forward = Popen(['mgiza', self.input()['src2tgt.gizacfg'].fn])
     backward = Popen(['mgiza', self.input()['tgt2src.gizacfg'].fn])
     forward.wait()
     backward.wait()
def bitrate_via_ffmpeg(filename):
    """Probe *filename* with ffmpeg and return (bitrate_in_bytes, duration).

    Returns None when no line of ffmpeg's probe output matches the bitrate
    pattern.  The probe output is routed through a temporary file which is
    always removed afterwards.
    """
    matcher = _create_matcher()
    # ffmpeg prints stream information on stderr; capture it to a file.
    with open(TEMPORARY_STDERR_FILE, "w") as sink:
        probe = Popen("ffmpeg -i %s" % filename, shell=True, stderr=sink)
        probe.wait()
    # Re-open the capture and match each line against the bitrate regex
    # built by _create_matcher.
    handle = open(TEMPORARY_STDERR_FILE, "r")
    try:
        for line in handle.readlines():
            hits = matcher.findall(line)
            if len(hits) > 0:
                # ffmpeg reports kb/s but we need byte/s, hence * 1024 / 8.
                bitrate_in_byte = int(hits[0]) * 1024 / 8
                duration = extract_duration(line)
                return bitrate_in_byte, duration
    finally:
        # Always drop the handle and the temporary capture file.
        handle.close()
        os.unlink(TEMPORARY_STDERR_FILE)
Example #16
0
def exec_cmd2(cmd_args, *args, **kwargs):
    """Run *cmd_args* without capturing output and return its exit code.

    Unlike ``exec_cmd``, stdout/stderr are not captured and therefore print
    straight to the console.  All additional positional and keyword
    arguments are forwarded verbatim to ``subprocess.Popen``; see
    `Subprocess <http://docs.python.org/library/subprocess.html>`_ for the
    features of ``Popen()``.

    Args:
        cmd_args (list): List of command line arguments

    Other Parameters:
        args: Additional arguments are passed to ``Popen()``
        kwargs: Additional keyword arguments are passed to ``Popen()``

    Returns:
        int: The integer return code of the command.

    Example:

        .. code-block:: python

            from cement.utils import shell

            exitcode = shell.exec_cmd2(['echo', 'helloworld'])

    """
    child = Popen(cmd_args, *args, **kwargs)
    child.wait()
    return child.returncode
Example #17
0
 def is_commit(self, object_id):
     """Return True when *object_id* names a commit in this repository.

     Fix: the method previously mutated the process-wide working directory
     with os.chdir() — and left it changed if Popen raised — purely to run
     git inside ``self.basedir``.  The git call now uses ``cwd=`` instead,
     and ``communicate()`` drains the pipe before the child is reaped.
     """
     proc = Popen(('git cat-file -t %s' % object_id).split(),
                  stdout=PIPE, cwd=self.basedir)
     out, _ = proc.communicate()
     return out.strip() == 'commit'
Example #18
0
def get_Kstat():
    """Collect FreeBSD kernel statistics relevant to ZFS/ARC reporting.

    Runs ``sysctl -q`` over a fixed list of OIDs/subtrees and returns a
    dict mapping each matched sysctl key to its value wrapped by ``D``
    (defined elsewhere in this module).  Exits the process with status 1
    when sysctl fails.

    Fix: the previous code called ``p.wait()`` before ``communicate()``;
    with stdout/stderr piped that can deadlock once a pipe buffer fills,
    and ``communicate()`` already waits for the child.
    """
    # Memory-sizing OIDs plus the full ZFS kstat/vfs subtrees.
    Kstats = [
        "hw.pagesize",
        "hw.physmem",
        "kern.maxusers",
        "vm.kmem_map_free",
        "vm.kmem_map_size",
        "vm.kmem_size",
        "vm.kmem_size_max",
        "vm.kmem_size_min",
        "vm.kmem_size_scale",
        "vm.stats",
        "kstat.zfs",
        "vfs.zfs"
    ]

    sysctls = " ".join(str(x) for x in Kstats)
    p = Popen("/sbin/sysctl -q %s" % sysctls, stdin=PIPE,
        stdout=PIPE, stderr=PIPE, shell=True, close_fds=True)

    # communicate() drains the pipes and waits for the child.
    kstat_pull = p.communicate()[0].split('\n')
    if p.returncode != 0:
        sys.exit(1)

    Kstat = {}
    for kstat in kstat_pull:
        kstat = kstat.strip()
        # kstat_pobj (module-level regex) splits "key: value" lines.
        mobj = kstat_pobj.match(kstat)
        if mobj:
            key = mobj.group(1).strip()
            val = mobj.group(2).strip()
            Kstat[key] = D(val)

    return Kstat
Example #19
0
    def submit_to_queue(self, script_file):
        """Submit *script_file* to the PBS queue with qsub.

        Returns:
            (process, queue_id) after a successful submission; queue_id is
            None when the job id cannot be parsed from qsub's output.

        Raises:
            self.Error: when the script file is missing, when qsub reports
                a submission error, or when running qsub itself fails.

        Fixes: ``scriprocesst_file`` was a typo for ``script_file`` (a
        NameError on every call), and the error message built for a
        non-zero qsub exit was silently discarded — it is now raised.
        """

        if not os.path.exists(script_file):
            raise self.Error('Cannot find script file located at: {}'.format(script_file))

        # submit the job
        try:
            cmd = ['qsub', script_file]
            process = Popen(cmd, stdout=PIPE, stderr=PIPE)
            process.wait()

            # grab the returncode. PBS returns 0 if the job was successful
            if process.returncode == 0:
                try:
                    # output should of the form '2561553.sdb' or '352353.jessup' - just grab the first part for job id
                    queue_id = int(process.stdout.read().split('.')[0])
                    logger.info('Job submission was successful and queue_id is {}'.format(queue_id))

                except:
                    # probably error parsing job code
                    logger.warning("Could not parse job id following qsub...")
                    queue_id = None

                finally:
                    return process, queue_id

            else:
                # some qsub error, e.g. maybe wrong queue specified, don't have permission to submit, etc...
                msg = ('Error in job submission with PBS file {f} and cmd {c}\n'.format(f=script_file, c=cmd) +
                      'The error response reads: {}'.format(process.stderr.read()))
                raise self.Error(msg)

        except self.Error:
            # let deliberate submission errors through untouched
            raise
        except:
            # random error, e.g. no qsub on machine!
            raise self.Error("Running qsub caused an error...")
Example #20
0
    def do_apply(self):
        """Start udhcpd with a freshly generated config if (re)start needed.

        Runs only when the "dhcp" netconf lock is free or the IPv4 config
        changed; verifies startup via the daemon's pid file, then takes the
        lock and removes the temp config.  Raises when udhcpd fails to
        start.
        """
        if not self.netconf.locked("dhcp") or self.netconf.ip4_changed:
            # A changed IPv4 config means the old daemon must go first.
            if self.netconf.ip4_changed:
                self.do_remove()

            # Write the generated config to a private temp file.
            config_file, config_path = mkstemp(prefix="udhcpd-")
            os.write(config_file, self._generate_config().encode('UTF-8'))
            os.close(config_file)

            dprint("Running udhcpd with config file %s" % config_path)
            cmd = [have("udhcpd"), "-S", config_path]
            p = Popen(cmd)
            p.wait()

            # udhcpd takes time to create pid file
            sleep(0.1)

            pid = read_pid_file("/var/run/udhcpd.pan1.pid")

            # Startup check: the launcher ran AND the daemon pid is alive.
            if p.pid and is_running("udhcpd", pid):
                dprint("udhcpd started correctly")
                self.pid = pid
                dprint("pid", self.pid)
                self.netconf.lock("dhcp")
            else:
                raise Exception("udhcpd failed to start. Check the system log for errors")

            # NOTE(review): the temp config is only removed on success —
            # the raise above leaks it; presumably acceptable for debugging.
            os.remove(config_path)
    def test_012(self):
        """
        Test the --delay parameter of the invoker.

        Launches an app via invoker with a 10s delay, then checks after 5s
        that the invoker process is still alive (i.e. the delay has not
        elapsed yet).  (Python 2: print statements.)
        """

        # launch an app with invoker --delay n
        print "launching fala_ft_hello ..."
        p = Popen(['/usr/bin/invoker', '--delay', '10', '--type=m', '--no-wait',
                   '/usr/bin/fala_ft_hello.launch'],
                  shell=False, 
                  stdout=DEV_NULL, stderr=DEV_NULL)

        # wait a little
        print "waiting ..."
        time.sleep(5)

        success = True

        # poll() is None while the invoker is still running — that is the
        # expected state at the 5s mark of a 10s delay.
        if p.poll() == None:
            print "NOT DEAD"
        else:
            print "DEAD"
            success = False

        print "waiting for invoker to terminate ..."
        p.wait()

        # clean up the launched app regardless of outcome
        print "terminating fala_ft_hello ..."
        Popen(['pkill', 'fala_ft_hello']).wait()

        self.assert_(success, "invoker terminated before delay elapsed")
Example #22
0
    def get_njobs_in_queue(self, username=None):
        """Count *username*'s jobs currently in the SLURM queue via squeue.

        Args:
            username: account to query; defaults to the current user.

        Returns:
            int job count on success, None when squeue reports an error.

        Fixes: the failure path read ``process.stderr`` even though stderr
        was never piped (it was None, raising AttributeError); stderr is
        now captured, and ``communicate()`` replaces ``wait()`` + read to
        avoid pipe-buffer deadlock.
        """
        if username is None:
            username = getpass.getuser()

        cmd = ['squeue', '-o "%u"', '-u', username]
        process = Popen(cmd, shell=False, stdout=PIPE, stderr=PIPE)
        out, err = process.communicate()

        # parse the result
        if process.returncode == 0:
            # lines should have this form
            # username
            # count lines that include the username in it
            outs = out.splitlines()
            njobs = len([line.split() for line in outs if username in line])
            logger.info('The number of jobs currently in the queue is: {}'.format(njobs))
            return njobs

        # there's a problem talking to squeue server?
        err_msg = ('Error trying to get the number of jobs in the queue using squeue service' + 
                   'The error response reads: {}'.format(err))
        logger.critical(err_msg)

        return None
Example #23
0
class MongodbPlugin(object):
    """Pytest plugin that runs a throwaway mongod for the test session.

    A temporary dbpath directory is created up front, the daemon is started
    at session start, and both daemon and directory are torn down at
    session finish.
    """

    def __init__(self):
        # Popen handle to the running mongod (None until sessionstart).
        self.mongo = None
        # Scratch directory used as mongod's --dbpath.
        self.tmpdir = tempfile.mkdtemp()

    def pytest_sessionstart(self, session):
        """Start mongod on the configured port and wait for readiness."""
        port = session.config.getvalue('mongodb_port')
        self.mongo = Popen(["mongod", "--dbpath", self.tmpdir,
                            "--port", str(port)],
                           stdin=PIPE, stdout=PIPE, stderr=PIPE)

        # Scan at most 10 startup lines for mongod's readiness banner.
        for each in range(10):
            if 'waiting for connections' in self.mongo.stdout.readline():
                break
        else:
            raise OSError('Mongodb start timeout.')

    def pytest_sessionfinish(self, session):
        """Kill mongod (if started) and always remove the scratch dbpath."""
        if self.mongo is not None:
            try:
                self.mongo.kill()
                self.mongo.communicate()
                self.mongo.wait()
            finally:
                shutil.rmtree(self.tmpdir)
Example #24
0
 def compilemc(self, shortest=False):
     """Compile the SPIN-generated pan.c model checker with gcc.

     Args:
         shortest: when True, also define -DREACH so pan can search for
             the shortest counterexample.

     Fix: the optimization flag was written ``-o2`` (lowercase), which gcc
     parses as ``-o 2`` — an output-file name, not ``-O2``; only the later
     ``-o pan`` kept the build usable.  It is now spelled ``-O2``.
     """
     print ("Compiling model checker: gcc ... -o pan pan.c")
     base_flags = ['gcc', '-O2', '-DSAFETY', '-DCOLLAPSE', '-DVECTORSZ=100000']
     if shortest:
         proc = Popen(base_flags + ['-DREACH', '-o', 'pan', 'pan.c'])
     else:
         proc = Popen(base_flags + ['-o', 'pan', 'pan.c'])
     proc.wait()
Example #25
0
def get_rpm_names_from_specfile(spec_path, source_dir, other=[]):
  """Query *spec_path* with rpmspec and return [name, version] pairs.

  Emits one entry per NAME and per PROVIDES of the spec.  *other* holds
  extra rpmspec arguments; the mutable default is kept for interface
  compatibility and is only concatenated, never mutated.
  """
  query_format = '%{NAME}\t%{VERSION}\n%{PROVIDES}\t%{VERSION}\n'
  cmd = (['rpmspec', '-q', '-D', '_sourcedir %s' % source_dir]
         + other
         + ['--qf', query_format, spec_path])
  proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
  # communicate() waits for the child; strip the trailing newline.
  out = proc.communicate()[0][:-1]
  return map(lambda rec: rec.split('\t'), out.splitlines())
Example #26
0
def test_exit_status_terminated_by_signal(signal, both_debug_modes, both_setsid_modes):
    """dumb-init should exit with status 128 + signal when the child process is
    terminated by a signal.
    """
    # The child kills itself with the given signal; dumb-init must report it.
    child = Popen(('dumb-init', 'sh', '-c', 'kill -{0} $$'.format(signal)))
    child.wait()
    assert child.returncode == signal + 128
Example #27
0
def test_exit_status_regular_exit(exit_status, both_debug_modes, both_setsid_modes):
    """dumb-init should exit with the same exit status as the process that it
    supervises when that process exits normally.
    """
    # The supervised shell exits with the requested status; dumb-init must
    # propagate it unchanged.
    child = Popen(('dumb-init', 'sh', '-c', 'exit {0}'.format(exit_status)))
    child.wait()
    assert child.returncode == exit_status
  def stop(self, job_id, configs={}):
    """
    Stops a Samza job using the bin/kill-yarn-job.sh script.

    param: job_id -- A unique ID used to idenitfy a Samza job.
    param: configs -- Map of config key/values pairs. Valid keys include:

    package_id: The package_id for the package that contains the code for job_id.
    Usually, the package_id refers to the .tgz job tarball that contains the
    code necessary to run job_id.

    Raises AssertionError when the kill script exits non-zero; warns and
    returns silently when the job was never started.
    """
    configs = self._get_merged_configs(configs)
    self._validate_configs(configs, ['package_id'])

    # Get configs.
    package_id = configs.get('package_id')

    # Get the application_id for the job.
    application_id = self.app_ids.get(job_id)

    # Kill the job, if it's been started, or WARN and return if it's hasn't.
    if not application_id:
      logger.warn("Can't stop a job that was never started: {0}".format(job_id))
    else:
      command = "{0} {1}".format(os.path.join(package_id, "bin/kill-yarn-job.sh"), application_id)
      env = self._get_env_vars(package_id)
      p = Popen(command.split(' '), stdin=PIPE, stdout=PIPE, stderr=PIPE, env=env)
      p.wait()
      assert p.returncode == 0, "Command returned non-zero exit code ({0}): {1}".format(p.returncode, command)
Example #29
0
def read_url_with_cache(url, kt_user, kt_pass, kt_file_cache):
    "Read a kontagent file possibly from a cache (store in dir KT_FILECACHE)"
    # Derive the cache filename from the url; a hit short-circuits the fetch.
    # (Python 2: uses `except OSError, e` syntax.)
    f = filenameify(url)
    filepath = os.path.join(kt_file_cache, f)
    if os.path.exists(filepath):
        log.info('Kontagent: cache hit: %s', filepath)
        return filepath
    else:
        # Download to a url-hash temp name first, then rename atomically.
        tmpfile = os.path.join(kt_file_cache, str(hash(url)) + '.tmp')
        command = ['wget', '--user', kt_user, '--password', kt_pass, '-q', '-O', tmpfile, url]
        p = Popen(command, stdin=PIPE)
        p.stdin.close()
        p.wait()
        if p.returncode != 0:
            raise Exception('Unable to retrieve %s' % url)
        if not os.path.exists(os.path.dirname(filepath)):
            try:
                #ensure base directory exists.
                os.makedirs(os.path.dirname(filepath))
            except OSError, e:
                if e.errno == 17:  # File Exists.
                    pass
                else:
                    raise e
        # Only promote non-empty downloads into the cache.
        if os.stat(tmpfile).st_size > 0:
            os.rename(tmpfile, filepath)
            log.info('Kontagent: cache store: %s', filepath)
            return filepath
        else:
            raise Exception('Failed to retrieve url %s' % url)
def cp_file(from_file, to_file):
    """Copy *from_file* to *to_file* preserving mode/ownership/timestamps.

    Shells out to ``cp`` (rather than shutil.copy2 / sudo cp) so ownership
    is preserved too — e.g. when copying state.bssid to state as the tor
    user.  The copy is logged.
    """
    copier = Popen(['cp', '-p', '--preserve', from_file, to_file])
    copier.wait()
    logger.info("cp %s %s" % (from_file, to_file))
Example #31
0
def run(args):
    """Stream a child process's stdout to our stdout line by line.

    Uses a line-buffered pipe and waits for the child before returning.
    (Python 2: the trailing comma on the print statement suppresses the
    extra newline, since each read line already ends with one.)
    """
    p = Popen(args, stdout=PIPE, bufsize=1)
    with p.stdout:
        for line in iter(p.stdout.readline, b''):
            print line,
    p.wait()
Example #32
0
def check_installed(binary):
    """Return True when *binary* resolves on PATH (via `which`), else False.

    Prints an install hint on failure.  (Python 2: print statement; the
    bare except deliberately maps any check_call failure to False.)
    """
    try:
        check_call('which ' + binary + ' > /dev/null 2>&1', shell=True)
        return True
    except:
        print 'Could not find ' + binary + ' executable, please install before running Flarelyzer'
        return False


# Entry-point script: pick a sudo frontend, check prerequisites, then run
# the agent and the (privileged) memory scanner until either finishes.
# (Python 2: print statement, list-returning filter.)
# NOTE(review): `sockfile` is not defined in this block — presumably a
# module-level constant naming the agent's socket; confirm.
if sys.stdin.isatty():  # Running from terminal
    sudo = 'sudo'
else:
    #select sudo frontend
    sudo = filter(check_installed, ['gksu', 'kdesudo'])[0]
if not sudo or not all(map(check_installed, ['pypy', 'notify-send'])):
    quit()  #if the requirements are not met

try:
    agent = Popen(['pypy', 'agent.py'])
    # Wait until the agent has created its socket before starting the scanner.
    while not os.path.exists(sockfile):
        time.sleep(0.05)  #probably a hack
    scanner = Popen([sudo, 'pypy', 'memscan.py'])
    scanerror = scanner.wait()
    # A clean scan lets the agent finish; a failed scan tears it down.
    if not scanerror:
        agent.wait()
    else:
        agent.terminate()
except KeyboardInterrupt:
    print '\nKeyboard Interrupt, closing...'
    quit()
Example #33
0
  def scm_fetch(self,url,files,scm_name="svn",revision=None,branch=None,do_cmds=None):
    """Fetch benchmark *files* from an svn or git repository at *url*.

    For each requested revision, ensures a per-revision resource directory,
    runs the scm-specific fetch command once, symlinks revisioned files,
    and finally runs any *do_cmds* inside each fetched file's directory.
    (Python 2: print statements.)

    NOTE(review): `rev is -1` below compares ints by identity — it works in
    CPython only because -1 is a cached small int; `rev == -1` would be the
    robust spelling.  Likewise `rev > 0` relies on Python 2's permissive
    mixed-type comparisons when rev is a string.
    """
    fetch_message = 'Fetching benchmark {0} from {1}:'.format(self.benchmark_name,scm_name)
    print fetch_message
    print '-'*len(fetch_message)

    if scm_name=="svn":
      self.get_credentials()

    # create scm directory if it does not exist
    if revision is None:
      revision = [-1]

    for rev in revision:
      # -1 is the sentinel for "no specific revision" (no rev subdirectory).
      if rev is -1:
        benchresource_dir=os.path.join(self.resource_dir,self.benchmark_name,scm_name)
      else:
        benchresource_dir=os.path.join(self.resource_dir,self.benchmark_name,scm_name,rev)

      if not os.path.exists(benchresource_dir):
        os.makedirs(benchresource_dir)
      fetched = False

      for file_bench in files:
        urlparsed = urlparse(url)
        if not os.path.isabs(file_bench):
          file_bench ="/"+file_bench

        url_file=urljoin(url,urlparsed.path+file_bench)
        base_name = os.path.basename(file_bench)

        # Build the scm-specific fetch command (helpers defined elsewhere).
        fetch_command=""
        if scm_name=="svn":
          fetch_command = self.svn_fetch(url_file,base_name,self.login,self.password,rev)

        elif scm_name=="git":

          # Get the name of the global repository
          benchs_name=os.path.join(url.split('/')[-1].split('.')[0])

          fetch_command = self.git_fetch(url,benchs_name,benchresource_dir,rev,branch)


        if not fetched:
          # Execute the fetch command just one time

          fetch_process = Popen(fetch_command,cwd=benchresource_dir,shell=True)
          fetch_process.wait()
          fetched = True

        fetch_dir=os.path.join(self.resource_dir,self.benchmark_name,scm_name)

        # Create symbolic link to the file bench with its full path 
        if rev > 0:
          dest_symlink = os.path.join(fetch_dir,rev+"_"+base_name)
          if not os.path.exists(dest_symlink):
            os.symlink(os.path.join(benchresource_dir+file_bench),dest_symlink)

        # Check if the files exist in the directory
        if not os.path.isdir(os.path.join(benchresource_dir+file_bench)):
            print "WARNING :", os.path.join(benchresource_dir+file_bench)," does not exist"

        # Execute actions from do tags
        if do_cmds:
          for do_cmd in do_cmds :
            do_process=Popen(do_cmd,cwd=os.path.join(benchresource_dir,file_bench[1:]),shell=True)
            do_process.wait()

    print 'Benchmark {0} fetched'.format(self.benchmark_name)
Example #34
0
        r, w, e = select.select(fdset, [], fdset)
        if e:
            break
            
        for fd in r:
            line = fd.readline()
            sys.stdout.write(line)
            if line.find('starting server at port') >= 0:
                ready_count += 1
        
        if ready_count == 2 and p3 is None:
            p3 = Popen(['curl', 'http://www.example.com/', '-v', '-L',
                        '--socks5-hostname', '127.0.0.1:1080'], shell=False,
                        bufsize=0,  close_fds=True)
            break
            
    if p3 is not None:
        r = p3.wait()
        if r == 0:
            print 'test passed'
        sys.exit(r)
    
finally:
    for p in [p1, p2]:
        try:
            p.kill()
        except OSError:
            pass
   
sys.exit(-1)
 def runMopac(tmpdir, target='reactant.mop'):
     """Run MOPAC on *target* inside *tmpdir* and block until it finishes."""
     mopac = Popen(['mopac', path.join(tmpdir, target)])
     mopac.wait()
Example #36
0
def strip_private_layer(client, tag, parent_id, private_layer_id):
    """Rewrite docker image *tag* so the private layer chain is removed.

    Saves the image to a tarball, prunes the layer chain rooted at
    *private_layer_id* (and its ancestors), re-points any child layers at
    *parent_id*, and loads the rewritten tarball back into docker.

    Args:
        client: docker client, used only to remove the original image.
        tag: image tag to export and rewrite.
        parent_id: layer id substituted for the removed private layer.
        private_layer_id: topmost private layer id to splice out.

    NOTE(review): Python 2 only (``dict.has_key``); save/load shell out to
    the docker CLI rather than using *client* (see TODO).  Exits the
    process when either docker CLI call fails.
    """

    build_tar = tempfile.NamedTemporaryFile()
    logger.info('saving tar file from build %s', build_tar.name)

    # TODO save using client
    p_args = ['docker', 'save', '--output', build_tar.name, tag]
    p = Popen(p_args)

    res = p.wait()
    if res != 0:
        sys.exit(res)

    # Best-effort removal of the original image; ignore if already gone.
    try:
        client.remove_image(tag)
    except Exception:
        pass

    extract_dir = tempfile.mkdtemp()
    logger.info('extract the build tar %s', extract_dir)

    try:
        with tarfile.open(mode='r', fileobj=build_tar) as tar:
            tar.extractall(path=extract_dir)

        # prune away image layers under private_id
        # we alreayd have them, don't need them again
        def prune(basepath, start_id):
            # Walk parent pointers upward, deleting each layer directory.
            json_path = basepath + '/' + start_id + '/json'
            f = open(json_path, 'r+')
            content = json.load(f)
            f.close()
            if content.has_key('parent'):
                prune(basepath, content['parent'])
            elif content.has_key('Parent'):
                prune(basepath, content['Parent'])
            logger.debug('pruning %s', start_id)
            shutil.rmtree(basepath + '/' + start_id)

        logger.info('Splice out private layer id %s', private_layer_id)
        prune(extract_dir, private_layer_id)

        # Re-point every remaining child of the removed layer at parent_id,
        # rewriting its metadata json in place.
        for (dirpath, dirnames, filenames) in walk(extract_dir):
            for dir in dirnames:
                json_path = extract_dir + '/' + dir + '/json'

                f = open(json_path, 'r+')
                content = json.load(f)
                if content.has_key(
                        'parent') and content['parent'] == private_layer_id:
                    content['parent'] = parent_id
                    content['Parent'] = parent_id
                    content['config']['Image'] = parent_id
                    content['container_config']['Image'] = parent_id
                    f.seek(0)
                    json.dump(content, f)
                    f.truncate()
                elif content.has_key(
                        'Parent') and content['Parent'] == private_layer_id:
                    content['parent'] = parent_id
                    content['Parent'] = parent_id
                    content['config']['Image'] = parent_id
                    content['container_config']['Image'] = parent_id
                    f.seek(0)
                    json.dump(content, f)
                    f.truncate()
                f.close()

        logger.info('make final tarball')

        tmp_fpath = tempfile.mkstemp()
        try:
            tmp_file = tmp_fpath[0]
            tmp_path = tmp_fpath[1]

            with tarfile.open(name=tmp_path, mode='w') as tar:
                tar.add(extract_dir, arcname='')

            os.fsync(tmp_file)

            logger.info('loading final image %s', tmp_path)
            p_args = ['docker', 'load', '--input', tmp_path]
            p = Popen(p_args)

            res = p.wait()
            if res != 0:
                sys.exit(res)
        finally:
            os.remove(tmp_fpath[1])

    finally:
        shutil.rmtree(extract_dir)
Example #37
0
def run_process(cmds):
    """Run *cmds* as a child process, block until it exits.

    Returns the child's exit status (0 on success).
    """
    child = Popen(cmds)
    # Popen.wait() returns the same value it stores in returncode.
    return child.wait()
def main():
    """Command-line entry point.

    Parses sys.argv, builds the graph-creation statement for *graph_name*
    and either pipes it (followed by the converted input) into an ``agens``
    client subprocess (--import-to-agens) or prints it to stdout.
    Mutates the module globals ``use_agens``, ``graph_st``, ``use_dump``
    and (when importing) ``ipc``.
    """
    global use_agens, graph_st, use_dump
    graph_name = ""
    s_file = ""
    graph_st = ""
    opt = ""

    for arg in sys.argv[1:]:
        if arg == "--import-to-agens":
            use_agens = True
            continue
        if arg == "--use-dump":
            use_dump = True
            continue
        m1 = re.search(r'^--graph=(\S+)$', arg)
        if m1:
            graph_name = m1.group(1)
            continue
        # Connection options forwarded verbatim to the agens client.
        # NOTE(review): 'opt' is overwritten, not accumulated, so only the
        # last matching option survives; it is also passed below as a single
        # argv element with a leading space -- confirm this is intended.
        m1 = re.search(r'^(--)(dbname|host|port|username)(=\S+)$', arg)
        if m1:
            opt = " " + m1.group(0)
            continue
        m1 = re.search(r'^(--)(no-password|password)$', arg)
        if m1:
            opt = " " + m1.group(0)
            continue
        # Any other --flag (including --help) prints usage and exits.
        m1 = re.search(r'^--', arg)
        m2 = re.search(r'^--(h|help)$', arg)
        if m1 or m2:
            print(
                "USAGE: python " + sys.argv[0] +
                " [--import-to-agens] [--graph=GRAPH_NAME] [--use-dump] [--help] [filename (optional if STDIN is provided)]"
            )
            print(
                "   Additional optional parameters for the AgensGraph integration:"
            )
            print("      [--dbname=DBNAME] : Database name")
            print("      [--host=HOST]     : Hostname or IP")
            print("      [--port=PORT]     : Port")
            print("      [--username=USER] : Username")
            print("      [--no-password]   : No password")
            print(
                "      [--password]      : Ask password (should happen automatically)"
            )
            exit(0)
        # Anything that is not a recognized flag is the input filename.
        s_file = arg
    if not graph_name:
        print(
            "Please specify the --graph= parameter to initialize the graph repository."
        )
        exit(1)
    if s_file:
        if not os.path.isfile(s_file):
            print("File not found: " + s_file)
            exit(1)
    graph_st = make_graph_st(graph_name)
    if use_agens:
        global ipc
        ret = ""
        not_avail = "agens client is not available"
        # Probe that the agens client exists and runs before committing to it.
        # NOTE(review): stdout is a PIPE that is never read before wait();
        # fine for the small --help output, but a deadlock risk in general.
        try:
            ret = Popen(['agens', '--help'],
                        stdin=None,
                        stdout=PIPE,
                        stderr=STDOUT)
        except OSError:
            print(not_avail)
            exit(1)
        ret.wait()
        if ret.returncode != 0:
            print(not_avail)
            exit(1)
        ipc = Popen(['agens', opt], stdin=PIPE, stderr=STDOUT)
        # Append a trailing newline so the statement is executed by agens.
        graph_st = re.sub("$", "\n", graph_st)
        ipc.stdin.write(graph_st.encode())
    else:
        print(graph_st)

    # Convert either the named file or stdin, line by line.
    if not s_file == "":
        x = load_file(s_file)
        for ls in x:
            out(ls)
    else:
        for ls in sys.stdin:
            out(ls)
    if use_agens:
        ipc.stdin.close()
Example #39
0
#!/usr/bin/python

from subprocess import Popen, PIPE
from sys import argv, stderr, exit
import os
import shutil

# Render a gnuplot script to PDF: pipe a terminal/output preamble, then the
# user's script, then 'exit' into a gnuplot child process.
if len(argv) != 3:
    stderr.write("Usage: %s <src_file> <dst_file>\n" % argv[0])
    exit(1)

with open(argv[1], 'rb') as script:
    gp = Popen("gnuplot", shell=True, stdin=PIPE)
    gp.stdin.write("set terminal pdf\nset output '%s'\n" % argv[2])
    # Stream the source script straight into gnuplot's stdin.
    shutil.copyfileobj(script, gp.stdin)
    gp.stdin.write("exit\n")
    gp.wait()
Example #40
0
class RemoteRepository:
    """Client-side proxy for a repository served by a remote 'borg serve'
    process (normally reached over ssh).

    RPC messages are msgpack-encoded over the child's stdin/stdout; stderr
    carries the remote log stream.  Supports both the legacy tuple-based
    wire format (borg < 1.1) and the dict-based format (borg 1.1+), chosen
    during protocol negotiation in __init__.
    """

    # Extra 'borg serve' arguments, used only by the test suite.
    extra_test_args = []

    class RPCError(Exception):
        """Wraps an exception that was raised on the server side."""

        def __init__(self, unpacked):
            # for borg < 1.1: unpacked only has b'exception_class' as key
            # for borg 1.1+: unpacked has keys: b'exception_args', b'exception_full', b'exception_short', b'sysinfo'
            self.unpacked = unpacked

        def get_message(self):
            # Prefer the short message from newer servers; fall back to the
            # bare class name for pre-1.1 servers.
            if b'exception_short' in self.unpacked:
                return b'\n'.join(self.unpacked[b'exception_short']).decode()
            else:
                return self.exception_class

        @property
        def exception_class(self):
            return self.unpacked[b'exception_class'].decode()

        @property
        def exception_full(self):
            if b'exception_full' in self.unpacked:
                return b'\n'.join(self.unpacked[b'exception_full']).decode()
            else:
                return self.get_message() + '\nRemote Exception (see remote log for the traceback)'

        @property
        def sysinfo(self):
            if b'sysinfo' in self.unpacked:
                return self.unpacked[b'sysinfo'].decode()
            else:
                return ''

    class RPCServerOutdated(Error):
        """Borg server is too old for {}. Required version {}"""

        @property
        def method(self):
            return self.args[0]

        @property
        def required_version(self):
            return self.args[1]

    # If compatibility with 1.0.x is not longer needed, replace all checks of this with True and simplify the code
    dictFormat = False  # outside of __init__ for testing of legacy free protocol

    def __init__(self, location, create=False, exclusive=False, lock_wait=None, lock=True, append_only=False, args=None):
        """Spawn the remote 'borg serve' process for *location*, negotiate
        the protocol version, then open the repository.

        On any failure after the child has been started, the child is
        cleanly shut down via close() before the exception propagates.
        """
        self.location = self._location = location
        self.preload_ids = []
        self.msgid = 0
        self.to_send = b''
        self.chunkid_to_msgids = {}
        self.ignore_responses = set()
        self.responses = {}
        self.ratelimit = SleepingBandwidthLimiter(args.remote_ratelimit * 1024 if args and args.remote_ratelimit else 0)

        self.unpacker = msgpack.Unpacker(use_list=False)
        self.server_version = parse_version('1.0.8')  # fallback version if server is too old to send version information
        self.p = None
        testing = location.host == '__testsuite__'
        borg_cmd = self.borg_cmd(args, testing)
        env = dict(os.environ)
        if not testing:
            borg_cmd = self.ssh_cmd(location) + borg_cmd
            # pyinstaller binary modifies LD_LIBRARY_PATH=/tmp/_ME... but we do not want
            # that the system's ssh binary picks up (non-matching) libraries from there.
            # thus we install the original LDLP, before pyinstaller has modified it:
            lp_key = 'LD_LIBRARY_PATH'
            lp_orig = env.get(lp_key + '_ORIG')  # pyinstaller >= 20160820 has this
            if lp_orig is not None:
                env[lp_key] = lp_orig
            else:
                env.pop(lp_key, None)
        env.pop('BORG_PASSPHRASE', None)  # security: do not give secrets to subprocess
        env['BORG_VERSION'] = __version__
        self.p = Popen(borg_cmd, bufsize=0, stdin=PIPE, stdout=PIPE, stderr=PIPE, env=env)
        self.stdin_fd = self.p.stdin.fileno()
        self.stdout_fd = self.p.stdout.fileno()
        self.stderr_fd = self.p.stderr.fileno()
        # All three pipes are driven by the select() loop in call_many, so
        # they must be non-blocking.
        fcntl.fcntl(self.stdin_fd, fcntl.F_SETFL, fcntl.fcntl(self.stdin_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
        fcntl.fcntl(self.stdout_fd, fcntl.F_SETFL, fcntl.fcntl(self.stdout_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
        fcntl.fcntl(self.stderr_fd, fcntl.F_SETFL, fcntl.fcntl(self.stderr_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
        self.r_fds = [self.stdout_fd, self.stderr_fd]
        self.x_fds = [self.stdin_fd, self.stdout_fd, self.stderr_fd]

        try:
            try:
                version = self.call('negotiate', {'client_data': {b'client_version': BORG_VERSION}})
            except ConnectionClosed:
                raise ConnectionClosedWithHint('Is borg working on the server?') from None
            if version == RPC_PROTOCOL_VERSION:
                self.dictFormat = False
            elif isinstance(version, dict) and b'server_version' in version:
                self.dictFormat = True
                self.server_version = version[b'server_version']
            else:
                raise Exception('Server insisted on using unsupported protocol version %s' % version)

            def do_open():
                self.id = self.open(path=self.location.path, create=create, lock_wait=lock_wait,
                                    lock=lock, exclusive=exclusive, append_only=append_only)

            if self.dictFormat:
                do_open()
            else:
                # Ugly detection of versions prior to 1.0.7: If open throws it has to be 1.0.6 or lower
                try:
                    do_open()
                except self.RPCError as err:
                    if err.exception_class != 'TypeError':
                        raise
                    msg = """\
Please note:
If you see a TypeError complaining about the number of positional arguments
given to open(), you can ignore it if it comes from a borg version < 1.0.7.
This TypeError is a cosmetic side effect of the compatibility code borg
clients >= 1.0.7 have to support older borg servers.
This problem will go away as soon as the server has been upgraded to 1.0.7+.
"""
                    # emit this msg in the same way as the 'Remote: ...' lines that show the remote TypeError
                    sys.stderr.write(msg)
                    self.server_version = parse_version('1.0.6')
                    compatMap['open'] = ('path', 'create', 'lock_wait', 'lock', ),
                    # try again with corrected version and compatMap
                    do_open()
        except Exception:
            self.close()
            raise

    def __del__(self):
        if len(self.responses):
            logging.debug('still %d cached responses left in RemoteRepository' % (len(self.responses),))
        if self.p:
            self.close()
            assert False, 'cleanup happened in Repository.__del__'

    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.location.canonical_path())

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            if exc_type is not None:
                self.rollback()
        finally:
            # in any case, we want to cleanly close the repo, even if the
            # rollback can not succeed (e.g. because the connection was
            # already closed) and raised another exception:
            self.close()

    @property
    def id_str(self):
        return bin_to_hex(self.id)

    def borg_cmd(self, args, testing):
        """return a borg serve command line"""
        # give some args/options to 'borg serve' process as they were given to us
        opts = []
        if args is not None:
            opts.append('--umask=%03o' % args.umask)
            root_logger = logging.getLogger()
            if root_logger.isEnabledFor(logging.DEBUG):
                opts.append('--debug')
            elif root_logger.isEnabledFor(logging.INFO):
                opts.append('--info')
            elif root_logger.isEnabledFor(logging.WARNING):
                pass  # warning is default
            elif root_logger.isEnabledFor(logging.ERROR):
                opts.append('--error')
            elif root_logger.isEnabledFor(logging.CRITICAL):
                opts.append('--critical')
            else:
                raise ValueError('log level missing, fix this code')
        env_vars = []
        if yes(env_var_override='BORG_HOSTNAME_IS_UNIQUE', env_msg=None, prompt=False):
            env_vars.append('BORG_HOSTNAME_IS_UNIQUE=yes')
        if testing:
            return env_vars + [sys.executable, '-m', 'borg.archiver', 'serve'] + opts + self.extra_test_args
        else:  # pragma: no cover
            remote_path = args.remote_path or os.environ.get('BORG_REMOTE_PATH', 'borg')
            remote_path = replace_placeholders(remote_path)
            return env_vars + [remote_path, 'serve'] + opts

    def ssh_cmd(self, location):
        """return a ssh command line that can be prefixed to a borg command line"""
        args = shlex.split(os.environ.get('BORG_RSH', 'ssh'))
        if location.port:
            args += ['-p', str(location.port)]
        if location.user:
            args.append('%s@%s' % (location.user, location.host))
        else:
            args.append('%s' % location.host)
        return args

    def named_to_positional(self, method, kwargs):
        """Convert kwargs to the positional arg list the legacy (pre-1.1)
        tuple protocol expects, ordered per compatMap."""
        return [kwargs[name] for name in compatMap[method]]

    def call(self, cmd, args, **kw):
        """Call *cmd* once with *args* and return its single result."""
        for resp in self.call_many(cmd, [args], **kw):
            return resp

    def call_many(self, cmd, calls, wait=True, is_preloaded=False):
        """Pipelined RPC: issue *cmd* once per entry of *calls* and yield
        results as they arrive.

        Drives all three child pipes through a select() loop, keeping up to
        MAX_INFLIGHT requests outstanding, serving preloaded 'get' responses
        from chunkid_to_msgids, and translating server-side exceptions into
        local ones via handle_error().
        """
        if not calls:
            return

        def pop_preload_msgid(chunkid):
            msgid = self.chunkid_to_msgids[chunkid].pop(0)
            if not self.chunkid_to_msgids[chunkid]:
                del self.chunkid_to_msgids[chunkid]
            return msgid

        def handle_error(unpacked):
            # Map the server-reported exception class onto the local
            # exception hierarchy; old (pre-1.1) servers do not send args.
            error = unpacked[b'exception_class'].decode()
            old_server = b'exception_args' not in unpacked
            args = unpacked.get(b'exception_args')

            if error == 'DoesNotExist':
                raise Repository.DoesNotExist(self.location.orig)
            elif error == 'AlreadyExists':
                raise Repository.AlreadyExists(self.location.orig)
            elif error == 'CheckNeeded':
                raise Repository.CheckNeeded(self.location.orig)
            elif error == 'IntegrityError':
                if old_server:
                    raise IntegrityError('(not available)')
                else:
                    raise IntegrityError(args[0].decode())
            elif error == 'PathNotAllowed':
                raise PathNotAllowed()
            elif error == 'ObjectNotFound':
                if old_server:
                    raise Repository.ObjectNotFound('(not available)', self.location.orig)
                else:
                    raise Repository.ObjectNotFound(args[0].decode(), self.location.orig)
            elif error == 'InvalidRPCMethod':
                if old_server:
                    raise InvalidRPCMethod('(not available)')
                else:
                    raise InvalidRPCMethod(args[0].decode())
            else:
                raise self.RPCError(unpacked)

        calls = list(calls)
        waiting_for = []
        while wait or calls:
            # First drain any responses that have already arrived, in order.
            while waiting_for:
                try:
                    unpacked = self.responses.pop(waiting_for[0])
                    waiting_for.pop(0)
                    if b'exception_class' in unpacked:
                        handle_error(unpacked)
                    else:
                        yield unpacked[RESULT]
                        if not waiting_for and not calls:
                            return
                except KeyError:
                    break
            # Only ask select() about writability when there is something to
            # send (or room to queue more requests).
            if self.to_send or ((calls or self.preload_ids) and len(waiting_for) < MAX_INFLIGHT):
                w_fds = [self.stdin_fd]
            else:
                w_fds = []
            r, w, x = select.select(self.r_fds, w_fds, self.x_fds, 1)
            if x:
                raise Exception('FD exception occurred')
            for fd in r:
                if fd is self.stdout_fd:
                    data = os.read(fd, BUFSIZE)
                    if not data:
                        raise ConnectionClosed()
                    self.unpacker.feed(data)
                    for unpacked in self.unpacker:
                        if isinstance(unpacked, dict):
                            msgid = unpacked[MSGID]
                        elif isinstance(unpacked, tuple) and len(unpacked) == 4:
                            # The first field 'type' was always 1 and has always been ignored
                            _, msgid, error, res = unpacked
                            if error:
                                # ignore res, because it is only a fixed string anyway.
                                unpacked = {MSGID: msgid, b'exception_class': error}
                            else:
                                unpacked = {MSGID: msgid, RESULT: res}
                        else:
                            raise UnexpectedRPCDataFormatFromServer()
                        if msgid in self.ignore_responses:
                            self.ignore_responses.remove(msgid)
                            if b'exception_class' in unpacked:
                                handle_error(unpacked)
                        else:
                            self.responses[msgid] = unpacked
                elif fd is self.stderr_fd:
                    data = os.read(fd, 32768)
                    if not data:
                        raise ConnectionClosed()
                    data = data.decode('utf-8')
                    for line in data.splitlines(keepends=True):
                        handle_remote_line(line)
            if w:
                # Queue up the next request (normal call or preload) until
                # the in-flight window is full.
                while not self.to_send and (calls or self.preload_ids) and len(waiting_for) < MAX_INFLIGHT:
                    if calls:
                        if is_preloaded:
                            assert cmd == 'get', "is_preload is only supported for 'get'"
                            if calls[0]['id'] in self.chunkid_to_msgids:
                                waiting_for.append(pop_preload_msgid(calls.pop(0)['id']))
                        else:
                            args = calls.pop(0)
                            if cmd == 'get' and args['id'] in self.chunkid_to_msgids:
                                waiting_for.append(pop_preload_msgid(args['id']))
                            else:
                                self.msgid += 1
                                waiting_for.append(self.msgid)
                                if self.dictFormat:
                                    self.to_send = msgpack.packb({MSGID: self.msgid, MSG: cmd, ARGS: args})
                                else:
                                    self.to_send = msgpack.packb((1, self.msgid, cmd, self.named_to_positional(cmd, args)))
                    if not self.to_send and self.preload_ids:
                        chunk_id = self.preload_ids.pop(0)
                        args = {'id': chunk_id}
                        self.msgid += 1
                        self.chunkid_to_msgids.setdefault(chunk_id, []).append(self.msgid)
                        if self.dictFormat:
                            self.to_send = msgpack.packb({MSGID: self.msgid, MSG: 'get', ARGS: args})
                        else:
                            self.to_send = msgpack.packb((1, self.msgid, 'get', self.named_to_positional(cmd, args)))

                if self.to_send:
                    try:
                        self.to_send = self.to_send[self.ratelimit.write(self.stdin_fd, self.to_send):]
                    except OSError as e:
                        # io.write might raise EAGAIN even though select indicates
                        # that the fd should be writable
                        if e.errno != errno.EAGAIN:
                            raise
        self.ignore_responses |= set(waiting_for)

    @api(since=parse_version('1.0.0'),
         append_only={'since': parse_version('1.0.7'), 'previously': False})
    def open(self, path, create=False, lock_wait=None, lock=True, exclusive=False, append_only=False):
        """actual remoting is done via self.call in the @api decorator"""

    @api(since=parse_version('1.0.0'))
    def check(self, repair=False, save_space=False):
        """actual remoting is done via self.call in the @api decorator"""

    @api(since=parse_version('1.0.0'))
    def commit(self, save_space=False):
        """actual remoting is done via self.call in the @api decorator"""

    @api(since=parse_version('1.0.0'))
    def rollback(self):
        """actual remoting is done via self.call in the @api decorator"""

    @api(since=parse_version('1.0.0'))
    def destroy(self):
        """actual remoting is done via self.call in the @api decorator"""

    @api(since=parse_version('1.0.0'))
    def __len__(self):
        """actual remoting is done via self.call in the @api decorator"""

    @api(since=parse_version('1.0.0'))
    def list(self, limit=None, marker=None):
        """actual remoting is done via self.call in the @api decorator"""

    @api(since=parse_version('1.1.0b3'))
    def scan(self, limit=None, marker=None):
        """actual remoting is done via self.call in the @api decorator"""

    def get(self, id):
        """Fetch a single object by *id*."""
        for resp in self.get_many([id]):
            return resp

    def get_many(self, ids, is_preloaded=False):
        """Fetch many objects by id, yielding results as they arrive."""
        for resp in self.call_many('get', [{'id': id} for id in ids], is_preloaded=is_preloaded):
            yield resp

    @api(since=parse_version('1.0.0'))
    def put(self, id, data, wait=True):
        """actual remoting is done via self.call in the @api decorator"""

    @api(since=parse_version('1.0.0'))
    def delete(self, id, wait=True):
        """actual remoting is done via self.call in the @api decorator"""

    @api(since=parse_version('1.0.0'))
    def save_key(self, keydata):
        """actual remoting is done via self.call in the @api decorator"""

    @api(since=parse_version('1.0.0'))
    def load_key(self):
        """actual remoting is done via self.call in the @api decorator"""

    @api(since=parse_version('1.0.0'))
    def get_free_nonce(self):
        """actual remoting is done via self.call in the @api decorator"""

    @api(since=parse_version('1.0.0'))
    def commit_nonce_reservation(self, next_unreserved, start_nonce):
        """actual remoting is done via self.call in the @api decorator"""

    @api(since=parse_version('1.0.0'))
    def break_lock(self):
        """actual remoting is done via self.call in the @api decorator"""

    def close(self):
        """Shut down the 'borg serve' child: close its stdin/stdout and
        wait for it to exit.

        NOTE(review): stderr is not closed here -- it appears to stay open
        until interpreter teardown; confirm whether that is intended.
        """
        if self.p:
            self.p.stdin.close()
            self.p.stdout.close()
            self.p.wait()
            self.p = None

    def preload(self, ids):
        """Queue chunk *ids* so call_many fetches them ahead of need."""
        self.preload_ids += ids
Example #41
0
            SOURCEDIR=_DOC_PATH,
            BUILDDIR=os.path.abspath(os.path.join(_DOC_PATH, '..', 'build')),
            PYTHONPATH=':'.join([_PROJ_PATH, _LIBS_PATH]),
            PATH=':'.join([_SHIMS_PATH,
                           os.environ.get('PATH', '')]),
        ))

    pip_cmd = (where.first('pip'), 'install', '-r',
               os.path.join(_PROJ_PATH, 'requirements.txt'))
    print("Install pip requirements {cmd}...".format(cmd=repr(pip_cmd)))
    pip = Popen(pip_cmd,
                stdout=sys.stdout,
                stderr=sys.stderr,
                env=_env,
                cwd=_DOC_PATH)
    if pip.wait() != 0:
        raise ChildProcessError("Pip install failed with %d." %
                                (pip.returncode, ))

    # Install the documentation-build requirements the same way as the main
    # requirements above.
    pip_docs_cmd = (where.first('pip'), 'install', '-r',
                    os.path.join(_PROJ_PATH, 'requirements-doc.txt'))
    print("Install pip docs requirements {cmd}...".format(
        cmd=repr(pip_docs_cmd)))
    pip_docs = Popen(pip_docs_cmd,
                     stdout=sys.stdout,
                     stderr=sys.stderr,
                     env=_env,
                     cwd=_DOC_PATH)
    if pip_docs.wait() != 0:
        # Bug fix: the original reported pip.returncode (the earlier,
        # successful process) instead of the failing pip_docs process.
        raise ChildProcessError("Pip docs install failed with %d." %
                                (pip_docs.returncode, ))
Example #42
0
 def popenShell(self,command,sin=None):
     """Run *command* through the shell, feed *sin* to its stdin, log the
     captured stdout/stderr, and return the exit status."""
     proc = Popen(command, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
     out, err = proc.communicate(input=sin)
     # communicate() has already waited; wait() just reports the status.
     status = proc.wait()
     MylogOper().popenLogger(command, out, err)
     return status
    # open dakota log file
    dakota_log_file_name = "Dakota.log"
    if os.path.exists(dakota_log_file_name):
        os.remove(dakota_log_file_name)

    # run Dakota
    logfile.write("Executing Dakota.\n")
    #sys.path.append("../../../../src/Peridigm")
    command = ["dakota", "-in", "dakota_peridigm.in"]
    dakota_log_file = open(dakota_log_file_name, 'w')
    p = Popen(command,
              stdout=dakota_log_file,
              stderr=dakota_log_file,
              env=my_env)
    return_code = p.wait()
    dakota_log_file.close()
    if return_code != 0:
        result = return_code
    logfile.write("  Complete (return code " + str(return_code) + ").\n\n")

    # extract the modulus that Dakota solved for from the log file
    computed_bulk_modulus = 0.0
    dakota_log_file = open(dakota_log_file_name)
    lines = dakota_log_file.readlines()
    dakota_log_file.close()
    for i in range(len(lines)):
        line = lines[i]
        if "Best parameters" in line:
            vals = string.splitfields(line + lines[i + 1])
            for i in range(len(vals)):
    "/usr/bin/env", "python", script,

    "--account-key",
    "{{ letsencrypt_account_key }}",
    "--csr",
    "{{ acme_tiny_data_directory }}/csrs/" + cert['name'] + ".csr",
    "--acme-dir",
    "{{ acme_tiny_challenges_directory }}"
]

cmd = "/usr/bin/env " + " ".join(args)

p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
output = p.stdout.read()
p.stdin.close()
if p.wait() != 0:
    print "error while generating certificate for " + host
    print p.stderr.read()
else:
    f = open(cert['certpath'], 'w')
    f.write(output)
    f.close()
    os.chmod(cert['certpath'], {{ item.letsencrypt_cert_permissions | default(letsencrypt_default_cert_permissions) }})
{% if item.chainedcertpath is defined %}
    os.system("cat {{ item.certpath }} {{ letsencrypt_intermediate_cert_path }} > {{ item.chainedcertpath }}")
    os.chmod("{{item.chainedcertpath}}", {{ item.letsencrypt_cert_permissions | default(letsencrypt_default_cert_permissions) }})
{% endif %}

{% if item.fullchainedcertpath is defined %}
    os.system("cat {{ item.keypath }} {{ item.certpath }} {{ letsencrypt_intermediate_cert_path }} > {{ item.fullchainedcertpath }}")
    os.chmod("{{item.fullchainedcertpath}}", {{ item.letsencrypt_cert_permissions | default(letsencrypt_default_cert_permissions) }})
    'models/distilled/vgg16_cifar10_allLayers_reverse_predictive_pruning_adjustingW_postActivation_distilled_tol-00.pt',
]

# Fail fast if any dataset file or model checkpoint is missing before
# launching any jobs.
for p in itertools.chain(source_data, target_models):
    if not os.path.exists(p):
        raise FileNotFoundError(p)

# Round-robin the attack jobs over the visible GPUs, keeping at most
# len(gpus) child processes alive at any time.
gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
curr_gpu_idx = 0
proc_list = []
for sd in source_data:
    for model in target_models:
        # NOTE(review): %-formatting plus .split() breaks if a path contains
        # spaces -- assumes model/dataset paths are space-free.
        cmd = 'python attack_classifier.py %s --dataset %s --transfer_attack' % (model, sd)
        # Wait for a free slot.  Bug fix: the original pop()-ed from
        # proc_list while iterating it with enumerate(), which skips
        # elements and can remove the wrong process after a pop shifts the
        # indices; rebuilding the list of live processes is safe.
        while len(proc_list) >= len(gpus):
            proc_list = [q for q in proc_list if q.poll() is None]
            if len(proc_list) >= len(gpus):
                time.sleep(10)

        print('gpu_id:', gpus[curr_gpu_idx], cmd)
        p = Popen(cmd.split(), env=dict(os.environ, CUDA_VISIBLE_DEVICES=gpus[curr_gpu_idx]))
        proc_list.append(p)
        curr_gpu_idx = (curr_gpu_idx + 1) % len(gpus)

# Block until every launched job has finished.
for p in proc_list:
    p.wait()
Example #46
0
            break

    # quit if no more
    COUNT -= 1
    if COUNT == 0:
        return True
    return False


if __name__ == '__main__':

    # get message count on error queue
    pop = Popen(["sudo", "rabbitmqctl", "list_queues"],
                stdin=PIPE, stdout=PIPE, stderr=PIPE, env=os.environ)
    try:
        sts = pop.wait()  # wait for child to terminate and get status
    except Exception as e:
        print((str(e)))
    status = pop.returncode
    # print "returncode is:",status
    stdOut = pop.stdout.read()
    stdErr = pop.stderr.read()
    for line in stdOut.split('\n'):
        if line.startswith("error_queue"):
            COUNT = int(line.split()[1])
            break
    print(("Total number of messages in error_queue:", COUNT))
    if COUNT == 0:
        sys.exit()

    # Connect to RabbitMQ
Example #47
0
 def _build_package(self):
     """Build the source package self.dscfile with sbuild.

     Assembles the sbuild command line from the instance's configuration
     (host architecture, binNMU, extra build-deps, resolver overrides,
     maintainer, compression of the debian tarball, DPR repository), runs
     the pre_build/post_build hooks around the actual build, and logs the
     outcome.  The external interface is unchanged.
     """
     uploader_email = ''
     packageversion = os.path.splitext(os.path.basename(self.dscfile))[0]
     builddir = os.path.join(self.poolpath, 'pool', packageversion)
     self._makedirs(builddir, True)
     if self.uploader:
         uploader_email = self.uploader[1].decode('utf-8')
     architecture = dom.opts.get('debomatic', 'architecture')
     if architecture == 'system':
         # 'system' means: ask dpkg for the build machine's architecture.
         b_arch = check_output(['dpkg-architecture', '-qDEB_BUILD_ARCH'])
         architecture = b_arch.strip().decode('utf-8')
     mod = Module()
     mod.args.architecture = architecture
     mod.args.directory = self.poolpath
     mod.args.distribution = self.distribution
     mod.args.dists = dom.dists
     mod.args.dsc = self.dscfile
     mod.args.files = self.files
     mod.args.package = packageversion
     mod.args.uploader = uploader_email
     mod.args.hostarchitecture = self.hostarchitecture
     mod.execute_hook('pre_build')
     info(_('Building %s') % os.path.basename(self.dscfile))
     command = [
         'sbuild', '-A', '-s', '-d', self.distribution,
         '--arch=%s' % architecture, '--no-run-lintian', '-c',
         '%s-%s-debomatic' % (self.distribution, architecture), self.dscfile
     ]
     if self.hostarchitecture:
         # Replace '--arch=...' (index 5) with a cross-build host spec.
         command.pop(5)
         command.insert(5, '--host=%s' % self.hostarchitecture)
         command.insert(
             6, '--add-depends=libc6-dev:%s' % self.hostarchitecture)
     suite = dom.dists.get(self.distribution, 'suite')
     # NOTE(review): the next two blocks (distribution != suite vs
     # distribution != self.suite) insert the same resolver options and may
     # both fire, duplicating them -- confirm one is not a leftover.
     if self.distribution != suite:
         command.insert(-1, '--build-dep-resolver=aspcud')
         command.insert(
             -1, ('--aspcud-criteria=-removed,-changed,-new,'
                  '-count(solution,APT-Release:=/%s/)' % self.distribution))
     if self.distribution != self.suite:
         command.insert(-1, '--build-dep-resolver=aspcud')
         command.insert(-1,
                        ('--aspcud-criteria=-removed,-changed,-new,'
                         '-count(solution,APT-Release:=/%s/)' % self.suite))
     if self.extrabd:
         for extrabd in self.extrabd:
             command.insert(-1, '--add-depends=%s' % extrabd)
         command.insert(-1, '--build-dep-resolver=aspcud')
     if self.changesfile:
         with open(self.upload, 'r') as fd:
             data = fd.read()
         # If an orig tarball is listed in the upload, force its inclusion.
         for file in findall(r'\s\w{32}\s\d+\s\S+\s\S+\s(.*)', data):
             if '.orig.' in file:
                 command.insert(-1, '--force-orig-source')
                 break
         try:
             # Use the previous changelog version for -v (changes since).
             command.insert(
                 -1, '--debbuildopt=-v%s~' %
                 findall(r' \S+ \((\S+)\) \S+; ', data)[-1])
         except IndexError:
             pass
         with open(os.path.join(self.incoming, self.changesfile)) as fd:
             data = fd.read()
         for resolver in findall(r'Debomatic-Resolver: (\S+)', data):
             command.insert(-1, '--build-dep-resolver=%s' % resolver)
     if self.binnmu:
         command.insert(-1, '--binNMU=%s' % self.binnmu[0])
         command.insert(-1, '--make-binNMU=%s' % self.binnmu[1])
         command.insert(-1, '--no-arch-all')
         buildlog = '%s+b%s_%s.build' % (packageversion, self.binnmu[0],
                                         architecture)
     else:
         buildlog = '%s_%s.build' % (packageversion, architecture)
     if self.hostarchitecture:
         buildlog = sub(r'(.*_)\S+(\.build)',
                        '\\1%s\\2' % self.hostarchitecture, buildlog)
     if self.maintainer:
         command.remove('-A')
         command.remove('-s')
         command.insert(-1, '--maintainer=%s' % self.maintainer)
     ext = {'.gz': 'gzip', '.bz2': 'bzip2', '.xz': 'xz'}
     for file in self.files:
         if os.path.isfile(file):
             if findall(r'(.*\.debian\..*)', file):
                 try:
                     command.insert(
                         -1, '--debbuildopt=-Z%s' %
                         ext[os.path.splitext(file)[1]])
                 except KeyError:
                     # Bug fix: the dict lookup ext[...] raises KeyError for
                     # unknown suffixes; the original caught IndexError,
                     # which never matches, so an unknown compression
                     # extension crashed the build instead of being skipped.
                     pass
     for sbuildcommand in self._commands(self.distribution, architecture,
                                         packageversion):
         command.insert(-1, sbuildcommand)
     if self.dpr:
         if dom.opts.get('dpr', 'repository'):
             command.insert(
                 -1, '--extra-repository=%s' %
                 (dom.opts.get('dpr', 'repository') % {
                     'dist': os.path.basename(self.poolpath)
                 }))
     with open(os.devnull, 'w') as fd:
         try:
             ppath = os.path.join(self.poolpath, 'pool', packageversion)
             buildlink = os.path.join(ppath, '%s.buildlog' % packageversion)
             if os.path.exists(buildlink):
                 os.unlink(buildlink)
             os.symlink(buildlog, buildlink)
             # sbuild output goes to /dev/null; the build log is reached via
             # the symlink created above.
             process = Popen(command,
                             stdout=fd,
                             stderr=fd,
                             cwd=ppath,
                             preexec_fn=self._demote(),
                             start_new_session=True)
             with self.buildtask.set_pid(process.pid):
                 process.wait()
             if process.returncode:
                 info(
                     _("Build of %s failed") %
                     os.path.basename(self.dscfile))
             else:
                 info(
                     _("Build of %s successful") %
                     os.path.basename(self.dscfile))
                 mod.args.success = True
         except OSError:
             error(_('Invocation of sbuild failed'))
     mod.execute_hook('post_build')
     self._remove_files()
     debug(_('Build of %s complete') % os.path.basename(self.dscfile))
Example #48
0
 def _setup_chroot(self):
     """Create (if needed) and prepare the sbuild chroot for this build.

     Ensures the build and pool directories exist, resolves the target
     architecture, and -- when no matching schroot is registered yet --
     runs sbuild-createchroot, appends extra mirrors, installs the
     repository key, and rewrites the generated schroot stanza to the
     debomatic naming scheme.

     Raises DebomaticError when the schroot profile is missing or the
     chroot creation fails.  The chroot lock is always released.
     """
     action = None
     self.buildpath = os.path.join(self.incoming, self.distribution)
     self._makedirs(self.buildpath)
     self._makedirs(self.poolpath)
     architecture = dom.opts.get('debomatic', 'architecture')
     if architecture == 'system':
         # Resolve the 'system' placeholder to the real build arch.
         b_arch = check_output(['dpkg-architecture', '-qDEB_BUILD_ARCH'])
         architecture = b_arch.strip().decode('utf-8')
     debootstrap = dom.opts.get('debomatic', 'debootstrap')
     chrootname = '%s-%s-debomatic' % (self.distribution, architecture)
     self._lock_chroot(chrootname)
     with open(os.devnull, 'w') as fd:
         chroots = check_output(['schroot', '-l'], stderr=fd)
     if not search('chroot:%s' % chrootname, chroots.decode()):
         # No registered schroot for this dist/arch: create one below.
         action = 'create'
     mod = Module()
     mod.args.architecture = architecture
     mod.args.action = action
     mod.args.directory = self.buildpath
     mod.args.distribution = self.distribution
     mod.args.dists = dom.dists
     mod.execute_hook('pre_chroot')
     for d in ('logs', 'pool'):
         self._makedirs(os.path.join(self.buildpath, d))
     if action:
         profile = dom.opts.get('chroots', 'profile')
         if not os.path.isdir(os.path.join('/etc/schroot', profile)):
             error(_('schroot profile %s not found') % profile)
             self._unlock_chroot(chrootname)
             raise DebomaticError
         logfile = (
             '%s/logs/%s.%s' %
             (self.buildpath, self.distribution, strftime('%Y%m%d_%H%M%S')))
         target = dom.dists.get(self.distribution, 'suite')
         if target == self.distribution:
             pattern = '%s-%s-debomatic' % (self.distribution, architecture)
         else:
             pattern = '%s-%s-%s-debomatic' % (target, architecture,
                                               self.distribution)
         if os.path.isdir(os.path.join(self.buildpath, self.distribution)):
             # Stale chroot directory from a previous run: start clean.
             rmtree(os.path.join(self.buildpath, self.distribution))
         with open(logfile, 'w') as fd:
             try:
                 debug(
                     _('Creating chroot %(dist)s-%(arch)s-debomatic') % {
                         'dist': self.distribution,
                         'arch': architecture
                     })
                 components = ','.join(
                     dom.dists.get(self.distribution, 'components').split())
                 command = [
                     'sbuild-createchroot',
                     '--arch=%s' % architecture,
                     '--chroot-suffix=-debomatic',
                     '--debootstrap=%s' % debootstrap,
                     '--components=%s' % components, target,
                     os.path.join(self.buildpath, self.distribution),
                     dom.dists.get(self.distribution, 'mirror')
                 ]
                 if target != self.distribution:
                     # Overwrite the suffix argument (index 2 above).
                     command[2] = ('--chroot-suffix=-%s-debomatic' %
                                   self.distribution)
                 if dom.dists.has_option(self.distribution,
                                         'extrapackages'):
                     packages = dom.dists.get(self.distribution,
                                              'extrapackages').split()
                     command.insert(-3, '--include=%s' % ','.join(packages))
                 process = Popen(command, stdout=fd, stderr=fd)
                 with self.buildtask.set_pid(process.pid):
                     process.wait()
                 if process.returncode:
                     error(
                         _('Failed creating %(dist)s-%(arch)s-debomatic') %
                         {
                             'dist': self.distribution,
                             'arch': architecture
                         })
                     mod.execute_hook('post_chroot')
                     self._unlock_chroot(chrootname)
                     raise DebomaticError
             except OSError:
                 error(_('Unable to launch sbuild-createchroot'))
                 mod.execute_hook('post_chroot')
                 self._unlock_chroot(chrootname)
                 raise DebomaticError
         if dom.dists.has_option(self.distribution, 'extramirrors'):
             with open(
                     os.path.join(self.buildpath, self.distribution,
                                  'etc/apt/sources.list'), 'a') as fd:
                 fd.write(dom.dists.get(self.distribution, 'extramirrors'))
         if dom.opts.has_option('repository', 'pubring'):
             if os.path.isfile(dom.opts.get('repository', 'pubring')):
                 copy(
                     dom.opts.get('repository', 'pubring'),
                     os.path.join(self.buildpath, self.distribution,
                                  'etc/apt/trusted.gpg.d/debomatic.gpg'))
         chroots = '/etc/schroot/chroot.d'
         for file in os.listdir(chroots):
             if file.startswith(pattern):
                 # Rewrite the generated schroot stanza into a temp file,
                 # then move it over the original, preserving its mode.
                 with NamedTemporaryFile(mode='w+', delete=False) as tmp:
                     with open(os.path.join(chroots, file)) as fd:
                         for line in fd:
                             if line.startswith('['):
                                 tmp.write(
                                     '[%s-%s-debomatic]\n' %
                                     (self.distribution, architecture))
                             elif line.startswith('description'):
                                 tmp.write(
                                     line.replace(target,
                                                  self.distribution))
                             elif line.startswith('profile'):
                                 tmp.write('profile=%s\n' % profile)
                             else:
                                 tmp.write(line)
                 copymode(fd.name, tmp.name)
                 move(tmp.name, fd.name)
         mod.args.success = True
         mod.execute_hook('post_chroot')
     self._unlock_chroot(chrootname)
Example #49
0
def worker_func(idx):
    """Worker thread body: feed conversion commands to one xresloader JVM.

    idx: worker index (unused in the body; kept for the threading API).
    Pops command batches from the shared, lock-protected global cmd_list
    and writes them to the Java child process over stdin.  In --test mode
    the commands are only printed, not executed.  The child's exit status
    is accumulated into the global exit_code.
    """
    global exit_code
    java_options = ['java']
    # User-supplied options receive a '-' prefix; preset options from
    # xconv_options are passed through verbatim.
    if len(options.java_options) > 0:
        for java_option in options.java_options:
            java_options.append('-{0}'.format(java_option))
    if len(xconv_options['java_options']) > 0:
        for java_option in xconv_options['java_options']:
            java_options.append(java_option)

    java_options.append("-Dfile.encoding={0}".format(java_encoding))
    java_options.append('-jar')
    java_options.append(xconv_options['xresloader_path'])
    java_options.append('--stdin')

    # Take one queued command per configured output at a time, minimum one.
    once_pick_count = len(xconv_options['output_matrix']['outputs'])
    if once_pick_count <= 1:
        once_pick_count = 1
    pexec = None
    if not options.test:
        pexec = Popen(java_options,
                      stdin=PIPE,
                      stdout=PIPE,
                      stderr=PIPE,
                      shell=False)

        # Drain the child's pipes from separate threads so the child never
        # blocks on a full stdout/stderr buffer while we write to stdin.
        worker_thd_print_stdout = threading.Thread(target=print_stdout_func,
                                                   args=[pexec])
        worker_thd_print_stderr = threading.Thread(target=print_stderr_func,
                                                   args=[pexec])
        worker_thd_print_stdout.start()
        worker_thd_print_stderr.start()

        while True:
            cmd_picker_lock.acquire()
            if len(cmd_list) <= 0:
                cmd_picker_lock.release()
                break

            # Write a batch of commands, one per line, while holding the lock.
            for _ in range(0, once_pick_count):
                if not cmd_list:
                    break
                pexec.stdin.write(' '.join(
                    cmd_list.pop()).encode(java_encoding))
                pexec.stdin.write(os.linesep.encode(java_encoding))

            cmd_picker_lock.release()
            pexec.stdin.flush()
        # Closing stdin signals end-of-input to the --stdin child.
        pexec.stdin.close()
        for output_line in pexec.stdout.readlines():
            print(output_line.decode(java_encoding))
        cmd_exit_code = pexec.wait()

        worker_thd_print_stdout.join()
        worker_thd_print_stderr.join()

        # NOTE(review): this read-modify-write of the global exit_code is
        # not atomic across worker threads -- confirm a lock is not needed.
        exit_code = exit_code + cmd_exit_code
    else:
        # Test mode: collect the commands and print what would be executed.
        this_thd_cmds = []
        while True:
            cmd_picker_lock.acquire()
            if len(cmd_list) <= 0:
                cmd_picker_lock.release()
                break

            for _ in range(0, once_pick_count):
                if not cmd_list:
                    break

                # python2 must use encode string to bytes or there will be messy code
                # python3 must not use encode methed because it will transform string to bytes
                if sys.version_info.major < 3 and not conv_compat_py2_write_buffer:
                    this_thd_cmds.append(' '.join(
                        cmd_list.pop()).encode(console_encoding))
                else:
                    this_thd_cmds.append(' '.join(cmd_list.pop()))
            cmd_picker_lock.release()

        cprintf_stdout([print_style.FC_GREEN],
                       ('"{0}"' + os.linesep + '\t>{1}' + os.linesep).format(
                           '" "'.join(java_options),
                           (os.linesep + '\t>').join(this_thd_cmds)))
def apply_meta_config(args):
    """Apply a DSC meta-configuration MOF via dsc_host or omicli.

    args: argv-style list -- [script_name, '-configurationmof', mof_path].
    Builds the provider invocation, serializes the MOF as byte tokens for
    omicli, runs the provider, and records success/failure in the status
    file.  Exits the process with status 1 on bad parameters or failure.
    """
    if len(args) != 3:
        usage()

    if args[1].lower() != '-configurationmof':
        usage()

    if (not isfile(args[2])):
        errorMessage = 'The provided configurationmof file does not exist: ' + str(args[2])
        print(errorMessage)
        operationStatusUtility.write_failure_to_status_file_no_log(operation, 'Incorrect parameters to SetDscLocalConfigurationManager.py: ' + errorMessage)
        exit(1)

    fileHandle = open(args[2], 'r')
    try:
        fileContent = fileHandle.read()
        # omicli expects the MOF payload as a list of decimal byte values.
        outtokens = []
        for char in fileContent:
            outtokens.append(str(ord(char)))

        omicli_path = join(helperlib.CONFIG_BINDIR, 'omicli')
        dsc_host_base_path = helperlib.DSC_HOST_BASE_PATH
        dsc_host_path = join(dsc_host_base_path, 'bin/dsc_host')
        dsc_host_output_path = join(dsc_host_base_path, 'output')
        dsc_host_lock_path = join(dsc_host_base_path, 'dsc_host_lock')
        dsc_host_switch_path = join(dsc_host_base_path, 'dsc_host_ready')

        if ("omsconfig" in helperlib.DSC_SCRIPT_PATH):
            write_omsconfig_host_switch_event(pathToCurrentScript, isfile(dsc_host_switch_path))

        # dsc_host is used only on omsconfig installs where the switch
        # file signals that the host is ready; otherwise fall back to omicli.
        if ("omsconfig" in helperlib.DSC_SCRIPT_PATH) and (isfile(dsc_host_switch_path)):
            use_omsconfig_host = True
        else:
            use_omsconfig_host = False

        parameters = []

        if use_omsconfig_host:
            parameters.append(dsc_host_path)
            parameters.append(dsc_host_output_path)
            parameters.append("SendMetaConfigurationApply")
            parameters.append(args[2])
        else:
            parameters.append(omicli_path)
            parameters.append("iv")
            parameters.append(helperlib.DSC_NAMESPACE)
            parameters.append("{")
            parameters.append("MSFT_DSCLocalConfigurationManager")
            parameters.append("}")
            parameters.append("SendMetaConfigurationApply")
            parameters.append("{")
            parameters.append("ConfigurationData")
            parameters.append("[")
            # Insert configurationmof data here
            for token in outtokens:
                parameters.append(token)
            parameters.append("]")
            parameters.append("}")

        exit_code = 0

        # Save the starting timestamp without milliseconds
        startDateTime = operationStatusUtility.get_current_time_no_ms()

        stdout = ''
        stderr = ''

        # Apply the metaconfig
        if use_omsconfig_host:
            # Initialize so the finally block cannot hit a NameError when
            # open() itself fails.
            dschostlock_filehandle = None
            try:
                # Open the dsc host lock file. This also creates a file if it does not exist
                dschostlock_filehandle = open(dsc_host_lock_path, 'w')
                print("Opened the dsc host lock file at the path '" + dsc_host_lock_path + "'")

                dschostlock_acquired = False

                # Acquire dsc host file lock
                for retry in range(10):
                    try:
                        flock(dschostlock_filehandle, LOCK_EX | LOCK_NB)
                        dschostlock_acquired = True
                        break
                    except IOError:
                        write_omsconfig_host_log('dsc_host lock file not acquired. retry (#' + str(retry) + ') after 60 seconds...', pathToCurrentScript)
                        sleep(60)

                if dschostlock_acquired:
                    p = Popen(parameters, stdout=PIPE, stderr=PIPE)
                    # communicate() drains both pipes before waiting, which
                    # avoids the deadlock wait() can cause when a PIPE fills.
                    stdout, stderr = p.communicate()
                    exit_code = p.returncode
                else:
                    print("dsc host lock already acquired by a different process")
            finally:
                if dschostlock_filehandle is not None:
                    # Release dsc host file lock
                    flock(dschostlock_filehandle, LOCK_UN)

                    # Close dsc host lock file handle
                    dschostlock_filehandle.close()
        else:
            p = Popen(parameters, stdout=PIPE, stderr=PIPE)
            stdout, stderr = p.communicate()
            exit_code = p.returncode

        print(stdout)

        if ((exit_code != 0) or (stderr)):
            exit(1)

        # Python 3 returns an empty byte array into stderr on success
        if stderr == '' or (version_info >= (3, 0) and stderr.decode(encoding = 'UTF-8') == ''):
            operationStatusUtility.write_success_to_status_file(operation)
            print("Successfully applied metaconfig.")
        else:
            operationStatusUtility.write_failure_to_status_file(operation, startDateTime, stderr)
            print(stderr)
    finally:
        fileHandle.close()
from subprocess import Popen
import argparse
import os 
import time 

# variants
cfg = 'train_usa_disc_10'
ckpt = 'train_usa_disc_10'
l, r = 1, 36  # [l, r]


filename = f'checkpoints/single_seg/no_src_GANloss/{ckpt}/result.txt'
# Use a context manager so the result log is flushed and closed even when
# one of the test runs raises.
with open(filename, 'a') as f:
    # Evaluate every checkpoint epoch_l.pth .. epoch_r.pth in sequence,
    # appending each run's output to the result file.
    for i in range(l, r + 1):
        start = time.time()
        proc = Popen(['python', './tools/single_seg_test.py',
                      f'configs/single_seg/no_src_GANloss/{cfg}.py',
                      f'checkpoints/single_seg/no_src_GANloss/{ckpt}/epoch_{i}.pth'],
                     stdout=f)
        proc.wait()
        end = time.time()
        last = int(end - start)
        print(f'epoch{i}.pth: {last // 60}min {last % 60}s')
def run_racer(view, cmd_list):
    """Run racer against the current buffer and parse its MATCH output.

    view: Sublime view whose buffer content is completed.
    cmd_list: racer sub-command list; mutated in place (racer binary is
    prepended, the temp-file path appended).
    Returns a list of Result objects, excluding matches pointing back at
    the view's own saved file.
    """
    # Retrieve the entire buffer
    region = sublime.Region(0, view.size())
    content = view.substr(region)
    with_snippet = cmd_list[0] == "complete-with-snippet"

    # Figure out where to save the temp file so that racer can do
    # autocomplete based on other user files
    save_dir = determine_save_dir(view)
    print(save_dir)

    # Save that buffer to a temporary file for racer to use
    temp_file = tempfile.NamedTemporaryFile(mode='w',
                                            encoding='utf-8',
                                            delete=False,
                                            dir=save_dir)
    temp_file_path = temp_file.name
    temp_file.write(content)
    temp_file.close()
    cmd_list.insert(0, settings.racer_bin)
    cmd_list.append(temp_file_path)

    # Copy the system environment and add the source search
    # paths for racer to it
    expanded_search_paths = expand_all(settings.search_paths)
    env_path = ":".join(expanded_search_paths)
    env = os.environ.copy()
    env['RUST_SRC_PATH'] = env_path

    # Hide the console window on Windows.
    startupinfo = None
    if os.name == 'nt':
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW

    try:
        # Run racer
        process = Popen(cmd_list, stdout=PIPE, env=env,
                        startupinfo=startupinfo)
        (output, err) = process.communicate()
        # communicate() already waited for the process; reuse its status.
        exit_code = process.returncode
    finally:
        # Remove temp file even when racer fails to launch.
        os.remove(temp_file_path)

    # Parse results
    results = []
    match_string = "MATCH "
    if exit_code == 0:
        for byte_line in output.splitlines():
            line = byte_line.decode("utf-8")
            if line.startswith(match_string):
                if with_snippet:
                    parts = line[len(match_string):].split(';', 7)
                else:
                    parts = line[len(match_string):].split(',', 6)
                    # Snippet field is absent: insert a placeholder so the
                    # field layout matches the with-snippet case.
                    parts.insert(1, "")

                result = Result(parts)
                if result.path == view.file_name():
                    continue
                # Matches in the temp copy belong to the real source file.
                if result.path == temp_file_path:
                    result.path = view.file_name()
                results.append(result)
    else:
        print("failed: exit_code:", exit_code, output)
    return results
Example #53
0
def get_process_output(command, input=None):
    """Run *command* and return a (stdout, stderr, returncode) tuple.

    command: argv list passed to Popen.
    input: optional bytes written to the child's stdin.
    """
    process = Popen(command, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    output, error = process.communicate(input)
    # communicate() has already waited for the child; a second wait() is
    # redundant -- the exit status is available as returncode.
    return output, error, process.returncode
Example #54
0
def align_rna(run, workdir, ref_dir, gtf_dir=None, **kwargs):
    """Align an RNA-seq run with STAR and convert the alignment to BED.

    run: run object (url, assembly, fq1_file/fq2_file, singleread, ...);
         updated in place with read counts, output paths and status.
    workdir: working directory for outputs and the temporary STAR dir.
    ref_dir: directory holding one STAR genome index per assembly.
    gtf_dir: optional directory with <assembly>.gtf annotation files.
    kwargs: star (binary name), star_flags, mapq, threads.

    Returns True on success, False otherwise (run.status is set on failure).
    """
    star = kwargs.get('star', 'STAR')
    flags = kwargs.get('star_flags', '')
    mapq = kwargs.get('mapq', 0)
    threads = kwargs.get('threads', 1)

    # Add a trailing '/' to force STAR to put files INSIDE the directory
    outdir = tempfile.mkdtemp(dir=workdir) + '/'

    ref = os.path.join(ref_dir, run.assembly)

    if gtf_dir:
        gtf = os.path.join(gtf_dir, run.assembly + '.gtf')
    else:
        gtf = None

    # Output names derive from the run URL with its 4-char extension cut off.
    prefix = os.path.join(workdir, os.path.basename(run.url)[:-4])
    reads_file = prefix + '.bed.gz'
    reads_file_2 = os.path.join(outdir, 'Aligned.toTranscriptome.out.bam')

    cmd = [
        star, '--genomeDir', ref,
        '--outFileNamePrefix', outdir,
        '--outStd', 'SAM',
        # Report alignments to transcriptome (needed by RSEM)
        '--quantMode', 'TranscriptomeSAM',
        # Report unmapped reads
        '--outSAMunmapped', 'Within', 'KeepPairs',
        '--runThreadN', str(threads)
    ] + flags.split()

    if gtf and os.path.isfile(gtf):
        cmd += ['--sjdbGTFfile', gtf]

    if run.singleread:
        cmd += ['--readFilesIn', run.fq1_file]
    else:
        cmd += ['--readFilesIn', run.fq1_file, run.fq2_file]

    # Input FASTQ files are gzip-compressed.
    cmd += ['--readFilesCommand', 'gzip', '-cd']

    # Pipeline: STAR SAM output -> SAM2BED filter -> gzip -> reads_file.
    with open(reads_file, 'wb') as fh, open(os.devnull, 'w') as devnull:
        pop1 = Popen(cmd, stdout=PIPE, stderr=devnull)
        pop2 = Popen([SAM2BED, '-', str(mapq)], stdin=pop1.stdout, stdout=PIPE, stderr=PIPE)
        pop3 = Popen(['gzip', '-c'], stdin=pop2.stdout, stdout=fh, stderr=devnull)
        # Close the parent's copy so pop2 sees EOF once pop1 exits.
        pop1.stdout.close()
        pop1.wait()
        # NOTE(review): pop2.stdout is wired into pop3's stdin AND read by
        # communicate() here -- confirm the intent is that gzip consumes
        # stdout while communicate() only collects the stderr stats line.
        out, err = pop2.communicate()
        pop3.wait()

    # The FASTQ inputs are no longer needed once alignment finished.
    os.unlink(run.fq1_file)

    try:
        # Paired-end data
        os.unlink(run.fq2_file)
    except (OSError, TypeError):
        pass

    if pop1.returncode == pop2.returncode == pop3.returncode == 0 and os.path.isfile(reads_file) \
            and os.path.isfile(reads_file_2):
        # SAM2BED appears to report "<reads>\t<mapped reads>" on stderr.
        reads, mapped_reads = err.decode('utf8').strip().split('\t')
        run.reads = int(reads) if run.singleread else int(reads) / 2
        run.mapped_reads = int(mapped_reads)
        run.is_aligned = True

        # Move BAM file
        shutil.move(reads_file_2, prefix + '.bam')
        run.reads_file_2 = prefix + '.bam'

        # Delete output directory (we don't use the other files)
        shutil.rmtree(outdir)

        # BED file
        run.reads_file = reads_file
        return True
    else:
        shutil.rmtree(outdir)

        try:
            os.unlink(reads_file)
        except OSError:
            pass
        finally:
            run.status = ERRORS['align']
            return False
Example #55
0
class Server(object):
    '''Specification of DNS server'''

    START_WAIT = 2
    START_WAIT_VALGRIND = 5
    STOP_TIMEOUT = 30
    COMPILE_TIMEOUT = 60
    DIG_TIMEOUT = 5

    # Instance counter.
    count = 0

    def __init__(self):
        """Initialize all instance attributes to inactive defaults."""
        # Handle to the running daemon process (subprocess.Popen).
        self.proc = None
        # Valgrind command prefix; empty list means "not under valgrind".
        self.valgrind = []
        self.start_params = None
        self.ctl_params = None
        self.compile_cmd = None

        self.data_dir = None

        # Identity strings reported by the server.
        self.nsid = None
        self.ident = None
        self.version = None

        # Listening address/ports and control channel settings.
        self.addr = None
        self.port = None
        self.fixed_port = False
        self.ctlport = None
        self.external = False
        self.ctlkey = None
        self.ctlkeyfile = None
        # TSIG keys: one for transfers, one for test queries.
        self.tsig = None
        self.tsig_test = None

        # Zones served by this instance, keyed by zone name.
        self.zones = dict()

        # Optional tuning knobs (None means "use the server default").
        self.ratelimit = None
        self.ratelimit_slip = None
        self.ratelimit_whitelist = None
        self.tcp_reply_timeout = None
        self.max_udp_payload = None
        self.max_udp4_payload = None
        self.max_udp6_payload = None
        self.disable_any = None
        self.disable_notify = None
        self.zonefile_sync = None
        self.journal_size = None
        self.zone_size_limit = None

        self.inquirer = None

        self.modules = []

        # Working directory.
        self.dir = None
        # Name of server instance.
        self.name = None
        self.fout = None
        self.ferr = None
        self.valgrind_log = None
        self.confile = None

    def _check_socket(self, proto, port):
        '''Verify that our server process owns the socket on addr:port.

        Polls lsof up to five times, two seconds apart; succeeds only
        when exactly one PID holds the interface and it is self.proc.
        '''
        ip_version = ipaddress.ip_address(self.addr).version
        if ip_version == 4:
            iface = "4%s@%s:%i" % (proto, self.addr, port)
        else:
            iface = "6%s@[%s]:%i" % (proto, self.addr, port)

        for _ in range(5):
            lsof = Popen(["lsof", "-t", "-i", iface],
                         stdout=PIPE,
                         stderr=PIPE,
                         universal_newlines=True)
            (out, _err) = lsof.communicate()

            # Every non-empty output line is a PID using the interface.
            pids = [pid for pid in out.split("\n") if pid]

            # Successful bind: the socket's sole user is our own process.
            if len(pids) == 1 and str(self.proc.pid) in pids:
                return True

            time.sleep(2)

        return False

    def set_master(self, zone, slave=None, ddns=False, ixfr=False):
        '''Register this server as master for *zone*; optionally record a
        slave to be served.'''

        try:
            z = self.zones[zone.name]
        except KeyError:
            # First time this zone is configured here: clone its file
            # into the master directory.
            master_file = zone.clone(self.dir + "/master")
            z = Zone(master_file, ddns, ixfr)
            self.zones[zone.name] = z

        if slave:
            z.slaves.add(slave)

    def set_slave(self, zone, master, ddns=False, ixfr=False):
        '''Register this server as a slave of *master* for *zone*.'''

        slave_file = zone.clone(self.dir + "/slave", exists=False)

        z = self.zones.get(zone.name)
        if z is None:
            z = Zone(slave_file, ddns, ixfr)
            self.zones[zone.name] = z
        else:
            # Zone already configured on this server: it must no longer
            # act as its master.
            z.disable_master(slave_file)

        z.masters.add(master)

    def compile(self):
        """Compile the server via control_bin with compile_params.

        Raises Failed when the compile process cannot be run or exceeds
        COMPILE_TIMEOUT.
        """
        try:
            p = Popen([self.control_bin] + self.compile_params,
                      stdout=self.fout,
                      stderr=self.ferr)
            p.communicate(timeout=Server.COMPILE_TIMEOUT)
        except Exception as exc:
            # Narrowed from a bare 'except:' so KeyboardInterrupt and
            # SystemExit still propagate; chain the original cause.
            raise Failed("Can't compile server='%s'" % self.name) from exc

    def start(self, clean=False):
        """Start the server daemon (optionally under valgrind).

        clean: truncate the stdout/stderr logs instead of appending.
        Raises Failed when the daemon cannot be launched.
        """
        mode = "w" if clean else "a"

        try:
            if self.compile_cmd:
                self.ctl(self.compile_cmd)

            if self.daemon_bin != None:
                # NOTE(review): the log file objects handed to Popen are
                # never explicitly closed by this class -- confirm nothing
                # relies on them staying open before changing this.
                self.proc = Popen(self.valgrind + [self.daemon_bin] + \
                                  self.start_params,
                                  stdout=open(self.fout, mode=mode),
                                  stderr=open(self.ferr, mode=mode))

            if self.valgrind:
                # Valgrind slows startup considerably; wait longer.
                time.sleep(Server.START_WAIT_VALGRIND)
            else:
                time.sleep(Server.START_WAIT)
        except OSError:
            raise Failed("Can't start server='%s'" % self.name)

        # Start inquirer if enabled.
        if params.test.stress and self.inquirer:
            self.inquirer.start(self)

    def ctl(self, cmd, availability=True):
        """Send a control command to the server via the control binary.

        cmd: command string, split on whitespace into arguments.
        availability: first poll the control interface (up to 5 attempts,
        1 s apart) via a recursive 'status' call before sending.
        Raises Failed when the interface never answers or the command
        returns a non-zero exit code.
        """
        if availability:
            # Check for listening control interface.
            ok = False
            for i in range(0, 5):
                try:
                    self.ctl("status", availability=False)
                except Failed:
                    time.sleep(1)
                    continue
                ok = True
                break
            if not ok:
                self.backtrace()
                raise Failed("Unavailable remote control server='%s'" %
                             self.name)

        # Send control command; the with-block closes the per-call log
        # handles deterministically (they previously leaked).
        with open(self.dir + "/call.out", mode="a") as fout, \
                open(self.dir + "/call.err", mode="a") as ferr:
            try:
                check_call([self.control_bin] + self.ctl_params + cmd.split(),
                           stdout=fout,
                           stderr=ferr)
            except CalledProcessError as e:
                self.backtrace()
                raise Failed("Can't control='%s' server='%s', ret='%i'" %
                             (cmd, self.name, e.returncode)) from e

    def reload(self):
        """Ask the server to reload its configuration and give it
        START_WAIT seconds to settle."""
        self.ctl("reload")
        time.sleep(Server.START_WAIT)

    def running(self):
        """Return True when the managed server process is in a live
        (running, sleeping or disk-sleep) state."""
        proc = psutil.Process(self.proc.pid)
        status = proc.status
        # psutil 2.0.0+ makes status a function
        if psutil.version_info[0] >= 2:
            status = proc.status()
        return status in (psutil.STATUS_RUNNING,
                          psutil.STATUS_SLEEPING,
                          psutil.STATUS_DISK_SLEEP)

    def _valgrind_check(self):
        """Parse the valgrind log and flag leaks or errors via set_err.

        Scans each (HEAP|LEAK) SUMMARY section of the log, accumulating
        lost bytes, reachable bytes and the error count; any non-zero
        total marks the test run with a VALGRIND error.
        """
        if not self.valgrind:
            return

        check_log("VALGRIND CHECK %s" % self.name)

        # 'lock' is True while we are inside a summary section.
        lock = False
        lost = 0
        reachable = 0
        errcount = 0

        try:
            f = open(self.valgrind_log, "r")
        except:
            # Missing log is not an error: valgrind may have produced none.
            detail_log("No err log file")
            detail_log(SEP)
            return

        for line in f:
            if re.search("(HEAP|LEAK) SUMMARY", line):
                # New summary section: reset counters and start collecting.
                lost = 0
                reachable = 0
                errcount = 0
                lock = True
                continue

            if lock:
                lost_line = re.search("lost:", line)
                if lost_line:
                    # Byte counts use thousands separators; strip commas.
                    lost += int(line[lost_line.end():].lstrip(). \
                                split(" ")[0].replace(",", ""))
                    continue

                reach_line = re.search("reachable:", line)
                if reach_line:
                    reachable += int(line[reach_line.end():].lstrip(). \
                                     split(" ")[0].replace(",", ""))
                    continue

                err_line = re.search("ERROR SUMMARY:", line)
                if err_line:
                    errcount += int(line[err_line.end():].lstrip(). \
                                    split(" ")[0].replace(",", ""))

                    # ERROR SUMMARY closes the section: evaluate totals.
                    if lost > 0 or reachable > 0 or errcount > 0:
                        set_err("VALGRIND")
                        detail_log("%s memcheck: lost(%i B), reachable(%i B), " \
                                   "errcount(%i)" \
                                   % (self.name, lost, reachable, errcount))

                    lock = False
                    continue

        detail_log(SEP)
        f.close()

    def backtrace(self):
        """Append a gdb/vgdb full-thread backtrace to gdb.out/gdb.err.

        Only meaningful when running under valgrind (vgdb is attached to
        the process).  Failures are logged, never raised.
        """
        if not self.valgrind:
            return

        check_log("BACKTRACE %s" % self.name)

        # The with-block closes the log handles deterministically (they
        # previously leaked on every call).
        with open(self.dir + "/gdb.out", mode="a") as fout, \
                open(self.dir + "/gdb.err", mode="a") as ferr:
            try:
                check_call([
                    params.gdb_bin, "-ex", "set confirm off", "-ex",
                    "target remote | %s --pid=%s" %
                    (params.vgdb_bin, self.proc.pid), "-ex", "info threads",
                    "-ex", "thread apply all bt full", "-ex", "q",
                    self.daemon_bin
                ],
                           stdout=fout,
                           stderr=ferr)
            except Exception:
                # Narrowed from a bare 'except:'; best-effort diagnostics.
                detail_log("!Failed to get backtrace")

        detail_log(SEP)

    def stop(self, check=True):
        """Terminate the server gracefully, falling back to SIGKILL.

        check: also run the valgrind memcheck summary after shutdown.
        """
        if params.test.stress and self.inquirer:
            self.inquirer.stop()

        if self.proc:
            try:
                self.proc.terminate()
                self.proc.wait(Server.STOP_TIMEOUT)
            except ProcessLookupError:
                # Process already gone; nothing to do.
                pass
            except:
                # Any other failure (e.g. wait timeout) is deliberately
                # treated as "won't die nicely": record it and force-kill.
                self.backtrace()
                check_log("WARNING: KILLING %s" % self.name)
                detail_log(SEP)
                self.kill()
        if check:
            self._valgrind_check()

    def kill(self):
        """Force-kill the server process and clean up leftover vgdb pipes.

        Pipe removal is best-effort; missing or unremovable files are
        ignored.
        """
        if params.test.stress and self.inquirer:
            self.inquirer.stop()

        if self.proc:
            # Store PID before kill.
            pid = self.proc.pid

            self.proc.kill()

            # Remove uncleaned vgdb pipes.
            for f in glob.glob("/tmp/vgdb-pipe*-%s-*" % pid):
                try:
                    os.remove(f)
                except OSError:
                    # Narrowed from a bare 'except:'; os.remove only
                    # raises OSError for filesystem failures.
                    pass

    def gen_confile(self):
        f = open(self.confile, mode="w")
        f.write(self.get_config())
        f.close()

    def dig(self,
            rname,
            rtype,
            rclass="IN",
            udp=None,
            serial=None,
            timeout=None,
            tries=3,
            flags="",
            bufsize=None,
            edns=None,
            nsid=False,
            dnssec=False,
            log_no_sep=False):
        '''Query this server and return the wrapped response, mimicking dig(1).

        An equivalent dig command line is logged for every query.  Transport
        selection: AXFR is always TCP; IXFR defaults to TCP (and requires
        `serial`); other types pick UDP/TCP at random unless `udp` is given.
        `flags` is a space-separated list of header flags ("AA TC RD RA AD
        CD Z") to set on the query.  EDNS is enabled when `edns`, `bufsize`
        or `nsid` is supplied; `dnssec` sets the DO bit.  The query is
        attempted up to `tries` times; raises Failed if all attempts fail.

        Returns a dnstest.response.Response wrapping the dnspython answer,
        the original query and the call arguments (kept for later checks).
        '''
        key_params = self.tsig_test.key_params if self.tsig_test else dict()

        # Convert one item zone list to zone name.
        if isinstance(rname, list):
            if len(rname) != 1:
                raise Failed("One zone required")
            rname = rname[0].name

        rtype_str = rtype.upper()

        # Set port type.
        if rtype.upper() == "AXFR":
            # Always use TCP.
            udp = False
        elif rtype.upper() == "IXFR":
            # Use TCP if not specified.
            udp = udp if udp != None else False
            rtype_str += "=%i" % int(serial)
        else:
            # Use TCP or UDP at random if not specified.
            udp = udp if udp != None else random.choice([True, False])

        if udp:
            dig_flags = "+notcp"
        else:
            dig_flags = "+tcp"

        # dig's +retry counts retries, not total attempts.
        dig_flags += " +retry=%i" % (tries - 1)

        # Set timeout.
        if timeout is None:
            timeout = self.DIG_TIMEOUT
        dig_flags += " +time=%i" % timeout

        # Prepare query (useless for XFR).
        query = dns.message.make_query(rname, rtype, rclass)

        # Remove implicit RD flag.
        query.flags &= ~dns.flags.RD

        # Set packet flags.
        flag_names = flags.split()
        for flag in flag_names:
            if flag == "AA":
                query.flags |= dns.flags.AA
                dig_flags += " +aa"
            elif flag == "TC":
                query.flags |= dns.flags.TC
                dig_flags += " +tc"
            elif flag == "RD":
                query.flags |= dns.flags.RD
                dig_flags += " +rd"
            elif flag == "RA":
                query.flags |= dns.flags.RA
                dig_flags += " +ra"
            elif flag == "AD":
                query.flags |= dns.flags.AD
                dig_flags += " +ad"
            elif flag == "CD":
                query.flags |= dns.flags.CD
                dig_flags += " +cd"
            elif flag == "Z":
                # 64 (0x0040) is the reserved Z header bit; dnspython
                # exposes no named constant for it here.
                query.flags |= 64
                dig_flags += " +z"

        # Set EDNS.
        if edns != None or bufsize or nsid:

            class NsidFix(object):
                '''Stub EDNS option: this dnspython version lacks NSID support,
                   so emit an empty option with the NSID code.'''
                def __init__(self):
                    self.otype = dns.edns.NSID

                def to_wire(self, file=None):
                    pass

            if edns:
                edns = int(edns)
            else:
                edns = 0
            dig_flags += " +edns=%i" % edns

            if bufsize:
                payload = int(bufsize)
            else:
                # 1280 is the conventional default EDNS payload size.
                payload = 1280
            dig_flags += " +bufsize=%i" % payload

            if nsid:
                options = [NsidFix()]
                dig_flags += " +nsid"
            else:
                options = None

            query.use_edns(edns=edns, payload=payload, options=options)

        # Set DO flag.
        if dnssec:
            query.want_dnssec()
            dig_flags += " +dnssec +bufsize=%i" % query.payload

        # Store function arguments for possible comparation.
        args = dict()
        params = inspect.getargvalues(inspect.currentframe())
        for param in params.args:
            if param != "self":
                args[param] = params.locals[param]

        check_log("DIG %s %s %s @%s -p %i %s" %
                  (rname, rtype_str, rclass, self.addr, self.port, dig_flags))
        if key_params:
            detail_log(
                "%s:%s:%s" %
                (self.tsig_test.alg, self.tsig_test.name, self.tsig_test.key))

        for t in range(tries):
            try:
                if rtype.upper() == "AXFR":
                    resp = dns.query.xfr(self.addr,
                                         rname,
                                         rtype,
                                         rclass,
                                         port=self.port,
                                         lifetime=timeout,
                                         use_udp=udp,
                                         **key_params)
                elif rtype.upper() == "IXFR":
                    resp = dns.query.xfr(self.addr,
                                         rname,
                                         rtype,
                                         rclass,
                                         port=self.port,
                                         lifetime=timeout,
                                         use_udp=udp,
                                         serial=int(serial),
                                         **key_params)
                elif udp:
                    resp = dns.query.udp(query,
                                         self.addr,
                                         port=self.port,
                                         timeout=timeout)
                else:
                    resp = dns.query.tcp(query,
                                         self.addr,
                                         port=self.port,
                                         timeout=timeout)

                if not log_no_sep:
                    detail_log(SEP)

                return dnstest.response.Response(self, resp, query, args)
            except dns.exception.Timeout:
                # Timed out: retry immediately, the timeout already elapsed.
                pass
            except:
                # NOTE(review): bare except — presumably deliberate
                # best-effort (retry after any error, e.g. connection
                # refused while the server is starting); back off first.
                time.sleep(timeout)

        raise Failed("Can't query server='%s' for '%s %s %s'" % \
                     (self.name, rname, rclass, rtype))

    def create_sock(self, socket_type):
        '''Return a fresh socket of the given type, with the address family
           (IPv4 or IPv6) chosen to match this server's address.'''
        if ipaddress.ip_address(self.addr).version == 6:
            family = socket.AF_INET6
        else:
            family = socket.AF_INET
        return socket.socket(family, socket_type)

    def send_raw(self, data, sock=None):
        """Send `data` (str) as one raw UDP datagram to the server.

        If `sock` is not given, a one-shot datagram socket matching the
        server's address family is created and closed afterwards (the
        original leaked it).

        Raises Failed if the datagram was not sent in full.
        """
        own_sock = sock is None
        if own_sock:
            sock = self.create_sock(socket.SOCK_DGRAM)
        try:
            payload = bytes(data, 'utf-8')
            sent = sock.sendto(payload, (self.addr, self.port))
            # Compare against the encoded length: len(data) counts
            # characters, which understates the wire size for non-ASCII
            # input and caused spurious failures.
            if sent != len(payload):
                raise Failed("Can't send RAW data (%d bytes) to server='%s'" %
                             (len(payload), self.name))
        finally:
            if own_sock:
                sock.close()

    def log_search(self, pattern):
        '''Return True if `pattern` occurs on any line of the server log
           file (self.fout), False otherwise.'''
        with open(self.fout) as log:
            return any(pattern in line for line in log)

    def zone_wait(self, zone, serial=None, equal=False, greater=True):
        '''Try to get SOA record. With an optional serial number and given
           relation (equal or/and greater).

           Polls the server with SOA queries until the zone answers (and,
           if `serial` is given, until the observed serial satisfies the
           requested relation).  Returns the observed serial; raises
           Failed on timeout or when the answer lacks an SOA record.'''

        zone = zone_arg_check(zone)

        _serial = 0

        check_log("ZONE WAIT %s: %s" % (self.name, zone.name))

        # Poll for up to ~2 minutes: 60 attempts, 2 s sleep between them.
        for t in range(60):
            try:
                resp = self.dig(zone.name,
                                "SOA",
                                udp=True,
                                tries=1,
                                timeout=2,
                                log_no_sep=True)
            except:
                # Query failed (e.g. server not answering yet) -- retry.
                pass
            else:
                if resp.resp.rcode() == 0:
                    if not resp.resp.answer:
                        raise Failed(
                            "No SOA in ANSWER, zone='%s', server='%s'" %
                            (zone.name, self.name))

                    # Rdataset text looks like "TTL CLASS SOA mname rname
                    # serial ..."; field index 5 is presumably the serial.
                    soa = str((resp.resp.answer[0]).to_rdataset())
                    _serial = int(soa.split()[5])

                    if not serial:
                        break
                    elif equal and serial == _serial:
                        break
                    elif greater and serial < _serial:
                        break
            time.sleep(2)
        else:
            # for/else: loop exhausted without break -- report failure,
            # including the requested serial relation for debugging.
            self.backtrace()
            serial_str = ""
            if serial:
                serial_str = "%s%s%i" % (">" if greater else "",
                                         "=" if equal else "", serial)
            raise Failed("Can't get SOA%s, zone='%s', server='%s'" %
                         (serial_str, zone.name, self.name))

        detail_log(SEP)

        return _serial

    def zones_wait(self, zone_list, serials=None, equal=False, greater=True):
        '''Wait for every zone in `zone_list` (see zone_wait) and return a
           dict mapping zone name to the newly observed serial.  When
           `serials` is given, each zone waits relative to its old serial.'''
        return {
            zone.name: self.zone_wait(
                zone,
                serial=serials[zone.name] if serials else None,
                equal=equal,
                greater=greater)
            for zone in zone_list
        }

    def zone_verify(self, zone):
        '''Run DNSSEC verification on the server's copy of the zone file.'''
        zone = zone_arg_check(zone)
        zfile = self.zones[zone.name].zfile
        zfile.dnssec_verify()

    def check_nsec(self, zone, nsec3=False, nonsec=False):
        '''Query a provably non-existent name in the zone (with DNSSEC)
           and validate the negative proof in the response.'''
        zone = zone_arg_check(zone)
        qname = "0-x-not-existing-x-0." + zone.name
        answer = self.dig(qname, "ANY", dnssec=True)
        answer.check_nsec(nsec3=nsec3, nonsec=nonsec)

    def update(self, zone):
        '''Return a dynamic-update builder for the zone, signed with the
           test TSIG key when one is configured.'''
        zone = zone_arg_check(zone)

        if self.tsig_test:
            key_params = self.tsig_test.key_params
        else:
            key_params = dict()

        upd = dns.update.Update(zone.name, **key_params)
        return dnstest.update.Update(self, upd)

    def gen_key(self, zone, **args):
        '''Create and generate a new DNSSEC key for the zone in this
           server's key directory; extra options pass through to Key.'''
        zone = zone_arg_check(zone)
        new_key = dnstest.keys.Key(self.keydir, zone.name, **args)
        new_key.generate()
        return new_key

    def use_keys(self, zone):
        # Install the zone's pre-generated DNSSEC keys into this server's
        # key directory.
        zone = zone_arg_check(zone)
        # copy all keys, even for other zones; update=True copies only
        # files newer than the destination copy.
        # NOTE(review): distutils is deprecated (removed in Python 3.12);
        # shutil.copytree(dirs_exist_ok=True) is the replacement but lacks
        # the only-if-newer semantics -- confirm before migrating.
        distutils.dir_util.copy_tree(zone.key_dir, self.keydir, update=True)

    def dnssec(self, zone):
        '''Return the per-zone DNSSEC configuration object.'''
        return self.zones[zone_arg_check(zone).name].dnssec

    def enable_nsec3(self, zone, **args):
        '''Turn on NSEC3 in the zone file; options pass through.'''
        self.zones[zone_arg_check(zone).name].zfile.enable_nsec3(**args)

    def disable_nsec3(self, zone):
        '''Turn off NSEC3 in the zone file.'''
        self.zones[zone_arg_check(zone).name].zfile.disable_nsec3()

    def backup_zone(self, zone):
        '''Store a backup copy of the zone file.'''
        self.zones[zone_arg_check(zone).name].zfile.backup()

    def update_zonefile(self, zone, version=None, random=False):
        '''Replace the zone file: with a random update when `random` is
           set, otherwise with the given stored `version` from data_dir.'''
        zone = zone_arg_check(zone)
        zfile = self.zones[zone.name].zfile
        if random:
            zfile.update_rnd()
        else:
            zfile.upd_file(storage=self.data_dir, version=version)

    def add_module(self, zone, module):
        '''Attach a module to the given zone, or globally (server-wide)
           when no zone is given.'''
        zone = zone_arg_check(zone)

        if not zone:
            self.modules.append(module)
        else:
            self.zones[zone.name].add_module(module)

    def clean(self, zone=True, timers=True):
        """Remove server runtime leftovers.

        zone:   True removes all zonefiles, a zone object removes just
                that zone's file, falsy skips zonefile cleanup.
        timers: also remove the persistent timers directory.
        """
        def _remove_file(path):
            # Best effort: a missing or busy file is not an error here,
            # but anything other than an OS-level failure should surface.
            try:
                os.remove(path)
            except OSError:
                pass

        if zone:
            if zone is True:
                # Remove all zonefiles.
                for _z in sorted(self.zones):
                    _remove_file(self.zones[_z].zfile.path)
            else:
                # Remove specified zonefile.
                _remove_file(self.zones[zone.name].zfile.path)

        if timers:
            try:
                shutil.rmtree(self.dir + "/timers")
            except OSError:
                pass
Example #56
0
def merge_runs(dset, workdir, ratio=0, **kwargs):
    """Merge the per-run reads files of `dset` into one sorted .bed.gz
    (and, for RNA data, one merged .bam).

    The runs are rejected wholesale when the overall mapping rate is below
    `ratio` (status set to ERRORS['ratio']).  On success sets
    dset.reads_file / dset.reads_file_2 / dset.is_merged and returns True;
    on any failure sets dset.status and returns False.  Intermediate
    per-run files are always deleted.

    kwargs: maxmem (MB buffer for sort), samtools (binary path).
    """
    maxmem = kwargs.get('maxmem', 0)
    samtools = kwargs.get('samtools', 'samtools')

    # Output basename: <sample>_<experiment>[_<run name>].
    if dset.sample_id:
        basename = dset.sample_id + '_' + dset.exp_id
    else:
        basename = dset.exp_id

    if len(dset.runs) == 1:
        basename += '_' + os.path.basename(dset.runs[0].url[:-4])

    tot_reads = sum([r.reads for r in dset.runs])
    tot_mapped_reads = sum([r.mapped_reads for r in dset.runs])

    if tot_mapped_reads < ratio * tot_reads:
        # Mapping rate too low: delete all intermediates and reject.
        for r in dset.runs:
            os.unlink(r.reads_file)

        if dset.data_type == DT_RNA:
            for r in dset.runs:
                os.unlink(r.reads_file_2)

        dset.status = ERRORS['ratio']
        return False
    else:
        tmp_dir = tempfile.mkdtemp(dir=workdir)

        # Hi-C reads sort on both fragment ends; others on position+strand.
        if dset.data_type == DT_HIC:
            sort_cmd = ['sort', '-k1,1V', '-k2,2n', '-k4,4V', '-k5,5n', '-T', tmp_dir]
        else:
            sort_cmd = ['sort', '-k1,1V', '-k2,2n', '-k6,6', '-T', tmp_dir]

        if maxmem:
            sort_cmd.append('--buffer-size={}M'.format(maxmem))

        reads_files = [r.reads_file for r in dset.runs]
        reads_file = os.path.join(workdir, basename) + '.bed.gz'

        # Pipeline: gzip -cd <runs> | sort | gzip -c > merged file.
        with open(reads_file, 'wb') as fh, open(os.devnull, 'w') as devnull:
            pop1 = Popen(['gzip', '-cd'] + reads_files, stdout=PIPE, stderr=devnull)
            pop2 = Popen(sort_cmd, stdin=pop1.stdout, stdout=PIPE, stderr=devnull)
            pop3 = Popen(['gzip', '-c'], stdin=pop2.stdout, stdout=fh, stderr=devnull)
            # Close parent copies so upstream stages see SIGPIPE on failure.
            pop1.stdout.close()
            pop2.stdout.close()
            pop1.wait()
            pop2.wait()
            pop3.wait()

        shutil.rmtree(tmp_dir)

        for f in reads_files:
            os.unlink(f)

        if pop1.returncode == pop2.returncode == pop3.returncode == 0 and os.path.isfile(reads_file):
            if dset.data_type == DT_RNA:
                reads_files = [r.reads_file_2 for r in dset.runs]
                reads_file_2 = os.path.join(workdir, basename) + '.bam'

                if len(reads_files) > 1:
                    with open(os.devnull, 'w') as devnull:
                        pop = Popen([samtools, 'merge', reads_file_2] + reads_files, stdout=devnull, stderr=devnull)
                        pop.wait()

                    for f in reads_files:
                        os.unlink(f)

                    if pop.returncode == 0 and os.path.isfile(reads_file_2):
                        dset.reads_file_2 = reads_file_2
                    else:
                        os.unlink(reads_file)
                        try:
                            os.unlink(reads_file_2)
                        except OSError:
                            pass
                        # Fix: 'return' used to sit inside 'finally', which
                        # would also have suppressed in-flight exceptions.
                        return False
                else:
                    # Single run: no merge needed, just rename.
                    move(reads_files[0], reads_file_2)
                    dset.reads_file_2 = reads_file_2

            dset.is_merged = True
            dset.reads_file = reads_file
            return True
        else:
            try:
                os.unlink(reads_file)
            except OSError:
                pass
            dset.status = ERRORS['merge']
            return False
Example #57
0
 def getDomain(self):
     """Return the host's DNS domain name (raw bytes from `hostname -d`,
     trailing newline included); cached in self.domain on success.
     """
     proc = Popen(['hostname', '-d'], stdout=PIPE, stderr=PIPE)
     # communicate() instead of wait()+read(): with both stdout and stderr
     # piped, wait() can deadlock once a pipe buffer fills.
     out, _ = proc.communicate()
     if proc.returncode == 0:
         self.domain = out
     return self.domain
Example #58
0
def align_hic(run, workdir, ref_dir, **kwargs):
    """Align both mates of a Hi-C run with bowtie2, convert to BED, and
    join the two sides into one compressed .tsv.gz reads file.

    On success sets run.reads / run.mapped_reads / run.is_aligned /
    run.reads_file and returns True; on failure sets run.status and
    returns False.  Input FASTQ files are always deleted.

    kwargs: bowtie2 (binary), bowtie2_flags, mapq (min quality), threads.
    """
    bowtie2 = kwargs.get('bowtie2', 'bowtie2')
    flags = kwargs.get('bowtie2_flags', '')
    mapq = kwargs.get('mapq', 0)
    threads = kwargs.get('threads', 1)

    ref = os.path.join(ref_dir, run.assembly)
    prefix = os.path.join(workdir, os.path.basename(run.url)[:-4])

    fw_reads_file = prefix + '_1.bed.gz'
    rv_reads_file = prefix + '_2.bed.gz'
    reads_file = prefix + '.tsv.gz'

    cmd = [bowtie2, '-x', ref, '-p', str(threads), '--reorder'] + flags.split()

    # Forward mate: bowtie2 | SAM2BED | gzip.
    with open(fw_reads_file, 'wb') as fh, open(os.devnull, 'w') as devnull:
        pop1 = Popen(cmd + ['-U', run.fq1_file], stdout=PIPE, stderr=devnull)
        pop2 = Popen([SAM2BED, '-', str(mapq), '--report-all'], stdin=pop1.stdout, stdout=PIPE, stderr=PIPE)
        pop3 = Popen(['gzip', '-c'], stdin=pop2.stdout, stdout=fh, stderr=devnull)
        pop1.stdout.close()
        pop1.wait()
        pop2.wait()
        pop3.wait()

    os.unlink(run.fq1_file)

    if pop1.returncode != 0 or pop2.returncode != 0 or pop3.returncode != 0 or not os.path.isfile(fw_reads_file):
        os.unlink(run.fq2_file)

        try:
            os.unlink(fw_reads_file)
        except OSError:
            pass
        # Fix: 'return' used to sit inside 'finally', which would also
        # have suppressed in-flight exceptions.
        run.status = ERRORS['align']
        return False

    # Reverse mate: same pipeline.
    with open(rv_reads_file, 'wb') as fh, open(os.devnull, 'w') as devnull:
        pop1 = Popen(cmd + ['-U', run.fq2_file], stdout=PIPE, stderr=devnull)
        pop2 = Popen([SAM2BED, '-', str(mapq), '--report-all'], stdin=pop1.stdout, stdout=PIPE, stderr=PIPE)
        pop3 = Popen(['gzip', '-c'], stdin=pop2.stdout, stdout=fh, stderr=devnull)
        pop1.stdout.close()
        pop1.wait()
        pop2.wait()
        pop3.wait()

    os.unlink(run.fq2_file)

    if pop1.returncode != 0 or pop2.returncode != 0 or pop3.returncode != 0 or not os.path.isfile(rv_reads_file):
        try:
            os.unlink(rv_reads_file)
        except OSError:
            pass
        run.status = ERRORS['align']
        return False

    # Create named pipe (forward). Fix: this block was duplicated, which
    # leaked one unused FIFO on disk per call.
    fd, fw_tmp = tempfile.mkstemp()
    os.close(fd)
    os.unlink(fw_tmp)
    os.mkfifo(fw_tmp)

    # Create named pipe (reverse)
    fd, rv_tmp = tempfile.mkstemp()
    os.close(fd)
    os.unlink(rv_tmp)
    os.mkfifo(rv_tmp)

    # Listen on named pipes
    fh = open(reads_file, 'wb')
    devnull = open(os.devnull, 'w')
    pop1 = Popen([JOINBED, fw_tmp, rv_tmp], stdout=PIPE, stderr=PIPE)
    pop2 = Popen(['gzip', '-c'], stdin=pop1.stdout, stdout=fh, stderr=devnull)

    # Write to named pipes
    fh_fw = open(fw_tmp, 'w')
    fh_rv = open(rv_tmp, 'w')
    pop3 = Popen(['gzip', '-cd', fw_reads_file], stdout=fh_fw, stderr=devnull)
    pop4 = Popen(['gzip', '-cd', rv_reads_file], stdout=fh_rv, stderr=devnull)

    # Close parent copies of the handles (children hold their own).
    fh_fw.close()
    fh_rv.close()
    devnull.close()

    # Wait for writing processes to finish
    pop3.wait()
    pop4.wait()

    # Wait for joinbed/gzip to finish; joinbed reports stats on stderr.
    pop2.wait()
    out, err = pop1.communicate()

    # Fix: the output handle was previously never closed.
    fh.close()

    # Delete named pipes
    os.unlink(fw_tmp)
    os.unlink(rv_tmp)

    # Delete independent aligned files
    os.unlink(fw_reads_file)
    os.unlink(rv_reads_file)

    # Fix: the success test previously only compared the four return codes
    # to each other, never to 0 -- four identical failures passed as success
    # (cf. the '== 0' check used by merge_runs).
    if pop1.returncode == pop2.returncode == pop3.returncode == pop4.returncode == 0 and os.path.isfile(reads_file):
        # joinbed's stderr carries "<reads>\t<mapped_reads>".
        reads, mapped_reads = err.decode('utf8').strip().split('\t')
        run.reads = int(reads) if run.singleread else int(reads) / 2
        run.mapped_reads = int(mapped_reads)
        run.is_aligned = True
        run.reads_file = reads_file
        return True
    else:
        try:
            os.unlink(reads_file)
        except OSError:
            pass
        run.status = ERRORS['post-align']
        return False
Example #59
0
 def getHostName(self):
     """Return the host's FQDN list (raw bytes from `hostname -A`,
     trailing newline included); cached in self.hostname on success.
     """
     proc = Popen(['hostname', '-A'], stdout=PIPE, stderr=PIPE)
     # communicate() instead of wait()+read(): with both stdout and stderr
     # piped, wait() can deadlock once a pipe buffer fills.
     out, _ = proc.communicate()
     if proc.returncode == 0:
         self.hostname = out
     return self.hostname
 def analyzeFileToFile(self, in_fn, out_fn):
     """Morphologically analyze file `in_fn`, store output in `out_fn`
     """
     # Run the morphology binary in file-to-file (NOSTDIO) mode with the
     # configured dictionary and block until it finishes; the tool writes
     # its output directly to `out_fn`.
     # NOTE(review): the exit status of the child is discarded -- confirm
     # whether failures should be detected here.
     p = Popen([self.fm_bin, 'NOSTDIO', in_fn, out_fn, self.fm_dict])
     p.wait()