Example #1
def write_xar(fn, hdr, tocdata, heap, keep_old=False):
    ztocdata = zlib.compress(tocdata)
    digest = toc_digest(hdr, ztocdata)
    newhdr = dict(hdr,
                  toc_length_uncompressed=len(tocdata),
                  toc_length_compressed=len(ztocdata))
    outf = NamedTemporaryFile(prefix='.' + os.path.basename(fn),
                              dir=os.path.dirname(fn),
                              delete=False)
    try:
        st_mode = os.stat(fn).st_mode
        if os.fstat(outf.fileno()).st_mode != st_mode:
            os.fchmod(outf.fileno(), st_mode)
    except OSError:
        pass
    try:
        outf.writelines([HEADER.pack(newhdr), ztocdata, digest])
        copyfileobj(heap, outf)
        outf.close()
    except:
        outf.close()
        os.unlink(outf.name)
        raise
    if keep_old:
        oldfn = fn + '.old'
        if os.path.exists(oldfn):
            os.unlink(oldfn)
        os.link(fn, oldfn)
    os.rename(outf.name, fn)
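Example #1 builds the replacement archive in a temporary file inside the destination directory and then os.rename()s it over the original, but it never forces the written data to disk first. A minimal sketch of the same pattern with an explicit flush/fsync added (atomic_write_bytes is a hypothetical helper name, not part of the example above):

import os
from tempfile import NamedTemporaryFile

def atomic_write_bytes(path, data):
    # Temp file in the destination directory, so the final os.rename()
    # stays on one filesystem and atomically replaces the target.
    tmp = NamedTemporaryFile(prefix='.' + os.path.basename(path),
                             dir=os.path.dirname(path) or '.',
                             delete=False)
    try:
        tmp.write(data)
        tmp.flush()
        os.fsync(tmp.fileno())   # push the bytes to disk before renaming
        tmp.close()
        os.rename(tmp.name, path)
    except BaseException:
        tmp.close()
        os.unlink(tmp.name)
        raise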
Example #2
def saveFile(filename, data, mode=0o644):
    tmpFilename = None
    try:
        f = NamedTemporaryFile(prefix='.%s.' % path.basename(filename),
                               dir=path.dirname(filename),
                               delete=False)
        tmpFilename = f.name
        if isinstance(data, list):
            for x in data:
                f.write(x)

        else:
            f.write(data)
        f.flush()
        fsync(f.fileno())
        fchmod(f.fileno(), mode)
        f.close()
        rename(tmpFilename, filename)
    except Exception as e:
        print 'saveFile: failed to write to %s: %s' % (filename, e)
        if tmpFilename and path.exists(tmpFilename):
            unlink(tmpFilename)
        return False

    return True
Example #3
def write_xar(fn, hdr, tocdata, heap, keep_old=False):
    ztocdata = zlib.compress(tocdata)
    digest = toc_digest(hdr, ztocdata)
    newhdr = dict(hdr,
                  toc_length_uncompressed=len(tocdata),
                  toc_length_compressed=len(ztocdata))
    outf = NamedTemporaryFile(prefix='.' + os.path.basename(fn),
                              dir=os.path.dirname(fn),
                              delete=False)
    try:
        st_mode = os.stat(fn).st_mode
        if os.fstat(outf.fileno()).st_mode != st_mode:
            os.fchmod(outf.fileno(), st_mode)
    except OSError:
        pass
    try:
        outf.writelines([HEADER.pack(newhdr),
                         ztocdata,
                         digest])
        copyfileobj(heap, outf)
        outf.close()
    except:
        outf.close()
        os.unlink(outf.name)
        raise
    if keep_old:
        oldfn = fn + '.old'
        if os.path.exists(oldfn):
            os.unlink(oldfn)
        os.link(fn, oldfn)
    os.rename(outf.name, fn)
Example #4
def stdio_capture(should_capture):
    """
    Capture stdout in a re-entrant manner. See pause_stdio_capture().

    If should_capture is False it simply returns (stdout, stderr),
    which simplifies conditional "with" clauses, i.e.:

        with stdio_capture(should_capture) as (so, se):
            important_stuff(so, se)

    as opposed to:

        if should_capture:
            with stdio_capture(should_capture) as (so, se):
                important_stuff(so, se)
        else:
            # repeating the above
            important_stuff(sys.stdout, sys.stderr)

    """

    if not should_capture:
        yield sys.stdout, sys.stderr
    else:
        global redirect_depth
        global so_root_save_fd, so_curr_tmpfile
        global se_root_save_fd, se_curr_tmpfile

        so_save_fd = os.dup(so_orig_fd)
        se_save_fd = os.dup(se_orig_fd)
        if redirect_depth == 0:
            so_root_save_fd = so_save_fd
            se_root_save_fd = se_save_fd

        so_tmpfile = NamedTemporaryFile(mode="w+b")
        se_tmpfile = NamedTemporaryFile(mode="w+b")

        so_prev_tmpfile = so_curr_tmpfile
        se_prev_tmpfile = se_curr_tmpfile

        so_curr_tmpfile = so_tmpfile
        se_curr_tmpfile = se_tmpfile

        redirect_depth += 1
        try:
            _redirect_stdout(so_tmpfile.fileno())
            _redirect_stderr(se_tmpfile.fileno())
            yield (so_tmpfile, se_tmpfile)
            _redirect_stderr(se_save_fd)
            _redirect_stdout(so_save_fd)
        finally:
            redirect_depth -= 1
            so_tmpfile.close()
            se_tmpfile.close()
            so_curr_tmpfile = so_prev_tmpfile
            se_curr_tmpfile = se_prev_tmpfile
            os.close(so_save_fd)
            os.close(se_save_fd)
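stdio_capture() above is a generator (used via "with", so presumably decorated with contextlib.contextmanager in the original module) and relies on _redirect_stdout()/_redirect_stderr() plus module-level bookkeeping globals (redirect_depth, so_curr_tmpfile, se_curr_tmpfile), none of which are shown. A hedged sketch of what the missing redirection helpers might look like, assuming plain dup2-based fd redirection:

import os
import sys

# Assumed module-level state (not shown above): the original stdout/stderr
# file descriptors, captured once at import time.
so_orig_fd = sys.stdout.fileno()
se_orig_fd = sys.stderr.fileno()

def _redirect_stdout(to_fd):
    # Flush Python-level buffers, then make the stdout fd point at to_fd.
    sys.stdout.flush()
    os.dup2(to_fd, so_orig_fd)

def _redirect_stderr(to_fd):
    sys.stderr.flush()
    os.dup2(to_fd, se_orig_fd)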
Example #5
    def _dump_passphrase_to_dir(self, path, passphrase=None):
        """Dump passphrase to "passphrase" file in "path".

        1. File permission should already be user-read-write-only on
           creation by mkstemp.
        2. The combination of os.fsync and os.rename should guarantee
           that we don't end up with an incomplete passphrase file.
        3. Perhaps we should use uuid.uuid4() to generate the passphrase?
        """
        mkdir_p(path)
        from tempfile import NamedTemporaryFile
        handle = NamedTemporaryFile(
            prefix=self.PASSPHRASE_FILE_BASE, dir=path, delete=False)
        # Note: Perhaps a UUID might be better here?
        if passphrase is None:
            import random
            passphrase = ''.join(
                random.sample(self.PASSPHRASE_CHARSET, self.PASSPHRASE_LEN))
        handle.write(passphrase)
        os.fsync(handle.fileno())
        handle.close()
        passphrase_file_name = os.path.join(
            path, self.PASSPHRASE_FILE_BASE)
        os.rename(handle.name, passphrase_file_name)
        if cylc.flags.verbose:
            print 'Generated suite passphrase: %s' % passphrase_file_name
Example #6
    def save(self, filename, mtime=1300507380.0):
        """
        Serialize this RingData instance to disk.

        :param filename: File into which this instance should be serialized.
        :param mtime: time used to override mtime for gzip, default or None
                      if the caller wants to include time
        """
        # Override the timestamp so that the same ring data creates
        # the same bytes on disk. This makes a checksum comparison a
        # good way to see if two rings are identical.
        #
        # This only works on Python 2.7; on 2.6, we always get the
        # current time in the gzip output.
        tempf = NamedTemporaryFile(dir=".", prefix=filename, delete=False)
        if 'mtime' in inspect.getargspec(GzipFile.__init__).args:
            gz_file = GzipFile(filename, mode='wb', fileobj=tempf, mtime=mtime)
        else:
            gz_file = GzipFile(filename, mode='wb', fileobj=tempf)
        self.serialize_v1(gz_file)
        gz_file.close()
        tempf.flush()
        os.fsync(tempf.fileno())
        tempf.close()
        os.chmod(tempf.name, 0o644)
        os.rename(tempf.name, filename)
Example #7
def fetch(year, month, day, n):
    kwargs = {"year": year, "month": month, "day": day, "n": n}
    local_fn = filename.format(**kwargs)

    # Skip if the file exists.
    if os.path.exists(local_fn):
        return

    # Download the remote file.
    remote = url.format(**kwargs)
    r = requests.get(remote)
    if r.status_code == requests.codes.ok:
        # Atomically write to disk.
        # http://stackoverflow.com/questions/2333872/ \
        #        atomic-writing-to-file-with-python
        f = NamedTemporaryFile("wb", delete=False)
        f.write(r.content)
        f.flush()
        os.fsync(f.fileno())
        f.close()
        shutil.move(f.name, local_fn)
        log.log('Downloading {0}-{1:02d}-{2:02d}-{3}.json.gz finished.'.format(
            year, month, day, n))
    else:
        log.log(
            'Fail downloading {0}-{1:02d}-{2:02d}-{3}.json.gz. ({4}).'.format(
                year, month, day, n, r.status_code), log.ERROR)
Example #8
def fetch_one(year, month, day, hour):
    '''Fetch one archived timeline.'''
    local_fn = local_url.format(year=year, month=month, day=day, hour=hour)
    if os.path.exists(local_fn):
        print '%s exists.' % local_fn
        return local_fn
    else:
        url = archive_url.format(year=year, month=month, day=day, hour=hour)
        r = None
        try:
            r = requests.get(url, timeout=120)
            if r.status_code == 200:
                f = NamedTemporaryFile("wb", delete=False)
                f.write(r.content)
                f.flush()
                os.fsync(f.fileno())
                f.close()
                shutil.move(f.name, local_fn)
                print("Fetching %s successded." % url)
                return local_fn
            else:
                print("Fetching %s failed." % url)
        except:
            return None
        finally:
            if r is not None:
                r.close()
    return None
Example #9
    def save(self, filename):
        """
        Serialize this RingData instance to disk.

        :param filename: File into which this instance should be serialized.
        """
        # Override the timestamp so that the same ring data creates
        # the same bytes on disk. This makes a checksum comparison a
        # good way to see if two rings are identical.
        #
        # This only works on Python 2.7; on 2.6, we always get the
        # current time in the gzip output.
        tempf = NamedTemporaryFile(dir=".", prefix=filename, delete=False)
        try:
            gz_file = GzipFile(filename, mode='wb', fileobj=tempf,
                               mtime=1300507380.0)
        except TypeError:
            gz_file = GzipFile(filename, mode='wb', fileobj=tempf)
        self.serialize_v1(gz_file)
        gz_file.close()
        tempf.flush()
        os.fsync(tempf.fileno())
        tempf.close()
        os.chmod(tempf.name, 0o644)
        os.rename(tempf.name, filename)
Example #10
def load_button_pixbufs(color):
    global BUTTONS_SVG

    if BUTTONS_SVG is None:
        image_path = os.path.join(MODULE_DIR, 'images', 'mouse.svg')
        with open(image_path) as svg_file:
            BUTTONS_SVG = svg_file.readlines()

    if not isinstance(color, str):
        # Gdk.Color
        color = 'rgb({}, {}, {})'.format(round(color.red_float * 255),
                                         round(color.green_float * 255),
                                         round(color.blue_float * 255))
    button_pixbufs = []
    svg = NamedTemporaryFile(mode='w', suffix='.svg')
    for line in BUTTONS_SVG[1:-1]:
        svg.seek(0)
        svg.truncate()
        svg.writelines((
            BUTTONS_SVG[0],
            line.replace('#fff', color),
            BUTTONS_SVG[-1],
        ))
        svg.flush()
        os.fsync(svg.fileno())
        button_pixbufs.append(GdkPixbuf.Pixbuf.new_from_file(svg.name))
    svg.close()
    return button_pixbufs
Example #11
    def save(self, filename):
        """
        Serialize this RingData instance to disk.

        :param filename: File into which this instance should be serialized.
        """
        # Override the timestamp so that the same ring data creates
        # the same bytes on disk. This makes a checksum comparison a
        # good way to see if two rings are identical.
        #
        # This only works on Python 2.7; on 2.6, we always get the
        # current time in the gzip output.
        tempf = NamedTemporaryFile(dir=".", prefix=filename, delete=False)
        try:
            gz_file = GzipFile(filename,
                               mode='wb',
                               fileobj=tempf,
                               mtime=1300507380.0)
        except TypeError:
            gz_file = GzipFile(filename, mode='wb', fileobj=tempf)
        self.serialize_v1(gz_file)
        gz_file.close()
        tempf.flush()
        os.fsync(tempf.fileno())
        tempf.close()
        os.chmod(tempf.name, 0o644)
        os.rename(tempf.name, filename)
Example #12
    def save(self, filename, mtime=1300507380.0):
        """
        Serialize this RingData instance to disk.

        :param filename: File into which this instance should be serialized.
        :param mtime: time used to override mtime for gzip, default or None
                      if the caller wants to include time
        """
        # Override the timestamp so that the same ring data creates
        # the same bytes on disk. This makes a checksum comparison a
        # good way to see if two rings are identical.
        #
        # This only works on Python 2.7; on 2.6, we always get the
        # current time in the gzip output.
        tempf = NamedTemporaryFile(dir=".", prefix=filename, delete=False)
        if 'mtime' in inspect.getargspec(GzipFile.__init__).args:
            gz_file = GzipFile(filename, mode='wb', fileobj=tempf,
                               mtime=mtime)
        else:
            gz_file = GzipFile(filename, mode='wb', fileobj=tempf)
        self.serialize_v1(gz_file)
        gz_file.close()
        tempf.flush()
        os.fsync(tempf.fileno())
        tempf.close()
        os.chmod(tempf.name, 0o644)
        os.rename(tempf.name, filename)
Example #13
 def _atomic_write(self,
                   path,
                   mode="w",
                   newline=None,
                   sync_directory=True,
                   replace_fn=os.replace):
     directory = os.path.dirname(path)
     tmp = NamedTemporaryFile(
         mode=mode,
         dir=directory,
         delete=False,
         prefix=".Radicale.tmp-",
         newline=newline,
         encoding=None if "b" in mode else self._encoding)
     try:
         yield tmp
         tmp.flush()
         try:
             self._storage._fsync(tmp.fileno())
         except OSError as e:
             raise RuntimeError("Fsync'ing file %r failed: %s" %
                                (path, e)) from e
         tmp.close()
         replace_fn(tmp.name, path)
     except BaseException:
         tmp.close()
         os.remove(tmp.name)
         raise
     if sync_directory:
         self._storage._sync_directory(directory)
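This Radicale-style _atomic_write() is also a generator, so in the original class it is presumably wrapped with contextlib.contextmanager and consumed in a "with" block. A standalone, hedged sketch of the same idea with the class-specific fsync/encoding hooks dropped (atomic_write is a made-up name, not the project's API):

import os
from contextlib import contextmanager
from tempfile import NamedTemporaryFile

@contextmanager
def atomic_write(path, mode="w", encoding="utf-8"):
    # Hand the caller a temp file in the target directory; replace the
    # target only if the with-block finishes without raising.
    tmp = NamedTemporaryFile(
        mode=mode, dir=os.path.dirname(path) or ".", delete=False,
        prefix=".tmp-", encoding=None if "b" in mode else encoding)
    try:
        yield tmp
        tmp.flush()
        os.fsync(tmp.fileno())
        tmp.close()
        os.replace(tmp.name, path)   # rename over the target (atomic on POSIX)
    except BaseException:
        tmp.close()
        os.remove(tmp.name)
        raise

# Usage:
#     with atomic_write("settings.ini") as fp:
#         fp.write("key = value\n")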
Example #14
def get_pairwise_distances(seq_series, tree_file=None, seq_file=None):

    if seq_file is None:
        fasta_handle = NTF()
    else:
        # Write the FASTA to the caller-supplied path when one is given.
        fasta_handle = open(seq_file, 'w')
    if tree_file is None:
        tree_handle = NTF()
    else:
        tree_handle = open(tree_file, 'w')
    for (pat, visit), seq in zip(seq_series.index, seq_series.values):
        nheader = '%s-%s' % (pat, visit)
        fasta_handle.write('>%s\n%s\n' % (nheader, ''.join(seq)))
    fasta_handle.flush()
    os.fsync(fasta_handle.fileno())
    cmd = 'muscle -in %(ifile)s -tree2 %(treefile)s -gapopen -2.9'
    cmdlist = shlex.split(cmd % {
                                 'ifile':fasta_handle.name, 
                                 'treefile':tree_handle.name
                                 })
    t = check_call(cmdlist)
    tree = Phylo.read(open(tree_handle.name), 'newick')
    seq_names = tree.get_terminals()
    dmat = {}
    for p1, p2 in combinations(seq_names, 2):
        d = tree.distance(p1, p2)
        dmat[(p1.name, p2.name)] = d
        dmat[(p2.name, p1.name)] = d
        
    return dmat
Example #15
def download(clobber=False):
    if os.path.exists(_FILENAME) and not clobber:
        print("File '{0}' already exists".format(_FILENAME))
        return

    try:
        os.makedirs(os.path.dirname(_FILENAME))
    except os.error:
        pass

    # Fetch the remote file.
    logging.info("Downloading file from: '{0}'".format(_URL))
    r = urllib.request.Request(_URL)
    handler = urllib.request.urlopen(r)
    code = handler.getcode()
    assert int(code) == 200, "Download returned HTTP error {0}".format(code)

    # Atomically write to disk.
    # http://stackoverflow.com/questions/2333872/ \
    #        atomic-writing-to-file-with-python
    logging.info("Saving file to: '{0}'".format(_FILENAME))
    f = NamedTemporaryFile("wb", delete=False)
    f.write(handler.read())
    f.flush()
    os.fsync(f.fileno())
    f.close()
    shutil.move(f.name, _FILENAME)
Example #16
def fetch_one(year, month, day, hour):
    '''Fetch one archived timeline.'''
    local_fn = local_url.format(year=year, month=month, day=day, hour=hour)
    if os.path.exists(local_fn):
        print '%s exists.' % local_fn
        return local_fn
    else:
        url = archive_url.format(year=year, month=month, day=day, hour=hour)
        r = None
        try:
            r = requests.get(url, timeout=120)
            if r.status_code == 200:
                f = NamedTemporaryFile("wb", delete=False)
                f.write(r.content)
                f.flush()
                os.fsync(f.fileno())
                f.close()
                shutil.move(f.name, local_fn)
                print("Fetching %s successded." % url)
                return local_fn
            else:
                print("Fetching %s failed." % url)
        except:
            return None
        finally:
            if r is not None:
                r.close()
    return None
Example #17
    def __init__(self, video_generators, video_format):
        from textwrap import dedent
        from tempfile import NamedTemporaryFile
        from subprocess import CalledProcessError, check_output, STDOUT
        from random import randint
        self.lighttpd_pid = None
        self.video_generators = dict(video_generators)
        video_cache_dir = _gen_video_cache_dir()
        mkdir_p(video_cache_dir)
        lighttpd_config_file = NamedTemporaryFile(
            prefix='stbt-camera-lighttpd-', suffix='.conf', delete=False)
        pidfile = NamedTemporaryFile(prefix="stbt-camera-lighttpd-",
                                     suffix=".pidfile")
        # This is an awful way to start listening on a random port and not a
        # great way of tracking the sub-process.
        port = None
        while port is None:
            try:
                lighttpd_config_file.seek(0)
                lighttpd_config_file.truncate(0)
                try_port = randint(10000, 30000)
                lighttpd_config_file.write(
                    dedent("""\
                    # This file is generated automatically by stb-tester.
                    # DO NOT EDIT.
                    server.document-root = "%s"

                    server.port = %i

                    server.pid-file            = "%s"

                    mimetype.assign = (
                      ".png" => "image/png",
                      ".mp4" => "video/mp4",
                      ".ts" => "video/MP2T"
                    )""") % (video_cache_dir, try_port, pidfile.name))
                lighttpd_config_file.flush()
                check_output(['lighttpd', '-f', lighttpd_config_file.name],
                             close_fds=True,
                             stderr=STDOUT)
                port = try_port
            except CalledProcessError as e:
                if e.output.find('Address already in use') != -1:
                    pass
                else:
                    sys.stderr.write("lighttpd failed to start: %s\n" %
                                     e.output)
                    raise
        # lighttpd writes its pidfile out after forking rather than before
        # causing a race.  The real fix is to patch lighttpd to support socket
        # passing and then open the listening socket ourselves.
        while os.fstat(pidfile.fileno()).st_size == 0:
            sleep(0.1)
        self.lighttpd_pid = int(pidfile.read())
        self.base_url = "http://%s:%i/" % (_get_external_ip(), port)
        self.video_format = video_format
Example #18
 def _atomic_write(self, path, mode="w", newline=None):
     directory = os.path.dirname(path)
     tmp = NamedTemporaryFile(
         mode=mode, dir=directory, encoding=self.encoding,
         delete=False, prefix=".Radicale.tmp-", newline=newline)
     try:
         yield tmp
         if self.configuration.getboolean("storage", "fsync"):
             if os.name == "posix" and hasattr(fcntl, "F_FULLFSYNC"):
                 fcntl.fcntl(tmp.fileno(), fcntl.F_FULLFSYNC)
             else:
                 os.fsync(tmp.fileno())
         tmp.close()
         os.replace(tmp.name, path)
     except:
         tmp.close()
         os.remove(tmp.name)
         raise
     self._sync_directory(directory)
Example #19
    def __init__(self, video_generators, video_format):
        from textwrap import dedent
        from tempfile import NamedTemporaryFile
        from subprocess import CalledProcessError, check_output, STDOUT
        from random import randint
        self.lighttpd_pid = None
        self.video_generators = dict(video_generators)
        video_cache_dir = _gen_video_cache_dir()
        mkdir_p(video_cache_dir)
        lighttpd_config_file = NamedTemporaryFile(
            prefix='stbt-camera-lighttpd-', suffix='.conf', delete=False)
        pidfile = NamedTemporaryFile(
            prefix="stbt-camera-lighttpd-", suffix=".pidfile")
        # This is an awful way to start listening on a random port and not a
        # great way of tracking the sub-process.
        port = None
        while port is None:
            try:
                lighttpd_config_file.seek(0)
                lighttpd_config_file.truncate(0)
                try_port = randint(10000, 30000)
                lighttpd_config_file.write(dedent("""\
                    # This file is generated automatically by stb-tester.
                    # DO NOT EDIT.
                    server.document-root = "%s"

                    server.port = %i

                    server.pid-file            = "%s"

                    mimetype.assign = (
                      ".png" => "image/png",
                      ".mp4" => "video/mp4",
                      ".ts" => "video/MP2T"
                    )""") % (video_cache_dir, try_port, pidfile.name))
                lighttpd_config_file.flush()
                check_output(['lighttpd', '-f', lighttpd_config_file.name],
                             close_fds=True, stderr=STDOUT)
                port = try_port
            except CalledProcessError as e:
                if e.output.find('Address already in use') != -1:
                    pass
                else:
                    sys.stderr.write("lighttpd failed to start: %s\n" %
                                     e.output)
                    raise
        # lighttpd writes its pidfile out after forking rather than before
        # causing a race.  The real fix is to patch lighttpd to support socket
        # passing and then open the listening socket ourselves.
        while os.fstat(pidfile.fileno()).st_size == 0:
            sleep(0.1)
        self.lighttpd_pid = int(pidfile.read())
        self.base_url = "http://%s:%i/" % (_get_external_ip(), port)
        self.video_format = video_format
Example #20
def add_to_etc_hosts(ip_address, hosts=[]):
    """
    Add a line with the list of ``hosts`` for the given ``ip_address`` to
    ``/etc/hosts``.

    If a line with the provided ``ip_address`` already exist in the file, keep
    any existing hostnames and also add the otherwise not found new ``hosts``
    to the given line.
    """
    try:
        if not ip_address:
            log.error('No ip address provided when calling add_to_etc_hosts. Ignoring...')
            return
        etc_hosts = open('/etc/hosts', 'r')
        # Pull out all the lines from /etc/hosts that do not have an entry
        # matching a value in `hosts` argument
        tmp = NamedTemporaryFile()
        existing_line = None
        for l in etc_hosts:
            contained = False
            for hostname in hosts:
                if hostname in l.split():
                    contained = True
            if ip_address in l:
                contained = True
            if not contained:
                tmp.write(l)
            else:
                existing_line = l.strip()
        etc_hosts.close()
        if existing_line:
            ip_address = existing_line.split()[0]
            hostnames = existing_line.split()[1:]
            for hostname in hosts:
                if hostname not in hostnames:
                    hostnames.append(hostname)
            # Append new hosts to the existing line
            line = "{0} {1}\n".format(ip_address, ' '.join(hostnames))
        else:
            # Compose a new line with the hosts for the specified IP address
            line = '{0} {1}\n'.format(ip_address, ' '.join(hosts))
        tmp.write(line)
        # Make sure the changes are written to disk
        tmp.flush()
        os.fsync(tmp.fileno())
        # Swap out /etc/hosts
        run('cp /etc/hosts /etc/hosts.orig')
        run('cp {0} /etc/hosts'.format(tmp.name))
        run('chmod 644 /etc/hosts')
        log.debug("Added the following line to /etc/hosts: {0}".format(line))
    except (IOError, OSError) as e:
        log.error('Could not update /etc/hosts. {0}'.format(e))
Example #21
    def testReadDifferentCacheFileSize(self):

        preobj = complex(1, 2)
        pstobj = complex(42, 1)
        foo = NamedTemporaryFile(mode='wt')
        dump(preobj, foo)
        #FIXME: this shall be called by dump
        foo.flush()
        foo.seek(0, 0)

        st1 = os.fstat(foo.fileno())
        cache = NamedTemporaryFile(mode='wb')
        write_cache(preobj, foo.name, cache.name)

        dump(pstobj, foo)
        #FIXME: likewise
        foo.flush()
        st2 = os.fstat(foo.fileno())

        with self.assertRaisesRegexp(CacheMismatchError, "size mismatch"):
            ret = read_cache(foo.name, cache.name)
            self.assertIsNone(ret)
Example #22
File: IO.py Project: popazerty/12
def saveFile(filename, data, mode=0644):
	tmpFilename = None
	try:
		f = NamedTemporaryFile(prefix='.%s.' % path.basename(filename), dir=path.dirname(filename), delete=False)
		tmpFilename = f.name
		if isinstance(data, list):
			for x in data:
				f.write(x)
		else:
			f.write(data)
		f.flush()
		fsync(f.fileno())
		fchmod(f.fileno(), mode)
		f.close()
		rename(tmpFilename, filename)
	except Exception as e:
		print 'saveFile: failed to write to %s: %s' % (filename, e)
		if tmpFilename and path.exists(tmpFilename):
			unlink(tmpFilename)
		return False

	return True
Example #23
    def _reconfig_ssh(self) -> None:
        temp_files = []  # type: list
        ssh_options = []  # type: List[str]

        # ssh_config
        self.mgr.ssh_config_fname = self.mgr.ssh_config_file
        ssh_config = self.mgr.get_store("ssh_config")
        if ssh_config is not None or self.mgr.ssh_config_fname is None:
            if not ssh_config:
                ssh_config = DEFAULT_SSH_CONFIG
            f = NamedTemporaryFile(prefix='cephadm-conf-')
            os.fchmod(f.fileno(), 0o600)
            f.write(ssh_config.encode('utf-8'))
            f.flush()  # make visible to other processes
            temp_files += [f]
            self.mgr.ssh_config_fname = f.name
        if self.mgr.ssh_config_fname:
            self.mgr.validate_ssh_config_fname(self.mgr.ssh_config_fname)
            ssh_options += ['-F', self.mgr.ssh_config_fname]
        self.mgr.ssh_config = ssh_config

        # identity
        ssh_key = self.mgr.get_store("ssh_identity_key")
        ssh_pub = self.mgr.get_store("ssh_identity_pub")
        self.mgr.ssh_pub = ssh_pub
        self.mgr.ssh_key = ssh_key
        if ssh_key and ssh_pub:
            self.mgr.tkey = NamedTemporaryFile(prefix='cephadm-identity-')
            self.mgr.tkey.write(ssh_key.encode('utf-8'))
            os.fchmod(self.mgr.tkey.fileno(), 0o600)
            self.mgr.tkey.flush()  # make visible to other processes
            tpub = open(self.mgr.tkey.name + '.pub', 'w')
            os.fchmod(tpub.fileno(), 0o600)
            tpub.write(ssh_pub)
            tpub.flush()  # make visible to other processes
            temp_files += [self.mgr.tkey, tpub]
            ssh_options += ['-i', self.mgr.tkey.name]

        self.mgr._temp_files = temp_files
        if ssh_options:
            self.mgr._ssh_options = ' '.join(ssh_options)
        else:
            self.mgr._ssh_options = None

        if self.mgr.mode == 'root':
            self.mgr.ssh_user = self.mgr.get_store('ssh_user', default='root')
        elif self.mgr.mode == 'cephadm-package':
            self.mgr.ssh_user = '******'

        self._reset_cons()
Example #24
 def install_package(self, with_custom_actions=True):
     response_file = NamedTemporaryFile(mode='w')
     response_file.write("NO_CUSTOM_ACTIONS={}".format(int(not with_custom_actions)))
     response_file.flush()
     os.fsync(response_file.fileno())
     with prevent_access_to_pypi_servers(), prevent_access_to_gcc():
         zipped_package_name = self.get_package()
         unzipped_package_name = zipped_package_name[:-3]
         execute_assert_success('gunzip -c {} > {}'.format(zipped_package_name, unzipped_package_name), shell=True)
         execute_assert_success(['pkgadd',
                                 '-n',
                                 '-a', self.admin_file.name,
                                 '-r', response_file.name,
                                 '-d', unzipped_package_name,
                                 self.package_name])
Example #25
 def _atomic_write(self, path, mode="w", newline=None):
     directory = os.path.dirname(path)
     tmp = NamedTemporaryFile(
         mode=mode, dir=directory, encoding=self.encoding,
         delete=False, prefix=".Radicale.tmp-", newline=newline)
     try:
         yield tmp
         self._fsync(tmp.fileno())
         tmp.close()
         os.replace(tmp.name, path)
     except:
         tmp.close()
         os.remove(tmp.name)
         raise
     self._sync_directory(directory)
Example #26
def modify_file(filename, data):
    from tempfile import NamedTemporaryFile
    f = NamedTemporaryFile(dir=os.path.dirname(filename), delete=False)
    f.write(data)
    f.flush()
    os.fsync(f.fileno())

    # If filename exists, get its stats and apply them to the temp file
    try:
        stat = os.stat(filename)
        os.chown(f.name, stat.st_uid, stat.st_gid)
        os.chmod(f.name, stat.st_mode)
    except:
        pass

    os.rename(f.name, filename)
Example #27
def modify_file(filename, data):
    from tempfile import NamedTemporaryFile
    f = NamedTemporaryFile(dir=os.path.dirname(filename), delete=False)
    f.write(data)
    f.flush()
    os.fsync(f.fileno())

    # If filename exists, get its stats and apply them to the temp file
    try:
        stat = os.stat(filename)
        os.chown(f.name, stat.st_uid, stat.st_gid)
        os.chmod(f.name, stat.st_mode)
    except:
        pass

    os.rename(f.name, filename)
Example #28
def main():
    LOG.addHandler(logging.StreamHandler())

    parser = argparse.ArgumentParser(
        description="Convert polygons in an OBJ file to paths in SVG format.")
    parser.add_argument("--verbose",
                        "-v",
                        action="count",
                        default=0,
                        help="Print verbose information for debugging.")
    parser.add_argument("--quiet",
                        "-q",
                        action="count",
                        default=0,
                        help="Suppress warnings.")

    parser.add_argument("--unit",
                        "-u",
                        action="store",
                        default="",
                        help="Units, eg. “mm”, “px”.")

    parser.add_argument("obj", metavar="OBJ", help="Path to OBJ file.")

    parser.add_argument("svg",
                        metavar="SVG",
                        nargs="?",
                        help="Path to SVG file.")

    args = parser.parse_args()

    level = (logging.ERROR, logging.WARNING, logging.INFO,
             logging.DEBUG)[max(0, min(3, 1 + args.verbose - args.quiet))]
    LOG.setLevel(level)

    if args.svg:
        out = NamedTemporaryFile("w", encoding="utf=8", delete=False)
        os.fchmod(out.fileno(), os.stat(args.obj).st_mode)
    else:
        out = sys.stdout

    with open(args.obj, "r", encoding="utf-8") as obj:
        obj2svg(out, obj, unit=args.unit)

    if args.svg:
        out.close()
        shutil.move(out.name, args.svg)
Example #29
 def install_package(self, with_custom_actions=True):
     response_file = NamedTemporaryFile(mode='w')
     response_file.write("NO_CUSTOM_ACTIONS={}".format(
         int(not with_custom_actions)))
     response_file.flush()
     os.fsync(response_file.fileno())
     with prevent_access_to_pypi_servers(), prevent_access_to_gcc():
         zipped_package_name = self.get_package()
         unzipped_package_name = zipped_package_name[:-3]
         execute_assert_success('gunzip -c {} > {}'.format(
             zipped_package_name, unzipped_package_name),
                                shell=True)
         execute_assert_success([
             'pkgadd', '-n', '-a', self.admin_file.name, '-r',
             response_file.name, '-d', unzipped_package_name,
             self.package_name
         ])
Example #30
def _dump_item(path, item, value):
    """Dump "value" to a file called "item" in the directory "path".

    1. File permission should already be user-read-write-only on
       creation by mkstemp.
    2. The combination of os.fsync and os.rename should guarantee
       that we don't end up with an incomplete file.
    """
    mkdir_p(path)
    from tempfile import NamedTemporaryFile
    handle = NamedTemporaryFile(prefix=item, dir=path, delete=False)
    handle.write(value)
    os.fsync(handle.fileno())
    handle.close()
    fname = os.path.join(path, item)
    os.rename(handle.name, fname)
    print 'Generated %s' % fname
Example #31
class ShutUp(object):
    """
    A little helper class to keep GAUDI app mgr silent...
    """
    def __init__(self):
        self.save  = file( '/dev/null', 'w' )
        self.quiet = NamedTemporaryFile( suffix = ".msg.log" )
        os.dup2( sys.stdout.fileno(), self.save.fileno() )
        return
    
    def mute(self):
        os.dup2( self.quiet.fileno(), sys.stdout.fileno() )
        return
    
    def unMute(self):
        os.dup2( self.save.fileno(), sys.stdout.fileno() )
        return
Example #32
class ShutUp(object):
    """
    A little helper class to keep GAUDI app mgr silent...
    """
    def __init__(self):
        self.save = file('/dev/null', 'w')
        self.quiet = NamedTemporaryFile(suffix=".msg.log")
        os.dup2(sys.stdout.fileno(), self.save.fileno())
        return

    def mute(self):
        os.dup2(self.quiet.fileno(), sys.stdout.fileno())
        return

    def unMute(self):
        os.dup2(self.save.fileno(), sys.stdout.fileno())
        return
Example #33
    def fetch(self, clobber=False):
        """
        Download the data file from the server and save it locally. The local
        file will be saved in the directory specified by the ``data_root``
        property of the API.

        :param clobber:
            Should an existing local file be overwritten? (default: False)

        """
        # Check if the file already exists.
        filename = self.filename
        if os.path.exists(filename) and not clobber:
            logging.info("Found local file: '{0}'".format(filename))
            return self

        # Fetch the remote file.
        url = self.url
        logging.info("Downloading file from: '{0}'".format(url))
        r = urllib2.Request(url)
        handler = urllib2.urlopen(r)
        code = handler.getcode()
        if int(code) != 200:
            raise APIError(code, url, "")

        # Make sure that the root directory exists.
        try:
            os.makedirs(self.base_dir)
        except os.error:
            pass

        # Save the contents of the file.
        logging.info("Saving file to: '{0}'".format(filename))

        # Atomically write to disk.
        # http://stackoverflow.com/questions/2333872/ \
        #        atomic-writing-to-file-with-python
        f = NamedTemporaryFile("wb", delete=False)
        f.write(handler.read())
        f.flush()
        os.fsync(f.fileno())
        f.close()
        shutil.move(f.name, filename)

        return self
Example #34
class PkgInstaller(Installer):
    package_extension = 'pkg.gz'

    def __init__(self, *args, **kwargs):
        super(PkgInstaller, self).__init__(*args, **kwargs)
        admin_file_content = '\n'.join([
            'partial=nocheck', 'runlevel=nocheck', 'idepend=nocheck',
            'rdepend=nocheck', 'setuid=nocheck', 'action=nocheck',
            'partial=nocheck', 'conflict=nocheck', 'authentication=quit',
            'instance=overwrite', 'basedir=default'
        ])
        self.admin_file = NamedTemporaryFile(mode='w')
        self.admin_file.write(admin_file_content)
        self.admin_file.flush()
        os.fsync(self.admin_file.fileno())

    def is_product_installed(self):
        return 0 == execute(["pkginfo", self.package_name]).get_returncode()

    def install_package(self, with_custom_actions=True):
        response_file = NamedTemporaryFile(mode='w')
        response_file.write("NO_CUSTOM_ACTIONS={}".format(
            int(not with_custom_actions)))
        response_file.flush()
        os.fsync(response_file.fileno())
        with prevent_access_to_pypi_servers(), prevent_access_to_gcc():
            zipped_package_name = self.get_package()
            unzipped_package_name = zipped_package_name[:-3]
            execute_assert_success('gunzip -c {} > {}'.format(
                zipped_package_name, unzipped_package_name),
                                   shell=True)
            execute_assert_success([
                'pkgadd', '-n', '-a', self.admin_file.name, '-r',
                response_file.name, '-d', unzipped_package_name,
                self.package_name
            ])

    def uninstall_package(self, with_custom_actions=True):
        # with_custom_actions is actually ignored here. This flag is passed to the installer through the response file.
        # Luckily, the preremove script also gets this info (it's saved somewhere in the OS until the removal)
        execute_assert_success(
            ['pkgrm', '-n', '-a', self.admin_file.name, self.package_name],
            allowed_return_codes=[
                0,
            ])
Example #35
    def fetch(self, clobber=False):
        """
        Download the data file from the server and save it locally. The local
        file will be saved in the directory specified by the ``data_root``
        property of the API.

        :param clobber:
            Should an existing local file be overwritten? (default: False)

        """
        # Check if the file already exists.
        filename = self.filename
        if os.path.exists(filename) and not clobber:
            logging.info("Found local file: '{0}'".format(filename))
            return self

        # Fetch the remote file.
        url = self.url
        logging.info("Downloading file from: '{0}'".format(url))
        r = urllib2.Request(url)
        handler = urllib2.urlopen(r)
        code = handler.getcode()
        if int(code) != 200:
            raise APIError(code, url, "")

        # Make sure that the root directory exists.
        try:
            os.makedirs(self.base_dir)
        except os.error:
            pass

        # Save the contents of the file.
        logging.info("Saving file to: '{0}'".format(filename))

        # Atomically write to disk.
        # http://stackoverflow.com/questions/2333872/ \
        #        atomic-writing-to-file-with-python
        f = NamedTemporaryFile("wb", delete=False)
        f.write(handler.read())
        f.flush()
        os.fsync(f.fileno())
        f.close()
        shutil.move(f.name, filename)

        return self
Example #36
def main():
    log_geo = logging.getLogger("geo")
    for log in LOG, log_geo:
        log.addHandler(logging.StreamHandler())
        color_log(log)

    parser = argparse.ArgumentParser(
        description="Convert paths in an SVG file to polygons in OBJ format.")
    parser.add_argument("--verbose",
                        "-v",
                        action="count",
                        default=0,
                        help="Print verbose information for debugging.")
    parser.add_argument("--quiet",
                        "-q",
                        action="count",
                        default=0,
                        help="Suppress warnings.")

    parser.add_argument("svg", metavar="SVG", help="Path to SVG file.")

    parser.add_argument("obj",
                        metavar="OBJ",
                        nargs="?",
                        help="Path to OBJ file.")

    args = parser.parse_args()

    level = (logging.ERROR, logging.WARNING, logging.INFO,
             logging.DEBUG)[max(0, min(3, 1 + args.verbose - args.quiet))]
    for log in LOG, log_geo:
        log.setLevel(level)

    if args.obj:
        out = NamedTemporaryFile("w", encoding="utf=8", delete=False)
        os.fchmod(out.fileno(), os.stat(args.svg).st_mode)
    else:
        out = sys.stdout

    with open(args.svg, "r", encoding="utf-8") as svg:
        svg2obj(out, svg)

    if args.obj:
        out.close()
        shutil.move(out.name, args.obj)
Example #37
 def _atomic_write(self, path, mode="w", newline=None):
     directory = os.path.dirname(path)
     tmp = NamedTemporaryFile(mode=mode,
                              dir=directory,
                              encoding=self.encoding,
                              delete=False,
                              prefix=".Radicale.tmp-",
                              newline=newline)
     try:
         yield tmp
         self._fsync(tmp.fileno())
         tmp.close()
         os.replace(tmp.name, path)
     except:
         tmp.close()
         os.remove(tmp.name)
         raise
     self._sync_directory(directory)
Example #38
    def _dump_item(self, path, item, value):
        """Dump "value" to a file called "item" in the directory "path".

        1. File permission should already be user-read-write-only on
           creation by mkstemp.
        2. The combination of os.fsync and os.rename should guarantee
           that we don't end up with an incomplete file.
        """
        mkdir_p(path)
        from tempfile import NamedTemporaryFile
        handle = NamedTemporaryFile(prefix=item, dir=path, delete=False)
        handle.write(value)
        os.fsync(handle.fileno())
        handle.close()
        fname = os.path.join(path, item)
        os.rename(handle.name, fname)
        if cylc.flags.verbose:
            print 'Generated %s' % fname
Example #39
class PkgInstaller(Installer):
    package_extension = 'pkg.gz'

    def __init__(self, *args, **kwargs):
        super(PkgInstaller, self).__init__(*args, **kwargs)
        admin_file_content = '\n'.join(['partial=nocheck',
                                    'runlevel=nocheck',
                                    'idepend=nocheck',
                                    'rdepend=nocheck',
                                    'setuid=nocheck',
                                    'action=nocheck',
                                    'partial=nocheck',
                                    'conflict=nocheck',
                                    'authentication=quit',
                                    'instance=overwrite',
                                    'basedir=default'])
        self.admin_file = NamedTemporaryFile(mode='w')
        self.admin_file.write(admin_file_content)
        self.admin_file.flush()
        os.fsync(self.admin_file.fileno())

    def is_product_installed(self):
        return 0 == execute(["pkginfo", self.package_name]).get_returncode()

    def install_package(self, with_custom_actions=True):
        response_file = NamedTemporaryFile(mode='w')
        response_file.write("NO_CUSTOM_ACTIONS={}".format(int(not with_custom_actions)))
        response_file.flush()
        os.fsync(response_file.fileno())
        with prevent_access_to_pypi_servers(), prevent_access_to_gcc():
            zipped_package_name = self.get_package()
            unzipped_package_name = zipped_package_name[:-3]
            execute_assert_success('gunzip -c {} > {}'.format(zipped_package_name, unzipped_package_name), shell=True)
            execute_assert_success(['pkgadd',
                                    '-n',
                                    '-a', self.admin_file.name,
                                    '-r', response_file.name,
                                    '-d', unzipped_package_name,
                                    self.package_name])

    def uninstall_package(self, with_custom_actions=True):
        # with_custom_actions is actually ignored here. This flag is passed to the installer through the response file.
        # Luckily, the preremove script also gets this info (it's saved somewhere in the OS until the removal)
        execute_assert_success(['pkgrm', '-n', '-a', self.admin_file.name, self.package_name], allowed_return_codes=[0,])
Example #40
class HTTPData:
    def __init__(self, size):
        self.size = size
        self.current_size = 0

        if self.size > 100 * 1024:
            self.data_stream = NamedTemporaryFile(mode="w+b")
        else:
            self.data_stream = io.BytesIO()

    def io_size(self):
        if isinstance(self.data_stream, io.BytesIO):
            return self.data_stream.getbuffer().nbytes
        else:
            return os.stat(self.data_stream.name).st_size

    def completed(self):
        return self.io_size() == self.size

    def feed(self, data):
        if self.io_size() + len(data) > self.size:
            raise HTTPError(413)
        self.data_stream.write(data)

        if self.completed():
            self.seek()

    def close(self):
        self.data_stream.close()

    def __str__(self):
        return "HTTPData(size=" + str(os.fstat(self.data_stream.fileno()).st_size) + "/" + \
               str(self.size) + \
               ", completed=" + str(self.completed()) + ")"

    def read(self, size=-1):
        return self.data_stream.read(size)

    def seek(self, n=0):
        self.data_stream.seek(n)

    def json(self):
        wrapper_file = codecs.getreader('utf-8')(self.data_stream)
        return json.load(wrapper_file)
Example #41
def fetch(year, month, day, n):
    kwargs = {"year": year, "month": month, "day": day, "n": n}
    local_fn = filename.format(**kwargs)
    # Skip if the file exists.
    if os.path.exists(local_fn):
        return
    # Download the remote file.
    remote = url.format(**kwargs)
    r = requests.get(remote)
    if r.status_code == requests.codes.ok:
        # Atomically write to disk.
        # http://stackoverflow.com/questions/2333872/ \
        #        atomic-writing-to-file-with-python
        f = NamedTemporaryFile("wb", delete=False)
        f.write(r.content)
        f.flush()
        os.fsync(f.fileno())
        f.close()
        shutil.move(f.name, local_fn)
Example #42
def get_pairwise_distances(npalign, tree_file=None, seq_file=None):

    if seq_file is None:
        fasta_handle = NTF(mode="w")
    else:
        fasta_handle = open("/tmp/tmp.fasta", "w")
    if tree_file is None:
        tree_handle = NTF()
    else:
        tree_handle = open(tree_file, "w")
    seq_names = fasta_write(fasta_handle, npalign)

    fasta_handle.flush()
    os.fsync(fasta_handle.fileno())
    cmd = "muscle -in %(ifile)s -tree2 %(treefile)s -gapopen -2.9"
    cmdlist = shlex.split(cmd % {"ifile": fasta_handle.name, "treefile": tree_handle.name})

    try:
        t = check_output(cmdlist)
        tree = Phylo.read(open(tree_handle.name), "newick")
    except CalledProcessError:
        # print('Could not make tree')
        return None
    except ValueError:
        # print('no tree present')
        return None
    except RuntimeError:
        return None

    seq_names = sorted(tree.get_terminals(), key=lambda x: x.name)
    net = Phylo.to_networkx(tree)
    dmat = networkx.all_pairs_shortest_path(net)
    terminals = tree.get_terminals()
    dists = np.zeros((npalign.shape[0], npalign.shape[0]))
    for t1, t2 in product(terminals, terminals):
        path = dmat[t1][t2]
        dist = sum(c.branch_length for c in path)
        i1 = int(t1.name.split("-")[1])
        i2 = int(t2.name.split("-")[1])
        dists[i1, i2] = dist

    return dists
Example #43
    def _dump_item(path, item, value):
        """Dump "value" to a file called "item" in the directory "path".

        1. File permission should already be user-read-write-only on
           creation by mkstemp.
        2. The combination of os.fsync and os.rename should guarantee
           that we don't end up with an incomplete file.
        """
        os.makedirs(path, exist_ok=True)
        from tempfile import NamedTemporaryFile
        handle = NamedTemporaryFile(prefix=item, dir=path, delete=False)
        try:
            handle.write(value.encode())
        except AttributeError:
            handle.write(value)
        os.fsync(handle.fileno())
        handle.close()
        fname = os.path.join(path, item)
        os.rename(handle.name, fname)
        LOG.debug('Generated %s', fname)
Example #44
    def _dump_item(path, item, value):
        """Dump "value" to a file called "item" in the directory "path".

        1. File permission should already be user-read-write-only on
           creation by mkstemp.
        2. The combination of os.fsync and os.rename should guarantee
           that we don't end up with an incomplete file.
        """
        os.makedirs(path, exist_ok=True)
        from tempfile import NamedTemporaryFile
        handle = NamedTemporaryFile(prefix=item, dir=path, delete=False)
        try:
            handle.write(value.encode())
        except AttributeError:
            handle.write(value)
        os.fsync(handle.fileno())
        handle.close()
        fname = os.path.join(path, item)
        os.rename(handle.name, fname)
        LOG.debug('Generated %s', fname)
Example #45
def remove_from_etc_hosts(hostname):
    """
    Remove ``hostname`` from ``/etc/hosts``
    """
    try:
        etc_hosts = open("/etc/hosts", "r")
        tmp = NamedTemporaryFile()
        for l in etc_hosts:
            if not hostname in l:
                tmp.write(l)
        etc_hosts.close()

        # make sure changes are written to disk
        tmp.flush()
        os.fsync(tmp.fileno())
        # swap out /etc/hosts
        run("cp /etc/hosts /etc/hosts.orig")
        run("cp {0} /etc/hosts".format(tmp.name))
        run("chmod 644 /etc/hosts")
    except (IOError, OSError) as e:
        log.error("could not update /etc/hosts. {0}".format(e))
Example #46
    def _save_fetched_file(self, data):
        # Make sure that the root directory exists.
        try:
            os.makedirs(self.base_dir)
        except os.error:
            pass

        # Save the contents of the file.
        filename = self.filename
        logging.info("Saving file to: '{0}'".format(filename))

        # Atomically write to disk.
        # http://stackoverflow.com/questions/2333872/ \
        #        atomic-writing-to-file-with-python
        f = NamedTemporaryFile("wb", delete=False)
        f.write(data)
        f.flush()
        os.fsync(f.fileno())
        f.close()
        shutil.move(f.name, filename)

        return self
Example #47
 def _atomic_write(self, path, mode="w", newline=None, sync_directory=True,
                   replace_fn=os.replace):
     directory = os.path.dirname(path)
     tmp = NamedTemporaryFile(
         mode=mode, dir=directory, delete=False, prefix=".Radicale.tmp-",
         newline=newline, encoding=None if "b" in mode else self._encoding)
     try:
         yield tmp
         tmp.flush()
         try:
             self._fsync(tmp.fileno())
         except OSError as e:
             raise RuntimeError("Fsync'ing file %r failed: %s" %
                                (path, e)) from e
         tmp.close()
         replace_fn(tmp.name, path)
     except BaseException:
         tmp.close()
         os.remove(tmp.name)
         raise
     if sync_directory:
         self._sync_directory(directory)
Example #48
    def _save_fetched_file(self, data):
        # Make sure that the root directory exists.
        try:
            os.makedirs(self.base_dir)
        except os.error:
            pass

        # Save the contents of the file.
        filename = self.filename
        logging.info("Saving file to: '{0}'".format(filename))

        # Atomically write to disk.
        # http://stackoverflow.com/questions/2333872/ \
        #        atomic-writing-to-file-with-python
        f = NamedTemporaryFile("wb", delete=False)
        f.write(data)
        f.flush()
        os.fsync(f.fileno())
        f.close()
        shutil.move(f.name, filename)

        return self
Example #49
def add_to_etc_hosts(hostname, ip_address):
    """
    Add ``hostname`` and its ``ip_address`` to ``/etc/hosts``
    """
    try:
        etc_hosts = open("/etc/hosts", "r")
        tmp = NamedTemporaryFile()
        for l in etc_hosts:
            if not hostname in l:
                tmp.write(l)
        etc_hosts.close()
        # add a line for the new hostname
        tmp.write("{0} {1}\n".format(ip_address, hostname))

        # make sure changes are written to disk
        tmp.flush()
        os.fsync(tmp.fileno())
        # swap out /etc/hosts
        run("cp /etc/hosts /etc/hosts.orig")
        run("cp {0} /etc/hosts".format(tmp.name))
        run("chmod 644 /etc/hosts")
    except (IOError, OSError) as e:
        log.error("could not update /etc/hosts. {0}".format(e))
Example #50
def remove_from_etc_hosts(host):
    """Remove ``host`` (hostname or IP) from ``/etc/hosts``."""
    if not host:
        log.debug("Cannot remove empty host from /etc/hosts")
        return
    try:
        log.debug("Removing host {0} from /etc/hosts".format(host))
        etc_hosts = open('/etc/hosts', 'r')
        tmp = NamedTemporaryFile()
        for l in etc_hosts:
            if host not in l:
                tmp.write(l)
        etc_hosts.close()

        # make sure changes are written to disk
        tmp.flush()
        os.fsync(tmp.fileno())
        # swap out /etc/hosts
        run('cp /etc/hosts /etc/hosts.orig')
        run('cp {0} /etc/hosts'.format(tmp.name))
        run('chmod 644 /etc/hosts')
    except (IOError, OSError) as e:
        log.error('could not update /etc/hosts. {0}'.format(e))
Example #51
def ShellCommandResults(CmdLine, Opt):
    """ Execute the command, returning the output content """
    file_list = NamedTemporaryFile(delete=False)
    filename = file_list.name
    Results = []

    returnValue = 0
    try:
        subprocess.check_call(args=shlex.split(CmdLine), stderr=subprocess.STDOUT, stdout=file_list)
    except subprocess.CalledProcessError as err_val:
        file_list.close()
        if not Opt.silent:
            sys.stderr.write("ERROR : %d : %s\n" % (err_val.returncode, err_val.__str__()))
            if os.path.exists(filename):
                sys.stderr.write("      : Partial results may be in this file: %s\n" % filename)
            sys.stderr.flush()
        returnValue = err_val.returncode

    except IOError as err_val:
        (errno, strerror) = err_val.args
        file_list.close()
        if not Opt.silent:
            sys.stderr.write("I/O ERROR : %s : %s\n" % (str(errno), strerror))
            sys.stderr.write("ERROR : this command failed : %s\n" % CmdLine)
            if os.path.exists(filename):
                sys.stderr.write("      : Partial results may be in this file: %s\n" % filename)
            sys.stderr.flush()
        returnValue = errno

    except OSError as err_val:
        (errno, strerror) = err_val.args
        file_list.close()
        if not Opt.silent:
            sys.stderr.write("OS ERROR : %s : %s\n" % (str(errno), strerror))
            sys.stderr.write("ERROR : this command failed : %s\n" % CmdLine)
            if os.path.exists(filename):
                sys.stderr.write("      : Partial results may be in this file: %s\n" % filename)
            sys.stderr.flush()
        returnValue = errno

    except KeyboardInterrupt:
        file_list.close()
        if not Opt.silent:
            sys.stderr.write("ERROR : Command terminated by user : %s\n" % CmdLine)
            if os.path.exists(filename):
                sys.stderr.write("      : Partial results may be in this file: %s\n" % filename)
            sys.stderr.flush()
        returnValue = 1

    finally:
        if not file_list.closed:
            file_list.flush()
            os.fsync(file_list.fileno())
            file_list.close()

    if os.path.exists(filename):
        fd_ = open(filename, 'r')
        Results = fd_.readlines()
        fd_.close()
        os.unlink(filename)

    if returnValue > 0:
        return returnValue

    return Results
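
The function returns either the command's output as a list of lines or, on failure, the non-zero return code, so callers need to check the type of the result. A minimal call sketch, assuming only that ``Opt`` exposes the ``silent`` attribute the function reads (``argparse.Namespace`` stands in for the real options object):

from argparse import Namespace

output = ShellCommandResults("ls -l", Namespace(silent=False))
if isinstance(output, list):
    for line in output:
        print(line.rstrip())
else:
    print("command failed with return code %d" % output)
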
Example #52
0
    def get_file(self, user, suite, path, path_in_tar=None, mode=None):
        """Returns file information / content or a cherrypy response."""
        f_name = self._get_user_suite_dir(user, suite, path)
        conf = ResourceLocator.default().get_conf()
        view_size_max = int(conf.get_value(
            ["rose-bush", "view-size-max"], self.VIEW_SIZE_MAX))
        if path_in_tar:
            tar_f = tarfile.open(f_name, "r:gz")
            try:
                tar_info = tar_f.getmember(path_in_tar)
            except KeyError:
                raise cherrypy.HTTPError(404)
            f_size = tar_info.size
            handle = tar_f.extractfile(path_in_tar)
            if handle.read(2) == "#!":
                mime = self.MIME_TEXT_PLAIN
            else:
                mime = mimetypes.guess_type(
                    urllib.pathname2url(path_in_tar))[0]
            handle.seek(0)
            if (mode == "download" or
                    f_size > view_size_max or
                    mime and
                    (not mime.startswith("text/") or mime.endswith("html"))):
                temp_f = NamedTemporaryFile()
                f_bsize = os.fstatvfs(temp_f.fileno()).f_bsize
                while True:
                    bytes_ = handle.read(f_bsize)
                    if not bytes_:
                        break
                    temp_f.write(bytes_)
                cherrypy.response.headers["Content-Type"] = mime
                try:
                    return cherrypy.lib.static.serve_file(temp_f.name, mime)
                finally:
                    temp_f.close()
            text = handle.read()
        else:
            f_size = os.stat(f_name).st_size
            if open(f_name).read(2) == "#!":
                mime = self.MIME_TEXT_PLAIN
            else:
                mime = mimetypes.guess_type(urllib.pathname2url(f_name))[0]
            if not mime:
                mime = self.MIME_TEXT_PLAIN
            if (mode == "download" or
                    f_size > view_size_max or
                    mime and
                    (not mime.startswith("text/") or mime.endswith("html"))):
                cherrypy.response.headers["Content-Type"] = mime
                return cherrypy.lib.static.serve_file(f_name, mime)
            text = open(f_name).read()
        try:
            if mode in [None, "text"]:
                text = jinja2.escape(text)
            lines = [unicode(line) for line in text.splitlines()]
        except UnicodeDecodeError:
            if path_in_tar:
                handle.seek(0)
                # file closed by cherrypy
                return cherrypy.lib.static.serve_fileobj(
                    handle, self.MIME_TEXT_PLAIN)
            else:
                return cherrypy.lib.static.serve_file(
                    f_name, self.MIME_TEXT_PLAIN)
        else:
            if path_in_tar:
                handle.close()
        name = path
        if path_in_tar:
            name = "log/" + path_in_tar
        job_entry = None
        if name.startswith("log/job"):
            names = self.bush_dao.parse_job_log_rel_path(name)
            if len(names) == 4:
                cycle, task, submit_num, _ = names
                entries = self.bush_dao.get_suite_job_entries(
                    user, suite, [cycle], [task],
                    None, None, None, None, None)[0]
                for entry in entries:
                    if entry["submit_num"] == int(submit_num):
                        job_entry = entry
                        break
        if fnmatch(os.path.basename(path), "rose*.conf"):
            file_content = "rose-conf"
        else:
            file_content = self.bush_dao.is_conf(path)

        return lines, job_entry, file_content, f_name
Example #53
0
def download_database(data_root=None, clobber=False):
    """
    Download a SQLite database containing the limb darkening coefficients
    computed by `Claret & Bloemen (2011)
    <http://adsabs.harvard.edu/abs/2011A%26A...529A..75C>`_. The table is
    available online on `Vizier
    <http://vizier.cfa.harvard.edu/viz-bin/VizieR?-source=J/A+A/529/A75>`_.
    Using the ASCII data table, the SQLite database was generated with the
    following Python commands:

    .. code-block:: python

        import sqlite3
        import numpy as np

        with sqlite3.connect("ldcoeffs.db") as conn:
            c = conn.cursor()
            c.execute("CREATE TABLE IF NOT EXISTS claret11 "
                    "(teff REAL, logg REAL, feh REAL, veloc REAL, mu1 REAL, "
                    "mu2 REAL)")
            data = np.loadtxt("claret11.txt", skiprows=59, delimiter="|",
                            usecols=range(1, 7))
            c.executemany("INSERT INTO claret11 (logg,teff,feh,veloc,mu1,mu2) "
                        "VALUES (?,?,?,?,?,?)", data)

    """
    # Figure out the local filename for the database.
    if data_root is None:
        data_root = KPLR_ROOT
    filename = os.path.join(data_root, DB_FILENAME)

    if not clobber and os.path.exists(filename):
        return filename

    # Make sure that the target directory exists.
    try:
        os.makedirs(data_root)
    except os.error:
        pass

    # MAGIC: specify the URL for the remote file.
    url = "http://bbq.dfm.io/~dfm/ldcoeffs.db"

    # Fetch the database from the server.
    logging.info("Downloading file from: '{0}'".format(url))
    r = urllib.request.Request(url)
    handler = urllib.request.urlopen(r)
    code = handler.getcode()
    if int(code) != 200:
        raise RuntimeError("Couldn't download file from {0}. Returned: {1}"
                           .format(url, code))

    # Save the contents of the file.
    logging.info("Saving file to: '{0}'".format(filename))

    # Atomically write to disk.
    # http://stackoverflow.com/questions/2333872/ \
    #        atomic-writing-to-file-with-python
    f = NamedTemporaryFile("wb", delete=False)
    f.write(handler.read())
    f.flush()
    os.fsync(f.fileno())
    f.close()
    shutil.move(f.name, filename)

    return filename
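
Once the file is on disk, the coefficients can be read back with plain ``sqlite3``. A minimal lookup sketch, assuming the ``claret11`` schema shown in the docstring above (the nearest-match query and the stellar parameters are purely illustrative):

import sqlite3

db_path = download_database()
with sqlite3.connect(db_path) as conn:
    cur = conn.cursor()
    # pick the row whose (teff, logg) lies closest to the requested values
    cur.execute(
        "SELECT mu1, mu2 FROM claret11 "
        "ORDER BY (teff - ?) * (teff - ?) + (logg - ?) * (logg - ?) "
        "LIMIT 1",
        (5778.0, 5778.0, 4.44, 4.44))
    mu1, mu2 = cur.fetchone()
    print("quadratic limb-darkening coefficients:", mu1, mu2)
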
Example #54
0
class Bap(object):
    def __init__(self, server={}):
        if isinstance(server, dict):
            self.__dict__.update(spawn_server(**server))
        else:
            self.url = server

        self.last_id = 0
        for attempt in range(RETRIES):
            try:
                self.capabilities = self.call({
                    'init': {
                        'version': '0.1'
                    }
                }).next()['capabilities']
                break
            except Exception:
                if attempt + 1 == RETRIES:
                    raise
                else:
                    time.sleep(0.1 * attempt)

        if not "capabilities" in self.__dict__:
            raise RuntimeError("Failed to connect to BAP server")
        self.data = {}
        self.temp = NamedTemporaryFile('rw+b', prefix="bap-")

    def insns(self, src, **kwargs):
        req = {'resource': src}
        req.update(kwargs)
        res = self.call({'get_insns': req})
        for msg in res:
            if 'error' in msg:
                err = Error(msg)
                if err.severity in DEBUG_LEVEL:
                    print(err)
            else:
                return (parse_insn(js) for js in msg['insns'])

    def close(self):
        self.__exit__()

    def load_file(self, name):
        return self._load_resource({'load_file': {'url': 'file://' + name}})

    def get_resource(self, name):
        return self.call({'get_resource': name}).next()

    def load_chunk(self, data, **kwargs):
        kwargs.setdefault('url', self.mmap(data))
        kwargs.setdefault('arch', 'i386')
        kwargs.setdefault('addr', 0)
        addr = kwargs['addr']
        if isinstance(addr, str):
            addr = int(addr, 0)
        kwargs['addr'] = '0x{0:x}'.format(addr)

        return self._load_resource({'load_memory_chunk': kwargs})

    def __exit__(self):
        if 'server' in self.__dict__:
            self.server.terminate()
        self.temp.close()

    def dumps(self, dic):
        self.last_id += 1
        dic['id'] = Id(self.last_id)
        return json.dumps(dic, default=str)

    def call(self, data):
        if isinstance(data, dict):
            return jsons(request.post(self.url, data=self.dumps(data)))
        else:
            gen = (self.dumps(msg) for msg in data)
            return jsons(request.post(self.url, data=gen))

    def mmap(self, data):
        url = "mmap://{0}?offset=0&length={1}".format(self.temp.name,
                                                      len(data))
        # the chunk is written through a fixed 4096-byte mapping, so the
        # data passed in must fit within a single 4 KiB page
        os.ftruncate(self.temp.fileno(), 4096)
        mm = mmap(self.temp.fileno(), 4096)
        mm.write(data)
        mm.close()
        return url

    def _load_resource(self, res):
        rep = self.call(res).next()
        if 'error' in rep:
            raise ServerError(rep)
        return Id(rep['resource'])
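
A minimal driver sketch for the class above, assuming a bap server is already reachable at an illustrative URL (passing a dict, or nothing, would instead spawn a local server via ``spawn_server``); the binary path is illustrative as well:

bap = Bap("http://127.0.0.1:8080")
try:
    res = bap.load_file("/bin/ls")   # illustrative binary
    insns = bap.insns(res)
    if insns is not None:
        for insn in insns:
            print(insn)
finally:
    bap.close()
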
Example #55
0
class CallbackFileWrapper(object):
    """
    Small wrapper around a fp object which will tee everything read into a
    buffer, and when that file is closed it will execute a callback with the
    contents of that buffer.

    All attributes are proxied to the underlying file object.

    This class uses members with a double underscore (__) leading prefix so as
    not to accidentally shadow an attribute.

    The data is stored in a temporary file until it is all available.  As long
    as the temporary files directory is disk-based (sometimes it's a
    memory-backed ``tmpfs`` on Linux), data will be unloaded to disk if memory
    pressure is high.  For small files the disk usually won't be used at all,
    it'll all be in the filesystem memory cache, so there should be no
    performance impact.
    """
    def __init__(self, fp, callback):
        self.__buf = NamedTemporaryFile("rb+", delete=True)
        self.__fp = fp
        self.__callback = callback

    def __getattr__(self, name):
        # The vagaries of garbage collection mean that self.__fp is
        # not always set.  Using __getattribute__ and the mangled
        # name [0] looks up the attribute value and raises an
        # AttributeError when it doesn't exist.  This stops getattr
        # from recursing infinitely in the case where self.__fp
        # hasn't been set.
        #
        # [0] https://docs.python.org/2/reference/expressions.html#atom-identifiers
        fp = self.__getattribute__("_CallbackFileWrapper__fp")
        return getattr(fp, name)

    def __is_fp_closed(self):
        try:
            return self.__fp.fp is None

        except AttributeError:
            pass

        try:
            return self.__fp.closed

        except AttributeError:
            pass

        # We just don't cache it then.
        # TODO: Add some logging here...
        return False

    def _close(self):
        if self.__callback:
            if self.__buf.tell() == 0:
                # Empty file:
                result = b""
            else:
                # Return the data without actually loading it into memory,
                # relying on Python's buffer API and mmap(). mmap() just gives
                # a view directly into the filesystem's memory cache, so it
                # doesn't result in duplicate memory use.
                self.__buf.seek(0, 0)
                result = memoryview(
                    mmap.mmap(self.__buf.fileno(), 0, access=mmap.ACCESS_READ))
            self.__callback(result)

        # We assign this to None here, because otherwise we can get into
        # really tricky problems where the CPython interpreter deadlocks
        # because the callback is holding a reference to something which
        # has a __del__ method. Setting this to None breaks the cycle
        # and allows the garbage collector to do its thing normally.
        self.__callback = None

        # Closing the temporary file releases memory and frees disk space.
        # Important when caching big files.
        self.__buf.close()

    def read(self, amt=None):
        data = self.__fp.read(amt)
        if data:
            # We may be dealing with b'', a sign that things are over:
            # it's passed e.g. after we've already closed self.__buf.
            self.__buf.write(data)
        if self.__is_fp_closed():
            self._close()

        return data

    def _safe_read(self, amt):
        data = self.__fp._safe_read(amt)
        if amt == 2 and data == b"\r\n":
            # urllib executes this read to toss the CRLF at the end
            # of the chunk.
            return data

        self.__buf.write(data)
        if self.__is_fp_closed():
            self._close()

        return data
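
The docstring above describes the tee-into-a-buffer behaviour; here is a minimal sketch of how such a wrapper could be exercised. ``FakeResponse`` and ``cache_body`` are illustrative stand-ins: the wrapper only needs an object whose ``fp``/``closed`` state reports exhaustion, which in real use is an http response object.

import io

class FakeResponse(object):
    """Mimics the small part of an http response the wrapper inspects."""
    def __init__(self, data):
        self._buf = io.BytesIO(data)
        self.closed = False

    def read(self, amt=None):
        data = self._buf.read(amt)
        if not data:
            self.closed = True   # lets __is_fp_closed() fire the callback
        return data

def cache_body(body):
    # ``body`` is the full content read through the wrapper (a memoryview)
    print("caching %d bytes" % len(body))

wrapped = CallbackFileWrapper(FakeResponse(b"example response body"), cache_body)
while wrapped.read(8):
    pass
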
Example #56
0
def save_mapping(mapping):
    """Save the mapping (and tags for autocomplete) back to the disk.

    This function assumes that the mapping is already validated.

    :param mapping: the mapping object to save
    """
    # Create a reverse mapping (this is what gets saved to the disk)
    reverse_mapping = defaultdict(set)
    for tag, paths in mapping.items():
        for path in paths:
            reverse_mapping[path].add(tag)
    # Save the reverse mapping to a temporary file
    temp_mapping_file = NamedTemporaryFile(
        mode='w', dir=CFG_DIR, prefix='mapping.', delete=False
    )
    temp_mapping_file.close()
    try:
        with io.open(temp_mapping_file.name, 'wt') as temp_mapping_file:
            temp_mapping_file.write('\n'.join(
                path + ',' + ','.join(sorted(reverse_mapping[path])) + ','
                for path in sorted(reverse_mapping)
            ))
            temp_mapping_file.flush()
            os.fsync(temp_mapping_file.fileno())
    except (IOError, OSError) as temp_mapping_write_error:
        rm_files(temp_mapping_file.name)
        abort(
            message='Failed to write to {name}: {msg}\n'.format(
                name=temp_mapping_file.name,
                msg=temp_mapping_write_error.strerror
            ),
            exit_code=temp_mapping_write_error.errno
        )
    # Save the tags (for tab-completion) to a temporary file
    temp_tags_file = NamedTemporaryFile(
        mode='w', prefix='tags.', dir=CFG_DIR, delete=False
    )
    temp_tags_file.close()
    try:
        with io.open(temp_tags_file.name, 'wt') as temp_tags_file:
            temp_tags_file.write('\n'.join(sorted(mapping)))
            temp_tags_file.flush()
            os.fsync(temp_tags_file.fileno())
    except (IOError, OSError) as temp_tags_write_error:
        rm_files(temp_mapping_file.name, temp_tags_file.name)
        abort(
            message='Failed to write to {name}: {msg}\n'.format(
                name=temp_tags_file.name,
                msg=temp_tags_write_error.strerror
            ),
            exit_code=temp_tags_write_error.errno
        )
    try:  # Overwrite the mapping file with the temporary one
        shutil.move(temp_mapping_file.name, MAPPING_FILE)
    except (IOError, OSError) as mapping_rename_error:
        rm_files(temp_mapping_file.name, temp_tags_file.name)
        abort(
            message='Failed to move {src} to {dest}: {msg}\n'.format(
                src=temp_mapping_file.name,
                dest=MAPPING_FILE,
                msg=mapping_rename_error.strerror
            ),
            exit_code=mapping_rename_error.errno
        )
    try:  # Overwrite the tags file with the temporary one
        shutil.move(temp_tags_file.name, TAGS_FILE)
    except (IOError, OSError) as tags_rename_error:
        rm_files(temp_tags_file.name)
        abort(
            message='Failed to move {src} to {dest}: {msg}\n'.format(
                src=temp_tags_file.name,
                dest=TAGS_FILE,
                msg=tags_rename_error.strerror
            ),
            exit_code=tags_rename_error.errno
        )
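
The on-disk format written above is one ``path,tag1,tag2,...,`` line per path (note the trailing comma and the sorted tags). A minimal sketch of reading it back into the tag-to-paths mapping the rest of the code works with; ``load_mapping`` is a hypothetical helper, and it assumes paths contain no commas, just as the writer does:

from collections import defaultdict
import io

def load_mapping(mapping_file=MAPPING_FILE):
    """Rebuild the tag -> set-of-paths mapping from the saved file."""
    mapping = defaultdict(set)
    with io.open(mapping_file, 'rt') as handle:
        for raw_line in handle:
            fields = [field for field in raw_line.rstrip('\n').split(',') if field]
            if not fields:
                continue
            path, tags = fields[0], fields[1:]
            for tag in tags:
                mapping[tag].add(path)
    return mapping
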
Example #57
0
def GetHiResImage(ID):
    '''
    Queries the Palomar Observatory Sky Survey II catalog to
    obtain a higher resolution optical image of the star with EPIC number
    :py:obj:`ID`.

    '''

    # Get the TPF info
    client = kplr.API()
    star = client.k2_star(ID)
    k2ra = star.k2_ra
    k2dec = star.k2_dec
    tpf = star.get_target_pixel_files()[0]
    with tpf.open() as f:
        k2wcs = WCS(f[2].header)
        shape = np.array(f[1].data.field('FLUX'), dtype='float64')[0].shape

    # Get the POSS URL
    hou = int(k2ra * 24 / 360.)
    mnt = int(60 * (k2ra * 24 / 360. - hou))
    sec = 60 * (60 * (k2ra * 24 / 360. - hou) - mnt)
    ra = '%02d+%02d+%.2f' % (hou, mnt, sec)
    sgn = '' if np.sign(k2dec) >= 0 else '-'
    deg = int(np.abs(k2dec))
    mnt = int(60 * (np.abs(k2dec) - deg))
    sec = 3600 * (np.abs(k2dec) - deg - mnt / 60.0)
    dec = '%s%02d+%02d+%.1f' % (sgn, deg, mnt, sec)
    url = 'https://archive.stsci.edu/cgi-bin/dss_search?v=poss2ukstu_red&' + \
          'r=%s&d=%s&e=J2000&h=3&w=3&f=fits&c=none&fov=NONE&v3=' % (ra, dec)

    # Query the server
    r = urllib.request.Request(url)
    handler = urllib.request.urlopen(r)
    code = handler.getcode()
    if int(code) != 200:
        # Unavailable
        return None
    data = handler.read()

    # Atomically write to a temp file
    f = NamedTemporaryFile("wb", delete=False)
    f.write(data)
    f.flush()
    os.fsync(f.fileno())
    f.close()

    # Now open the POSS fits file
    with pyfits.open(f.name) as ff:
        img = ff[0].data

    # Map POSS pixels onto K2 pixels
    xy = np.empty((img.shape[0] * img.shape[1], 2))
    z = np.empty(img.shape[0] * img.shape[1])
    pwcs = WCS(f.name)
    k = 0
    for i in range(img.shape[0]):
        for j in range(img.shape[1]):
            ra, dec = pwcs.all_pix2world(float(j), float(i), 0)
            xy[k] = k2wcs.all_world2pix(ra, dec, 0)
            z[k] = img[i, j]
            k += 1

    # Resample
    grid_x, grid_y = np.mgrid[-0.5:shape[1] - 0.5:0.1, -0.5:shape[0] - 0.5:0.1]
    resampled = griddata(xy, z, (grid_x, grid_y), method='cubic')

    # Rotate to align with K2 image. Not sure why, but it is necessary
    resampled = np.rot90(resampled)

    return resampled
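
A short usage sketch; the EPIC number is illustrative, and both the kplr query and the POSS download require network access:

img = GetHiResImage(201367065)
if img is not None:
    import matplotlib.pyplot as plt
    plt.imshow(img, origin='lower', cmap='gray')
    plt.show()
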