Example #1
def WriteRootCaCerts(client):
  """Write the internal root CA certs to a file."""
  logging.debug('WriteRootCaCerts')
  managed_installs_dir = munkicommon.pref('ManagedInstallDir')
  certs_dir = os.path.join(managed_installs_dir, 'certs')
  cert_file_path = os.path.join(certs_dir, 'ca.pem')
  if not os.path.isdir(certs_dir):
    os.makedirs(certs_dir)

  tmpfile = tempfile.NamedTemporaryFile(
      dir=os.path.dirname(os.path.realpath(cert_file_path)))
  tmpfile.write(client.GetSystemRootCACertChain())
  tmpfile.flush()  # make sure the cert data is on disk before linking

  logging.debug('WriteRootCaCerts: writing to tmp %s', tmpfile.name)

  try:
    os.unlink(cert_file_path)
  except OSError:
    pass

  try:
    os.link(tmpfile.name, cert_file_path)
  except OSError as e:
    tmpfile.close()
    raise client.Error('Error writing root CA certs: %s' % str(e))
Example #2
    def revert(self):
        if self.__state != self.__STATE['APPLIED']:
            raise Error("Attempt to revert configuration from state %s" % self.__state)

        for child in self.__children:
            child.revert()

        log("Reverting changes to %s configuration" % self.__path)

        # Remove existing new configuration
        if os.access(self.__newpath, os.F_OK):
            os.unlink(self.__newpath)

        # Revert new configuration.
        if os.access(self.__path, os.F_OK):
            os.link(self.__path, self.__newpath)
            os.unlink(self.__path)

        # Revert to old configuration.
        if os.access(self.__oldpath, os.F_OK):
            os.link(self.__oldpath, self.__path)
            os.unlink(self.__oldpath)

        # Leave .*.xapi-new as an aid to debugging.

        self.__state = self.__STATE['REVERTED']
Example #3
    def acquire(self, timeout=None):
        try:
            open(self.unique_name, "wb").close()
        except IOError:
            raise LockFailed("failed to create %s" % self.unique_name)

        end_time = time.time()
        if timeout is not None and timeout > 0:
            end_time += timeout

        while True:
            # Try and create a hard link to it.
            try:
                os.link(self.unique_name, self.lock_file)
            except OSError:
                # Link creation failed.  Maybe we've double-locked?
                nlinks = os.stat(self.unique_name).st_nlink
                if nlinks == 2:
                    # The original link plus the one I created == 2.  We're
                    # good to go.
                    return
                else:
                    # Otherwise the lock creation failed.
                    if timeout is not None and time.time() > end_time:
                        os.unlink(self.unique_name)
                        if timeout > 0:
                            raise LockTimeout
                        else:
                            raise AlreadyLocked
                    time.sleep(timeout / 10 if timeout else 0.1)
            else:
                # Link creation succeeded.  We're good to go.
                return
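The trick above relies on os.link() being atomic even on NFS: the link either
appears or it doesn't, so it doubles as a cross-process mutex. A minimal
standalone sketch of the same pattern (try_hardlink_lock and its naming scheme
are illustrative, not part of the lockfile library):

import os
import socket

def try_hardlink_lock(lock_file):
    # Create a file under a name unique to this host/process, then try to
    # hard-link it to the shared lock name; os.link() succeeds atomically
    # or raises OSError.
    unique_name = '%s.%s.%d' % (lock_file, socket.gethostname(), os.getpid())
    open(unique_name, 'wb').close()
    try:
        os.link(unique_name, lock_file)
        return True
    except OSError:
        # Two links (unique_name plus lock_file) means this process already
        # holds the lock; anything else is contention from another process.
        return os.stat(unique_name).st_nlink == 2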
Example #4
    def _copy_game_file(self, src, dst):
        ifs = self.open(src, prefer_path=True)
        if not ifs:
            raise NotFoundError("Could not find file for {0}".format(src))

        if os.path.exists(dst):
            print("removing existing file", dst)
            os.remove(dst)

        if isinstance(ifs, str):
            # we got a direct path
            try:
                os.link(ifs, dst)
                return
            except Exception:
                # couldn't link file, normal on non-unix systems and also
                # if the files are on different file systems
                pass
            shutil.copyfile(ifs, dst)
        else:
            dst_partial = dst + ".partial"
            with open(dst_partial, "wb") as ofs:
                # ifs_sha1 = hashlib.sha1()
                while True:
                    # noinspection PyUnresolvedReferences
                    data = ifs.read(65536)  # read in chunks rather than all at once
                    if not data:
                        break
                    # ifs_sha1.update(data)
                    ofs.write(data)
            print("rename file from", dst_partial, "to", dst)
            os.rename(dst_partial, dst)
Example #5
    def apply(self):
        if self.__state != self.__STATE['NOT-APPLIED']:
            raise Error("Attempt to apply configuration from state %s" % self.__state)

        for child in self.__children:
            child.apply()

        log("Applying changes to %s configuration" % self.__path)

        # Remove previous backup.
        if os.access(self.__oldpath, os.F_OK):
            os.unlink(self.__oldpath)

        # Save current configuration.
        if os.access(self.__path, os.F_OK):
            os.link(self.__path, self.__oldpath)
            os.unlink(self.__path)

        # Apply new configuration.
        assert os.path.exists(self.__newpath)
        os.link(self.__newpath, self.__path)

        # Remove temporary file.
        os.unlink(self.__newpath)

        self.__state = self.__STATE['APPLIED']
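Taken together with revert() above, apply() forms a small transaction: the
current file survives as a hard link at the backup path until the staged file
is installed, so every step can be undone. A condensed sketch of the same
sequence, assuming the path/newpath/oldpath layout used by this class:

import os

def apply_config(path, newpath, oldpath):
    # Back up the live file via a hard link, then install the staged one.
    # Only directory entries change; no data is copied.
    if os.access(oldpath, os.F_OK):
        os.unlink(oldpath)       # drop the previous backup
    if os.access(path, os.F_OK):
        os.link(path, oldpath)   # keep the current file as the backup
        os.unlink(path)
    os.link(newpath, path)       # install the staged file
    os.unlink(newpath)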
Example #6
    def test_clean_stale_with_threads_active(self):
        """verify locks for multiple threads are cleaned up """

        # create sentinels for four threads in our process, and a 'dead'
        # process
        sentinel1 = self._create_sentinel(self.hostname, self.pid, 'Default-1')
        sentinel2 = self._create_sentinel(self.hostname, self.pid, 'Default-2')
        sentinel3 = self._create_sentinel(self.hostname, self.pid, 'Default-3')
        sentinel4 = self._create_sentinel(self.hostname, self.pid, 'Default-4')
        sentinel5 = self._create_sentinel(self.hostname, self.dead_pid,
                                          'Default-1')

        os.link(sentinel1, self.lock_file)

        utils.cleanup_file_locks()

        self.assertTrue(os.path.exists(sentinel1))
        self.assertTrue(os.path.exists(sentinel2))
        self.assertTrue(os.path.exists(sentinel3))
        self.assertTrue(os.path.exists(sentinel4))
        self.assertTrue(os.path.exists(self.lock_file))
        self.assertFalse(os.path.exists(sentinel5))

        os.unlink(sentinel1)
        os.unlink(sentinel2)
        os.unlink(sentinel3)
        os.unlink(sentinel4)
        os.unlink(self.lock_file)
Example #7
 def hardLink(self, source, target):
     try:
         os.link(source, target)
         return True
     except OSError as e:
         qDebug("FileSystem.hardLink - %s: '%s' -> '%s'" % (e.strerror, source, target))
         return False
Example #8
def tst_link(mnt_dir):
    name1 = pjoin(mnt_dir, name_generator())
    name2 = pjoin(mnt_dir, name_generator())
    shutil.copyfile(TEST_FILE, name1)
    assert filecmp.cmp(name1, TEST_FILE, False)

    fstat1 = os.lstat(name1)
    assert fstat1.st_nlink == 1

    os.link(name1, name2)

    fstat1 = os.lstat(name1)
    fstat2 = os.lstat(name2)
    for attr in ('st_mode', 'st_dev', 'st_uid', 'st_gid',
                 'st_size', 'st_atime', 'st_mtime', 'st_ctime'):
        assert getattr(fstat1, attr) == getattr(fstat2, attr)
    assert os.path.basename(name2) in os.listdir(mnt_dir)
    assert filecmp.cmp(name1, name2, False)

    os.unlink(name2)

    assert os.path.basename(name2) not in os.listdir(mnt_dir)
    with pytest.raises(FileNotFoundError):
        os.lstat(name2)

    os.unlink(name1)
Example #9
def fly_CopterAVC(viewerip=None, map=False):
    '''fly ArduCopter in SIL for AVC2013 mission
    '''
    global homeloc

    if TARGET != 'sitl':
        util.build_SIL('ArduCopter', target=TARGET)

    home = "%f,%f,%u,%u" % (AVCHOME.lat, AVCHOME.lng, AVCHOME.alt, AVCHOME.heading)
    sil = util.start_SIL('ArduCopter', wipe=True, model='heli', home=home, speedup=speedup_default)
    mavproxy = util.start_MAVProxy_SIL('ArduCopter', options='--sitl=127.0.0.1:5501 --out=127.0.0.1:19550')
    mavproxy.expect('Received [0-9]+ parameters')

    # setup test parameters
    mavproxy.send("param load %s/Helicopter.parm\n" % testdir)
    mavproxy.expect('Loaded [0-9]+ parameters')

    # reboot with new parameters
    util.pexpect_close(mavproxy)
    util.pexpect_close(sil)

    sil = util.start_SIL('ArduCopter', model='heli', home=home, speedup=speedup_default)
    options = '--sitl=127.0.0.1:5501 --out=127.0.0.1:19550 --streamrate=5'
    if viewerip:
        options += ' --out=%s:14550' % viewerip
    if map:
        options += ' --map'
    mavproxy = util.start_MAVProxy_SIL('ArduCopter', options=options)
    mavproxy.expect(r'Telemetry log: (\S+)')
    logfile = mavproxy.match.group(1)
    print("LOGFILE %s" % logfile)

    buildlog = util.reltopdir("../buildlogs/CopterAVC-test.tlog")
    print("buildlog=%s" % buildlog)
    if os.path.exists(buildlog):
        os.unlink(buildlog)
    try:
        os.link(logfile, buildlog)
    except Exception:
        pass

    # the received parameters can come before or after the ready to fly message
    mavproxy.expect(['Received [0-9]+ parameters', 'Ready to FLY'])
    mavproxy.expect(['Received [0-9]+ parameters', 'Ready to FLY'])

    util.expect_setup_callback(mavproxy, expect_callback)

    expect_list_clear()
    expect_list_extend([sil, mavproxy])

    if map:
        mavproxy.send('map icon 40.072467969730496 -105.2314389590174\n')
        mavproxy.send('map icon 40.072600990533829 -105.23146100342274\n')        

    # get a mavlink connection going
    try:
        mav = mavutil.mavlink_connection('127.0.0.1:19550', robust_parsing=True)
    except Exception as msg:
        print("Failed to start mavlink connection on 127.0.0.1:19550: %s" % msg)
        raise
Example #10
        def test_dont_copy_file_onto_link_to_itself(self):
            # bug 851123.
            os.mkdir(TESTFN)
            src = os.path.join(TESTFN, 'cheese')
            dst = os.path.join(TESTFN, 'shop')
            try:
                f = open(src, 'w')
                f.write('cheddar')
                f.close()

                os.link(src, dst)
                self.assertRaises(shutil.Error, shutil.copyfile, src, dst)
                self.assertEqual(open(src,'r').read(), 'cheddar')
                os.remove(dst)

                # Using `src` here would mean we end up with a symlink pointing
                # to TESTFN/TESTFN/cheese, while it should point at
                # TESTFN/cheese.
                os.symlink('cheese', dst)
                self.assertRaises(shutil.Error, shutil.copyfile, src, dst)
                self.assertEqual(open(src,'r').read(), 'cheddar')
                os.remove(dst)
            finally:
                try:
                    shutil.rmtree(TESTFN)
                except OSError:
                    pass
Example #11
def link(src, dst):
    if os.name == 'nt':
        import ctypes

        # Python 2 shim: os.link() only gained native Windows support in Python 3.2.
        if ctypes.windll.kernel32.CreateHardLinkW(unicode(dst), unicode(src), 0) == 0:
            raise ctypes.WinError()
    else:
        os.link(src, dst)
Example #12
    def import_files(cls, from_dir, to_dir, unlink_after):
        """
        Import audio files from from_dir into to_dir
        """
        audio_files = cls.get_filelist(from_dir)

        for audio_file in audio_files:
            from_path = audio_file.filename
            to_path = os.path.join(to_dir, audio_file.destination)

            makedirs(os.path.split(to_path)[0])

            try:
                if on_same_partition(from_path, os.path.dirname(to_path)):
                    # Hardlink here because we may not have write permission on the
                    #  source, so a move would fail, but we still want it put into the library.
                    # It also saves space if we're not deleting the file after
                    os.link(from_path, to_path)
                else:
                    shutil.copy(from_path, to_path)
            except OSError as e:
                if e.errno == errno.EEXIST:
                    print "Warning: Target file already exists: %s. Skipping" % to_path
                else:
                    raise

            if unlink_after:
                try:
                    os.unlink(from_path)
                except OSError as e:
                    print "Warning: Caught error trying to unlink %s: %s. Ignoring" % (from_path, str(e))
                    pass
Example #13
def Main(argv):
    # Check number of parameters
    if len(argv) != 3:
        print("The function requires two parameters to be passed in.")
        return
    
    # Check parameters
    dirA = argv[1]
    if not os.path.isdir(dirA):
        print("The first parameter should be an existing directory.")
        return
    
    dirB = argv[2]
    if not os.path.isdir(dirB):
        print("The second parameter should be an existing directory.")
        return
       
    # Build a dictionary with key-value pair { file name - True }
    nameDict = { }
    
    for fileName in os.listdir(dirB):
        filePath = os.path.join(dirB, fileName)
        if os.path.isfile(filePath):
            nameDict[fileName] = True

    for fileName in os.listdir(dirA):
        filePathA = os.path.join(dirA, fileName)
        filePathB = os.path.join(dirB, fileName)
        if not nameDict.get(fileName, False):
            os.link(filePathA, filePathB)
        elif os.path.getmtime(filePathA) > os.path.getmtime(filePathB):
            os.remove(filePathB)
            os.link(filePathA, filePathB)
Example #14
    def extract_item(self, item, restore_attrs=True, dry_run=False):
        if dry_run:
            if b'chunks' in item:
                for _ in self.pipeline.fetch_many([c[0] for c in item[b'chunks']], is_preloaded=True):
                    pass
            return

        dest = self.cwd
        if item[b'path'].startswith('/') or item[b'path'].startswith('..'):
            raise Exception('Path should be relative and local')
        path = os.path.join(dest, item[b'path'])
        # Attempt to remove existing files, ignore errors on failure
        try:
            st = os.lstat(path)
            if stat.S_ISDIR(st.st_mode):
                os.rmdir(path)
            else:
                os.unlink(path)
        except OSError:
            pass
        mode = item[b'mode']
        if stat.S_ISDIR(mode):
            if not os.path.exists(path):
                os.makedirs(path)
            if restore_attrs:
                self.restore_attrs(path, item)
        elif stat.S_ISREG(mode):
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            # Hard link?
            if b'source' in item:
                source = os.path.join(dest, item[b'source'])
                if os.path.exists(path):
                    os.unlink(path)
                os.link(source, path)
            else:
                with open(path, 'wb') as fd:
                    ids = [c[0] for c in item[b'chunks']]
                    for data in self.pipeline.fetch_many(ids, is_preloaded=True):
                        fd.write(data)
                    fd.flush()
                    self.restore_attrs(path, item, fd=fd.fileno())
        elif stat.S_ISFIFO(mode):
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            os.mkfifo(path)
            self.restore_attrs(path, item)
        elif stat.S_ISLNK(mode):
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            source = item[b'source']
            if os.path.exists(path):
                os.unlink(path)
            os.symlink(source, path)
            self.restore_attrs(path, item, symlink=True)
        elif stat.S_ISCHR(mode) or stat.S_ISBLK(mode):
            os.mknod(path, item[b'mode'], item[b'rdev'])
            self.restore_attrs(path, item)
        else:
            raise Exception('Unknown archive item type %r' % item[b'mode'])
Example #15
def copy(src, dst, hardlink=False, keep_symlink=True):
    assert not P.isdir(src), 'Source path must not be a dir'
    assert not P.isdir(dst), 'Destination path must not be a dir'

    if keep_symlink and P.islink(src):
        assert not P.isabs(readlink(src)), 'Cannot copy symlink that points to an absolute path (%s)' % src
        logger.debug('%8s %s -> %s' % ('symlink', src, dst))
        if P.exists(dst):
            assert readlink(dst) == readlink(src), 'Refusing to retarget already-exported symlink %s' % dst
        else:
            symlink(readlink(src), dst)
        return

    if P.exists(dst):
        assert hash_file(src) == hash_file(dst), 'Refusing to overwrite already exported dst %s' % dst
    else:
        if hardlink:
            try:
                link(src, dst)
                logger.debug('%8s %s -> %s' % ('hardlink', src, dst))
                return
            except OSError as o:
                if o.errno != errno.EXDEV: # Invalid cross-device link, not an error, fall back to copy
                    raise

        logger.debug('%8s %s -> %s' % ('copy', src, dst))
        shutil.copy2(src, dst)
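The hardlink-then-copy fallback above is a common idiom: hard links cannot
cross filesystems, and the kernel reports that as errno EXDEV. A small
self-contained version of the same idea (link_or_copy is an illustrative
name, not from the code above):

import errno
import os
import shutil

def link_or_copy(src, dst):
    # Try the cheap hard link first; fall back to a real copy only for
    # cross-device errors, and let everything else propagate.
    try:
        os.link(src, dst)
    except OSError as e:
        if e.errno != errno.EXDEV:
            raise
        shutil.copy2(src, dst)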
Example #16
    def download(self, image_href, image_file):
        """Downloads image to specified location.

        :param image_href: Image reference.
        :param image_file: File object to write data to.
        :raises: exception.ImageRefValidationFailed if source image file
            doesn't exist.
        :raises: exception.ImageDownloadFailed if exceptions were raised while
            writing to file or creating hard link.
        """
        source_image_path = self.validate_href(image_href)
        dest_image_path = image_file.name
        local_device = os.stat(dest_image_path).st_dev
        try:
            # We should have read and write access to source file to create
            # hard link to it.
            if (local_device == os.stat(source_image_path).st_dev and
                    os.access(source_image_path, os.R_OK | os.W_OK)):
                image_file.close()
                os.remove(dest_image_path)
                os.link(source_image_path, dest_image_path)
            else:
                filesize = os.path.getsize(source_image_path)
                with open(source_image_path, 'rb') as input_img:
                    sendfile.sendfile(image_file.fileno(), input_img.fileno(),
                                      0, filesize)
        except Exception as e:
            raise exception.ImageDownloadFailed(image_href=image_href,
                                                reason=e)
Example #17
 def paste_hardlink(self):
     for f in self.copy_buffer:
         try:
             new_name = next_available_filename(f.basename)
             link(f.path, join(getcwd(), new_name))
         except Exception as x:
             self.notify(x)
Example #18
 def rename_no_overwrite(source_path, dest_path):
     # link will fail with EEXIST if there is already something at dest_path.
     os.link(source_path, dest_path)
     try:
         os.unlink(source_path)
     except EnvironmentError:
         reraise(UnableToUnlinkReplacementError)
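As the comment notes, os.link() fails with EEXIST when the destination already
exists, which is what turns link-then-unlink into a rename that refuses to
overwrite. A sketch of the bare pattern without the tahoe-lafs error helpers:

import os

def rename_no_overwrite_sketch(source_path, dest_path):
    # Linking first means an existing dest_path raises OSError(EEXIST)
    # before anything is modified; only then is the old name dropped.
    os.link(source_path, dest_path)
    os.unlink(source_path)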
Example #19
 def create_test_files(self):
     """Create a minimal test case including all supported file types
     """
     # File
     self.create_regular_file('empty', size=0)
     # 2600-01-01 > 2**64 ns
     os.utime('input/empty', (19880895600, 19880895600))
     self.create_regular_file('file1', size=1024 * 80)
     self.create_regular_file('flagfile', size=1024)
     # Directory
     self.create_regular_file('dir2/file2', size=1024 * 80)
     # File owner
     os.chown('input/file1', 100, 200)
     # File mode
     os.chmod('input/file1', 0o7755)
     os.chmod('input/dir2', 0o555)
     # Block device
     os.mknod('input/bdev', 0o600 | stat.S_IFBLK,  os.makedev(10, 20))
     # Char device
     os.mknod('input/cdev', 0o600 | stat.S_IFCHR,  os.makedev(30, 40))
     # Hard link
     os.link(os.path.join(self.input_path, 'file1'),
             os.path.join(self.input_path, 'hardlink'))
     # Symlink
     os.symlink('somewhere', os.path.join(self.input_path, 'link1'))
     if xattr.is_enabled():
         xattr.setxattr(os.path.join(self.input_path, 'file1'), 'user.foo', b'bar')
         xattr.setxattr(os.path.join(self.input_path, 'link1'), 'user.foo_symlink', b'bar_symlink', follow_symlinks=False)
     # FIFO node
     os.mkfifo(os.path.join(self.input_path, 'fifo1'))
     if has_lchflags:
         os.lchflags(os.path.join(self.input_path, 'flagfile'), stat.UF_NODUMP)
Example #20
    def hard_link_dir(self, path, destination, existing):
        if not os.path.exists(destination):
            try:
                os.makedirs(destination)
            except OSError as e:
                # Raised when it already exists, but are there other cases?
                log.debug('Failed to create destination dir %s: %s', destination, e)
        # 'recursively' traverse and hard link
        working_dir = os.getcwd()
        os.chdir(path)  # change working dir to make dir joins easier
        for root, dirs, files in os.walk('.'):
            dst_dir = os.path.abspath(os.path.join(destination, root))
            for d in dirs:
                try:
                    os.mkdir(os.path.join(dst_dir, d))
                except OSError as e:
                    # Raised when it already exists, but are there other cases?
                    log.debug('Failed to create subdir %s: %s', d, e)
            for f in files:
                src_file = os.path.join(root, f)
                dst_file = os.path.join(dst_dir, f)
                log.debug('Hardlinking %s to %s', src_file, dst_file)
                try:
                    os.link(src_file, dst_file)
                except OSError as e:
                    log.debug('Failed to create hardlink for file %s: %s', f, e)
                    if existing == 'fail':
                        raise  # reraise to fail the entry in the calling function

        os.chdir(working_dir)
Example #21
 def test_subfiles(self):
   p = lambda x: os.path.join(self.tmp, x)
   def create(name, text):
     a = open(p(name), 'w')
     a.write(text)
     a.flush()
     return a
   def check(name, text):
     path = next(notified)
     self.assertEqual(path, p(name))
     with open(path) as f:
       self.assertEqual(f.read(), text)
   a = create('first', 'blah')
   a.write('...')
   notified = inotify.subfiles(self.tmp)
   check('first', 'blah')
   os.link(p(a.name), p('a hard link')) # ignored
   b = create('other', 'hello')
   b.close()
   check('other', 'hello')
   c = create('last', '!!!')
   a.close()
   check('first', 'blah...')
   os.rename(p(a.name), p(b.name))
   check('other', 'blah...')
   c.close()
   check('last', '!!!')
Example #22
def my_link(src, dest):
    dest = destpath(src, dest)
    try:
        os.link(src, dest)
    except OSError as e:
        e.filename = src + " -> " + dest
        raise
Example #23
 def opensocket(self):
     sockname = self.options.sockname
     tempname = "%s.%d" % (sockname, os.getpid())
     self.unlink_quietly(tempname)
     while 1:
         sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
         try:
             sock.bind(tempname)
             os.chmod(tempname, 0o700)
             try:
                 os.link(tempname, sockname)
                 break
             except os.error:
                 # Lock contention, or stale socket.
                 self.checkopen()
                 # Stale socket -- delete, sleep, and try again.
                 msg = "Unlinking stale socket %s; sleep 1" % sockname
                 sys.stderr.write(msg + "\n")
                 sys.stderr.flush() # just in case
                 self.logger.warn(msg)
                 self.unlink_quietly(sockname)
                 sock.close()
                 time.sleep(1)
                 continue
         finally:
             self.unlink_quietly(tempname)
     sock.listen(1)
     sock.setblocking(0)
     self.mastersocket = sock
Example #24
def unify(src, dst):
    """ Unify destination and source """

    # NOTE: at this point it is assumed files are unifiable

    # get a temp file name
    dir = os.path.split(src)[0]
    tmp_handle, tmp_path = tempfile.mkstemp(dir=dir)
    os.close(tmp_handle)

    # rename the destination, in case we need to back out
    os.rename(dst, tmp_path)

    # link source to destination
    try:
        os.link(src, dst)
    except OSError:
        # back out
        print("Could not link %s -> %s, backing out" % (src, dst))
        try:
            if os.path.exists(dst):
                os.unlink(dst)
            os.rename(tmp_path, dst)
        except OSError:
            print("Could not back out!!! the destination file is still there as", tmp_path)
            raise

    # done, remove the temp file
    os.unlink(tmp_path)
Example #25
def render_task(arg):
    """
    This is the worker task run on a sub-process,
    it needs TARG and j2env set properly (done inside render())
    """
    fn, root = arg
    src = join(root, fn)
    dst = normpath(join("..", TARG, src))
    lvl = root.count(os.sep)
    #log("processing/f: %s" % src, nl=False)
    if fn.endswith(".html"):
        # we ignore html files starting with "_" (e.g. language specific templates)
        # except the ones in doc, which might be __init__.html
        if fn.startswith("_") and fn != "__init__.html":
            return
        # assume it's a template and process it
        tmpl = j2env.get_template(src)
        c = fn.rsplit(".", 1)[0].split("-", 1)[0]
        content = tmpl.render(level=lvl, filename=fn, category=c)
        with open(dst, "wb") as output:
            output.write(content.encode("utf-8"))
            output.write(b"\n")
    elif islink(src):
        # we have a symlink, copy it
        # log("SYMLINK/files %s" % src)
        if islink(dst):
            os.remove(dst)
        os.symlink(os.readlink(src), dst)
    else:
        # all other files, hardlink them
        # log("hardlink %s -> %s" % (src, dst))
        if exists(dst):
            os.remove(dst)
        os.link(src, dst)
Example #26
    def dump(self,outPath=None,overwrite=False,saveBackup=True):
        """Write the file back to the disk.
        If outPath is given, the file is saved under that name.

        If no outPath is given and overwrite is True, the file is
        saved under the same name and path that it was loaded from;
        if saveBackup is True a backup copy (.bak) is made first.
        """
        if isinstance(outPath, str):
            if os.path.exists(outPath) and saveBackup:
                bakName = outPath+'.bak'
                if os.path.exists(bakName):
                    os.unlink(bakName)
                os.link(outPath,bakName)
                os.unlink(outPath)
            open(outPath, 'w').write(str(self))
        else:
            if overwrite:
                outPath=self.rawFile.name
                if not self.rawFile.closed:
                    self.rawFile.close()
                self.dump(outPath=outPath,overwrite=overwrite,saveBackup=saveBackup)
                
            else:
                warnings.warn("dump: must specify outPath or set overwrite to True")
Example #27
def copyfile(src, dst, overwrite=False, link=False):
    """
    Copy a file from `src` to `dst`; return `True` if the copy was
    actually made.  If `overwrite` is `False` (default), an existing
    destination entry is left unchanged and `False` is returned.

    If `link` is `True`, an attempt at hard-linking is done first;
    failing that, we copy the source file onto the destination
    one. Permission bits and modification times are copied as well.

    If `dst` is a directory, a file with the same basename as `src` is
    created (or overwritten) in the directory specified.
    """
    if os.path.isdir(dst):
        dst = os.path.join(dst, os.path.basename(src))
    if os.path.exists(dst) and not overwrite:
        return False
    if samefile(src, dst):
        return False
    try:
        dstdir = os.path.dirname(dst)
        if not os.path.exists(dstdir):
            os.makedirs(dstdir)
        if link:
            try:
                os.link(src, dst)
            except OSError:
                # retry with normal copy
                shutil.copy2(src, dst)
        else:
            shutil.copy2(src, dst)
        return True
    except (IOError, OSError):
        # tail truncated in the source excerpt; minimal assumed completion,
        # reporting failure as "no copy made" per the docstring
        return False
Example #28
 def create_test_files(self):
     """Create a minimal test case including all supported file types
     """
     # File
      self.create_regular_file('empty', size=0)
      self.create_regular_file('file1', size=1024 * 80)
      # Directory
      self.create_regular_file('dir2/file2', size=1024 * 80)
     # File owner
     os.chown('input/file1', 100, 200)
     # File mode
     os.chmod('input/file1', 0o7755)
     os.chmod('input/dir2', 0o555)
     # Block device
     os.mknod('input/bdev', 0o600 | stat.S_IFBLK,  os.makedev(10, 20))
     # Char device
     os.mknod('input/cdev', 0o600 | stat.S_IFCHR,  os.makedev(30, 40))
     if xattr.is_enabled():
         xattr.setxattr(os.path.join(self.input_path, 'file1'), 'user.foo', b'bar')
     # Hard link
     os.link(os.path.join(self.input_path, 'file1'),
             os.path.join(self.input_path, 'hardlink'))
     # Symlink
     os.symlink('somewhere', os.path.join(self.input_path, 'link1'))
     # FIFO node
     os.mkfifo(os.path.join(self.input_path, 'fifo1'))
Example #29
def linux_fixup_databasedir():
    ''' Under Linux move database from /var/neubot to /var/lib/neubot '''
    # Explanation: /var/lib/neubot is FHS, /var/neubot isn't

    if os.name != 'posix':
        return
    if not sys.platform.startswith('linux'):
        return
    if os.getuid() != 0:
        return

    if not os.path.isfile('/var/neubot/database.sqlite3'):
        return
    if os.path.exists('/var/lib/neubot/database.sqlite3'):
        return

    logging.debug('database_xxx: /var/neubot -> /var/lib/neubot...')

    # Lazy import
    from neubot import utils_posix

    #
    # Here we create the new link as root, and we assume that
    # the caller will fix permissions afterwards.  This should
    # happen as long as we are invoked before the database
    # function that checks database path.
    #
    utils_posix.mkdir_idempotent('/var/lib/neubot')
    os.link('/var/neubot/database.sqlite3', '/var/lib/neubot/database.sqlite3')
    os.unlink('/var/neubot/database.sqlite3')

    logging.debug('database_xxx: /var/neubot -> /var/lib/neubot... done')
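Note that the link-plus-unlink pair moves the database without copying it: the
inode, and with it ownership, permissions, and any open file handles, is
preserved. As a bare pattern, assuming both paths live on the same filesystem:

import os

def move_same_fs(src, dst):
    # Hard-link the new name, then drop the old one; fails with EXDEV
    # if src and dst are on different filesystems.
    os.link(src, dst)
    os.unlink(src)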
Example #30
 def __init__(self):
     self._uuid = str(uuid.uuid4())
     self.hadoop_cmd_file = tmp_path + "." + self._uuid
     abs_dir = os.path.dirname(os.path.abspath(__file__))
     self.mock_hadoop_client_path = os.path.join(abs_dir, "mock_hadoop_client.py_" + self._uuid)
     # os.path.abspath(__file__) could be mock_hadoop_client.pyc
     os.link(os.path.join(abs_dir, "mock_hadoop_client.py"), self.mock_hadoop_client_path)
Example #31
    ('=f', 'v2', [-1, (161, -1), 0, 0], 1.0, 'ricker1', 2.0),
]

# read mesh from disk
fieldio += [
    ('=R', 'x1', [0, 0, 1, 0], 'x.bin'),
    ('=R', 'x2', [0, 0, 1, 0], 'y.bin'),
]

# specify output
for c in '12':
    fieldio += [
        ('=w', 'u' + c, [-1, -1, 1, 0], 'source-u%s.bin' % c),
        ('=w', 'u' + c, [1, 0, 1, 0], 'canyon-u%s.bin' % c),
        ('=w', 'u' + c, [(2, 158), 1, 1, 0], 'flank-u%s.bin' % c),
        ('=w', 'v' + c, [0, 0, 1, (1, -1, 10)], 'snap-v%s.bin' % c),
        ('=w', 'u' + c, [0, 0, 1, (1, -1, 10)], 'snap-u%s.bin' % c),
    ]

# continue if command line
if __name__ == '__main__':

    # stage job, copy mesh files, and run job
    rundir = os.path.join('run', 'sim')
    job = cst.sord.stage(locals())
    for f in 'x.bin', 'y.bin':
        a = os.path.join('run', 'mesh', f)
        b = os.path.join('run', 'sim', f)
        os.link(a, b)
    cst.sord.run(job)
Example #32
 def hardlink_scons(self, src, dst, ver):
     try:
         os.unlink(dst)
     except OSError:
         pass
     os.link(ver, dst)
Example #33
 def link(self, target, name):
     return os.link(self._full_path(target), self._full_path(name))
Example #34
def main():

    module = AnsibleModule(
        argument_spec=dict(
            state=dict(choices=[
                'file', 'directory', 'link', 'hard', 'touch', 'absent'
            ],
                       default=None),
            path=dict(aliases=['dest', 'name'], required=True, type='path'),
            original_basename=dict(
                required=False),  # Internal use only, for recursive ops
            recurse=dict(default=False, type='bool'),
            force=dict(required=False, default=False, type='bool'),
            follow=dict(required=False, default=False, type='bool'),
            diff_peek=dict(
                default=None
            ),  # Internal use only, for internal checks in the action plugins
            validate=dict(
                required=False,
                default=None),  # Internal use only, for template and copy
            src=dict(required=False, default=None, type='path'),
        ),
        add_file_common_args=True,
        supports_check_mode=True)

    params = module.params
    state = params['state']
    recurse = params['recurse']
    force = params['force']
    diff_peek = params['diff_peek']
    src = params['src']
    b_src = to_bytes(src, errors='surrogate_or_strict')
    follow = params['follow']

    # modify source as we later reload and pass, specially relevant when used by other modules.
    path = params['path']
    b_path = to_bytes(path, errors='surrogate_or_strict')

    # short-circuit for diff_peek
    if diff_peek is not None:
        appears_binary = False
        try:
            f = open(b_path, 'rb')
            head = f.read(8192)
            f.close()
            if b("\x00") in head:
                appears_binary = True
        except Exception:
            pass
        module.exit_json(path=path,
                         changed=False,
                         appears_binary=appears_binary)

    prev_state = get_state(b_path)

    # state should default to file, but since that creates many conflicts,
    # default to 'current' when it exists.
    if state is None:
        if prev_state != 'absent':
            state = prev_state
        elif recurse:
            state = 'directory'
        else:
            state = 'file'

    # source is both the source of a symlink or an informational passing of the src for a template module
    # or copy module, even if this module never uses it, it is needed to key off some things
    if src is None:
        if state in ('link', 'hard'):
            if follow and state == 'link':
                # use the current target of the link as the source
                src = to_native(os.path.realpath(b_path), errors='strict')
                b_src = to_bytes(os.path.realpath(b_path), errors='strict')
            else:
                module.fail_json(
                    msg='src and dest are required for creating links')

    # original_basename is used by other modules that depend on file.
    if state not in ("link", "absent") and os.path.isdir(b_path):
        basename = None
        if params['original_basename']:
            basename = params['original_basename']
        elif src is not None:
            basename = os.path.basename(src)
        if basename:
            params['path'] = path = os.path.join(path, basename)
            b_path = to_bytes(path, errors='surrogate_or_strict')
            prev_state = get_state(b_path)

    # make sure the target path is a directory when we're doing a recursive operation
    if recurse and state != 'directory':
        module.fail_json(path=path,
                         msg="recurse option requires state to be 'directory'")

    file_args = module.load_file_common_arguments(params)

    changed = False
    diff = {
        'before': {
            'path': path
        },
        'after': {
            'path': path
        },
    }

    state_change = False
    if prev_state != state:
        diff['before']['state'] = prev_state
        diff['after']['state'] = state
        state_change = True

    if state == 'absent':
        if state_change:
            if not module.check_mode:
                if prev_state == 'directory':
                    try:
                        shutil.rmtree(b_path, ignore_errors=False)
                    except Exception as e:
                        module.fail_json(msg="rmtree failed: %s" %
                                         to_native(e))
                else:
                    try:
                        os.unlink(b_path)
                    except Exception as e:
                        module.fail_json(path=path,
                                         msg="unlinking failed: %s " %
                                         to_native(e))
            module.exit_json(path=path, changed=True, diff=diff)
        else:
            module.exit_json(path=path, changed=False)

    elif state == 'file':

        if state_change:
            if follow and prev_state == 'link':
                # follow symlink and operate on original
                b_path = os.path.realpath(b_path)
                path = to_native(b_path, errors='strict')
                prev_state = get_state(b_path)
                file_args['path'] = path

        if prev_state not in ('file', 'hard'):
            # file is not absent and any other state is a conflict
            module.fail_json(path=path,
                             msg='file (%s) is %s, cannot continue' %
                             (path, prev_state))

        changed = module.set_fs_attributes_if_different(file_args,
                                                        changed,
                                                        diff,
                                                        expand=False)
        module.exit_json(path=path, changed=changed, diff=diff)

    elif state == 'directory':
        if follow and prev_state == 'link':
            b_path = os.path.realpath(b_path)
            path = to_native(b_path, errors='strict')
            prev_state = get_state(b_path)

        if prev_state == 'absent':
            if module.check_mode:
                module.exit_json(changed=True, diff=diff)
            changed = True
            curpath = ''

            try:
                # Split the path so we can apply filesystem attributes recursively
                # from the root (/) directory for absolute paths or the base path
                # of a relative path.  We can then walk the appropriate directory
                # path to apply attributes.
                for dirname in path.strip('/').split('/'):
                    curpath = '/'.join([curpath, dirname])
                    # Remove leading slash if we're creating a relative path
                    if not os.path.isabs(path):
                        curpath = curpath.lstrip('/')
                    b_curpath = to_bytes(curpath, errors='surrogate_or_strict')
                    if not os.path.exists(b_curpath):
                        try:
                            os.mkdir(b_curpath)
                        except OSError as ex:
                            # Possibly something else created the dir since the os.path.exists
                            # check above. As long as it's a dir, we don't need to error out.
                            if not (ex.errno == errno.EEXIST
                                    and os.path.isdir(b_curpath)):
                                raise
                        tmp_file_args = file_args.copy()
                        tmp_file_args['path'] = curpath
                        changed = module.set_fs_attributes_if_different(
                            tmp_file_args, changed, diff, expand=False)
            except Exception as e:
                module.fail_json(
                    path=path,
                    msg='There was an issue creating %s as requested: %s' %
                    (curpath, to_native(e)))

        # We already know prev_state is not 'absent', therefore it exists in some form.
        elif prev_state != 'directory':
            module.fail_json(path=path,
                             msg='%s already exists as a %s' %
                             (path, prev_state))

        changed = module.set_fs_attributes_if_different(file_args,
                                                        changed,
                                                        diff,
                                                        expand=False)

        if recurse:
            changed |= recursive_set_attributes(
                module,
                to_bytes(file_args['path'], errors='surrogate_or_strict'),
                follow, file_args)

        module.exit_json(path=path, changed=changed, diff=diff)

    elif state in ('link', 'hard'):

        if not os.path.islink(b_path) and os.path.isdir(b_path):
            relpath = path
        else:
            b_relpath = os.path.dirname(b_path)
            relpath = to_native(b_relpath, errors='strict')

        absrc = os.path.join(relpath, src)
        b_absrc = to_bytes(absrc, errors='surrogate_or_strict')
        if not force and not os.path.exists(b_absrc):
            module.fail_json(
                path=path,
                src=src,
                msg=
                'src file does not exist, use "force=yes" if you really want to create the link: %s'
                % absrc)

        if state == 'hard':
            if not os.path.isabs(b_src):
                module.fail_json(msg="absolute paths are required")
        elif prev_state == 'directory':
            if not force:
                module.fail_json(
                    path=path,
                    msg='refusing to convert between %s and %s for %s' %
                    (prev_state, state, path))
            elif os.listdir(b_path):
                # refuse to replace a directory that has files in it
                module.fail_json(
                    path=path,
                    msg='the directory %s is not empty, refusing to convert it'
                    % path)
        elif prev_state in ('file', 'hard') and not force:
            module.fail_json(
                path=path,
                msg='refusing to convert between %s and %s for %s' %
                (prev_state, state, path))

        if prev_state == 'absent':
            changed = True
        elif prev_state == 'link':
            b_old_src = os.readlink(b_path)
            if b_old_src != b_src:
                diff['before']['src'] = to_native(b_old_src, errors='strict')
                diff['after']['src'] = src
                changed = True
        elif prev_state == 'hard':
            if not (state == 'hard'
                    and os.stat(b_path).st_ino == os.stat(b_src).st_ino):
                changed = True
                if not force:
                    module.fail_json(
                        dest=path,
                        src=src,
                        msg=
                        'Cannot link, different hard link exists at destination'
                    )
        elif prev_state == 'file':
            changed = True
            if not force:
                module.fail_json(dest=path,
                                 src=src,
                                 msg='Cannot link, %s exists at destination' %
                                 prev_state)
        elif prev_state == 'directory':
            changed = True
            if os.path.exists(b_path):
                if state == 'hard' and os.stat(b_path).st_ino == os.stat(
                        b_src).st_ino:
                    module.exit_json(path=path, changed=False)
                elif not force:
                    module.fail_json(
                        dest=path,
                        src=src,
                        msg=
                        'Cannot link, different hard link exists at destination'
                    )
        else:
            module.fail_json(dest=path,
                             src=src,
                             msg='unexpected position reached')

        if changed and not module.check_mode:
            if prev_state != 'absent':
                # try to replace atomically
                b_tmppath = to_bytes(os.path.sep).join([
                    os.path.dirname(b_path),
                    to_bytes(".%s.%s.tmp" % (os.getpid(), time.time()))
                ])
                try:
                    if prev_state == 'directory' and state == 'link':
                        os.rmdir(b_path)
                    elif prev_state == 'directory' and state == 'hard':
                        if os.path.exists(b_path):
                            os.remove(b_path)
                    if state == 'hard':
                        os.link(b_src, b_tmppath)
                    else:
                        os.symlink(b_src, b_tmppath)
                    os.rename(b_tmppath, b_path)
                except OSError as e:
                    if os.path.exists(b_tmppath):
                        os.unlink(b_tmppath)
                    module.fail_json(path=path,
                                     msg='Error while replacing: %s' %
                                     to_native(e, nonstring='simplerepr'))
            else:
                try:
                    if state == 'hard':
                        os.link(b_src, b_path)
                    else:
                        os.symlink(b_src, b_path)
                except OSError as e:
                    module.fail_json(path=path,
                                     msg='Error while linking: %s' %
                                     to_native(e, nonstring='simplerepr'))

        if module.check_mode and not os.path.exists(b_path):
            module.exit_json(dest=path, src=src, changed=changed, diff=diff)

        changed = module.set_fs_attributes_if_different(file_args,
                                                        changed,
                                                        diff,
                                                        expand=False)
        module.exit_json(dest=path, src=src, changed=changed, diff=diff)

    elif state == 'touch':
        if not module.check_mode:

            if prev_state == 'absent':
                try:
                    open(b_path, 'wb').close()
                except OSError as e:
                    module.fail_json(path=path,
                                     msg='Error, could not touch target: %s' %
                                     to_native(e, nonstring='simplerepr'))
            elif prev_state in ('file', 'directory', 'hard'):
                try:
                    os.utime(b_path, None)
                except OSError as e:
                    module.fail_json(
                        path=path,
                        msg='Error while touching existing target: %s' %
                        to_native(e, nonstring='simplerepr'))
            else:
                module.fail_json(
                    msg=
                    'Cannot touch other than files, directories, and hardlinks (%s is %s)'
                    % (path, prev_state))
            try:
                module.set_fs_attributes_if_different(file_args,
                                                      True,
                                                      diff,
                                                      expand=False)
            except SystemExit as e:
                if e.code:
                    # We take this to mean that fail_json() was called from
                    # somewhere in basic.py
                    if prev_state == 'absent':
                        # If we just created the file we can safely remove it
                        os.remove(b_path)
                raise e

        module.exit_json(dest=path, changed=True, diff=diff)

    module.fail_json(path=path, msg='unexpected position reached')
Example #35
def link(src, dst):
    if os.name == 'nt':
        import ctypes
        if ctypes.windll.kernel32.CreateHardLinkW(six.text_type(dst), six.text_type(src), 0) == 0:
            raise ctypes.WinError()
    else:
        os.link(src, dst)
Example #36
def do_sillydiff(dirname, oldfile, newfile, outfile):
    today = time.strftime("%d.%m.%Y")
    try:
        oldfile = open(os.path.join(dirname, oldfile), "r")
        line = oldfile.readline()
        line = line.rstrip()
    except IOError:
        logger.warn("Warning, old file did not exist, assuming first run ever")
        os.link(os.path.join(dirname, newfile), os.path.join(dirname, outfile))
        return

    old_dict = dict()
    while line:
        key = line[0:12]
        value = old_dict.get(key, list())
        value.append(line[13:])
        old_dict[key] = value

        line = oldfile.readline()
        line = line.rstrip()
    oldfile.close()

    out = AtomicFileWriter(os.path.join(dirname, outfile), 'w')
    newin = open(os.path.join(dirname, newfile))

    for newline in newin:
        newline = newline.rstrip()
        pnr = newline[0:12]
        data = newline[13:]
        if pnr in old_dict:
            if data not in old_dict[pnr]:
                # Some change, want to update with new values.
                out.write(newline + "\n")
            else:
                old_dict[pnr].remove(data)

            # If nothing else is left, delete the key from the dictionary
            if not old_dict[pnr]:
                del old_dict[pnr]
        else:
            # completely new entry, output unconditionally
            out.write(newline + "\n")

    # Now, there is one problem left: we cannot output the old data blindly,
    # as people's names might have changed. So, we force *every* old record to
    # the current names in Cerebrum. This may result in the exactly same
    # record being output twice, but it should be fine.
    person = Factory.get("Person")(db)
    const = Factory.get("Constants")(db)
    logger.debug("%d old records left", len(old_dict))
    for leftpnr in old_dict:
        # FIXME: it is unsafe to assume that this will succeed
        first, last = fnr2names(person, const, leftpnr[:-1])
        if not (first and last):
            logger.warn(
                "No name information for %s is available. %d "
                "entry(ies) will be skipped", leftpnr[:-1],
                len(old_dict[leftpnr]))
            continue

        for entry in old_dict[leftpnr]:
            vals = entry.split(";")
            vals[2] = first
            vals[3] = last
            vals[13] = today
            vals[17] = ""
            out.write("%s;%s\n" % (leftpnr, ";".join(vals)))

    out.close()
    newin.close()
Example #37
def __main__():
    #Parse Command Line
    parser = optparse.OptionParser()
    parser.add_option(
        '-p',
        '--num-threads',
        dest='num_threads',
        help='Use this many threads to align reads. The default is 1.')
    parser.add_option('-C',
                      '--color-space',
                      dest='color_space',
                      action='store_true',
                      help='This indicates color-space data')
    parser.add_option('-J',
                      '--junctions-output',
                      dest='junctions_output_file',
                      help='Junctions output file; format is BED.')
    parser.add_option('-H',
                      '--hits-output',
                      dest='accepted_hits_output_file',
                      help='Accepted hits output file; format is BAM.')
    parser.add_option('', '--own-file', dest='own_file', help='')
    parser.add_option(
        '-D',
        '--indexes-path',
        dest='index_path',
        help='Indexes directory; location of .ebwt and .fa files.')
    parser.add_option(
        '-r',
        '--mate-inner-dist',
        dest='mate_inner_dist',
        help='This is the expected (mean) inner distance between mate pairs. '
             'For example, for paired end runs with fragments selected at 300bp, '
             'where each end is 50bp, you should set -r to be 200. There is no '
             'default, and this parameter is required for paired end runs.')
    parser.add_option(
        '',
        '--mate-std-dev',
        dest='mate_std_dev',
        help=
        'Standard deviation of the distribution of inner distances between mate pairs.'
    )
    parser.add_option('-n',
                      '--transcriptome-mismatches',
                      dest='transcriptome_mismatches')
    parser.add_option('',
                      '--genome-read-mismatches',
                      dest='genome_read_mismatches')
    parser.add_option('', '--read-mismatches', dest='read_mismatches')
    parser.add_option('', '--bowtie-n', action="store_true", dest='bowtie_n')
    parser.add_option(
        '-a',
        '--min-anchor-length',
        dest='min_anchor_length',
        help=
        'The "anchor length". TopHat will report junctions spanned by reads with at least this many bases on each side of the junction.'
    )
    parser.add_option(
        '-m',
        '--splice-mismatches',
        dest='splice_mismatches',
        help=
        'The maximum number of mismatches that can appear in the anchor region of a spliced alignment.'
    )
    parser.add_option(
        '-i',
        '--min-intron-length',
        dest='min_intron_length',
        help=
        'The minimum intron length. TopHat will ignore donor/acceptor pairs closer than this many bases apart.'
    )
    parser.add_option(
        '-I',
        '--max-intron-length',
        dest='max_intron_length',
        help=
        'The maximum intron length. When searching for junctions ab initio, TopHat will ignore donor/acceptor pairs farther than this many bases apart, except when such a pair is supported by a split segment alignment of a long read.'
    )
    parser.add_option('-g',
                      '--max_multihits',
                      dest='max_multihits',
                      help='Maximum number of alignments to be allowed')
    parser.add_option(
        '',
        '--seg-mismatches',
        dest='seg_mismatches',
        help=
        'Number of mismatches allowed in each segment alignment for reads mapped independently'
    )
    parser.add_option('',
                      '--seg-length',
                      dest='seg_length',
                      help='Minimum length of read segments')
    parser.add_option(
        '',
        '--library-type',
        dest='library_type',
        help=
        'TopHat will treat the reads as strand specific. Every read alignment will have an XS attribute tag. Consider supplying library type options below to select the correct RNA-seq protocol.'
    )
    parser.add_option(
        '',
        '--allow-indels',
        action="store_true",
        help=
        'Allow indel search. Indel search is disabled by default.(Not used since version 1.3.0)'
    )
    parser.add_option('',
                      '--max-insertion-length',
                      dest='max_insertion_length',
                      help='The maximum insertion length. The default is 3.')
    parser.add_option('',
                      '--max-deletion-length',
                      dest='max_deletion_length',
                      help='The maximum deletion length. The default is 3.')

    # Options for supplying own junctions
    parser.add_option(
        '-G',
        '--GTF',
        dest='gene_model_annotations',
        help='Supply TopHat with a list of gene model annotations. TopHat will '
             'use the exon records in this file to build a set of known splice '
             'junctions for each gene, and will attempt to align reads to these '
             'junctions even if they would not normally be covered by the '
             'initial mapping.')
    parser.add_option(
        '-j',
        '--raw-juncs',
        dest='raw_juncs',
        help='Supply TopHat with a list of raw junctions. Junctions are '
             'specified one per line, in a tab-delimited format. Records '
             'look like: <chrom> <left> <right> <+/-> left and right are '
             'zero-based coordinates, and specify the last character of the '
             'left sequence to be spliced to the first character of the '
             'right sequence, inclusive.')
    parser.add_option('',
                      '--no-novel-juncs',
                      action="store_true",
                      dest='no_novel_juncs',
                      help="Only look for junctions indicated in the \
                                                                                            supplied GFF file. (ignored without -G)"
                      )
    parser.add_option(
        '',
        '--no-novel-indels',
        action="store_true",
        dest='no_novel_indels',
        help="Skip indel search. Indel search is enabled by default.")
    # Types of search.
    parser.add_option(
        '',
        '--microexon-search',
        action="store_true",
        dest='microexon_search',
        help=
        'With this option, the pipeline will attempt to find alignments incident to microexons. Works only for reads 50bp or longer.'
    )
    parser.add_option(
        '',
        '--coverage-search',
        action="store_true",
        dest='coverage_search',
        help=
        'Enables the coverage based search for junctions. Use when coverage search is disabled by default (such as for reads 75bp or longer), for maximum sensitivity.'
    )
    parser.add_option('',
                      '--no-coverage-search',
                      action="store_false",
                      dest='coverage_search')
    parser.add_option(
        '',
        '--min-segment-intron',
        dest='min_segment_intron',
        help=
        'Minimum intron length that may be found during split-segment search')
    parser.add_option(
        '',
        '--max-segment-intron',
        dest='max_segment_intron',
        help=
        'Maximum intron length that may be found during split-segment search')
    parser.add_option(
        '',
        '--min-coverage-intron',
        dest='min_coverage_intron',
        help='Minimum intron length that may be found during coverage search')
    parser.add_option(
        '',
        '--max-coverage-intron',
        dest='max_coverage_intron',
        help='Maximum intron length that may be found during coverage search')

    # Fusion search options.
    parser.add_option('',
                      '--fusion-search',
                      action='store_true',
                      dest='fusion_search')
    parser.add_option('',
                      '--fusion-anchor-length',
                      dest='fusion_anchor_length')
    parser.add_option('', '--fusion-min-dist', dest='fusion_min_dist')
    parser.add_option('',
                      '--fusion-read-mismatches',
                      dest='fusion_read_mismatches')
    parser.add_option('', '--fusion-multireads', dest='fusion_multireads')
    parser.add_option('', '--fusion-multipairs', dest='fusion_multipairs')
    parser.add_option('',
                      '--fusion-ignore-chromosomes',
                      dest='fusion_ignore_chromosomes')

    # Wrapper options.
    parser.add_option(
        '-1',
        '--input1',
        dest='input1',
        help='The (forward or single-end) reads file in Sanger FASTQ format')
    parser.add_option('-2',
                      '--input2',
                      dest='input2',
                      help='The reverse reads file in Sanger FASTQ format')
    parser.add_option('', '--single-paired', dest='single_paired', help='')
    parser.add_option('', '--settings', dest='settings', help='')

    (options, args) = parser.parse_args()

    # Color or base space
    space = ''
    if options.color_space:
        space = '-C'

    # Create bowtie index if necessary.
    tmp_index_dir = tempfile.mkdtemp()
    if options.own_file:
        index_path = os.path.join(
            tmp_index_dir,
            '.'.join(os.path.split(options.own_file)[1].split('.')[:-1]))
        try:
            os.link(options.own_file, index_path + '.fa')
        except OSError:
            # Tophat prefers (but doesn't require) fasta file to be in same directory, with .fa extension
            pass
        cmd_index = 'bowtie-build %s -f %s %s' % (space, options.own_file,
                                                  index_path)
        try:
            tmp = tempfile.NamedTemporaryFile(dir=tmp_index_dir).name
            tmp_stderr = open(tmp, 'wb')
            proc = subprocess.Popen(args=cmd_index,
                                    shell=True,
                                    cwd=tmp_index_dir,
                                    stderr=tmp_stderr.fileno())
            returncode = proc.wait()
            tmp_stderr.close()
            # get stderr, allowing for case where it's very large
            tmp_stderr = open(tmp, 'rb')
            stderr = ''
            buffsize = 1048576
            try:
                while True:
                    stderr += tmp_stderr.read(buffsize)
                    if not stderr or len(stderr) % buffsize != 0:
                        break
            except OverflowError:
                pass
            tmp_stderr.close()
            if returncode != 0:
                raise Exception, stderr
        except Exception, e:
            if os.path.exists(tmp_index_dir):
                shutil.rmtree(tmp_index_dir)
            stop_err('Error indexing reference sequence\n' + str(e))
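
The temp-file stderr spooling above predates subprocess.run; on Python 3 the same indexing step can be sketched more directly (a sketch only, not the wrapper's code; bowtie-build flags as used above):

import subprocess

def build_bowtie_index(fasta_path, index_base, color_space=False):
    # Build the index and capture stderr in memory instead of a temp file.
    cmd = ['bowtie-build']
    if color_space:
        cmd.append('-C')
    cmd += ['-f', fasta_path, index_base]
    result = subprocess.run(cmd, capture_output=True, text=True)
    if result.returncode != 0:
        raise RuntimeError('Error indexing reference sequence\n' + result.stderr)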
Example #38
0
def ensure_hardlink(path, src, follow, force, timestamps):
    b_path = to_bytes(path, errors='surrogate_or_strict')
    b_src = to_bytes(src, errors='surrogate_or_strict')
    prev_state = get_state(b_path)
    file_args = module.load_file_common_arguments(module.params)
    mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
    atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])

    # src is the source of a hardlink.  We require it if we are creating a new hardlink.
    # We require path in the argument_spec so we know it is present at this point.
    if src is None:
        raise AssibleModuleError(results={'msg': 'src is required for creating new hardlinks'})

    if not os.path.exists(b_src):
        raise AssibleModuleError(results={'msg': 'src does not exist', 'dest': path, 'src': src})

    diff = initial_diff(path, 'hard', prev_state)
    changed = False

    if prev_state == 'absent':
        changed = True
    elif prev_state == 'link':
        b_old_src = os.readlink(b_path)
        if b_old_src != b_src:
            diff['before']['src'] = to_native(b_old_src, errors='strict')
            diff['after']['src'] = src
            changed = True
    elif prev_state == 'hard':
        if not os.stat(b_path).st_ino == os.stat(b_src).st_ino:
            changed = True
            if not force:
                raise AssibleModuleError(results={'msg': 'Cannot link, different hard link exists at destination',
                                                  'dest': path, 'src': src})
    elif prev_state == 'file':
        changed = True
        if not force:
            raise AssibleModuleError(results={'msg': 'Cannot link, %s exists at destination' % prev_state,
                                              'dest': path, 'src': src})
    elif prev_state == 'directory':
        changed = True
        if os.path.exists(b_path):
            if os.stat(b_path).st_ino == os.stat(b_src).st_ino:
                return {'path': path, 'changed': False}
            elif not force:
                raise AssibleModuleError(results={'msg': 'Cannot link: different hard link exists at destination',
                                                  'dest': path, 'src': src})
    else:
        raise AssibleModuleError(results={'msg': 'unexpected position reached', 'dest': path, 'src': src})

    if changed and not module.check_mode:
        if prev_state != 'absent':
            # try to replace atomically
            b_tmppath = to_bytes(os.path.sep).join(
                [os.path.dirname(b_path), to_bytes(".%s.%s.tmp" % (os.getpid(), time.time()))]
            )
            try:
                if prev_state == 'directory':
                    if os.path.exists(b_path):
                        try:
                            os.unlink(b_path)
                        except OSError as e:
                            if e.errno != errno.ENOENT:  # It may already have been removed
                                raise
                os.link(b_src, b_tmppath)
                os.rename(b_tmppath, b_path)
            except OSError as e:
                if os.path.exists(b_tmppath):
                    os.unlink(b_tmppath)
                raise AssibleModuleError(results={'msg': 'Error while replacing: %s'
                                                         % to_native(e, nonstring='simplerepr'),
                                                  'path': path})
        else:
            try:
                os.link(b_src, b_path)
            except OSError as e:
                raise AssibleModuleError(results={'msg': 'Error while linking: %s'
                                                         % to_native(e, nonstring='simplerepr'),
                                                  'path': path})

    if module.check_mode and not os.path.exists(b_path):
        return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}

    changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
    changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)

    return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
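
The prev_state != 'absent' branch above is the link-then-rename idiom for replacing a path atomically. A standalone sketch of just that idiom (hypothetical helper; assumes POSIX rename semantics):

import os
import time

def replace_with_hardlink(src, dst):
    # Create the new link under a temporary name in the same directory,
    # then rename() it over the destination: rename() is atomic on POSIX,
    # so readers see either the old file or the new one, never neither.
    tmp = os.path.join(os.path.dirname(dst) or '.',
                       '.%s.%s.tmp' % (os.getpid(), time.time()))
    os.link(src, tmp)
    try:
        os.rename(tmp, dst)
    except OSError:
        os.unlink(tmp)
        raise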
Example #39
0
def drive_APMrover2(viewerip=None, map=False):
    '''drive APMrover2 in SIL

    you can pass viewerip as an IP address to optionally send fg and
    mavproxy packets too for local viewing of the mission in real time
    '''
    global homeloc

    options = '--sitl=127.0.0.1:5501 --out=127.0.0.1:19550 --streamrate=10'
    if viewerip:
        options += " --out=%s:14550" % viewerip
    if map:
        options += ' --map'

    sim_cmd = util.reltopdir(
        'Tools/autotest/pysim/sim_wrapper.py'
    ) + ' --frame=rover --rate=200 --speedup=100 --home=%f,%f,%u,%u' % (
        HOME.lat, HOME.lng, HOME.alt, HOME.heading)

    sil = util.start_SIL('APMrover2', wipe=True)
    mavproxy = util.start_MAVProxy_SIL('APMrover2', options=options)

    runsim = pexpect.spawn(sim_cmd, logfile=sys.stdout, timeout=10)
    runsim.delaybeforesend = 0
    runsim.expect('Starting at lat')

    print("WAITING FOR PARAMETERS")
    mavproxy.expect('Received [0-9]+ parameters')

    # setup test parameters
    mavproxy.send("param load %s/Rover.parm\n" % testdir)
    mavproxy.expect('Loaded [0-9]+ parameters')

    # restart with new parms
    util.pexpect_close(mavproxy)
    util.pexpect_close(sil)
    util.pexpect_close(runsim)

    sil = util.start_SIL('APMrover2')
    mavproxy = util.start_MAVProxy_SIL('APMrover2', options=options)
    mavproxy.expect('Logging to (\S+)')
    logfile = mavproxy.match.group(1)
    print("LOGFILE %s" % logfile)

    sim_cmd = util.reltopdir(
        'Tools/autotest/pysim/sim_wrapper.py'
    ) + ' --frame=rover --rate=200 --speedup=100 --home=%f,%f,%u,%u' % (
        HOME.lat, HOME.lng, HOME.alt, HOME.heading)

    runsim = pexpect.spawn(sim_cmd, logfile=sys.stdout, timeout=10)
    runsim.delaybeforesend = 0
    util.pexpect_autoclose(runsim)
    runsim.expect('Starting at lat')

    buildlog = util.reltopdir("../buildlogs/APMrover2-test.tlog")
    print("buildlog=%s" % buildlog)
    if os.path.exists(buildlog):
        os.unlink(buildlog)
    try:
        os.link(logfile, buildlog)
    except Exception:
        pass

    mavproxy.expect('Received [0-9]+ parameters')

    util.expect_setup_callback(mavproxy, expect_callback)

    expect_list_clear()
    expect_list_extend([runsim, sil, mavproxy])

    print("Started simulator")

    # get a mavlink connection going
    try:
        mav = mavutil.mavlink_connection('127.0.0.1:19550',
                                         robust_parsing=True)
    except Exception, msg:
        print("Failed to start mavlink connection on 127.0.0.1:19550" % msg)
        raise
Example #40
0
    def link(self, newpath):
        """ Create a hard link at 'newpath', pointing to this file. """
        os.link(self, newpath)
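
A quick demonstration of the semantics this one-liner relies on: after os.link, both names refer to the same inode, and the data survives until the last name is removed (hypothetical file names):

import os

with open('original.txt', 'w') as f:
    f.write('payload')
os.link('original.txt', 'alias.txt')

a, b = os.stat('original.txt'), os.stat('alias.txt')
assert a.st_ino == b.st_ino   # same inode, same data blocks
assert a.st_nlink == 2        # two directory entries point at it

os.unlink('original.txt')     # the data survives until the last link goes
print(open('alias.txt').read())   # -> payload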
Example #41
0
def make_deb(distro, build_os, arch, spec, srcdir):
    # I can't remember the details anymore, but the initscript/upstart
    # job files' names must match the package name in some way; and
    # see also the --name flag to dh_installinit in the generated
    # debian/rules file.
    suffix = spec.suffix()
    sdir = setupdir(distro, build_os, arch, spec)
    if re.search("debian", distro.name()):
        os.unlink(sdir + "debian/mongod.upstart")
        os.link(
            sdir + "debian/mongod.service", sdir +
            "debian/%s%s-server.mongod.service" % (distro.pkgbase(), suffix))
        os.unlink(sdir + "debian/init.d")
    elif re.search("ubuntu", distro.name()):
        os.unlink(sdir + "debian/init.d")
        if build_os in ("ubuntu1204", "ubuntu1404", "ubuntu1410"):
            os.link(
                sdir + "debian/mongod.upstart",
                sdir + "debian/%s%s-server.mongod.upstart" %
                (distro.pkgbase(), suffix))
            os.unlink(sdir + "debian/mongod.service")
        else:
            os.link(
                sdir + "debian/mongod.service",
                sdir + "debian/%s%s-server.mongod.service" %
                (distro.pkgbase(), suffix))
            os.unlink(sdir + "debian/mongod.upstart")
    else:
        raise Exception("unknown debianoid flavor: not debian or ubuntu?")
    # Rewrite the control and rules files
    write_debian_changelog(sdir + "debian/changelog", spec, srcdir)
    distro_arch = distro.archname(arch)
    sysassert([
        "cp", "-v",
        srcdir + "debian/%s%s.control" % (distro.pkgbase(), suffix),
        sdir + "debian/control"
    ])
    sysassert([
        "cp", "-v", srcdir + "debian/%s%s.rules" % (distro.pkgbase(), suffix),
        sdir + "debian/rules"
    ])

    # old non-server-package postinst will be hanging around for old versions
    #
    if os.path.exists(sdir + "debian/postinst"):
        os.unlink(sdir + "debian/postinst")

    # copy our postinst files
    #
    sysassert([
        "sh", "-c",
        "cp -v \"%sdebian/\"*.postinst \"%sdebian/\"" % (srcdir, sdir)
    ])

    # Do the packaging.
    oldcwd = os.getcwd()
    try:
        os.chdir(sdir)
        sysassert(["dpkg-buildpackage", "-uc", "-us", "-a" + distro_arch])
    finally:
        os.chdir(oldcwd)
    r = distro.repodir(arch, build_os, spec)
    ensure_dir(r)
    # FIXME: see if shutil.copyfile or something can do this without
    # much pain.
    sysassert(["sh", "-c", "cp -v \"%s/../\"*.deb \"%s\"" % (sdir, r)])
    return r
Example #42
0
def Install(**kwargs):
    """
    This does the grunt-work of actually doing the install.
    The possible arguments are:
    - config	Object containing configuration.  This is where the download URL and
            	package directories will be specified.
    - interactive	Whether or not to be interactive.  If true (default), then
		bsd.Dialog will be used for status and error messages.
    - disks	An array of Disk objects to install to.  If set, then the disks
    		will be partitioned and erased.  If NOT set, then the installer
		will create a new boot environment on the existing freenas-boot pool.
    - efi	Boolean indicating whether or not to use EFI (default is False).
    - upgrade_from	An unimported ZFSPool object to install to.  This must be set
    			when upgrading, and when creating a new BE on an existing pool.
    - upgrade	Boolean indicating whether or not to upgrade.  Requires upgrade_from to
		be valid.
    - data_dir	A string indicating the location of the /data.  Normally this will just
		be "/data", but if installing from something other than the ISO, it will
		be necessary to specify it.
    - password	A string indicating the root password.  Ignored for upgrades; may be None
    		(indicating no password, not recommended).
    - partitions	An array of Partition objects (see Utils).  Note that the OS
    			partition will always be installed last.
    - post_install	An array of callable objects, which will be called after installation,
    			as func(mount_point=/path, **kwargs).  MUST BE AN ARRAY.
    - package_handler	Call-back for the start of each package.  Arguments are
			(index [int], name [string], packages [array of package
			names to be installed]).
    - progress_handler	Call-back after each file/directory is installed.  Arguments are **kwargs,
    			will [currently] be either done=True (indicating the package is installed),
    			or (total=int [number of objects], index=int [current index], name=string
    			[name of object that was just installed]).
    - manifest	A manifest object.  Must be set.
    - package_directory	A path where the package files are located.  The package files must
    			already be located in this directory.
    - trampoline	A boolean indicating whether the post-install scripts should be run
    			on reboot (True, default) or during the install (False).
    """
    LogIt("Install({})".format(kwargs))
    orig_kwargs = kwargs.copy()

    config = kwargs.get("config", Configuration.SystemConfiguration())
    interactive = kwargs.get("interactive", True)
    disks = kwargs.get("disks", [])
    efi = kwargs.get("efi", False)
    upgrade_pool = kwargs.get("upgrade_from", None)
    upgrade = kwargs.get("upgrade", False)
    data_dir = kwargs.get("data_dir", "/data")
    password = kwargs.get("password", None)
    extra_partitions = kwargs.get("partitions", [])
    post_install = kwargs.get("post_install", [])
    package_notifier = kwargs.get("package_handler", None)
    progress_notifier = kwargs.get("progress_handler", None)
    manifest = kwargs.get("manifest", None)
    trampoline = kwargs.get("trampoline", True)
    # The default is based on ISO layout
    package_dir = kwargs.get("package_directory", "/.mount/{}/Packages".format(Project()))

    if type(post_install) != list:
        post_install = [post_install]
        
    if not manifest:
        if interactive:
            try:
                Dialog.MessageBox(Title(),
                                  "No manifest specified for the installation",
                                  height=7, width=45).run()
            except:
                pass
        raise InstallationError("No manifest specified for the installation")
                    
    config.SetPackageDir(package_dir)
    
    mount_point = tempfile.mkdtemp()
    
    # Quick sanity check
    if upgrade and upgrade_pool is None:
        if interactive:
            Dialog.MessageBox(Title(), "\nNo pool to upgrade from",
                              height=7, width=30).run()
        raise InstallationError("Upgrade selected but not previous boot pool selected")

    if disks is None and upgrade_pool is None:
        if interactive:
            Dialog.MessageBox(Title(), "\nNo disks or previous pool selected",
                              height=10, width=30).run()
        raise InstallationError("No disks or previous boot pool selected")
    
    if IsTruenas():
        # We use a 16g swap partition in TrueNAS.
        # Note that this is only used if the disks are being formatted.
        extra_partitions.append(Partition(type="swap", index="3", size=16*1024*1024*1024))
        def make_tn_swap(mount_point=None, **kwargs):
            # This uses the previously-defined variables, not kwargs
            if disks and mount_point:
                try:
                    RunCommand("/sbin/gmirror", "label", "-b", "prefer",
                               ["{}p3".format(disk.name) for disk in disks])
                    with open(os.path.join(mount_point, "data/fstab.swap"), "w") as swaptab:
                        print("/dev/mirror/swap.eli\tnone\tswap\tsw\t0\t0", file=swaptab)
                except RunCommandException as e:
                    LogIt("Could not create mirrored swap: {}".format(str(e)))
        post_install.append(make_tn_swap)
    # First step is to see if we're upgrading.
    # If so, we want to copy files from the active BE to
    # a location in /tmp, so we can copy them back later.
    # This will import, and then export, the freenas-boot pool.
    
    if upgrade_pool and upgrade:
        upgrade_dir = SaveConfiguration(interactive=interactive,
                                        pool=upgrade_pool)
    else:
        upgrade_dir = None

    # Second step is to see if we're formatting drives.
    # If so, we first will destroy the freenas-boot pool;
    # after that, we will partition the drives.  How we partition
    # depends on the boot method -- efi or bios.  We set the
    # BE name to "default" and create the freenas-boot pool, and
    # then the grub dataset.
    #
    # If we're NOT formatting the drive, we set the pool name
    # to time.strftime("default-%Y%m%d-%H%M%S")
    
    LogIt("disks = {}".format(disks))
    if disks:
        # This means we're formatting
        # We need to know what size and types to make the partitions.
        # If we're using EFI, then we need a 100mbyte msdosfs partition;
        # otherwise a 512k bios-boot.  If we have any extra partitions,
        # we'll take those into account as well.  For the big freebsd-zfs
        # partition, we'll take the minimum of the remaining space,
        # rounded down to the nearest gbyte.
        gByte = 1024 * 1024 * 1024
        if efi:
            # 100mbytes for efi partition
            used = 100 * 1024 * 1024
            boot_part = Partition(type="efi",
                                  index=1,
                                  size=used)
        else:
            # BIOS partition gets 512kbytes
            used = 512 * 1024
            boot_part = Partition(type="bios-boot",
                                  index=1,
                                  size=used)
        partitions = [boot_part]

        # For now, we always make the freenas-boot partition index 2, and place
        # it at the end of the disk.
        next_index = 3
        for part in (extra_partitions or []):
            # We will ignore the index given here.
            part.index = next_index
            used += part.size
            LogIt("Additional partition {}".format(part))
            partitions.append(part)
            next_index += 1

        # At this point, used is the sum of the partitions, in bytes.
        # This isn't really correct - we should be rounding the size up
        # to the blocksize of the disk.  But partitioning behaves strangely
        # sometimes with flash drives.  As a result, when we do the actual
        # partitioning, we use the smart-size (e.g., 1G), which rounds down.
        
        min_size = 0
        for disk in disks:
            # If the remaining space is too small, this installation won't work well.
            size = disk.size
            size = size - used
            if size < gByte:
                if size < 0:
                    fspace = "no free space after the other partitions"
                else:
                    fspace = "free space is {}, minimum is 1Gbyte".format(SmartSize(size))
                name = disk.name
                LogIt("Disk {} is too small {}".format(name, fspace))
                ssize = SmartSize(disk.size)
                if interactive:
                    Dialog.MessageBox(Title(),
                                      "Disk {} is too small ({})".format(name, ssize),
                                      height=10, width=25).run()
                raise InstallationException("Disk {} is too small ({})".format(name, ssize))
            if (size < min_size) or (not min_size):
                min_size = size
        if min_size == 0:
            if interactive:
                Dialog.MessageBox(Title(),
                                  "Unable to find the size of any of the selected disks",
                                  height=15, width=60).run()
            raise InstallationError("Unable to find disk size")
        
        # Round min_size down to a gbyte
        part_size = int(min_size / gByte) * gByte
        os_part = Partition(type="freebsd-zfs",
                            index=2,
                            size=part_size,
                            os=True)
        LogIt("OS partition {}".format(os_part))
        partitions.append(os_part)
                
        # We need to destroy any existing freenas-boot pool.
        # To do that, we may first need to import the pool.
        if upgrade_pool is None:
            try:
                old_pools = list(zfs.find_import(name="freenas-boot"))
            except libzfs.ZFSException as e:
                LogIt("Got ZFS error {} while trying to import freenas-boot for destruction".format(str(e)))
                old_pools = []
        else:
            old_pools = [upgrade_pool]
            # We'll be destroying it, so..
            upgrade_pool = None

        for pool in old_pools:
            try:
                dead_pool = zfs.import_pool(pool, "freenas-boot", {})
                if dead_pool is None:
                    dead_pool = zfs.get("freenas-boot")
                zfs.destroy("freenas-boot")
            except libzfs.ZFSException as e:
                LogIt("Trying to destroy a freenas-boot pool got error {}".format(str(e)))
            
        try:
            freenas_boot = FormatDisks(disks, partitions, interactive)
        except BaseException as e:
            LogIt("FormatDisks got exception {}".format(str(e)))
            raise
        
        bename = "freenas-boot/ROOT/default"
    else:
        # We need to import the pool (we exported it above if upgrade_pool)
        try:
            if upgrade_pool:
                freenas_boot = zfs.import_pool(upgrade_pool, "freenas-boot", {})
            else:
                freenas_boot = None
                pools = list(zfs.find_import(name="freenas-boot"))
                if len(pools) > 1:
                    raise InstallationError("There are multiple unimported freenas-boot pools")
                if len(pools) == 1:
                    freenas_boot = zfs.import_pool(pools[0], "freenas-boot", {})
            if freenas_boot is None:
                freenas_boot = zfs.get("freenas-boot")
        except libzfs.ZFSException as e:
            LogIt("Got ZFS error {} while trying to import pool".format(str(e)))
            if interactive:
                Dialog.MessageBox("Error importing boot pool",
                                  "The {} Installer was unable to import the boot pool:\n\n\t{}".format(Project(), str(e)),
                                  height=25, width=60).run()
            raise InstallationError("Unable to import boot pool")

        bename = time.strftime("freenas-boot/ROOT/default-%Y%m%d-%H%M%S")
        
    # Next, we create the dataset, and mount it, and then mount
    # the grub dataset.
    # We also mount a devfs and tmpfs in the new environment.

    LogIt("BE name is {}".format(bename))
    try:
        freenas_boot.create(bename, fsopts={
            "mountpoint" : "legacy",
            "sync"       : "disabled",
        })
    except libzfs.ZFSException as e:
        LogIt("Could not create BE {}: {}".format(bename, str(e)))
        if interactive:
            Dialog.MessageBox(Title(),
                              "An error occurred creatint the installation boot environment\n" +
                              "\n\t{}".format(str(e)),
                              height=25, width=60).run()
        raise InstallationError("Could not create BE {}: {}".format(bename, str(e)))
    
    MountFilesystems(bename, mount_point)
    # After this, any exceptions need to have the filesystems unmounted
    try:
        # If upgrading, copy the stashed files back
        if upgrade_dir:
            RestoreConfiguration(save_path=upgrade_dir,
                                 interactive=interactive,
                                 destination=mount_point)
        else:
            if os.path.exists(data_dir):
                try:
                    copytree(data_dir, "{}/data".format(mount_point),
                             progress_callback=lambda src, dst: LogIt("Copying {} -> {}".format(src, dst)))
                except:
                    pass
            # 
            # We should also handle some FN9 stuff
            # In this case, we want the newer database file, for migration purposes
            # XXX -- this is a problem when installing from FreeBSD
            for dbfile in ["freenas-v1.db", "factory-v1.db"]:
                if os.path.exists("/data/{}".format(dbfile)):
                    copytree("/data/{}".format(dbfile), "{}/data/{}".format(mount_point, dbfile))

        # After that, we do the installation.
        # This involves mounting the new BE,
        # and then running the install code on it.

        installer = Installer.Installer(manifest=manifest,
                                        root=mount_point,
                                        config=config)

        if installer.GetPackages() is not True:
            LogIt("Installer.GetPackages() failed")
            raise InstallationError("Unable to load packages")
        
        # This should only be true for the ISO installer.
        installer.trampoline = trampoline
        
        start_time = time.time()
        try:
            installer.InstallPackages(progressFunc=progress_notifier,
                                      handler=package_notifier)
        except BaseException as e:
            LogIt("InstallPackaages got exception {}".format(str(e)))
            raise InstallationError("Could not install packages")
        # Packages installed!
        if interactive:
            try:
                status = Dialog.MessageBox(Title(), "Preparing new boot environment",
                                           height=5, width=35, wait=False)
                status.clear()
                status.run()
            except:
                pass
        for f in ["{}/conf/default/etc/fstab".format(mount_point),
                  "{}/conf/base/etc/fstab".format(mount_point)
                  ]:
            try:
                os.remove(f)
            except:
                LogIt("Unable to remove {} -- ignoring".format(f))
                    
        try:
            with open("{}/etc/fstab".format(mount_point), "w") as fstab:
                print("freenas-boot/grub\t/boot/grub\tzfs\trw,noatime\t1\t0", file=fstab)
        except OSError as e:
            LogIt("Unable to create fstab: {}".format(str(e)))
            raise InstallationError("Unable to create filesystem table")
        try:
            os.link("{}/etc/fstab".format(mount_point),
                    "{}/conf/base/etc/fstab".format(mount_point))
        except OSError as e:
            LogIt("Unable to link /etc/fstab to /conf/base/etc/fstab: {}".format(str(e)))
            
        # Here, I should change module_path in boot/loader.conf, and get rid of the kernel line
        try:
            lines = []
            boot_config = "{}/boot/loader.conf".format(mount_point)
            with open(boot_config, "r") as bootfile:
                for line in bootfile:
                    line = line.rstrip()
                    if line.startswith("module_path="):
                        lines.append('module_path="/boot/kernel;/boot/modules;/usr/local/modules"')
                    elif line.startswith("kernel="):
                        lines.append('kernel="kernel"')
                    else:
                        lines.append(line)
            with open(boot_config, "w") as bootfile:
                for line in lines:
                    print(line, file=bootfile)
        except BaseException as e:
            LogIt("While modifying loader.conf, got exception {}".format(str(e)))
            # Otherwise I'll ignore it, I think
                        
        # This is to support Xen
        try:
            hvm = RunCommand("/usr/local/sbin/dmidecode", "-s", "system-product-name",
                             chroot=mount_point)
            if hvm == "HVM domU":
                with open(os.path.join(mount_point, "boot", "loader.conf.local"), "a") as f:
                    print('hint.hpet.0.clock="0"', file=f)
        except BaseException as e:
            LogIt("Got an exception trying to set XEN boot loader hint: {}".format(str(e)))
            
        # Now I have to mount a tmpfs on var
        try:
            LogIt("Mounting tmpfs on var")
            bsd.nmount(source="tmpfs",
                       fspath=os.path.join(mount_point, "var"),
                       fstype="tmpfs")
        except BaseException as e:
            LogIt("Got exception {} while trying to mount {}/var: {}".format(mount_point, str(e)))
            raise InstallationError("Unable to mount temporary space in newly-created BE")
        # Now we need to populate /var's directory skeleton from the mtree spec
        mtree_command = ["/usr/sbin/mtree", "-deUf"]
        if os.path.exists("/usr/sbin/mtree"):
            # Host mtree available: run it against the new BE from outside
            mtree_command.append("{}/etc/mtree/BSD.var.dist".format(mount_point))
            mtree_command.extend(["-p", "{}/var".format(mount_point)])
            chroot = None
        else:
            # Otherwise run the new BE's own mtree inside a chroot
            mtree_command.extend(["/etc/mtree/BSD.var.dist", "-p", "/var"])
            chroot = mount_point

        try:
            RunCommand(*mtree_command,
                       chroot=chroot)
        except RunCommandException as e:
            LogIt("{} (chroot={}) failed: {}".format(mtree_command, chroot, str(e)))
            raise InstallationError("Unable to prepare new boot environment")

        try:
            # Now we need to install grub
            # We do this even if we didn't format the disks.
            # But if we didn't format the disks, we need to use the same type
            # of boot loader.
            if interactive:
                try:
                    status = Dialog.MessageBox(Title(), "Installing boot loader",
                                               height=5, width=35, wait=False)
                    status.clear()
                    status.run()
                except:
                    pass
            # We've just repartitioned, so rescan geom
            geom.scan()
            # Set the boot dataset
            freenas_boot.properties["bootfs"].value = bename
            LogIt("Set bootfs to {}".format(bename))
            # This is EXTREMELY ANNOYING.
            # I'd like to use libzfs to set the property here, but
            # I have to do it in the chrooted environment, because otherwise
            # zfs returns an error and doesn't set it.
            #freenas_boot.properties["cachefile"].value = "/boot/zfs/rpool.cache"
            try:
                RunCommand("/sbin/zpool",
                           "set", "cachefile=/boot/zfs/rpool.cache",
                           "freenas-boot",
                           chroot=mount_point)
            except RunCommandException as e:
                LogIt("Got exception {} while trying to set cachefile".format(str(e)))
                raise InstallationException("Could not set cachefile on boot pool")
            LogIt("Set cachefile to /boot/zfs/rpool.cache")
            # We need to set the serial port stuff in the database before running grub,
            # because it'll use that in the configuration file it generates.
            try:
                SaveSerialSettings(mount_point)
            except:
                raise InstallationError("Could not save serial console settings")

            try:
                # All boot pool disks are partitioned using the same type.
                # Or the darkness rises and squit once again rule the earth.
                # (It's happened.)
                use_efi = Utils.BootPartitionType(freenas_boot.disks[0]) == "efi"
                InstallGrub(chroot=mount_point,
                            disks=freenas_boot.disks,
                            bename=bename, efi=use_efi)
            except RunCommandException as e:
                LogIt("Command {} failed: {} (code {})".format(e.command, e.message, e.code))
                raise InstallationError("Boot loader installation failure")
            except BaseException as e:
                LogIt("InstallGrub got exception {}".format(str(e)))
                raise
    
            if interactive:
                try:
                    status = Dialog.MessageBox(Title(), "Finalizing installation",
                                               height=5, width=35, wait=False)
                    status.clear()
                    status.run()
                except BaseException as e:
                    LogIt("Finalizing got exception {}".format(str(e)))
                    
            # This is FN9 specific
            with open("{}/data/first-boot".format(mount_point), "wb"):
                pass
            if upgrade:
                for sentinel in ["/data/cd-upgrade", "/data/need-update"]:
                    with open(mount_point + sentinel, "wb") as f:
                        pass
            elif password is not None:
                if interactive:
                    try:
                        status = Dialog.MessageBox(Title(), "\nSetting root password",
                                                   height=7, width=35, wait=False)
                        status.clear()
                        status.run()
                    except:
                        pass
                try:
                    RunCommand("/etc/netcli", "reset_root_pw", password,
                               chroot=mount_point)
                except RunCommandException as e:
                    LogIt("Setting root password: {}".format(str(e)))
                    raise InstallationError("Unable to set root password")
        except BaseException as e:
            LogIt("Got exception {} during configuration".format(str(e)))
            if interactive:
                try:
                    Dialog.MessageBox(Title(),
                                      "Error during configuration",
                                      height=7, width=35).run()
                except:
                    pass
            raise

        # Let's turn sync back to default for the dataset
        try:
            ds = zfs.get_dataset(bename)
        except libzfs.ZFSException as e:
            LogIt("Got ZFS error {} while trying to get {} dataset".format(str(e), bename))
            raise InstallationError("Could not fid newly-created BE {}".format(bename))
        try:
            ds.properties["sync"].inherit()
        except BaseException as e:
            LogIt("Unable to set sync on {} to inherit: {}".format(bename, str(e)))
            # That's all I'm going to do for now

        # We save the manifest
        manifest.Save(mount_point)

        # Okay!  Now if there are any post-install functions, we call them
        for fp in post_install:
            fp(mount_point=mount_point, **kwargs)

        # And we're done!
        end_time = time.time()
    except InstallationError as e:
        # This is the outer try block -- it needs to ensure mountpoints are
        # cleaned up
        LogIt("Outer block got error {}".format(str(e)))
        if interactive:
            try:
                Dialog.MessageBox("{} Installation Error".format(Project()),
                                  e.message,
                                  height=25, width=50).run()
            except:
                pass
        raise
    except BaseException as e:
        LogIt("Outer block got base exception {}".format(str(e)))
        raise
    finally:
        if package_dir is None:
            LogIt("Removing downloaded packages directory {}".format(cache_dir))
            shutil.rmtree(cache_dir, ignore_errors=True)
        UnmountFilesystems(mount_point)

    LogIt("Exporting freenas-boot at end of installation")
    try:
        zfs.export_pool(freenas_boot)
    except libzfs.ZFSException as e:
        LogIt("Could not export freenas boot: {}".format(str(e)))
        raise

    if interactive:
        total_time = int(end_time - start_time)
        Dialog.MessageBox(Title(),
                          "The {} installer has finished the installation in {} seconds".format(Project(), total_time),
                          height=8, width=40).run()
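
RunCommand(..., chroot=mount_point) appears throughout the installer. Its implementation isn't shown here; one common way to get the same effect with the standard library is a preexec_fn that enters the chroot in the child process (a sketch under that assumption; Unix-only, requires root):

import os
import subprocess

def run_in_chroot(root, *argv):
    # preexec_fn runs in the child between fork() and exec(), so the
    # chroot affects only the spawned command, not the installer itself.
    def enter():
        os.chroot(root)
        os.chdir('/')
    return subprocess.check_output(argv, preexec_fn=enter, text=True)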
Example #43
0
def os_func():
    '''
    Operating-system module.
        Cross-version compatibility of the methods below is not well defined;
        use them with care.
        Tested on: Python 3.6.1, Windows 10 64-bit.
    '''

    # === System ===
    strs = os.name  # current platform: 'posix' (Linux) / 'nt' (Windows) / 'ce' / 'java'
    strs = os.sep  # path separator: '\\' on Windows, '/' on Linux
    strs = os.pathsep  # PATH-variable separator: ';' on Windows, ':' on Linux
    strs = os.linesep  # line separator: '\r\n' on Windows, '\n' on Linux
    dics = os.environ  # the environment variables (a mapping)
    strs = os.getenv("Path", default=-1)  # read an environment variable; returns the default (None if not given) when unset
    os.putenv("Path", "C:\\python")  # set an environment variable (no effect on Windows)
    os.unsetenv("Path")  # delete an environment variable (unavailable on Windows)
    strs = os.getlogin()  # name of the currently logged-in user
    num = os.getpid()  # PID of the current process
    num = os.system("cmd")  # run a system command; returns 0 on success, nonzero on failure
    strs = os.popen("dir").read()  # run a system command and read its output
    tups = os.times()  # process times: user / system / children_user and
    # children_system (both 0 on Windows) / elapsed wall-clock time
    bytes = os.urandom(10)  # n random bytes suitable for cryptographic use
    num = os.cpu_count()  # number of CPUs

    # === Processes ===
    os.abort()  # abort the current process
    # execl(file, *args) / execle / execlp / execlpe / execv / execve / execvp / execvpe
    # // replace the current process with a new program
    os.execl(r"C:\python", 'python', 'hello.py', 'i')  # (fails on Windows)
    os._exit(0)  # exit immediately
    # (system) send a signal to a process (requires the signal module):
    # SIGINT / SIGTERM / SIGKILL (terminate the process) / SIGALRM (alarm signal)
    os.kill(8480, signal.SIGTERM)

    # === Files / directories ===
    strs = os.getcwd()  # current working directory
    bytes = os.getcwdb()  # current working directory, as bytes
    os.chdir(r"C:\Windows")  # change the working directory
    strs = os.curdir  # the current directory, '.'
    strs = os.pardir  # the parent directory, '..'
    strs = os.sep  # path separator ('/' or '\\')
    bytes = os.fsencode(r"C:\c.obj")  # (encode) path string to bytes => b'C:\\c.obj'
    strs = os.fsdecode(b"C:\c.obj")  # (decode) bytes path to str => 'C:\\c.obj'
    # chmod(path, mode, *, dir_fd=None, follow_symlinks=True)
    os.chmod(r"C:\python\hello.py", 777)  # change the file mode
    os.link("file.txt", "file.txt.link")  # create a hard link
    os.symlink("file.txt", "file.txt.link")  # create a soft link (fails on Windows)
    lists = os.listdir()  # all files and directories (list), e.g. "." ".." "D:"
    # lstat(path, *, dir_fd=None)
    tups = os.lstat(r"C:\c.obj")  # status information (without following symlinks)
    boolean = os.access(
        r"C:\c.obj", os.F_OK
    )  # (file/directory) permission test (mode: F_OK exists? / R_OK readable? / W_OK writable? / X_OK executable?)
    # scandir(path='.')  // iterator of DirEntry objects for the directory
    lists = os.scandir()
    # st_atime (access time) / st_ctime (metadata change time) / st_mtime (modification time) / st_size (size in bytes)
    # st_uid (user ID) / st_gid (group ID)
    # st_ino (inode) / st_mode (mode) / st_dev (device) / st_nlink (hard-link count)
    # count = cpu_count() # (system) number of CPU threads (not cores)
    tups = os.stat(".")  # status information; returns a stat_result object
    # utime(path, times=None, *, ns=None, dir_fd=None, follow_symlinks=True) // update access and modification times
    num = os.utime(r"C:\c.obj")
    # walk(top, topdown=True, onerror=None, followlinks=False) // walk the tree rooted at top; yields (dirpath, dirnames[], filenames[]) tuples
    root, dirnames, filenames = os.walk(r"C:\python")

    os.removedirs(r"c:\python")  # 删除多个文件夹 (windews删除多个文件夹失败,单个成功)
    # mkdir(path, mode=0o777, *, dir_fd=None) // 创建单个目录, 文件夹已存在,抛 FileExistsError 异常
    os.mkdir("test")
    # makedirs(name, mode=0o777, exist_ok=False)  // 创建(多个)文件夹
    os.makedirs(r"./t1/t2/t3")
    os.rmdir("test")  # 删除单个目录

    # mknod(path, mode=0o600, device=0, *, dir_fd=None) // 创建空文件 (windows不可用)
    os.mknod("test.txt")
    # remove(path, *, dir_fd=None)
    os.remove("test.txt")  # 删除文件
    # rename(src, dst, *, src_dir_fd=None, dst_dir_fd=None)
    os.rename("text.txt", "file.txt")  # 重命名
    os.renames("text.txt", "file.txt")
    # replace(src, dst, *, src_dir_fd=None, dst_dir_fd=None) // 重命名, 存在则替换
    os.replace("text.txt", "file.txt")
    tups = os.stat(r"text.txt")  # 文件属性

    # === File I/O ===
    # open(path, flags, mode=0o777, *, dir_fd=None) // open a file; fd is a file descriptor
    fd = os.open(r"C:\c.obj", os.O_RDWR | os.O_CREAT)
    readfd, writefd = os.pipe()  # open a pipe; returns (read, write) descriptors (fails on Windows)
    # fdopen(fd, *args, **kwargs) // wrap a descriptor in a file object (on Windows writing fails and reading returns "")
    f = os.fdopen(readfd)

    os.read(fd, 150)  # read
    os.write(fd, "String".encode("utf-8"))  # write
    os.fsync(fd)  # force the data to disk
    os.ftruncate(fd, 100)  # truncate the file
    bytes = os.lseek(
        fd, 10,
        os.SEEK_SET)  # move the file pointer: SEEK_SET (0, start) / SEEK_CUR (1, current) / SEEK_END (2, end)

    fd_temp = os.dup(fd)  # duplicate the descriptor
    boolean = os.isatty(fd)  # is this a tty device?

    stat = os.fstat(fd)  # status information
    strs = os.device_encoding(fd)  # the terminal's character encoding, or None if not a terminal

    os.close(fd)  # close
    os.closerange(fd, fd)  # close a range of descriptors, ignoring errors

    # === DirEntry ===
    for entry in os.scandir():
        strs = entry.name  # file name
        strs = entry.path  # full path
        num = entry.inode()  # inode number
        boolean = entry.is_dir()  # is it a directory?
        boolean = entry.is_file()  # is it a regular file?
        boolean = entry.is_symlink()  # is it a symlink?
        tups = entry.stat()  # stat_result object with status information
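
To complement the os.link/os.symlink lines above, a short contrast of how the two behave once the original name is removed (Unix; creating symlinks on Windows may require privileges):

import os

with open('data.txt', 'w') as f:
    f.write('x')
os.link('data.txt', 'hard.txt')     # a second name for the same inode
os.symlink('data.txt', 'soft.txt')  # a separate file that stores a path

os.remove('data.txt')
print(os.path.exists('hard.txt'))   # True  - the data lives on via the inode
print(os.path.exists('soft.txt'))   # False - the symlink now dangles
print(os.readlink('soft.txt'))      # 'data.txt' (the stored path remains)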
Example #44
0
def make_rpm(distro, build_os, arch, spec, srcdir):
    # Create the specfile.
    suffix = spec.suffix()
    sdir = setupdir(distro, build_os, arch, spec)

    specfile = srcdir + "rpm/mongodb%s.spec" % suffix
    init_spec = specfile.replace(".spec", "-init.spec")

    # The Debian directory is here for the manpages, so we need to remove the service file
    # from it so that RPM packages don't end up with the Debian file.
    os.unlink(sdir + "debian/mongod.service")

    # Swap out systemd files, different systemd spec files, and init scripts as needed based on
    # the underlying OS version. Arranged so that new distros moving forward automatically use
    # systemd. Note: the SUSE init packages use a different init script than the other RPM
    # distros.
    #
    if distro.name() == "suse" and distro.repo_os_version(build_os) in ("10",
                                                                        "11"):
        os.unlink(sdir + "rpm/init.d-mongod")
        os.link(sdir + "rpm/init.d-mongod.suse", sdir + "rpm/init.d-mongod")

        os.unlink(specfile)
        os.link(init_spec, specfile)
    elif distro.name() == "redhat" and distro.repo_os_version(build_os) in (
            "5", "6"):
        os.unlink(specfile)
        os.link(init_spec, specfile)
    elif distro.name() == "amazon":
        os.unlink(specfile)
        os.link(init_spec, specfile)

    topdir = ensure_dir('%s/rpmbuild/%s/' % (os.getcwd(), build_os))
    for subdir in ["BUILD", "RPMS", "SOURCES", "SPECS", "SRPMS"]:
        ensure_dir("%s/%s/" % (topdir, subdir))
    distro_arch = distro.archname(arch)
    # RPM tools take these macro files that define variables in
    # RPMland.  Unfortunately, there's no way to tell RPM tools to use
    # a given file *in addition* to the files that it would already
    # load, so we have to figure out what it would normally load,
    # augment that list, and tell RPM to use the augmented list.  To
    # figure out what macrofiles ordinarily get loaded, older RPM
    # versions had a parameter called "macrofiles" that could be
    # extracted from "rpm --showrc".  But newer RPM versions don't
    # have this.  To tell RPM what macros to use, older versions of
    # RPM have a --macros option that doesn't work; on these versions,
    # you can put a "macrofiles" parameter into an rpmrc file.  But
    # that "macrofiles" setting doesn't do anything for newer RPM
    # versions, where you have to use the --macros flag instead.  And
    # all of this is to let us do our work with some guarantee that
    # we're not clobbering anything that doesn't belong to us.
    #
    # On RHEL systems, --rcfile will generally be used and
    # --macros will be used in Ubuntu.
    #
    macrofiles = [
        l for l in backtick(["rpm", "--showrc"]).split("\n")
        if l.startswith("macrofiles")
    ]
    flags = []
    macropath = os.getcwd() + "/macros"

    write_rpm_macros_file(macropath, topdir, distro.release_dist(build_os))
    if len(macrofiles) > 0:
        macrofiles = macrofiles[0] + ":" + macropath
        rcfile = os.getcwd() + "/rpmrc"
        write_rpmrc_file(rcfile, macrofiles)
        flags = ["--rcfile", rcfile]
    else:
        # This hard-coded hooey came from some box running RPM
        # 4.4.2.3.  It may not work over time, but RPM isn't sanely
        # configurable.
        flags = [
            "--macros",
            "/usr/lib/rpm/macros:/usr/lib/rpm/%s-linux/macros:/usr/lib/rpm/suse/macros:/etc/rpm/macros.*:/etc/rpm/macros:/etc/rpm/%s-linux/macros:~/.rpmmacros:%s"
            % (distro_arch, distro_arch, macropath)
        ]
    # Put the specfile and the tar'd up binaries and stuff in
    # place.
    #
    # The version of rpm and rpm tools in RHEL 5.5 can't interpolate the
    # %{dynamic_version} macro, so do it manually
    with open(specfile, "r") as spec_source:
        with open(topdir + "SPECS/" + os.path.basename(specfile),
                  "w") as spec_dest:
            for line in spec_source:
                line = line.replace('%{dynamic_version}',
                                    spec.pversion(distro))
                line = line.replace('%{dynamic_release}', spec.prelease())
                spec_dest.write(line)

    oldcwd = os.getcwd()
    os.chdir(sdir + "/../")
    try:
        sysassert([
            "tar", "-cpzf", topdir + "SOURCES/mongodb%s-%s.tar.gz" %
            (suffix, spec.pversion(distro)),
            os.path.basename(os.path.dirname(sdir))
        ])
    finally:
        os.chdir(oldcwd)
    # Do the build.

    flags.extend([
        "-D", "dynamic_version " + spec.pversion(distro), "-D",
        "dynamic_release " + spec.prelease(), "-D", "_topdir " + topdir
    ])

    # Versions of RPM after 4.4 ignore our BuildRoot tag so we need to
    # specify it on the command line args to rpmbuild
    if ((distro.name() == "suse" and distro.repo_os_version(build_os) == "15")
            or (distro.name() == "redhat"
                and distro.repo_os_version(build_os) == "8")):
        flags.extend([
            "--buildroot",
            os.path.join(topdir, "BUILDROOT"),
        ])

    sysassert(["rpmbuild", "-ba", "--target", distro_arch] + flags +
              ["%s/SPECS/mongodb%s.spec" % (topdir, suffix)])
    repo_dir = distro.repodir(arch, build_os, spec)
    ensure_dir(repo_dir)

    # FIXME: see if some combination of shutil.copy<hoohah> and glob
    # can do this without shelling out.
    sysassert([
        "sh", "-c",
        "cp -v \"%s/RPMS/%s/\"*.rpm \"%s\"" % (topdir, distro_arch, repo_dir)
    ])
    return repo_dir
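
The repeated os.unlink/os.link pairs above all implement the same "select this variant of the file" switch. A small helper capturing the pattern (hypothetical name; both paths must be on one filesystem, as they are in the package tree):

import os

def select_variant(variant, dst):
    # Replace dst's directory entry with a fresh hard link to the chosen
    # variant.  Both paths must live on the same filesystem.
    if os.path.exists(dst):
        os.unlink(dst)
    os.link(variant, dst)

# e.g. select_variant(sdir + "rpm/init.d-mongod.suse", sdir + "rpm/init.d-mongod")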
Example #45
0
def fq2fq(f_in, f_in_type, f_out, f_out_type, link='symbolic', tmp_dir=None):
    """
    It converts qualities in a FASTQ file.

    f_in_type     ('sanger','solexa','illumina','auto-detect')
    f_out_type    ('sanger','solexa','illumina','illumina-1.5')

    link          ('soft','hard','copy')

    tmp_dir       temporary directory
    """
    if f_in_type.lower() == 'auto-detect':
        # detect the input FASTQ format type
        f_in_type = detect_fastq_format(f_in)
        print >> sys.stderr, "Auto-detect found " + f_in_type.upper(
        ) + " FASTQ format!"
    fit = 'fastq-' + f_in_type
    fot = ''
    if f_out_type == 'illumina-1.5':
        fot = 'fastq-illumina'
    else:
        fot = 'fastq-' + f_out_type
    if fit == fot and fit != '-':
        # input type is same as output type
        if os.path.isfile(f_out) or os.path.islink(f_out):
            os.remove(f_out)
        if link == 'soft':
            if os.path.islink(f_in):
                linkto = os.readlink(f_in)
                os.symlink(linkto, f_out)
            else:
                os.symlink(f_in, f_out)
        elif link == 'hard':
            linkto = f_in
            if os.path.islink(f_in):
                linkto = os.readlink(f_in)
            try:
                os.link(linkto, f_out)
            except OSError as er:
                print >> sys.stderr, "WARNING: Cannot do hard links ('%s' and '%s')!" % (
                    linkto, f_out)
                shutil.copyfile(linkto, f_out)


#                if er.errno == errno.EXDEV:
#                    # they are on different partitions
#                    # [Errno 18] Invalid cross-device link
#                    shutil.copyfile(linkto,f_out)
#                else:
#                    print >>sys.stderr,"ERROR: Cannot do hard links ('%s' and '%s')!" % (linkto,f_out)
#                    print >>sys.stderr,er
#                    sys.exit(1)

        elif link == 'copy':
            shutil.copyfile(f_in, f_out)
        else:
            print >> sys.stderr, "ERROR: unknown operation of linking!", link
            sys.exit(1)

    else:
        if hasattr(Bio.SeqIO, 'convert'):

            fin = f_in
            if f_in == "-":
                fin = sys.stdin
            elif f_in.lower().endswith('.gz'):
                fin = gzip.open(f_in, 'r')
            else:
                fin = open(f_in, 'r')

            fout = f_out
            if f_out == "-":
                fout = sys.stdout
            elif f_out.lower().endswith('.gz'):
                fout = gzip.open(f_out, 'w')
            else:
                fout = open(f_out, 'w')

            counts = Bio.SeqIO.convert(fin, fit, fout, fot)

            fin.close()
            fout.close()

        else:
            print >> sys.stderr, "Bio.SeqIO.convert() not supported!"
            print >> sys.stderr, "Trying to go around it!"
            if f_in == '-' or f_out == '-':
                print >> sys.stderr, "ERROR: BioPython library from Python is tool old! Please, upgrade it!"
                sys.exit(1)
            input_handle = open(f_in, "rU")
            output_handle = open(f_out, "w")
            sequences = Bio.SeqIO.parse(input_handle, fit)
            counts = Bio.SeqIO.write(sequences, output_handle, fot)
            output_handle.close()
            input_handle.close()
        if f_out_type == 'illumina-1.5':
            ftemp = give_me_temp_filename(tmp_dir)
            fi = open(f_out, 'rb')
            fo = open(ftemp, 'wb')
            size_buffer = 10**8
            i = 0
            while True:
                lines = fi.readlines(size_buffer)
                if not lines:
                    break
                lines = [
                    line.replace('@', 'B').replace('A', 'B') if
                    ((i + j + 1) % 4 == 0) else line
                    for (j, line) in enumerate(lines)
                ]
                i = i + len(lines)
                fo.writelines(lines)
            fi.close()
            fo.close()
            shutil.move(ftemp, f_out)

        print >> sys.stderr, "Converted %i records" % counts
Example #46
0
def _hardlink(src, dst):
    try:
        os.link(src, dst)
    except:
        _copy(src, dst)
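
The bare except above also hides permission and quota errors. A slightly stricter variant that falls back to copying only on the expected cross-filesystem failure (errno.EXDEV):

import errno
import os
import shutil

def hardlink_or_copy(src, dst):
    try:
        os.link(src, dst)
    except OSError as e:
        if e.errno != errno.EXDEV:  # only expected failure: different filesystems
            raise
        shutil.copy2(src, dst)      # fall back to a real copy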
Example #47
0
        output.append("-" + prev[old].key)
        old = old + 1
    topfile.write(version.to_line(len(output)) + "\n")
    for entry in output:
      topfile.write(entry + "\n")
    prev = msgs
  topfile.close()
  # For paranoia at the moment we're keeping a few revisions.   This
  # could almost certainly be done more elegantly, and it would be nice
  # to do versioning in subdirectories.
  try:
    os.unlink(topfile_name + ".older")
  except FileNotFoundError as e:
    pass
  try:
    os.link(topfile_name + ".old", topfile_name + ".older")
  except FileNotFoundError as e:
    pass
  else:
    os.unlink(topfile_name + ".old")
  try:
    os.link(topfile_name, topfile_name + ".old")
  except FileNotFoundError as e:
    pass
  else:
    os.unlink(topfile_name)
  os.link(topfile_name + ".new", topfile_name)
  os.unlink(topfile_name + ".new")

exit(0)
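
Since Python 3.3, each step of the link/unlink ladder above can also be written with os.replace, which renames atomically even when the destination exists. A sketch of the same two-revision rotation (not the script's code):

import os

def rotate(topfile_name):
    # Shift .old -> .older, current -> .old, then publish .new.
    for dst_suffix, src_suffix in ((".older", ".old"), (".old", "")):
        try:
            os.replace(topfile_name + src_suffix, topfile_name + dst_suffix)
        except FileNotFoundError:
            pass
    os.replace(topfile_name + ".new", topfile_name)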
Example #48
0
    def handle_event(self, event, filetype):
        # pylint: disable=redefined-variable-type
        rsa_public_key = None
        site = event.get("site")
        if not site:
            site = self.find_site_for_file(event["full_path"])

        encryption_key_id = self.config["backup_sites"][site][
            "encryption_key_id"]
        if encryption_key_id:
            rsa_public_key = self.config["backup_sites"][site][
                "encryption_keys"][encryption_key_id]["public"]

        compressed_blob = None
        if event.get("compress_to_memory"):
            output_obj = BytesIO()
            compressed_filepath = None
        else:
            compressed_filepath = self.get_compressed_file_path(
                site, filetype, event["full_path"])
            output_obj = NamedTemporaryFile(
                dir=os.path.dirname(compressed_filepath),
                prefix=os.path.basename(compressed_filepath),
                suffix=".tmp-compress")

        input_obj = event.get("input_data")
        if not input_obj:
            input_obj = open(event["full_path"], "rb")
        with output_obj, input_obj:
            if filetype == "xlog":
                wal.verify_wal(wal_name=os.path.basename(event["full_path"]),
                               fileobj=input_obj)

            original_file_size, compressed_file_size = rohmufile.write_file(
                input_obj=input_obj,
                output_obj=output_obj,
                compression_algorithm=self.config["compression"]["algorithm"],
                compression_level=self.config["compression"]["level"],
                rsa_public_key=rsa_public_key,
                log_func=self.log.info,
            )

            if compressed_filepath:
                os.link(output_obj.name, compressed_filepath)
            else:
                compressed_blob = output_obj.getvalue()

        if event.get("delete_file_after_compression", True):
            os.unlink(event["full_path"])

        metadata = event.get("metadata", {})
        metadata.update({
            "pg-version":
            self.config["backup_sites"][site].get("pg_version"),
            "compression-algorithm":
            self.config["compression"]["algorithm"],
            "compression-level":
            self.config["compression"]["level"],
            "original-file-size":
            original_file_size,
        })
        if encryption_key_id:
            metadata.update({"encryption-key-id": encryption_key_id})
        if compressed_filepath:
            metadata_path = compressed_filepath + ".metadata"
            write_json_file(metadata_path, metadata)

        self.set_state_defaults_for_site(site)
        self.state[site][filetype]["original_data"] += original_file_size
        self.state[site][filetype]["compressed_data"] += compressed_file_size
        self.state[site][filetype]["count"] += 1
        if original_file_size:
            size_ratio = compressed_file_size / original_file_size
            self.stats.gauge("pghoard.compressed_size_ratio",
                             size_ratio,
                             tags={
                                 "algorithm":
                                 self.config["compression"]["algorithm"],
                                 "site":
                                 site,
                                 "type":
                                 filetype,
                             })
        transfer_object = {
            "callback_queue": event.get("callback_queue"),
            "file_size": compressed_file_size,
            "filetype": filetype,
            "metadata": metadata,
            "opaque": event.get("opaque"),
            "site": site,
            "type": "UPLOAD",
        }
        if compressed_filepath:
            transfer_object["local_path"] = compressed_filepath
        else:
            transfer_object["blob"] = compressed_blob
            transfer_object["local_path"] = event["full_path"]

        self.transfer_queue.put(transfer_object)
        return True
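
Examples #48, #50, #53 and #58 all share one idiom: compress into a NamedTemporaryFile created in the destination directory (so both names live on the same filesystem), then os.link the temporary to its final name before the with-block deletes it. The finished file appears under its real name in a single step and never exists half-written. A minimal sketch of the idiom in isolation (publish is an illustrative name):

import os
from tempfile import NamedTemporaryFile

def publish(data, final_path):
    """Write bytes so that final_path only ever appears fully written."""
    with NamedTemporaryFile(dir=os.path.dirname(final_path),
                            suffix=".tmp") as tmp:
        tmp.write(data)
        tmp.flush()
        # Second name for the same inode; when __exit__ unlinks the
        # temporary, final_path keeps the finished file alive.
        os.link(tmp.name, final_path)
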
Example #49
0
def make_rpm(distro, build_os, arch, spec, srcdir):  # pylint: disable=too-many-locals
    """Create the RPM specfile."""
    suffix = spec.suffix()
    sdir = setupdir(distro, build_os, arch, spec)

    specfile = srcdir + "rpm/mongodb%s.spec" % suffix
    init_spec = specfile.replace(".spec", "-init.spec")

    # The Debian directory is here for the manpages, so we need to remove the service file
    # from it so that RPM packages don't end up with the Debian file.
    os.unlink(sdir + "debian/mongod.service")

    # Swap out systemd files, different systemd spec files, and init scripts as needed based on
    # underlying os version. Arranged so that new distros moving forward automatically use
    # systemd. Note: the SUSE init packages use a different init script than the other RPM
    # distros.
    #
    if distro.name() == "suse" and distro.repo_os_version(build_os) in ("10", "11"):
        os.unlink(sdir + "rpm/init.d-mongod")
        os.link(sdir + "rpm/init.d-mongod.suse", sdir + "rpm/init.d-mongod")

        os.unlink(specfile)
        os.link(init_spec, specfile)
    elif distro.name() == "redhat" and distro.repo_os_version(build_os) in ("5", "6"):
        os.unlink(specfile)
        os.link(init_spec, specfile)
    elif distro.name() == "amazon":
        os.unlink(specfile)
        os.link(init_spec, specfile)

    topdir = ensure_dir('%s/rpmbuild/%s/' % (os.getcwd(), build_os))
    for subdir in ["BUILD", "RPMS", "SOURCES", "SPECS", "SRPMS"]:
        ensure_dir("%s/%s/" % (topdir, subdir))
    distro_arch = distro.archname(arch)

    # The version of rpm and rpm tools in RHEL 5.5 can't interpolate the
    # %{dynamic_version} macro, so do it manually
    with open(specfile, "r") as spec_source:
        with open(topdir + "SPECS/" + os.path.basename(specfile), "w") as spec_dest:
            for line in spec_source:
                line = line.replace('%{dynamic_version}', spec.pversion(distro))
                line = line.replace('%{dynamic_release}', spec.prelease())
                spec_dest.write(line)

    oldcwd = os.getcwd()
    os.chdir(sdir + "/../")
    try:
        sysassert([
            "tar", "-cpzf",
            topdir + "SOURCES/mongodb%s-%s.tar.gz" % (suffix, spec.pversion(distro)),
            os.path.basename(os.path.dirname(sdir))
        ])
    finally:
        os.chdir(oldcwd)
    # Do the build.

    flags = [
        "-D",
        f"_topdir {topdir}",
        "-D",
        f"dist .{distro.release_dist(build_os)}",
        "-D",
        "_use_internal_dependency_generator 0",
        "-D",
        f"dynamic_version {spec.pversion(distro)}",
        "-D",
        f"dynamic_release {spec.prelease()}",
    ]

    # Versions of RPM after 4.4 ignore our BuildRoot tag so we need to
    # specify it on the command line args to rpmbuild
    if ((distro.name() == "suse" and distro.repo_os_version(build_os) == "15")
            or (distro.name() == "redhat" and distro.repo_os_version(build_os) == "8")):
        flags.extend([
            "--buildroot",
            os.path.join(topdir, "BUILDROOT"),
        ])

    sysassert(["rpmbuild", "-ba", "--target", distro_arch] + flags +
              ["%s/SPECS/mongodb%s.spec" % (topdir, suffix)])
    repo_dir = distro.repodir(arch, build_os, spec)
    ensure_dir(repo_dir)
    # FIXME: see if some combination of shutil.copy<hoohah> and glob
    # can do this without shelling out.
    sysassert(["sh", "-c", "cp -v \"%s/RPMS/%s/\"*.rpm \"%s\"" % (topdir, distro_arch, repo_dir)])
    return repo_dir
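
The unlink-then-link swap above is repeated three times; a small helper would cut the duplication (swap_in is an illustrative name, not part of the packaging script):

import os

def swap_in(src, dst):
    """Replace the existing file dst with a hard link to src."""
    os.unlink(dst)
    # os.link keeps src in place (unlike os.replace), so the init spec
    # and scripts stay available for later steps.
    os.link(src, dst)
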
Example #50
0
    def run_local_tar_basebackup(self):
        pgdata = self.config["backup_sites"][self.site]["pg_data_directory"]
        if not os.path.isdir(pgdata):
            raise errors.InvalidConfigurationError(
                "pg_data_directory {!r} does not exist".format(pgdata))

        temp_basebackup_dir, compressed_basebackup = self.get_paths_for_backup(
            self.basebackup_path)

        compression_algorithm = self.config["compression"]["algorithm"]
        compression_level = self.config["compression"]["level"]

        rsa_public_key = None
        encryption_key_id = self.config["backup_sites"][
            self.site]["encryption_key_id"]
        if encryption_key_id:
            rsa_public_key = self.config["backup_sites"][
                self.site]["encryption_keys"][encryption_key_id]["public"]

        self.log.debug("Connecting to database to start backup process")
        connection_string = connection_string_using_pgpass(
            self.connection_info)
        with psycopg2.connect(connection_string) as db_conn:
            cursor = db_conn.cursor()

            if self.pg_version_server >= 90600:
                # We'll always use the non-exclusive backup mode on 9.6 and newer
                cursor.execute("SELECT pg_start_backup(%s, false, false)",
                               [BASEBACKUP_NAME])
                backup_label = None
                backup_mode = "non-exclusive"
            else:
                # On older versions, first check if we're in recovery, and find out the version of a possibly
                # installed pgespresso extension.  We use pgespresso's backup control functions when they're
                # available, and require them in case we're running on a replica.  We also make sure the
                # extension version is 1.2 or newer to prevent crashing when using tablespaces.
                cursor.execute(
                    "SELECT pg_is_in_recovery(), "
                    "       (SELECT extversion FROM pg_extension WHERE extname = 'pgespresso')"
                )
                in_recovery, pgespresso_version = cursor.fetchone()
                if in_recovery and (not pgespresso_version
                                    or pgespresso_version < "1.2"):
                    raise errors.InvalidConfigurationError(
                        "pgespresso version 1.2 or higher must be installed "
                        "to take `local-tar` backups from a replica")

                if pgespresso_version and pgespresso_version >= "1.2":
                    cursor.execute("SELECT pgespresso_start_backup(%s, false)",
                                   [BASEBACKUP_NAME])
                    backup_label = cursor.fetchone()[0]
                    backup_mode = "pgespresso"
                else:
                    cursor.execute("SELECT pg_start_backup(%s)",
                                   [BASEBACKUP_NAME])
                    with open(os.path.join(pgdata, "backup_label"), "r") as fp:
                        backup_label = fp.read()
                    backup_mode = "legacy"

            backup_stopped = False
            try:
                # Look up tablespaces and resolve their current filesystem locations
                cursor.execute(
                    "SELECT oid, spcname FROM pg_tablespace WHERE spcname NOT IN ('pg_default', 'pg_global')"
                )
                tablespaces = {
                    spcname: {
                        "path":
                        os.readlink(os.path.join(pgdata, "pg_tblspc",
                                                 str(oid))),
                        "oid":
                        oid,
                    }
                    for oid, spcname in cursor.fetchall()
                }
                db_conn.commit()

                self.log.info("Starting to backup %r to %r", pgdata,
                              compressed_basebackup)
                start_time = time.monotonic()
                with NamedTemporaryFile(
                        dir=temp_basebackup_dir,
                        prefix="data.",
                        suffix=".tmp-compress") as raw_output_obj:
                    with rohmufile.file_writer(
                            fileobj=raw_output_obj,
                            compression_algorithm=compression_algorithm,
                            compression_level=compression_level,
                            rsa_public_key=rsa_public_key) as output_obj:
                        with tarfile.TarFile(fileobj=output_obj,
                                             mode="w") as output_tar:
                            self.write_init_entries_to_tar(
                                pgdata=pgdata,
                                tablespaces=tablespaces,
                                tar=output_tar)
                            files = self.find_files_to_backup(
                                pgdata=pgdata,
                                tablespaces=tablespaces)  # NOTE: generator
                            self.write_files_to_tar(files=files,
                                                    tar=output_tar)
                            self.write_pg_control_to_tar(pgdata=pgdata,
                                                         tar=output_tar)

                            # Call the stop backup functions now to get backup label for 9.6+ non-exclusive backups
                            if backup_mode == "non-exclusive":
                                cursor.execute(
                                    "SELECT labelfile FROM pg_stop_backup(false)"
                                )
                                backup_label = cursor.fetchone()[0]
                            elif backup_mode == "pgespresso":
                                cursor.execute(
                                    "SELECT pgespresso_stop_backup(%s)",
                                    [backup_label])
                            else:
                                cursor.execute("SELECT pg_stop_backup()")
                            db_conn.commit()
                            backup_stopped = True

                            backup_label_data = backup_label.encode("utf-8")
                            self.write_backup_label_to_tar(
                                tar=output_tar, backup_label=backup_label_data)

                        input_size = output_obj.tell()

                    os.link(raw_output_obj.name, compressed_basebackup)
                    result_size = raw_output_obj.tell()

                rohmufile.log_compression_result(
                    elapsed=time.monotonic() - start_time,
                    encrypted=bool(rsa_public_key),
                    log_func=self.log.info,
                    original_size=input_size,
                    result_size=result_size,
                    source_name=pgdata,
                )
            finally:
                db_conn.rollback()
                if not backup_stopped:
                    if backup_mode == "non-exclusive":
                        cursor.execute("SELECT pg_stop_backup(false)")
                    elif backup_mode == "pgespresso":
                        cursor.execute("SELECT pgespresso_stop_backup(%s)",
                                       [backup_label])
                    else:
                        cursor.execute("SELECT pg_stop_backup()")
                db_conn.commit()

        start_wal_segment, backup_start_time = self.parse_backup_label(
            backup_label_data)
        metadata = {
            "compression-algorithm": compression_algorithm,
            "encryption-key-id": encryption_key_id,
            "format": "pghoard-bb-v1",
            "original-file-size": input_size,
            "pg-version": self.pg_version_server,
            "start-time": backup_start_time,
            "start-wal-segment": start_wal_segment,
        }
        for spcname, spcinfo in tablespaces.items():
            metadata["tablespace-name-{}".format(spcinfo["oid"])] = spcname
            metadata["tablespace-path-{}".format(
                spcinfo["oid"])] = spcinfo["path"]

        self.transfer_queue.put({
            "callback_queue": self.callback_queue,
            "file_size": result_size,
            "filetype": "basebackup",
            "local_path": compressed_basebackup,
            "metadata": metadata,
            "site": self.site,
            "type": "UPLOAD",
        })
Example #51
0
        except Exception, e:
            error(extract_dents, 'Warn', 'DIR Fail: %s' % e)

        if 'dent' in inode:
            for dnode in inode['dent']:
                extract_dents(ubifs, inodes, dnode, dent_path, perms)

    elif dent_node.type == UBIFS_ITYPE_REG:
        try:
            if inode['ino'].nlink > 1:
                if 'hlink' not in inode:
                    inode['hlink'] = dent_path
                    buf = _process_reg_file(ubifs, inode, dent_path)
                    _write_reg_file(dent_path, buf)
                else:
                    os.link(inode['hlink'], dent_path)
                    log(extract_dents,
                        'Make Link: %s > %s' % (dent_path, inode['hlink']))
            else:
                buf = _process_reg_file(ubifs, inode, dent_path)
                _write_reg_file(dent_path, buf)

            if perms:
                _set_file_perms(dent_path, inode)

        except Exception, e:
            error(extract_dents, 'Warn', 'FILE Fail: %s' % e)

    elif dent_node.type == UBIFS_ITYPE_LNK:
        try:
            # probably will need to decompress ino data if > UBIFS_MIN_COMPR_LEN
Example #52
0
File: FS.py Project: ttcdt/pygruta
    def _update_index(self, story, delete=False):
        index = "%s/topics/.INDEX" % self.path

        lk = open(index + ".lck", "w")
        fcntl.flock(lk, fcntl.LOCK_EX)

        # open() raises on a missing file rather than returning None, so
        # catch that case explicitly to reach the rebuild branch below.
        try:
            oi = open(index, "r")
        except FileNotFoundError:
            oi = None

        if oi is not None:
            # new index
            ni = open(index + ".new", "w")

            t = story.get("topic_id")
            s = story.get("id")
            d = story.get("date")

            if delete is True:
                # null record entry
                r = None
            else:
                # record entry
                r = ":".join([
                    d,
                    t,
                    s,
                    ",".join(story.get("tags")),
                    story.get("udate")
                    ]) + "\n"

            # iterate current index
            for l in oi:
                tr = l.replace("\n", "").split(":")

                # if not already saved and this record
                # is older, store here and destroy
                if r is not None and d > tr[0]:
                    ni.write(r)
                    r = None

                # store this record if it's not this story
                if t != tr[1] or s != tr[2]:
                    ni.write(l)

            # not yet stored? do it now
            if r is not None:
                ni.write(r)

            # now swap
            try:
                os.unlink(index + ".old")
            except FileNotFoundError:
                pass

            os.link(index,            index + ".old")
            os.rename(index + ".new", index)

            # finally close and release lock
            oi.close()
            ni.close()

        else:
            # no index; create it
            l = []

            # loop over all stories
            for t in self.topics():
                for s in self.stories(t):
                    story = self.story(t, s)

                    r = [
                        story.get("date") or ("0" * 14),
                        t,
                        s,
                        ",".join(story.get("tags")),
                        story.get("udate")
                    ]

                    l.append(":".join(r))

            # reverse order
            l.sort(reverse=True)

            with open(index, "w") as ni:
                for r in l:
                    ni.write(r + "\n")

        lk.close()
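
The .lck sidecar plus fcntl.flock used above is a common advisory-locking pattern. A reusable sketch of the same idea as a context manager (locked is an illustrative name, not part of pygruta):

import fcntl
from contextlib import contextmanager

@contextmanager
def locked(path):
    """Hold an exclusive advisory lock on a sidecar .lck file."""
    with open(path + ".lck", "w") as lk:
        fcntl.flock(lk, fcntl.LOCK_EX)   # blocks until the lock is free
        try:
            yield
        finally:
            fcntl.flock(lk, fcntl.LOCK_UN)

# usage: with locked(index): ...read, rewrite and swap the index...
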
Example #53
0
    def tar_one_file(self,
                     *,
                     temp_dir,
                     chunk_path,
                     files_to_backup,
                     callback_queue,
                     filetype="basebackup_chunk",
                     extra_metadata=None):
        start_time = time.monotonic()

        site_config = self.config["backup_sites"][self.site]
        encryption_key_id = site_config["encryption_key_id"]
        if encryption_key_id:
            rsa_public_key = site_config["encryption_keys"][encryption_key_id][
                "public"]
        else:
            rsa_public_key = None

        with NamedTemporaryFile(dir=temp_dir,
                                prefix=os.path.basename(chunk_path),
                                suffix=".tmp") as raw_output_obj:
            # pylint: disable=bad-continuation
            with rohmufile.file_writer(
                    compression_algorithm=self.config["compression"]
                ["algorithm"],
                    compression_level=self.config["compression"]["level"],
                    compression_threads=site_config[
                        "basebackup_compression_threads"],
                    rsa_public_key=rsa_public_key,
                    fileobj=raw_output_obj) as output_obj:
                with tarfile.TarFile(fileobj=output_obj,
                                     mode="w") as output_tar:
                    self.write_files_to_tar(files=files_to_backup,
                                            tar=output_tar)

                input_size = output_obj.tell()

            result_size = raw_output_obj.tell()
            # Make the file persist over the with-block with this hardlink
            os.link(raw_output_obj.name, chunk_path)

        rohmufile.log_compression_result(
            encrypted=bool(encryption_key_id),
            elapsed=time.monotonic() - start_time,
            original_size=input_size,
            result_size=result_size,
            source_name="$PGDATA files ({})".format(len(files_to_backup)),
            log_func=self.log.info,
        )

        size_ratio = result_size / input_size
        self.metrics.gauge("pghoard.compressed_size_ratio",
                           size_ratio,
                           tags={
                               "algorithm":
                               self.config["compression"]["algorithm"],
                               "site": self.site,
                               "type": "basebackup",
                           })

        metadata = {
            "compression-algorithm": self.config["compression"]["algorithm"],
            "encryption-key-id": encryption_key_id,
            "format": "pghoard-bb-v2",
            "original-file-size": input_size,
            "host": socket.gethostname(),
        }
        if extra_metadata:
            metadata.update(extra_metadata)
        self.transfer_queue.put({
            "callback_queue": callback_queue,
            "file_size": result_size,
            "filetype": filetype,
            "local_path": chunk_path,
            "metadata": metadata,
            "site": self.site,
            "type": "UPLOAD",
        })

        # Get the name of the chunk and the name of the parent directory (ie backup "name")
        chunk_name = "/".join(chunk_path.split("/")[-2:])
        return chunk_name, input_size, result_size
Example #54
0
def link(space, src, dst):
    "Create a hard link to a file."
    try:
        os.link(src, dst)
    except OSError as e:
        raise wrap_oserror(space, e)
Example #55
0
def drive_APMrover2(binary,
                    viewerip=None,
                    use_map=False,
                    valgrind=False,
                    gdb=False,
                    frame=None,
                    params=None,
                    gdbserver=False,
                    speedup=10):
    """Drive APMrover2 in SITL.

    you can pass viewerip as an IP address to optionally send fg and
    mavproxy packets too for local viewing of the mission in real time
    """
    global homeloc

    if frame is None:
        frame = 'rover'

    options = '--sitl=127.0.0.1:5501 --out=127.0.0.1:19550 --streamrate=10'
    if viewerip:
        options += " --out=%s:14550" % viewerip
    if use_map:
        options += ' --map'

    home = "%f,%f,%u,%u" % (HOME.lat, HOME.lng, HOME.alt, HOME.heading)
    sitl = util.start_SITL(binary,
                           wipe=True,
                           model=frame,
                           home=home,
                           speedup=speedup)
    mavproxy = util.start_MAVProxy_SITL('APMrover2', options=options)

    progress("WAITING FOR PARAMETERS")
    mavproxy.expect('Received [0-9]+ parameters')

    # setup test parameters
    if params is None:
        params = vinfo.options["APMrover2"]["frames"][frame][
            "default_params_filename"]
    if not isinstance(params, list):
        params = [params]
    for x in params:
        mavproxy.send("param load %s\n" % os.path.join(testdir, x))
        mavproxy.expect('Loaded [0-9]+ parameters')
    set_parameter(mavproxy, 'LOG_REPLAY', 1)
    set_parameter(mavproxy, 'LOG_DISARMED', 1)

    # restart with new parms
    util.pexpect_close(mavproxy)
    util.pexpect_close(sitl)

    sitl = util.start_SITL(binary,
                           model='rover',
                           home=home,
                           speedup=speedup,
                           valgrind=valgrind,
                           gdb=gdb,
                           gdbserver=gdbserver)
    mavproxy = util.start_MAVProxy_SITL('APMrover2', options=options)
    mavproxy.expect(r'Telemetry log: (\S+)')
    logfile = mavproxy.match.group(1)
    progress("LOGFILE %s" % logfile)

    buildlog = util.reltopdir("../buildlogs/APMrover2-test.tlog")
    progress("buildlog=%s" % buildlog)
    if os.path.exists(buildlog):
        os.unlink(buildlog)
    try:
        os.link(logfile, buildlog)
    except Exception:
        pass

    mavproxy.expect('Received [0-9]+ parameters')

    util.expect_setup_callback(mavproxy, expect_callback)

    expect_list_clear()
    expect_list_extend([sitl, mavproxy])

    progress("Started simulator")

    # get a mavlink connection going
    try:
        mav = mavutil.mavlink_connection('127.0.0.1:19550',
                                         robust_parsing=True)
    except Exception as msg:
        progress("Failed to start mavlink connection on 127.0.0.1:19550" % msg)
        raise
    mav.message_hooks.append(message_hook)
    mav.idle_hooks.append(idle_hook)

    failed = False
    e = 'None'
    try:
        progress("Waiting for a heartbeat with mavlink protocol %s" %
                 mav.WIRE_PROTOCOL_VERSION)
        mav.wait_heartbeat()
        progress("Setting up RC parameters")
        setup_rc(mavproxy)
        progress("Waiting for GPS fix")
        mav.wait_gps_fix()
        homeloc = mav.location()
        progress("Home location: %s" % homeloc)
        if not arm_rover(mavproxy, mav):
            progress("Failed to ARM")
            failed = True
        if not drive_mission(mavproxy, mav, os.path.join(
                testdir, "rover1.txt")):
            progress("Failed mission")
            failed = True
        if not drive_brake(mavproxy, mav):
            progress("Failed brake")
            failed = True
        if not disarm_rover(mavproxy, mav):
            progress("Failed to DISARM")
            failed = True
        if not log_download(mavproxy, mav,
                            util.reltopdir("../buildlogs/APMrover2-log.bin")):
            progress("Failed log download")
            failed = True
#        if not drive_left_circuit(mavproxy, mav):
#            progress("Failed left circuit")
#            failed = True
#        if not drive_RTL(mavproxy, mav):
#            progress("Failed RTL")
#            failed = True

        # do not move this to be the first test.  MAVProxy's dedupe
        # function may bite you.
        progress("Getting banner")
        if not do_get_banner(mavproxy, mav):
            progress("FAILED: get banner")
            failed = True

        progress("Getting autopilot capabilities")
        if not do_get_autopilot_capabilities(mavproxy, mav):
            progress("FAILED: get capabilities")
            failed = True

        progress("Setting mode via MAV_COMMAND_DO_SET_MODE")
        if not do_set_mode_via_command_long(mavproxy, mav):
            failed = True

    except pexpect.TIMEOUT as e:
        progress("Failed with timeout")
        failed = True

    mav.close()
    util.pexpect_close(mavproxy)
    util.pexpect_close(sitl)

    valgrind_log = util.valgrind_log_filepath(binary=binary, model='rover')
    if os.path.exists(valgrind_log):
        os.chmod(valgrind_log, 0o644)
        shutil.copy(valgrind_log,
                    util.reltopdir("../buildlogs/APMrover2-valgrind.log"))

    if failed:
        progress("FAILED: %s" % e)
        return False
    return True
Example #56
0
def start(ds, **kwargs):
    # Read the user input from the POST
    global HOST_IP
    urllib3.disable_warnings()
    yaml = read_yaml(WORKDIR + '/INPUT.yaml')
    REC_ISO_IMAGE_NAME        = yaml['iso_primary']
    REC_PROVISIONING_ISO_NAME = yaml['iso_secondary']
    INPUT_YAML_URL            = yaml['input_yaml']
    HOST_IP                   = yaml['rc_host']
    CLOUDNAME                 = 'CL-'+POD.POD
    ISO                       = '%s/images/install-%s.iso' % (RI_DIR, POD.POD)
    BOOTISO                   = '%s/images/bootcd-%s.iso'  % (RI_DIR, POD.POD)
    USERCONF                  = '%s/user-configs/%s/user_config.yaml' % (RI_DIR, CLOUDNAME)

    print('-----------------------------------------------------------------------------------------------')
    print('                      POD is '+POD.POD)
    print('                CLOUDNAME is '+CLOUDNAME)
    print('                  WORKDIR is '+WORKDIR)
    print('                  HOST_IP is '+HOST_IP)
    print('             EXTERNALROOT is '+EXTERNALROOT)
    print('       REC_ISO_IMAGE_NAME is '+REC_ISO_IMAGE_NAME)
    print('REC_PROVISIONING_ISO_NAME is '+REC_PROVISIONING_ISO_NAME)
    print('           INPUT_YAML_URL is '+INPUT_YAML_URL)
    print('                      ISO is '+ISO)
    print('                  BOOTISO is '+BOOTISO)
    print('                 USERCONF is '+USERCONF)
    print('-----------------------------------------------------------------------------------------------')

    # Setup RI_DIR
    initialize_RI(CLOUDNAME)

    # Fetch the three files into WORKDIR
    fetchURL(REC_ISO_IMAGE_NAME,        WORKDIR + '/install.iso')
    fetchURL(REC_PROVISIONING_ISO_NAME, WORKDIR + '/bootcd.iso')
    fetchURL(INPUT_YAML_URL,            WORKDIR + '/user_config.yaml')

    # Link files to RI_DIR with unique names
    os.link(WORKDIR + '/install.iso', ISO)
    os.link(WORKDIR + '/bootcd.iso', BOOTISO)
    os.link(WORKDIR + '/user_config.yaml', USERCONF)
    PWFILE = '%s/user-configs/%s/admin_passwd' % (RI_DIR, CLOUDNAME)
    with open(PWFILE, "w") as f:
        f.write(ADMIN_PASSWD + '\n')

    # Start the remote_installer
    client = docker.from_env()
    namefilt = { 'name': RI_NAME }
    ri = client.containers.list(filters=namefilt)
    if len(ri) == 0:
        print(RI_NAME + ' is not running.')
        c = start_RI(client)

    else:
        print(RI_NAME + ' is running.')
        c = ri[0]

    # Send request to remote_installer
    id = send_request(HOST_IP, CLOUDNAME, ISO, BOOTISO)

    # Wait up to WAIT_TIME minutes for completion
    if wait_for_completion(HOST_IP, id, WAIT_TIME):
        print('Installation failed after %d minutes.' % (WAIT_TIME))
        sys.exit(1)

    # Remove the ISOs?
    if REMOVE_ISO:
        for iso in (WORKDIR + '/install.iso', ISO, WORKDIR + '/bootcd.iso', BOOTISO):
            os.unlink(iso)

    # Done!
    print('Installation complete!')
    # sys.exit(0)  Don't exit as this will cause the task to fail!
    return 'Complete.'
Example #57
0
    def install(self, spec, prefix):
        quux_cc = '''#include "quux.h"
#include "garply/garply.h"
#include "quux_version.h"
#include <iostream>
#include <stdexcept>

const int Quux::version_major = quux_version_major;
const int Quux::version_minor = quux_version_minor;

Quux::Quux() {}

int
Quux::get_version() const
{
    return 10 * version_major + version_minor;
}

int
Quux::quuxify() const
{
    int quux_version = get_version();
    std::cout << "Quux::quuxify version " << quux_version
              << " invoked" <<std::endl;
    std::cout << "Quux config directory is %s" <<std::endl;
    Garply garply;
    int garply_version = garply.garplinate();

    if (garply_version != quux_version) {
        throw std::runtime_error(
            "Quux found an incompatible version of Garply.");
    }

    return quux_version;
}
'''
        quux_h = '''#ifndef QUUX_H_
#define QUUX_H_

class Quux
{
private:
    static const int version_major;
    static const int version_minor;

public:
    Quux();
    int get_version() const;
    int quuxify() const;
};

#endif // QUUX_H_
'''
        quuxifier_cc = '''
#include "quux.h"
#include <iostream>

int
main()
{
    Quux quux;
    quux.quuxify();

    return 0;
}
'''
        quux_version_h = '''const int quux_version_major = %s;
const int quux_version_minor = %s;
'''
        mkdirp('%s/quux' % prefix.include)
        mkdirp('%s/quux' % self.stage.source_path)
        with open('%s/quux_version.h' % self.stage.source_path, 'w') as f:
            f.write(quux_version_h % (self.version[0], self.version[1:]))
        with open('%s/quux/quux.cc' % self.stage.source_path, 'w') as f:
            f.write(quux_cc % (prefix.config))
        with open('%s/quux/quux.h' % self.stage.source_path, 'w') as f:
            f.write(quux_h)
        with open('%s/quux/quuxifier.cc' % self.stage.source_path, 'w') as f:
            f.write(quuxifier_cc)
        gpp = which('/usr/bin/g++')
        if sys.platform == 'darwin':
            gpp = which('/usr/bin/clang++')
        gpp('-Dquux_EXPORTS', '-I%s' % self.stage.source_path,
            '-I%s' % spec['garply'].prefix.include, '-O2', '-g', '-DNDEBUG',
            '-fPIC', '-o', 'quux.cc.o', '-c', 'quux/quux.cc')
        gpp('-Dquux_EXPORTS', '-I%s' % self.stage.source_path,
            '-I%s' % spec['garply'].prefix.include, '-O2', '-g', '-DNDEBUG',
            '-fPIC', '-o', 'quuxifier.cc.o', '-c', 'quux/quuxifier.cc')
        if sys.platform == 'darwin':
            gpp('-fPIC', '-O2', '-g', '-DNDEBUG', '-dynamiclib',
                '-Wl,-headerpad_max_install_names', '-o', 'libquux.dylib',
                '-install_name', '@rpath/libquux.dylib', 'quux.cc.o',
                '-Wl,-rpath,%s' % prefix.lib64,
                '-Wl,-rpath,%s' % spec['garply'].prefix.lib64,
                '%s/libgarply.dylib' % spec['garply'].prefix.lib64)
            gpp('-O2', '-g', '-DNDEBUG', 'quuxifier.cc.o', '-o', 'quuxifier',
                '-Wl,-rpath,%s' % prefix.lib64,
                '-Wl,-rpath,%s' % spec['garply'].prefix.lib64, 'libquux.dylib',
                '%s/libgarply.dylib' % spec['garply'].prefix.lib64)
            mkdirp(prefix.lib64)
            copy('libquux.dylib', '%s/libquux.dylib' % prefix.lib64)
            os.link('%s/libquux.dylib' % prefix.lib64,
                    '%s/libquux.dylib.3.0' % prefix.lib64)
        else:
            gpp(
                '-fPIC', '-O2', '-g', '-DNDEBUG', '-shared',
                '-Wl,-soname,libquux.so', '-o', 'libquux.so', 'quux.cc.o',
                '-Wl,-rpath,%s:%s::::' %
                (prefix.lib64, spec['garply'].prefix.lib64),
                '%s/libgarply.so' % spec['garply'].prefix.lib64)
            gpp(
                '-O2', '-g', '-DNDEBUG', '-rdynamic', 'quuxifier.cc.o', '-o',
                'quuxifier', '-Wl,-rpath,%s:%s::::' %
                (prefix.lib64, spec['garply'].prefix.lib64), 'libquux.so',
                '%s/libgarply.so' % spec['garply'].prefix.lib64)
            mkdirp(prefix.lib64)
            copy('libquux.so', '%s/libquux.so' % prefix.lib64)
            os.link('%s/libquux.so' % prefix.lib64,
                    '%s/libquux.so.3.0' % prefix.lib64)
        copy('quuxifier', '%s/quuxifier' % prefix.lib64)
        copy('%s/quux/quux.h' % self.stage.source_path,
             '%s/quux/quux.h' % prefix.include)
        mkdirp(prefix.bin)
        copy('quux_version.h', '%s/quux_version.h' % prefix.bin)
        os.symlink('%s/quuxifier' % prefix.lib64, '%s/quuxifier' % prefix.bin)
        os.symlink('%s/garplinator' % spec['garply'].prefix.lib64,
                   '%s/garplinator' % prefix.bin)
Example #58
0
    def basebackup_compression_pipe(self, proc, basebackup_path):
        rsa_public_key = None
        encryption_key_id = self.config["backup_sites"][
            self.site]["encryption_key_id"]
        if encryption_key_id:
            rsa_public_key = self.config["backup_sites"][
                self.site]["encryption_keys"][encryption_key_id]["public"]
        compression_algorithm = self.config["compression"]["algorithm"]
        compression_level = self.config["compression"]["level"]
        self.log.debug("Compressing basebackup directly to file: %r",
                       basebackup_path)
        set_stream_nonblocking(proc.stderr)

        metadata = {
            "compression-algorithm": compression_algorithm,
            "encryption-key-id": encryption_key_id,
            "host": socket.gethostname(),
        }

        with NamedTemporaryFile(prefix=basebackup_path,
                                suffix=".tmp-compress") as output_obj:

            def extract_header_func(input_data):
                # backup_label should always be first in the tar ball
                if input_data[0:12].startswith(b"backup_label"):
                    # skip the 512 byte tar header to get to the actual backup label content
                    start_wal_segment, start_time = self.parse_backup_label(
                        input_data[512:1024])

                    metadata.update({
                        "start-wal-segment": start_wal_segment,
                        "start-time": start_time
                    })

            def progress_callback():
                stderr_data = proc.stderr.read()
                if stderr_data:
                    self.latest_activity = datetime.datetime.utcnow()
                    self.log.debug("pg_basebackup stderr: %r", stderr_data)

            original_input_size, compressed_file_size = rohmufile.write_file(
                input_obj=proc.stdout,
                output_obj=output_obj,
                compression_algorithm=compression_algorithm,
                compression_level=compression_level,
                rsa_public_key=rsa_public_key,
                progress_callback=progress_callback,
                log_func=self.log.info,
                header_func=extract_header_func)
            os.link(output_obj.name, basebackup_path)

        if original_input_size:
            size_ratio = compressed_file_size / original_input_size
            self.metrics.gauge("pghoard.compressed_size_ratio",
                               size_ratio,
                               tags={
                                   "algorithm": compression_algorithm,
                                   "site": self.site,
                                   "type": "basebackup",
                               })

        return original_input_size, compressed_file_size, metadata
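
extract_header_func above leans on the tar format: every member is preceded by a 512-byte header block, so the content of a leading backup_label member starts at offset 512 of the stream. The same extraction done with the tarfile module instead of raw offsets, as a sketch (first_member is an illustrative name):

import io
import tarfile

def first_member(blob):
    """Return (name, content) of the first member of an in-memory tar."""
    with tarfile.open(fileobj=io.BytesIO(blob)) as tar:
        member = tar.next()   # consumes the leading 512-byte header block
        return member.name, tar.extractfile(member).read()
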
Example #59
0
    tc = trpc.Client(host, port, user, password)
    torrent = tc.get_torrents(hash)[0]
    for each in torrent.files().values():
        if each['completed'] == each['size']:
            if (each['name'].endswith(('.rar', '.avi', '.mkv', '.mp4'))
                    and 'sample' not in each['name'].lower()
                    and '/subs' not in each['name'].lower()):
                if each['name'].endswith('.rar'):
                    file = os.path.basename(each['name'])
                    if 'part' not in each['name'] or 'part01' in each['name']:
                        print file
                        print dir + each['name']
                        print output
                        subprocess.call([
                            '7z', 'x', dir + each['name'], '-aos',
                            '-o' + output
                        ])
                        print 'Successfully extracted {}'.format(dir +
                                                                 each['name'])
                        if 'tv_shows' in type:
                            autoProcessTV.processEpisode(output)
                else:
                    file = os.path.basename(each['name'])
                    os.link(dir + each['name'], output + file)
                    if 'tv_shows' in type:
                        autoProcessTV.processEpisode(output, file)

except KeyError, e:
    print "Environment Variables not supplied - is this being called from Transmission?"
    sys.exit()
        #dst_dir = os.path.join(dst, platform, year, month, day, pair_folder)
        dst_dir = os.path.join(dst, platform, year, month, day)

        if not os.path.isdir(dst_dir):
            if not args.dryrun:
                os.makedirs(dst_dir)

        for ifp in glob.glob(
                os.path.join(raster.srcdir, raster.stripid) + "*"):
            ofp = os.path.join(dst_dir, os.path.basename(ifp))
            if os.path.isfile(ofp) and args.overwrite:
                logger.debug("Copying {} to {}".format(ifp, ofp))
                if not args.dryrun:
                    os.remove(ofp)
                    if args.try_link:
                        try:
                            os.link(ifp, ofp)
                        except OSError:
                            # cross-device or unsupported link: fall back to copy
                            shutil.copy2(ifp, ofp)
                    else:
                        shutil.copy2(ifp, ofp)

            elif not os.path.isfile(ofp):
                logger.debug("Copying {} to {}".format(ifp, ofp))
                if not args.dryrun:
                    if args.try_link:
                        try:
                            os.link(ifp, ofp)
                        except OSError:
                            # cross-device or unsupported link: fall back to copy
                            shutil.copy2(ifp, ofp)
                    else:
                        shutil.copy2(ifp, ofp)

            else:
                logger.warning("Cannot copy {} to {}: destination exists "
                               "and overwrite is not enabled".format(ifp, ofp))

    logger.info('Done')