def EmptyDir(d):
    """Make sure directory *d* exists and contains nothing.

    A missing directory is created; an existing one has every file and
    subdirectory underneath it deleted (the directory itself is kept).
    ``None`` is silently ignored.
    """
    if debugMode():
        print("EmptyDir", d)

    if d is None:  # FIX: identity comparison instead of `== None`
        return

    if os.path.isdir(d):
        # Walk bottom-up so each directory is already empty when removed.
        # (The old version walked top-down and re-invoked EmptyDir on every
        # subdirectory, deleting the same files repeatedly and needing a
        # second full walk just to rmdir the subdirectories.)
        for root, subdirs, filenames in os.walk(d, topdown=False):
            for name in filenames:
                ff = os.path.join(root, name)
                os.remove(ff)
                if debugMode():
                    print("  removed", ff)
            for name in subdirs:
                os.rmdir(os.path.join(root, name))
    else:
        os.mkdir(d)
        print("created", d)

    if debugMode():
        print("all files deleted from", d)
Example #2
0
    def _delete(self):
        """Delete the written file (if any), then prune the created folder
        chain from the deepest level outward until a non-empty folder is hit.
        """
        # Re-derive the folder chain before deleting anything.
        self._root()

        # Remove the file only if we actually wrote it.
        if self.written:
            os.remove(self.filename)
            self.written = False

        # Deletes created folders until a folder that is not empty is
        # encountered.
        while self.roots:
            # Current deepest path still under our management.
            current_dir = ''.join('%s/' % folder for folder in self.roots)

            # Only remove it when nothing else lives inside it.
            if not os.listdir(current_dir):
                os.rmdir(current_dir)
                del self.roots[-1]
            else:
                break
Example #3
0
 def do_mount(self, args):
     """Mount the in-memory filesystem at self.mp via FUSE.

     Starts a UI window thread and a background-upload thread, then blocks
     in memfuse.mount() until the filesystem is unmounted.  `args` comes
     from the cmd-loop handler signature and is not used here.
     """
     if self.dbg:
         print("DEBUG: Mount called")
     # FUSE is mandatory for mounting; bail out early when unavailable.
     if self.fuse_enabled is False:
         print("ERROR: Only able to mount on systems with fuse installed")
         return
     # Imported lazily so systems without fuse can still load this module.
     from .File_System import memfuse
     self.fuseFS = memfuse.MemFS(self.fs)
     if self.dbg:
         print("DEBUG: MemFS has been created")
     # Create the mountpoint if needed; remember whether we made it so we
     # only remove it when it was ours.
     newDir = False
     if not os.path.exists(self.mp):
         os.makedirs(self.mp)
         newDir = True
     if self.dbg:
         print("DEBUG: Mountpoint has been prepared")
     window = Thread(target=self.open_window)
     monitor = Thread(target=self.background_upload)
     window.start()
     if self.dbg:
         print("DEBUG: Window opened, about to mount")
     # memfuse.mount() blocks for the lifetime of the mount; self.mounted
     # brackets that interval (presumably polled by background_upload —
     # TODO confirm).
     self.mounted = True
     monitor.start()
     memfuse.mount(self.fuseFS, self.mp)
     self.mounted = False
     # Only remove the mountpoint if we created it above.
     if newDir:
         os.rmdir(self.mp)
     if not self.cmdloopused:
         print("STATUS: Uploading all files to online")
         self.do_uploadfs(self)
Example #4
0
            def _create_and_do_getcwd(dirname, current_path_length = 0):
                """Recursively create *dirname* and chdir into it until
                getcwd() fails (or the path exceeds ~4099 chars), asserting
                that the failure errno matches the platform's expectation.
                """
                try:
                    os.mkdir(dirname)
                # FIX: narrowed from a bare `except:` — mkdir failures are
                # OSError; the bare form also swallowed KeyboardInterrupt
                # and SystemExit.
                except OSError:
                    self.skipTest("mkdir cannot create directory sufficiently "
                                  "deep for getcwd test")

                os.chdir(dirname)
                try:
                    os.getcwd()
                    if current_path_length < 4099:
                        _create_and_do_getcwd(dirname, current_path_length + len(dirname) + 1)
                except OSError as e:
                    expected_errno = errno.ENAMETOOLONG
                    # The following platforms have quirky getcwd()
                    # behaviour -- see issue 9185 and 15765 for
                    # more information.
                    quirky_platform = (
                        'sunos' in sys.platform or
                        'netbsd' in sys.platform or
                        'openbsd' in sys.platform
                    )
                    if quirky_platform:
                        expected_errno = errno.ERANGE
                    self.assertEqual(e.errno, expected_errno)
                finally:
                    # always back out and remove the level we created
                    os.chdir('..')
                    os.rmdir(dirname)
Example #5
0
def rrmdir(directory):
    """Delete *directory* recursively using a bottom-up walk."""
    walker = os.walk(directory, topdown=False)
    for parent, child_dirs, child_files in walker:
        # files first, so every directory is empty by the time we reach it
        for fname in child_files:
            os.remove(os.path.join(parent, fname))
        for dname in child_dirs:
            os.rmdir(os.path.join(parent, dname))
    os.rmdir(directory)
Example #6
0
def hadoop_jar(stdout, stderr, environ, *args):
    """Simulate the `hadoop jar` command for tests.

    Returns 0 on success, -1 on usage or missing-jar errors.  Only streaming
    jars are actually simulated: the mock hadoop output directory is moved
    into the job's -output location.
    """
    if len(args) < 1:
        stderr.write('RunJar jarFile [mainClass] args...\n')
        return -1

    jar_path = args[0]
    if not os.path.exists(jar_path):
        stderr.write(
            'Exception in thread "main" java.io.IOException: Error opening job'
            ' jar: %s\n' % jar_path)
        return -1

    # only simulate for streaming steps
    if HADOOP_STREAMING_JAR_RE.match(os.path.basename(jar_path)):
        streaming_args = args[1:]
        # BUG FIX: .index() raises ValueError rather than returning -1, so
        # the old `assert output_idx != -1` was dead code.  Assert membership
        # explicitly before looking the index up (tuple.index works directly;
        # the list() copy was unnecessary).
        assert '-output' in streaming_args
        output_idx = streaming_args.index('-output')
        output_dir = streaming_args[output_idx + 1]
        real_output_dir = hdfs_path_to_real_path(output_dir, environ)

        mock_output_dir = get_mock_hadoop_output()
        if mock_output_dir is None:
            stderr.write('Job failed!')
            return -1

        # NOTE(review): os.rmdir only removes an *empty* directory — this
        # assumes any pre-existing real_output_dir is empty; confirm callers.
        if os.path.isdir(real_output_dir):
            os.rmdir(real_output_dir)

        shutil.move(mock_output_dir, real_output_dir)

    now = datetime.datetime.now()
    stderr.write(now.strftime('Running job: job_%Y%m%d%H%M_0001\n'))
    stderr.write('Job succeeded!\n')
    return 0
Example #7
0
def move_desktop_file(root, target_data, prefix):
    # The desktop file is rightly installed into install_data.  But it should
    # always really be installed into prefix, because while we can install
    # normal data files anywhere we want, the desktop file needs to exist in
    # the main system to be found.  Only actually useful for /opt installs.
    #
    # Returns the final desktop-file path; exits the process with status 1
    # when the installed desktop file is missing or cannot be moved.

    old_desktop_path = os.path.normpath(root + target_data +
                                        '/share/applications')
    old_desktop_file = old_desktop_path + '/remarkable.desktop'
    desktop_path = os.path.normpath(root + prefix + '/share/applications')
    desktop_file = desktop_path + '/remarkable.desktop'

    if not os.path.exists(old_desktop_file):
        print("ERROR: Can't find", old_desktop_file)
        sys.exit(1)
    elif target_data != prefix + '/':
        # This is an /opt install, so rename desktop file to use extras-
        desktop_file = desktop_path + '/extras-remarkable.desktop'
        try:
            # BUG FIX: os.makedirs raises OSError when the target already
            # exists, which previously aborted re-installs; only create it
            # when missing.
            if not os.path.isdir(desktop_path):
                os.makedirs(desktop_path)
            os.rename(old_desktop_file, desktop_file)
            os.rmdir(old_desktop_path)
        except OSError as e:
            print("ERROR: Can't rename", old_desktop_file, ":", e)
            sys.exit(1)

    return desktop_file
Example #8
0
 def __del__(self):
     """On destruction, delete every queued file, then the queued folder."""
     for doomed in self.files4del:
         os.remove(doomed)
     self.files4del = []
     # the tracked folder is expected to be empty once its files are gone
     if self.folder4del:
         os.rmdir(self.folder4del)
     self.folder4del = None
Example #9
0
def do_rmdir(path):
    """Remove directory *path*, tolerating it being already absent.

    ENOENT is logged and swallowed; any other failure (e.g. ENOTEMPTY) is
    logged with traceback and re-raised.
    """
    try:
        os.rmdir(path)
    # FIX: modern `as` syntax, and narrowed from Exception to OSError —
    # non-OS exceptions have no .errno attribute and previously raised
    # AttributeError inside this handler.
    except OSError as err:
        logging.exception("Rmdir failed on %s err: %s", path, str(err))
        if err.errno != errno.ENOENT:
            raise
Example #10
0
 def lock(self, layer, coord, format):
     """ Acquire a cache lock for this tile.
     
         Returns nothing, but blocks until the lock has been acquired.
         Lock is implemented as an empty directory next to the tile file.
     """
     lockpath = self._lockpath(layer, coord, format)
     # give up on a stale lock after the layer's timeout
     due = time.time() + layer.stale_lock_timeout
     
     while True:
         # try to acquire a directory lock, repeating if necessary.
         try:
             # apply our umask so the lock dir is created with sane perms
             umask_old = os.umask(self.umask)
             
             if time.time() > due:
                 # someone left the door locked.
                 os.rmdir(lockpath)
             
             # mkdir is atomic: success means we now hold the lock
             # (0777 octal literal and `except X, e` are Python 2 syntax)
             os.makedirs(lockpath, 0777&~self.umask)
             break
         except OSError, e:
             # errno 17 == EEXIST: somebody else holds the lock; retry.
             if e.errno != 17:
                 raise
             time.sleep(.2)
         finally:
             # NOTE(review): the finally body is truncated in this chunk —
             # presumably it restores the saved umask via
             # os.umask(umask_old); confirm against the full source.
Example #11
0
    def unlock(self, layer, coord, format):
        """Release this tile's cache lock.

        The lock is an empty directory next to the tile file, so releasing
        it is simply removing that directory.
        """
        os.rmdir(self._lockpath(layer, coord, format))
def temp_shapefile_from_zip(zip_path):
    """
    Given a path to a ZIP file, unpack it into a temp dir and return the path
    to the shapefile that was in there.  Doesn't clean up after itself unless
    there was an error.

    If you want to cleanup later, you can derive the temp dir from this path.
    """
    zf = ZipFile(zip_path)
    tempdir = mkdtemp()
    shape_path = None
    # Copy the zipped files to a temporary directory, preserving names.
    for name in zf.namelist():
        data = zf.read(name)
        outfile = os.path.join(tempdir, name)
        if name.endswith('.shp'):
            shape_path = outfile
        # BUG FIX: zf.read() returns bytes, so members must be written in
        # binary mode — text mode 'w' fails on Python 3 and corrupts data
        # on Windows.  `with` guarantees the handle is closed.
        with open(outfile, 'wb') as f:
            f.write(data)
    # FIX: close the archive instead of leaking the handle.
    zf.close()

    if shape_path is None:
        log.warn("No shapefile, cleaning up")
        # Clean up after ourselves.
        for file in os.listdir(tempdir):
            os.unlink(os.path.join(tempdir, file))
        os.rmdir(tempdir)
        raise ValueError("No shapefile found in zip")

    return shape_path
Example #13
0
    def tearDown(self):
        """Shut down proxy/server fixtures and clean reactor state.

        Returns a Deferred that fires once the server port has stopped
        listening and the remaining teardown has run.
        """
        def cbStopListening(result=None):
            # runs only after the server has fully stopped listening
            self.root = None
            self.site = None
            self.proxy.factory.stopFactory()
            self.server_factory.stopFactory()
            self.server = None
            self._cleanPending()
            self._cleanSelectables()

        os.rmdir("./html") # remove directory from _trial_temp
        self.b.service.poll_timeouts.stop()
        self.b.service.stopService()
        self.p.stopListening()
        # end any sessions the test left behind
        for s in self.b.service.sessions.keys():
            sess = self.b.service.sessions.get(s)
            if sess:
                self.b.service.endSession(sess)
        # a client may still be mid-connect; stop it to avoid a dirty reactor
        if hasattr(self.proxy.factory,'client'):
            self.proxy.factory.client.transport.stopConnecting()
        self.server_factory.protocol.delay_features = 0

        # stopListening may return a Deferred; chain the rest of the cleanup
        d = defer.maybeDeferred(self.server.stopListening)
        d.addCallback(cbStopListening)

        return d
Example #14
0
    def make_target_directory(self, path):
        """Prepare *path* as an empty target directory and stage content
        through a scratch dir, moving it into place on success.

        Generator/context body: yields a scratch directory for the caller to
        populate; on error the scratch dir is removed and the exception is
        re-raised, otherwise its contents are moved into *path*.
        """
        path = os.path.abspath(path)
        try:
            os.makedirs(path)
        except OSError as e:
            # BUG FIX: a pre-existing directory used to abort here even
            # though the emptiness check below already handles that case.
            # Abort only when the directory truly could not be created.
            if not os.path.isdir(path):
                self.abort('Could not create target folder: %s' % e)

        if os.path.isdir(path):
            try:
                if len(os.listdir(path)) != 0:
                    raise OSError('Directory not empty')
            except OSError as e:
                self.abort('Bad target folder: %s' % e)

        scratch = os.path.join(tempfile.gettempdir(), uuid.uuid4().hex)
        os.makedirs(scratch)
        try:
            yield scratch
        except:
            # bare except is deliberate: any exception thrown into the
            # generator must clean up the scratch dir and propagate
            shutil.rmtree(scratch)
            raise
        else:
            # Use shutil.move here in case we move across a file system
            # boundary.
            for filename in os.listdir(scratch):
                # Python 2 compatibility for unicode target paths
                if isinstance(path, unicode):
                    filename = filename.decode(fs_enc)
                shutil.move(os.path.join(scratch, filename),
                            os.path.join(path, filename))
            os.rmdir(scratch)
Example #15
0
def remove_directory(path):
    """Recursively delete *path*, tolerating read-only files and symlinks."""

    # Python 2: coerce to unicode; under Python 3 `unicode` is undefined
    # and this is a no-op.
    try:
        path = unicode(path)
    except NameError:
        pass

    # shutil.rmtree fails on broken symlinks, so traverse the tree from the
    # bottom up instead, as recommended in
    # http://stackoverflow.com/a/2656408

    for parent, subdirs, filenames in os.walk(path, topdown=False):
        for entry in filenames:
            target = os.path.join(parent, entry)
            # clear the read-only bit so removal cannot fail (skip links:
            # chmod would follow them)
            if not os.path.islink(target):
                os.chmod(target, stat.S_IWUSR)
            os.remove(target)
        for entry in subdirs:
            target = os.path.join(parent, entry)
            # symlinked directories are unlinked, real ones removed
            if os.path.islink(target):
                os.unlink(target)
            else:
                os.rmdir(target)

    os.rmdir(path)
Example #16
0
 def test_setup_make_voldir(self):
     """The directory for volumes shall be created automatically."""
     self._set_access_wrapper(True)
     volume_dir = os.path.join(self.TEST_MOUNT, self.TEST_VOLDIR)
     # delete the directory first so do_setup() is forced to recreate it
     os.rmdir(volume_dir)
     self._driver.do_setup(None)
     self.assertTrue(os.path.isdir(volume_dir))
Example #17
0
def deldir(path):
    """Recursively remove a directory (Python 2: uses os.path.walk).

    Slightly more complicated than shutil.rmtree because of a strange AFS
    behavior that creates .__afsXXXXX files while directories are being
    deleted.
    """

    dirs = []

    def cleandir(arg, path, names):
        # remove plain files now; remember the directory for later removal
        for name in names:
            filename = os.path.join(path, name)
            if os.path.isfile(filename):
                os.remove(filename)
        dirs.append(path)

    # remove files (os.path.walk visits top-down, so parents precede
    # children in `dirs`)
    os.path.walk(path, cleandir, "")

    # remove directories, deepest first.
    # BUG FIX: the old loop indexed dirs[-i] with i starting at 0, and
    # dirs[-0] == dirs[0], so it hit the *top* (still non-empty) directory
    # first — whose rmdir failure then made the retry loop spin forever —
    # and the last directory was never processed at all.
    for d in reversed(dirs):
        # AFS work around: late-appearing .__afsXXXXX files
        afsFiles = list_files(d)
        for f in afsFiles:
            os.remove(f)

        # NOTE(review): this retries forever if rmdir keeps failing for a
        # reason other than AFS stragglers; consider a bounded retry.
        while True:
            try:
                if os.path.exists(d):
                    os.rmdir(d)
            except Exception:
                continue
            break
Example #18
0
 def tearDown(self):
   """Delete every recorded path, children before parents.

   self.torm holds (path, kind) pairs where a truthy kind marks a
   directory (rmdir) and a falsy one a file (unlink).
   """
   self.torm.reverse()
   for path, is_dir in self.torm:
     if is_dir:
       os.rmdir(path)
     else:
       os.unlink(path)
Example #19
0
 def _assert_tar_count_equals(self, file_name, count):
     """Assert the tar at *file_name* holds exactly *count* members.

     A non-tar file is accepted when it is a short status message
     containing "no " and *count* is 0; any other non-tar input raises.
     """
     def _purge(dir_path):
         # remove every file in dir_path, then the directory itself
         # (deduplicates the two identical cleanup sequences the old
         # version carried)
         for entry in os.listdir(dir_path):
             os.remove(os.path.join(dir_path, entry))
         os.rmdir(dir_path)

     if not tarfile.is_tarfile(file_name):
         # Maybe it's not a tar because it's a status message.
         fin = open(file_name, 'r')
         contents = fin.read(256)
         fin.close()
         if contents.lower().find("no ") != -1:
             self.assertEqual(0, count)
             return
         raise Exception("%s is not a tar file" % file_name)

     tar = tarfile.open(file_name)
     tmp_dir = "unit_test_tmp"
     # clear leftovers from an earlier failed run
     if os.path.exists(tmp_dir):
         _purge(tmp_dir)
     os.mkdir(tmp_dir)
     tar.extractall(path=tmp_dir)
     tar.close()
     filenames = os.listdir(tmp_dir)
     try:
         self.assertEqual(len(filenames), count)
     finally:
         # clean up
         _purge(tmp_dir)
Example #20
0
 def tearDown(self):
     """Per-test cleanup: release the SD object, delete the generated test
     file and its directory, then restore DMWarning handling."""
     super(converterTestsCDF, self).tearDown()
     del self.SDobj
     # the test may not have written the file
     if os.path.exists(self.testfile):
         os.remove(self.testfile)
     os.rmdir(self.testdir)
     # restore default handling of dm.DMWarning (presumably altered in
     # setUp — confirm)
     warnings.simplefilter('default', dm.DMWarning)
Example #21
0
 def teardown(self):
     """Remove the generated hosts file (best-effort) and the temp dir,
     then defer to the base class teardown."""
     try:
         os.unlink(self.hosts_path)
     except OSError:
         # already gone — deletion is best-effort
         pass
     # tempdir is expected to be empty once hosts_path is removed
     os.rmdir(self.tempdir)
     ProcessTestExecutor.teardown(self)
Example #22
0
    def test_path_search_which(self):
        " which() finds an executable in $PATH and returns its abspath. "
        program = 'gcc'
        scratch_dir = tempfile.mkdtemp()
        program_path = os.path.join(scratch_dir, program)
        original_path = os.environ['PATH']
        try:
            # setup: PATH contains only our scratch dir with an empty file
            os.environ['PATH'] = scratch_dir
            with open(program_path, 'w') as fp:
                pass

            # a non-executable file must not match, by abspath or by name,
            os.chmod(program_path, 0o400)
            assert pexpect.which(program_path) is None
            assert pexpect.which(program) is None

            # but once executable, both lookups resolve to its abspath.
            os.chmod(program_path, 0o700)
            assert pexpect.which(program_path) == program_path
            assert pexpect.which(program) == program_path

        finally:
            # restore the environment,
            os.environ['PATH'] = original_path

            # then destroy scratch files and folders,
            if os.path.exists(program_path):
                os.unlink(program_path)
            if os.path.exists(scratch_dir):
                os.rmdir(scratch_dir)
Example #23
0
    def test_which_follows_symlink(self):
        " which() follows symlinks and returns its path. "
        target_name = 'original'
        link_name = 'extra-crispy'
        scratch_dir = tempfile.mkdtemp()
        target_path = os.path.join(scratch_dir, target_name)
        link_path = os.path.join(scratch_dir, link_name)
        original_path = os.environ['PATH']
        try:
            # setup: a non-executable file plus a symlink pointing at it
            os.environ['PATH'] = scratch_dir
            with open(target_path, 'w') as fp:
                pass
            os.chmod(target_path, 0o400)
            os.symlink(target_path, link_path)

            # not found: the symlink's target is not executable
            assert pexpect.which(link_name) is None

            # found once the target becomes executable
            os.chmod(target_path, 0o700)
            assert pexpect.which(link_name) == link_path

        finally:
            # restore the environment,
            os.environ['PATH'] = original_path

            # then destroy scratch symlink, file, and folder,
            if os.path.exists(link_path):
                os.unlink(link_path)
            if os.path.exists(target_path):
                os.unlink(target_path)
            if os.path.exists(scratch_dir):
                os.rmdir(scratch_dir)
Example #24
0
   def __del__(self):
      """Destructor: run `ebuild ... clean`, delete the temporary ebuild,
      and remove the package directory when only the Manifest remained."""

      if self._ebuild_file_path:
         self.einfo('Cleaning up package build temporary directory')
         clean_proc = subprocess.Popen(
            ('ebuild', self._ebuild_file_path, 'clean'),
            stdout=self._dev_null, stderr=subprocess.STDOUT
         )
         clean_proc.communicate()
         clean_proc = None

         self.einfo('Deleting temporary ebuild')
         os.unlink(self._ebuild_file_path)
         # Delete the package directory, since now it should only contain the
         # Manifest file.
         package_path = os.path.dirname(self._ebuild_file_path)
         files_list = os.listdir(package_path)
         # FIX: membership test + remove() instead of the old
         # delete-while-enumerating loop — same effect, no index juggling.
         if 'Manifest' in files_list:
            os.unlink(os.path.join(package_path, 'Manifest'))
            files_list.remove('Manifest')
         if files_list:
            # NOTE(review): `ewarn` is called bare while einfo is a method
            # (self.einfo) — confirm a module-level ewarn exists.
            ewarn(
               'Not removing {} unknown files in package directory `{}\''
               .format(len(files_list), package_path)
            )
         else:
            os.rmdir(package_path)

      self._dev_null.close()
Example #25
0
 def release(self):
     """Release the held lock.

     Raises NotLocked when no lock is held, and NotMyLock when the lock
     exists but our unique marker file is absent (someone else's lock).
     """
     if not self.is_locked():
         raise NotLocked
     elif not os.path.exists(self.unique_name):
         raise NotMyLock
     # remove our marker first, then the lock directory itself
     os.unlink(self.unique_name)
     os.rmdir(self.lock_file)
Example #26
0
    def runTest(self):
        """This tests copying a directory structure to the device.
        """
        dvroot = self.dm.deviceRoot
        dvpath = posixpath.join(dvroot, 'infratest')
        # start from a clean slate on the device
        self.dm.removeDir(dvpath)
        self.dm.mkDir(dvpath)

        p1 = os.path.join('test-files', 'push1')
        # Set up local stuff
        try:
            # only succeeds if a previous run left p1 behind empty
            os.rmdir(p1)
        except:
            pass

        # build test-files/push1/sub.1/sub.2/testfile locally
        if not os.path.exists(p1):
            os.makedirs(os.path.join(p1, 'sub.1', 'sub.2'))
        if not os.path.exists(os.path.join(p1, 'sub.1', 'sub.2', 'testfile')):
            # NOTE: `file()` is Python 2 only
            file(os.path.join(p1, 'sub.1', 'sub.2', 'testfile'), 'w').close()

        self.dm.pushDir(p1, posixpath.join(dvpath, 'push1'))

        # the pushed directory tree must now exist on the device
        self.assertTrue(
            self.dm.dirExists(posixpath.join(dvpath, 'push1', 'sub.1')))
        self.assertTrue(self.dm.dirExists(
            posixpath.join(dvpath, 'push1', 'sub.1', 'sub.2')))
Example #27
0
    def run(self):
        """Run distutils clean, then remove everything listed in CLEANUP.

        With --all, compiled artifacts (*.pyc, *.pyd, *.so) found in the
        tree are added to the cleanup list first.
        """
        from distutils import log
        from distutils.filelist import FileList
        global CLEANUP

        distutils.command.clean.clean.run(self)

        if self.all:
            fl = FileList()
            fl.include_pattern("*.pyc", 0)
            fl.include_pattern("*.pyd", 0)
            fl.include_pattern("*.so", 0)
            CLEANUP += fl.files

        for f in CLEANUP:
            # pick the right removal primitive for the path type
            remove = os.rmdir if os.path.isdir(f) else os.remove
            try:
                if not self.dry_run and os.path.exists(f):
                    remove(f)
                log.info("removing '%s'", f)
            # BUG FIX: os.remove/os.rmdir raise OSError, which `except
            # IOError` did not catch on Python 2 (they are distinct there);
            # catching OSError covers both on 2 and 3.  Also merges the two
            # previously duplicated try/except blocks.
            except OSError:
                log.warning("unable to remove '%s'", f)
Example #28
0
    def test_which_should_not_match_folders(self):
        " Which does not match folders, even though they are executable. "
        # make up a path and insert a folder that is 'executable', a naive
        # implementation might match (previously pexpect versions 3.2 and
        # sh versions 1.0.8, reported by @lcm337.)
        program = 'g++'
        outer_dir = tempfile.mkdtemp()
        decoy_dir = os.path.join(outer_dir, program)
        original_path = os.environ['PATH']
        try:
            os.environ['PATH'] = outer_dir
            # an executable *directory* named exactly like the program
            os.mkdir(decoy_dir, 0o755)
            # it has the executable bit set, as a good folder should, but
            # must not be returned because it fails the regular-file check
            exercise = pexpect.which(program)
            assert exercise is None

        finally:
            # restore the environment,
            os.environ['PATH'] = original_path
            # then destroy inner and outer scratch folders,
            for scratch in (decoy_dir, outer_dir,):
                if os.path.exists(scratch):
                    os.rmdir(scratch)
Example #29
0
    def test_ssh_client_sftp(self):
        """Test SFTP features of SSHClient. Copy local filename to server,
        check that data in both files is the same, make new directory on
        server, remove files and directory."""
        test_file_data = 'test'
        local_filename = 'test_file'
        remote_test_dir, remote_filename = 'remote_test_dir', 'test_file_copy'
        remote_filename = os.path.sep.join([remote_test_dir, remote_filename])
        remote_dir = 'remote_dir'
        # create the local source file to copy
        test_file = open(local_filename, 'w')
        test_file.writelines([test_file_data + os.linesep])
        test_file.close()
        # embedded test server: answers self.fake_cmd with self.fake_resp
        server = start_server({ self.fake_cmd : self.fake_resp },
                              self.listen_socket)
        client = SSHClient('127.0.0.1', port=self.listen_port,
                           pkey=self.user_key)
        client.copy_file(local_filename, remote_filename)
        # the server runs locally, so "remote" paths are visible as local
        # paths and can be checked with os.path directly
        self.assertTrue(os.path.isdir(remote_test_dir),
                        msg="SFTP create remote directory failed")
        self.assertTrue(os.path.isfile(remote_filename),
                        msg="SFTP copy failed")
        copied_file = open(remote_filename, 'r')
        copied_file_data = copied_file.readlines()[0].strip()
        copied_file.close()
        self.assertEqual(test_file_data, copied_file_data,
                         msg="Data in destination file %s does \
not match source %s" % (copied_file_data, test_file_data))
        # cleanup: files first, then directories
        for filepath in [local_filename, remote_filename]:
            os.unlink(filepath)
        client.mkdir(client._make_sftp(), remote_dir)
        self.assertTrue(os.path.isdir(remote_dir))
        for dirpath in [remote_dir, remote_test_dir]:
            os.rmdir(dirpath)
        del client
        server.join()
Example #30
0
def process_directory_target_walk(args, target_dir, target_filenames):
  (options, target_size) = args

  for target_filename in target_filenames:

    # Make sure this is a file
    target_path = os.path.join(target_dir, target_filename)
    if not os.path.isfile(target_path): 
      continue

    source_relpath = os.path.relpath(target_path, os.path.join(options.directory_target, target_size['directory']))
    
    # Remove the file if it is removed from the source directory
    if not os.path.exists(os.path.join(options.directory_source, source_relpath)):
      if options.no_delete:
        print 'would remove file ', target_path
      else:
        os.remove(target_path)

  # Remove directory if empty
  if not os.listdir(target_dir):
    if options.no_delete:
      print 'would remove directory ', target_dir
    else:
      os.rmdir(target_dir)
Example #31
0
                check_call('restorecon -Rv /', shell=True)

                os.fchdir(real_root)
                os.chroot(".")
                os.close(real_root)
                check_call('umount ' + installroot +
                           '/{sys/fs/selinux,sys,proc}',
                           shell=True)

            print(
                bcolors.OKBLUE +
                '[INFO] Packaging and cleaning files... May take some time.' +
                bcolors.ENDC)
            os.system('umount ' + installroot)
            os.rmdir(installroot)
            os.system(
                'mksquashfs ' + image_working_directory + ' ' +
                os.path.join(images_path, selected_image_name, 'squashfs.img'))
            try:
                sha256sum = hashlib.sha256(
                    open(
                        os.path.join(images_path, selected_image_name,
                                     'squashfs.img'),
                        'rb').read()).hexdigest()
            except Exception as e:
                print(e)
            shutil.rmtree(image_working_directory)

            print(bcolors.OKBLUE + '[INFO] Registering new image.' +
                  bcolors.ENDC)
Example #32
0
samples = []
not_downloaded = []

# Build [clean_name, accession] pairs from the CSV rows: strip &()+, keep
# the text up to the second underscore, and append the accession.
for row in csv_file:
    name = re.sub(r"&|\(|\)|\,", "", row[0])
    f = name.index("_")
    name = name[:name.index("_", f + 1)] + f"_{row[1]}"
    samples.append([name, row[1]])

for sample in samples:
    # NOTE(review): shell=True with interpolated values — accessions come
    # from the CSV; confirm they are trusted before shipping.
    out = subprocess.run(f"prefetch -O SRA/ {sample[1]}",
                         shell=True,
                         capture_output=True)
    print(out.stdout.decode("utf-8"))

    # prefetch drops each run into SRA/<SRR...>/; move the .sra out and
    # remove the now-empty per-run directory
    files = os.listdir("SRA")
    for fl in files:
        if "SRR" in fl:
            print(f"DEBUG: {fl} > {sample[0]}")
            out = subprocess.run(f"mv SRA/{fl}/*.sra SRA/{sample[0]}.sra",
                                 shell=True,
                                 capture_output=True)
            # BUG FIX: fl is a name *inside* SRA/, so rmdir must target
            # SRA/<fl>; the old os.rmdir(fl) looked in the CWD and failed.
            os.rmdir(os.path.join("SRA", fl))
    print(out.stdout.decode("utf-8"))

#if not "has 0 unresolved dependencies" in str(out.stdout):
#    not_downloaded.append(sample)
#
#for sample in not_downloaded:
#    print(sample)
Example #33
0
def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_coverage=False, args=[]):
    """Run every functional test script in *test_list* and exit the process.

    Exit status is 0 only when all tests were successful.
    NOTE(review): the mutable default `args=[]` is shared across calls —
    harmless while callers always pass it, but worth fixing upstream.
    """
    # Warn if bitcoind is already running (unix only)
    try:
        if subprocess.check_output(["pidof", "bitcoind"]) is not None:
            print("%sWARNING!%s There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
    except (OSError, subprocess.SubprocessError):
        # pidof missing or no process found — nothing to warn about
        pass

    # Warn if there is a cache directory
    cache_dir = "%s/test/cache" % build_dir
    if os.path.isdir(cache_dir):
        print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))

    #Set env vars
    if "BITCOIND" not in os.environ:
        os.environ["BITCOIND"] = build_dir + '/src/bitcoind' + exeext
        os.environ["BITCOINCLI"] = build_dir + '/src/bitcoin-cli' + exeext

    tests_dir = src_dir + '/test/functional/'

    # flags passed through to every test script
    flags = ["--srcdir={}/src".format(build_dir)] + args
    flags.append("--cachedir=%s" % cache_dir)

    if enable_coverage:
        coverage = RPCCoverage()
        flags.append(coverage.flag)
        logging.debug("Initializing coverage directory at %s" % coverage.dir)
    else:
        coverage = None

    if len(test_list) > 1 and jobs > 1:
        # Populate cache
        subprocess.check_output([tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])

    #Run Tests
    job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags)
    time0 = time.time()
    test_results = []

    # longest test name, used for column-aligned result printing
    max_len_name = len(max(test_list, key=len))

    # drain the queue: one get_next() per scheduled test
    for _ in range(len(test_list)):
        test_result, stdout, stderr = job_queue.get_next()
        test_results.append(test_result)

        if test_result.status == "Passed":
            logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
        elif test_result.status == "Skipped":
            logging.debug("\n%s%s%s skipped" % (BOLD[1], test_result.name, BOLD[0]))
        else:
            # failed: surface the child process output for diagnosis
            print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
            print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
            print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')

    print_results(test_results, max_len_name, (int(time.time() - time0)))

    if coverage:
        coverage.report_rpc_coverage()

        logging.debug("Cleaning up coverage data")
        coverage.cleanup()

    # Clear up the temp directory if all subdirectories are gone
    if not os.listdir(tmpdir):
        os.rmdir(tmpdir)

    all_passed = all(map(lambda test_result: test_result.was_successful, test_results))

    # process exit code: 0 on success, 1 otherwise
    sys.exit(not all_passed)
Example #34
0
def _process_task(pfilename):
    '''
    Recursive celery task: process one batch of files for a data-management action.

    *pfilename* names a pickled parameter file (written by set_action_param_var)
    holding a list of dictionaries, each describing one directory's worth of
    work ('to_process', 'action', 'archivepath', 'start_dir', 'to_keep', ...).
    The task processes files for at most ~10 seconds, then re-queues itself via
    _process_task.delay() with the remaining work; when nothing is left it
    records the total disk space handled, notifies the user and releases the
    task lock.

    NOTE(review): relies on module-level names defined elsewhere in this file
    (logger, DMFileStat, TaskLock, EventLog, DMExceptions, _process,
    _emptydir_delete, _action_complete_update, project_msg_banner,
    get/set_action_param_var and the EXPORT/TEST/ARCHIVE/DELETE constants).
    '''
    from datetime import datetime
    from datetime import timedelta
    logger.debug("Function: %s()" % sys._getframe().f_code.co_name)
    logger.debug("Task ID: %s" % _process_task.request.id)

    #catch all unhandled exceptions and clean up
    try:
        # Load the work description, then delete the parameter file so the
        # batch is consumed exactly once.
        list_of_file_dict = get_action_param_var(pfilename)
        os.unlink(pfilename)

        dmfilestat = DMFileStat.objects.get(id=list_of_file_dict[0]['pk'])
        terminate = True  # flag to indicate recursion termination
        total_processed = 0
        fstatus = "Success"

        # Time-box the task: stop picking up new files after ~10 seconds and
        # let the re-queued task continue with the remainder.
        start_time = datetime.now()
        max_time_delta = timedelta(seconds=10)

        # list_of_file_dict contains zero, one, or two dictionary variables to iterate over.
        # NOTE(review): the loop variable 'dict' shadows the builtin of the same name.
        for q, dict in enumerate(list_of_file_dict):
            # The dictionary contains an element named 'to_process' which is a list variable to iterate over
            logger.debug("%d, start_dir: %s" % (q, dict['start_dir']))
            logger.info("%6d %s %s" %
                        (len(dict['to_process']), dmfilestat.dmfileset.type,
                         dmfilestat.result.resultsName))

            while (datetime.now() - start_time) < max_time_delta:
                # If there are no files left to process, (all to_process lists are empty), the recursion ends
                if len(dict['to_process']) > 0:
                    terminate = False

                    try:
                        # process one file and remove entry from the list
                        path = dict['to_process'].pop(0)

                        j = dict['processed_cnt'] + 1

                        # Index 6 of the stat tuple is st_size; symlinks are
                        # counted as zero bytes.
                        this_file_size = 0
                        if not os.path.islink(path):
                            this_file_size = os.lstat(path)[6]

                        if _process(path, dict['action'], dict['archivepath'],
                                    dict['start_dir'], dict['to_keep']):
                            dict['processed_cnt'] = j
                            dict['total_size'] += this_file_size
                            logger.debug("%04d/%04d %s %10d %s" %
                                         (j, dict['total_cnt'], dict['action'],
                                          dict['total_size'], path))

                    except (OSError, IOError, DMExceptions.RsyncError) as e:
                        #IOError: [Errno 28] No space left on device:
                        if e.errno == errno.ENOSPC:
                            raise
                        elif e.errno == errno.ENOENT:
                            # File vanished between listing and processing;
                            # skip it and keep going.
                            logger.warn("%04d No longer exists %s" % (j, path))
                            continue
                        else:
                            raise
                    except:
                        # Any other per-file failure is logged and the batch
                        # continues with the next file.
                        errmsg = "%04d/%04d %s %10d %s" % (
                            j, dict['total_cnt'], dict['action'],
                            dict['total_size'], path)
                        logger.error(errmsg)
                        logger.error(traceback.format_exc())

                    # Optionally prune the now-empty parent directory — never
                    # for export/test actions and never "plugin_out" dirs.
                    if not dict['action'] in [
                            EXPORT, TEST
                    ] and dmfilestat.dmfileset.del_empty_dir:
                        dir = os.path.dirname(path)
                        try:
                            if len(os.listdir(dir)) == 0:
                                if not "plugin_out" in dir:
                                    try:
                                        os.rmdir(dir)
                                        logger.debug(
                                            "Removed empty directory: %s" %
                                            dir)
                                    except Exception as e:
                                        logger.warn("rmdir [%d] %s: %s" %
                                                    (e.errno, e.strerror, dir))
                        except OSError as e:
                            if e.errno == errno.ENOENT:
                                logger.warn("del_empty_dir Does not exist %s" %
                                            (path))
                                continue
                            else:
                                raise e
                else:
                    break

            # only expect to execute this line when no files to process
            total_processed += dict['total_size']

    except Exception as e:
        # Batch-level failure: mark the action errored, tell the user, release
        # the lock, and stop the recursion.
        fstatus = "Error"
        terminate = True
        dmfilestat.setactionstate('E')
        logger.error(
            "DM Action failure on %s for %s report." %
            (dmfilestat.dmfileset.type, dmfilestat.result.resultsName))
        logger.error("This %s action will need to be manually completed." %
                     (dict['action']))
        logger.error("The following is the exception error:\n" +
                     traceback.format_exc())
        EventLog.objects.add_entry(
            dmfilestat.result,
            "%s - %s. Action not completed.  User intervention required." %
            (fstatus, e),
            username='******')

        # Release the task lock
        try:
            if dict['lockfile']:
                applock = TaskLock(dict['lockfile'])
                applock.unlock()
        except:
            logger.error(traceback.format_exc())

        # Do the user notification
        try:
            # pop up a message banner
            if dict['msg_banner']:
                dmfileset = dmfilestat.dmfileset
                project_msg = {}
                msg_dict = {}
                msg_dict[dmfileset.type] = fstatus
                project_msg[dmfilestat.result_id] = msg_dict
                project_msg_banner('', project_msg, dict['action'])
        except:
            logger.error(traceback.format_exc())

        # ====================================================================
        # Exit function here on error
        # ====================================================================
        return

    if not terminate:
        # ====================================================================
        # Launch next task
        # ====================================================================
        try:
            # Re-pickle the remaining work and hand it to a fresh task.
            action = dict.get('action', 'unk')
            pfilename = set_action_param_var(list_of_file_dict, prefix=action)
            celery_result = _process_task.delay(pfilename)
        except:
            logger.error(traceback.format_exc())

    else:
        # ====================================================================
        # No more files to process.  Clean up and exit.
        # ====================================================================
        try:
            # Record processed bytes as MB on the DMFileStat record.
            dmfilestat.diskspace = float(total_processed) / (1024 * 1024)
            dmfilestat.save()
            logger.info("%0.1f MB %s processed" %
                        (dmfilestat.diskspace, dmfilestat.dmfileset.type))
            if dict['action'] in [ARCHIVE, DELETE]:
                _emptydir_delete(dmfilestat)
        except:
            logger.error(traceback.format_exc())

        # Do the user notification
        try:
            _action_complete_update(dict['user'], dict['user_comment'],
                                    dmfilestat, dict['action'])

            # pop up a message banner
            if dict['msg_banner']:
                dmfileset = dmfilestat.dmfileset
                project_msg = {}
                msg_dict = {}
                msg_dict[dmfileset.type] = fstatus
                project_msg[dmfilestat.result_id] = msg_dict
                project_msg_banner('', project_msg, dict['action'])
        except:
            logger.error(traceback.format_exc())

        # Release the task lock
        try:
            if dict['lockfile']:
                applock = TaskLock(dict['lockfile'])
                applock.unlock()
        except:
            logger.error(traceback.format_exc())

    return
Example #35
0
def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0):
    """Run the functional test scripts in *test_list* and exit the process.

    Exits with status 0 when every test passed, 1 otherwise.  When
    *combined_logs_len* is non-zero, the last that many lines of the combined
    node logs are printed for each failing test.

    NOTE(review): relies on module-level helpers defined elsewhere in this
    file (TestHandler, RPCCoverage, print_results, BOLD).
    """
    # Fix: avoid the mutable default argument ([] is shared across calls);
    # this also matches the newer run_tests() variant later in this file.
    args = args or []

    # Warn if bitcoind is already running (unix only)
    try:
        if subprocess.check_output(["pidof", "redspaced"]) is not None:
            print("%sWARNING!%s There is already a redspaced process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
    except (OSError, subprocess.SubprocessError):
        pass

    # Warn if there is a cache directory
    cache_dir = "%s/test/cache" % build_dir
    if os.path.isdir(cache_dir):
        print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))

    #Set env vars
    if "BITCOIND" not in os.environ:
        os.environ["BITCOIND"] = build_dir + '/src/redspaced' + exeext
        os.environ["BITCOINCLI"] = build_dir + '/src/redspace-cli' + exeext

    tests_dir = src_dir + '/test/functional/'

    flags = ["--srcdir={}/src".format(build_dir)] + args
    flags.append("--cachedir=%s" % cache_dir)

    if enable_coverage:
        coverage = RPCCoverage()
        flags.append(coverage.flag)
        logging.debug("Initializing coverage directory at %s" % coverage.dir)
    else:
        coverage = None

    if len(test_list) > 1 and jobs > 1:
        # Populate cache once up-front so parallel jobs don't race to build it.
        try:
            subprocess.check_output([tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
        except subprocess.CalledProcessError as e:
            sys.stdout.buffer.write(e.output)
            raise

    #Run Tests
    job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags)
    time0 = time.time()
    test_results = []

    max_len_name = len(max(test_list, key=len))

    for _ in range(len(test_list)):
        test_result, testdir, stdout, stderr = job_queue.get_next()
        test_results.append(test_result)

        if test_result.status == "Passed":
            logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
        elif test_result.status == "Skipped":
            logging.debug("\n%s%s%s skipped" % (BOLD[1], test_result.name, BOLD[0]))
        else:
            print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
            print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
            print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
            if combined_logs_len and os.path.isdir(testdir):
                # Print the final `combinedlogslen` lines of the combined logs
                print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
                print('\n============')
                print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
                print('============\n')
                combined_logs, _ = subprocess.Popen([os.path.join(tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate()
                print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))

    print_results(test_results, max_len_name, (int(time.time() - time0)))

    if coverage:
        coverage.report_rpc_coverage()

        logging.debug("Cleaning up coverage data")
        coverage.cleanup()

    # Clear up the temp directory if all subdirectories are gone
    if not os.listdir(tmpdir):
        os.rmdir(tmpdir)

    all_passed = all(map(lambda test_result: test_result.was_successful, test_results))

    sys.exit(not all_passed)
Example #36
0
def tearDownModule():
    """Module-level teardown: remove the (empty) directory stored in the
    module global PREFIX."""
    prefix_dir = globals()['PREFIX']
    os.rmdir(prefix_dir)
Example #37
0
 def tearDown(self):
     """Remove the test directory named '雲' that sits next to this module."""
     target = os.path.join(os.path.dirname(__file__), '雲')
     os.rmdir(target)
Example #38
0
import os
# 'posix': Linux/Unix/Mac OS X; 'nt': Windows
print(os.name)
#print(os.uname())		# unsupported on Windows --> AttributeError

#print(os.environ)		# dump all environment variables
print(os.environ.get('PATH'))
print(os.environ.get('X', 'default'))  # falls back to 'default' when unset

abspath = os.path.abspath('.')
print(abspath)
abspath = os.path.join(abspath, 'testdir')
# os.path.join assembles the path using the separator of the current OS
print(abspath)
print(os.mkdir(abspath))  # returns None; raises FileExistsError if it already exists
print(os.rmdir(abspath))  # returns None
print()

# abspath need not actually exist: the functions below treat abspath as a plain string
print(os.path.split(abspath))
# split path: \aaa\bbb\ccc\ddd --> (\aaa\bbb\ccc, ddd)
print(abspath)  # abspath value not changed by split
abspath = os.path.split(abspath)[0]
print(abspath)
abspath = os.path.join(abspath, 'test.txt')
print(abspath)
print(os.path.splitext(abspath))
# split extension: \aaa\bbb\ccc\ddd.??? --> (\aaa\bbb\ccc\ddd, .???)
print(abspath)

abspath = os.path.abspath('.')
def main():
    """Benchmark filesystem throughput by writing, copying and deleting sets
    of random files into the directory given by --dir, and optionally emit
    the results in Prometheus textfile format.

    Returns False (early) when the --dir argument is missing.

    NOTE(review): relies on module-level names defined elsewhere in this file
    (parse_arguments, generate_random_str, write_files, addresult, tests,
    prom_label).
    """

    # NOTE(review): currdir is captured but never used below.
    currdir = os.getcwd()

    # Parse command-line arguments
    args = parse_arguments()
    # Unique per-run subdirectory name: host name plus a random suffix.
    hostname=socket.gethostname() + '-' + generate_random_str()

    if args.dir == '':
        print ('--dir-to-benchmark argument required.')
        return False

    bdir = os.path.expanduser(args.dir)

    test_results={}

    with tempfile.TemporaryDirectory() as tmpdir:
        # Each entry of `tests` is (payload_size, file_count).
        for test in tests:
            outdir = os.path.join(tmpdir,str(test[0]))
            destdir = os.path.join(bdir,hostname,str(test[0]))
            os.mkdir(outdir)
            #bigstring = generate_random_dna(test[0])
            print('Generate %s byte random string' % test[0])
            bigstring = os.urandom(int(test[0]/3))
            wsize = write_files(bigstring,test[1],outdir,hostname)

            start = time.time()
            # On Windows, copy large payloads with xcopy instead of
            # shutil.copytree.
            if test[0] > 1000000 and os.name == 'nt':
                cmd = '%s "%s" "%s" /s /i' % ('xcopy', outdir, destdir)
                pipe = subprocess.Popen(cmd, shell=True)
                pipe.wait()
            else:
                destination = shutil.copytree(outdir, destdir)
            end = time.time()

            throughput = (wsize/1024/1024) / (end-start)
            test_results = addresult(test_results, 'throughput_mb_s', [test[0], test[1], "{0:.3f}".format(throughput)])
            files_per_sec = test[1] / (end-start)
            test_results = addresult(test_results, 'files_per_sec', [test[0], test[1], "{0:.1f}".format(files_per_sec)])

            print("Throughput (MiB/s):", "{0:.3f}".format(throughput))
            print("FPS:", "{0:.1f}".format(files_per_sec))

            print('Deleting %s files ...' % test[1])
            start = time.time()
            shutil.rmtree(destdir)
            end = time.time()
            delete_files_per_sec = test[1] / (end-start)
            print("Delete FPS:", "{0:.1f}".format(delete_files_per_sec))
            test_results = addresult(test_results, 'delete_files_per_sec', [test[0], test[1], "{0:.1f}".format(delete_files_per_sec)])

        os.rmdir(os.path.join(bdir,hostname))

        if args.prometheus_folder:
            # Write to a .prom.tmp file first, then rename into place so the
            # Prometheus textfile collector never reads a partial file.
            with open(os.path.join(args.prometheus_folder, prom_label + '.prom.tmp'), 'w') as fh:

                for key in test_results:
                    fh.write('\n# TYPE ' + prom_label + '_%s gauge\n' % key)
                    for value in test_results[key]:
                        fh.write(prom_label + '_%s{bytes="%s", files="%s"} %s\n' % (key,value[0],value[1],value[2]))

            shutil.move(os.path.join(args.prometheus_folder, prom_label + '.prom.tmp'),
                    os.path.join(args.prometheus_folder, prom_label + '.prom'))
    def main(self):
        """Test-framework entry point.

        Parses command-line options, prepares the per-run temp datadir and
        logging, runs setup_chain/setup_network/run_test, then stops the
        nodes, optionally cleans up the datadirs, and exits the process with
        TEST_EXIT_PASSED or TEST_EXIT_FAILED.
        """

        parser = optparse.OptionParser(usage="%prog [options]")
        parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
                          help="Leave oakcoinds and test.* datadir on exit or error")
        parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
                          help="Don't stop oakcoinds after the test execution")
        parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
                          help="Source directory containing oakcoind/oakcoin-cli (default: %default)")
        parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../cache"),
                          help="Directory for caching pregenerated datadirs")
        parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
                          help="Root directory for datadirs")
        parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
                          help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
        parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
                          help="Print out all RPC calls as they are made")
        parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
                          help="The seed to use for assigning port numbers (default: current process id)")
        parser.add_option("--coveragedir", dest="coveragedir",
                          help="Write tested RPC commands into this directory")
        parser.add_option("--configfile", dest="configfile",
                          help="Location of the test framework config file")
        self.add_options(parser)
        (self.options, self.args) = parser.parse_args()

        # backup dir variable for removal at cleanup
        # (root = user-supplied tmpdir; tmpdir = per-port-seed subdirectory)
        self.options.root, self.options.tmpdir = self.options.tmpdir, self.options.tmpdir + '/' + str(self.options.port_seed)

        if self.options.coveragedir:
            enable_coverage(self.options.coveragedir)

        PortSeed.n = self.options.port_seed

        # Make oakcoind/oakcoin-cli from the source tree resolvable first.
        os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']

        check_json_precision()

        # Set up temp directory and start logging
        os.makedirs(self.options.tmpdir, exist_ok=False)
        self._start_logging()

        success = False

        # Each expected failure mode is logged separately; success stays
        # False for any exception so cleanup/exit behave accordingly.
        try:
            self.setup_chain()
            self.setup_network()
            self.run_test()
            success = True
        except JSONRPCException as e:
            self.log.exception("JSONRPC error")
        except AssertionError as e:
            self.log.exception("Assertion failed")
        except KeyError as e:
            self.log.exception("Key error")
        except Exception as e:
            self.log.exception("Unexpected exception caught during testing")
        except KeyboardInterrupt as e:
            self.log.warning("Exiting after keyboard interrupt")

        if not self.options.noshutdown:
            self.log.info("Stopping nodes")
            stop_nodes(self.nodes)
        else:
            self.log.info("Note: oakcoinds were not stopped and may still be running")

        # Only clean up after a fully successful, fully shut-down run.
        if not self.options.nocleanup and not self.options.noshutdown and success:
            self.log.info("Cleaning up")
            shutil.rmtree(self.options.tmpdir)
            if not os.listdir(self.options.root):
                os.rmdir(self.options.root)
        else:
            self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
            if os.getenv("PYTHON_DEBUG", ""):
                # Dump the end of the debug logs, to aid in debugging rare
                # travis failures.
                import glob
                filenames = [self.options.tmpdir + "/test_framework.log"]
                filenames += glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
                MAX_LINES_TO_PRINT = 1000
                for fn in filenames:
                    try:
                        with open(fn, 'r') as f:
                            print("From" , fn, ":")
                            print("".join(deque(f, MAX_LINES_TO_PRINT)))
                    except OSError:
                        print("Opening file %s failed." % fn)
                        traceback.print_exc()
        if success:
            self.log.info("Tests successful")
            sys.exit(self.TEST_EXIT_PASSED)
        else:
            self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
            logging.shutdown()
            sys.exit(self.TEST_EXIT_FAILED)
Example #41
0
def run_tests(*,
              test_list,
              src_dir,
              build_dir,
              tmpdir,
              jobs=1,
              enable_coverage=False,
              args=None,
              combined_logs_len=0,
              failfast=False,
              runs_ci):
    """Run the listed functional tests in parallel and exit the process.

    Exits with status 0 when every test passed, 1 otherwise.  *failfast*
    stops scheduling after the first failure; *runs_ci* enables a 40-minute
    per-test timeout; *combined_logs_len* > 0 prints that many trailing lines
    of the combined node logs for each failing test.

    NOTE(review): relies on module-level helpers defined elsewhere in this
    file (TestHandler, RPCCoverage, print_results, BOLD).
    """
    args = args or []

    # Warn if bitcoind is already running (unix only)
    try:
        if subprocess.check_output(["pidof", "bitcoind"]) is not None:
            print(
                "%sWARNING!%s There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!"
                % (BOLD[1], BOLD[0]))
    except (OSError, subprocess.SubprocessError):
        pass

    # Warn if there is a cache directory
    cache_dir = "%s/test/cache" % build_dir
    if os.path.isdir(cache_dir):
        print(
            "%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory."
            % (BOLD[1], BOLD[0], cache_dir))

    tests_dir = src_dir + '/test/functional/'

    flags = ['--cachedir={}'.format(cache_dir)] + args

    if enable_coverage:
        coverage = RPCCoverage()
        flags.append(coverage.flag)
        logging.debug("Initializing coverage directory at %s" % coverage.dir)
    else:
        coverage = None

    if len(test_list) > 1 and jobs > 1:
        # Populate cache once up-front so parallel jobs don't race to build it.
        try:
            subprocess.check_output(
                [sys.executable, tests_dir + 'create_cache.py'] + flags +
                ["--tmpdir=%s/cache" % tmpdir])
        except subprocess.CalledProcessError as e:
            sys.stdout.buffer.write(e.output)
            raise

    #Run Tests
    job_queue = TestHandler(
        num_tests_parallel=jobs,
        tests_dir=tests_dir,
        tmpdir=tmpdir,
        test_list=test_list,
        flags=flags,
        timeout_duration=40 * 60 if runs_ci else float('inf'),  # in seconds
    )
    start_time = time.time()
    test_results = []

    max_len_name = len(max(test_list, key=len))
    test_count = len(test_list)
    for i in range(test_count):
        test_result, testdir, stdout, stderr = job_queue.get_next()
        test_results.append(test_result)
        done_str = "{}/{} - {}{}{}".format(i + 1, test_count, BOLD[1],
                                           test_result.name, BOLD[0])
        if test_result.status == "Passed":
            logging.debug("%s passed, Duration: %s s" %
                          (done_str, test_result.time))
        elif test_result.status == "Skipped":
            logging.debug("%s skipped" % (done_str))
        else:
            print("%s failed, Duration: %s s\n" % (done_str, test_result.time))
            print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
            print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
            if combined_logs_len and os.path.isdir(testdir):
                # Print the final `combinedlogslen` lines of the combined logs
                print('{}Combine the logs and print the last {} lines ...{}'.
                      format(BOLD[1], combined_logs_len, BOLD[0]))
                print('\n============')
                print('{}Combined log for {}:{}'.format(
                    BOLD[1], testdir, BOLD[0]))
                print('============\n')
                combined_logs_args = [
                    sys.executable,
                    os.path.join(tests_dir, 'combine_logs.py'), testdir
                ]
                if BOLD[0]:
                    combined_logs_args += ['--color']
                combined_logs, _ = subprocess.Popen(
                    combined_logs_args,
                    universal_newlines=True,
                    stdout=subprocess.PIPE).communicate()
                print("\n".join(
                    deque(combined_logs.splitlines(), combined_logs_len)))

            if failfast:
                logging.debug("Early exiting after test failure")
                break

    print_results(test_results, max_len_name, (int(time.time() - start_time)))

    if coverage:
        coverage.report_rpc_coverage()

        logging.debug("Cleaning up coverage data")
        coverage.cleanup()

    # Clear up the temp directory if all subdirectories are gone
    if not os.listdir(tmpdir):
        os.rmdir(tmpdir)

    all_passed = all(
        map(lambda test_result: test_result.was_successful, test_results))

    # This will be a no-op unless failfast is True in which case there may be dangling
    # processes which need to be killed.
    job_queue.kill_and_join()

    sys.exit(not all_passed)
Example #42
0
    def run_test(self):
        """Verify mempool persistence (mempool.dat) across node restarts.

        Checks, in order: transactions and prioritisetransaction deltas
        survive a restart; -persistmempool=0 prevents both saving and
        loading; the savemempool RPC recreates a deleted mempool.dat; a
        node restarted with another node's mempool.dat loads it; and
        savemempool fails cleanly when the mempool.dat.new path is blocked
        by a directory.
        """
        chain_height = self.nodes[0].getblockcount()
        assert_equal(chain_height, 200)

        self.log.debug("Mine a single block to get out of IBD")
        self.nodes[0].generate(1)
        self.sync_all()

        self.log.debug("Send 5 transactions from node2 (to its own address)")
        for i in range(5):
            last_txid = self.nodes[2].sendtoaddress(
                self.nodes[2].getnewaddress(), Decimal("10"))
        node2_balance = self.nodes[2].getbalance()
        self.sync_all()

        self.log.debug(
            "Verify that node0 and node1 have 5 transactions in their mempools"
        )
        assert_equal(len(self.nodes[0].getrawmempool()), 5)
        assert_equal(len(self.nodes[1].getrawmempool()), 5)

        self.log.debug("Prioritize a transaction on node0")
        fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
        assert_equal(fees['base'], fees['modified'])
        self.nodes[0].prioritisetransaction(txid=last_txid, fee_delta=1000)
        fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
        assert_equal(fees['base'] + Decimal('0.00001000'), fees['modified'])

        self.log.debug(
            "Stop-start the nodes. Verify that node0 has the transactions in its mempool and node1 does not. Verify that node2 calculates its balance correctly after loading wallet transactions."
        )
        self.stop_nodes()
        # Give this node a head-start, so we can be "extra-sure" that it didn't load anything later
        # Also don't store the mempool, to keep the datadir clean
        self.start_node(1, extra_args=["-persistmempool=0"])
        self.start_node(0)
        self.start_node(2)
        wait_until(lambda: self.nodes[0].getmempoolinfo()["loaded"], timeout=1)
        wait_until(lambda: self.nodes[2].getmempoolinfo()["loaded"], timeout=1)
        assert_equal(len(self.nodes[0].getrawmempool()), 5)
        assert_equal(len(self.nodes[2].getrawmempool()), 5)
        # The others have loaded their mempool. If node_1 loaded anything, we'd probably notice by now:
        assert_equal(len(self.nodes[1].getrawmempool()), 0)

        self.log.debug('Verify prioritization is loaded correctly')
        fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
        assert_equal(fees['base'] + Decimal('0.00001000'), fees['modified'])

        # Verify accounting of mempool transactions after restart is correct
        self.nodes[2].syncwithvalidationinterfacequeue(
        )  # Flush mempool to wallet
        assert_equal(node2_balance, self.nodes[2].getbalance())

        self.log.debug(
            "Stop-start node0 with -persistmempool=0. Verify that it doesn't load its mempool.dat file."
        )
        self.stop_nodes()
        self.start_node(0, extra_args=["-persistmempool=0"])
        wait_until(lambda: self.nodes[0].getmempoolinfo()["loaded"])
        assert_equal(len(self.nodes[0].getrawmempool()), 0)

        self.log.debug(
            "Stop-start node0. Verify that it has the transactions in its mempool."
        )
        self.stop_nodes()
        self.start_node(0)
        wait_until(lambda: self.nodes[0].getmempoolinfo()["loaded"])
        assert_equal(len(self.nodes[0].getrawmempool()), 5)

        mempooldat0 = os.path.join(self.nodes[0].datadir, 'regtest',
                                   'mempool.dat')
        mempooldat1 = os.path.join(self.nodes[1].datadir, 'regtest',
                                   'mempool.dat')
        self.log.debug(
            "Remove the mempool.dat file. Verify that savemempool to disk via RPC re-creates it"
        )
        os.remove(mempooldat0)
        self.nodes[0].savemempool()
        assert os.path.isfile(mempooldat0)

        self.log.debug(
            "Stop nodes, make node1 use mempool.dat from node0. Verify it has 5 transactions"
        )
        os.rename(mempooldat0, mempooldat1)
        self.stop_nodes()
        self.start_node(1, extra_args=[])
        wait_until(lambda: self.nodes[1].getmempoolinfo()["loaded"])
        assert_equal(len(self.nodes[1].getrawmempool()), 5)

        self.log.debug(
            "Prevent raind from writing mempool.dat to disk. Verify that `savemempool` fails"
        )
        # to test the exception we are creating a tmp folder called mempool.dat.new
        # which is an implementation detail that could change and break this test
        mempooldotnew1 = mempooldat1 + '.new'
        os.mkdir(mempooldotnew1)
        assert_raises_rpc_error(-1, "Unable to dump mempool to disk",
                                self.nodes[1].savemempool)
        os.rmdir(mempooldotnew1)
Example #43
0
def rmdirs(dirpath):
    """Remove *dirpath* if it is an empty directory, then try its parent.

    Recursion stops at the first path that is missing, not a directory, or
    not empty.
    """
    # Guard clause: anything non-empty (or not a directory) ends the climb.
    if not os.path.isdir(dirpath) or os.listdir(dirpath):
        return
    ctx.ui.debug("Removing empty dir: %s" % dirpath)
    os.rmdir(dirpath)
    rmdirs(os.path.dirname(dirpath))
Example #44
0
import os
print(os.name)
# Call uname() for detailed system information (POSIX only)
print(os.uname())
# Environment variables
# Every environment variable defined by the OS is held in os.environ
print(os.environ)
print(os.environ.get('PATH'))
print(os.environ.get('x', 'default'))
# Working with files and directories
# Absolute path of the current directory
os.path.abspath('.')
# Create a new directory under an existing one
os.path.join('/user/michael', 'testdir')  # build the full path of the new directory
os.mkdir('/user/michael/testdir')  # create the directory
os.rmdir('/user/michael/testdir')  # remove the directory
# List all sub-directories of the current directory
[x for x in os.listdir('.') if os.path.isdir(x)]
# List files with a specific extension
[
    x for x in os.listdir('.')
    if os.path.isfile(x) and os.path.splitext(x)[1] == '.py'
]
# Regular expressions
# [] matches any one of the listed characters; $ anchors the end of the string;
# re.sub() performs regex-based string substitution
# ^ inside [] negates the set ("anything except these")

import re

# def plural(noun):
#     if re.search('[sxz]$', noun):
Example #45
0
def removedirrecursive(top):
    """Delete everything beneath *top* (files first, then the emptied
    directories); *top* itself is left in place."""
    # Bottom-up walk guarantees each directory is empty before rmdir.
    for current, subdirs, filenames in os.walk(top, topdown=False):
        for entry in filenames:
            os.remove(os.path.join(current, entry))
        for entry in subdirs:
            os.rmdir(os.path.join(current, entry))
Example #46
0
    def test_lock_externally_lock_dir_not_exist(self):
        """External locking must still work when lock_path does not exist."""
        # mkdtemp + rmdir yields a path that is guaranteed not to exist.
        missing_dir = tempfile.mkdtemp()
        os.rmdir(missing_dir)
        self.config(lock_path=missing_dir, group='oslo_concurrency')

        self._do_test_lock_externally()
 def delete_file(self, path):
     """Remove every file and sub-directory beneath *path*, bottom-up."""
     # topdown=False ensures each directory is already empty when rmdir runs.
     for base, subdirs, filenames in os.walk(path, topdown=False):
         for entry in filenames:
             os.remove(os.path.join(base, entry))
         for entry in subdirs:
             os.rmdir(os.path.join(base, entry))
Example #48
0
def merge(file_dir: str):
    """Reassemble the chunks in *file_dir* into one file, then delete the
    readme and the (now empty) chunk directory."""
    # Output file name: the directory name with its final extension stripped
    # ('' when there is no dot, matching str.split/join semantics).
    output_file = file_dir.rpartition('.')[0]
    fs.merge(input_dir=file_dir, output_file=output_file, callback=status, cleanup=True)
    os.remove(file_dir + '/' + readme_file)
    os.rmdir(file_dir)
Example #49
0
def fetch_url(tinfoil,
              srcuri,
              srcrev,
              destdir,
              logger,
              preserve_tmp=False,
              mirrors=False):
    """
    Fetch the specified URL using normal do_fetch and do_unpack tasks, i.e.
    any dependencies that need to be satisfied in order to support the fetch
    operation will be taken care of

    Arguments:
        tinfoil: tinfoil instance used to parse and build recipes
        srcuri: SRC_URI value to fetch
        srcrev: SRCREV value to write into the temporary recipe
        destdir: directory the unpacked source is moved into
        logger: logger used for progress/error messages
        preserve_tmp: if True, keep the temporary work dir for inspection
        mirrors: if True, leave PREMIRRORS/MIRRORS enabled instead of
            blanking them in the temporary recipe

    Returns:
        (checksums, tmpdir) tuple: checksums is a dict accumulated from any
        bb.fetch2.MissingChecksumEvent; tmpdir is the preserved temp dir
        path when preserve_tmp is set, else None.

    Raises:
        FetchUrlFailure: if building the do_unpack task fails.
    """

    import bb

    checksums = {}
    fetchrecipepn = None

    # We need to put our temp directory under ${BASE_WORKDIR} otherwise
    # we may have problems with the recipe-specific sysroot population
    tmpparent = tinfoil.config_data.getVar('BASE_WORKDIR')
    bb.utils.mkdirhier(tmpparent)
    tmpdir = tempfile.mkdtemp(prefix='recipetool-', dir=tmpparent)
    try:
        tmpworkdir = os.path.join(tmpdir, 'work')
        logger.debug('fetch_url: temp dir is %s' % tmpdir)

        fetchrecipedir = _get_temp_recipe_dir(tinfoil.config_data)
        if not fetchrecipedir:
            logger.error(
                'Searched BBFILES but unable to find a writeable place to put temporary recipe'
            )
            sys.exit(1)
        fetchrecipe = None
        bb.utils.mkdirhier(fetchrecipedir)
        try:
            # Generate a dummy recipe so we can follow more or less normal paths
            # for do_fetch and do_unpack
            # I'd use tempfile functions here but underscores can be produced by that and those
            # aren't allowed in recipe file names except to separate the version
            rndstring = ''.join(
                random.choice(string.ascii_lowercase + string.digits)
                for _ in range(8))
            fetchrecipe = os.path.join(fetchrecipedir,
                                       'tmp-recipetool-%s.bb' % rndstring)
            fetchrecipepn = os.path.splitext(os.path.basename(fetchrecipe))[0]
            logger.debug('Generating initial recipe %s for fetching' %
                         fetchrecipe)
            with open(fetchrecipe, 'w') as f:
                # We don't want to have to specify LIC_FILES_CHKSUM
                f.write('LICENSE = "CLOSED"\n')
                # We don't need the cross-compiler
                f.write('INHIBIT_DEFAULT_DEPS = "1"\n')
                # We don't have the checksums yet so we can't require them
                f.write('BB_STRICT_CHECKSUM = "ignore"\n')
                f.write('SRC_URI = "%s"\n' % srcuri)
                f.write('SRCREV = "%s"\n' % srcrev)
                f.write('WORKDIR = "%s"\n' % tmpworkdir)
                # Set S out of the way so it doesn't get created under the workdir
                f.write('S = "%s"\n' % os.path.join(tmpdir, 'emptysrc'))
                if not mirrors:
                    # We do not need PREMIRRORS since we are almost certainly
                    # fetching new source rather than something that has already
                    # been fetched. Hence, we disable them by default.
                    # However, we provide an option for users to enable it.
                    f.write('PREMIRRORS = ""\n')
                    f.write('MIRRORS = ""\n')

            logger.info('Fetching %s...' % srcuri)

            # FIXME this is too noisy at the moment

            # Parse recipes so our new recipe gets picked up
            tinfoil.parse_recipes()

            # Collect missing-checksum info emitted by the fetcher so we can
            # report the real checksums back to the caller.
            def eventhandler(event):
                if isinstance(event, bb.fetch2.MissingChecksumEvent):
                    checksums.update(event.checksums)
                    return True
                return False

            # Run the fetch + unpack tasks
            res = tinfoil.build_targets(
                fetchrecipepn,
                'do_unpack',
                handle_events=True,
                extra_events=['bb.fetch2.MissingChecksumEvent'],
                event_callback=eventhandler)
            if not res:
                raise FetchUrlFailure(srcuri)

            # Remove unneeded directories
            rd = tinfoil.parse_recipe(fetchrecipepn)
            if rd:
                pathvars = ['T', 'RECIPE_SYSROOT', 'RECIPE_SYSROOT_NATIVE']
                for pathvar in pathvars:
                    path = rd.getVar(pathvar)
                    if os.path.exists(path):
                        shutil.rmtree(path)
        finally:
            # Always remove the temporary recipe; the recipe dir may still
            # hold other temp recipes, so a non-empty rmdir is tolerated.
            if fetchrecipe:
                try:
                    os.remove(fetchrecipe)
                except FileNotFoundError:
                    pass
            try:
                os.rmdir(fetchrecipedir)
            except OSError as e:
                import errno
                if e.errno != errno.ENOTEMPTY:
                    raise

        bb.utils.mkdirhier(destdir)
        for fn in os.listdir(tmpworkdir):
            shutil.move(os.path.join(tmpworkdir, fn), destdir)

    finally:
        # Unless the caller asked to keep it, drop the whole temp tree and
        # signal that by returning tmpdir=None.
        if not preserve_tmp:
            shutil.rmtree(tmpdir)
            tmpdir = None

    return checksums, tmpdir
Example #50
0
# The rmdir() Method

# os.rmdir(path) deletes the directory named by its argument.
# The directory must already be empty; otherwise OSError is raised.

import os

# This would remove the "/tmp/test" directory (raises OSError if it is
# missing or not empty).
os.rmdir("/tmp/test" )
Example #51
0
 def rmdir(self, path):
     """Passthrough-filesystem rmdir: map *path* to its backing on-disk
     location and remove that directory, propagating os.rmdir's result."""
     return os.rmdir(self._full_path(path))
Example #52
0

#START N2C2 2018 NER
from raw_datasets.ner import load_n2c2_2018

# Build and cache the N2C2 2018 NER train split if not already present.
try:
    save_dir = N2C2_2018_NER_TRAIN_PATH
    if not os.path.isdir(save_dir):
        print(f"Saving N2C2 2018 NER Train: {save_dir}")
        os.makedirs(save_dir)
        inputs = NERDataset.create_ner_dataset(list(load_n2c2_2018(partition='train')),
                                                           BertTokenizer.from_pretrained(bert_weight_directory),
                                                           save_directory=save_dir)
except FileNotFoundError as e:
    # Presumably load_n2c2_2018 raises when the raw corpus is absent -- verify.
    # NOTE(review): os.rmdir only works if save_dir is still empty; if the
    # dataset writer got partway through, this cleanup will itself raise.
    print(f"Could not find raw files for {save_dir}")
    os.rmdir(save_dir)

# Same flow for the test split.
try:
    save_dir = N2C2_2018_NER_TEST_PATH
    if not os.path.isdir(save_dir):
        print(f"Saving N2C2 2018 NER Test: {save_dir}")
        os.makedirs(save_dir)
        inputs = NERDataset.create_ner_dataset(list(load_n2c2_2018(partition='test')),
                                                           BertTokenizer.from_pretrained(bert_weight_directory),
                                                           save_directory=save_dir)
except FileNotFoundError as e:
    print(f"Could not find raw files for {save_dir}")
    os.rmdir(save_dir)
#END N2C2 2018 NER

# #START TAC 2018 NER
Example #53
0
        try:
            # Use the last path component as the system name; the bare
            # except keeps the full path as a fallback (NOTE(review): a
            # bare except is over-broad -- split() can hardly fail here).
            system_name = predictions_file.split('/')[-1]
        except:
            system_name = predictions_file

    # Load gold annotations and system predictions (both JSON).
    with open(golden_file, 'r') as f:
        golden_data = json.load(f)

    with open(predictions_file, 'r') as f:
        predictions_data = json.load(f)

    # Random, effectively-unique scratch dir for the trec_eval input files.
    temp_dir = uuid.uuid4().hex
    qrels_temp_file = '{0}/{1}'.format(temp_dir, 'qrels.txt')
    qret_temp_file = '{0}/{1}'.format(temp_dir, 'qret.txt')

    try:
        if not os.path.exists(temp_dir):
            os.makedirs(temp_dir)
        else:
            sys.exit("Possible uuid collision")

        # Convert BioASQ-format data into trec_eval's qrels/qret formats.
        format_bioasq2treceval_qrels(golden_data, qrels_temp_file)
        format_bioasq2treceval_qret(predictions_data, system_name,
                                    qret_temp_file)

        trec_evaluate(qrels_temp_file, qret_temp_file)
    finally:
        # NOTE(review): if an exception fires before both files are written,
        # these os.remove calls raise FileNotFoundError and mask the original
        # error -- confirm whether that is acceptable here.
        os.remove(qrels_temp_file)
        os.remove(qret_temp_file)
        os.rmdir(temp_dir)
def remove(args, directory, files):
    try:
        count_arguments = len(args)
        count_files = len(files)
        if count_arguments == 0:
            if count_files == 0 and len(directory) == 0:
                print "rm: missing operand"
            elif count_files != 0 and len(directory) == 0:
                for i in files:
                    os.remove(i)
            elif count_files == 0 and len(directory) != 0:
                for i in range(0, len(directory)):
                    print "rm: cannot remove \'" + directory[
                        i] + "\': Is a directory"
            elif count_files != 0 and len(directory) != 0:
                for i in files:
                    os.remove(i)
                for i in range(0, len(directory)):
                    print "rm: cannot remove \'" + directory[
                        i] + "\': Is a directory"

        elif count_arguments == 1:
            if args[0] == "-r":
                if count_files == 0 and len(directory) == 0:
                    print "rm: missing operand"
                elif count_files != 0 and len(directory) == 0:
                    for i in files:
                        os.remove(i)
                elif count_files == 0 and len(directory) != 0:
                    for i in directory:
                        for root, dirs, files in os.walk(i, topdown=False):
                            for name in files:
                                os.remove(os.path.join(root, name))
                            for name in dirs:
                                os.rmdir(os.path.join(root, name))
                        os.removedirs(i)
                elif count_files != 0 and len(directory) != 0:
                    for i in files:
                        os.remove(i)
                    for i in directory:
                        for root, dirs, files in os.walk(i, topdown=False):
                            for name in files:
                                os.remove(os.path.join(root, name))
                            for name in dirs:
                                os.rmdir(os.path.join(root, name))
                        os.removedirs(i)

            elif ("-i" in args):
                print "Do you really want to delete:(Y/N)",
                r = raw_input()
                r = r.lower()
                if r == "y":
                    args = []
                    remove(args, directory, files)
                else:
                    return

            elif ("-ri" in args or "-ir" in args):
                print "Do you really want to delete:(Y/N)",
                r = raw_input()
                r = r.lower()
                if r == "y":
                    args = ["-r"]
                    remove(args, directory, files)
                else:
                    return

            else:
                print "rm: invalid option --\'" + args[0] + "\'"

        elif count_arguments == 2:
            if (args[0] == "-i" and args[1] == "-r") or (args[0] == "-r"
                                                         or args[1] == "-i"):
                print "Do you really want to delete:(Y/N)",
                r = raw_input()
                r = r.lower()
                if r == "y":
                    args = ["-r"]
                    remove(args, directory, files)
                else:
                    return
            else:
                print "Arguments other than -i and -r are not avaialble."

        else:
            print "Multi argument option not available."
    except (OSError, IOError):
        print "Remove Error."
Example #55
0
    def effect(self):
        """Main inkex entry point: render LaTeX input to SVG and merge it
        into the current document, or (action "viewold") recover the LaTeX
        source previously stored on a selected group.

        Flow: read viewport geometry -> obtain the LaTeX text (new formula,
        input file, or stored attribute) -> compile it in a temp dir with
        xelatex/pdflatex/custom command -> optionally pdfcrop -> convert
        the PDF to SVG (pdf2svg or pstoedit) -> merge and clean up.
        """
        # Viewport geometry in document units, derived from the namedview
        # zoom and window size stored in the SVG.
        self.zoom = float(
            self.document.xpath('//sodipodi:namedview/@inkscape:zoom',
                                namespaces=inkex.NSS)[0])
        self.width = 1/self.zoom * \
            float(self.document.xpath(
                '//sodipodi:namedview/@inkscape:window-width', namespaces=inkex.NSS)[0])
        self.height = 1/self.zoom * \
            float(self.document.xpath(
                '//sodipodi:namedview/@inkscape:window-height', namespaces=inkex.NSS)[0])
        self.width = self.unittouu(str(self.width) + 'px')
        self.height = self.unittouu(str(self.height) + 'px')

        self.options.scale = float(self.options.scale)
        action = self.options.action.strip("\"")
        if action == "viewold":
            # Recover the LaTeX source stored on a previously generated
            # group, either re-inserting it as an SVG <text> node or
            # printing it to stderr.
            for i in self.options.ids:
                node = self.selected[i]
                if node.tag != '{%s}g' % SVG_NS:
                    continue
                if '{%s}text' % WriteTexNS in node.attrib:
                    if self.options.tosvg == "true":
                        doc = inkex.etree.fromstring(
                            '<text x="%g" y="%g">%s</text>' %
                            (self.view_center[0] - self.width / 6,
                             self.view_center[1] - self.height / 6,
                             node.attrib.get('{%s}text' % WriteTexNS,
                                             '').decode('string-escape')))
                        p = node.getparent()
                        # p.remove(node)
                        p.append(doc)
                    else:
                        print >> sys.stderr, node.attrib.get(
                            '{%s}text' % WriteTexNS,
                            '').decode('string-escape')
                    return
            print >> sys.stderr, "No text find."
            return
        else:
            # LaTeX source comes either from the dialog ("new") or from a file.
            if action == "new":
                self.text = self.options.formula
            else:
                f = open(self.options.inputfile)
                self.text = f.read()
                f.close()

            if self.text == "":
                print >> sys.stderr, "empty LaTeX input. Nothing is changed."
                return

            # All intermediate artifacts live in one temp dir, removed at the end.
            tmp_dir = tempfile.mkdtemp("", "writetex-")
            tex_file = os.path.join(tmp_dir, "writetex.tex")
            svg_file = os.path.join(tmp_dir, "writetex.svg")
            pdf_file = os.path.join(tmp_dir, "writetex.pdf")
            log_file = os.path.join(tmp_dir, "writetex.log")
            out_file = os.path.join(tmp_dir, "writetex.out")
            err_file = os.path.join(tmp_dir, "writetex.err")
            aux_file = os.path.join(tmp_dir, "writetex.aux")
            crop_file = os.path.join(tmp_dir, "writetex-crop.pdf")

            # Preamble: inline text from the dialog, or read from a file.
            if self.options.preline == "true":
                preamble = self.options.preamble
            else:
                if self.options.preamble == "":
                    preamble = ""
                else:
                    f = open(self.options.preamble)
                    preamble = f.read()
                    f.close()

            self.tex = r"""
            \documentclass[landscape,a3paper]{article}
            \usepackage{geometry}
            %s
            \pagestyle{empty}
            \begin{document}
            \noindent
            %s
            \end{document}
            """ % (preamble, self.text)

            tex = open(tex_file, 'w')
            tex.write(self.tex)
            tex.close()

            # Compile with the chosen engine; a custom command template may
            # reference {tmp_dir}/{tex_file}/{out_file}.
            if self.options.latexcmd.lower() == "xelatex":
                subprocess.call(
                    'xelatex "-output-directory=%s" -interaction=nonstopmode -halt-on-error "%s" > "%s"'
                    % (tmp_dir, tex_file, out_file),
                    shell=True)
            elif self.options.latexcmd.lower() == "pdflatex":
                subprocess.call(
                    'pdflatex "-output-directory=%s" -interaction=nonstopmode -halt-on-error "%s" > "%s"'
                    % (tmp_dir, tex_file, out_file),
                    shell=True)
            else:
                # Setting `latexcmd` to following string produces the same result as xelatex condition:
                # 'xelatex "-output-directory={tmp_dir}" -interaction=nonstopmode -halt-on-error "{tex_file}" > "{out_file}"'
                subprocess.call(self.options.latexcmd.format(
                    tmp_dir=tmp_dir, tex_file=tex_file, out_file=out_file),
                                shell=True)

            # Optional tight-crop of the PDF; failures are reported and
            # then ignored so the uncropped PDF is still used.
            try:
                if not isinstance(spawn.find_executable('pdfcrop'),
                                  type(None)):
                    # Here is a bug in pdfcrop, no idea how to fix.
                    crop_cmd = 'pdfcrop "%s"' % pdf_file
                    crop = subprocess.Popen(crop_cmd,
                                            stdout=subprocess.PIPE,
                                            stderr=subprocess.PIPE,
                                            shell=True)
                    out = crop.communicate()
                    if len(out[1]) > 0:
                        inkex.errormsg("Error in pdfcrop:")
                        inkex.errormsg(" CMD executed: %s" % crop_cmd)
                        for msg in out:
                            inkex.errormsg(msg)
                        inkex.errormsg("Process will continue without crop")

                    if os.path.exists(crop_file):
                        os.remove(pdf_file)
                        os.rename(crop_file, pdf_file)
            except:
                pass

            if not os.path.exists(pdf_file):
                # Compilation failed -- surface the LaTeX log and bail out.
                print >> sys.stderr, "Latex error: check your latex file and preamble."
                print >> sys.stderr, open(log_file).read()
                return
            else:
                # Convert PDF to SVG and merge it into the document.
                if self.options.pdftosvg == '1':
                    subprocess.call('pdf2svg %s %s' % (pdf_file, svg_file),
                                    shell=True)
                    self.merge_pdf2svg_svg(svg_file)
                else:
                    subprocess.call(
                        'pstoedit -f plot-svg "%s" "%s"  -dt -ssp -psarg -r9600x9600 > "%s" 2> "%s"'
                        % (pdf_file, svg_file, out_file, err_file),
                        shell=True)
                    self.merge_pstoedit_svg(svg_file)

            # Clean up every intermediate file, then the (now empty) temp dir.
            os.remove(tex_file)
            os.remove(log_file)
            os.remove(out_file)
            if os.path.exists(err_file):
                os.remove(err_file)
            if os.path.exists(aux_file):
                os.remove(aux_file)
            if os.path.exists(svg_file):
                os.remove(svg_file)
            if os.path.exists(pdf_file):
                os.remove(pdf_file)
            os.rmdir(tmp_dir)
Example #56
0
 def tearDown(self):
     """Run the parent teardown, remove the GRR temp directory created for
     this test, and stop the temp-dir override.

     NOTE(review): os.rmdir requires the directory to be empty -- this
     doubles as an assertion that the test left no files behind.
     """
     super(GRRTempFileTestFilename, self).tearDown()
     # The actual GRR temp dir.
     os.rmdir(tempfiles.GetDefaultGRRTempDirectory())
     self.tempdir_overrider.Stop()
Example #57
0
# pylint: disable = E1101

import os

# Create an empty file, rename it, then delete it again.
with open("file2.txt", "wb"):
    pass

os.rename("file2.txt", "file.txt")
os.remove("file.txt")

# Create a directory and immediately remove it.
os.mkdir("folder")
os.rmdir("folder")

			{
				"chat": start,
				"callback_query": download,
				"inline_query": search,
				"chosen_inline_result": nada
			}
		)
	else:
		exit()

	print("Bot started")

	while True:
		sleep(1)
		path = os.statvfs("/")
		free_space = path.f_bavail * path.f_frsize

		if (del1 == del2 and is_audio == 0) or free_space <= 4000000000 or del2 > del1:
			del1 = 0
			del2 = 0

			for a in os.listdir(loc_dir):
				try:
					rmtree(loc_dir + a)
				except NotADirectoryError:
					os.remove(loc_dir + a)
				except OSError:
					pass
except KeyboardInterrupt:
	os.rmdir(loc_dir)
	print("\nSTOPPED")
Example #59
0
# Method 1: zip the whole current directory in one call.
cwd = os.getcwd()
shutil.make_archive('tobedeleted', 'zip', cwd)

# Method 2: build the archive by hand, one member at a time.
# NOTE(review): Python 2 code -- file() and raw_input() below do not
# exist in Python 3; `zf` is presumably an alias for zipfile (defined
# earlier in the original script) -- verify.
f = file('tobedeleted.py')
file_contents = f.read()

z = zf.ZipFile('tobedeleted-2.zip', mode='w')
z.writestr('tobedeleted.py', file_contents)
z.close()

# <demo> --- stop ---
# Exercise 5: list what we are about to delete.
print('Listing directory contents')
print("Directory: " + os.getcwd())
files = glob.glob('tobedeleted*')
for f in files:
    print(f)

# <demo> --- stop ---
# Exercise 6: confirm, delete the files, then remove the chapter dir.
raw_input("Press enter to delete the files and directory...")
for f in files:
    # Safety first!
    if f.find('tobedeleted') >= 0:
        os.unlink(f)

# chapter_name is presumably the demo directory set up earlier -- verify.
os.chdir('..')
os.rmdir(chapter_name)
Example #60
0
        # recover the filename generated by the inference code (as defined by MONAI output)
        img_filename = os.path.basename(img)
        flag_zip = 0
        # Strip ".nii.gz" (7 chars) or ".nii" (4 chars) from the name.
        # NOTE(review): `'gz' in img_filename` matches the substring anywhere
        # in the name, not just the extension (e.g. "scan_gz_01.nii" would be
        # mis-stripped) -- an endswith('.nii.gz') check would be safer.
        if 'gz' in img_filename:
            img_filename = img_filename[:-7]
            flag_zip = 1
        else:
            img_filename = img_filename[:-4]
        # Rebuild the path the pipeline wrote: <out_folder>/<name>/<name>_<postfix>.nii[.gz]
        out_filename = img_filename + '_' + config['output']['out_postfix'] + '.nii.gz' if flag_zip \
            else img_filename + '_' + config['output']['out_postfix'] + '.nii'
        out_filename = os.path.join(*[out_folder, img_filename, out_filename])

        # check existence of segmentation file
        if not os.path.exists(out_filename):
            raise FileNotFoundError("Network output file {} not found, "
                                    "check if the segmentation pipeline has failed".format(out_filename))

        # rename file with the indicated output name
        os.rename(out_filename, seg)
        # Remove the per-image output dir, now empty after the rename
        # (os.rmdir fails if the pipeline left anything else inside).
        if os.path.exists(seg):
            os.rmdir(os.path.join(out_folder, img_filename))