Example #1
File: utils.py Project: uodas/nupic
def remove(name, throwOnError=True):
    """
  Delete a directory or file.
  Similar to shutil.rmtree except that it logs
  """
    # log.debug("Removing item %s", name)
    if not os.path.exists(name):
        log.debug("Item %s does not exist", name)
        return
    if os.path.isdir(name):
        try:
            shutil.rmtree(name)
        except Exception:
            if throwOnError:
                log.error("Unable to remove directory %s", name)
                raise
            else:
                log.warning("Unable to remove directory %s", name)
                return
    else:
        try:
            os.unlink(name)
        except Exception as e:
            if throwOnError:
                log.error("Unable to remove file %s (%s)", name, e)
                raise
            else:
                log.warning("Unable to remove file %s (%s)", name, e)
                return
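Much of this hand-rolled error handling can be pushed into shutil.rmtree itself via its ignore_errors and onerror parameters. A minimal log-and-continue sketch (the logger name is illustrative, not taken from the example above):

import logging
import shutil

log = logging.getLogger("cleanup")

def log_rmtree_error(func, path, exc_info):
    # shutil.rmtree calls this for each failure; func is the failing os
    # function (e.g. os.unlink), exc_info the (type, value, traceback) triple.
    log.warning("Unable to remove %s (%s)", path, exc_info[1])

shutil.rmtree("some/dir", onerror=log_rmtree_error)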
Example #2
def delete_bugex_result(request, delete_token):
    """Delete the results data for a specific user request."""

    # get user request
    user_request = get_object_or_404(UserRequest, delete_token=delete_token)

    # check whether this request has already been deleted
    if user_request.status == UserRequestStatus.DELETED:
        message = "This BugEx result has already been deleted."
    else:
        try:
            # Deleting underlying archive file
            user_request.codearchive.archive_file.delete()
            # Deleting BugExResult, CodeArchive, all Facts, all SourceFiles,
            # all ClassFiles, all Folders, all Lines
            if user_request.result:
                # only try to delete the result if there actually is one
                user_request.result.delete()
                user_request.result = None  # manually set relation to null
                user_request.save()
            user_request.codearchive.delete()
            # Delete the entire directory where the archive file was stored
            shutil.rmtree(user_request.folder)
            # Set user request status to DELETED
            user_request.update_status(UserRequestStatus.DELETED)

            message = "Your BugEx result has been deleted successfully."

        except Exception as e:
            # something unexpected, we have to log this
            message = "Sorry, we could not delete this result."
            logging.error(message + " Exception: " + str(e))

    # render status page with appropriate content
    return render(request, "bugex_webapp/status.html", {"message": message, "pagetitle": "Delete result"})
Example #3
    def test_init_git_create_repo(self):
        git_dir = os.path.join(WALIKI_DATA_DIR, ".git")
        shutil.rmtree(git_dir)
        Git()
        self.assertTrue(os.path.isdir(git_dir))
        self.assertEqual(git.config("user.name").stdout.decode("utf8")[:-1], WALIKI_COMMITTER_NAME)
        self.assertEqual(git.config("user.email").stdout.decode("utf8")[:-1], WALIKI_COMMITTER_EMAIL)
Example #4
def build_site(project_dir):
    build_dir = os.path.join(project_dir, BUILD_DIR)
    if os.path.exists(build_dir):
        shutil.rmtree(build_dir)
    os.makedirs(build_dir)
    build_pages(project_dir, build_dir)
    build_media(project_dir, build_dir)
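Example #4's remove-then-recreate idiom can also be written so that it tolerates a build directory that never existed; a compact variant using only standard-library calls:

import os
import shutil

def clean_dir(path):
    # remove the tree if present, then recreate it empty
    shutil.rmtree(path, ignore_errors=True)
    os.makedirs(path, exist_ok=True)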
Example #5
    def remove(self, ignore_errors=False):
        # Think about ignore_errors
        stream_logger.info("   - %s" % self.name)

        # If the archive has not already been extracted
        if not os.path.exists("%s/%s" % (conf.get("settings", "cache"), self.name)):
            self.unarchive()

        self.import_control()
        # Pre Remove
        stream_logger.info("     | Pre Remove")
        self.control.pre_remove()

        # Remove
        stream_logger.info("     | Remove")
        files_list_path = os.path.join(conf.get("settings", "cache"), self.name, "files.lst")
        with open(files_list_path) as files_lst:
            files_list = files_lst.readlines()
        for _file in files_list:
            try:
                os.remove(os.path.join(conf.get("settings", "packages"), _file.replace("\n", "")))
            except OSError:
                pass
        # Post Remove
        stream_logger.info("     | Post Remove")
        self.control.post_remove()

        stream_logger.info("     | Clean")
        shutil.rmtree(os.path.join(conf.get("settings", "cache"), self.name))
Example #6
def utt_distance(utt, utt2, method="dtw", metric="euclidean", sig2fv=SIG2FV, VI=None):
    """ Uses Trackfile class' distance measurements to compare utts...
        See docstring in tfuncs_analysis.py for more details...
    """

    temppath = mkdtemp()

    # wavs
    wfn1 = os.path.join(temppath, "1." + WAV_EXT)
    wfn2 = os.path.join(temppath, "2." + WAV_EXT)
    utt["waveform"].write(wfn1)
    utt2["waveform"].write(wfn2)
    # feats
    ffn1 = os.path.join(temppath, "1." + FEAT_EXT)
    ffn2 = os.path.join(temppath, "2." + FEAT_EXT)
    # use the sig2fv command template passed as a parameter (defaults to
    # SIG2FV); the original referenced the module-level constant and
    # silently ignored the argument
    cmds = sig2fv % {"inputfile": wfn1, "outputfile": ffn1}
    # print(cmds)
    os.system(cmds)
    cmds = sig2fv % {"inputfile": wfn2, "outputfile": ffn2}
    # print(cmds)
    os.system(cmds)

    # tracks
    t1 = Track()
    t1.load_track(ffn1)
    t2 = Track()
    t2.load_track(ffn2)

    # compare and save
    t3 = t1.distances(t2, method=method, metric=metric, VI=VI)

    shutil.rmtree(temppath)

    return t3
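Note that the mkdtemp()/rmtree() pairing above leaks the temporary directory if anything in between raises. Since Python 3.2, tempfile.TemporaryDirectory scopes the cleanup with a context manager; a minimal sketch (file names here are illustrative):

import os
import tempfile

with tempfile.TemporaryDirectory() as temppath:
    wfn1 = os.path.join(temppath, "1.wav")
    open(wfn1, "w").close()
# temppath and everything in it are removed here, even if the body raised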
Example #7
    def processReset(self, serie):
        logging.debug("In Reset::processReset()")
        self.setProgress(
            10,
            QtGui.QApplication.translate("Reset", "Reseting serie {0}", None, QtGui.QApplication.UnicodeUTF8).format(
                serie.description
            ),
        )
        QtCore.QCoreApplication.processEvents()
        study = serie.study
        patient = study.patient
        yaml = os.path.join(patient.directory, serie.file)
        (path, tail) = os.path.split(yaml)
        for filepath in os.listdir(path):
            if filepath != "main":
                filepath = os.path.join(path, filepath)
                if os.path.isdir(filepath):
                    shutil.rmtree(filepath)
                else:
                    os.remove(filepath)

        idDesc = "{0}{1}".format(hashStr(serie.uid), hashStr(serie.description))
        mScreens = []
        save = {"vti": "{0}/main/main.vti".format(idDesc), "mScreens": mScreens}
        mScreens.append(
            {"name": QtGui.QApplication.translate("Importer", "Main", None, QtGui.QApplication.UnicodeUTF8)}
        )

        persist_yaml_file(
            os.path.join(patient.directory, os.path.join(idDesc, "{0}{1}".format(hashStr(serie.uid), ".yaml"))), save
        )

        self.setProgress(100, QtGui.QApplication.translate("Reset", "Finished.", None, QtGui.QApplication.UnicodeUTF8))
        self.close()
Example #8
def _get_params_base_options(param_path):

    # Read parameter file into params object
    params = configparser.ConfigParser()
    try:
        params.read(param_path)
    except Exception:
        raise ValueError("Parameter file is invalid")

    # Setup param_dir and results_dir, get run_names
    param_dir = os.path.abspath(os.path.dirname(param_path))
    results_dir = os.path.join(param_dir, "results")

    if os.path.isdir(results_dir):
        shutil.rmtree(results_dir)
    os.makedirs(results_dir)

    run_names = params.sections()

    # Check there's at least one run
    if not run_names:
        raise NameError("Parameters file must contain at least one run")

    # Create options dict
    base_options = {}
    base_options["param_dir"] = param_dir
    base_options["results_dir"] = results_dir
    base_options["run_names"] = run_names

    return params, base_options
Example #9
def main():
    dir = "/mfs/backup/mysql/"
    pattern = os.path.join(dir, "*/snapshots/*")
    # pattern = os.path.join(dir, '*/binlogs/*')
    # pattern = os.path.join('/mfs/user/dba/recovery/*')

    total_size = 0
    for i in glob.glob(pattern):
        info = os.stat(i)
        t1 = time.gmtime(info.st_ctime)
        t2 = time.strftime("%Y-%m-%d", t1)
        year, month, day = t2.split("-")
        time1 = datetime.datetime(int(year), int(month), int(day))

        m1 = time.gmtime()
        m2 = time.strftime("%Y-%m-%d", m1)
        year, month, day = m2.split("-")
        time2 = datetime.datetime(int(year), int(month), int(day))

        days = (time2 - time1).days
        # print  time1, time2, days

        if days > 20:
            total_size = total_size + get_directory_size(i) / 1024 ** 3
            print(i, time1, total_size)
            try:
                shutil.rmtree(i)
                # os.remove(i)
            except Exception as exc:
                print(i, exc)
Example #10
def test_get_start_address():
    stdout, stderr = "", ""

    tmp_dir = mkdtemp()
    asm_fp = join(tmp_dir, "shellcode.asm")
    exe_fp = join(tmp_dir, "shellcode.exe")

    secret_fp = "/tmp/secret"
    os.system('echo "%s" > %s' % (SECRET_STR, secret_fp))

    kernel = ShellNoob.get_kernel()
    if kernel == "Linux":
        shutil.copyfile(join(dirname(__file__), "samples/x86-linux/open-read-write.asm"), asm_fp)
    elif kernel == "FreeBSD":
        shutil.copyfile(join(dirname(__file__), "samples/x86-freebsd/open-read-write.asm"), asm_fp)
    else:
        raise Exception("testing on kernel %s not supported" % kernel)

    _out, _err, _val = run_with_args("%s --to-exe" % asm_fp)
    stdout += _out
    stderr += _err
    assert _val == 0

    snoob = ShellNoob()
    start_addr = snoob.get_start_address(exe_fp)
    assert re.match("0x[0-9a-f]+", start_addr)

    shutil.rmtree(tmp_dir)
    os.unlink(secret_fp)
    return stdout, stderr, 0
Example #11
def create_addon_from_github(URL, local_repo_folder):
    archive_suffix = "/archive/master.zip"
    print(URL)
    addonname = URL.strip("/").split("/")[-1]
    if not os.path.exists(local_repo_folder + os.sep + addonname):
        print("Making folder for addon in repo: ", addonname)
        os.makedirs(local_repo_folder + os.sep + addonname)
    download_file(URL + archive_suffix, local_repo_folder + os.sep + addonname + os.sep + "master.zip")
    try:
        xml_frm_file, ziptype = zipfilehandler(local_repo_folder + os.sep + addonname + os.sep + "master.zip")
    except Exception as e:
        print("cannot create a zip from githuburl ", URL)
        return
    root = ET.fromstring(xml_frm_file)
    for element in root.iter("addon"):
        addon_name = element.attrib["id"]
        addon_version = element.attrib["version"]
    try:
        current_zip = zipfile.ZipFile(local_repo_folder + os.sep + addonname + os.sep + "master.zip")
        current_zip.extractall(local_repo_folder + os.sep + addonname + os.sep)
        current_zip.close()
        shutil.move(
            local_repo_folder + os.sep + addonname + os.sep + addon_name + "-master",
            local_repo_folder + os.sep + addonname + os.sep + addon_name,
        )
        os.remove(local_repo_folder + os.sep + addonname + os.sep + "master.zip")
        shutil.make_archive(
            local_repo_folder + os.sep + addon_name + os.sep + addon_name + "-" + addon_version,
            "zip",
            local_repo_folder + os.sep + addon_name,
            addon_name,
        )
        shutil.rmtree(local_repo_folder + os.sep + addonname + os.sep + addon_name)
    except Exception as e:
        print("could not save fil ", addonname)
Example #12
def handle_overhead(suffix, db_basename, force_mode):
    index = read_ffindex(db_basename + "_" + suffix + ".ffindex")
    a3m_index = read_ffindex(db_basename + "_a3m.ffindex")

    overhead = get_overhead(index, a3m_index)

    # delete overhead cs219 files
    if len(overhead) == 0:
        return

    for f in overhead:
        sys.stderr.write(
            "WARNING: Entry %s from %s_%s.ff{data,index} has no corresponding entry in the a3m database!\n"
            % (f, db_basename, suffix)
        )

    if force_mode:
        sys.stderr.write("WARNING: Try to fix overhead entries!\n")
        tmp_dir = tempfile.mkdtemp()

        try:
            index_file = os.path.join(tmp_dir, "to_delete.dat")
            write_set_to_file(overhead, index_file)
            remove_files_from_index(index_file, db_basename + "_" + suffix + ".ffindex")
            optimize_database(db_basename + "_" + suffix + ".ffdata", db_basename + "_" + suffix + ".ffindex")
        finally:
            shutil.rmtree(tmp_dir)
    else:
        sys.stderr.write("You may try to use the option --force to fix the database!\n")
Example #13
    def tearDown(self):
        for dirname in ["en_US", "ja_JP", "lv_LV"]:
            locale_dir = os.path.join(self._i18n_dir(), dirname)
            if os.path.isdir(locale_dir):
                shutil.rmtree(locale_dir)

        os.chdir(self.olddir)
Example #14
    def tearDown(self):
        d = defer.succeed(None)
        if self.real_bot and self.real_bot.running:
            d.addCallback(lambda _: self.real_bot.stopService())
        if os.path.exists(self.basedir):
            shutil.rmtree(self.basedir)
        return d
Example #15
    def test_get_configs_cwd(self):
        """config.in_cwd() finds config in shell's current working directory"""

        current_dir = os.getcwd()

        # create a temporary folder and change dir into it
        tmp_dir = tempfile.mkdtemp(suffix="tmuxp")
        os.chdir(tmp_dir)

        try:
            config1 = open(".tmuxp.json", "w+b")
            config1.close()

            configs_found = config.in_cwd()
        finally:
            os.remove(config1.name)

        self.assertEqual(len(configs_found), 1)
        self.assertIn(".tmuxp.json", configs_found)

        # clean up
        os.chdir(current_dir)
        if os.path.isdir(tmp_dir):
            shutil.rmtree(tmp_dir)
Example #16
def tempdir():
    directory = tempfile.mkdtemp()

    try:
        yield directory
    finally:
        shutil.rmtree(directory)
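As written, this generator is not directly usable in a with statement; the yield/finally shape suggests it carried a decorator in its original context. A self-contained sketch assuming contextlib.contextmanager (the original could equally have been a pytest fixture):

import contextlib
import os
import shutil
import tempfile

@contextlib.contextmanager
def tempdir():
    directory = tempfile.mkdtemp()
    try:
        yield directory  # hand the path to the caller
    finally:
        shutil.rmtree(directory)  # always runs, even if the block raises

with tempdir() as d:
    open(os.path.join(d, "scratch.txt"), "w").close()
# the directory is gone once the with block exits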
Example #17
    def run(self):
        numpy.random.seed(self._seed)
        random.seed(numpy.random.randint(2 ** 31))

        try:

            class DeathRequestedError(Exception):
                pass

            try:

                def handle_sigusr1(number, frame):
                    raise DeathRequestedError()

                try:
                    signal.signal(signal.SIGUSR1, handle_sigusr1)

                    self.handle_subsolver()
                finally:
                    signal.signal(signal.SIGUSR1, signal.SIG_IGN)
            except DeathRequestedError:
                pass
            except Exception as error:
                self._stm_queue.put(error)
        except KeyboardInterrupt:
            pass
        finally:
            if self._popened is not None:
                self._popened.kill()

                os.kill(self._popened.pid, signal.SIGCONT)

                self._popened.wait()

            shutil.rmtree(self._tmpdir, ignore_errors=True)
Example #18
    def test_save_and_load_builder(self):
        df = self.df
        tmpPath = tempfile.mkdtemp()
        shutil.rmtree(tmpPath)
        df.write.json(tmpPath)
        actual = self.sqlCtx.read.json(tmpPath)
        self.assertEqual(sorted(df.collect()), sorted(actual.collect()))

        schema = StructType([StructField("value", StringType(), True)])
        actual = self.sqlCtx.read.json(tmpPath, schema)
        self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))

        df.write.mode("overwrite").json(tmpPath)
        actual = self.sqlCtx.read.json(tmpPath)
        self.assertEqual(sorted(df.collect()), sorted(actual.collect()))

        df.write.mode("overwrite").options(noUse="this options will not be used in save.").format("json").save(
            path=tmpPath
        )
        actual = self.sqlCtx.read.format("json").load(path=tmpPath, noUse="this options will not be used in load.")
        self.assertEqual(sorted(df.collect()), sorted(actual.collect()))

        defaultDataSourceName = self.sqlCtx.getConf("spark.sql.sources.default", "org.apache.spark.sql.parquet")
        self.sqlCtx.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
        actual = self.sqlCtx.load(path=tmpPath)
        self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
        self.sqlCtx.sql("SET spark.sql.sources.default=" + defaultDataSourceName)

        shutil.rmtree(tmpPath)
Example #19
def teardown_dir():
    global cookie_name
    global testdir
    if testdir:
        shutil.rmtree(testdir)
        testdir = None
    cookie_name = None
Example #20
    def tear_down_class(cls):
        """
        Deletes test objects and bucket and tmp dir created by set_up_class.
        """
        if not hasattr(cls, "created_test_data"):
            return
        # Call cls.tearDown() in case the tests got interrupted, to ensure
        # dst objects get deleted.
        cls.tearDown()

        # Delete test objects.
        cls.empty_src_key.delete()
        cls.small_src_key.delete()
        cls.larger_src_key.delete()

        # Retry (for up to 2 minutes) until the bucket gets deleted (it may
        # not succeed the first time round, due to eventual consistency of
        # bucket delete operations).
        for i in range(60):
            try:
                cls.src_bucket_uri.delete_bucket()
                break
            except StorageResponseError:
                print "Test bucket (%s) not yet deleted, still trying" % (cls.src_bucket_uri.uri)
                time.sleep(2)
        shutil.rmtree(cls.tmp_dir)
        cls.tmp_dir = tempfile.mkdtemp(prefix=cls.tmpdir_prefix)
Example #21
def check_a3m_format(db_basename, force_mode):
    entries = ffindex.read_index(db_basename + "_a3m.ffindex")
    data = ffindex.read_data(db_basename + "_a3m.ffdata")

    corrupted_alignments = set()
    for entry in entries:
        lines = ffindex.read_lines(entry, data)
        alignment = a3m.A3M_Container()
        try:
            alignment.read_a3m_from_lines(lines)
        except Exception:
            corrupted_alignments.add(entry.name)
            sys.stderr.write("Warning: A3M " + entry.name + " is corrupted!\n")

    if len(corrupted_alignments) == 0:
        return

    if force_mode:
        tmp_dir = tempfile.mkdtemp()

        try:
            sys.stderr.write("WARNING: remove corrupted a3m's!\n")

            corrupted_index_file = os.path.join(tmp_dir, "corrupted.dat")
            write_set_to_file(corrupted_alignments, corrupted_index_file)

            for suffix in ["a3m", "cs219", "hhm"]:
                remove_files_from_index(corrupted_index_file, db_basename + "_" + suffix + ".ffindex")
                sort_database(db_basename + "_" + suffix + ".ffdata", db_basename + "_" + suffix + ".ffindex")
                optimize_database(db_basename + "_" + suffix + ".ffdata", db_basename + "_" + suffix + ".ffindex")
        finally:
            shutil.rmtree(tmp_dir)
    else:
        sys.stderr.write("You may try to use the option --force to fix the database!\n")
Example #22
    def tearDown(self):
        # Restore default DNS
        self.tester.sys(self.source + self.sbin + self.cmd + "false")
        self.tester.cleanup_artifacts()
        self.tester.delete_keypair(self.keypair)
        self.tester.local("rm " + self.keypair.name + ".pem")
        shutil.rmtree(self.tester.credpath)
Example #23
def cleanupFiles():
    # First get rid of modified files
    for l in ["l1", "l2", "l3"]:
        arcpy.Delete_management(l)

    for f in glob.glob("C:\\Arctmp\\*"):
        try:
            shutil.rmtree(f)
        except OSError:
            print("UNABLE TO REMOVE:", f)
    # Now pick a fresh workspace directory name
    for i in range(1000000):
        new_workspace = "C:\\Arctmp\\workspace." + str(i)
        if not os.path.exists(new_workspace):
            break
    print("TESTING USING WORKSPACE", new_workspace)
    # Now move in fresh copies
    shutil.copytree("C:\\Arcbase", new_workspace)
    print("CONTENTS:")
    arcpy.env.workspace = new_workspace
    for f in sorted(glob.glob(arcpy.env.workspace + "\\*.shp")):
        print(f)
    for f in sorted(glob.glob(arcpy.env.workspace + "\\*.lyr")):
        print(f)
    for f in sorted(glob.glob(arcpy.env.workspace + "\\*.gdb")):
        print(f)
Example #24
def purge_screenshots():
    screenshots_folder = ApplicationData.get(".tmp_screenshots")
    if os.path.exists(screenshots_folder):
        try:
            shutil.rmtree(screenshots_folder)
        except EnvironmentError:
            pass
Example #25
    def _init_index(self, reset=False):
        index_path = os.path.join(jupyter_data_dir(), "index")

        # clear out old index if requested
        if reset:
            shutil.rmtree(index_path, ignore_errors=True)

        # make sure there's a path to store the index data
        if not os.path.exists(index_path):
            os.makedirs(index_path)

        if not exists_in(index_path):
            # create an index with the current schema
            analyzer = ChineseAnalyzer()
            schema = Schema(
                basename=TEXT(stored=True, field_boost=5.0, analyzer=analyzer),
                dirname=ID(stored=True, analyzer=analyzer),
                path=ID(stored=True, unique=True, analyzer=analyzer),
                content=TEXT(stored=False, analyzer=analyzer),
                time=STORED,
            )
            self.ix = create_in(index_path, schema)
        else:
            # open the existing index
            self.ix = open_dir(index_path)

        # build a query parser based on the current schema
        self.query_parser = MultifieldParser(["content", "basename", "dirname"], self.ix.schema)
Example #26
    def archive(self, bucket, path, compress=False):

        # make root path, if it does not exist
        if not os.path.exists(path):
            os.mkdir(path)

        bckt = self.conn.get_bucket(bucket)
        count = 0

        for item in bckt.list():

            # build local path
            local_path = os.path.join(path, item.key)

            # find local dir and create intermediate dirs
            # if they don't exist
            local_dir = os.path.dirname(local_path)
            if not os.path.exists(local_dir):
                os.makedirs(local_dir)

            if not os.path.isdir(local_path):
                with open(local_path, "w") as local_file:
                    item.get_contents_to_file(local_file)
                    logging.info("copying %s:%s" % (bucket, item.key))
                    count += 1

        if compress:
            tarpath = "%s.tar.gz" % path
            with tarfile.open(tarpath, "w:gz") as tar:
                tar.add(path, arcname=path.split(os.sep)[-1], recursive=True)
            shutil.rmtree(path)
            logging.info("compressed archive and removed working directory")

        logging.info("archived %d files in %s" % (count, bucket))
Example #27
    def tearDown(self):
        """Cleanup after each test."""
        shutil.rmtree(self.tmpdir)
        try:
            del os.environ["VIGILO_SETTINGS"]
        except KeyError:
            pass
Example #28
def sdv_clean(name, vs):
    # Remove the SDV output directories, ignoring errors if they are absent
    for dirname in ["sdv", "sdv.temp"]:
        path = [vs, name, dirname]
        print(path)
        shutil.rmtree(os.path.join(*path), ignore_errors=True)

    # Remove the SDV output files, ignoring missing ones
    for filename in ["staticdv.job", "refine.sdv", "sdv-map.h"]:
        path = [vs, name, filename]
        print(path)
        try:
            os.unlink(os.path.join(*path))
        except OSError:
            pass
Example #29
    def delete(self, project, version=None):
        if version is None:
            rmtree(path.join(self.basedir, project))
        else:
            remove(self._eggpath(project, version))
            if not self.list(project):  # remove project if no versions left
                self.delete(project)
Example #30
    def delete_dirs(self):
        reply = QtGui.QMessageBox.question(
            self,
            _fromUtf8("Ordner löschen"),
            "Bist du sicher?",
            QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
            QtGui.QMessageBox.No,
        )
        src = self.ui.lineEdit.text()

        if reply == QtGui.QMessageBox.Yes:
            self.ui.plainTextEdit.setPlainText(_fromUtf8(""))
            dirs = glob.glob(src + "/*")
            for dirname in dirs:
                dirname = os.path.normpath(dirname)
                if dirname == os.path.join(src, "Incomplete Downloads"):
                    continue
                else:
                    shutil.rmtree(dirname)
                    self.ui.plainTextEdit.appendPlainText(_fromUtf8(dirname + " deleted"))
            if len(dirs) > 1:
                self.ui.plainTextEdit.appendPlainText(_fromUtf8("{}\n".format("*" * 80)))
                self.ui.plainTextEdit.appendPlainText(_fromUtf8("Ordner gelöscht\n"))
                self.ui.plainTextEdit.appendPlainText(_fromUtf8("{}\n".format("*" * 80)))
            else:
                self.ui.plainTextEdit.appendPlainText(_fromUtf8("{}\n".format("*" * 80)))
                self.ui.plainTextEdit.appendPlainText(_fromUtf8("Keine Ordner zu löschen\n"))
                self.ui.plainTextEdit.appendPlainText(_fromUtf8("{}\n".format("*" * 80)))