Example #1
# Example file from the book "Développement Système sous Linux"
# (C) 2015-2019 - Christophe BLAESS <*****@*****.**>
# https://www.blaess.fr/christophe/
# ------------------------------------------------------------------

from __future__ import print_function
import os
import stat
import sys

sys.argv.pop(0)
for f in sys.argv:
    try:
        # Use lstat() rather than stat() so that symbolic links are
        # reported as links instead of being followed to their target.
        s = os.lstat(f)
        if stat.S_ISBLK(s.st_mode):
            print(f, "block device")
        if stat.S_ISCHR(s.st_mode):
            print(f, "character device")
        if stat.S_ISDIR(s.st_mode):
            print(f, "directory")
        if stat.S_ISFIFO(s.st_mode):
            print(f, "fifo")
        if stat.S_ISLNK(s.st_mode):
            print(f, "symbolic link")
        if stat.S_ISREG(s.st_mode):
            print(f, "regular file")
        if stat.S_ISSOCK(s.st_mode):
            print(f, "socket")
    except OSError:
        print(f, "error")
Example #2
"""Utilities for comparing files and directories.
Example #3
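# Excerpt of CherryPy's static-file helper (Python 2 era). It appears to
# assume the enclosing module imports os, stat, mimetypes, cherrypy, the
# cherrypy.lib helpers `cptools` and `http`, and `file_generator_limited`.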
def serve_file(path, content_type=None, disposition=None, name=None):
    """Set status, headers, and body in order to serve the given file.
    
    The Content-Type header will be set to the content_type arg, if provided.
    If not provided, the Content-Type will be guessed by the file extension
    of the 'path' argument.
    
    If disposition is not None, the Content-Disposition header will be set
    to "<disposition>; filename=<name>". If name is None, it will be set
    to the basename of path. If disposition is None, no Content-Disposition
    header will be written.
    """

    response = cherrypy.response

    # If path is relative, users should fix it by making path absolute.
    # That is, CherryPy should not guess where the application root is.
    # It certainly should *not* use cwd (since CP may be invoked from a
    # variety of paths). If using tools.static, you can make your relative
    # paths become absolute by supplying a value for "tools.static.root".
    if not os.path.isabs(path):
        raise ValueError("'%s' is not an absolute path." % path)

    try:
        st = os.stat(path)
    except OSError:
        raise cherrypy.NotFound()

    # Check if path is a directory.
    if stat.S_ISDIR(st.st_mode):
        # Let the caller deal with it as they like.
        raise cherrypy.NotFound()

    # Set the Last-Modified response header, so that
    # modified-since validation code can work.
    response.headers['Last-Modified'] = http.HTTPDate(st.st_mtime)
    cptools.validate_since()

    if content_type is None:
        # Set content-type based on filename extension
        ext = ""
        i = path.rfind('.')
        if i != -1:
            ext = path[i:].lower()
        content_type = mimetypes.types_map.get(ext, "text/plain")
    response.headers['Content-Type'] = content_type

    if disposition is not None:
        if name is None:
            name = os.path.basename(path)
        cd = '%s; filename="%s"' % (disposition, name)
        response.headers["Content-Disposition"] = cd

    # Set Content-Length and use an iterable (file object)
    #   this way CP won't load the whole file in memory
    c_len = st.st_size
    bodyfile = open(path, 'rb')

    # HTTP/1.0 didn't have Range/Accept-Ranges headers, or the 206 code
    if cherrypy.request.protocol >= (1, 1):
        response.headers["Accept-Ranges"] = "bytes"
        r = http.get_ranges(cherrypy.request.headers.get('Range'), c_len)
        if r == []:
            response.headers['Content-Range'] = "bytes */%s" % c_len
            message = "Invalid Range (first-byte-pos greater than Content-Length)"
            raise cherrypy.HTTPError(416, message)
        if r:
            if len(r) == 1:
                # Return a single-part response.
                start, stop = r[0]
                if stop > c_len:
                    stop = c_len
                r_len = stop - start
                response.status = "206 Partial Content"
                response.headers['Content-Range'] = ("bytes %s-%s/%s" %
                                                     (start, stop - 1, c_len))
                response.headers['Content-Length'] = r_len
                bodyfile.seek(start)
                response.body = file_generator_limited(bodyfile, r_len)
            else:
                # Return a multipart/byteranges response.
                response.status = "206 Partial Content"
                import mimetools
                boundary = mimetools.choose_boundary()
                ct = "multipart/byteranges; boundary=%s" % boundary
                response.headers['Content-Type'] = ct
                if "Content-Length" in response.headers:
                    # Delete Content-Length header so finalize() recalcs it.
                    del response.headers["Content-Length"]

                def file_ranges():
                    # Apache compatibility:
                    yield "\r\n"

                    for start, stop in r:
                        yield "--" + boundary
                        yield "\r\nContent-type: %s" % content_type
                        yield ("\r\nContent-range: bytes %s-%s/%s\r\n\r\n" %
                               (start, stop - 1, c_len))
                        bodyfile.seek(start)
                        for chunk in file_generator_limited(
                                bodyfile, stop - start):
                            yield chunk
                        yield "\r\n"
                    # Final boundary
                    yield "--" + boundary + "--"

                    # Apache compatibility:
                    yield "\r\n"

                response.body = file_ranges()
        else:
            response.headers['Content-Length'] = c_len
            response.body = bodyfile
    else:
        response.headers['Content-Length'] = c_len
        response.body = bodyfile
    return response.body
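A minimal sketch of how serve_file might be called from a handler (hypothetical path and class; CherryPy 3-era API assumed):

import cherrypy

class Root(object):
    @cherrypy.expose
    def report(self):
        # Force a download dialog with an explicit filename.
        return serve_file('/srv/data/report.pdf',
                          content_type='application/pdf',
                          disposition='attachment', name='report.pdf')

cherrypy.quickstart(Root())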
Example #4
# Author       : Not sure where I got this from
# Created      : 28th November 2011
# Version      : 1.0
# Description  : Show file information for a given file


# get file information using os.stat()
# tested with Python 2.4 -- vegsaeat 25sep2006
import os
import stat # index constants for os.stat()
import time
# pick a file you have ...
file_name = raw_input("Enter a file name: ")
file_stats = os.stat(file_name)
# create a dictionary to hold file info
file_info = {
  'fname': file_name,
  'fsize': file_stats[stat.ST_SIZE],
  'f_lm': time.strftime("%d/%m/%Y %I:%M:%S %p", time.localtime(file_stats[stat.ST_MTIME])),
  'f_la': time.strftime("%d/%m/%Y %I:%M:%S %p", time.localtime(file_stats[stat.ST_ATIME])),
  # note: on Unix, ST_CTIME is the last metadata change, not the creation time
  'f_ct': time.strftime("%d/%m/%Y %I:%M:%S %p", time.localtime(file_stats[stat.ST_CTIME]))
}
print
print "file name = %(fname)s" % file_info
print "file size = %(fsize)s bytes" % file_info
print "last modified = %(f_lm)s" % file_info
print "last accessed = %(f_la)s" % file_info
print "creation time = %(f_ct)s" % file_info
 def isdir(self, s):
     """Return true if the pathname refers to an existing directory."""
     st = self.callstat(s)
     if not st:
         return False
     return statmod.S_ISDIR(st.st_mode)
Example #6
def ensure_dirs(path, gid=-1, uid=-1, mode=0o777, minimal=True):
    """ensure dirs exist, creating as needed with (optional) gid, uid, and mode.

    Be forewarned: if mode is set to a mode that blocks the euid
    from accessing the dir, this code *will* still try to create the dir.

    :param path: directory to ensure exists on disk
    :param gid: a valid GID to set any created directories to
    :param uid: a valid UID to set any created directories to
    :param mode: permissions to set any created directories to
    :param minimal: boolean controlling whether the specified mode must be
        enforced exactly, or merely treated as the minimal set of permissions
        required.  For example, if mode=0755, minimal=True, and a directory
        exists with mode 0707, this will restore the missing group perms,
        resulting in 0757.
    :return: True if the directory could be created/ensured to have those
        permissions, False if not.
    """

    try:
        st = os.stat(path)
    except OSError:
        base = os.path.sep
        try:
            um = os.umask(0)
            # if the dir perms would lack +wx, we have to force it
            force_temp_perms = ((mode & 0o300) != 0o300)
            resets = []
            apath = normpath(os.path.abspath(path))
            sticky_parent = False

            for directory in apath.split(os.path.sep):
                base = join(base, directory)
                try:
                    st = os.stat(base)
                    if not stat.S_ISDIR(st.st_mode):
                        # one of the path components isn't a dir
                        return False

                    # for intermediate dirs, remember whether the setgid
                    # bit is set so the final dir's perms can be reset later
                    if apath != base:
                        sticky_parent = (st.st_mode & stat.S_ISGID)

                except OSError:
                    # nothing exists.
                    try:
                        if force_temp_perms:
                            if not _safe_mkdir(base, 0o700):
                                return False
                            resets.append((base, mode))
                        else:
                            if not _safe_mkdir(base, mode):
                                return False
                            if base == apath and sticky_parent:
                                resets.append((base, mode))
                            if gid != -1 or uid != -1:
                                os.chown(base, uid, gid)
                    except OSError:
                        return False

            try:
                for base, m in reversed(resets):
                    os.chmod(base, m)
                    if gid != -1 or uid != -1:
                        os.chown(base, uid, gid)
            except OSError:
                return False

        finally:
            os.umask(um)
        return True
    else:
        if not os.path.isdir(path):
            # don't change perms for existing paths that aren't dirs
            return False

        try:
            if ((gid != -1 and gid != st.st_gid)
                    or (uid != -1 and uid != st.st_uid)):
                os.chown(path, uid, gid)
            if minimal:
                if mode != (st.st_mode & mode):
                    os.chmod(path, st.st_mode | mode)
            elif mode != (st.st_mode & 0o7777):
                os.chmod(path, mode)
        except OSError:
            return False
    return True
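A quick usage sketch (hypothetical path and IDs; `ensure_dirs` as defined above). With minimal=True the requested bits are OR-ed into an existing directory's mode, so a directory at 0o707 asked for 0o755 ends up at 0o757:

# Create the whole chain with at least rwxr-xr-x, owned by uid/gid 1000.
if ensure_dirs('/tmp/example/a/b/c', gid=1000, uid=1000,
               mode=0o755, minimal=True):
    print('directory chain ready')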
Example #7
 def is_dir(self, path):
     try:
         return stat.S_ISDIR(self.sftp.stat(str(path)).st_mode)
     except FileNotFoundError:
         return False
Example #8
def icon(stat_entry: jobs_pb2.StatEntry) -> Text:
    if stat.S_ISDIR(stat_entry.st_mode):
        return '📂'
    elif _is_symlink(stat_entry):
        return '🔗'
    return '📄'
Example #9
 def info(self, path):
     st = self.sftp.stat(path)
     return {
         "size": st.st_size,
         "type": "dir" if stat.S_ISDIR(st.st_mode) else "file",
     }
Example #10
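# Excerpt from the middle of a per-file save loop (apparently bup's save
# command): `ent`, `exists`, `opt`, `dir`, `lastdir` and `fcount` are
# defined by the enclosing loop and module, which are not shown here.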
    hashvalid = already_saved(ent)
    wasmissing = ent.sha_missing()
    oldsize = ent.size
    if opt.verbose:
        if not exists:
            status = 'D'
        elif not hashvalid:
            if ent.sha == index.EMPTY_SHA:
                status = 'A'
            else:
                status = 'M'
        else:
            status = ' '
        if opt.verbose >= 2:
            log('%s %-70s\n' % (status, path_msg(ent.name)))
        elif not stat.S_ISDIR(ent.mode) and lastdir != dir:
            if not lastdir.startswith(dir):
                log('%s %-70s\n' % (status, path_msg(os.path.join(dir, b''))))
            lastdir = dir

    if opt.progress:
        progress_report(0)
    fcount += 1

    if not exists:
        continue
    if opt.smaller and ent.size >= opt.smaller:
        if exists and not hashvalid:
            if opt.verbose:
                log('skipping large file "%s"\n' % path_msg(ent.name))
            lastskip_name = ent.name
Example #11
def find_hardlink_target(hlink_db, ent):
    if hlink_db and not stat.S_ISDIR(ent.mode) and ent.nlink > 1:
        link_paths = hlink_db.node_paths(ent.dev, ent.ino)
        if link_paths:
            return link_paths[0]
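An st_nlink count above 1 on a non-directory is the standard sign of a hard link; a standalone check (hypothetical path):

import os
import stat

st = os.lstat('/tmp/somefile')
if not stat.S_ISDIR(st.st_mode) and st.st_nlink > 1:
    # (st_dev, st_ino) is the key a hardlink database would be indexed by.
    print('other links exist for dev=%d ino=%d' % (st.st_dev, st.st_ino))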
Example #12
def main(argv=None):
    """
    Usage is:

    submit.py [id=<chargecode>] <url> <commandline> 

    Run from the working dir of the job, which must contain (in addition
    to the job files) a file named scheduler.conf with scheduler 
    properties for the job.

    id=<chargecode>, if present, gives the project to charge the job to.
    <url> is the URL of the submitting website, including the taskid parameter.

    Returns 0 with "jobid=<jobid>" on stdout if job submitted ok
    Returns 1 with multiline error message on stdout if error.
    Returns 2 for the specific error of queue limit exceeded.
    """
    if argv is None:
        argv = sys.argv
    print "argv (%s)" % (argv, )
    debugfile = open('nsgdebug', "w")
    debugfile.write("argv (%s)\n" % (argv, ))
    splits = argv[1].split("=", 1)
    if (len(splits) == 2 and splits[0] == "id"):
        account = splits[1]
        url = argv[2]
        cmdline = argv[3:]
    elif (len(splits) == 1 and splits[0] == "--account"):
        account = argv[2]
        url = argv[4]
        cmdline = argv[6:]
    else:
        print "failed to account information!"
        sys.exit(1)

    job_properties = lib.getProperties('_JOBINFO.TXT')
    print " job_properties['Tool'] (%s)" % job_properties['Tool']

    tooltype = lib.getToolType(job_properties['Tool'])
    print "tool type is %s" % tooltype

    # HTCondor does not have the notion of queues and/or partition. This
    # is simply a place holder value with no meaning.
    queue = "condor"

    # The inputfile archive must currently be unzipped locally prior to
    # job submission. Otherwise, a 'failed to find modeldir!' error will
    # be thrown. However, the inputfile archive must be transferred to a
    # remote OSG site and unzipped locally at the site to run job. As
    # such, it may make more sense to restructure when this 'modeldir!'
    # error is thrown. For example, perhaps at job runtime, not prior to
    # submission.
    print "New input file used"
    os.system('unzip -n inputfile > /dev/null')

    scheduler_properties = lib.getProperties("scheduler.conf")
    print scheduler_properties

    scheduler_info = lib.schedulerInfo(scheduler_properties, tooltype)
    print scheduler_info

    hocfile = scheduler_properties['fname']
    if ('outfilename' in scheduler_properties):
        outfilename = scheduler_properties['outfilename']
    else:
        outfilename = "output"

    runtime = int(scheduler_info["runtime"])
    useLocalDisk = False

    cwdir = os.getcwd()

    # assume that the modeldir is the only dir in the working dir
    file_list = os.listdir('.')
    modeldir = None
    for filename in file_list:
        stat_tuple = os.lstat(filename)
        if stat.S_ISDIR(stat_tuple[stat.ST_MODE]) and filename != '__MACOSX':
            modeldir = filename
    if modeldir is None:
        print "failed to find modeldir!"
        sys.exit(1)

    if scheduler_properties.has_key('subdirname'):
        subdirname = scheduler_properties['subdirname']
    else:
        subdirname = None

    if (tooltype != "singularityparametersearch"):
        if subdirname is not None:
            modeldir = subdirname

    # What is this section used for? Compiling models/libs on the fly
    # before the job is run? Mostly for neuron only?
    if (tooltype == "neuron77"
            or (scheduler_properties.has_key('nrnivmodloption')
                and scheduler_properties['nrnivmodloption'] == "1")):
        nrnivmodl = '/projects/ps-nsg/home/nsguser/applications_centos7/neuron7.7/nrn-7.7/x86_64/bin/nrnivmodl'
        print "running makelib.sh in %s %s" % (modeldir, nrnivmodl)
        os.system(
            "/projects/ps-nsg/home/nsguser/ngbw/contrib/scripts/makelib.sh %s %s"
            % (modeldir, nrnivmodl))
    else:
        print "tooltype (%s) not neuron, not running makelib.sh" % tooltype

    fullpathmodeldir = None

    # # # Create batch_command.cmdline file. # # #
    rfile = open(lib.cmdfile, "w")
    rfile.write("#!/usr/bin/env bash\n")

    # Again, for OSG, you will need to transfer inputfile to remote
    # system and unzip it locally on remote machine before starting
    # standard 'tooltype' commands.
    rfile.write("unzip -n inputfile > /dev/null\n")

    # # Why does each nsg job allow a custom directory name? Would it
    # not make sense to standardize name based on say a gateway job id?
    rfile.write("cd job_work_dir\n")

    # Record job's start date and time and then report job start to the
    # gateway frontend.
    rfile.write("echo Job starting at `date` > ../start.txt\n")
    rfile.writelines(("curl %s\&status=START" % (url), "\n"))
    rfile.writelines(("export CIPRES_THREADSPP=%d" %
                      (int(scheduler_info["threads_per_process"])), "\n"))
    rfile.writelines(
        ("export CIPRES_NP=%d" %
         (int(scheduler_info["ppn"]) * int(scheduler_info["nodes"])), "\n"))

    if (tooltype == "bash"):

        rfile.writelines(("module purge\n", ))
        rfile.writelines(("module list\n", ))
        rfile.writelines(("printenv\n", ))
        rfile.writelines(("lscpu\n", ))
        fname, interp, args = cmdline[0].split(" ", 2)
        rfile.writelines(("time -p bash %s %s" % (hocfile, args), "\n"))

    elif (tooltype == "freesurf"):

        rfile.writelines(("module purge\n", ))
        rfile.writelines(("module list\n", ))
        rfile.writelines(("printenv\n"))
        rfile.writelines(("lscpu\n", ))
        rfile.writelines(
            (". /opt/setup.sh\n", ))  # Setup osgvo-neuroimaging environment
        rfile.writelines(('export FS_LICENSE="${PWD}/license.txt"\n', ))
        rfile.writelines(('export SUBJECTS_DIR="${PWD}/subjects"\n', ))
        rfile.writelines(('mkdir -p "${SUBJECTS_DIR}"\n', ))
        fname, interp, args = cmdline[0].split(" ", 2)
        rfile.writelines(("time -p recon-all %s" % (args), "\n"))

    elif (tooltype == "python"):

        if (scheduler_properties['pythonversion'] == "2.7.15"):

            rfile.writelines(("module purge\n", ))
            rfile.writelines(("module load python/2.7.15\n", ))
            rfile.writelines(("module load py-argparse/1.4.0-py2.7\n", ))
            rfile.writelines(("module load py-asn1crypto/0.22.0-py2.7\n", ))
            rfile.writelines(("module load py-bottleneck/1.2.1-py2.7\n", ))
            rfile.writelines(("module load py-cffi/1.11.5-py2.7\n", ))
            rfile.writelines(("module load py-cryptography/2.3.1-py2.7\n", ))
            rfile.writelines(("module load py-cycler/0.10.0-py2.7\n", ))
            rfile.writelines(("module load py-cython/0.29-py2.7\n", ))
            rfile.writelines(("module load py-dateutil/2.5.2-py2.7\n", ))
            rfile.writelines(("module load py-enum34/1.1.6-py2.7\n", ))
            rfile.writelines(("module load py-functools32/3.2.3-2-py2.7\n", ))
            rfile.writelines(("module load py-h5py/2.8.0-py2.7\n", ))
            rfile.writelines(("module load py-idna/2.5-py2.7\n", ))
            rfile.writelines(("module load py-ipaddress/1.0.18-py2.7\n", ))
            rfile.writelines(("module load py-kiwisolver/1.0.1-py2.7\n", ))
            rfile.writelines(("module load py-lit/0.5.0-py2.7\n", ))
            rfile.writelines(("module load py-mako/1.0.4-py2.7\n", ))
            rfile.writelines(("module load py-markupsafe/1.0-py2.7\n", ))
            rfile.writelines(("module load py-matplotlib/2.2.3-py2.7\n", ))
            rfile.writelines(("module load py-nose/1.3.7-py2.7\n", ))
            rfile.writelines(("module load py-numexpr/2.6.5-py2.7\n", ))
            rfile.writelines(("module load py-numpy/1.15.2-py2.7\n", ))
            rfile.writelines(("module load py-pandas/0.23.4-py2.7\n", ))
            rfile.writelines(("module load py-paramiko/2.1.2-py2.7\n", ))
            rfile.writelines(("module load py-pillow/5.1.0-py2.7\n", ))
            rfile.writelines(("module load py-pkgconfig/1.2.2-py2.7\n", ))
            rfile.writelines(("module load py-pyasn1/0.2.3-py2.7\n", ))
            rfile.writelines(("module load py-pycparser/2.18-py2.7\n", ))
            rfile.writelines(("module load py-pyparsing/2.2.0-py2.7\n", ))
            rfile.writelines(("module load py-pytz/2017.2-py2.7\n", ))
            rfile.writelines(("module load py-scikit-learn/0.20.0-py2.7\n", ))
            rfile.writelines(("module load py-scipy/1.1.0-py2.7\n", ))
            rfile.writelines(("module load py-setuptools/40.4.3-py2.7\n", ))
            rfile.writelines(("module load py-six/1.11.0-py2.7\n", ))
            rfile.writelines(("module load py-subprocess32/3.2.7-py2.7\n", ))
            rfile.writelines(("module load pybullet/2.3.5-py2.7\n", ))
            rfile.writelines(("module list\n", ))
            rfile.writelines(("printenv\n", ))
            rfile.writelines(("lscpu\n", ))
            fname, interp, args = cmdline[0].split(" ", 2)
            rfile.writelines(("time -p python %s %s" % (hocfile, args), "\n"))

        elif (scheduler_properties['pythonversion'] == "3.7.0"):

            rfile.writelines(("module purge\n", ))
            rfile.writelines(("module load python/3.7.0\n", ))
            rfile.writelines(("module load py-asn1crypto/0.22.0-py3.7\n", ))
            rfile.writelines(("module load py-bottleneck/1.2.1-py3.7\n", ))
            rfile.writelines(("module load py-cffi/1.11.5-py3.7\n", ))
            rfile.writelines(("module load py-cryptography/1.8.1-py3.7\n", ))
            rfile.writelines(("module load py-cryptography/2.3.1-py3.7\n", ))
            rfile.writelines(("module load py-cycler/0.10.0-py3.7\n", ))
            rfile.writelines(("module load py-cython/0.29-py3.7\n", ))
            rfile.writelines(("module load py-dateutil/2.5.2-py3.7\n", ))
            rfile.writelines(("module load py-idna/2.5-py3.7\n", ))
            rfile.writelines(("module load py-kiwisolver/1.0.1-py3.7\n", ))
            rfile.writelines(("module load py-matplotlib/3.0.0-py3.7\n", ))
            rfile.writelines(("module load py-numexpr/2.6.5-py3.7\n", ))
            rfile.writelines(("module load py-numpy/1.15.2-py3.7\n", ))
            rfile.writelines(("module load py-pandas/0.23.4-py3.7\n", ))
            rfile.writelines(("module load py-paramiko/2.1.2-py3.7\n", ))
            rfile.writelines(("module load py-pillow/5.1.0-py3.7\n", ))
            rfile.writelines(("module load py-pyasn1/0.2.3-py3.7\n", ))
            rfile.writelines(("module load py-pycparser/2.18-py3.7\n", ))
            rfile.writelines(("module load py-pyparsing/2.2.0-py3.7\n", ))
            rfile.writelines(("module load py-pytz/2017.2-py3.7\n", ))
            rfile.writelines(("module load py-scikit-learn/0.20.0-py3.7\n", ))
            rfile.writelines(("module load py-scipy/1.1.0-py3.7\n", ))
            rfile.writelines(("module load py-setuptools/40.4.3-py3.7\n", ))
            rfile.writelines(("module load py-six/1.11.0-py3.7\n", ))
            rfile.writelines(("module list\n", ))
            rfile.writelines(("printenv\n", ))
            rfile.writelines(("lscpu\n", ))
            fname, interp, args = cmdline[0].split(" ", 2)
            rfile.writelines(("time -p python3 %s %s" % (hocfile, args), "\n"))

        else:

            rfile.writelines(("module purge\n", ))
            rfile.writelines(("module list\n", ))
            rfile.writelines(("printenv\n", ))
            rfile.writelines(("lscpu\n", ))
            fname, interp, args = cmdline[0].split(" ", 2)
            rfile.writelines(("time -p python3 %s %s" % (hocfile, args), "\n"))

    elif (tooltype == "tensorflow"):

        rfile.writelines(("module purge\n", ))
        rfile.writelines(("module list\n", ))
        rfile.writelines(("printenv\n", ))
        rfile.writelines(("lscpu\n", ))
        if (scheduler_info['gpus'] == 1):
            rfile.writelines(("nvidia-smi\n", ))
        fname, interp, args = cmdline[0].split(" ", 2)
        rfile.writelines(("time -p python3 %s %s" % (hocfile, args), "\n"))

    else:

        print('ERROR :: tooltype unknown!')
        sys.exit(1)

    # Record job's end date and time.
    rfile.write("echo Job finished at `date` > ../done.txt\n")

    # Once the main set of job commands have been run, move up and out
    # of the primary job_work_dir ...
    rfile.write("cd ../\n")

    # ... then create a compressed tarball of the job_work_dir and all
    # other files created during the job. For all compressed tarballs
    # less than 1GB, the tarball may be transferred back to the job's
    # HTCondor submit node via HTCondor's native file transfer
    # mechanism. Large file transfers greater than 1GB should use an
    # alternative mechanism such as globus-url-copy or stashcp.
    # i.e., eventually there will need to be a rewrite of the logic here
    # to switch between different file transfer mechanisms depending on
    # job type and/or job attributes. For now, we assume only HTCondor
    # file transfer mechanism will be utilized.
    if (tooltype == "eeglab_tg" or tooltype == "dynasim_tg"):
        rfile.writelines((
            "/bin/tar -cvzf %s.tar.gz ./%s ./scheduler_stderr.txt ./scheduler_stdout.txt ./stderr.txt ./stdout.txt"
            % (outfilename, modeldir), "\n"))
    else:
        rfile.writelines(
            ("/bin/tar -cvzf %s.tar.gz ./*" % (outfilename), "\n"))

    # Report job completion to the gateway frontend.
    rfile.write("curl %s\&status=DONE\n" % url)

    # # # batch_command.cmdline file creation complete. Close file # # #
    # # # and set file permissions.
    rfile.close()
    os.chmod(lib.cmdfile, 0744)

    # There is no inherent concept of a max wallclock time in HTCondor.
    # While one could be implemented, on OSG, we should assume all jobs
    # are preemptible. As such, setting wallclock time for jobs can be
    # eliminated for OSG-bound jobs. The following timestring settings
    # were left in the OSG version of the submit.py script for
    # completeness to compare with the submit.py script for Comet. The
    # lines can be eliminated, if desired.
    days, remainderminutes = divmod(runtime, 60 * 24)
    hours, remainderminutes = divmod(remainderminutes, 60)
    timestring = '{}-{:02d}:{:02d}:00'.format(days, hours, remainderminutes)
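    # For example (hypothetical values): runtime = 1500 minutes gives
    # divmod(1500, 1440) == (1, 60) and divmod(60, 60) == (1, 0),
    # so timestring becomes '1-01:00:00'.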

    # # # Create the HTCondor submit description file # # #
    rfile = open(lib.runfile, 'w')
    rfile.writelines(('universe = vanilla\n', ))
    rfile.writelines(('executable = batch_command.cmdline\n', ))
    #rfile.writelines(('arguments = %s' % (if_batch_command.cmdline_took_args), '\n'))
    rfile.writelines(
        ('request_cpus = %s' % (scheduler_info['threads_per_process']), '\n'))
    rfile.writelines(
        ('request_memory = %s' % (scheduler_info['memory']), '\n'))
    rfile.writelines(('request_disk = %s' % (scheduler_info['disk']), '\n'))
    if (scheduler_info['gpus'] == 1
        ):  # OSG currently only supports 1 GPU per job
        rfile.writelines(('request_gpus = 1\n', ))
        rfile.writelines(
            ('requirements = '
             '(Arch == "X86_64") && '
             '(OpSys == "LINUX") && '
             '(OSGVO_OS_STRING == "RHEL 6" || OSGVO_OS_STRING == "RHEL 7") && '
             '(HAS_MODULES =?= True) && '
             '(HAS_SINGULARITY == True) && '
             '(CUDACapability >= 3)', '\n'))
    else:
        rfile.writelines(
            ('requirements = '
             '(Arch == "X86_64") && '
             '(OpSys == "LINUX") && '
             '(OSGVO_OS_STRING == "RHEL 6" || OSGVO_OS_STRING == "RHEL 7") && '
             '(HAS_MODULES =?= True) && '
             '(HAS_SINGULARITY == True)', '\n'))
    rfile.writelines(('input = %s' % (fname), '\n'))
    #rfile.writelines(('transfer_input_files = %s' % (if_there_were_other_input_files), '\n'))
    rfile.writelines(('output = scheduler_stdout.txt\n', ))
    #rfile.writelines(('transfer_output_files = %s' % (if_wanted_only_subset_of_output_files), '\n'))
    rfile.writelines(('error = scheduler_stderr.txt\n', ))
    rfile.writelines(('log = scheduler_stdlog.txt\n', ))
    rfile.writelines(('should_transfer_files = YES\n', ))
    rfile.writelines(('when_to_transfer_output = ON_EXIT_OR_EVICT\n', ))
    rfile.writelines(('notify_user = %s' % (lib.email), '\n'))
    rfile.writelines(('notification = Always\n', ))
    if (tooltype == "freesurf"):
        rfile.writelines((
            '+SingularityImage = "/cvmfs/singularity.opensciencegrid.org/opensciencegrid/osgvo-freesurfer:latest"',
            '\n'))
    elif ((tooltype == "tensorflow") and (scheduler_info['gpus'] == 0)):
        rfile.writelines((
            '+SingularityImage = "/cvmfs/singularity.opensciencegrid.org/opensciencegrid/tensorflow:latest"',
            '\n'))
    elif ((tooltype == "tensorflow") and (scheduler_info['gpus'] == 1)):
        rfile.writelines((
            '+SingularityImage = "/cvmfs/singularity.opensciencegrid.org/opensciencegrid/tensorflow-gpu:latest"',
            '\n'))
    rfile.writelines(('+ProjectName = "%s"' % (account), '\n'))
    rfile.writelines(('queue 1\n', ))
    rfile.close()

    # # # Create epilog file. Is this file needed for OSG integration? # # #
    rfile = open('./epilog', "w")
    rfile.write("#!/usr/bin/env bash\n")
    rfile.writelines(("curl %s\&status=DONE" % (url), "\n"))
    rfile.close()
    os.chmod('./epilog', 0755)

    # Close nsgdebug file.
    debugfile.close()

    # Submit job to HTCondor.
    return lib.submitJob(partition=queue)
Example #13
def DoUnaryOp(op_id, s):
    # type: (Id_t, str) -> bool

    # Only use lstat if we're testing for a symlink.
    if op_id in (Id.BoolUnary_h, Id.BoolUnary_L):
        try:
            mode = posix.lstat(s).st_mode
        except OSError:
            # TODO: simple_test_builtin should report this as status=2.
            #e_die("lstat() error: %s", e, word=node.child)
            return False

        return stat.S_ISLNK(mode)

    try:
        st = posix.stat(s)
    except OSError as e:
        # TODO: simple_test_builtin should report this as status=2.
        # Problem: we really need errno, because test -f / is bad argument,
        # while test -f /nonexistent is a good argument but failed.  Gah.
        # ENOENT vs. ENAMETOOLONG.
        #e_die("stat() error: %s", e, word=node.child)
        return False
    mode = st.st_mode

    if op_id in (Id.BoolUnary_e, Id.BoolUnary_a):  # -a is alias for -e
        return True

    if op_id == Id.BoolUnary_f:
        return stat.S_ISREG(mode)

    if op_id == Id.BoolUnary_d:
        return stat.S_ISDIR(mode)

    if op_id == Id.BoolUnary_b:
        return stat.S_ISBLK(mode)

    if op_id == Id.BoolUnary_c:
        return stat.S_ISCHR(mode)

    if op_id == Id.BoolUnary_p:
        return stat.S_ISFIFO(mode)

    if op_id == Id.BoolUnary_S:
        return stat.S_ISSOCK(mode)

    if op_id == Id.BoolUnary_x:
        return posix.access(s, posix.X_OK)

    if op_id == Id.BoolUnary_r:
        return posix.access(s, posix.R_OK)

    if op_id == Id.BoolUnary_w:
        return posix.access(s, posix.W_OK)

    if op_id == Id.BoolUnary_s:
        return st.st_size != 0

    if op_id == Id.BoolUnary_O:
        return st.st_uid == posix.geteuid()

    if op_id == Id.BoolUnary_G:
        return st.st_gid == posix.getegid()

    e_die("%s isn't implemented", op_id)  # implicit location
Example #14
    def _batchCheckBasic(self, fn):
        assert self.__validPath(fn)

        fullfn = self.__fn2fullfn(fn)
        s = os.lstat(fullfn)

        # common check
        if True:
            try:
                pwd.getpwuid(s.st_uid)
            except KeyError:
                self.p._checkResult.append("\"%s\" has an invalid owner." %
                                           (fn))
            try:
                grp.getgrgid(s.st_gid)
            except KeyError:
                self.p._checkResult.append("\"%s\" has an invalid group." %
                                           (fn))

        # common check
        if True:
            if not (s.st_mode & stat.S_IRUSR):
                self.p._checkResult.append("\"%s\" is not readable by owner." %
                                           (fn))
            if not (s.st_mode & stat.S_IWUSR):
                # FIXME: there are so many files violating this rule, strange
                # self.p._checkResult.append("\"%s\" is not writeable by owner." % (fn))
                pass
            if not (s.st_mode & stat.S_IRGRP) and (s.st_mode & stat.S_IWGRP):
                self.p._checkResult.append(
                    "\"%s\" is not readable but writable by group." % (fn))
            if not (s.st_mode & stat.S_IROTH) and (s.st_mode & stat.S_IWOTH):
                self.p._checkResult.append(
                    "\"%s\" is not readable but writable by other." % (fn))
            if not (s.st_mode & stat.S_IRGRP) and (
                (s.st_mode & stat.S_IROTH) or (s.st_mode & stat.S_IWOTH)):
                self.p._checkResult.append(
                    "\"%s\" is not readable by group but readable/writable by other."
                    % (fn))
            if not (s.st_mode & stat.S_IWGRP) and (s.st_mode & stat.S_IWOTH):
                self.p._checkResult.append(
                    "\"%s\" is not writable by group but writable by other." %
                    (fn))

        # common check
        if True:
            if (s.st_mode & stat.S_ISVTX):
                self.p._checkResult.append(
                    "\"%s\" should not have sticky bit set." % (fn))

        # dedicated check for symlink
        if stat.S_ISLNK(s.st_mode):
            if not os.path.exists(fullfn):
                self.p._checkResult.append("\"%s\" is a broken symlink." %
                                           (fn))
            if stat.S_IMODE(s.st_mode) != 0o0777:
                self.p._checkResult.append("\"%s\" has invalid permission." %
                                           (fn))
            return

        # dedicated check for directory
        if stat.S_ISDIR(s.st_mode):
            if (s.st_mode & stat.S_ISUID):
                self.p._checkResult.append(
                    "\"%s\" should not have SUID bit set." % (fn))
            if (s.st_mode & stat.S_ISGID):
                # if showdn.startswith("/var/lib/portage"):
                #     pass        # FIXME, portage set SGID for these directories?
                # elif showdn.startswith("/var/log/portage"):
                #     pass        # FIXME, portage set SGID for these directories?
                # elif showdn.startswith("/var/log/journal"):
                #     pass        # FIXME, systemd set SGID for these directories?
                # else:
                #     self.p._checkResult.append("\"%s\" should not have SGID bit set." % (showdn))
                pass
            return

        # dedicated check for regular file
        if stat.S_ISREG(s.st_mode):
            if (s.st_mode & stat.S_ISUID):
                bad = False
                if not (s.st_mode & stat.S_IXUSR):
                    bad = True
                if not (s.st_mode & stat.S_IXGRP) and (
                    (s.st_mode & stat.S_IRGRP) or (s.st_mode & stat.S_IWGRP)):
                    bad = True
                if not (s.st_mode & stat.S_IXOTH) and (
                    (s.st_mode & stat.S_IROTH) or (s.st_mode & stat.S_IWOTH)):
                    bad = True
                if bad:
                    self.p._checkResult.append(
                        "\"%s\" is not appropriate for SUID bit." % (fn))
            if (s.st_mode & stat.S_ISGID):
                # FIXME
                # self.infoPrinter.printError("File \"%s\" should not have SGID bit set." % (showfn))
                pass
            return

        # all other file types are invalid for batch check
        self.p._checkResult.append("Type of \"%s\" is invalid." % (fn))
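When debugging checks like these, it helps to view the same permission bits directly; a minimal sketch using only the standard library (hypothetical path; stat.filemode needs Python 3.3+):

import os
import stat

s = os.lstat('/etc/passwd')
print(oct(stat.S_IMODE(s.st_mode)))  # permission bits only, e.g. 0o644
print(stat.filemode(s.st_mode))      # rendered form, e.g. '-rw-r--r--'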
Example #15
def put(local_path, remote_path, mode=None):
    """
    Upload one or more files to a remote host.
    
    ``local_path`` may be a relative or absolute local file path, and may
    contain shell-style wildcards, as understood by the Python ``glob`` module.
    Tilde expansion (as implemented by ``os.path.expanduser``) is also
    performed.

    ``remote_path`` may also be a relative or absolute location, but applied to
    the remote host. Relative paths are relative to the remote user's home
    directory, but tilde expansion (e.g. ``~/.ssh/``) will also be performed if
    necessary.

    By default, `put` preserves file modes when uploading. However, you can
    also set the mode explicitly by specifying the ``mode`` keyword argument,
    which sets the numeric mode of the remote file. See the ``os.chmod``
    documentation or ``man chmod`` for the format of this argument.
    
    Examples::
    
        put('bin/project.zip', '/tmp/project.zip')
        put('*.py', 'cgi-bin/')
        put('index.html', 'index.html', mode=0755)
    
    """
    ftp = connections[env.host_string].open_sftp()
    with closing(ftp) as ftp:
        # Expand tildes (assumption: default remote cwd is user $HOME)
        remote_path = remote_path.replace('~', ftp.normalize('.'))
        # Get remote mode for directory-vs-file detection
        try:
            rmode = ftp.lstat(remote_path).st_mode
        except:
            # sadly, I see no better way of doing this
            rmode = None
        # Expand local tildes and get globs
        globs = glob(os.path.expanduser(local_path))
        # Deal with bad local_path
        if not globs:
            raise ValueError, "'%s' is not a valid local path or glob." \
                % local_path
    
        # Iterate over all given local files
        for lpath in globs:
            # If remote path is directory, tack on the local filename
            _remote_path = remote_path
            if rmode is not None and stat.S_ISDIR(rmode):
                _remote_path = os.path.join(
                    remote_path,
                    os.path.basename(lpath)
                )
            # Print
            if output.running:
                prefix = "[%s] " % env.host_string
                msg = "put: %s -> %s" % (lpath, _remote_path)
                if env.colors:
                    prefix = env.color_settings['host_prefix'](prefix)
                print(prefix + msg)
            # Try to catch raised exceptions (which is the only way to tell if
            # this operation had problems; there's no return code) during upload
            try:
                # Actually do the upload
                rattrs = ftp.put(lpath, _remote_path)
                # and finally set the file mode
                lmode = mode or os.stat(lpath).st_mode
                if lmode != rattrs.st_mode:
                    ftp.chmod(_remote_path, lmode)
            except Exception, e:
                msg = "put() encountered an exception while uploading '%s'"
                _handle_failure(message=msg % lpath, exception=e)
Example #16
 def isdir(self, path):
      return stat.S_ISDIR(self.st_mode(path))
Example #17
	def _queue_dir_transfer(self, task_cls, src_path, dst_path):
		"""
		Handles the folder transfer by stopping bad transfers, creating tasks
		for transfers, and placing them in the queue.

		:param task_cls: The type of task the transfer will be.
		:param str src_path: The path to be uploaded or downloaded.
		:param str dst_path: The path to be created.
		"""
		if issubclass(task_cls, tasks.DownloadTask):
			src, dst = self.remote, self.local
			if not os.access(dst.path_mod.dirname(dst_path), os.W_OK):
				gui_utilities.show_dialog_error('Permission Denied', self.application.get_active_window(), 'Can not write to the destination directory.')
				return
			task = task_cls.dir_cls(dst_path, src_path, size=0)
		elif issubclass(task_cls, tasks.UploadTask):
			if not os.access(src_path, os.R_OK):
				gui_utilities.show_dialog_error('Permission Denied', self.application.get_active_window(), 'Can not read the source directory.')
				return
			src, dst = self.local, self.remote
			task = task_cls.dir_cls(src_path, dst_path, size=0)
			if not stat.S_ISDIR(dst.path_mode(dst_path)):
				try:
					dst.make_dir(dst_path)
				except (IOError, OSError):
					gui_utilities.show_dialog_error('Permission Denied', self.application.get_active_window(), 'Can not create the destination directory.')
					return
		else:
			raise ValueError('unknown task class')

		queued_tasks = []
		parent_directory_tasks = collections.OrderedDict({src_path: task})

		for dir_cont in src.walk(src_path):
			dst_base_path = dst.path_mod.normpath(dst.path_mod.join(dst_path, src.get_relpath(dir_cont.dirpath, start=src_path)))
			src_base_path = dir_cont.dirpath
			parent_task = parent_directory_tasks.pop(src_base_path, None)
			if parent_task is None:
				continue
			queued_tasks.append(parent_task)

			new_task_count = 0
			if issubclass(task_cls, tasks.DownloadTask):
				local_base_path, remote_base_path = (dst_base_path, src_base_path)
			else:
				local_base_path, remote_base_path = (src_base_path, dst_base_path)

			for filename in dir_cont.filenames:
				if not self.config['transfer_hidden'] and src.path_is_hidden(src.path_mod.join(src_base_path, filename)):
					continue
				try:
					file_size = src.get_file_size(src.path_mod.join(dir_cont.dirpath, filename))
				except (IOError, OSError):
					continue  # skip this file if we can't get its size
				task = task_cls(
					self.local.path_mod.join(local_base_path, filename),
					self.remote.path_mod.join(remote_base_path, filename),
					parent=parent_task,
					size=file_size
				)
				queued_tasks.append(task)
				new_task_count += 1

			for dirname in dir_cont.dirnames:
				if not self.config['transfer_hidden'] and src.path_is_hidden(src.path_mod.join(src_base_path, dirname)):
					continue
				task = task_cls.dir_cls(
					self.local.path_mod.join(local_base_path, dirname),
					self.remote.path_mod.join(remote_base_path, dirname),
					parent=parent_task,
					size=0
				)
				parent_directory_tasks[src.path_mod.join(src_base_path, dirname)] = task
				new_task_count += 1

			parent_task.size += new_task_count
			for grandparent_task in parent_task.parents:
				grandparent_task.size += new_task_count
		for task in queued_tasks:
			self.queue.put(task)
		self.status_display.sync_view(queued_tasks)
Example #18
def test_vsiaz_extra_1():

    if not gdaltest.built_against_curl():
        pytest.skip()

    az_resource = gdal.GetConfigOption('AZ_RESOURCE')
    if az_resource is None:
        pytest.skip('Missing AZ_RESOURCE')

    if '/' not in az_resource:
        path = '/vsiaz/' + az_resource
        statres = gdal.VSIStatL(path)
        assert statres is not None and stat.S_ISDIR(statres.mode), \
            ('%s is not a valid bucket' % path)

        readdir = gdal.ReadDir(path)
        assert readdir is not None, 'ReadDir() should not return empty list'
        for filename in readdir:
            if filename != '.':
                subpath = path + '/' + filename
                assert gdal.VSIStatL(subpath) is not None, \
                    ('Stat(%s) should not return an error' % subpath)

        unique_id = 'vsiaz_test'
        subpath = path + '/' + unique_id
        ret = gdal.Mkdir(subpath, 0)
        assert ret >= 0, ('Mkdir(%s) should not return an error' % subpath)

        readdir = gdal.ReadDir(path)
        assert unique_id in readdir, \
            ('ReadDir(%s) should contain %s' % (path, unique_id))

        ret = gdal.Mkdir(subpath, 0)
        assert ret != 0, ('Mkdir(%s) repeated should return an error' % subpath)

        ret = gdal.Rmdir(subpath)
        assert ret >= 0, ('Rmdir(%s) should not return an error' % subpath)

        readdir = gdal.ReadDir(path)
        assert unique_id not in readdir, \
            ('ReadDir(%s) should not contain %s' % (path, unique_id))

        ret = gdal.Rmdir(subpath)
        assert ret != 0, ('Rmdir(%s) repeated should return an error' % subpath)

        ret = gdal.Mkdir(subpath, 0)
        assert ret >= 0, ('Mkdir(%s) should not return an error' % subpath)

        f = gdal.VSIFOpenL(subpath + '/test.txt', 'wb')
        assert f is not None
        gdal.VSIFWriteL('hello', 1, 5, f)
        gdal.VSIFCloseL(f)

        ret = gdal.Rmdir(subpath)
        assert ret != 0, \
            ('Rmdir(%s) on non empty directory should return an error' % subpath)

        f = gdal.VSIFOpenL(subpath + '/test.txt', 'rb')
        assert f is not None
        data = gdal.VSIFReadL(1, 5, f).decode('utf-8')
        assert data == 'hello'
        gdal.VSIFCloseL(f)

        assert gdal.Rename(subpath + '/test.txt', subpath + '/test2.txt') == 0

        f = gdal.VSIFOpenL(subpath + '/test2.txt', 'rb')
        assert f is not None
        data = gdal.VSIFReadL(1, 5, f).decode('utf-8')
        assert data == 'hello'
        gdal.VSIFCloseL(f)

        ret = gdal.Unlink(subpath + '/test2.txt')
        assert ret >= 0, \
            ('Unlink(%s) should not return an error' % (subpath + '/test2.txt'))

        ret = gdal.Rmdir(subpath)
        assert ret >= 0, ('Rmdir(%s) should not return an error' % subpath)

        return

    f = open_for_read('/vsiaz/' + az_resource)
    assert f is not None
    ret = gdal.VSIFReadL(1, 1, f)
    gdal.VSIFCloseL(f)

    assert len(ret) == 1

    # Same with /vsiaz_streaming/
    f = open_for_read('/vsiaz_streaming/' + az_resource)
    assert f is not None
    ret = gdal.VSIFReadL(1, 1, f)
    gdal.VSIFCloseL(f)

    assert len(ret) == 1

    if False:  # pylint: disable=using-constant-test
        # we actually try to read at read() time and bSetError = false
        # Invalid bucket : "The specified bucket does not exist"
        gdal.ErrorReset()
        f = open_for_read('/vsiaz/not_existing_bucket/foo')
        with gdaltest.error_handler():
            gdal.VSIFReadL(1, 1, f)
        gdal.VSIFCloseL(f)
        assert gdal.VSIGetLastErrorMsg() != ''

    # Invalid resource
    gdal.ErrorReset()
    f = open_for_read('/vsiaz_streaming/' + az_resource + '/invalid_resource.baz')
    assert f is None, gdal.VSIGetLastErrorMsg()

    # Test GetSignedURL()
    signed_url = gdal.GetSignedURL('/vsiaz/' + az_resource)
    f = open_for_read('/vsicurl_streaming/' + signed_url)
    assert f is not None
    ret = gdal.VSIFReadL(1, 1, f)
    gdal.VSIFCloseL(f)

    assert len(ret) == 1
Example #19
    def get(self, request, token):
        """ Only used for get dirents in a folder share link.

        Permission checking:
        1, If enable SHARE_LINK_LOGIN_REQUIRED, user must have been authenticated.
        2, If enable ENABLE_SHARE_LINK_AUDIT, user must have been authenticated, or have been audited.
        3, If share link is encrypted, share link password must have been checked.
        """

        # argument check
        thumbnail_size = request.GET.get('thumbnail_size', 48)
        try:
            thumbnail_size = int(thumbnail_size)
        except ValueError:
            error_msg = 'thumbnail_size invalid.'
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)

        # permission check

        # check if login required
        if SHARE_LINK_LOGIN_REQUIRED and \
                not request.user.is_authenticated():
            error_msg = 'Permission denied.'
            return api_error(status.HTTP_403_FORBIDDEN, error_msg)

        # check share link audit
        if is_pro_version() and ENABLE_SHARE_LINK_AUDIT and \
                not request.user.is_authenticated() and \
                not request.session.get('anonymous_email'):
            error_msg = 'Permission denied.'
            return api_error(status.HTTP_403_FORBIDDEN, error_msg)

        # resource check
        try:
            share_link = FileShare.objects.get(token=token)
        except FileShare.DoesNotExist:
            error_msg = 'Share link %s not found.' % token
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)

        # check share link password
        if share_link.is_encrypted() and not check_share_link_access(
                request, token):
            error_msg = 'Permission denied.'
            return api_error(status.HTTP_403_FORBIDDEN, error_msg)

        if share_link.s_type != 'd':
            error_msg = 'Share link %s is not a folder share link.' % token
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)

        repo_id = share_link.repo_id
        repo = seafile_api.get_repo(repo_id)
        if not repo:
            error_msg = 'Library %s not found.' % repo_id
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)

        share_link_path = share_link.path
        request_path = request.GET.get('path', '/')
        if request_path == '/':
            path = share_link_path
        else:
            path = posixpath.join(share_link_path, request_path.strip('/'))

        path = normalize_dir_path(path)
        dir_id = seafile_api.get_dir_id_by_path(repo_id, path)
        if not dir_id:
            error_msg = 'Folder %s not found.' % request_path
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)

        try:
            current_commit = seafile_api.get_commit_list(repo_id, 0, 1)[0]
            dirent_list = seafile_api.list_dir_by_commit_and_path(
                repo_id, current_commit.id, path, -1, -1)
        except Exception as e:
            logger.error(e)
            error_msg = 'Internal Server Error'
            return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)

        result = []
        for dirent in dirent_list:

            # don't return parent folder(share link path) info to user
            # so use request_path here
            dirent_path = posixpath.join(request_path, dirent.obj_name)

            dirent_info = {}
            dirent_info['size'] = dirent.size
            dirent_info['last_modified'] = timestamp_to_isoformat_timestr(
                dirent.mtime)

            if stat.S_ISDIR(dirent.mode):
                dirent_info['is_dir'] = True
                dirent_info['folder_path'] = normalize_dir_path(dirent_path)
                dirent_info['folder_name'] = dirent.obj_name
            else:
                dirent_info['is_dir'] = False
                dirent_info['file_path'] = normalize_file_path(dirent_path)
                dirent_info['file_name'] = dirent.obj_name

                file_type, file_ext = get_file_type_and_ext(dirent.obj_name)
                if file_type in (IMAGE, XMIND) or \
                        file_type == VIDEO and ENABLE_VIDEO_THUMBNAIL:

                    if os.path.exists(
                            os.path.join(THUMBNAIL_ROOT, str(thumbnail_size),
                                         dirent.obj_id)):
                        req_image_path = posixpath.join(
                            request_path, dirent.obj_name)
                        src = get_share_link_thumbnail_src(
                            token, thumbnail_size, req_image_path)
                        dirent_info['encoded_thumbnail_src'] = urlquote(src)

            result.append(dirent_info)

        return Response({'dirent_list': result})
Example #20
def get_type(mode):
    if stat.S_ISDIR(mode) or stat.S_ISLNK(mode):
        type = 'dir'
    else:
        type = 'file'
    return type
Example #21
def handle_webdata_request(request_handler: tornado.web.RequestHandler):
    request_path = request_handler.request.path
    # normalize request path
    try:
        normalized_request_path = normalize_request_path(request_path)
    except InvalidCharacterInPath as ex:
        print(ex)
        # unauthorized
        request_handler.set_status(403)
        request_handler.set_header('Content-Type', 'application/json')
        response_obj = {
            'message': 'invalid character in path',
            'path': request_path,
        }

        response_str = json.dumps(response_obj)
        request_handler.write(response_str)

        return

    # join with webdata directory
    if len(normalized_request_path) == 0:
        local_path = WEBDATA_DIRECTORY
    elif normalized_request_path == '/':
        local_path = WEBDATA_DIRECTORY
    else:
        local_path = os.path.join(WEBDATA_DIRECTORY, normalized_request_path)

    if local_path == WEBDATA_DIRECTORY:
        pass
    elif not is_child_path(WEBDATA_DIRECTORY, local_path):
        # unauthorized
        request_handler.set_status(403)
        request_handler.set_header('Content-Type', 'application/json')
        response_obj = {
            'message': 'unauthorized access',
            'path': request_path,
        }

        response_str = json.dumps(response_obj)
        request_handler.write(response_str)

        return

    # check if file exists
    if not os.path.exists(local_path):
        # not found
        request_handler.set_status(404)
        request_handler.set_header('Content-Type', 'application/json')
        response_obj = {
            'message': 'file not found',
            'path': request_path,
        }

        response_str = json.dumps(response_obj)
        request_handler.write(response_str)

        return

    file_stat = os.stat(local_path)
    if stat.S_ISDIR(file_stat.st_mode):
        # automatically serve index.html
        # directory
        # check if index.html exists
        child_filename_list = os.listdir(local_path)
        for child_filename in child_filename_list:
            lowered_child_filename = child_filename.lower()
            if lowered_child_filename in VALID_HTML_INDEX_FILENAME_LIST:
                child_filepath = os.path.join(local_path, child_filename)
                # send modified time
                os.path.getmtime(child_filepath)
                # TODO
                request_handler.set_status(200)
                request_handler.set_header('Content-Type', 'text/html')
                filesize = os.stat(child_filepath).st_size
                if filesize > 0:
                    send_file_data(request_handler, child_filepath)
                return

        # not found
        # return directory listing
        # TODO option to disable directory listing
        # construct static HTML page
        html_str = render_static_directory_listing_html(
            normalized_request_path,
            child_filename_list,
        )

        request_handler.set_status(200)
        request_handler.set_header('Content-Type', 'text/html')
        request_handler.write(html_str)
        return

    if not stat.S_ISREG(file_stat.st_mode):
        # unauthorized
        # this is not a regular file
        request_handler.set_status(403)
        request_handler.set_header('Content-Type', 'application/json')
        response_obj = {
            'message': 'this is not a regular file',
            'path': request_path,
        }

        response_str = json.dumps(response_obj)
        request_handler.write(response_str)

        return

    # regular file
    # TODO parse and support Range header
    request_handler.set_status(200)
    mime_type = get_mime_type_by_filename(local_path)

    if mime_type in TEXT_MIME_TYPE_LIST:
        # set mime type and charset to utf-8
        header_value = f'{mime_type}; charset=utf-8'
        request_handler.set_header('Content-Type', header_value)
    else:
        request_handler.set_header('Content-Type', mime_type)

    filesize = os.stat(local_path).st_size
    request_handler.set_header('Content-Length', str(filesize))

    send_file_data(request_handler, local_path)
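`get_mime_type_by_filename` and `send_file_data` are project helpers not shown here; a minimal sketch of what the MIME lookup might look like (hypothetical implementation on top of the standard mimetypes module):

import mimetypes

def get_mime_type_by_filename(path):
    # Fall back to a generic binary type when the extension is unknown.
    mime_type, _encoding = mimetypes.guess_type(path)
    return mime_type or 'application/octet-stream'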
Example #22
    def run(self):
        # Walk the tests hierarchy looking for tests
        dirs = self.testdirs.split(':')
        tests = []
        while dirs:
            dir = dirs.pop(0)
            if self.trace_tests:
                print 'Searching for tests in %s' % (dir, )
            for f in os.listdir(dir):
                fn = os.path.join(dir, f)
                statb = os.stat(fn)
                if stat.S_ISDIR(statb[stat.ST_MODE]):
                    dirs.append(fn)
                elif self.__TestFile_re.match(f):
                    tests.append(fn)

        number = 0
        import sys
        import traceback
        import new
        import unittest
        import types

        # Import each test into its own module, then add the test
        # cases in it to a complete suite.
        loader = unittest.defaultTestLoader
        suite = unittest.TestSuite()
        used_names = set()
        for fn in tests:
            stage = 'compile'
            try:
                # Assign a unique name for this test
                test_name = os.path.basename(fn).split('.')[0]
                test_name = test_name.replace('-', '_')
                number = 2
                base_name = test_name
                while test_name in used_names:
                    test_name = '%s%d' % (base_name, number)
                    number += 1
                used_names.add(test_name)

                # Read the test source in and compile it
                with open(fn) as src:
                    rv = compile(src.read(), test_name, 'exec')
                stage = 'evaluate'

                # Make a copy of the globals array so we don't
                # contaminate this environment.
                g = globals().copy()

                # The test cases use __file__ to determine the path to
                # the schemas
                g['__file__'] = fn

                # Create a module into which the test will be evaluated.
                module = types.ModuleType(test_name)

                # The generated code uses __name__ to look up the
                # containing module in sys.modules.
                g['__name__'] = test_name
                sys.modules[test_name] = module

                # Import the test into the module, making sure the created globals look like they're in the module.
                eval(rv, g)
                module.__dict__.update(g)

                # Find all subclasses of unittest.TestCase that were
                # in the test source and add them to the suite.
                for (nm, obj) in g.items():
                    if isinstance(obj, type) and issubclass(
                            obj, unittest.TestCase):
                        suite.addTest(loader.loadTestsFromTestCase(obj))
                if self.trace_tests:
                    print('%s imported' % (fn,))
            except Exception as e:
                print('%s failed in %s: %s' % (fn, stage, e))
                raise
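Once the loop completes, the assembled `suite` would normally be handed to a runner; a minimal usage sketch (hypothetical, not part of the original command class):

runner = unittest.TextTestRunner(verbosity=2)
result = runner.run(suite)
print('%d tests run, %d failures' % (result.testsRun, len(result.failures)))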
Exemple #23
0
    def __write(self, fp, arcname=None, compress_type=None, st=None):
        """Put the bytes from filename into the archive under the name
        arcname."""
        if not self.fp:
            raise RuntimeError(
                  "Attempt to write to ZIP archive that was already closed")

        if isinstance(fp, basestring):
            filename, fp = (fp, None)
            st = st or os.stat(filename)
        else:
            filename = '<stream>'
            st = st or os.stat(0)

        isdir = stat.S_ISDIR(st.st_mode)
        mtime = time.localtime(st.st_mtime)
        if (mtime.tm_year < 1980):
            mtime = time.localtime()
        date_time = mtime[0:6]
        # Create ZipInfo instance to store file information
        if arcname is None:
            arcname = filename
        arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
        while arcname[0] in (os.sep, os.altsep):
            arcname = arcname[1:]
        if isdir:
            arcname += '/'
        zinfo = ZipInfo(arcname, date_time)
        zinfo.external_attr = (st[0] & 0xFFFF) << 16      # Unix attributes
        if compress_type is None:
            zinfo.compress_type = self.compression
        else:
            zinfo.compress_type = compress_type

        zinfo.file_size = st.st_size
        zinfo.flag_bits = 0x00
        zinfo.flag_bits |= 0x08                 # ZIP flag bits, bit 3 indicates presence of data descriptor
        zinfo.header_offset = self.fp.tell()    # Start of header bytes
        if zinfo.compress_type == ZIP_LZMA:
            # Compressed data includes an end-of-stream (EOS) marker
            zinfo.flag_bits |= 0x02

        self._writecheck(zinfo)
        self._didModify = True

        if isdir:
            zinfo.file_size = 0
            zinfo.compress_size = 0
            zinfo.CRC = 0
            self.filelist.append(zinfo)
            self.NameToInfo[zinfo.filename] = zinfo
            yield self.fp.write(zinfo.FileHeader(False))
            return

        cmpr = _get_compressor(zinfo.compress_type)
        fp = fp or open(filename, 'rb')
        with fp:
            # Must overwrite CRC and sizes with correct data later
            zinfo.CRC = CRC = 0
            zinfo.compress_size = compress_size = 0
            # Compressed size can be larger than uncompressed size
            zip64 = self._allowZip64 and \
                    zinfo.file_size * 1.05 > ZIP64_LIMIT
            yield self.fp.write(zinfo.FileHeader(zip64))
            file_size = 0
            while 1:
                sz = 1024 * 8
                if zinfo.file_size > 0:  # known size, read only that much
                    if zinfo.file_size == file_size:
                        break
                    sz = min(zinfo.file_size - file_size, sz)
                buf = fp.read(sz)
                if not buf:
                    break
                file_size = file_size + len(buf)
                CRC = crc32(buf, CRC) & 0xffffffff
                if cmpr:
                    buf = cmpr.compress(buf)
                    compress_size = compress_size + len(buf)
                yield self.fp.write(buf)
        if cmpr:
            buf = cmpr.flush()
            compress_size = compress_size + len(buf)
            yield self.fp.write(buf)
            zinfo.compress_size = compress_size
        else:
            zinfo.compress_size = file_size
        zinfo.CRC = CRC
        if zinfo.file_size > 0 and zinfo.file_size != file_size:
            raise RuntimeError('File size changed during compressing')
        else:
            zinfo.file_size = file_size
        if not zip64 and self._allowZip64:
            if file_size > ZIP64_LIMIT:
                raise RuntimeError('File size has increased during compressing')
            if compress_size > ZIP64_LIMIT:
                raise RuntimeError('Compressed size larger than uncompressed size')

        # A seekable writer would rewind and rewrite the local file header
        # with the final CRC and sizes; this streaming writer instead keeps
        # flag bit 3 set and appends a data descriptor after the data:
        # position = self.fp.tell()       # Preserve current position in file
        # self.fp.seek(zinfo.header_offset, 0)
        # self.fp.write(zinfo.FileHeader(zip64))
        # self.fp.seek(position, 0)
        yield self.fp.write(zinfo.DataDescriptor())
        self.filelist.append(zinfo)
        self.NameToInfo[zinfo.filename] = zinfo
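The read/compress loop above maintains a running CRC-32 the same way the standard library does; a standalone sketch of that pattern, using only `zlib`:

import zlib

def file_crc32(path, chunk_size=8 * 1024):
    # Accumulate a CRC-32 over a file in chunks, as the writer above does.
    crc = 0
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            crc = zlib.crc32(chunk, crc) & 0xffffffff
    return crc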
Exemple #24
0
            def process(path):
                s = os.lstat(path)

                if stat.S_ISDIR(s.st_mode):
                    update_hash('d')
                elif stat.S_ISCHR(s.st_mode):
                    update_hash('c')
                elif stat.S_ISBLK(s.st_mode):
                    update_hash('b')
                elif stat.S_ISSOCK(s.st_mode):
                    update_hash('s')
                elif stat.S_ISLNK(s.st_mode):
                    update_hash('l')
                elif stat.S_ISFIFO(s.st_mode):
                    update_hash('p')
                else:
                    update_hash('-')

                def add_perm(mask, on, off='-'):
                    if mask & s.st_mode:
                        update_hash(on)
                    else:
                        update_hash(off)

                add_perm(stat.S_IRUSR, 'r')
                add_perm(stat.S_IWUSR, 'w')
                if stat.S_ISUID & s.st_mode:
                    add_perm(stat.S_IXUSR, 's', 'S')
                else:
                    add_perm(stat.S_IXUSR, 'x')

                add_perm(stat.S_IRGRP, 'r')
                add_perm(stat.S_IWGRP, 'w')
                if stat.S_ISGID & s.st_mode:
                    add_perm(stat.S_IXGRP, 's', 'S')
                else:
                    add_perm(stat.S_IXGRP, 'x')

                add_perm(stat.S_IROTH, 'r')
                add_perm(stat.S_IWOTH, 'w')
                if stat.S_ISVTX & s.st_mode:
                    update_hash('t')
                else:
                    add_perm(stat.S_IXOTH, 'x')

                if include_owners:
                    update_hash(" %10s" % pwd.getpwuid(s.st_uid).pw_name)
                    update_hash(" %10s" % grp.getgrgid(s.st_gid).gr_name)

                update_hash(" ")
                if stat.S_ISBLK(s.st_mode) or stat.S_ISCHR(s.st_mode):
                    update_hash("%9s" %
                                ("%d.%d" %
                                 (os.major(s.st_rdev), os.minor(s.st_rdev))))
                else:
                    update_hash(" " * 9)

                update_hash(" ")
                if stat.S_ISREG(s.st_mode):
                    update_hash("%10d" % s.st_size)
                else:
                    update_hash(" " * 10)

                update_hash(" ")
                fh = hashlib.sha256()
                if stat.S_ISREG(s.st_mode):
                    # Hash file contents
                    with open(path, 'rb') as d:
                        for chunk in iter(lambda: d.read(4096), b""):
                            fh.update(chunk)
                    update_hash(fh.hexdigest())
                else:
                    update_hash(" " * len(fh.hexdigest()))

                update_hash(" %s" % path)

                if stat.S_ISLNK(s.st_mode):
                    update_hash(" -> %s" % os.readlink(path))

                update_hash("\n")
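The nested `process` above relies on an `update_hash` callable and an `include_owners` flag from its enclosing scope. A minimal sketch of such a wrapper, assuming the goal is one digest over all generated metadata lines:

import hashlib

def make_update_hash():
    h = hashlib.sha256()
    def update_hash(s):
        # process() passes str; hash its UTF-8 encoding.
        h.update(s.encode('utf-8'))
    return h, update_hash

# Usage: h, update_hash = make_update_hash(); call process(path) per file,
# then read h.hexdigest() as the overall hash.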
Exemple #25
0
    def is_unarchived(self):
        cmd = '%s -ZT -s "%s"' % (self.cmd_path, self.src)
        if self.excludes:
            cmd += ' -x "' + '" "'.join(self.excludes) + '"'
        rc, out, err = self.module.run_command(cmd)

        old_out = out
        diff = ''
        out = ''
        if rc == 0:
            unarchived = True
        else:
            unarchived = False

        # Get some information related to user/group ownership
        umask = os.umask(0)
        os.umask(umask)

        # Get current user and group information
        groups = os.getgroups()
        run_uid = os.getuid()
        run_gid = os.getgid()
        try:
            run_owner = pwd.getpwuid(run_uid).pw_name
        except KeyError:
            run_owner = run_uid
        try:
            run_group = grp.getgrgid(run_gid).gr_name
        except KeyError:
            run_group = run_gid

        # Get future user ownership
        fut_owner = fut_uid = None
        if self.file_args['owner']:
            try:
                tpw = pwd.getpwnam(self.file_args['owner'])
            except KeyError:
                try:
                    tpw = pwd.getpwuid(self.file_args['owner'])
                except (TypeError, KeyError):
                    tpw = pwd.getpwuid(run_uid)
            fut_owner = tpw.pw_name
            fut_uid = tpw.pw_uid
        else:
            fut_owner = run_owner
            fut_uid = run_uid

        # Get future group ownership
        fut_group = fut_gid = None
        if self.file_args['group']:
            try:
                tgr = grp.getgrnam(self.file_args['group'])
            except KeyError:
                try:
                    tgr = grp.getgrgid(self.file_args['group'])
                except (TypeError, KeyError):
                    tgr = grp.getgrgid(run_gid)
            fut_group = tgr.gr_name
            fut_gid = tgr.gr_gid
        else:
            fut_group = run_group
            fut_gid = run_gid

        for line in old_out.splitlines():
            change = False

            # Keep at most 8 fields so paths containing spaces stay intact
            pcs = line.split(None, 7)
            if len(pcs) != 8:
                continue

            ztype = pcs[0][0]
            permstr = pcs[0][1:]
            version = pcs[1]
            ostype = pcs[2]
            size = int(pcs[3])
            path = pcs[7]

            # Skip excluded files
            if path in self.excludes:
                out += 'Path %s is excluded on request\n' % path
                continue

            # Itemized change requires L for symlink
            if path[-1] == '/':
                if ztype != 'd':
                    err += 'Path %s incorrectly tagged as "%s", but is a directory.\n' % (
                        path, ztype)
                ftype = 'd'
            elif ztype == 'l':
                ftype = 'L'
            elif ztype == '-':
                ftype = 'f'
            elif ztype == '?':
                ftype = 'f'

            # Some files may be storing FAT permissions, not Unix permissions
            if len(permstr) == 6:
                if path[-1] == '/':
                    permstr = 'rwxrwxrwx'
                elif permstr == 'rwx---':
                    permstr = 'rwxrwxrwx'
                else:
                    permstr = 'rw-rw-rw-'

            # Test string conformity
            if len(permstr) != 9 or not ZIP_FILE_MODE_RE.match(permstr):
                raise UnarchiveError('ZIP info perm format incorrect, %s' %
                                     permstr)

            # DEBUG
#            err += "%s%s %10d %s\n" % (ztype, permstr, size, path)

            dest = os.path.join(self.dest, path)
            try:
                st = os.lstat(dest)
            except OSError:
                change = True
                self.includes.append(path)
                err += 'Path %s is missing\n' % path
                diff += '>%s++++++.?? %s\n' % (ftype, path)
                continue

            # Compare file types
            if ftype == 'd' and not stat.S_ISDIR(st.st_mode):
                change = True
                self.includes.append(path)
                err += 'File %s already exists, but not as a directory\n' % path
                diff += 'c%s++++++.?? %s\n' % (ftype, path)
                continue

            if ftype == 'f' and not stat.S_ISREG(st.st_mode):
                change = True
                unarchived = False
                self.includes.append(path)
                err += 'Directory %s already exists, but not as a regular file\n' % path
                diff += 'c%s++++++.?? %s\n' % (ftype, path)
                continue

            if ftype == 'L' and not stat.S_ISLNK(st.st_mode):
                change = True
                self.includes.append(path)
                err += 'Directory %s already exists, but not as a symlink\n' % path
                diff += 'c%s++++++.?? %s\n' % (ftype, path)
                continue

            itemized = list('.%s.......??' % ftype)

            dt_object = datetime.datetime(
                *(time.strptime(pcs[6], '%Y%m%d.%H%M%S')[0:6]))
            timestamp = time.mktime(dt_object.timetuple())

            # Compare file timestamps
            if stat.S_ISREG(st.st_mode):
                if self.module.params['keep_newer']:
                    if timestamp > st.st_mtime:
                        change = True
                        self.includes.append(path)
                        err += 'File %s is older, replacing file\n' % path
                        itemized[4] = 't'
                    elif timestamp < st.st_mtime:  # S_ISREG already checked above
                        # Add to excluded files, ignore other changes
                        out += 'File %s is newer, excluding file\n' % path
                        continue
                else:
                    if timestamp != st.st_mtime:
                        change = True
                        self.includes.append(path)
                        err += 'File %s differs in mtime (%f vs %f)\n' % (
                            path, timestamp, st.st_mtime)
                        itemized[4] = 't'

            # Compare file sizes
            if stat.S_ISREG(st.st_mode) and size != st.st_size:
                change = True
                err += 'File %s differs in size (%d vs %d)\n' % (path, size,
                                                                 st.st_size)
                itemized[3] = 's'

            # Compare file checksums
            if stat.S_ISREG(st.st_mode):
                crc = crc32(dest)
                expected_crc = self._crc32(path)
                if crc != expected_crc:
                    change = True
                    err += 'File %s differs in CRC32 checksum (0x%08x vs 0x%08x)\n' % (
                        path, expected_crc, crc)
                    itemized[2] = 'c'

            # Compare file permissions

            # Do not handle permissions of symlinks
            if ftype != 'L':
                # Only special files require no umask-handling
                if ztype == '?':
                    mode = self._permstr_to_octal(permstr, 0)
                else:
                    mode = self._permstr_to_octal(permstr, umask)
                if self.file_args['mode'] and self.file_args[
                        'mode'] != stat.S_IMODE(st.st_mode):
                    change = True
                    err += 'Path %s differs in permissions (%o vs %o)\n' % (
                        path, self.file_args['mode'], stat.S_IMODE(st.st_mode))
                    itemized[5] = 'p'
                elif mode != stat.S_IMODE(st.st_mode):
                    change = True
                    itemized[5] = 'p'
                    err += 'Path %s differs in permissions (%o vs %o)\n' % (
                        path, mode, stat.S_IMODE(st.st_mode))

            # Compare file user ownership
            owner = uid = None
            try:
                owner = pwd.getpwuid(st.st_uid).pw_name
            except KeyError:
                uid = st.st_uid

            # If we are not root and requested owner is not our user, fail
            if run_uid != 0 and (fut_owner != run_owner or fut_uid != run_uid):
                raise UnarchiveError(
                    'Cannot change ownership of %s to %s, as user %s' %
                    (path, fut_owner, run_owner))

            if owner and owner != fut_owner:
                change = True
                err += 'Path %s is owned by user %s, not by user %s as expected\n' % (
                    path, owner, fut_owner)
                itemized[6] = 'o'
            elif uid and uid != fut_uid:
                change = True
                err += 'Path %s is owned by uid %s, not by uid %s as expected\n' % (
                    path, uid, fut_uid)
                itemized[6] = 'o'

            # Compare file group ownership
            group = gid = None
            try:
                group = grp.getgrgid(st.st_gid).gr_name
            except KeyError:
                gid = st.st_gid

            if run_uid != 0 and fut_gid not in groups:
                raise UnarchiveError(
                    'Cannot change group ownership of %s to %s, as user %s' %
                    (path, fut_group, run_owner))

            if group and group != fut_group:
                change = True
                err += 'Path %s is owned by group %s, not by group %s as expected\n' % (
                    path, group, fut_group)
                itemized[7] = 'g'  # index 6 is owner; group is the next slot
            elif gid and gid != fut_gid:
                change = True
                err += 'Path %s is owned by gid %s, not by gid %s as expected\n' % (
                    path, gid, fut_gid)
                itemized[7] = 'g'

            # Register changed files and finalize diff output
            if change:
                if path not in self.includes:
                    self.includes.append(path)
                diff += '%s %s\n' % (''.join(itemized), path)

        if self.includes:
            unarchived = False

        # DEBUG
#        out = old_out + out

        return dict(unarchived=unarchived,
                    rc=rc,
                    out=out,
                    err=err,
                    cmd=cmd,
                    diff=diff)
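For reference, the parser above expects the short-format listing of `unzip -ZT -s`; a representative line and the field indices it uses (format paraphrased from zipinfo(1), shown as a comment sketch):

# -rw-rw-r--  3.0 unx      710 tx defN 20090915.225258 conf/config.yml
# pcs[0]      [1] [2]      [3] [4] [5]  [6]             [7]
# pcs[0] = type char + permission string, pcs[3] = uncompressed size,
# pcs[6] = yyyymmdd.hhmmss timestamp (from -T), pcs[7] = archived path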
Exemple #26
0
async def test_getattr_root(ctx):
    assert stat.S_ISDIR((await ctx.server.getattr(ROOT_INODE,
                                                  some_ctx)).st_mode)
    await fsck(ctx)
Exemple #27
0
    def _restore_file(self, file_meta, restore_path, data_stream, data_gen,
                      backup_level):
        file_abs_path = os.path.join(restore_path, file_meta['path'])

        inode = file_meta.get('inode', {})
        file_mode = inode.get('mode')

        if os.path.exists(file_abs_path):
            if backup_level == 0:
                self._remove_file(file_abs_path)
            else:
                if file_meta.get('deleted'):
                    self._remove_file(file_abs_path)
                    return data_stream
                elif file_meta.get(
                        'new_level') and not stat.S_ISREG(file_mode):
                    self._set_inode(file_abs_path, inode)
                    return data_stream

        if not file_mode:
            return data_stream

        if stat.S_ISREG(file_mode):
            data_stream = self._restore_reg_file(file_abs_path, file_meta,
                                                 data_gen, data_stream)

        elif stat.S_ISDIR(file_mode):
            try:
                os.makedirs(file_abs_path, file_mode)
            except (OSError, IOError) as error:
                LOG.warning('Directory {0} creation error: {1}'.format(
                    file_abs_path, error))

        elif stat.S_ISBLK(file_mode):
            try:
                self._make_dev_file(file_abs_path, file_meta['dev'], file_mode)
            except (OSError, IOError) as error:
                LOG.warning('Block file {0} creation error: {1}'.format(
                    file_abs_path, error))

        elif stat.S_ISCHR(file_mode):
            try:
                self._make_dev_file(file_abs_path, file_meta['dev'], file_mode)
            except (OSError, IOError) as error:
                LOG.warning('Character file {0} creation error: {1}'.format(
                    file_abs_path, error))

        elif stat.S_ISFIFO(file_mode):
            try:
                os.mkfifo(file_abs_path)
            except (OSError, IOError) as error:
                LOG.warning('FIFO or Pipe file {0} creation error: {1}'.format(
                    file_abs_path, error))

        elif stat.S_ISLNK(file_mode):
            try:
                os.symlink(file_meta.get('lname', ''), file_abs_path)
            except (OSError, IOError) as error:
                LOG.warning('Link file {0} creation error: {1}'.format(
                    file_abs_path, error))

        if not stat.S_ISLNK(file_mode):
            self._set_inode(file_abs_path, inode)

        return data_stream
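The `_make_dev_file` helper used for the block and character device branches is not shown; a minimal sketch, assuming it simply wraps `os.mknod` (which needs root or CAP_MKNOD):

import os

def _make_dev_file(path, dev, mode):
    # Recreate a device node from its saved device number and mode bits.
    os.mknod(path, mode, dev)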
Exemple #28
0
    def entries(cls, path):
        """directory entries in an array"""
        # prevent symlinks being followed
        if not stat.S_ISDIR(os.lstat(path).st_mode):
            raise OSError(ENOTDIR, os.strerror(ENOTDIR))
        return os.listdir(path)
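A note on the design choice: `os.lstat` (rather than `os.stat`) means a path that is itself a symlink to a directory is rejected instead of silently followed. A tiny demonstration sketch:

import os
import tempfile

d = tempfile.mkdtemp()
link = os.path.join(d, 'link')
os.symlink(d, link)  # link -> a real directory
# entries(d) returns os.listdir(d); entries(link) raises OSError(ENOTDIR)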
Exemple #29
0
def get_repo_dirents(request, repo, commit, path, offset=-1, limit=-1):
    """List repo dirents based on commit id and path. Use ``offset`` and
    ``limit`` to do paginating.

    Returns: A tuple of (file_list, dir_list, dirent_more)

    TODO: Some unrelated parts(file sharing, stars, modified info, etc) need
    to be pulled out to multiple functions.
    """

    dir_list = []
    file_list = []
    dirent_more = False
    if commit.root_id == EMPTY_SHA1:
        return ([], [], False)
    else:
        try:
            dirs = seafile_api.list_dir_by_commit_and_path(
                commit.repo_id, commit.id, path, offset, limit)
            if not dirs:
                return ([], [], False)
        except SearpcError as e:
            logger.error(e)
            return ([], [], False)

        if limit != -1 and limit == len(dirs):
            dirent_more = True

        username = request.user.username
        starred_files = get_dir_starred_files(username, repo.id, path)
        fileshares = FileShare.objects.filter(repo_id=repo.id).filter(
            username=username)
        uploadlinks = UploadLinkShare.objects.filter(repo_id=repo.id).filter(
            username=username)

        view_dir_base = reverse("view_common_lib_dir", args=[repo.id, ''])
        dl_dir_base = reverse('repo_download_dir', args=[repo.id])
        file_history_base = reverse('file_revisions', args=[repo.id])
        for dirent in dirs:
            dirent.last_modified = dirent.mtime
            dirent.sharelink = ''
            dirent.uploadlink = ''
            if stat.S_ISDIR(dirent.props.mode):
                dpath = posixpath.join(path, dirent.obj_name)
                if dpath[-1] != '/':
                    dpath += '/'
                for share in fileshares:
                    if dpath == share.path:
                        dirent.sharelink = gen_dir_share_link(share.token)
                        dirent.sharetoken = share.token
                        break
                for link in uploadlinks:
                    if dpath == link.path:
                        dirent.uploadlink = gen_shared_upload_link(link.token)
                        dirent.uploadtoken = link.token
                        break
                p_dpath = posixpath.join(path, dirent.obj_name)
                dirent.view_link = view_dir_base + '?p=' + urlquote(p_dpath)
                dirent.dl_link = dl_dir_base + '?p=' + urlquote(p_dpath)
                dir_list.append(dirent)
            else:
                file_list.append(dirent)
                if repo.version == 0:
                    dirent.file_size = get_file_size(repo.store_id,
                                                     repo.version,
                                                     dirent.obj_id)
                else:
                    dirent.file_size = dirent.size
                dirent.starred = False
                fpath = posixpath.join(path, dirent.obj_name)
                p_fpath = posixpath.join(path, dirent.obj_name)
                dirent.view_link = reverse('view_lib_file',
                                           args=[repo.id, p_fpath])
                dirent.dl_link = get_file_download_link(
                    repo.id, dirent.obj_id, p_fpath)
                dirent.history_link = file_history_base + '?p=' + urlquote(
                    p_fpath)
                if fpath in starred_files:
                    dirent.starred = True
                for share in fileshares:
                    if fpath == share.path:
                        dirent.sharelink = gen_file_share_link(share.token)
                        dirent.sharetoken = share.token
                        break

        return (file_list, dir_list, dirent_more)
Exemple #30
0
    def Eval(self, node):
        #print('!!', node.tag)

        if node.tag == bool_expr_e.WordTest:
            s = self._EvalCompoundWord(node.w)
            return bool(s)

        if node.tag == bool_expr_e.LogicalNot:
            b = self.Eval(node.child)
            return not b

        if node.tag == bool_expr_e.LogicalAnd:
            # Short-circuit evaluation
            if self.Eval(node.left):
                return self.Eval(node.right)
            else:
                return False

        if node.tag == bool_expr_e.LogicalOr:
            if self.Eval(node.left):
                return True
            else:
                return self.Eval(node.right)

        if node.tag == bool_expr_e.BoolUnary:
            op_id = node.op_id
            s = self._EvalCompoundWord(node.child)

            # Now dispatch on arg type
            arg_type = BOOL_ARG_TYPES[
                op_id.enum_id]  # could be static in the LST?

            if arg_type == bool_arg_type_e.Path:
                # Only use lstat if we're testing for a symlink.
                if op_id in (Id.BoolUnary_h, Id.BoolUnary_L):
                    try:
                        mode = posix.lstat(s).st_mode
                    except OSError:
                        return False

                    return stat.S_ISLNK(mode)

                try:
                    st = posix.stat(s)
                except OSError:
                    # TODO: Signal extra debug information?
                    #log("Error from stat(%r): %s" % (s, e))
                    return False
                mode = st.st_mode

                if op_id in (Id.BoolUnary_e,
                             Id.BoolUnary_a):  # -a is alias for -e
                    return True

                if op_id == Id.BoolUnary_f:
                    return stat.S_ISREG(mode)

                if op_id == Id.BoolUnary_d:
                    return stat.S_ISDIR(mode)

                if op_id == Id.BoolUnary_x:
                    return posix.access(s, posix.X_OK)

                if op_id == Id.BoolUnary_r:
                    return posix.access(s, posix.R_OK)

                if op_id == Id.BoolUnary_w:
                    return posix.access(s, posix.W_OK)

                if op_id == Id.BoolUnary_s:
                    return st.st_size != 0

                raise NotImplementedError(op_id)

            if arg_type == bool_arg_type_e.Str:
                if op_id == Id.BoolUnary_z:
                    return not bool(s)
                if op_id == Id.BoolUnary_n:
                    return bool(s)

                raise NotImplementedError(op_id)

            if arg_type == bool_arg_type_e.Other:
                if op_id == Id.BoolUnary_t:
                    try:
                        fd = int(s)
                    except ValueError:
                        # TODO: Need location information of [
                        e_die('Invalid file descriptor %r', s)
                    return posix.isatty(fd)

                raise NotImplementedError(op_id)

            raise NotImplementedError(arg_type)

        if node.tag == bool_expr_e.BoolBinary:
            op_id = node.op_id

            s1 = self._EvalCompoundWord(node.left)
            # Whether to glob escape
            do_fnmatch = op_id in (Id.BoolBinary_GlobEqual,
                                   Id.BoolBinary_GlobDEqual,
                                   Id.BoolBinary_GlobNEqual)
            do_ere = (op_id == Id.BoolBinary_EqualTilde)
            s2 = self._EvalCompoundWord(node.right,
                                        do_fnmatch=do_fnmatch,
                                        do_ere=do_ere)

            # Now dispatch on arg type
            arg_type = BOOL_ARG_TYPES[op_id.enum_id]

            if arg_type == bool_arg_type_e.Path:
                st1 = posix.stat(s1)
                st2 = posix.stat(s2)

                # TODO: test newer than (mtime)
                if op_id == Id.BoolBinary_nt:
                    return st1[stat.ST_MTIME] > st2[stat.ST_MTIME]
                if op_id == Id.BoolBinary_ot:
                    return st1[stat.ST_MTIME] < st2[stat.ST_MTIME]

                raise NotImplementedError(op_id)

            if arg_type == bool_arg_type_e.Int:
                # NOTE: We assume they are constants like [[ 3 -eq 3 ]].
                # Bash also allows [[ 1+2 -eq 3 ]].
                i1 = self._StringToIntegerOrError(s1, blame_word=node.left)
                i2 = self._StringToIntegerOrError(s2, blame_word=node.right)

                if op_id == Id.BoolBinary_eq:
                    return i1 == i2
                if op_id == Id.BoolBinary_ne:
                    return i1 != i2
                if op_id == Id.BoolBinary_gt:
                    return i1 > i2
                if op_id == Id.BoolBinary_ge:
                    return i1 >= i2
                if op_id == Id.BoolBinary_lt:
                    return i1 < i2
                if op_id == Id.BoolBinary_le:
                    return i1 <= i2

                raise NotImplementedError(op_id)

            if arg_type == bool_arg_type_e.Str:

                if op_id in (Id.BoolBinary_GlobEqual,
                             Id.BoolBinary_GlobDEqual):
                    #log('Matching %s against pattern %s', s1, s2)

                    # TODO: Respect extended glob?  * and ! and ? are quoted improperly.
                    # But @ and + are OK.
                    return libc.fnmatch(s2, s1)

                if op_id == Id.BoolBinary_GlobNEqual:
                    return not libc.fnmatch(s2, s1)

                if op_id in (Id.BoolBinary_Equal, Id.BoolBinary_DEqual):
                    return s1 == s2

                if op_id == Id.BoolBinary_NEqual:
                    return s1 != s2

                if op_id == Id.BoolBinary_EqualTilde:
                    # TODO: This should go to --debug-file
                    #log('Matching %r against regex %r', s1, s2)
                    try:
                        matches = libc.regex_match(s2, s1)
                    except RuntimeError:
                        # Status 2 indicates a regex parse error.  This is fatal in OSH but
                        # not in bash, which treats [[ like a command with an exit code.
                        e_die("Invalid regex %r",
                              s2,
                              word=node.right,
                              status=2)

                    if matches is None:
                        return False

                    self._SetRegexMatches(matches)
                    return True

                if op_id == Id.Redir_Less:  # pun
                    return s1 < s2

                if op_id == Id.Redir_Great:  # pun
                    return s1 > s2

                raise NotImplementedError(op_id)

        raise AssertionError(node.tag)
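The unary path tests above mirror shell's `[[ -e / -f / -d / -L ]]`; a minimal standalone sketch of the same dispatch, using only the standard library (names are illustrative, not from the original source):

import os
import stat

def test_path(op, path):
    # Tiny analogue of the unary path tests: -e, -f, -d, -L.
    try:
        # Only -L must not follow symlinks, matching the lstat special case.
        mode = (os.lstat(path) if op == '-L' else os.stat(path)).st_mode
    except OSError:
        return False
    return {
        '-e': True,
        '-f': stat.S_ISREG(mode),
        '-d': stat.S_ISDIR(mode),
        '-L': stat.S_ISLNK(mode),
    }[op]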