Example #1
0
    def __init__(self, exec_file, extra_args=None, stdout_file=None, stderr_file=None):
        """Wrap a program to be launched later as a subprocess.

        Args:
            exec_file: path to the binary to run; must be executable.
            extra_args: optional iterable of extra arguments; each element
                is coerced to str and copied, so later mutation of the
                caller's sequence cannot affect this object.
            stdout_file: optional file object to receive the child's stdout.
            stderr_file: optional file object to receive the child's stderr.

        Raises:
            ValueError: if exec_file is not an executable file.  (ValueError
                is a subclass of Exception, so existing callers that caught
                the old generic Exception still work.)
        """
        self.exec_file = exec_file
        self.cwd = None           # working directory for the child; set later
        self.stdout_file = stdout_file
        self.stderr_file = stderr_file
        self.sp = None            # subprocess handle, populated on launch

        if extra_args is None:
            self.extra_args = None
        else:
            # Stringify and copy so the caller's list cannot change under us.
            self.extra_args = [str(a) for a in extra_args]

        if not is_executable(self.exec_file):
            raise ValueError("Not executable ? : %s" % self.exec_file)
Example #2
0
def handle_dat(request):
    """Serve a request: run an executable or return a readable file's bytes.

    request.fields[0] is the target name (relative to the allowed
    directory), request.fields[1] (optional) is stdin data for an
    executable target, and any further fields are command-line arguments.

    Raises:
        SecurityViolation: if the resolved path escapes the allowed directory.
        InvalidRequest: if the target is neither executable nor readable.
    """
    allowed_directory = "global/"
    target = allowed_directory + request.fields[0]   # avoid shadowing builtin 'file'
    args = request.fields[2:]
    # Reject path-traversal attempts (e.g. "../secret") before touching disk.
    if not in_directory(target, allowed_directory):
        raise SecurityViolation("Forbidden", request.fields[0])
    if is_executable(target):
        # BUG FIX: the stdin field is fields[1], so at least TWO fields must
        # exist.  The original guard was `len(request.fields) >= 1`, which is
        # always true here (fields[0] was already read), so a single-field
        # request raised IndexError instead of sending empty stdin.
        stdin_data = (request.fields[1].encode("utf-8")
                      if len(request.fields) >= 2 else b'')
        p = subprocess.run([target] + args,
                           input=stdin_data,
                           capture_output=True)
        if len(p.stderr) > 0:
            print_err("stderr: " + p.stderr.decode("utf-8"))
        return unet_request.Request(p.stdout)
    elif is_readable(target):
        with open(target, "rb") as f:
            return unet_request.Request(f.read())
    else:
        # NOTE(review): the -1 keeps the directory's trailing "/" in the
        # reported name ("/name" rather than "name") — presumably
        # intentional; verify against the protocol.
        raise InvalidRequest("Not Found", target[len(allowed_directory) - 1:])
Example #3
0
def run_exp(name, schedule, scheduler, kernel, duration, work_dir, out_dir):
    """Build an Experiment from a parsed schedule dict and execute it.

    'schedule' must provide 'proc' (pairs of proc path and data to write)
    and 'spin' (either bare argument strings, meaning the default spin
    program, or (program, args) pairs).  'kernel' is accepted for interface
    compatibility but not used here.

    Raises:
        IOError: for a missing proc path or a malformed spin configuration.
        OSError: when a resolved spin binary is not executable.
    """
    entries = []
    spinners = []

    # Validate and collect every proc entry to be written for the run.
    for entry_conf in schedule['proc']:
        path, data = entry_conf[0], entry_conf[1]
        if not os.path.exists(path):
            raise IOError("Invalid proc path %s: %s" % (path, name))
        entries.append(ProcEntry(path, data))

    # Resolve each spinner description into a runnable Executable.
    for spin_conf in schedule['spin']:
        if isinstance(spin_conf, str):
            # A bare string means: default spin program with these args.
            spin, args = conf.DEFAULTS['spin'], spin_conf
        else:
            # Otherwise it must be a (program, args) pair.
            if len(spin_conf) != 2:
                raise IOError("Invalid spin conf %s: %s" % (spin_conf, name))
            spin, args = spin_conf[0], spin_conf[1]

        real_spin = com.get_executable(spin, "")
        real_args = args.split()

        # Actual spin programs take -w plus a trailing duration argument.
        if re.match(".*spin", real_spin):
            real_args = ['-w'] + real_args + [duration]

        if not com.is_executable(real_spin):
            raise OSError("Cannot run spin %s: %s" % (real_spin, name))

        spinners.append(Executable(real_spin, real_args))

    experiment = Experiment(name, scheduler, work_dir, out_dir,
                            entries, spinners)
    experiment.run_exp()
Example #4
0
def main(event):
    """Execute a batch of gg thunks and return their outputs.

    event keys used:
        storageBackend -- exported as GG_STORAGE_URI for the gg tools.
        thunks         -- list of {'hash', 'data' (base64), 'outputs'} dicts.
        timelog        -- optional truthy flag; adds --timelog to the command.

    Returns {'returnCode', 'stdout'} when an expected output is missing,
    or {'returnCode': 0, 'stdout': '', 'executedThunks': [...]} on success.
    """
    start_time = datetime.now()
    print('startTime: %s' % start_time.strftime('[%Y-%m-%dT%H:%M:%S.%fZ]'))

    os.environ['GG_STORAGE_URI'] = event['storageBackend']
    thunks = event['thunks']
    timelog = event.get('timelog')

    # Remove old thunk-execute scratch directories.
    os.system("rm -rf /tmp/thunk-execute.*")

    # Write thunks to disk.  If the disk is full (ENOSPC), wipe the gg
    # blob/reduction directories once and retry the whole write phase.
    tried_once = False
    while True:
        try:
            for thunk_item in thunks:
                blob_path = GGPaths.blob_path(thunk_item['hash'])
                if os.path.exists(blob_path):
                    os.remove(blob_path)
                with open(blob_path, "wb") as fout:
                    fout.write(b64decode(thunk_item['data']))

            # Move executables from the Lambda package to the .gg directory.
            executables_dir = os.path.join(curdir, 'executables')
            if os.path.exists(executables_dir):
                for exe in os.listdir(executables_dir):
                    blob_path = GGPaths.blob_path(exe)
                    if not os.path.exists(blob_path):
                        shutil.copy(os.path.join(executables_dir, exe),
                                    blob_path)
                        make_executable(blob_path)
            break

        except OSError as ex:
            if not tried_once and ex.errno == errno.ENOSPC:
                # There's no space left; get rid of GG_DIR and try again.
                tried_once = True
                os.system("rm -rf '{}'".format(GGPaths.blobs))
                os.system("rm -rf '{}'".format(GGPaths.reductions))
                make_gg_dirs()
                continue
            raise

    # Execute the thunks and upload the results.
    # NOTE(review): thunk hashes are interpolated into a shell command
    # string; they are assumed to be shell-safe digests -- verify upstream.
    command = "./gg-execute-static --get-dependencies --put-output --cleanup"
    if timelog:
        command += " --timelog"
    for thunk_item in thunks:
        command += " " + thunk_item['hash']

    print('command: ' + command)
    return_code, stdout = run_command(command)
    print('command output: ' + stdout)

    executed_thunks = []
    for thunk in thunks:
        outputs = []

        for output_tag in thunk['outputs']:
            output_hash = GGCache.check(thunk['hash'], output_tag)
            if not output_hash:
                # An expected output is missing: surface the raw result.
                return {'returnCode': return_code, 'stdout': stdout}

            blob_path = GGPaths.blob_path(output_hash)
            data = None
            if is_hash_for_thunk(output_hash):
                # Inline thunk outputs so the caller can chain execution.
                with open(blob_path, 'rb') as tin:
                    data = b64encode(tin.read()).decode('ascii')

            outputs.append({
                'tag': output_tag,
                'hash': output_hash,
                'size': os.path.getsize(blob_path),
                'executable': is_executable(blob_path),
                'data': data,
            })

        executed_thunks.append({'thunkHash': thunk['hash'],
                                'outputs': outputs})

    return {'returnCode': 0, 'stdout': '', 'executedThunks': executed_thunks}
Example #5
0
File: function.py  Project: eorbay/gg
def handler(event, context):
    """Lambda handler: execute one gg thunk and store its output in redis.

    event keys used:
        thunk_hash, thunk_data (base64), s3_bucket, s3_region, infiles,
        timelog (optional, default True -- uploads a run log to S3).

    Returns a result dict on success, or {'errorType': ...} on failure.
    """
    gginfo = GGInfo()

    gginfo.thunk_hash = event['thunk_hash']
    gginfo.s3_bucket = event['s3_bucket']
    gginfo.s3_region = event['s3_region']
    gginfo.infiles = event['infiles']

    enable_timelog = event.get('timelog', True)
    timelogger = TimeLog(enabled=enable_timelog)

    thunk_data = b64decode(event['thunk_data'])
    with open(GGPaths.blob_path(gginfo.thunk_hash), "wb") as fout:
        fout.write(thunk_data)

    timelogger.add_point("write thunk to disk")

    # Copy the executables bundled in the Lambda package into the gg blob
    # directory (only the ones not already present).
    executables_dir = os.path.join(curdir, 'executables')
    if os.path.exists(executables_dir):
        for exe in os.listdir(executables_dir):
            blob_path = GGPaths.blob_path(exe)
            if not os.path.exists(blob_path):
                shutil.copy(os.path.join(executables_dir, exe), blob_path)
                make_executable(blob_path)

    timelogger.add_point("copy executables to ggdir")

    # only clean up the gg directory if running on Lambda.
    if not fetch_dependencies(gginfo, gginfo.infiles, not GG_RUNNER):
        return {'errorType': 'GG-FetchDependenciesFailed'}

    # Mark every executable infile as such on disk.
    for infile in gginfo.infiles:
        if infile['executable']:
            print("exe %s" % infile['hash'])
            make_executable(GGPaths.blob_path(infile['hash']))

    timelogger.add_point("fetching the dependencies")

    return_code, output = run_command(["gg-execute-static", gginfo.thunk_hash])
    if return_code:
        return {'errorType': 'GG-ExecutionFailed'}

    timelogger.add_point("gg-execute")

    result = GGCache.check(gginfo.thunk_hash)
    if not result:
        return {'errorType': 'GG-ExecutionFailed'}

    executable = is_executable(GGPaths.blob_path(result))

    timelogger.add_point("check the outfile")

    s3_client = boto3.client('s3')

    # Store the output blob in redis under its hash.
    # NOTE(review): 'r' is presumably a module-level redis client -- verify.
    print("writing %s" % GGPaths.blob_path(result))
    with open(GGPaths.blob_path(result), "rb") as bfile:
        blob = bfile.read()
        print("file read")
        r.set(result, blob)
        print("redis key set")

    # Dead code removed: a disabled S3 upload (upload_file / put_object_acl /
    # put_object_tagging) was kept here as a no-op triple-quoted string.
    timelogger.add_point("upload outfile to s3")

    if enable_timelog:
        print("s3 timelog uploading..")
        s3_client.put_object(ACL='public-read',
                             Bucket=gginfo.s3_bucket,
                             Key="runlogs/{}".format(gginfo.thunk_hash),
                             Body=str({
                                 'output_hash': result,
                                 'started': timelogger.start,
                                 'timelog': timelogger.points
                             }).encode('utf-8'))
    # Printed unconditionally, matching the original behavior.
    print("s3 timelog uploaded!")
    return {
        'thunk_hash': gginfo.thunk_hash,
        'output_hash': result,
        'output_size': os.path.getsize(GGPaths.blob_path(result)),
        'executable_output': executable
    }
Example #6
0
def handler(event, context):
    """Execute a batch of gg thunks with gg-execute-static and report outputs.

    Mirrors the thunks from the event onto disk, stages the executables
    bundled with the Lambda package into the gg blob directory, runs
    gg-execute-static over every thunk hash, and returns each thunk's
    outputs (inlining the data of outputs that are themselves thunks).
    """
    os.environ['GG_STORAGE_URI'] = event['storageBackend']
    thunks = event['thunks']

    # Clear stale execution scratch directories.
    os.system("rm -rf /tmp/thunk-execute.*")

    # Materialize every thunk blob on disk, replacing any stale copy.
    for item in thunks:
        dest = GGPaths.blob_path(item['hash'])
        if os.path.exists(dest):
            os.remove(dest)
        with open(dest, "wb") as fout:
            fout.write(b64decode(item['data']))

    # Stage packaged executables into the .gg blob directory.
    executables_dir = os.path.join(curdir, 'executables')
    if os.path.exists(executables_dir):
        for exe in os.listdir(executables_dir):
            dest = GGPaths.blob_path(exe)
            if not os.path.exists(dest):
                shutil.copy(os.path.join(executables_dir, exe), dest)
                make_executable(dest)

    # Assemble and run the gg-execute-static invocation.
    command = [
        "gg-execute-static", "--get-dependencies", "--put-output", "--cleanup"
    ]
    if event.get('timelog'):
        command.append("--timelog")

    return_code, stdout = run_command(command + [t['hash'] for t in thunks])

    executed_thunks = []
    for thunk in thunks:
        outputs = []
        for tag in thunk['outputs']:
            output_hash = GGCache.check(thunk['hash'], tag)
            if not output_hash:
                # Missing output: surface the raw command result instead.
                return {'returnCode': return_code, 'stdout': stdout}

            blob = GGPaths.blob_path(output_hash)
            data = None
            if is_hash_for_thunk(output_hash):
                with open(blob, 'rb') as tin:
                    data = b64encode(tin.read()).decode('ascii')

            outputs.append({
                'tag': tag,
                'hash': output_hash,
                'size': os.path.getsize(blob),
                'executable': is_executable(blob),
                'data': data,
            })

        executed_thunks.append({'thunkHash': thunk['hash'],
                                'outputs': outputs})

    return {'returnCode': 0, 'stdout': '', 'executedThunks': executed_thunks}