Code Example #1
File: util.py Project: adongy/spreads
 def to_python(self, value):
     from spreadsplug.web.app import app
     try:
         uuid.UUID(value)
         workflow = Workflow.find_by_id(app.config['base_path'], value)
     except ValueError:
         workflow = Workflow.find_by_slug(app.config['base_path'], value)
     if workflow is None:
         abort(404)
     return workflow
Code Example #2
File: util.py Project: jamescr/spreads
 def to_python(self, value):
     from spreadsplug.web import app
     try:
         uuid.UUID(value)
         workflow = Workflow.find_by_id(app.config['base_path'], value)
     except ValueError:
         workflow = Workflow.find_by_slug(app.config['base_path'], value)
     if workflow is None:
         abort(404)
     return workflow
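Note: both variants implement to_python on what looks like a custom Flask/Werkzeug URL converter: the value is resolved as a UUID when it parses as one, and as a human-readable slug otherwise. Below is a minimal sketch of how such a converter might be registered with a Flask app; the class name WorkflowConverter and the example route are illustrative assumptions, not taken from the spreads source.

from werkzeug.routing import BaseConverter

class WorkflowConverter(BaseConverter):
    def to_python(self, value):
        ...  # UUID-or-slug lookup plus abort(404), as in the examples above

app.url_map.converters['workflow'] = WorkflowConverter

@app.route('/api/workflow/<workflow:workflow>')
def get_workflow(workflow):
    ...  # the view receives an already-resolved Workflow instance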
Code Example #3
    def get(self, workflow_id, filename):
        uuid.UUID(workflow_id)
        workflow = Workflow.find_by_id(self.base_path, workflow_id)
        zstream = workflow.bag.package_as_zipstream(compression=None)

        self.set_status(200)
        self.set_header('Content-type', 'application/zip')
        self.set_header('Content-length',
                        str(self.calculate_zipsize(zstream.paths_to_write)))

        self.zstream_iter = iter(zstream)

        self.send_next_chunk()
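Note: send_next_chunk is not shown in this excerpt. Assuming the handler is a tornado.web.RequestHandler subclass, a plausible sketch (not the actual spreads implementation) looks like this:

    def send_next_chunk(self):
        try:
            chunk = next(self.zstream_iter)
            self.write(chunk)
            # flush() pushes the chunk to the client and re-invokes this
            # method once the write buffer has drained, keeping memory
            # usage bounded while the zipstream is generated lazily.
            self.flush(callback=self.send_next_chunk)
        except StopIteration:
            self.finish()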
Code Example #4
    def get(self, workflow_id, filename):
        uuid.UUID(workflow_id)
        workflow = Workflow.find_by_id(self.base_path, workflow_id)

        self.set_status(200)
        self.set_header('Content-type', 'application/tar')
        self.set_header('Content-length', self.calculate_tarsize(workflow))

        self.fp = QueueIO(self)
        self.thread = threading.Thread(target=self.create_tar,
                                       args=(workflow, self.on_done,
                                             self.on_exception))
        self.thread.start()
        self.send_next_chunk()
Code Example #5
    def get(self, workflow_id, filename):
        uuid.UUID(workflow_id)
        workflow = Workflow.find_by_id(self.base_path, workflow_id)

        self.set_status(200)
        self.set_header('Content-type', 'application/tar')
        self.set_header('Content-length', self.calculate_tarsize(workflow))

        self.fp = QueueIO(self)
        self.thread = threading.Thread(
            target=self.create_tar,
            args=(workflow, self.on_done, self.on_exception)
        )
        self.thread.start()
        self.send_next_chunk()
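Note: QueueIO is not defined in this excerpt. Its apparent role is a writable file-like adapter that the tar-producing thread writes into while the handler streams chunks out; a rough sketch under that assumption (Python 2, matching the surrounding code):

import Queue  # 'queue' on Python 3

class QueueIO(object):
    def __init__(self, handler):
        self.handler = handler
        self.queue = Queue.Queue()
        self.closed = False

    def write(self, data):
        # Hand each written chunk to the consumer; a bounded queue would
        # additionally throttle the producer thread.
        self.queue.put(data)
        return len(data)

    def close(self):
        self.closed = True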
Code Example #6
def transfer_to_stick(wf_id, base_path):
    workflow = Workflow.find_by_id(base_path, wf_id)
    stick = find_stick()
    files = list(workflow.path.rglob('*'))
    num_files = len(files)
    # Filter out problematic characters
    clean_name = (workflow.path.name.replace(':', '_').replace('/', '_'))
    workflow.status['step'] = 'transfer'
    try:
        if IS_WIN:
            target_path = Path(stick) / clean_name
        else:
            mount = stick.get_dbus_method(
                "FilesystemMount",
                dbus_interface="org.freedesktop.UDisks.Device")
            mount_point = mount('', [])
            target_path = Path(mount_point) / clean_name
        if target_path.exists():
            shutil.rmtree(unicode(target_path))
        target_path.mkdir()
        signals['transfer:started'].send(workflow)
        for num, path in enumerate(files, 1):
            signals['transfer:progressed'].send(
                workflow,
                progress=(num / num_files) * 0.79,
                status=path.name)
            workflow.status['step_done'] = (num / num_files) * 0.79
            target = target_path / path.relative_to(workflow.path)
            if path.is_dir():
                target.mkdir()
            else:
                shutil.copyfile(unicode(path), unicode(target))
    finally:
        if 'mount_point' in locals():
            signals['transfer:progressed'].send(workflow,
                                                progress=0.8,
                                                status="Syncing...")
            workflow.status['step_done'] = 0.8
            unmount = stick.get_dbus_method(
                "FilesystemUnmount",
                dbus_interface="org.freedesktop.UDisks.Device")
            unmount([], timeout=1e6)  # dbus-python doesn't know an infinite
                                      # timeout... unmounting sometimes takes a
                                      # long time, since the device has to be
                                      # synced.
        signals['transfer:completed'].send(workflow)
        workflow.status['step'] = None
Code Example #7
File: tasks.py Project: 5up3rD4n1/spreads
def transfer_to_stick(wf_id, base_path):
    workflow = Workflow.find_by_id(base_path, wf_id)
    stick = find_stick()
    files = list(workflow.path.rglob('*'))
    num_files = len(files)
    # Filter out problematic characters
    clean_name = (workflow.path.name.replace(':', '_')
                                    .replace('/', '_'))
    workflow.status['step'] = 'transfer'
    try:
        if IS_WIN:
            target_path = Path(stick)/clean_name
        else:
            mount = stick.get_dbus_method(
                "FilesystemMount",
                dbus_interface="org.freedesktop.UDisks.Device")
            mount_point = mount('', [])
            target_path = Path(mount_point)/clean_name
        if target_path.exists():
            shutil.rmtree(unicode(target_path))
        target_path.mkdir()
        signals['transfer:started'].send(workflow)
        for num, path in enumerate(files, 1):
            signals['transfer:progressed'].send(
                workflow, progress=(num/num_files)*0.79, status=path.name)
            workflow.status['step_done'] = (num/num_files)*0.79
            target = target_path/path.relative_to(workflow.path)
            if path.is_dir():
                target.mkdir()
            else:
                shutil.copyfile(unicode(path), unicode(target))
    finally:
        if 'mount_point' in locals():
            signals['transfer:progressed'].send(workflow, progress=0.8,
                                                status="Syncing...")
            workflow.status['step_done'] = 0.8
            unmount = stick.get_dbus_method(
                "FilesystemUnmount",
                dbus_interface="org.freedesktop.UDisks.Device")
            unmount([], timeout=1e6)  # dbus-python doesn't know an infinite
                                      # timeout... unmounting sometimes takes a
                                      # long time, since the device has to be
                                      # synced.
        signals['transfer:completed'].send(workflow)
        workflow.status['step'] = None
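Note: (num / num_files) * 0.79 only yields a fractional progress value under true division; on Python 2 this relies on a from __future__ import division at the top of tasks.py, which the excerpt does not show. The progress updates travel over blinker-style signals; an illustrative subscriber (the handler signature is an assumption inferred from the send() calls above) might look like:

def on_transfer_progress(workflow, progress=None, status=None):
    # 'workflow' is the sender; progress is a float in [0, 1].
    print("{0}: {1:.0%} {2}".format(workflow.slug, progress, status))

signals['transfer:progressed'].connect(on_transfer_progress)
transfer_to_stick(wf_id, base_path)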
Code Example #8
File: tasks.py Project: 5up3rD4n1/spreads
def upload_workflow(wf_id, base_path, endpoint, user_config,
                    start_process=False, start_output=False):
    logger.debug("Uploading workflow to postprocessing server")

    workflow = Workflow.find_by_id(base_path, wf_id)
    # NOTE: This is kind of nasty.... We temporarily write the user-supplied
    # configuration to the bag, update the tag-payload, create the zip, and
    # once everything is done, we restore the old version
    tmp_cfg = copy.deepcopy(workflow.config)
    tmp_cfg.set(user_config)
    tmp_cfg_path = workflow.path/'config.yml'
    tmp_cfg.dump(filename=unicode(tmp_cfg_path),
                 sections=(user_config['plugins'] + ["plugins", "device"]))
    workflow.bag.add_tagfiles(unicode(tmp_cfg_path))

    # Create a zipstream from the workflow-bag
    zstream = workflow.bag.package_as_zipstream(compression=None)
    zstream_copy = copy.deepcopy(zstream)
    zsize = sum(len(x) for x in zstream_copy)

    def zstream_wrapper():
        """ Wrapper around our zstream so we can emit a signal when all data
        has been streamed to the client.
        """
        transferred = 0
        progress = "0.00"
        for data in zstream:
            yield data
            transferred += len(data)
            # Only update progress if we've progressed by at least 0.01
            new_progress = "{0:.2f}".format(transferred/zsize)
            if new_progress != progress:
                progress = new_progress
                signals['submit:progressed'].send(
                    workflow, progress=float(progress),
                    status="Uploading workflow...")

    # NOTE: This is necessary since requests makes a chunked upload when
    #       passed a plain generator, which is not supported by the WSGI
    #       protocol that receives it. Hence we wrap it inside of a
    #       GeneratorIO to make it appear as a file-like object with a
    #       known size.
    zstream_fp = GeneratorIO(zstream_wrapper(), zsize)
    signals['submit:started'].send(workflow)
    resp = requests.post(endpoint, data=zstream_fp,
                         headers={'Content-Type': 'application/zip'})
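    # A requests.Response is falsy for 4xx/5xx status codes.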
    if not resp:
        error_msg = "Upload failed: {0}".format(resp.content)
        signals['submit:error'].send(workflow, message=error_msg,
                                     data=resp.content)
        logger.error(error_msg)
    else:
        wfid = resp.json()['id']
        if start_process:
            requests.post(endpoint + "/{0}/process".format(wfid))
        if start_output:
            requests.post(endpoint + "/{0}/output".format(wfid))
        signals['submit:completed'].send(workflow, remote_id=wfid)

    # Restore our old configuration
    workflow._save_config()
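Note: GeneratorIO (definition not shown) wraps the generator as a file-like object with a known length, so requests sends a fixed Content-Length body rather than a chunked upload. A rough sketch of such an adapter, offered as an assumption about its shape rather than the actual spreads code:

class GeneratorIO(object):
    def __init__(self, generator, length):
        self._gen = generator
        self._buf = b''
        self._len = length

    def __len__(self):
        # requests uses the length to emit a Content-Length header.
        return self._len

    def read(self, size=-1):
        # Pull chunks from the generator until the read can be satisfied.
        while size < 0 or len(self._buf) < size:
            try:
                self._buf += next(self._gen)
            except StopIteration:
                break
        if size < 0:
            data, self._buf = self._buf, b''
        else:
            data, self._buf = self._buf[:size], self._buf[size:]
        return data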
Code Example #9
File: tasks.py Project: 5up3rD4n1/spreads
def output_workflow(wf_id, base_path):
    workflow = Workflow.find_by_id(base_path, wf_id)
    logger.debug("Initiating output generation for workflow {0}"
                 .format(workflow.slug))
    workflow.output()
Code Example #10
File: tasks.py Project: 5up3rD4n1/spreads
def process_workflow(wf_id, base_path):
    workflow = Workflow.find_by_id(base_path, wf_id)
    logger.debug("Initiating processing for workflow {0}"
                 .format(workflow.slug))
    workflow.process()
Code Example #11
def upload_workflow(wf_id,
                    base_path,
                    endpoint,
                    user_config,
                    start_process=False,
                    start_output=False):
    logger.debug("Uploading workflow to postprocessing server")

    workflow = Workflow.find_by_id(base_path, wf_id)
    # NOTE: This is kind of nasty.... We temporarily write the user-supplied
    # configuration to the bag, update the tag-payload, create the zip, and
    # once everything is done, we restore the old version
    tmp_cfg = copy.deepcopy(workflow.config)
    tmp_cfg.set(user_config)
    tmp_cfg_path = workflow.path / 'config.yml'
    tmp_cfg.dump(filename=unicode(tmp_cfg_path),
                 sections=(user_config['plugins'] + ["plugins", "device"]))
    workflow.bag.add_tagfiles(unicode(tmp_cfg_path))

    # Create a zipstream from the workflow-bag
    zstream = workflow.bag.package_as_zipstream(compression=None)
    zsize = calculate_zipsize(zstream.paths_to_write)

    def zstream_wrapper():
        """ Wrapper around our zstream so we can emit a signal when all data
        has been streamed to the client.
        """
        transferred = 0
        progress = "0.00"
        for data in zstream:
            yield data
            transferred += len(data)
            # Only update progress if we've progressed by at least 0.01
            new_progress = "{0:.2f}".format(transferred / zsize)
            if new_progress != progress:
                progress = new_progress
                signals['submit:progressed'].send(
                    workflow,
                    progress=float(progress),
                    status="Uploading workflow...")

    # NOTE: This is necessary since requests makes a chunked upload when
    #       passed a plain generator, which is not supported by the WSGI
    #       protocol that receives it. Hence we wrap it inside of a
    #       GeneratorIO to make it appear as a file-like object with a
    #       known size.
    zstream_fp = GeneratorIO(zstream_wrapper(), zsize)
    logger.debug("Projected size for upload: {}".format(zsize))
    signals['submit:started'].send(workflow)
    resp = requests.post(endpoint,
                         data=zstream_fp,
                         headers={'Content-Type': 'application/zip'})
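    # A requests.Response is falsy for 4xx/5xx status codes.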
    if not resp:
        error_msg = "Upload failed: {0}".format(resp.content)
        signals['submit:error'].send(workflow,
                                     message=error_msg,
                                     data=resp.content)
        logger.error(error_msg)
    else:
        wfid = resp.json()['id']
        if start_process:
            requests.post(endpoint + "/{0}/process".format(wfid))
        if start_output:
            requests.post(endpoint + "/{0}/output".format(wfid))
        signals['submit:completed'].send(workflow, remote_id=wfid)

    # Restore our old configuration
    workflow._save_config()
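Note: this variant differs from Code Example #8 mainly in how it predicts the upload size: instead of deep-copying the zipstream and summing its chunks, it asks calculate_zipsize for the size up front, avoiding a second pass over the data. A hedged usage sketch; the endpoint URL and configuration below are illustrative, not taken from the spreads source:

upload_workflow(
    wf_id, base_path,
    endpoint="http://postproc.example.com/api/workflow",
    user_config={'plugins': ['autorotate']},
    start_process=True)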
Code Example #12
def output_workflow(wf_id, base_path):
    workflow = Workflow.find_by_id(base_path, wf_id)
    logger.debug("Initiating output generation for workflow {0}".format(
        workflow.slug))
    workflow.output()
Code Example #13
def process_workflow(wf_id, base_path):
    workflow = Workflow.find_by_id(base_path, wf_id)
    logger.debug("Initiating processing for workflow {0}".format(
        workflow.slug))
    workflow.process()