Example #1
def create_workflow():
    """ Create a new workflow.

    Payload should be a JSON object. The only required attribute is 'name' for
    the desired workflow name. Optionally, 'config' can be set to a
    configuration object in the form "plugin_name: { setting: value, ...}".

    Returns the newly created workflow as a JSON object.
    """
    data = json.loads(request.data)
    path = Path(app.config['base_path'])/unicode(data['name'])

    # Setup default configuration
    config = app.config['default_config']
    # Overlay user-supplied values, if existant
    user_config = data.get('config', None)
    if user_config is not None:
        config = config.with_overlay(user_config)
    workflow = Workflow(config=config, path=path,
                        step=data.get('step', None),
                        step_done=data.get('step_done', None))
    try:
        workflow.id = persistence.save_workflow(workflow)
    except persistence.ValidationError as e:
        return make_response(json.dumps(dict(errors=e.errors)), 400,
                             {'Content-Type': 'application/json'})
    return make_response(json.dumps(workflow),
                         200, {'Content-Type': 'application/json'})
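The payload format described in the docstring is easiest to see from the client side. A minimal sketch of such a request, assuming the view is routed at /workflow on a local development server (the URL, port and example config values are illustrative, not part of the API shown above):

import json

import requests

payload = {
    'name': 'my-book-scan',
    # Optional: per-plugin settings in the "plugin_name: {setting: value}" form
    'config': {'device': {'dpi': 300}},
}
resp = requests.post('http://localhost:5000/workflow',
                     data=json.dumps(payload),
                     headers={'Content-Type': 'application/json'})
if resp.status_code == 400:
    print(resp.json()['errors'])  # validation errors, as returned above
else:
    print(resp.json())            # the newly created workflow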
Example #2
def create_workflow():
    """ Create a new workflow.

    Payload should be a JSON object. The only required attribute is 'name' for
    the desired workflow name. Optionally, 'config' can be set to a
    configuration object in the form "plugin_name: { setting: value, ...}".

    Returns the newly created workflow as a JSON object.
    """
    data = json.loads(request.data)
    path = Path(app.config['base_path']) / unicode(data['name'])

    # Set up the default configuration
    config = app.config['default_config']
    # Overlay user-supplied values, if present
    user_config = data.get('config', None)
    if user_config is not None:
        config = config.with_overlay(user_config)
    workflow = Workflow(config=config,
                        path=path,
                        step=data.get('step', None),
                        step_done=data.get('step_done', None))
    try:
        workflow.id = persistence.save_workflow(workflow)
    except persistence.ValidationError as e:
        return make_response(json.dumps(dict(errors=e.errors)), 400,
                             {'Content-Type': 'application/json'})
    return make_response(json.dumps(workflow), 200,
                         {'Content-Type': 'application/json'})
Example #3
def output(config):
    path = config['path'].get()
    workflow = Workflow(config=config, path=path)
    draw_progress(0)
    workflow.on_step_progressed.connect(
        lambda x, **kwargs: draw_progress(kwargs['progress']),
        sender=workflow, weak=False)
    workflow.output()
Example #4
def output(config):
    path = config['path'].get()
    workflow = Workflow(config=config, path=path)
    draw_progress(0)
    workflow.on_step_progressed.connect(
        lambda x, **kwargs: draw_progress(kwargs['progress']),
        sender=workflow,
        weak=False)
    workflow.output()
Example #5
    def to_python(self, value):
        from spreadsplug.web.app import app
        try:
            uuid.UUID(value)
            workflow = Workflow.find_by_id(app.config['base_path'], value)
        except ValueError:
            workflow = Workflow.find_by_slug(app.config['base_path'], value)
        if workflow is None:
            abort(404)
        return workflow
Example #6
    def to_python(self, value):
        from spreadsplug.web import app
        try:
            uuid.UUID(value)
            workflow = Workflow.find_by_id(app.config['base_path'], value)
        except ValueError:
            workflow = Workflow.find_by_slug(app.config['base_path'], value)
        if workflow is None:
            abort(404)
        return workflow
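Both variants implement the to_python half of a werkzeug URL converter: a value that parses as a UUID is looked up by id, everything else is treated as a slug. A minimal sketch of how such a converter is typically registered with a Flask app (the class name, route and helper are assumptions, not taken from the excerpt):

from werkzeug.routing import BaseConverter

class WorkflowConverter(BaseConverter):
    def to_python(self, value):
        # Resolve a UUID or slug to a Workflow, as in the examples above
        return find_workflow(value)  # hypothetical helper

app.url_map.converters['workflow'] = WorkflowConverter

# The converter can then appear directly in route rules:
@app.route('/api/workflow/<workflow:workflow>', methods=['DELETE'])
def delete_workflow(workflow):
    Workflow.remove(workflow)
    return jsonify({})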
Example #7
def delete_workflow(workflow):
    """ Delete a single workflow from database and disk.

    :param workflow:    UUID or slug for the workflow to be deleted
    :type workflow:     str

    :status 200:        When deletion was successful
    """
    Workflow.remove(workflow)
    return jsonify({})
Example #8
def create_workflow():
    """ Create a new workflow.

    :reqheader Accept:      :mimetype:`application/json`
    :<json object config:   Configuration for new workflow
    :<json object metadata: Metadata for new workflow

    :resheader Content-Type:    :mimetype:`application/json`
    :status 200:                When everything was OK.
    :status 400:                When validation of configuration or metadata
                                failed.
    """
    data = json.loads(request.data)

    if data.get('config'):
        config = app.config['default_config'].with_overlay(
            data.get('config'))
    else:
        config = app.config['default_config']

    metadata = data.get('metadata', {})

    workflow = Workflow.create(location=app.config['base_path'],
                               config=config,
                               metadata=metadata)
    return make_response(json.dumps(workflow),
                         200, {'Content-Type': 'application/json'})
Example #9
def create_workflow():
    """ Create a new workflow.

    Returns the newly created workflow as a JSON object.
    """
    if request.content_type == 'application/zip':
        zfile = zipfile.ZipFile(StringIO.StringIO(request.data))
        zfile.extractall(path=app.config['base_path'])
        wfname = os.path.dirname(zfile.filelist[0].filename)
        workflow = Workflow(path=os.path.join(app.config['base_path'], wfname))
        from spreads.workflow import on_created
        on_created.send(workflow, workflow=workflow)
    else:
        data = json.loads(request.data)

        if data.get('config'):
            config = app.config['default_config'].with_overlay(
                data.get('config'))
        else:
            config = app.config['default_config']

        metadata = data.get('metadata', {})

        try:
            workflow = Workflow.create(location=app.config['base_path'],
                                       config=config,
                                       metadata=metadata)
        except ValidationError as e:
            return make_response(json.dumps(dict(errors=e.errors)), 400,
                                 {'Content-Type': 'application/json'})
    return make_response(json.dumps(workflow),
                         200, {'Content-Type': 'application/json'})
Example #10
def get_workflow(workflow_id):
    # See if the workflow is among our cached instances
    if workflow_id in WorkflowCache:
        return WorkflowCache[workflow_id]
    logger.debug("Loading workflow {0} from database".format(workflow_id))
    with open_connection() as con:
        db_data = con.execute("SELECT * FROM workflow WHERE workflow.id=?",
                              (workflow_id, )).fetchone()
    if db_data is None:
        logger.warning("Workflow {0} was not found.".format(workflow_id))
        return None

    db_workflow = DbWorkflow(*db_data)

    # Try to load configuration from database
    if db_workflow.config is not None:
        config = json.loads(db_workflow.config)
    else:
        config = None
    workflow = Workflow(path=Path(app.config['base_path']) / db_workflow.name,
                        config=config,
                        step=db_workflow.step,
                        step_done=bool(db_workflow.step_done),
                        id=workflow_id)
    WorkflowCache[workflow_id] = workflow
    return workflow
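DbWorkflow is consumed as DbWorkflow(*db_data), i.e. one positional field per column of the workflow table. A minimal sketch of a matching record type, assuming the column set implied by the attribute accesses above (the real schema is not shown in the excerpt):

import collections

# Field order must match the column order returned by SELECT *.
DbWorkflow = collections.namedtuple(
    'DbWorkflow', ['id', 'name', 'step', 'step_done', 'config'])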
Example #11
def list_workflows():
    """ Return a list of all workflows.

    :resheader Content-Type:    :mimetype:`application/json`
    """
    workflows = Workflow.find_all(app.config['base_path'])
    return make_response(json.dumps(workflows.values()),
                         200, {'Content-Type': 'application/json'})
Example #12
    def post(self):
        self.fp.close()
        with zipfile.ZipFile(self.fname) as zf:
            wfname = os.path.dirname(zf.namelist()[0])
            zf.extractall(path=self.base_path)
        os.unlink(self.fname)

        workflow = Workflow(path=os.path.join(self.base_path, wfname))
        from spreads.workflow import on_created
        on_created.send(workflow, workflow=workflow)

        self.set_header('Content-Type', 'application/json')
        self.write(json.dumps(workflow, cls=util.CustomJSONEncoder))
Example #13
    def get(self, workflow_id, filename):
        uuid.UUID(workflow_id)
        workflow = Workflow.find_by_id(self.base_path, workflow_id)
        zstream = workflow.bag.package_as_zipstream(compression=None)

        self.set_status(200)
        self.set_header('Content-type', 'application/zip')
        self.set_header('Content-length',
                        str(self.calculate_zipsize(zstream.paths_to_write)))

        self.zstream_iter = iter(zstream)

        self.send_next_chunk()
Example #14
    def get(self, workflow_id, filename):
        uuid.UUID(workflow_id)
        workflow = Workflow.find_by_id(self.base_path, workflow_id)

        self.set_status(200)
        self.set_header('Content-type', 'application/tar')
        self.set_header('Content-length', self.calculate_tarsize(workflow))

        self.fp = QueueIO(self)
        self.thread = threading.Thread(target=self.create_tar,
                                       args=(workflow, self.on_done,
                                             self.on_exception))
        self.thread.start()
        self.send_next_chunk()
Example #15
def create_workflow():
    """ Create a new workflow.

    Payload should be a JSON object. The only required attribute is 'name' for
    the desired workflow name. Optionally, 'config' can be set to a
    configuration object in the form "plugin_name: { setting: value, ...}".

    Returns the newly created workflow as a JSON object.
    """
    if request.content_type == 'application/zip':
        zfile = zipfile.ZipFile(StringIO.StringIO(request.data))
        zfile.extractall(path=app.config['base_path'])
        wfname = os.path.dirname(zfile.filelist[0].filename)
        workflow = Workflow(path=os.path.join(app.config['base_path'], wfname))
        from spreads.workflow import on_created
        on_created.send(workflow, workflow=workflow)
    else:
        data = json.loads(request.data)

        if data.get('config'):
            config = app.config['default_config'].with_overlay(
                data.get('config'))
        else:
            config = app.config['default_config']

        metadata = data.get('metadata', {})

        try:
            workflow = Workflow.create(location=app.config['base_path'],
                                       name=unicode(data['name']),
                                       config=config,
                                       metadata=metadata)
        except ValidationError as e:
            return make_response(json.dumps(dict(errors=e.errors)), 400,
                                 {'Content-Type': 'application/json'})
    return make_response(json.dumps(workflow),
                         200, {'Content-Type': 'application/json'})
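The application/zip branch accepts a previously exported workflow archive instead of a JSON description. A minimal client-side sketch of that variant, assuming the same illustrative /workflow route as before (the zip path is hypothetical):

import requests

with open('/tmp/my-book-scan.zip', 'rb') as fp:
    resp = requests.post('http://localhost:5000/workflow',
                         data=fp.read(),
                         headers={'Content-Type': 'application/zip'})
print(resp.json())  # the workflow reconstructed from the archive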
Example #16
    def get(self, workflow_id, filename):
        uuid.UUID(workflow_id)
        workflow = Workflow.find_by_id(self.base_path, workflow_id)

        self.set_status(200)
        self.set_header('Content-type', 'application/tar')
        self.set_header('Content-length', self.calculate_tarsize(workflow))

        self.fp = QueueIO(self)
        self.thread = threading.Thread(
            target=self.create_tar,
            args=(workflow, self.on_done, self.on_exception)
        )
        self.thread.start()
        self.send_next_chunk()
Example #17
def transfer_to_stick(wf_id, base_path):
    workflow = Workflow.find_by_id(base_path, wf_id)
    stick = find_stick()
    files = list(workflow.path.rglob('*'))
    num_files = len(files)
    # Filter out problematic characters
    clean_name = (workflow.path.name.replace(':', '_').replace('/', '_'))
    workflow.status['step'] = 'transfer'
    try:
        if IS_WIN:
            target_path = Path(stick) / clean_name
        else:
            mount = stick.get_dbus_method(
                "FilesystemMount",
                dbus_interface="org.freedesktop.UDisks.Device")
            mount_point = mount('', [])
            target_path = Path(mount_point) / clean_name
        if target_path.exists():
            shutil.rmtree(unicode(target_path))
        target_path.mkdir()
        signals['transfer:started'].send(workflow)
        for num, path in enumerate(files, 1):
            signals['transfer:progressed'].send(
                workflow, progress=(num / num_files) * 0.79,
                status=path.name)
            workflow.status['step_done'] = (num / num_files) * 0.79
            target = target_path / path.relative_to(workflow.path)
            if path.is_dir():
                target.mkdir()
            else:
                shutil.copyfile(unicode(path), unicode(target))
    finally:
        if 'mount_point' in locals():
            signals['transfer:progressed'].send(workflow,
                                                progress=0.8,
                                                status="Syncing...")
            workflow.status['step_done'] = 0.8
            unmount = stick.get_dbus_method(
                "FilesystemUnmount",
                dbus_interface="org.freedesktop.UDisks.Device")
            # dbus-python doesn't know an infinite timeout... unmounting
            # sometimes takes a long time, since the device has to be synced.
            unmount([], timeout=1e6)
        signals['transfer:completed'].send(workflow)
        workflow.status['step'] = None
Example #18
def prepare_capture(workflow):
    """ Prepare capture for the requested workflow. """
    # Check if any other workflow is active and finish it, if necessary
    logger.debug("Finishing previous workflows")
    wfitems = Workflow.find_all(app.config['base_path'], key='id').iteritems()
    for wfid, wf in wfitems:
        if wf.status['step'] == 'capture' and wf.status['prepared']:
            if wf is workflow and not request.args.get('reset'):
                return 'OK'
            wf.finish_capture()
    try:
        workflow.prepare_capture()
    except DeviceException as e:
        logger.error(e)
        raise ApiException("Could not prepare capture: {0}".format(e.message),
                           500, error_type='device')
    return 'OK'
Example #19
def capture(config):
    path = config['path'].get()
    workflow = Workflow(config=config, path=path)
    if len(workflow.devices) != 2:
        raise DeviceException("Please connect and turn on two"
                              " pre-configured devices! ({0} were"
                              " found)".format(len(workflow.devices)))
    print(colorize("Found {0} devices!".format(len(workflow.devices)),
                   colorama.Fore.GREEN))
    if any(not x.target_page for x in workflow.devices):
        raise DeviceException("At least one of the devices has not been"
                              " properly configured, please re-run the"
                              " program with the \'configure\' option!")
    # Set up for capturing
    print("Setting up devices for capturing.")
    workflow.prepare_capture()
    # Start capture loop
    shot_count = 0
    pages_per_hour = 0
    capture_keys = workflow.config['capture']['capture_keys'].as_str_seq()
    print("({0}) capture | (r) retake last shot | (f) finish "
          .format("/".join(capture_keys)))
    while True:
        retake = False
        char = getch().lower()
        if char == 'f':
            break
        elif char == 'r':
            retake = True
        elif char not in capture_keys:
            continue
        workflow.capture(retake=retake)
        shot_count += len(workflow.devices)
        pages_per_hour = (3600 / (time.time() -
                                  workflow.capture_start)) * shot_count
        status = ("\rShot {0: >3} pages [{1: >4.0f}/h] "
                  .format(unicode(shot_count), pages_per_hour))
        sys.stdout.write(status)
        sys.stdout.flush()
    workflow.finish_capture()
    if workflow.capture_start is None:
        return
    sys.stdout.write("\rShot {0} pages in {1:.1f} minutes, average speed was"
                     " {2:.0f} pages per hour\n"
                     .format(colorize(str(shot_count), colorama.Fore.GREEN),
                             (time.time() - workflow.capture_start)/60,
                             pages_per_hour))
    sys.stdout.flush()
Example #20
def transfer_to_stick(wf_id, base_path):
    workflow = Workflow.find_by_id(base_path, wf_id)
    stick = find_stick()
    files = list(workflow.path.rglob('*'))
    num_files = len(files)
    # Filter out problematic characters
    clean_name = (workflow.path.name.replace(':', '_')
                                    .replace('/', '_'))
    workflow.status['step'] = 'transfer'
    try:
        if IS_WIN:
            target_path = Path(stick)/clean_name
        else:
            mount = stick.get_dbus_method(
                "FilesystemMount",
                dbus_interface="org.freedesktop.UDisks.Device")
            mount_point = mount('', [])
            target_path = Path(mount_point)/clean_name
        if target_path.exists():
            shutil.rmtree(unicode(target_path))
        target_path.mkdir()
        signals['transfer:started'].send(workflow)
        for num, path in enumerate(files, 1):
            signals['transfer:progressed'].send(
                workflow, progress=(num/num_files)*0.79, status=path.name)
            workflow.status['step_done'] = (num/num_files)*0.79
            target = target_path/path.relative_to(workflow.path)
            if path.is_dir():
                target.mkdir()
            else:
                shutil.copyfile(unicode(path), unicode(target))
    finally:
        if 'mount_point' in locals():
            signals['transfer:progressed'].send(workflow, progress=0.8,
                                                status="Syncing...")
            workflow.status['step_done'] = 0.8
            unmount = stick.get_dbus_method(
                "FilesystemUnmount",
                dbus_interface="org.freedesktop.UDisks.Device")
            unmount([], timeout=1e6)  # dbus-python doesn't know an infinite
                                      # timeout... unmounting sometimes takes a
                                      # long time, since the device has to be
                                      # synced.
        signals['transfer:completed'].send(workflow)
        workflow.status['step'] = None
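A caveat for readers running these snippets standalone: progress expressions like (num/num_files)*0.79 above, or transferred/zsize in the upload examples further below, only yield useful intermediate values under true division. The surrounding modules target Python 2, so they presumably enable it via a __future__ import that is not visible in the excerpt; without it, int / int truncates to zero:

from __future__ import division  # makes 3 / 10 == 0.3 rather than 0
print((3 / 10) * 0.79)           # 0.237; without the import this prints 0.0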
Example #21
def prepare_capture(workflow):
    """ Prepare capture for the requested workflow.

    """
    if app.config['mode'] not in ('scanner', 'full'):
        raise ApiException("Only possible when running in 'scanner' or 'full'"
                           " mode.", 503)

    # Check if any other workflow is active and finish it, if necessary
    logger.debug("Finishing previous workflows")
    wfitems = Workflow.find_all(app.config['base_path'], key='id').iteritems()
    for wfid, wf in wfitems:
        if wf.status['step'] == 'capture' and wf.status['prepared']:
            if wf is workflow and not request.args.get('reset'):
                return 'OK'
            wf.finish_capture()
    workflow.prepare_capture()
    return 'OK'
Example #22
def create_workflow():
    """ Create a new workflow.

    Returns the newly created workflow as a JSON object.
    """
    data = json.loads(request.data)

    if data.get('config'):
        config = app.config['default_config'].with_overlay(
            data.get('config'))
    else:
        config = app.config['default_config']

    metadata = data.get('metadata', {})

    workflow = Workflow.create(location=app.config['base_path'],
                               config=config,
                               metadata=metadata)
    return make_response(json.dumps(workflow),
                         200, {'Content-Type': 'application/json'})
Example #23
def delete_workflow(workflow):
    """ Delete a single workflow from database and disk. """
    Workflow.remove(workflow)
    return jsonify({})
Example #24
def workflow(config):
    from spreads.workflow import Workflow
    wf = Workflow(path="/tmp/foobar", config=config)
    return wf
Example #25
def process_workflow(wf_id, base_path):
    workflow = Workflow.find_by_id(base_path, wf_id)
    logger.debug("Initiating processing for workflow {0}".format(
        workflow.slug))
    workflow.process()
Example #26
def output_workflow(wf_id, base_path):
    workflow = Workflow.find_by_id(base_path, wf_id)
    logger.debug("Initiating output generation for workflow {0}".format(
        workflow.slug))
    workflow.output()
Example #27
def output(config):
    path = config['path'].get()
    workflow = Workflow(config=config, path=path)
    workflow.output()
Example #28
def postprocess(config):
    path = config['path'].get()
    workflow = Workflow(config=config, path=path)
    workflow.process()
Example #29
def upload_workflow(wf_id, base_path, endpoint, user_config,
                    start_process=False, start_output=False):
    logger.debug("Uploading workflow to postprocessing server")

    workflow = Workflow.find_by_id(base_path, wf_id)
    # NOTE: This is kind of nasty.... We temporarily write the user-supplied
    # configuration to the bag, update the tag-payload, create the zip, and
    # once everything is done, we restore the old version
    tmp_cfg = copy.deepcopy(workflow.config)
    tmp_cfg.set(user_config)
    tmp_cfg_path = workflow.path/'config.yml'
    tmp_cfg.dump(filename=unicode(tmp_cfg_path),
                 sections=(user_config['plugins'] + ["plugins", "device"]))
    workflow.bag.add_tagfiles(unicode(tmp_cfg_path))

    # Create a zipstream from the workflow-bag
    zstream = workflow.bag.package_as_zipstream(compression=None)
    zstream_copy = copy.deepcopy(zstream)
    zsize = sum(len(x) for x in zstream_copy)

    def zstream_wrapper():
        """ Wrapper around our zstream so we can emit a signal when all data
        has been streamed to the client.
        """
        transferred = 0
        progress = "0.00"
        for data in zstream:
            yield data
            transferred += len(data)
            # Only update progress if we've progressed by at least 0.01
            new_progress = "{0:.2f}".format(transferred/zsize)
            if new_progress != progress:
                progress = new_progress
                signals['submit:progressed'].send(
                    workflow, progress=float(progress),
                    status="Uploading workflow...")

    # NOTE: This is necessary since requests makes a chunked upload when
    #       passed a plain generator, which is not supported by the WSGI
    #       protocol that receives it. Hence we wrap it inside of a
    #       GeneratorIO to make it appear as a file-like object with a
    #       known size.
    zstream_fp = GeneratorIO(zstream_wrapper(), zsize)
    signals['submit:started'].send(workflow)
    resp = requests.post(endpoint, data=zstream_fp,
                         headers={'Content-Type': 'application/zip'})
    if not resp:
        error_msg = "Upload failed: {0}".format(resp.content)
        signals['submit:error'].send(workflow, message=error_msg,
                                     data=resp.content)
        logger.error(error_msg)
    else:
        wfid = resp.json()['id']
        if start_process:
            requests.post(endpoint + "/{0}/process".format(wfid))
        if start_output:
            requests.post(endpoint + "/{0}/output".format(wfid))
        signals['submit:completed'].send(workflow, remote_id=wfid)

    # Restore our old configuration
    workflow._save_config()
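GeneratorIO is what makes the sized upload work: requests only sets a Content-Length header when it can determine the body's length, so the generator is wrapped in a file-like object that reports one. A minimal sketch of such a wrapper, following the NOTE above (the real spreads.util.GeneratorIO may differ in detail):

class GeneratorIO(object):
    """ File-like object that reads from a generator and reports a
    predetermined size, so HTTP clients can send Content-Length. """

    def __init__(self, generator, length):
        self._generator = generator
        self._buffer = b''
        self.len = length  # requests picks this up when sizing the body

    def read(self, size=-1):
        # Pull chunks from the generator until the request can be satisfied
        while size < 0 or len(self._buffer) < size:
            try:
                self._buffer += next(self._generator)
            except StopIteration:
                break
        if size < 0:
            data, self._buffer = self._buffer, b''
        else:
            data, self._buffer = self._buffer[:size], self._buffer[size:]
        return data

    def __len__(self):
        return self.len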
Example #30
def upload_workflow(wf_id,
                    base_path,
                    endpoint,
                    user_config,
                    start_process=False,
                    start_output=False):
    logger.debug("Uploading workflow to postprocessing server")

    workflow = Workflow.find_by_id(base_path, wf_id)
    # NOTE: This is kind of nasty.... We temporarily write the user-supplied
    # configuration to the bag, update the tag-payload, create the zip, and
    # once everything is done, we restore the old version
    tmp_cfg = copy.deepcopy(workflow.config)
    tmp_cfg.set(user_config)
    tmp_cfg_path = workflow.path / 'config.yml'
    tmp_cfg.dump(filename=unicode(tmp_cfg_path),
                 sections=(user_config['plugins'] + ["plugins", "device"]))
    workflow.bag.add_tagfiles(unicode(tmp_cfg_path))

    # Create a zipstream from the workflow-bag
    zstream = workflow.bag.package_as_zipstream(compression=None)
    zsize = calculate_zipsize(zstream.paths_to_write)

    def zstream_wrapper():
        """ Wrapper around our zstream so we can emit a signal when all data
        has been streamed to the client.
        """
        transferred = 0
        progress = "0.00"
        for data in zstream:
            yield data
            transferred += len(data)
            # Only update progress if we've progressed by at least 0.01
            new_progress = "{0:.2f}".format(transferred / zsize)
            if new_progress != progress:
                progress = new_progress
                signals['submit:progressed'].send(
                    workflow,
                    progress=float(progress),
                    status="Uploading workflow...")

    # NOTE: This is necessary since requests makes a chunked upload when
    #       passed a plain generator, which is not supported by the WSGI
    #       protocol that receives it. Hence we wrap it inside of a
    #       GeneratorIO to make it appear as a file-like object with a
    #       known size.
    zstream_fp = GeneratorIO(zstream_wrapper(), zsize)
    logger.debug("Projected size for upload: {}".format(zsize))
    signals['submit:started'].send(workflow)
    resp = requests.post(endpoint,
                         data=zstream_fp,
                         headers={'Content-Type': 'application/zip'})
    if not resp:
        error_msg = "Upload failed: {0}".format(resp.content)
        signals['submit:error'].send(workflow,
                                     message=error_msg,
                                     data=resp.content)
        logger.error(error_msg)
    else:
        wfid = resp.json()['id']
        if start_process:
            requests.post(endpoint + "/{0}/process".format(wfid))
        if start_output:
            requests.post(endpoint + "/{0}/output".format(wfid))
        signals['submit:completed'].send(workflow, remote_id=wfid)

    # Restore our old configuration
    workflow._save_config()
Example #31
def capture(config):
    path = config['path'].get()
    workflow = Workflow(config=config, path=path)
    workflow.on_created.send(workflow=workflow)
    capture_keys = workflow.config['capture']['capture_keys'].as_str_seq()

    # Some closures
    def refresh_stats():
        # Callback to print statistics
        if refresh_stats.start_time is not None:
            pages_per_hour = ((3600/(time.time() - refresh_stats.start_time))
                              * workflow.pages_shot)
        else:
            pages_per_hour = 0.0
            refresh_stats.start_time = time.time()
        status = ("\rShot {0: >3} pages [{1: >4.0f}/h] "
                  .format(unicode(workflow.pages_shot), pages_per_hour))
        sys.stdout.write(status)
        sys.stdout.flush()
    refresh_stats.start_time = None

    def trigger_loop():
        is_posix = sys.platform != 'win32'
        old_count = workflow.pages_shot
        if is_posix:
            import select
            old_settings = termios.tcgetattr(sys.stdin)
            data_available = lambda: (select.select([sys.stdin], [], [], 0) ==
                                      ([sys.stdin], [], []))
            read_char = lambda: sys.stdin.read(1)
        else:
            data_available = msvcrt.kbhit
            read_char = msvcrt.getch

        try:
            if is_posix:
                tty.setcbreak(sys.stdin.fileno())
            while True:
                time.sleep(0.01)
                if workflow.pages_shot != old_count:
                    old_count = workflow.pages_shot
                    refresh_stats()
                if not data_available():
                    continue
                char = read_char()
                if char in tuple(capture_keys) + ('r', ):
                    workflow.capture(retake=(char == 'r'))
                    refresh_stats()
                elif char == 'f':
                    break
        finally:
            if is_posix:
                termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings)

    if len(workflow.devices) != 2:
        raise DeviceException("Please connect and turn on two"
                              " pre-configured devices! ({0} were"
                              " found)".format(len(workflow.devices)))
    print(colorize("Found {0} devices!".format(len(workflow.devices)),
                   colorama.Fore.GREEN))
    if any(not x.target_page for x in workflow.devices):
        raise DeviceException("At least one of the devices has not been"
                              " properly configured, please re-run the"
                              " program with the \'configure\' option!")
    # Set up for capturing
    print("Setting up devices for capturing.")
    workflow.prepare_capture()

    print("({0}) capture | (r) retake last shot | (f) finish "
          .format("/".join(capture_keys)))
    # Start trigger loop
    trigger_loop()

    workflow.finish_capture()
Example #32
def process_workflow(wf_id, base_path):
    workflow = Workflow.find_by_id(base_path, wf_id)
    logger.debug("Initiating processing for workflow {0}"
                 .format(workflow.slug))
    workflow.process()
Example #33
def output_workflow(wf_id, base_path):
    workflow = Workflow.find_by_id(base_path, wf_id)
    logger.debug("Initiating output generation for workflow {0}"
                 .format(workflow.slug))
    workflow.output()
Example #34
def workflow(config, tmpdir):
    from spreads.workflow import Workflow
    wf = Workflow(path=unicode(tmpdir), config=config)
    return wf
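This fixture, like the /tmp/foobar variant earlier, follows the usual pytest pattern: a test receives a ready-made Workflow simply by naming the fixture as a parameter. A minimal consuming test (the asserted attributes are assumptions about the Workflow API):

def test_workflow_picks_up_config(workflow):
    # pytest injects the `workflow` fixture defined above
    assert workflow.path is not None
    assert workflow.config is not None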
Example #35
def capture(config):
    path = config['path'].get()
    workflow = Workflow(config=config, path=path)
    workflow.on_created.send(workflow=workflow)
    capture_keys = workflow.config['core']['capture_keys'].as_str_seq()

    # Some closures
    def refresh_stats():
        # Callback to print statistics
        if refresh_stats.start_time is not None:
            pages_per_hour = ((3600 /
                               (time.time() - refresh_stats.start_time)) *
                              workflow.pages_shot)
        else:
            pages_per_hour = 0.0
            refresh_stats.start_time = time.time()
        status = ("\rShot {0: >3} pages [{1: >4.0f}/h] ".format(
            unicode(workflow.pages_shot), pages_per_hour))
        sys.stdout.write(status)
        sys.stdout.flush()

    refresh_stats.start_time = None

    def trigger_loop():
        is_posix = sys.platform != 'win32'
        old_count = workflow.pages_shot
        if is_posix:
            import select
            old_settings = termios.tcgetattr(sys.stdin)
            data_available = lambda: (select.select([sys.stdin], [], [], 0) ==
                                      ([sys.stdin], [], []))
            read_char = lambda: sys.stdin.read(1)
        else:
            data_available = msvcrt.kbhit
            read_char = msvcrt.getch

        try:
            if is_posix:
                tty.setcbreak(sys.stdin.fileno())
            while True:
                time.sleep(0.01)
                if workflow.pages_shot != old_count:
                    old_count = workflow.pages_shot
                    refresh_stats()
                if not data_available():
                    continue
                char = read_char()
                if char in tuple(capture_keys) + ('r', ):
                    workflow.capture(retake=(char == 'r'))
                    refresh_stats()
                elif char == 'f':
                    break
        finally:
            if is_posix:
                termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings)

    if len(workflow.devices) != 2:
        raise DeviceException("Please connect and turn on two"
                              " pre-configured devices! ({0} were"
                              " found)".format(len(workflow.devices)))
    print(
        colorize("Found {0} devices!".format(len(workflow.devices)),
                 colorama.Fore.GREEN))
    if any(not x.target_page for x in workflow.devices):
        raise DeviceException("At least one of the devices has not been"
                              " properly configured, please re-run the"
                              " program with the \'configure\' option!")
    # Set up for capturing
    print("Setting up devices for capturing.")
    workflow.prepare_capture()

    print("({0}) capture | (r) retake last shot | (f) finish ".format(
        "/".join(capture_keys)))
    # Start trigger loop
    trigger_loop()

    workflow.finish_capture()
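The trigger loop above combines two POSIX terminal techniques: cbreak mode, so keystrokes arrive immediately instead of waiting for Enter, and select-based polling, so the loop can keep refreshing statistics while no key is pressed. A standalone sketch of just that input pattern, independent of the Workflow API (POSIX only):

import select
import sys
import termios
import tty

def poll_keys():
    """ Report keystrokes as they arrive; 'f' finishes the loop. """
    old_settings = termios.tcgetattr(sys.stdin)
    try:
        tty.setcbreak(sys.stdin.fileno())  # per-character, unechoed input
        while True:
            # Non-blocking check whether a key is waiting (100ms timeout)
            ready, _, _ = select.select([sys.stdin], [], [], 0.1)
            if not ready:
                continue  # idle; a real loop would update statistics here
            char = sys.stdin.read(1)
            if char == 'f':
                break
            sys.stdout.write("got {0!r}\n".format(char))
    finally:
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings)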