Example #1
File: views.py Project: aichemzee/DIGITS
def show_classify_one(inference_job):

    # retrieve inference parameters
    model_job, paths, _ = inference_job.get_parameters()

    if inference_job.status.is_running():
        # the inference job is still running
        if request_wants_json():
            return flask.jsonify(inference_job.json_dict())
        else:
            return flask.render_template('inference/images/classification/classify_one.html',
                model_job          = model_job,
                job                = inference_job,
                running            = True,
                )
    else:
        # the inference job has completed

        # retrieve inference data
        inputs, outputs, visualizations = inference_job.get_data()

        # delete job
        scheduler.delete_job(inference_job)

        # remove file (fails silently if a URL was provided)
        try:
            os.remove(paths[0])
        except:
            pass

        image = None
        predictions = []
        if inputs is not None and len(inputs['data']) == 1:
            image = utils.image.embed_image_html(inputs['data'][0])
            # convert to class probabilities for viewing
            last_output_name, last_output_data = outputs.items()[-1]

            if len(last_output_data) == 1:
                scores = last_output_data[0].flatten()
                indices = (-scores).argsort()
                labels = model_job.train_task().get_labels()
                predictions = []
                for i in indices:
                    predictions.append( (labels[i], scores[i]) )
                predictions = [(p[0], round(100.0*p[1],2)) for p in predictions[:5]]

        if request_wants_json():
            return flask.jsonify({'predictions': predictions})
        else:
            return flask.render_template('inference/images/classification/classify_one.html',
                    model_job       = model_job,
                    job             = inference_job,
                    image_src       = image,
                    predictions     = predictions,
                    visualizations  = visualizations,
                    total_parameters= sum(v['param_count'] for v in visualizations if v['vis_type'] == 'Weights'),
                    running         = False,
                    )
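Every example on this page branches on request_wants_json(); the helper itself is not shown here. A minimal sketch of the usual Flask Accept-header negotiation it appears to perform (an assumption, not necessarily DIGITS's exact implementation):

import flask

def request_wants_json():
    """Return True when the client prefers application/json over text/html."""
    best = flask.request.accept_mimetypes.best_match(['application/json', 'text/html'])
    return (best == 'application/json' and
            flask.request.accept_mimetypes[best] >= flask.request.accept_mimetypes['text/html'])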
Example #2
File: views.py Project: dchall88/DIGITS
def image_classification_dataset_create():
    """
    Creates a new ImageClassificationDatasetJob

    Returns JSON when requested: {job_id,name,status} or {errors:[]}
    """
    form = ImageClassificationDatasetForm()

    ## Is there a request to clone a job with ?clone=<job_id>
    fill_form_if_cloned(form)

    if not form.validate_on_submit():
        if request_wants_json():
            return flask.jsonify({'errors': form.errors}), 400
        else:
            return flask.render_template(
                'datasets/images/classification/new.html', form=form), 400

    job = None
    try:
        job = ImageClassificationDatasetJob(
            name=form.dataset_name.data,
            image_dims=(
                int(form.resize_height.data),
                int(form.resize_width.data),
                int(form.resize_channels.data),
            ),
            resize_mode=form.resize_mode.data,
            bbox_mode=int(form.bbox_mode.data),
            scale_factor=float(form.scale_factor.data),
        )

        if form.method.data == 'folder':
            from_folders(job, form)

        elif form.method.data == 'textfile':
            from_files(job, form)

        elif form.method.data == 'jsonfile':
            from_json(job, form)

        else:
            raise ValueError('method not supported')

        ## Save form data with the job so we can easily clone it later.
        save_form_to_job(job, form)

        scheduler.add_job(job)
        if request_wants_json():
            return flask.jsonify(job.json_dict())
        else:
            return flask.redirect(
                flask.url_for('datasets_show', job_id=job.id()))

    except:
        if job:
            scheduler.delete_job(job)
        raise
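A hypothetical client-side call against the route above, asking for the JSON response via the Accept header. The URL and values are placeholders; the field names (dataset_name, resize_*, method) are taken from the form as used in this handler:

import requests

resp = requests.post(
    'http://localhost:5000/datasets/images/classification/new',  # placeholder URL
    headers={'Accept': 'application/json'},
    data={
        'dataset_name': 'my-dataset',
        'resize_height': 256,
        'resize_width': 256,
        'resize_channels': 3,
        'resize_mode': 'squash',
        'method': 'folder',
        # folder-specific fields omitted; they depend on the form definition
    },
)
# 400 with {'errors': ...} if validation fails, otherwise {job_id, name, status}
print(resp.status_code, resp.json())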
Example #3
File: views.py Project: yhgon/DIGITS
def create():
    """
    Creates a new ImageClassificationDatasetJob

    Returns JSON when requested: {job_id,name,status} or {errors:[]}
    """
    form = ImageClassificationDatasetForm()

    # Is there a request to clone a job with ?clone=<job_id>
    fill_form_if_cloned(form)

    if not form.validate_on_submit():
        if request_wants_json():
            return flask.jsonify({'errors': form.errors}), 400
        else:
            return flask.render_template('datasets/images/classification/new.html', form=form), 400

    job = None
    try:
        job = ImageClassificationDatasetJob(
            username=utils.auth.get_username(),
            name=form.dataset_name.data,
            group=form.group_name.data,
            image_dims=(
                int(form.resize_height.data),
                int(form.resize_width.data),
                int(form.resize_channels.data),
            ),
            resize_mode=form.resize_mode.data
        )

        if form.method.data == 'folder':
            from_folders(job, form)

        elif form.method.data == 'textfile':
            from_files(job, form)

        elif form.method.data == 's3':
            from_s3(job, form)

        else:
            raise ValueError('method not supported')

        # Save form data with the job so we can easily clone it later.
        save_form_to_job(job, form)

        scheduler.add_job(job)
        if request_wants_json():
            return flask.jsonify(job.json_dict())
        else:
            return flask.redirect(flask.url_for('digits.dataset.views.show', job_id=job.id()))

    except:
        if job:
            scheduler.delete_job(job)
        raise
Example #4
File: views.py Project: dchall88/DIGITS
def image_classification_dataset_create():
    """
    Creates a new ImageClassificationDatasetJob

    Returns JSON when requested: {job_id,name,status} or {errors:[]}
    """
    form = ImageClassificationDatasetForm()

    ## Is there a request to clone a job with ?clone=<job_id>
    fill_form_if_cloned(form)

    if not form.validate_on_submit():
        if request_wants_json():
            return flask.jsonify({"errors": form.errors}), 400
        else:
            return flask.render_template("datasets/images/classification/new.html", form=form), 400

    job = None
    try:
        job = ImageClassificationDatasetJob(
            name=form.dataset_name.data,
            image_dims=(int(form.resize_height.data), int(form.resize_width.data), int(form.resize_channels.data)),
            resize_mode=form.resize_mode.data,
            bbox_mode=int(form.bbox_mode.data),
            scale_factor=float(form.scale_factor.data),
        )

        if form.method.data == "folder":
            from_folders(job, form)

        elif form.method.data == "textfile":
            from_files(job, form)

        elif form.method.data == "jsonfile":
            from_json(job, form)

        else:
            raise ValueError("method not supported")

        ## Save form data with the job so we can easily clone it later.
        save_form_to_job(job, form)

        scheduler.add_job(job)
        if request_wants_json():
            return flask.jsonify(job.json_dict())
        else:
            return flask.redirect(flask.url_for("datasets_show", job_id=job.id()))

    except:
        if job:
            scheduler.delete_job(job)
        raise
Example #5
File: views.py Project: rupertsmall/DIGITS
def image_classification_model_classify_one():
    """
    Classify one image and return the top 5 classifications

    Returns JSON when requested: {predictions: {category: confidence,...}}
    """
    job = job_from_request()

    image = None
    if "image_url" in flask.request.form and flask.request.form["image_url"]:
        image = utils.image.load_image(flask.request.form["image_url"])
    elif "image_file" in flask.request.files and flask.request.files["image_file"]:
        outfile = tempfile.mkstemp(suffix=".bin")
        flask.request.files["image_file"].save(outfile[1])
        image = utils.image.load_image(outfile[1])
        os.close(outfile[0])
        os.remove(outfile[1])
    else:
        raise werkzeug.exceptions.BadRequest("must provide image_url or image_file")

    # resize image
    db_task = job.train_task().dataset.train_db_task()
    height = db_task.image_dims[0]
    width = db_task.image_dims[1]
    if job.train_task().crop_size:
        height = job.train_task().crop_size
        width = job.train_task().crop_size
    image = utils.image.resize_image(
        image, height, width, channels=db_task.image_dims[2], resize_mode=db_task.resize_mode
    )

    epoch = None
    if "snapshot_epoch" in flask.request.form:
        epoch = float(flask.request.form["snapshot_epoch"])

    layers = "none"
    if "show_visualizations" in flask.request.form and flask.request.form["show_visualizations"]:
        layers = "all"

    predictions, visualizations = None, None
    try:
        predictions, visualizations = job.train_task().infer_one(image, snapshot_epoch=epoch, layers=layers)
    except frameworks.errors.InferenceError as e:
        return e.__str__(), 403

    # take top 5
    if predictions:
        predictions = [(p[0], round(100.0 * p[1], 2)) for p in predictions[:5]]

    if request_wants_json():
        return flask.jsonify({"predictions": predictions})
    else:
        return flask.render_template(
            "models/images/classification/classify_one.html",
            job=job,
            image_src=utils.image.embed_image_html(image),
            predictions=predictions,
            visualizations=visualizations,
            total_parameters=sum(v["param_count"] for v in visualizations if v["vis_type"] == "Weights"),
        )
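A hypothetical client call for the handler above, assuming job_from_request() resolves the job from a job_id field in the request. The endpoint path and job id are placeholders; the image_file/image_url/snapshot_epoch field names come from the handler itself:

import requests

with open('image.png', 'rb') as f:
    resp = requests.post(
        'http://localhost:5000/models/images/classification/classify_one',  # placeholder URL
        headers={'Accept': 'application/json'},
        data={'job_id': '20160101-120000-abcd'},  # placeholder job id
        files={'image_file': f},
    )
print(resp.json())  # e.g. {'predictions': [[label, confidence], ...]}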
Example #6
def handle_error(e):
    """
    Handle errors, formatting them as JSON if requested
    """
    error_type = type(e).__name__
    message = str(e)
    trace = None
    description = None
    status_code = 500
    if isinstance(e, werkzeug.exceptions.HTTPException):
        status_code = e.code
        description = e.description
    if app.debug:
        trace = traceback.format_exc()

    if request_wants_json():
        details = {"message": message, "type": error_type}
        if description is not None:
            details["description"] = description
        if trace is not None:
            details["trace"] = trace.split("\n")
        return flask.jsonify({"error": details}), status_code
    else:
        return (
            flask.render_template(
                "error.html", title=error_type, message=message, description=description, trace=trace
            ),
            status_code,
        )
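How handle_error gets attached to the Flask app is not shown in these snippets. A sketch of the standard Flask/werkzeug registration pattern that would route both HTTP errors and uncaught exceptions through it (an assumption about the surrounding application setup):

import flask
import werkzeug.exceptions

# Stand-in Flask app; in DIGITS this would be the existing application object.
app = flask.Flask(__name__)

# Send every standard HTTP error code, plus any uncaught exception, to handle_error.
for code in werkzeug.exceptions.default_exceptions:
    app.register_error_handler(code, handle_error)
app.register_error_handler(Exception, handle_error)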
Example #7
File: views.py Project: iwalkdaline/DIGITS
def handle_error(e):
    """
    Handle errors, formatting them as JSON if requested
    """
    error_type = type(e).__name__
    message = str(e)
    trace = None
    description = None
    status_code = 500
    if isinstance(e, werkzeug.exceptions.HTTPException):
        status_code = e.code
        description = e.description
    if app.debug:
        trace = traceback.format_exc()

    if request_wants_json():
        details = {
            'message': message,
            'type': error_type,
        }
        if description is not None:
            details['description'] = description
        if trace is not None:
            details['trace'] = trace.split('\n')
        return flask.jsonify({'error': details}), status_code
    else:
        return flask.render_template(
            'error.html',
            title=error_type,
            message=message,
            description=description,
            trace=trace,
        ), status_code
Example #8
File: views.py Project: Cloud-CV/DIGITS
def handle_error(e):
    """
    Handle errors, formatting them as JSON if requested
    """
    error_type = type(e).__name__
    message = str(e)
    trace = None
    description = None
    status_code = 500
    if isinstance(e, werkzeug.exceptions.HTTPException):
        status_code = e.code
        description = e.description
    if app.debug:
        trace = traceback.format_exc()

    if request_wants_json():
        details = {
                'message': message,
                'type': error_type,
                }
        if description is not None:
            details['description'] = description
        if trace is not None:
            details['trace'] = trace.split('\n')
        return flask.jsonify({'error': details}), status_code
    else:
        return flask.render_template('error.html',
                title       = error_type,
                message     = message,
                description = description,
                trace       = trace,
                ), status_code
Example #9
def generic_image_model_infer_one():
    """
    Infer one image
    """
    job = job_from_request()

    image = None
    if 'image_url' in flask.request.form and flask.request.form['image_url']:
        image = utils.image.load_image(flask.request.form['image_url'])
    elif 'image_file' in flask.request.files and flask.request.files[
            'image_file']:
        outfile = tempfile.mkstemp(suffix='.bin')
        flask.request.files['image_file'].save(outfile[1])
        image = utils.image.load_image(outfile[1])
        os.close(outfile[0])
        os.remove(outfile[1])
    else:
        raise werkzeug.exceptions.BadRequest(
            'must provide image_url or image_file')

    # resize image
    db_task = job.train_task().dataset.analyze_db_tasks()[0]
    height = db_task.image_height
    width = db_task.image_width
    if job.train_task().crop_size:
        height = job.train_task().crop_size
        width = job.train_task().crop_size
    image = utils.image.resize_image(
        image,
        height,
        width,
        channels=db_task.image_channels,
        resize_mode='squash',
    )

    epoch = None
    if 'snapshot_epoch' in flask.request.form:
        epoch = float(flask.request.form['snapshot_epoch'])

    layers = 'none'
    if 'show_visualizations' in flask.request.form and flask.request.form[
            'show_visualizations']:
        layers = 'all'

    outputs, visualizations = job.train_task().infer_one(image,
                                                         snapshot_epoch=epoch,
                                                         layers=layers)

    if request_wants_json():
        return flask.jsonify({
            'outputs':
            dict((name, blob.tolist()) for name, blob in outputs.iteritems())
        })
    else:
        return flask.render_template(
            'models/images/generic/infer_one.html',
            image_src=utils.image.embed_image_html(image),
            network_outputs=outputs,
            visualizations=visualizations,
        )
Example #10
def show(job_id):
    """
    Show a DatasetJob

    Returns JSON when requested:
        {id, name, directory, status}
    """
    job = scheduler.get_job(job_id)
    if job is None:
        raise werkzeug.exceptions.NotFound('Job not found')

    related_jobs = scheduler.get_related_jobs(job)

    if request_wants_json():
        return flask.jsonify(job.json_dict(True))
    else:
        if isinstance(job, dataset_images.ImageClassificationDatasetJob):
            return dataset_images.classification.views.show(
                job, related_jobs=related_jobs)
        elif isinstance(job, dataset_images.GenericImageDatasetJob):
            return dataset_images.generic.views.show(job,
                                                     related_jobs=related_jobs)
        elif isinstance(job, generic.GenericDatasetJob):
            return generic.views.show(job, related_jobs=related_jobs)
        else:
            raise werkzeug.exceptions.BadRequest('Invalid job type')
Example #11
def image_classification_model_classify_one():
    """
    Classify one image and return the top 5 classifications

    Returns JSON when requested: {predictions: {category: confidence,...}}
    """
    job = job_from_request()

    image = None
    if 'image_url' in flask.request.form and flask.request.form['image_url']:
        image = utils.image.load_image(flask.request.form['image_url'])
    elif 'image_file' in flask.request.files and flask.request.files[
            'image_file']:
        outfile = tempfile.mkstemp(suffix='.bin')
        flask.request.files['image_file'].save(outfile[1])
        image = utils.image.load_image(outfile[1])
        os.close(outfile[0])
        os.remove(outfile[1])
    else:
        raise werkzeug.exceptions.BadRequest(
            'must provide image_url or image_file')

    # resize image
    db_task = job.train_task().dataset.train_db_task()
    height = db_task.image_dims[0]
    width = db_task.image_dims[1]
    if job.train_task().crop_size:
        height = job.train_task().crop_size
        width = job.train_task().crop_size
    image = utils.image.resize_image(
        image,
        height,
        width,
        channels=db_task.image_dims[2],
        resize_mode=db_task.resize_mode,
    )

    epoch = None
    if 'snapshot_epoch' in flask.request.form:
        epoch = float(flask.request.form['snapshot_epoch'])

    layers = 'none'
    if 'show_visualizations' in flask.request.form and flask.request.form[
            'show_visualizations']:
        layers = 'all'

    predictions, visualizations = job.train_task().infer_one(
        image, snapshot_epoch=epoch, layers=layers)
    # take top 5
    predictions = [(p[0], round(100.0 * p[1], 2)) for p in predictions[:5]]

    if request_wants_json():
        return flask.jsonify({'predictions': predictions})
    else:
        return flask.render_template(
            'models/images/classification/classify_one.html',
            image_src=utils.image.embed_image_html(image),
            predictions=predictions,
            visualizations=visualizations,
        )
Example #12
def generic_image_model_infer_many():
    """
    Infer many images
    """
    job = job_from_request()

    image_list = flask.request.files.get("image_list")
    if not image_list:
        raise werkzeug.exceptions.BadRequest("image_list is a required field")

    epoch = None
    if "snapshot_epoch" in flask.request.form:
        epoch = float(flask.request.form["snapshot_epoch"])

    paths = []
    images = []

    db_task = job.train_task().dataset.analyze_db_tasks()[0]
    height = db_task.image_height
    width = db_task.image_width
    channels = db_task.image_channels

    for line in image_list.readlines():
        line = line.strip()
        if not line:
            continue

        path = None
        # might contain a numerical label at the end
        match = re.match(r"(.*\S)\s+\d+$", line)
        if match:
            path = match.group(1)
        else:
            path = line

        try:
            image = utils.image.load_image(path)
            image = utils.image.resize_image(image, height, width, channels=channels, resize_mode="squash")
            paths.append(path)
            images.append(image)
        except utils.errors.LoadImageError as e:
            print e

    if not len(images):
        raise werkzeug.exceptions.BadRequest("Unable to load any images from the file")

    outputs = job.train_task().infer_many(images, snapshot_epoch=epoch)
    if outputs is None:
        raise RuntimeError("An error occured while processing the images")

    if request_wants_json():
        result = {}
        for i, path in enumerate(paths):
            result[path] = dict((name, blob[i].tolist()) for name, blob in outputs.iteritems())
        return flask.jsonify({"outputs": result})
    else:
        return flask.render_template(
            "models/images/generic/infer_many.html", job=job, paths=paths, network_outputs=outputs
        )
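Each line of the uploaded image_list is a path optionally followed by a numeric label, which the regex above strips before loading the image. A self-contained illustration of that parsing step:

import re

for line in ['/data/img_001.png 7', '/data/img_002.png', '   ']:
    line = line.strip()
    if not line:
        continue  # skip blank lines, as the handler does
    match = re.match(r'(.*\S)\s+\d+$', line)
    path = match.group(1) if match else line
    print(path)  # -> /data/img_001.png, /data/img_002.png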
Example #13
File: views.py Project: colek42/DIGITS
def image_classification_model_classify_one():
    """
    Classify one image and return the top 5 classifications

    Returns JSON when requested: {predictions: {category: confidence,...}}
    """
    job = job_from_request()

    image = None
    if 'image_url' in flask.request.form and flask.request.form['image_url']:
        image = utils.image.load_image(flask.request.form['image_url'])
    elif 'image_file' in flask.request.files and flask.request.files['image_file']:
        outfile = tempfile.mkstemp(suffix='.bin')
        flask.request.files['image_file'].save(outfile[1])
        image = utils.image.load_image(outfile[1])
        os.close(outfile[0])
        os.remove(outfile[1])
    else:
        raise werkzeug.exceptions.BadRequest('must provide image_url or image_file')

    # resize image
    db_task = job.train_task().dataset.train_db_task()
    height = db_task.image_dims[0]
    width = db_task.image_dims[1]
    if job.train_task().crop_size:
        height = job.train_task().crop_size
        width = job.train_task().crop_size
    image = utils.image.resize_image(image, height, width,
            channels = db_task.image_dims[2],
            resize_mode = db_task.resize_mode,
            )

    epoch = None
    if 'snapshot_epoch' in flask.request.form:
        epoch = float(flask.request.form['snapshot_epoch'])

    layers = 'none'
    if 'show_visualizations' in flask.request.form and flask.request.form['show_visualizations']:
        layers = 'all'

    predictions, visualizations = None, None
    predictions, visualizations = job.train_task().infer_one(image, snapshot_epoch=epoch, layers=layers)

    # take top 5
    if predictions:
        predictions = [(p[0], round(100.0*p[1],2)) for p in predictions[:5]]

    if request_wants_json():
        return flask.jsonify({'predictions': predictions})
    else:
        return flask.render_template('models/images/classification/classify_one.html',
                job             = job,
                image_src       = utils.image.embed_image_html(image),
                predictions     = predictions,
                visualizations  = visualizations,
                total_parameters= sum(v['param_count'] for v in visualizations if v['vis_type'] == 'Weights'),
                )
Example #14
def image_classification_dataset_create():
    """
    Creates a new ImageClassificationDatasetJob

    Returns JSON when requested: {job_id,name,status} or {errors:[]}
    """
    form = ImageClassificationDatasetForm()
    if not form.validate_on_submit():
        if request_wants_json():
            return flask.jsonify({'errors': form.errors}), 400
        else:
            return flask.render_template('datasets/images/classification/new.html', form=form), 400

    job = None
    try:
        job = ImageClassificationDatasetJob(
                name        = form.dataset_name.data,
                image_dims  = (
                    int(form.resize_height.data),
                    int(form.resize_width.data),
                    int(form.resize_channels.data),
                    ),
                resize_mode = form.resize_mode.data
                )

        if form.method.data == 'folder':
            from_folders(job, form)

        elif form.method.data == 'textfile':
            from_files(job, form)

        else:
            raise ValueError('method not supported')

        scheduler.add_job(job)
        if request_wants_json():
            return flask.jsonify(job.json_dict())
        else:
            return flask.redirect(flask.url_for('datasets_show', job_id=job.id()))

    except:
        if job:
            scheduler.delete_job(job)
        raise
Example #15
File: views.py Project: aichemzee/DIGITS
def show_top_n(inference_job):

    # retrieve inference parameters
    model_job, _, _, top_n = inference_job.get_parameters()

    if inference_job.status.is_running():
        # the inference job is still running
        if request_wants_json():
            return flask.jsonify(inference_job.json_dict())
        else:
            return flask.render_template('inference/images/classification/top_n.html',
                model_job          = model_job,
                job                = inference_job,
                running            = True,
                )
    else:

        # retrieve inference data
        inputs, outputs, _ = inference_job.get_data()

        # delete job
        scheduler.delete_job(inference_job)

        results = None
        if outputs is not None and len(outputs) > 0:
            # convert to class probabilities for viewing
            last_output_name, last_output_data = outputs.items()[-1]
            scores = last_output_data

            if scores is None:
                raise RuntimeError('An error occurred while processing the images')

            labels = model_job.train_task().get_labels()
            images = inputs['data']
            indices = (-scores).argsort(axis=0)[:top_n]
            results = []
            # Can't have more images per category than the number of images
            images_per_category = min(top_n, len(images))
            for i in xrange(indices.shape[1]):
                result_images = []
                for j in xrange(images_per_category):
                    result_images.append(images[indices[j][i]])
                results.append((
                        labels[i],
                        utils.image.embed_image_html(
                            utils.image.vis_square(np.array(result_images),
                                colormap='white')
                            )
                        ))

        return flask.render_template('inference/images/classification/top_n.html',
                model_job       = model_job,
                job             = inference_job,
                results         = results,
                running         = False,
                )
Example #16
File: views.py Project: DESHRAJ/DIGITS
def feature_extraction_dataset_create():
    """
    Creates a new FeatureExtractionDatasetJob

    Returns JSON when requested: {job_id,name,status} or {errors:[]}
    """
    workspace = get_workspace_details(flask.request.url)
    form = FeatureExtractionDatasetForm()
    if not form.validate_on_submit():
        if request_wants_json():
            return flask.jsonify({'errors': form.errors}), 400
        else:
            return flask.render_template('datasets/images/extraction/new.html', form=form, workspace = workspace), 400

    job = None
    try:
        job = FeatureExtractionDatasetJob(
                name        = form.dataset_name.data,
                image_dims  = (
                    int(form.resize_height.data),
                    int(form.resize_width.data),
                    int(form.resize_channels.data),
                    ),
                resize_mode = form.resize_mode.data,
                workspace = workspace,
                )

        #if form.method.data == 'folder':
        #    from_folders(job, form)

        #elif form.method.data == 'textfile':
        from_files(job, form)

        scheduler.add_job(job)
        if request_wants_json():
            return flask.jsonify(job.json_dict())
        else:
            return flask.redirect(flask.url_for('datasets_show', job_id=job.id())+'?workspace='+workspace['workspace_hash'])

    except:
        if job:
            scheduler.delete_job(job)
        raise
Example #17
File: views.py Project: JFerguson20/DIGITS
def image_classification_model_classify_one():
    """
    Classify one image and return the top 5 classifications

    Returns JSON when requested: {predictions: {category: confidence,...}}
    """
    job = scheduler.get_job(flask.request.args['job_id'])
    if job is None:
        raise werkzeug.exceptions.NotFound('Job not found')

    image = None
    if 'image_url' in flask.request.form and flask.request.form['image_url']:
        image = utils.image.load_image(flask.request.form['image_url'])
    elif 'image_file' in flask.request.files and flask.request.files['image_file']:
        with tempfile.NamedTemporaryFile() as outfile:
            flask.request.files['image_file'].save(outfile.name)
            image = utils.image.load_image(outfile.name)
    else:
        raise werkzeug.exceptions.BadRequest('No image given')

    # resize image
    db_task = job.train_task().dataset.train_db_task()
    height = db_task.image_dims[0]
    width = db_task.image_dims[1]
    if job.train_task().crop_size:
        height = job.train_task().crop_size
        width = job.train_task().crop_size
    image = utils.image.resize_image(image, height, width,
            channels = db_task.image_dims[2],
            resize_mode = db_task.resize_mode,
            )

    epoch = None
    if 'snapshot_epoch' in flask.request.form:
        epoch = float(flask.request.form['snapshot_epoch'])

    layers = 'none'
    if 'show_visualizations' in flask.request.form and flask.request.form['show_visualizations']:
        layers = 'all'

    predictions, visualizations = job.train_task().infer_one(image, snapshot_epoch=epoch, layers=layers)
    # take top 5
    predictions = [(p[0], round(100.0*p[1],2)) for p in predictions[:5]]

    if request_wants_json():
        return flask.jsonify({'predictions': predictions})
    else:
        return flask.render_template('models/images/classification/classify_one.html',
                image_src       = utils.image.embed_image_html(image),
                predictions     = predictions,
                visualizations  = visualizations,
                )
Example #18
File: views.py Project: CVML/DIGITS
def home():
    """
    DIGITS home page
    Returns information about each job on the server

    Returns JSON when requested:
        {
            datasets: [{id, name, status},...],
            models: [{id, name, status},...]
        }
    """
    running_datasets    = get_job_list(dataset.DatasetJob, True)
    completed_datasets  = get_job_list(dataset.DatasetJob, False)
    running_models      = get_job_list(model.ModelJob, True)
    completed_models    = get_job_list(model.ModelJob, False)

    if request_wants_json():
        return flask.jsonify({
            'datasets': [j.json_dict()
                for j in running_datasets + completed_datasets],
            'models': [j.json_dict()
                for j in running_models + completed_models],
            })
    else:
        new_dataset_options = [
                ('Images', [
                    {
                        'title': 'Classification',
                        'id': 'image-classification',
                        'url': flask.url_for('image_classification_dataset_new'),
                        },
                    ])
                ]
        new_model_options = [
                ('Images', [
                    {
                        'title': 'Classification',
                        'id': 'image-classification',
                        'url': flask.url_for('image_classification_model_new'),
                        },
                    ])
                ]

        return flask.render_template('home.html',
                new_dataset_options = new_dataset_options,
                running_datasets    = running_datasets,
                completed_datasets  = completed_datasets,
                new_model_options   = new_model_options,
                running_models      = running_models,
                completed_models    = completed_models,
                )
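The JSON branch of home() can be exercised with a plain GET that prefers JSON (host and port are placeholders):

import requests

resp = requests.get('http://localhost:5000/', headers={'Accept': 'application/json'})
info = resp.json()
for j in info['datasets'] + info['models']:
    print(j.get('id'), j.get('name'), j.get('status'))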
Example #19
File: views.py Project: aichemzee/DIGITS
def top_n():
    """
    Classify many images and show the top N images per category by confidence
    """
    model_job = job_from_request()

    image_list = flask.request.files['image_list']
    if not image_list:
        raise werkzeug.exceptions.BadRequest('File upload not found')

    epoch = None
    if 'snapshot_epoch' in flask.request.form:
        epoch = float(flask.request.form['snapshot_epoch'])
    if 'top_n' in flask.request.form and flask.request.form['top_n'].strip():
        top_n = int(flask.request.form['top_n'])
    else:
        top_n = 9

    if 'image_folder' in flask.request.form and flask.request.form['image_folder'].strip():
        image_folder = flask.request.form['image_folder']
        if not os.path.exists(image_folder):
            raise werkzeug.exceptions.BadRequest('image_folder "%s" does not exist' % image_folder)
    else:
        image_folder = None

    if 'num_test_images' in flask.request.form and flask.request.form['num_test_images'].strip():
        num_test_images = int(flask.request.form['num_test_images'])
    else:
        num_test_images = None

    paths, _ = read_image_list(image_list, image_folder, num_test_images)

    # create inference job
    inference_job = ImageInferenceTopNJob(
                username    = utils.auth.get_username(),
                name        = "TopN Image Classification",
                model       = model_job,
                images      = paths,
                epoch       = epoch,
                layers      = 'none',
                top_n       = top_n,
                )

    # schedule tasks
    scheduler.add_job(inference_job)

    if request_wants_json():
        return flask.jsonify(inference_job.json_dict())
    else:
        return flask.redirect(flask.url_for('digits.inference.views.show', job_id=inference_job.id()))
Example #20
File: views.py Project: rupertsmall/DIGITS
def generic_image_model_infer_one():
    """
    Infer one image
    """
    job = job_from_request()

    image = None
    if 'image_url' in flask.request.form and flask.request.form['image_url']:
        image = utils.image.load_image(flask.request.form['image_url'])
    elif 'image_file' in flask.request.files and flask.request.files['image_file']:
        outfile = tempfile.mkstemp(suffix='.bin')
        flask.request.files['image_file'].save(outfile[1])
        image = utils.image.load_image(outfile[1])
        os.close(outfile[0])
        os.remove(outfile[1])
    else:
        raise werkzeug.exceptions.BadRequest('must provide image_url or image_file')

    # resize image
    db_task = job.train_task().dataset.analyze_db_tasks()[0]
    height = db_task.image_height
    width = db_task.image_width
    if job.train_task().crop_size:
        height = job.train_task().crop_size
        width = job.train_task().crop_size
    image = utils.image.resize_image(image, height, width,
            channels = db_task.image_channels,
            resize_mode = 'squash',
            )

    epoch = None
    if 'snapshot_epoch' in flask.request.form:
        epoch = float(flask.request.form['snapshot_epoch'])

    layers = 'none'
    if 'show_visualizations' in flask.request.form and flask.request.form['show_visualizations']:
        layers = 'all'

    outputs, visualizations = job.train_task().infer_one(image, snapshot_epoch=epoch, layers=layers)

    if request_wants_json():
        return flask.jsonify({'outputs': dict((name, blob.tolist()) for name,blob in outputs.iteritems())})
    else:
        return flask.render_template('models/images/generic/infer_one.html',
                job             = job,
                image_src       = utils.image.embed_image_html(image),
                network_outputs = outputs,
                visualizations  = visualizations,
                total_parameters= sum(v['param_count'] for v in visualizations if v['vis_type'] == 'Weights'),
                )
Example #21
File: views.py Project: sayi21cn/DIGITS
def image_classification_model_classify_one():
    """
    Classify one image and return the top 5 classifications

    Returns JSON when requested: {predictions: {category: confidence,...}}
    """
    job = job_from_request()

    image = None
    if "image_url" in flask.request.form and flask.request.form["image_url"]:
        image = utils.image.load_image(flask.request.form["image_url"])
    elif "image_file" in flask.request.files and flask.request.files["image_file"]:
        with tempfile.NamedTemporaryFile() as outfile:
            flask.request.files["image_file"].save(outfile.name)
            image = utils.image.load_image(outfile.name)
    else:
        raise werkzeug.exceptions.BadRequest("must provide image_url or image_file")

    # resize image
    db_task = job.train_task().dataset.train_db_task()
    height = db_task.image_dims[0]
    width = db_task.image_dims[1]
    if job.train_task().crop_size:
        height = job.train_task().crop_size
        width = job.train_task().crop_size
    image = utils.image.resize_image(
        image, height, width, channels=db_task.image_dims[2], resize_mode=db_task.resize_mode
    )

    epoch = None
    if "snapshot_epoch" in flask.request.form:
        epoch = float(flask.request.form["snapshot_epoch"])

    layers = "none"
    if "show_visualizations" in flask.request.form and flask.request.form["show_visualizations"]:
        layers = "all"

    predictions, visualizations = job.train_task().infer_one(image, snapshot_epoch=epoch, layers=layers)
    # take top 5
    predictions = [(p[0], round(100.0 * p[1], 2)) for p in predictions[:5]]

    if request_wants_json():
        return flask.jsonify({"predictions": predictions})
    else:
        return flask.render_template(
            "models/images/classification/classify_one.html",
            image_src=utils.image.embed_image_html(image),
            predictions=predictions,
            visualizations=visualizations,
        )
Example #22
File: views.py Project: aichemzee/DIGITS
def classify_many():
    """
    Start a new classify_many job
    """

    # kicking off a new inference job
    model_job = job_from_request()
    image_list = flask.request.files.get('image_list')
    if not image_list:
        raise werkzeug.exceptions.BadRequest('image_list is a required field')

    if 'image_folder' in flask.request.form and flask.request.form['image_folder'].strip():
        image_folder = flask.request.form['image_folder']
        if not os.path.exists(image_folder):
            raise werkzeug.exceptions.BadRequest('image_folder "%s" does not exist' % image_folder)
    else:
        image_folder = None

    if 'num_test_images' in flask.request.form and flask.request.form['num_test_images'].strip():
        num_test_images = int(flask.request.form['num_test_images'])
    else:
        num_test_images = None

    epoch = None
    if 'snapshot_epoch' in flask.request.form:
        epoch = float(flask.request.form['snapshot_epoch'])

    paths, ground_truths = read_image_list(image_list, image_folder, num_test_images)

    # create inference job
    inference_job = ImageInferenceClassifyManyJob(
                username      = utils.auth.get_username(),
                name          = "Classify Many Images",
                model         = model_job,
                images        = paths,
                epoch         = epoch,
                layers        = 'none',
                ground_truths = ground_truths,
                )

    # schedule tasks
    scheduler.add_job(inference_job)

    if request_wants_json():
        return flask.jsonify(inference_job.json_dict())
    else:
        return flask.redirect(flask.url_for('digits.inference.views.show', job_id=inference_job.id()))
Example #23
File: views.py Project: qarth/DIGITS
def generic_image_model_infer_one():
    """
    Infer one image
    """
    job = job_from_request()

    image = None
    if 'image_url' in flask.request.form and flask.request.form['image_url']:
        image = utils.image.load_image(flask.request.form['image_url'])
    elif 'image_file' in flask.request.files and flask.request.files['image_file']:
        with tempfile.NamedTemporaryFile() as outfile:
            flask.request.files['image_file'].save(outfile.name)
            image = utils.image.load_image(outfile.name)
    else:
        raise werkzeug.exceptions.BadRequest('must provide image_url or image_file')

    # resize image
    db_task = job.train_task().dataset.analyze_db_tasks()[0]
    height = db_task.image_height
    width = db_task.image_width
    if job.train_task().crop_size:
        height = job.train_task().crop_size
        width = job.train_task().crop_size
    image = utils.image.resize_image(image, height, width,
            channels = db_task.image_channels,
            resize_mode = 'squash',
            )

    epoch = None
    if 'snapshot_epoch' in flask.request.form:
        epoch = float(flask.request.form['snapshot_epoch'])

    layers = 'none'
    if 'show_visualizations' in flask.request.form and flask.request.form['show_visualizations']:
        layers = 'all'

    outputs, visualizations = job.train_task().infer_one(image, snapshot_epoch=epoch, layers=layers)

    if request_wants_json():
        return flask.jsonify({'outputs': dict((name, blob.tolist()) for name,blob in outputs.iteritems())})
    else:
        return flask.render_template('models/images/generic/infer_one.html',
                image_src       = utils.image.embed_image_html(image),
                network_outputs = outputs,
                visualizations  = visualizations,
                )
Example #24
def generic_image_model_infer_one():
    """
    Infer one image
    """
    job = job_from_request()

    image = None
    if "image_url" in flask.request.form and flask.request.form["image_url"]:
        image = utils.image.load_image(flask.request.form["image_url"])
    elif "image_file" in flask.request.files and flask.request.files["image_file"]:
        outfile = tempfile.mkstemp(suffix=".bin")
        flask.request.files["image_file"].save(outfile[1])
        image = utils.image.load_image(outfile[1])
        os.close(outfile[0])
        os.remove(outfile[1])
    else:
        raise werkzeug.exceptions.BadRequest("must provide image_url or image_file")

    # resize image
    db_task = job.train_task().dataset.analyze_db_tasks()[0]
    height = db_task.image_height
    width = db_task.image_width
    image = utils.image.resize_image(image, height, width, channels=db_task.image_channels, resize_mode="squash")

    epoch = None
    if "snapshot_epoch" in flask.request.form:
        epoch = float(flask.request.form["snapshot_epoch"])

    layers = "none"
    if "show_visualizations" in flask.request.form and flask.request.form["show_visualizations"]:
        layers = "all"

    outputs, visualizations = job.train_task().infer_one(image, snapshot_epoch=epoch, layers=layers)

    if request_wants_json():
        return flask.jsonify({"outputs": dict((name, blob.tolist()) for name, blob in outputs.iteritems())})
    else:
        return flask.render_template(
            "models/images/generic/infer_one.html",
            job=job,
            image_src=utils.image.embed_image_html(image),
            network_outputs=outputs,
            visualizations=visualizations,
            total_parameters=sum(v["param_count"] for v in visualizations if v["vis_type"] == "Weights"),
        )
Example #25
def models_show(job_id):
    """
    Show a ModelJob

    Returns JSON when requested:
        {id, name, directory, status, snapshots: [epoch,epoch,...]}
    """
    job = scheduler.get_job(job_id)
    if job is None:
        raise werkzeug.exceptions.NotFound('Job not found')

    if request_wants_json():
        return flask.jsonify(job.json_dict(True))
    else:
        if isinstance(job, model_images.ImageClassificationModelJob):
            return model_images.classification.views.show(job)
        else:
            raise werkzeug.exceptions.BadRequest('Invalid job type')
Example #26
File: views.py Project: JFerguson20/DIGITS
def datasets_show(job_id):
    """
    Show a DatasetJob

    Returns JSON when requested:
        {id, name, directory, status}
    """
    job = scheduler.get_job(job_id)
    if job is None:
        raise werkzeug.exceptions.NotFound('Job not found')

    if request_wants_json():
        return flask.jsonify(job.json_dict(True))
    else:
        if isinstance(job, dataset_images.ImageClassificationDatasetJob):
            return dataset_images.classification.views.show(job)
        else:
            raise werkzeug.exceptions.BadRequest('Invalid job type')
Example #27
File: views.py Project: aichemzee/DIGITS
def classify_one():
    """
    Classify one image and return the top 5 classifications

    Returns JSON when requested: {predictions: {category: confidence,...}}
    """
    model_job = job_from_request()

    if 'image_url' in flask.request.form and flask.request.form['image_url']:
        image_path = flask.request.form['image_url']
    elif 'image_file' in flask.request.files and flask.request.files['image_file']:
        outfile = tempfile.mkstemp(suffix='.png')
        flask.request.files['image_file'].save(outfile[1])
        image_path = outfile[1]
        os.close(outfile[0])
    else:
        raise werkzeug.exceptions.BadRequest('must provide image_url or image_file')

    epoch = None
    if 'snapshot_epoch' in flask.request.form:
        epoch = float(flask.request.form['snapshot_epoch'])

    layers = 'none'
    if 'show_visualizations' in flask.request.form and flask.request.form['show_visualizations']:
        layers = 'all'

    # create inference job
    inference_job = ImageInferenceClassifyOneJob(
                username    = utils.auth.get_username(),
                name        = "Classify One Image",
                model       = model_job,
                images      = [image_path],
                epoch       = epoch,
                layers      = layers
                )

    # schedule tasks
    scheduler.add_job(inference_job)

    if request_wants_json():
        return flask.jsonify(inference_job.json_dict())
    else:
        return flask.redirect(flask.url_for('digits.inference.views.show', job_id=inference_job.id()))
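Because this handler returns the new inference job's JSON immediately, a JSON client is expected to poll the job until it finishes, as the running/completed branches in Example #1 suggest. A hedged sketch with a placeholder URL for the job-show endpoint:

import time
import requests

def wait_for_predictions(base_url, job_id):
    # Poll the inference job until its JSON payload carries predictions.
    while True:
        resp = requests.get('%s/inference/%s' % (base_url, job_id),  # placeholder path
                            headers={'Accept': 'application/json'})
        info = resp.json()
        if 'predictions' in info:  # completed: show_classify_one returns the predictions
            return info['predictions']
        time.sleep(1)              # still running: the job dict is echoed back instead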
Example #28
File: views.py Project: hycis/DIGITS
def models_show(job_id):
    """
    Show a ModelJob

    Returns JSON when requested:
        {id, name, directory, status, snapshots: [epoch,epoch,...]}
    """
    job = scheduler.get_job(job_id)
    if job is None:
        raise werkzeug.exceptions.NotFound("Job not found")

    if request_wants_json():
        return flask.jsonify(job.json_dict(True))
    else:
        if isinstance(job, model_images.ImageClassificationModelJob):
            return model_images.classification.views.show(job)
        elif isinstance(job, model_images.GenericImageModelJob):
            return model_images.generic.views.show(job)
        else:
            raise werkzeug.exceptions.BadRequest("Invalid job type")
Example #29
File: views.py Project: JFerguson20/DIGITS
def handle_exception(e, status_code=500):
    if 'DIGITS_MODE_TEST' in os.environ:
        raise
    error_type = type(e).__name__
    message = str(e)
    trace = None
    if app.debug:
        trace = traceback.format_exc()

    if request_wants_json():
        details = {
                'message': message,
                'type': error_type,
                }
        if trace is not None:
            details['trace'] = trace.split('\n')
        return flask.jsonify({'error': details}), status_code
    else:
        return flask.render_template('500.html',
                title   = error_type,
                message = message,
                trace   = trace,
                ), status_code
Example #30
File: views.py Project: Cloud-CV/DIGITS
def home():
    """
    DIGITS home page
    Returns information about each job on the server

    Returns JSON when requested:
        {
            datasets: [{id, name, status},...],
            models: [{id, name, status},...]
        }
    """
    running_datasets    = get_job_list(dataset.DatasetJob, True)
    completed_datasets  = get_job_list(dataset.DatasetJob, False)
    running_models      = get_job_list(model.ModelJob, True)
    completed_models    = get_job_list(model.ModelJob, False)

    if request_wants_json():
        data = {
                'version': digits.__version__,
                'jobs_dir': config_value('jobs_dir'),
                'datasets': [j.json_dict()
                    for j in running_datasets + completed_datasets],
                'models': [j.json_dict()
                    for j in running_models + completed_models],
                }
        if config_value('server_name'):
            data['server_name'] = config_value('server_name')
        return flask.jsonify(data)
    else:
        new_dataset_options = [
                ('Images', [
                    {
                        'title': 'Classification',
                        'id': 'image-classification',
                        'url': flask.url_for('image_classification_dataset_new'),
                        },
                    {
                        'title': 'Other',
                        'id': 'image-generic',
                        'url': flask.url_for('generic_image_dataset_new'),
                        },
                    ])
                ]
        new_model_options = [
                ('Images', [
                    {
                        'title': 'Classification',
                        'id': 'image-classification',
                        'url': flask.url_for('image_classification_model_new'),
                        },
                    {
                        'title': 'Other',
                        'id': 'image-generic',
                        'url': flask.url_for('generic_image_model_new'),
                        },
                    ])
                ]

        return flask.render_template('home.html',
                new_dataset_options = new_dataset_options,
                running_datasets    = running_datasets,
                completed_datasets  = completed_datasets,
                new_model_options   = new_model_options,
                running_models      = running_models,
                completed_models    = completed_models,
                total_gpu_count     = len(scheduler.resources['gpus']),
                remaining_gpu_count = sum(r.remaining() for r in scheduler.resources['gpus']),
                )
Example #31
def create():
    """
    Create a new GenericImageModelJob

    Returns JSON when requested: {job_id,name,status} or {errors:[]}
    """
    form = GenericImageModelForm()
    form.dataset.choices = get_datasets()
    form.standard_networks.choices = []
    form.previous_networks.choices = get_previous_networks()

    prev_network_snapshots = get_previous_network_snapshots()

    ## Is there a request to clone a job with ?clone=<job_id>
    fill_form_if_cloned(form)

    if not form.validate_on_submit():
        if request_wants_json():
            return flask.jsonify({'errors': form.errors}), 400
        else:
            return flask.render_template('models/images/generic/new.html',
                    form = form,
                    frameworks = frameworks.get_frameworks(),
                    previous_network_snapshots = prev_network_snapshots,
                    previous_networks_fullinfo = get_previous_networks_fulldetails(),
                    multi_gpu = config_value('caffe_root')['multi_gpu'],
                    ), 400

    datasetJob = scheduler.get_job(form.dataset.data)
    if not datasetJob:
        raise werkzeug.exceptions.BadRequest(
                'Unknown dataset job_id "%s"' % form.dataset.data)

    # sweeps will be a list of the permutations of the swept fields
    # Get swept learning_rate
    sweeps = [{'learning_rate': v} for v in form.learning_rate.data]
    add_learning_rate = len(form.learning_rate.data) > 1

    # Add swept batch_size
    sweeps = [dict(s.items() + [('batch_size', bs)]) for bs in form.batch_size.data for s in sweeps[:]]
    add_batch_size = len(form.batch_size.data) > 1
    n_jobs = len(sweeps)

    jobs = []
    for sweep in sweeps:
        # Populate the form with swept data to be used in saving and
        # launching jobs.
        form.learning_rate.data = sweep['learning_rate']
        form.batch_size.data = sweep['batch_size']

        # Augment Job Name
        extra = ''
        if add_learning_rate:
            extra += ' learning_rate:%s' % str(form.learning_rate.data[0])
        if add_batch_size:
            extra += ' batch_size:%d' % form.batch_size.data[0]

        job = None
        try:
            job = GenericImageModelJob(
                    username    = utils.auth.get_username(),
                    name        = form.model_name.data + extra,
                    dataset_id  = datasetJob.id(),
                    )

            # get framework (hard-coded to caffe for now)
            fw = frameworks.get_framework_by_id(form.framework.data)

            pretrained_model = None
            #if form.method.data == 'standard':
            if form.method.data == 'previous':
                old_job = scheduler.get_job(form.previous_networks.data)
                if not old_job:
                    raise werkzeug.exceptions.BadRequest(
                            'Job not found: %s' % form.previous_networks.data)

                use_same_dataset = (old_job.dataset_id == job.dataset_id)
                network = fw.get_network_from_previous(old_job.train_task().network, use_same_dataset)

                for choice in form.previous_networks.choices:
                    if choice[0] == form.previous_networks.data:
                        epoch = float(flask.request.form['%s-snapshot' % form.previous_networks.data])
                        if epoch == 0:
                            pass
                        elif epoch == -1:
                            pretrained_model = old_job.train_task().pretrained_model
                        else:
                            for filename, e in old_job.train_task().snapshots:
                                if e == epoch:
                                    pretrained_model = filename
                                    break

                            if pretrained_model is None:
                                raise werkzeug.exceptions.BadRequest(
                                        "For the job %s, selected pretrained_model for epoch %d is invalid!"
                                        % (form.previous_networks.data, epoch))
                            if not (os.path.exists(pretrained_model)):
                                raise werkzeug.exceptions.BadRequest(
                                        "Pretrained_model for the selected epoch doesn't exists. May be deleted by another user/process. Please restart the server to load the correct pretrained_model details")
                        break

            elif form.method.data == 'custom':
                network = fw.get_network_from_desc(form.custom_network.data)
                pretrained_model = form.custom_network_snapshot.data.strip()
            else:
                raise werkzeug.exceptions.BadRequest(
                        'Unrecognized method: "%s"' % form.method.data)

            policy = {'policy': form.lr_policy.data}
            if form.lr_policy.data == 'fixed':
                pass
            elif form.lr_policy.data == 'step':
                policy['stepsize'] = form.lr_step_size.data
                policy['gamma'] = form.lr_step_gamma.data
            elif form.lr_policy.data == 'multistep':
                policy['stepvalue'] = form.lr_multistep_values.data
                policy['gamma'] = form.lr_multistep_gamma.data
            elif form.lr_policy.data == 'exp':
                policy['gamma'] = form.lr_exp_gamma.data
            elif form.lr_policy.data == 'inv':
                policy['gamma'] = form.lr_inv_gamma.data
                policy['power'] = form.lr_inv_power.data
            elif form.lr_policy.data == 'poly':
                policy['power'] = form.lr_poly_power.data
            elif form.lr_policy.data == 'sigmoid':
                policy['stepsize'] = form.lr_sigmoid_step.data
                policy['gamma'] = form.lr_sigmoid_gamma.data
            else:
                raise werkzeug.exceptions.BadRequest(
                        'Invalid learning rate policy')

            if config_value('caffe_root')['multi_gpu']:
                if form.select_gpu_count.data:
                    gpu_count = form.select_gpu_count.data
                    selected_gpus = None
                else:
                    selected_gpus = [str(gpu) for gpu in form.select_gpus.data]
                    gpu_count = None
            else:
                if form.select_gpu.data == 'next':
                    gpu_count = 1
                    selected_gpus = None
                else:
                    selected_gpus = [str(form.select_gpu.data)]
                    gpu_count = None

            # Python Layer File may be on the server or copied from the client.
            fs.copy_python_layer_file(
                bool(form.python_layer_from_client.data),
                job.dir(),
                (flask.request.files[form.python_layer_client_file.name]
                 if form.python_layer_client_file.name in flask.request.files
                 else ''), form.python_layer_server_file.data)

            job.tasks.append(fw.create_train_task(
                        job = job,
                        dataset = datasetJob,
                        train_epochs = form.train_epochs.data,
                        snapshot_interval = form.snapshot_interval.data,
                        learning_rate = form.learning_rate.data[0],
                        lr_policy = policy,
                        gpu_count = gpu_count,
                        selected_gpus = selected_gpus,
                        batch_size = form.batch_size.data[0],
                        batch_accumulation = form.batch_accumulation.data,
                        val_interval = form.val_interval.data,
                        pretrained_model = pretrained_model,
                        crop_size = form.crop_size.data,
                        use_mean = form.use_mean.data,
                        network = network,
                        random_seed = form.random_seed.data,
                        solver_type = form.solver_type.data,
                        shuffle = form.shuffle.data,
                        )
                    )

            ## Save form data with the job so we can easily clone it later.
            save_form_to_job(job, form)

            jobs.append(job)
            scheduler.add_job(job)
            if n_jobs == 1:
                if request_wants_json():
                    return flask.jsonify(job.json_dict())
                else:
                    return flask.redirect(flask.url_for('digits.model.views.show', job_id=job.id()))

        except:
            if job:
                scheduler.delete_job(job)
            raise

    if request_wants_json():
        return flask.jsonify(jobs=[job.json_dict() for job in jobs])

    # If there are multiple jobs launched, go to the home page.
    return flask.redirect('/')
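The sweep section above builds one training job per combination of learning rate and batch size: the first comprehension seeds one dict per learning rate, and the second crosses those dicts with every batch size. A minimal standalone sketch of the same permutation step, using illustrative values rather than the real form fields:

import itertools

learning_rates = [0.01, 0.001]   # e.g. form.learning_rate.data
batch_sizes = [16, 32]           # e.g. form.batch_size.data

# one dict per (learning_rate, batch_size) combination -> 4 sweeps here
sweeps = [{'learning_rate': lr, 'batch_size': bs}
          for lr, bs in itertools.product(learning_rates, batch_sizes)]

for sweep in sweeps:
    print(sweep)   # {'learning_rate': 0.01, 'batch_size': 16}, ...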
Example #34
def classify_many():
    """
    Classify many images and return the top 5 classifications for each

    Returns JSON when requested: {classifications: {filename: [[category,confidence],...],...}}
    """
    model_job = job_from_request()

    image_list = flask.request.files.get('image_list')
    if not image_list:
        raise werkzeug.exceptions.BadRequest('image_list is a required field')

    if 'image_folder' in flask.request.form and flask.request.form['image_folder'].strip():
        image_folder = flask.request.form['image_folder']
        if not os.path.exists(image_folder):
            raise werkzeug.exceptions.BadRequest('image_folder "%s" does not exist' % image_folder)
    else:
        image_folder = None

    if 'num_test_images' in flask.request.form and flask.request.form['num_test_images'].strip():
        num_test_images = int(flask.request.form['num_test_images'])
    else:
        num_test_images = None

    epoch = None
    if 'snapshot_epoch' in flask.request.form:
        epoch = float(flask.request.form['snapshot_epoch'])

    paths, ground_truths = read_image_list(image_list, image_folder, num_test_images)

    # create inference job
    inference_job = ImageInferenceJob(
        username=utils.auth.get_username(),
        name="Classify Many Images",
        model=model_job,
        images=paths,
        epoch=epoch,
        layers='none'
    )

    # schedule tasks
    scheduler.add_job(inference_job)

    # wait for job to complete
    inference_job.wait_completion()

    # retrieve inference data
    inputs, outputs, _ = inference_job.get_data()

    # set return status code
    status_code = 500 if inference_job.status == 'E' else 200

    # delete job
    scheduler.delete_job(inference_job)

    if outputs is not None and len(outputs) < 1:
        # an error occurred
        outputs = None

    if inputs is not None:
        # retrieve path and ground truth of images that were successfully processed
        paths = [paths[idx] for idx in inputs['ids']]
        ground_truths = [ground_truths[idx] for idx in inputs['ids']]

    # defaults
    classifications = None
    show_ground_truth = None
    top1_accuracy = None
    top5_accuracy = None
    confusion_matrix = None
    per_class_accuracy = None
    labels = None

    if outputs is not None:
        # convert to class probabilities for viewing
        last_output_name, last_output_data = outputs.items()[-1]
        if len(last_output_data) < 1:
            raise werkzeug.exceptions.BadRequest(
                'Unable to classify any image from the file')

        scores = last_output_data
        # take top 5
        indices = (-scores).argsort()[:, :5]

        labels = model_job.train_task().get_labels()
        n_labels = len(labels)

        # remove invalid ground truth
        ground_truths = [x if x is not None and (0 <= x < n_labels) else None for x in ground_truths]

        # how many pieces of ground truth do we have?
        n_ground_truth = len([1 for x in ground_truths if x is not None])
        show_ground_truth = n_ground_truth > 0

        # compute classifications and statistics
        classifications = []
        n_top1_accurate = 0
        n_top5_accurate = 0
        confusion_matrix = np.zeros((n_labels, n_labels), dtype=np.dtype(int))
        for image_index, index_list in enumerate(indices):
            result = []
            if ground_truths[image_index] is not None:
                if ground_truths[image_index] == index_list[0]:
                    n_top1_accurate += 1
                if ground_truths[image_index] in index_list:
                    n_top5_accurate += 1
                if (0 <= ground_truths[image_index] < n_labels) and (0 <= index_list[0] < n_labels):
                    confusion_matrix[ground_truths[image_index], index_list[0]] += 1
            for i in index_list:
                # `i` is a category in labels and also an index into scores
                # ignore prediction if we don't have a label for the corresponding class
                # the user might have set the final fully-connected layer's num_output to
                # too high a value
                if i < len(labels):
                    result.append((labels[i], round(100.0 * scores[image_index, i], 2)))
            classifications.append(result)

        # accuracy
        if show_ground_truth:
            top1_accuracy = round(100.0 * n_top1_accurate / n_ground_truth, 2)
            top5_accuracy = round(100.0 * n_top5_accurate / n_ground_truth, 2)
            per_class_accuracy = []
            for x in xrange(n_labels):
                n_examples = sum(confusion_matrix[x])
                per_class_accuracy.append(
                    round(100.0 * confusion_matrix[x, x] / n_examples, 2) if n_examples > 0 else None)
        else:
            top1_accuracy = None
            top5_accuracy = None
            per_class_accuracy = None

        # replace ground truth indices with labels
        ground_truths = [labels[x] if x is not None and (0 <= x < n_labels) else None for x in ground_truths]

    if request_wants_json():
        joined = dict(zip(paths, classifications))
        return flask.jsonify({'classifications': joined}), status_code
    else:
        return flask.render_template('models/images/classification/classify_many.html',
                                     model_job=model_job,
                                     job=inference_job,
                                     paths=paths,
                                     classifications=classifications,
                                     show_ground_truth=show_ground_truth,
                                     ground_truths=ground_truths,
                                     top1_accuracy=top1_accuracy,
                                     top5_accuracy=top5_accuracy,
                                     confusion_matrix=confusion_matrix,
                                     per_class_accuracy=per_class_accuracy,
                                     labels=labels,
                                     ), status_code
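The top-1/top-5 accuracy, per-class accuracy, and confusion matrix above are computed purely from the score matrix and the ground-truth label indices. A self-contained sketch of that computation on made-up scores (the label names and array shapes are assumptions for illustration, not DIGITS outputs):

import numpy as np

labels = ['cat', 'dog', 'bird']
scores = np.array([[0.7, 0.2, 0.1],      # one row of class scores per image
                   [0.1, 0.3, 0.6]])
ground_truths = [0, 1]                   # ground-truth label index per image

indices = (-scores).argsort()[:, :5]     # top-k class indices per image
n_top1 = sum(1 for gt, idx in zip(ground_truths, indices) if gt == idx[0])
n_top5 = sum(1 for gt, idx in zip(ground_truths, indices) if gt in idx)

confusion_matrix = np.zeros((len(labels), len(labels)), dtype=int)
for gt, idx in zip(ground_truths, indices):
    confusion_matrix[gt, idx[0]] += 1

print('top-1: %.2f%%  top-5: %.2f%%' % (100.0 * n_top1 / len(ground_truths),
                                        100.0 * n_top5 / len(ground_truths)))
print(confusion_matrix)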
Example #35
File: views.py Project: zjucsxxd/DIGITS
def infer_many():
    """
    Infer many images
    """
    model_job = job_from_request()

    image_list = flask.request.files.get('image_list')
    if not image_list:
        raise werkzeug.exceptions.BadRequest('image_list is a required field')

    if 'image_folder' in flask.request.form and flask.request.form[
            'image_folder'].strip():
        image_folder = flask.request.form['image_folder']
        if not os.path.exists(image_folder):
            raise werkzeug.exceptions.BadRequest(
                'image_folder "%s" does not exit' % image_folder)
    else:
        image_folder = None

    if 'num_test_images' in flask.request.form and flask.request.form[
            'num_test_images'].strip():
        num_test_images = int(flask.request.form['num_test_images'])
    else:
        num_test_images = None

    epoch = None
    if 'snapshot_epoch' in flask.request.form:
        epoch = float(flask.request.form['snapshot_epoch'])

    if 'dont_resize' in flask.request.form and flask.request.form[
            'dont_resize']:
        resize = False
    else:
        resize = True

    paths = []

    for line in image_list.readlines():
        line = line.strip()
        if not line:
            continue

        path = None
        # might contain a numerical label at the end
        match = re.match(r'(.*\S)\s+\d+$', line)
        if match:
            path = match.group(1)
        else:
            path = line

        if not utils.is_url(path) and image_folder and not os.path.isabs(path):
            path = os.path.join(image_folder, path)
        paths.append(path)

        if num_test_images is not None and len(paths) >= num_test_images:
            break

    # create inference job
    inference_job = ImageInferenceJob(
        username=utils.auth.get_username(),
        name="Infer Many Images",
        model=model_job,
        images=paths,
        epoch=epoch,
        layers='none',
        resize=resize,
    )

    # schedule tasks
    scheduler.add_job(inference_job)

    # wait for job to complete
    inference_job.wait_completion()

    # retrieve inference data
    inputs, outputs, _ = inference_job.get_data()

    # set return status code
    status_code = 500 if inference_job.status == 'E' else 200

    # delete job folder and remove from scheduler list
    scheduler.delete_job(inference_job)

    if outputs is not None and len(outputs) < 1:
        # an error occurred
        outputs = None

    if inputs is not None:
        paths = [paths[idx] for idx in inputs['ids']]
        inference_views_html, header_html, app_begin_html, app_end_html = get_inference_visualizations(
            model_job.dataset, inputs, outputs)
    else:
        inference_views_html = None
        header_html = None
        app_begin_html = None
        app_end_html = None

    if request_wants_json():
        result = {}
        for i, path in enumerate(paths):
            result[path] = dict(
                (name, blob[i].tolist()) for name, blob in outputs.iteritems())
        return flask.jsonify({'outputs': result}), status_code
    else:
        return flask.render_template(
            'models/images/generic/infer_many.html',
            model_job=model_job,
            job=inference_job,
            paths=paths,
            inference_views_html=inference_views_html,
            header_html=header_html,
            app_begin_html=app_begin_html,
            app_end_html=app_end_html,
        ), status_code
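Each line of the uploaded image_list may end with a numeric label, which the regex above strips before relative paths are joined to image_folder. A small sketch of that parsing step on sample lines (the folder and file names are hypothetical, and utils.is_url is approximated with a plain prefix check):

import os
import re

image_folder = '/data/images'   # hypothetical value from the form
lines = ['cat/001.jpg 0', 'dog/007.jpg 3', 'http://example.com/bird.png']

paths = []
for line in lines:
    line = line.strip()
    match = re.match(r'(.*\S)\s+\d+$', line)   # drop a trailing " <label>"
    path = match.group(1) if match else line
    if not path.startswith('http') and not os.path.isabs(path):
        path = os.path.join(image_folder, path)
    paths.append(path)

# ['/data/images/cat/001.jpg', '/data/images/dog/007.jpg', 'http://example.com/bird.png']
print(paths)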
Example #36
File: views.py Project: zjucsxxd/DIGITS
def infer_extension():
    """
    Perform inference using the data from an extension inference form
    """
    model_job = job_from_request()

    inference_db_job = None
    try:
        # create an inference database
        inference_db_job = create_inference_db(model_job)
        db_path = inference_db_job.get_feature_db_path(constants.TEST_DB)

        # create database creation job
        epoch = None
        if 'snapshot_epoch' in flask.request.form:
            epoch = float(flask.request.form['snapshot_epoch'])

        layers = 'none'
        if 'show_visualizations' in flask.request.form and flask.request.form[
                'show_visualizations']:
            layers = 'all'

        # create inference job
        inference_job = ImageInferenceJob(
            username=utils.auth.get_username(),
            name="Inference",
            model=model_job,
            images=db_path,
            epoch=epoch,
            layers=layers,
            resize=False,
        )

        # schedule tasks
        scheduler.add_job(inference_job)

        # wait for job to complete
        inference_job.wait_completion()

    finally:
        if inference_db_job:
            scheduler.delete_job(inference_db_job)

    # retrieve inference data
    inputs, outputs, model_visualization = inference_job.get_data()

    # set return status code
    status_code = 500 if inference_job.status == 'E' else 200

    # delete job folder and remove from scheduler list
    scheduler.delete_job(inference_job)

    if outputs is not None and len(outputs) < 1:
        # an error occurred
        outputs = None

    if inputs is not None:
        keys = [str(idx) for idx in inputs['ids']]
        inference_views_html, header_html, app_begin_html, app_end_html = get_inference_visualizations(
            model_job.dataset, inputs, outputs)
    else:
        inference_views_html = None
        header_html = None
        keys = None
        app_begin_html = None
        app_end_html = None

    if request_wants_json():
        result = {}
        for i, key in enumerate(keys):
            result[key] = dict(
                (name, blob[i].tolist()) for name, blob in outputs.iteritems())
        return flask.jsonify({'outputs': result}), status_code
    else:
        return flask.render_template(
            'models/images/generic/infer_extension.html',
            model_job=model_job,
            job=inference_job,
            keys=keys,
            inference_views_html=inference_views_html,
            header_html=header_html,
            app_begin_html=app_begin_html,
            app_end_html=app_end_html,
            visualizations=model_visualization,
            total_parameters=sum(v['param_count'] for v in model_visualization
                                 if v['vis_type'] == 'Weights'),
        ), status_code
Example #37
File: views.py Project: rupertsmall/DIGITS
def generic_image_model_create():
    """
    Create a new GenericImageModelJob

    Returns JSON when requested: {job_id,name,status} or {errors:[]}
    """
    form = GenericImageModelForm()
    form.dataset.choices = get_datasets()
    form.standard_networks.choices = []
    form.previous_networks.choices = get_previous_networks()

    prev_network_snapshots = get_previous_network_snapshots()

    if not form.validate_on_submit():
        if request_wants_json():
            return flask.jsonify({'errors': form.errors}), 400
        else:
            return flask.render_template('models/images/generic/new.html',
                    form = form,
                    previous_network_snapshots = prev_network_snapshots,
                    previous_networks_fullinfo = get_previous_networks_fulldetails(),
                    multi_gpu = config_value('caffe_root')['multi_gpu'],
                    ), 400

    datasetJob = scheduler.get_job(form.dataset.data)
    if not datasetJob:
        raise werkzeug.exceptions.BadRequest(
                'Unknown dataset job_id "%s"' % form.dataset.data)

    job = None
    try:
        job = GenericImageModelJob(
                name        = form.model_name.data,
                dataset_id  = datasetJob.id(),
                )

        # get framework (hard-coded to caffe for now)
        fw = frameworks.get_framework_by_id('caffe')

        pretrained_model = None
        #if form.method.data == 'standard':
        if form.method.data == 'previous':
            old_job = scheduler.get_job(form.previous_networks.data)
            if not old_job:
                raise werkzeug.exceptions.BadRequest(
                        'Job not found: %s' % form.previous_networks.data)

            network = fw.get_network_from_previous(old_job.train_task().network)

            for choice in form.previous_networks.choices:
                if choice[0] == form.previous_networks.data:
                    epoch = float(flask.request.form['%s-snapshot' % form.previous_networks.data])
                    if epoch == 0:
                        pass
                    elif epoch == -1:
                        pretrained_model = old_job.train_task().pretrained_model
                    else:
                        for filename, e in old_job.train_task().snapshots:
                            if e == epoch:
                                pretrained_model = filename
                                break

                        if pretrained_model is None:
                            raise werkzeug.exceptions.BadRequest(
                                    "For the job %s, selected pretrained_model for epoch %d is invalid!"
                                    % (form.previous_networks.data, epoch))
                        if not (os.path.exists(pretrained_model)):
                            raise werkzeug.exceptions.BadRequest(
                                    "Pretrained_model for the selected epoch doesn't exists. May be deleted by another user/process. Please restart the server to load the correct pretrained_model details")
                    break

        elif form.method.data == 'custom':
            network = fw.get_network_from_desc(form.custom_network.data)
            pretrained_model = form.custom_network_snapshot.data.strip()
        else:
            raise werkzeug.exceptions.BadRequest(
                    'Unrecognized method: "%s"' % form.method.data)

        policy = {'policy': form.lr_policy.data}
        if form.lr_policy.data == 'fixed':
            pass
        elif form.lr_policy.data == 'step':
            policy['stepsize'] = form.lr_step_size.data
            policy['gamma'] = form.lr_step_gamma.data
        elif form.lr_policy.data == 'multistep':
            policy['stepvalue'] = form.lr_multistep_values.data
            policy['gamma'] = form.lr_multistep_gamma.data
        elif form.lr_policy.data == 'exp':
            policy['gamma'] = form.lr_exp_gamma.data
        elif form.lr_policy.data == 'inv':
            policy['gamma'] = form.lr_inv_gamma.data
            policy['power'] = form.lr_inv_power.data
        elif form.lr_policy.data == 'poly':
            policy['power'] = form.lr_poly_power.data
        elif form.lr_policy.data == 'sigmoid':
            policy['stepsize'] = form.lr_sigmoid_step.data
            policy['gamma'] = form.lr_sigmoid_gamma.data
        else:
            raise werkzeug.exceptions.BadRequest(
                    'Invalid learning rate policy')

        if config_value('caffe_root')['multi_gpu']:
            if form.select_gpu_count.data:
                gpu_count = form.select_gpu_count.data
                selected_gpus = None
            else:
                selected_gpus = [str(gpu) for gpu in form.select_gpus.data]
                gpu_count = None
        else:
            if form.select_gpu.data == 'next':
                gpu_count = 1
                selected_gpus = None
            else:
                selected_gpus = [str(form.select_gpu.data)]
                gpu_count = None

        job.tasks.append(fw.create_train_task(
                    job_dir         = job.dir(),
                    dataset         = datasetJob,
                    train_epochs    = form.train_epochs.data,
                    snapshot_interval   = form.snapshot_interval.data,
                    learning_rate   = form.learning_rate.data,
                    lr_policy       = policy,
                    gpu_count       = gpu_count,
                    selected_gpus   = selected_gpus,
                    batch_size      = form.batch_size.data,
                    val_interval    = form.val_interval.data,
                    pretrained_model= pretrained_model,
                    crop_size       = form.crop_size.data,
                    use_mean        = bool(form.use_mean.data),
                    network         = network,
                    random_seed     = form.random_seed.data,
                    solver_type     = form.solver_type.data,
                    )
                )

        scheduler.add_job(job)
        if request_wants_json():
            return flask.jsonify(job.json_dict())
        else:
            return flask.redirect(flask.url_for('models_show', job_id=job.id()))

    except:
        if job:
            scheduler.delete_job(job)
        raise
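The learning-rate policy branch, which repeats in each of these create views, simply translates the selected policy into the parameter dict handed to the training task. For instance, a 'step' policy would produce a dict like the one below (the numeric values are illustrative only, not defaults from the form):

# hypothetical form values
lr_policy = 'step'
lr_step_size = 33       # illustrative step size
lr_step_gamma = 0.1     # illustrative decay factor

policy = {'policy': lr_policy}
if lr_policy == 'fixed':
    pass
elif lr_policy == 'step':
    policy['stepsize'] = lr_step_size
    policy['gamma'] = lr_step_gamma

print(policy)   # dict with 'policy', 'stepsize' and 'gamma' keys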
Example #38
def create():
    """
    Creates a new GenericImageDatasetJob

    Returns JSON when requested: {job_id,name,status} or {errors:[]}
    """
    form = GenericImageDatasetForm()

    ## Is there a request to clone a job with ?clone=<job_id>
    fill_form_if_cloned(form)

    if not form.validate_on_submit():
        if request_wants_json():
            return flask.jsonify({'errors': form.errors}), 400
        else:
            return flask.render_template('datasets/images/generic/new.html', form=form), 400

    job = None
    try:
        job = GenericImageDatasetJob(
                username    = utils.auth.get_username(),
                name        = form.dataset_name.data,
                mean_file   = form.prebuilt_mean_file.data.strip(),
                )

        if form.method.data == 'prebuilt':
            pass
        else:
            raise ValueError('method not supported')

        force_same_shape = form.force_same_shape.data

        job.tasks.append(
                tasks.AnalyzeDbTask(
                    job_dir     = job.dir(),
                    database    = form.prebuilt_train_images.data,
                    purpose     = form.prebuilt_train_images.label.text,
                    force_same_shape = force_same_shape,
                    )
                )

        if form.prebuilt_train_labels.data:
            job.tasks.append(
                    tasks.AnalyzeDbTask(
                        job_dir     = job.dir(),
                        database    = form.prebuilt_train_labels.data,
                        purpose     = form.prebuilt_train_labels.label.text,
                        force_same_shape = force_same_shape,
                        )
                    )

        if form.prebuilt_val_images.data:
            job.tasks.append(
                    tasks.AnalyzeDbTask(
                        job_dir     = job.dir(),
                        database    = form.prebuilt_val_images.data,
                        purpose     = form.prebuilt_val_images.label.text,
                        force_same_shape = force_same_shape,
                        )
                    )
            if form.prebuilt_val_labels.data:
                job.tasks.append(
                        tasks.AnalyzeDbTask(
                            job_dir     = job.dir(),
                            database    = form.prebuilt_val_labels.data,
                            purpose     = form.prebuilt_val_labels.label.text,
                            force_same_shape = force_same_shape,
                            )
                        )

        ## Save form data with the job so we can easily clone it later.
        save_form_to_job(job, form)

        scheduler.add_job(job)

        if request_wants_json():
            return flask.jsonify(job.json_dict())
        else:
            return flask.redirect(flask.url_for('digits.dataset.views.show', job_id=job.id()))

    except:
        if job:
            scheduler.delete_job(job)
        raise
Example #39
File: views.py Project: klqulei/DIGITS
def create():
    """
    Create a new ImageClassificationModelJob

    Returns JSON when requested: {job_id,name,status} or {errors:[]}
    """
    form = ImageClassificationModelForm()
    form.dataset.choices = get_datasets()
    form.standard_networks.choices = get_standard_networks()
    form.standard_networks.default = get_default_standard_network()
    form.previous_networks.choices = get_previous_networks()

    prev_network_snapshots = get_previous_network_snapshots()

    ## Is there a request to clone a job with ?clone=<job_id>
    fill_form_if_cloned(form)

    if not form.validate_on_submit():
        if request_wants_json():
            return flask.jsonify({'errors': form.errors}), 400
        else:
            return flask.render_template('models/images/classification/new.html',
                    form = form,
                    frameworks = frameworks.get_frameworks(),
                    previous_network_snapshots = prev_network_snapshots,
                    previous_networks_fullinfo = get_previous_networks_fulldetails(),
                    multi_gpu = config_value('caffe_root')['multi_gpu'],
                    ), 400

    datasetJob = scheduler.get_job(form.dataset.data)
    if not datasetJob:
        raise werkzeug.exceptions.BadRequest(
                'Unknown dataset job_id "%s"' % form.dataset.data)

    job = None
    try:
        job = ImageClassificationModelJob(
                username    = utils.auth.get_username(),
                name        = form.model_name.data,
                dataset_id  = datasetJob.id(),
                )
        # get handle to framework object
        fw = frameworks.get_framework_by_id(form.framework.data)

        pretrained_model = None
        if form.method.data == 'standard':
            found = False

            # can we find it in standard networks?
            network_desc = fw.get_standard_network_desc(form.standard_networks.data)
            if network_desc:
                found = True
                network = fw.get_network_from_desc(network_desc)

            if not found:
                raise werkzeug.exceptions.BadRequest(
                        'Unknown standard model "%s"' % form.standard_networks.data)
        elif form.method.data == 'previous':
            old_job = scheduler.get_job(form.previous_networks.data)
            if not old_job:
                raise werkzeug.exceptions.BadRequest(
                        'Job not found: %s' % form.previous_networks.data)

            use_same_dataset = (old_job.dataset_id == job.dataset_id)
            network = fw.get_network_from_previous(old_job.train_task().network, use_same_dataset)

            for choice in form.previous_networks.choices:
                if choice[0] == form.previous_networks.data:
                    epoch = float(flask.request.form['%s-snapshot' % form.previous_networks.data])
                    if epoch == 0:
                        pass
                    elif epoch == -1:
                        pretrained_model = old_job.train_task().pretrained_model
                    else:
                        for filename, e in old_job.train_task().snapshots:
                            if e == epoch:
                                pretrained_model = filename
                                break

                        if pretrained_model is None:
                            raise werkzeug.exceptions.BadRequest(
                                    "For the job %s, selected pretrained_model for epoch %d is invalid!"
                                    % (form.previous_networks.data, epoch))
                        if not (os.path.exists(pretrained_model)):
                            raise werkzeug.exceptions.BadRequest(
                                    "Pretrained_model for the selected epoch doesn't exists. May be deleted by another user/process. Please restart the server to load the correct pretrained_model details")
                    break

        elif form.method.data == 'custom':
            network = fw.get_network_from_desc(form.custom_network.data)
            pretrained_model = form.custom_network_snapshot.data.strip()
        else:
            raise werkzeug.exceptions.BadRequest(
                    'Unrecognized method: "%s"' % form.method.data)

        policy = {'policy': form.lr_policy.data}
        if form.lr_policy.data == 'fixed':
            pass
        elif form.lr_policy.data == 'step':
            policy['stepsize'] = form.lr_step_size.data
            policy['gamma'] = form.lr_step_gamma.data
        elif form.lr_policy.data == 'multistep':
            policy['stepvalue'] = form.lr_multistep_values.data
            policy['gamma'] = form.lr_multistep_gamma.data
        elif form.lr_policy.data == 'exp':
            policy['gamma'] = form.lr_exp_gamma.data
        elif form.lr_policy.data == 'inv':
            policy['gamma'] = form.lr_inv_gamma.data
            policy['power'] = form.lr_inv_power.data
        elif form.lr_policy.data == 'poly':
            policy['power'] = form.lr_poly_power.data
        elif form.lr_policy.data == 'sigmoid':
            policy['stepsize'] = form.lr_sigmoid_step.data
            policy['gamma'] = form.lr_sigmoid_gamma.data
        else:
            raise werkzeug.exceptions.BadRequest(
                    'Invalid learning rate policy')

        if config_value('caffe_root')['multi_gpu']:
            if form.select_gpus.data:
                selected_gpus = [str(gpu) for gpu in form.select_gpus.data]
                gpu_count = None
            elif form.select_gpu_count.data:
                gpu_count = form.select_gpu_count.data
                selected_gpus = None
            else:
                gpu_count = 1
                selected_gpus = None
        else:
            if form.select_gpu.data == 'next':
                gpu_count = 1
                selected_gpus = None
            else:
                selected_gpus = [str(form.select_gpu.data)]
                gpu_count = None

        # Python Layer File may be on the server or copied from the client.
        fs.copy_python_layer_file(
            bool(form.python_layer_from_client.data),
            job.dir(),
            (flask.request.files[form.python_layer_client_file.name]
             if form.python_layer_client_file.name in flask.request.files
             else ''), form.python_layer_server_file.data)

        job.tasks.append(fw.create_train_task(
                    job_dir         = job.dir(),
                    dataset         = datasetJob,
                    train_epochs    = form.train_epochs.data,
                    snapshot_interval   = form.snapshot_interval.data,
                    learning_rate   = form.learning_rate.data,
                    lr_policy       = policy,
                    gpu_count       = gpu_count,
                    selected_gpus   = selected_gpus,
                    batch_size      = form.batch_size.data,
                    val_interval    = form.val_interval.data,
                    pretrained_model= pretrained_model,
                    crop_size       = form.crop_size.data,
                    use_mean        = form.use_mean.data,
                    network         = network,
                    random_seed     = form.random_seed.data,
                    solver_type     = form.solver_type.data,
                    shuffle         = form.shuffle.data,
                    )
                )

        ## Save form data with the job so we can easily clone it later.
        save_form_to_job(job, form)

        scheduler.add_job(job)
        if request_wants_json():
            return flask.jsonify(job.json_dict())
        else:
            return flask.redirect(flask.url_for('digits.model.views.show', job_id=job.id()))

    except:
        if job:
            scheduler.delete_job(job)
        raise
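Like the other create views, this endpoint answers with {job_id, name, status} as JSON when the client asks for JSON, or 400 with {errors: [...]} if validation fails. A hedged client-side sketch using the requests library; the URL, route and form field values are assumptions about a locally running DIGITS server, not taken from this snippet:

import requests

# hypothetical server address and form values
url = 'http://localhost:5000/models/images/classification'
payload = {
    'model_name': 'my-alexnet',
    'dataset': '20160107-094556-e604',   # an existing dataset job_id
    'method': 'standard',
    'standard_networks': 'alexnet',
    'train_epochs': 30,
}

resp = requests.post(url, data=payload, headers={'Accept': 'application/json'})
print(resp.status_code)   # 200 with {job_id,name,status}, or 400 with {errors: [...]}
print(resp.json())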
Example #40
File: views.py Project: klqulei/DIGITS
def classify_many():
    """
    Classify many images and return the top 5 classifications for each

    Returns JSON when requested: {classifications: {filename: [[category,confidence],...],...}}
    """
    model_job = job_from_request()

    image_list = flask.request.files.get('image_list')
    if not image_list:
        raise werkzeug.exceptions.BadRequest('image_list is a required field')

    if 'image_folder' in flask.request.form and flask.request.form['image_folder'].strip():
        image_folder = flask.request.form['image_folder']
        if not os.path.exists(image_folder):
            raise werkzeug.exceptions.BadRequest('image_folder "%s" does not exist' % image_folder)
    else:
        image_folder = None

    if 'num_test_images' in flask.request.form and flask.request.form['num_test_images'].strip():
        num_test_images = int(flask.request.form['num_test_images'])
    else:
        num_test_images = None

    epoch = None
    if 'snapshot_epoch' in flask.request.form:
        epoch = float(flask.request.form['snapshot_epoch'])

    paths, ground_truths = read_image_list(image_list, image_folder, num_test_images)

    # create inference job
    inference_job = ImageInferenceJob(
                username    = utils.auth.get_username(),
                name        = "Classify Many Images",
                model       = model_job,
                images      = paths,
                epoch       = epoch,
                layers      = 'none'
                )

    # schedule tasks
    scheduler.add_job(inference_job)

    # wait for job to complete
    inference_job.wait_completion()

    # retrieve inference data
    inputs, outputs, _ = inference_job.get_data()

    # delete job
    scheduler.delete_job(inference_job)

    if outputs is not None and len(outputs) < 1:
        # an error occurred
        outputs = None

    if inputs is not None:
        # retrieve path and ground truth of images that were successfully processed
        paths = [paths[idx] for idx in inputs['ids']]
        ground_truths = [ground_truths[idx] for idx in inputs['ids']]

    classifications = None
    if outputs is not None:
        # convert to class probabilities for viewing
        last_output_name, last_output_data = outputs.items()[-1]
        if len(last_output_data) < 1:
            raise werkzeug.exceptions.BadRequest(
                    'Unable to classify any image from the file')

        scores = last_output_data
        # take top 5
        indices = (-scores).argsort()[:, :5]

        labels = model_job.train_task().get_labels()
        classifications = []
        for image_index, index_list in enumerate(indices):
            result = []
            for i in index_list:
                # `i` is a category in labels and also an index into scores
                result.append((labels[i], round(100.0*scores[image_index, i],2)))
            classifications.append(result)

        # replace ground truth indices with labels
        ground_truths = [labels[x] if x is not None and (0 <= x < len(labels)) else None for x in ground_truths]

    if request_wants_json():
        joined = dict(zip(paths, classifications))
        return flask.jsonify({'classifications': joined})
    else:
        return flask.render_template('models/images/classification/classify_many.html',
                model_job       = model_job,
                job             = inference_job,
                paths           = paths,
                classifications = classifications,
                show_ground_truth= not(ground_truths == [None]*len(ground_truths)),
                ground_truths   = ground_truths
                )
Example #41
def image_classification_model_create():
    """
    Create a new ImageClassificationModelJob

    Returns JSON when requested: {job_id,name,status} or {errors:[]}
    """
    form = ImageClassificationModelForm()
    form.dataset.choices = get_datasets()
    form.standard_networks.choices = get_standard_networks()
    form.standard_networks.default = get_default_standard_network()
    form.previous_networks.choices = get_previous_networks()

    prev_network_snapshots = get_previous_network_snapshots()

    if not form.validate_on_submit():
        if request_wants_json():
            return flask.jsonify({'errors': form.errors}), 400
        else:
            return flask.render_template(
                'models/images/classification/new.html',
                form=form,
                previous_network_snapshots=prev_network_snapshots,
                multi_gpu=config_value('caffe_root')['multi_gpu'],
            ), 400

    datasetJob = scheduler.get_job(form.dataset.data)
    if not datasetJob:
        raise werkzeug.exceptions.BadRequest('Unknown dataset job_id "%s"' %
                                             form.dataset.data)

    job = None
    try:
        job = ImageClassificationModelJob(
            name=form.model_name.data,
            dataset_id=datasetJob.id(),
        )

        network = caffe_pb2.NetParameter()
        pretrained_model = None
        if form.method.data == 'standard':
            found = False
            networks_dir = os.path.join(os.path.dirname(digits.__file__),
                                        'standard-networks')
            for filename in os.listdir(networks_dir):
                path = os.path.join(networks_dir, filename)
                if os.path.isfile(path):
                    match = re.match(
                        r'%s.prototxt' % form.standard_networks.data, filename)
                    if match:
                        with open(path) as infile:
                            text_format.Merge(infile.read(), network)
                        found = True
                        break
            if not found:
                raise werkzeug.exceptions.BadRequest(
                    'Unknown standard model "%s"' %
                    form.standard_networks.data)
        elif form.method.data == 'previous':
            old_job = scheduler.get_job(form.previous_networks.data)
            if not old_job:
                raise werkzeug.exceptions.BadRequest(
                    'Job not found: %s' % form.previous_networks.data)

            network.CopyFrom(old_job.train_task().network)
            # Rename the final layer
            # XXX making some assumptions about network architecture here
            ip_layers = [l for l in network.layer if l.type == 'InnerProduct']
            if len(ip_layers) > 0:
                ip_layers[-1].name = '%s_retrain' % ip_layers[-1].name

            for choice in form.previous_networks.choices:
                if choice[0] == form.previous_networks.data:
                    epoch = float(
                        flask.request.form['%s-snapshot' %
                                           form.previous_networks.data])
                    if epoch != 0:
                        for filename, e in old_job.train_task().snapshots:
                            if e == epoch:
                                pretrained_model = filename
                                break

                        if pretrained_model is None:
                            raise werkzeug.exceptions.BadRequest(
                                "For the job %s, selected pretrained_model for epoch %d is invalid!"
                                % (form.previous_networks.data, epoch))
                        if not (os.path.exists(pretrained_model)):
                            raise werkzeug.exceptions.BadRequest(
                                "Pretrained_model for the selected epoch doesn't exists. May be deleted by another user/process. Please restart the server to load the correct pretrained_model details"
                            )
                    break

        elif form.method.data == 'custom':
            text_format.Merge(form.custom_network.data, network)
            pretrained_model = form.custom_network_snapshot.data.strip()
        else:
            raise werkzeug.exceptions.BadRequest('Unrecognized method: "%s"' %
                                                 form.method.data)

        policy = {'policy': form.lr_policy.data}
        if form.lr_policy.data == 'fixed':
            pass
        elif form.lr_policy.data == 'step':
            policy['stepsize'] = form.lr_step_size.data
            policy['gamma'] = form.lr_step_gamma.data
        elif form.lr_policy.data == 'multistep':
            policy['stepvalue'] = form.lr_multistep_values.data
            policy['gamma'] = form.lr_multistep_gamma.data
        elif form.lr_policy.data == 'exp':
            policy['gamma'] = form.lr_exp_gamma.data
        elif form.lr_policy.data == 'inv':
            policy['gamma'] = form.lr_inv_gamma.data
            policy['power'] = form.lr_inv_power.data
        elif form.lr_policy.data == 'poly':
            policy['power'] = form.lr_poly_power.data
        elif form.lr_policy.data == 'sigmoid':
            policy['stepsize'] = form.lr_sigmoid_step.data
            policy['gamma'] = form.lr_sigmoid_gamma.data
        else:
            raise werkzeug.exceptions.BadRequest(
                'Invalid learning rate policy')

        if config_value('caffe_root')['multi_gpu']:
            if form.select_gpu_count.data:
                gpu_count = form.select_gpu_count.data
                selected_gpus = None
            else:
                selected_gpus = [str(gpu) for gpu in form.select_gpus.data]
                gpu_count = None
        else:
            if form.select_gpu.data == 'next':
                gpu_count = 1
                selected_gpus = None
            else:
                selected_gpus = [str(form.select_gpu.data)]
                gpu_count = None

        job.tasks.append(
            tasks.CaffeTrainTask(
                job_dir=job.dir(),
                dataset=datasetJob,
                train_epochs=form.train_epochs.data,
                snapshot_interval=form.snapshot_interval.data,
                learning_rate=form.learning_rate.data,
                lr_policy=policy,
                gpu_count=gpu_count,
                selected_gpus=selected_gpus,
                batch_size=form.batch_size.data,
                val_interval=form.val_interval.data,
                pretrained_model=pretrained_model,
                crop_size=form.crop_size.data,
                use_mean=bool(form.use_mean.data),
                network=network,
                random_seed=form.random_seed.data,
                solver_type=form.solver_type.data,
            ))

        scheduler.add_job(job)
        if request_wants_json():
            return flask.jsonify(job.json_dict())
        else:
            return flask.redirect(flask.url_for('models_show',
                                                job_id=job.id()))

    except:
        if job:
            scheduler.delete_job(job)
        raise
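The 'standard' branch in this variant looks for a matching .prototxt under standard-networks and merges it into a caffe_pb2.NetParameter with protobuf's text format. A minimal sketch of that loading step, assuming Caffe's Python bindings are installed and the path below points at a real prototxt:

import os

from google.protobuf import text_format
from caffe.proto import caffe_pb2   # requires Caffe's Python bindings

prototxt_path = os.path.join('standard-networks', 'alexnet.prototxt')   # hypothetical path

network = caffe_pb2.NetParameter()
with open(prototxt_path) as infile:
    text_format.Merge(infile.read(), network)

print(network.name)
print(len(network.layer))   # number of layers parsed from the prototxt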
Example #42
def image_classification_model_classify_many():
    """
    Classify many images and return the top 5 classifications for each

    Returns JSON when requested: {classifications: {filename: [[category,confidence],...],...}}
    """
    job = job_from_request()

    image_list = flask.request.files.get('image_list')
    if not image_list:
        raise werkzeug.exceptions.BadRequest('image_list is a required field')

    epoch = None
    if 'snapshot_epoch' in flask.request.form:
        epoch = float(flask.request.form['snapshot_epoch'])

    paths = []
    images = []
    dataset = job.train_task().dataset

    for line in image_list.readlines():
        line = line.strip()
        if not line:
            continue

        path = None
        # might contain a numerical label at the end
        match = re.match(r'(.*\S)\s+\d+$', line)
        if match:
            path = match.group(1)
        else:
            path = line

        try:
            image = utils.image.load_image(path)
            image = utils.image.resize_image(
                image,
                dataset.image_dims[0],
                dataset.image_dims[1],
                channels=dataset.image_dims[2],
                resize_mode=dataset.resize_mode,
            )
            paths.append(path)
            images.append(image)
        except utils.errors.LoadImageError as e:
            print e

    if not len(images):
        raise werkzeug.exceptions.BadRequest(
            'Unable to load any images from the file')

    labels, scores = job.train_task().infer_many(images, snapshot_epoch=epoch)
    if scores is None:
        raise RuntimeError('An error occurred while processing the images')

    # take top 5
    indices = (-scores).argsort()[:, :5]

    classifications = []
    for image_index, index_list in enumerate(indices):
        result = []
        for i in index_list:
            # `i` is a category in labels and also an index into scores
            result.append((labels[i], round(100.0 * scores[image_index, i],
                                            2)))
        classifications.append(result)

    if request_wants_json():
        joined = dict(zip(paths, classifications))
        return flask.jsonify({'classifications': joined})
    else:
        return flask.render_template(
            'models/images/classification/classify_many.html',
            paths=paths,
            classifications=classifications,
        )
Example #43
def home(dataset_id=None):
    """
    DIGITS home page
    Returns information about each job on the server

    Returns JSON when requested:
        {
            datasets: [{id, name, status},...],
            models: [{id, name, status},...]
        }
    """

    # Dataset Job
    if dataset_id is None:
        job_type = dataset.DatasetJob
        name = 'Dataset'
    # Model Job
    else:
        job_type = model.ModelJob
        name = 'Model'

    running_jobs = get_job_list(job_type, True, dataset_id)
    completed_jobs = get_job_list(job_type, False, dataset_id)

    if request_wants_json():
        data = {
            'version': digits.__version__,
            'jobs_dir': config_value('jobs_dir'),
            'job_type': name,
            'jobs': [j.json_dict() for j in running_jobs + completed_jobs],
        }
        if config_value('server_name'):
            data['server_name'] = config_value('server_name')
        return flask.jsonify(data)
    else:
        if dataset_id is None:
            name = 'Dataset'
            dataset_name = None
            options = [('New Dataset', [
                {
                    'title': 'Images',
                    'id': 'images',
                    'url': flask.url_for('image_classification_dataset_new'),
                },
                {
                    'title': 'Generic',
                    'id': 'generic',
                    'url': flask.url_for('generic_image_dataset_new'),
                },
            ])]
        else:
            dataset_name, dataset_type = get_dataset_name(dataset_id)
            if dataset_type == 'Image Classification Dataset':
                options = [('New Model', [
                    {
                        'title': 'Classification',
                        'id': 'classification',
                        'url': flask.url_for('image_classification_model_new',
                                             dataset_id=dataset_id),
                    },
                ])]
            elif dataset_type == 'Generic Image Dataset':
                options = [('New Model', [
                    {
                        'title': 'Generic',
                        'id': 'generic-classification',
                        'url': flask.url_for('generic_image_model_new',
                                             dataset_id=dataset_id),
                    },
                ])]

        return flask.render_template(
            'home.html',
            name=name,
            dataset_name=dataset_name,
            dataset_id=dataset_id,
            options=options,
            running_jobs=running_jobs,
            completed_jobs=completed_jobs,
            total_gpu_count=len(scheduler.resources['gpus']),
            remaining_gpu_count=sum(r.remaining()
                                    for r in scheduler.resources['gpus']),
        )
Example #44
def infer_db():
    """
    Infer a database
    """
    model_job = job_from_request()

    if 'db_path' not in flask.request.form or flask.request.form['db_path'] is None:
        raise werkzeug.exceptions.BadRequest('db_path is a required field')

    db_path = flask.request.form['db_path']

    if not os.path.exists(db_path):
        raise werkzeug.exceptions.BadRequest('DB "%s" does not exist' % db_path)

    epoch = None
    if 'snapshot_epoch' in flask.request.form:
        epoch = float(flask.request.form['snapshot_epoch'])

    # create inference job
    inference_job = ImageInferenceJob(
                username    = utils.auth.get_username(),
                name        = "Infer Many Images",
                model       = model_job,
                images      = db_path,
                epoch       = epoch,
                layers      = 'none',
                )

    # schedule tasks
    scheduler.add_job(inference_job)

    # wait for job to complete
    inference_job.wait_completion()

    # retrieve inference data
    inputs, outputs, _ = inference_job.get_data()

    # delete job folder and remove from scheduler list
    scheduler.delete_job(inference_job)

    if outputs is not None and len(outputs) < 1:
        # an error occurred
        outputs = None

    if inputs is not None:
        keys = [str(idx) for idx in inputs['ids']]
    else:
        keys = None

    if request_wants_json():
        result = {}
        for i, key in enumerate(keys):
            result[key] = dict((name, blob[i].tolist()) for name,blob in outputs.iteritems())
        return flask.jsonify({'outputs': result})
    else:
        return flask.render_template('models/images/generic/infer_db.html',
                model_job       = model_job,
                job             = inference_job,
                keys            = keys,
                network_outputs = outputs,
                )
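The JSON branch above converts each per-record output blob into a plain list so it can be serialized. A standalone sketch of that reshaping with dummy numpy outputs (the layer name and values are made up):

import numpy as np

keys = ['0', '1']                               # one key per input record
outputs = {'softmax': np.array([[0.9, 0.1],     # one row per record
                                [0.2, 0.8]])}

result = {}
for i, key in enumerate(keys):
    result[key] = dict((name, blob[i].tolist()) for name, blob in outputs.items())

print(result)   # {'0': {'softmax': [0.9, 0.1]}, '1': {'softmax': [0.2, 0.8]}}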
Example #45
def create():
    """
    Creates a new GenericImageDatasetJob

    Returns JSON when requested: {job_id,name,status} or {errors:[]}
    """
    form = GenericImageDatasetForm()

    # Is there a request to clone a job with ?clone=<job_id>
    fill_form_if_cloned(form)

    if not form.validate_on_submit():
        if request_wants_json():
            return flask.jsonify({'errors': form.errors}), 400
        else:
            return flask.render_template('datasets/images/generic/new.html',
                                         form=form), 400

    job = None
    try:
        job = GenericImageDatasetJob(
            username=utils.auth.get_username(),
            name=form.dataset_name.data,
            group=form.group_name.data,
            mean_file=form.prebuilt_mean_file.data.strip(),
        )

        if form.method.data == 'prebuilt':
            pass
        else:
            raise ValueError('method not supported')

        force_same_shape = form.force_same_shape.data

        job.tasks.append(
            tasks.AnalyzeDbTask(
                job_dir=job.dir(),
                database=form.prebuilt_train_images.data,
                purpose=form.prebuilt_train_images.label.text,
                force_same_shape=force_same_shape,
            ))

        if form.prebuilt_train_labels.data:
            job.tasks.append(
                tasks.AnalyzeDbTask(
                    job_dir=job.dir(),
                    database=form.prebuilt_train_labels.data,
                    purpose=form.prebuilt_train_labels.label.text,
                    force_same_shape=force_same_shape,
                ))

        if form.prebuilt_val_images.data:
            job.tasks.append(
                tasks.AnalyzeDbTask(
                    job_dir=job.dir(),
                    database=form.prebuilt_val_images.data,
                    purpose=form.prebuilt_val_images.label.text,
                    force_same_shape=force_same_shape,
                ))
            if form.prebuilt_val_labels.data:
                job.tasks.append(
                    tasks.AnalyzeDbTask(
                        job_dir=job.dir(),
                        database=form.prebuilt_val_labels.data,
                        purpose=form.prebuilt_val_labels.label.text,
                        force_same_shape=force_same_shape,
                    ))

        # Save form data with the job so we can easily clone it later.
        save_form_to_job(job, form)

        scheduler.add_job(job)

        if request_wants_json():
            return flask.jsonify(job.json_dict())
        else:
            return flask.redirect(
                flask.url_for('digits.dataset.views.show', job_id=job.id()))

    except:
        if job:
            scheduler.delete_job(job)
        raise
Example #46
def create(extension_id):
    """
    Creates a new GenericDatasetJob

    Returns JSON when requested: {job_id,name,status} or {errors:[]}
    """
    form = GenericDatasetForm()
    form_valid = form.validate_on_submit()

    extension_class = extensions.data.get_extension(extension_id)
    extension_form = extension_class.get_dataset_form()
    extension_form_valid = extension_form.validate_on_submit()

    if not (extension_form_valid and form_valid):
        # merge errors
        errors = form.errors.copy()
        errors.update(extension_form.errors)

        template, context = extension_class.get_dataset_template(
            extension_form)
        rendered_extension = flask.render_template_string(template, **context)

        if request_wants_json():
            return flask.jsonify({'errors': errors}), 400
        else:
            return flask.render_template(
                'datasets/generic/new.html',
                extension_title=extension_class.get_title(),
                extension_id=extension_id,
                extension_html=rendered_extension,
                form=form,
                errors=errors), 400

    # create instance of extension class
    extension = extension_class(**extension_form.data)

    job = None
    try:
        # create job
        job = GenericDatasetJob(
            username=utils.auth.get_username(),
            name=form.dataset_name.data,
            group=form.group_name.data,
            backend=form.dsopts_backend.data,
            feature_encoding=form.dsopts_feature_encoding.data,
            label_encoding=form.dsopts_label_encoding.data,
            batch_size=int(form.dsopts_batch_size.data),
            num_threads=int(form.dsopts_num_threads.data),
            force_same_shape=form.dsopts_force_same_shape.data,
            extension_id=extension_id,
            extension_userdata=extension.get_user_data(),
        )

        ## Save form data with the job so we can easily clone it later.
        utils.forms.save_form_to_job(job, form)
        utils.forms.save_form_to_job(job, extension_form)

        # schedule tasks
        scheduler.add_job(job)

        if request_wants_json():
            return flask.jsonify(job.json_dict())
        else:
            return flask.redirect(
                flask.url_for('digits.dataset.views.show', job_id=job.id()))

    except:
        if job:
            scheduler.delete_job(job)
        raise
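
One detail worth calling out in the view above: extension_class(**extension_form.data) works because a WTForms form's .data attribute is a plain dict of {field_name: value}, so every extension form field is handed to the extension constructor as a keyword argument and later echoed back through get_user_data(). A toy, self-contained illustration of that mechanism (the class and field names here are made up):

    # Illustrative only: mimics how the view feeds extension form data into a data extension.
    class ToyExtension(object):
        def __init__(self, **kwargs):
            # the view calls extension_class(**extension_form.data)
            self._userdata = kwargs

        def get_user_data(self):
            # stored on the job as extension_userdata
            return self._userdata

    form_data = {'image_folder': '/data/train', 'channels': 3}  # stand-in for extension_form.data
    ext = ToyExtension(**form_data)
    assert ext.get_user_data() == form_data
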
Example #47
0
File: views.py Project: hieuvodoi/digits_7
def home(tab=2):
    """
    DIGITS home page
    Returns information about each job on the server

    Returns JSON when requested:
        {
            datasets: [{id, name, status},...],
            models: [{id, name, status},...]
        }
    """
    running_datasets = get_job_list(dataset.DatasetJob, True)
    completed_datasets = get_job_list(dataset.DatasetJob, False)
    running_models = get_job_list(model.ModelJob, True)
    completed_models = get_job_list(model.ModelJob, False)

    if request_wants_json():
        data = {
            'version':
            digits.__version__,
            'jobs_dir':
            config_value('jobs_dir'),
            'datasets':
            [j.json_dict() for j in running_datasets + completed_datasets],
            'models':
            [j.json_dict() for j in running_models + completed_models],
        }
        if config_value('server_name'):
            data['server_name'] = config_value('server_name')
        return flask.jsonify(data)
    else:
        new_dataset_options = {
            'Images': {
                'image-classification': {
                    'title':
                    'Classification',
                    'url':
                    flask.url_for(
                        'digits.dataset.images.classification.views.new'),
                },
                'image-other': {
                    'title':
                    'Other',
                    'url':
                    flask.url_for('digits.dataset.images.generic.views.new'),
                },
            },
        }

        new_model_options = {
            'Images': {
                'image-classification': {
                    'title':
                    'Classification',
                    'url':
                    flask.url_for(
                        'digits.model.images.classification.views.new'),
                },
                'image-other': {
                    'title': 'Other',
                    'url':
                    flask.url_for('digits.model.images.generic.views.new'),
                },
            },
        }

        load_model_options = {
            'Images': {
                'pretrained-model': {
                    'title': 'Upload Pretrained Model',
                    'id': 'uploadPretrainedModel',
                    'url': flask.url_for('digits.pretrained_model.views.new'),
                },
                'access-model-store': {
                    'title': 'Retrieve from Model Store',
                    'id': 'retrieveModelStore',
                    'url': flask.url_for('digits.store.views.store'),
                }
            },
        }

        # add dataset options for known dataset extensions
        data_extensions = extensions.data.get_extensions()
        for extension in data_extensions:
            ext_category = extension.get_category()
            ext_title = extension.get_title()
            ext_title = ext_title[:21] + ' ..' if len(
                ext_title) > 21 else ext_title
            ext_id = extension.get_id()
            if ext_category not in new_dataset_options:
                new_dataset_options[ext_category] = {}
            new_dataset_options[ext_category][ext_id] = {
                'title':
                ext_title,
                'url':
                flask.url_for('digits.dataset.generic.views.new',
                              extension_id=ext_id),
            }
            if ext_category not in new_model_options:
                new_model_options[ext_category] = {}
            new_model_options[ext_category][ext_id] = {
                'title':
                ext_title,
                'url':
                flask.url_for('digits.model.images.generic.views.new',
                              extension_id=ext_id),
            }

        return flask.render_template(
            'home.html',
            tab=tab,
            new_dataset_options=new_dataset_options,
            running_datasets=running_datasets,
            completed_datasets=completed_datasets,
            new_model_options=new_model_options,
            running_models=running_models,
            completed_models=completed_models,
            load_model_options=load_model_options,
            total_gpu_count=len(scheduler.resources['gpus']),
            remaining_gpu_count=sum(r.remaining()
                                    for r in scheduler.resources['gpus']),
        )
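
A minimal sketch of consuming the JSON branch of home(). The /index.json path and the localhost:5000 address are assumptions; the response keys (version, jobs_dir, datasets, models, optional server_name) come from the snippet above.

    # Hypothetical client sketch for the JSON branch of home().
    import requests

    r = requests.get('http://localhost:5000/index.json')   # assumed JSON route
    r.raise_for_status()
    info = r.json()
    print('DIGITS %s, jobs_dir: %s' % (info['version'], info['jobs_dir']))
    for entry in info['datasets'] + info['models']:
        # each entry comes from Job.json_dict(): {id, name, status, ...}
        print('%(status)s  %(name)s' % entry)
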
Example #48
0
def classify_many():
    """
    Classify many images and return the top 5 classifications for each

    Returns JSON when requested: {classifications: {filename: [[category,confidence],...],...}}
    """
    job = job_from_request()

    image_list = flask.request.files.get('image_list')
    if not image_list:
        raise werkzeug.exceptions.BadRequest('image_list is a required field')

    if 'image_folder' in flask.request.form and flask.request.form[
            'image_folder'].strip():
        image_folder = flask.request.form['image_folder']
        if not os.path.exists(image_folder):
            raise werkzeug.exceptions.BadRequest(
                'image_folder "%s" does not exist' % image_folder)
    else:
        image_folder = None

    epoch = None
    if 'snapshot_epoch' in flask.request.form:
        epoch = float(flask.request.form['snapshot_epoch'])

    paths = []
    images = []
    ground_truths = []
    dataset = job.train_task().dataset

    for line in image_list.readlines():
        line = line.strip()
        if not line:
            continue

        path = None
        # might contain a numerical label at the end
        match = re.match(r'(.*\S)\s+(\d+)$', line)
        if match:
            path = match.group(1)
            ground_truth = int(match.group(2))
        else:
            path = line
            ground_truth = None

        try:
            if not utils.is_url(path) and image_folder and not os.path.isabs(
                    path):
                path = os.path.join(image_folder, path)
            image = utils.image.load_image(path)
            image = utils.image.resize_image(
                image,
                dataset.image_dims[0],
                dataset.image_dims[1],
                channels=dataset.image_dims[2],
                resize_mode=dataset.resize_mode,
            )
            paths.append(path)
            images.append(image)
            ground_truths.append(ground_truth)
        except utils.errors.LoadImageError as e:
            print e

    if not len(images):
        raise werkzeug.exceptions.BadRequest(
            'Unable to load any images from the file')

    labels, scores = job.train_task().infer_many(images, snapshot_epoch=epoch)
    if scores is None:
        raise RuntimeError('An error occurred while processing the images')

    # take top 5
    indices = (-scores).argsort()[:, :5]

    classifications = []
    for image_index, index_list in enumerate(indices):
        result = []
        for i in index_list:
            # `i` is a category in labels and also an index into scores
            result.append((labels[i], round(100.0 * scores[image_index, i],
                                            2)))
        classifications.append(result)

    # replace ground truth indices with labels
    ground_truths = [
        labels[x] if x is not None and (0 <= x < len(labels)) else None
        for x in ground_truths
    ]

    if request_wants_json():
        joined = dict(zip(paths, classifications))
        return flask.jsonify({'classifications': joined})
    else:
        return flask.render_template(
            'models/images/classification/classify_many.html',
            job=job,
            paths=paths,
            classifications=classifications,
            show_ground_truth=not (ground_truths
                                   == [None] * len(ground_truths)),
            ground_truths=ground_truths)
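
The regex in the loop above defines the list-file format: one image path or URL per line, optionally followed by whitespace and an integer ground-truth label. A hedged client sketch, in which the endpoint path and the job_id field name are assumptions:

    # Hypothetical client sketch for classify_many.
    import requests

    list_text = (
        'cats/001.jpg 0\n'                    # path + optional integer label
        'dogs/042.jpg 1\n'
        'http://example.com/bird.png\n'       # URLs are allowed; no label here
    )

    r = requests.post(
        'http://localhost:5000/models/images/classification/classify_many.json',  # assumed route
        data={'job_id': 'MODEL-JOB-ID', 'image_folder': '/data/images'},
        files={'image_list': ('list.txt', list_text)},
    )
    # per the docstring: {'classifications': {path: [[label, confidence], ...], ...}}
    print(r.json())
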
Example #49
0
File: views.py Project: yyyreal/DIGITS
def generic_image_dataset_create():
    """
    Creates a new GenericImageDatasetJob

    Returns JSON when requested: {job_id,name,status} or {errors:[]}
    """
    form = GenericImageDatasetForm()
    if not form.validate_on_submit():
        if request_wants_json():
            return flask.jsonify({'errors': form.errors}), 400
        else:
            return flask.render_template('datasets/images/generic/new.html',
                                         form=form), 400

    job = None
    try:
        job = GenericImageDatasetJob(
            name=form.dataset_name.data,
            mean_file=form.prebuilt_mean_file.data.strip(),
        )

        if form.method.data == 'prebuilt':
            pass
        else:
            raise ValueError('method not supported')

        job.tasks.append(
            tasks.AnalyzeDbTask(
                job_dir=job.dir(),
                database=form.prebuilt_train_images.data,
                purpose=form.prebuilt_train_images.label.text,
            ))

        if form.prebuilt_train_labels.data:
            job.tasks.append(
                tasks.AnalyzeDbTask(
                    job_dir=job.dir(),
                    database=form.prebuilt_train_labels.data,
                    purpose=form.prebuilt_train_labels.label.text,
                ))

        if form.prebuilt_val_images.data:
            job.tasks.append(
                tasks.AnalyzeDbTask(
                    job_dir=job.dir(),
                    database=form.prebuilt_val_images.data,
                    purpose=form.prebuilt_val_images.label.text,
                ))
            if form.prebuilt_val_labels.data:
                job.tasks.append(
                    tasks.AnalyzeDbTask(
                        job_dir=job.dir(),
                        database=form.prebuilt_val_labels.data,
                        purpose=form.prebuilt_val_labels.label.text,
                    ))

        scheduler.add_job(job)

        if request_wants_json():
            return flask.jsonify(job.json_dict())
        else:
            return flask.redirect(
                flask.url_for('datasets_show', job_id=job.id()))

    except:
        if job:
            scheduler.delete_job(job)
        raise
Example #50
0
File: views.py Project: zjucsxxd/DIGITS
def infer_one():
    """
    Infer one image
    """
    model_job = job_from_request()

    remove_image_path = False
    if 'image_path' in flask.request.form and flask.request.form['image_path']:
        image_path = flask.request.form['image_path']
    elif 'image_file' in flask.request.files and flask.request.files[
            'image_file']:
        outfile = tempfile.mkstemp(suffix='.bin')
        flask.request.files['image_file'].save(outfile[1])
        image_path = outfile[1]
        os.close(outfile[0])
        remove_image_path = True
    else:
        raise werkzeug.exceptions.BadRequest(
            'must provide image_path or image_file')

    epoch = None
    if 'snapshot_epoch' in flask.request.form:
        epoch = float(flask.request.form['snapshot_epoch'])

    layers = 'none'
    if 'show_visualizations' in flask.request.form and flask.request.form[
            'show_visualizations']:
        layers = 'all'

    if 'dont_resize' in flask.request.form and flask.request.form[
            'dont_resize']:
        resize = False
    else:
        resize = True

    # create inference job
    inference_job = ImageInferenceJob(
        username=utils.auth.get_username(),
        name="Infer One Image",
        model=model_job,
        images=[image_path],
        epoch=epoch,
        layers=layers,
        resize=resize,
    )

    # schedule tasks
    scheduler.add_job(inference_job)

    # wait for job to complete
    inference_job.wait_completion()

    # retrieve inference data
    inputs, outputs, model_visualization = inference_job.get_data()

    # set return status code
    status_code = 500 if inference_job.status == 'E' else 200

    # delete job folder and remove from scheduler list
    scheduler.delete_job(inference_job)

    if remove_image_path:
        os.remove(image_path)

    if inputs is not None and len(inputs['data']) == 1:
        image = utils.image.embed_image_html(inputs['data'][0])
        visualizations, header_html, app_begin_html, app_end_html = get_inference_visualizations(
            model_job.dataset, inputs, outputs)
        inference_view_html = visualizations[0]
    else:
        image = None
        inference_view_html = None
        header_html = None
        app_begin_html = None
        app_end_html = None

    if request_wants_json():
        return flask.jsonify({
            'outputs':
            dict((name, blob.tolist()) for name, blob in outputs.iteritems())
        }), status_code
    else:
        return flask.render_template(
            'models/images/generic/infer_one.html',
            model_job=model_job,
            job=inference_job,
            image_src=image,
            inference_view_html=inference_view_html,
            header_html=header_html,
            app_begin_html=app_begin_html,
            app_end_html=app_end_html,
            visualizations=model_visualization,
            total_parameters=sum(v['param_count'] for v in model_visualization
                                 if v['vis_type'] == 'Weights'),
        ), status_code
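
A minimal sketch of calling infer_one with an uploaded file. The route and the job_id field name are assumptions; image_file, snapshot_epoch, the 'outputs' response key, and the 500 status on a failed inference job all come from the snippet above.

    # Hypothetical client sketch for infer_one.
    import requests

    with open('sample.png', 'rb') as f:
        r = requests.post(
            'http://localhost:5000/models/images/generic/infer_one.json',  # assumed route
            data={'job_id': 'MODEL-JOB-ID', 'snapshot_epoch': '30'},
            files={'image_file': f},
        )

    if r.status_code == 500:
        raise RuntimeError('inference job failed')
    outputs = r.json()['outputs']   # {blob_name: nested list of values}
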
Example #51
0
File: views.py Project: iwalkdaline/DIGITS
def home():
    """
    DIGITS home page
    Returns information about each job on the server

    Returns JSON when requested:
        {
            datasets: [{id, name, status},...],
            models: [{id, name, status},...]
        }
    """
    running_datasets = get_job_list(dataset.DatasetJob, True)
    completed_datasets = get_job_list(dataset.DatasetJob, False)
    running_models = get_job_list(model.ModelJob, True)
    completed_models = get_job_list(model.ModelJob, False)

    if request_wants_json():
        data = {
            'version':
            digits.__version__,
            'jobs_dir':
            config_value('jobs_dir'),
            'datasets':
            [j.json_dict() for j in running_datasets + completed_datasets],
            'models':
            [j.json_dict() for j in running_models + completed_models],
        }
        if config_value('server_name'):
            data['server_name'] = config_value('server_name')
        return flask.jsonify(data)
    else:
        new_dataset_options = [('Images', [
            {
                'title': 'Classification',
                'id': 'image-classification',
                'url': flask.url_for('image_classification_dataset_new'),
            },
            {
                'title': 'Other',
                'id': 'image-generic',
                'url': flask.url_for('generic_image_dataset_new'),
            },
        ])]
        new_model_options = [('Images', [
            {
                'title': 'Classification',
                'id': 'image-classification',
                'url': flask.url_for('image_classification_model_new'),
            },
            {
                'title': 'Other',
                'id': 'image-generic',
                'url': flask.url_for('generic_image_model_new'),
            },
        ])]

        return flask.render_template(
            'home.html',
            new_dataset_options=new_dataset_options,
            running_datasets=running_datasets,
            completed_datasets=completed_datasets,
            new_model_options=new_model_options,
            running_models=running_models,
            completed_models=completed_models,
            total_gpu_count=len(scheduler.resources['gpus']),
            remaining_gpu_count=sum(r.remaining()
                                    for r in scheduler.resources['gpus']),
        )
Example #52
0
File: views.py Project: zjucsxxd/DIGITS
def infer_db():
    """
    Infer a database
    """
    model_job = job_from_request()

    if 'db_path' not in flask.request.form or flask.request.form[
            'db_path'] is None:
        raise werkzeug.exceptions.BadRequest('db_path is a required field')

    db_path = flask.request.form['db_path']

    if not os.path.exists(db_path):
        raise werkzeug.exceptions.BadRequest('DB "%s" does not exist' % db_path)

    epoch = None
    if 'snapshot_epoch' in flask.request.form:
        epoch = float(flask.request.form['snapshot_epoch'])

    if 'dont_resize' in flask.request.form and flask.request.form[
            'dont_resize']:
        resize = False
    else:
        resize = True

    # create inference job
    inference_job = ImageInferenceJob(
        username=utils.auth.get_username(),
        name="Infer Many Images",
        model=model_job,
        images=db_path,
        epoch=epoch,
        layers='none',
        resize=resize,
    )

    # schedule tasks
    scheduler.add_job(inference_job)

    # wait for job to complete
    inference_job.wait_completion()

    # retrieve inference data
    inputs, outputs, _ = inference_job.get_data()

    # set return status code
    status_code = 500 if inference_job.status == 'E' else 200

    # delete job folder and remove from scheduler list
    scheduler.delete_job(inference_job)

    if outputs is not None and len(outputs) < 1:
        # an error occurred
        outputs = None

    if inputs is not None:
        keys = [str(idx) for idx in inputs['ids']]
        inference_views_html, header_html, app_begin_html, app_end_html = get_inference_visualizations(
            model_job.dataset, inputs, outputs)
    else:
        inference_views_html = None
        header_html = None
        keys = None
        app_begin_html = None
        app_end_html = None

    if request_wants_json():
        result = {}
        for i, key in enumerate(keys):
            result[key] = dict(
                (name, blob[i].tolist()) for name, blob in outputs.iteritems())
        return flask.jsonify({'outputs': result}), status_code
    else:
        return flask.render_template(
            'models/images/generic/infer_db.html',
            model_job=model_job,
            job=inference_job,
            keys=keys,
            inference_views_html=inference_views_html,
            header_html=header_html,
            app_begin_html=app_begin_html,
            app_end_html=app_end_html,
        ), status_code
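
The same pattern works against a whole database: db_path must exist on the server's filesystem, dont_resize (any non-empty value) disables resizing, and the JSON result is keyed by the record ids collected from inputs['ids'] above. The route and the job_id field name are assumptions.

    # Hypothetical client sketch for infer_db.
    import requests

    r = requests.post(
        'http://localhost:5000/models/images/generic/infer_db.json',   # assumed route
        data={
            'job_id': 'MODEL-JOB-ID',
            'db_path': '/data/val_db',    # must exist on the server machine
            'dont_resize': '1',
        },
    )
    outputs = r.json()['outputs']         # {record_id: {blob_name: values, ...}, ...}
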
Example #53
0
File: views.py Project: bygreencn/DIGITS
def infer_many():
    """
    Infer many images
    """
    model_job = job_from_request()

    image_list = flask.request.files.get('image_list')
    if not image_list:
        raise werkzeug.exceptions.BadRequest('image_list is a required field')

    if 'image_folder' in flask.request.form and flask.request.form['image_folder'].strip():
        image_folder = flask.request.form['image_folder']
        if not os.path.exists(image_folder):
            raise werkzeug.exceptions.BadRequest('image_folder "%s" does not exist' % image_folder)
    else:
        image_folder = None

    if 'num_test_images' in flask.request.form and flask.request.form['num_test_images'].strip():
        num_test_images = int(flask.request.form['num_test_images'])
    else:
        num_test_images = None

    epoch = None
    if 'snapshot_epoch' in flask.request.form:
        epoch = float(flask.request.form['snapshot_epoch'])

    if 'dont_resize' in flask.request.form and flask.request.form['dont_resize']:
        resize = False
    else:
        resize = True

    paths = []

    for line in image_list.readlines():
        line = line.strip()
        if not line:
            continue

        path = None
        # might contain a numerical label at the end
        match = re.match(r'(.*\S)\s+\d+$', line)
        if match:
            path = match.group(1)
        else:
            path = line

        if not utils.is_url(path) and image_folder and not os.path.isabs(path):
            path = os.path.join(image_folder, path)
        paths.append(path)

        if num_test_images is not None and len(paths) >= num_test_images:
            break

    # create inference job
    inference_job = ImageInferenceJob(
        username=utils.auth.get_username(),
        name="Infer Many Images",
        model=model_job,
        images=paths,
        epoch=epoch,
        layers='none',
        resize=resize,
        )

    # schedule tasks
    scheduler.add_job(inference_job)

    # wait for job to complete
    inference_job.wait_completion()

    # retrieve inference data
    inputs, outputs, _ = inference_job.get_data()

    # set return status code
    status_code = 500 if inference_job.status == 'E' else 200

    # delete job folder and remove from scheduler list
    scheduler.delete_job(inference_job)

    if outputs is not None and len(outputs) < 1:
        # an error occurred
        outputs = None

    if inputs is not None:
        paths = [paths[idx] for idx in inputs['ids']]
        inference_views_html, header_html, app_begin_html, app_end_html = get_inference_visualizations(
            model_job.dataset,
            inputs,
            outputs)
    else:
        inference_views_html = None
        header_html = None
        app_begin_html = None
        app_end_html = None

    if request_wants_json():
        result = {}
        for i, path in enumerate(paths):
            result[path] = dict((name, blob[i].tolist()) for name, blob in outputs.iteritems())
        return flask.jsonify({'outputs': result}), status_code
    else:
        return flask.render_template(
            'models/images/generic/infer_many.html',
            model_job=model_job,
            job=inference_job,
            paths=paths,
            inference_views_html=inference_views_html,
            header_html=header_html,
            app_begin_html=app_begin_html,
            app_end_html=app_end_html,
            ), status_code
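
infer_many accepts the same list file as classify_many but strips any trailing label, and num_test_images caps how many paths are read before the inference job is created. A hedged sketch (route and job_id field name assumed):

    # Hypothetical client sketch for infer_many.
    import requests

    with open('list.txt', 'rb') as f:
        r = requests.post(
            'http://localhost:5000/models/images/generic/infer_many.json',  # assumed route
            data={
                'job_id': 'MODEL-JOB-ID',
                'image_folder': '/data/images',
                'num_test_images': '100',      # stop after the first 100 paths
            },
            files={'image_list': f},
        )
    outputs = r.json()['outputs']              # {path: {blob_name: values, ...}, ...}
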
Example #54
0
def classify_one():
    """
    Classify one image and return the top 5 classifications

    Returns JSON when requested: {predictions: {category: confidence,...}}
    """
    model_job = job_from_request()

    remove_image_path = False
    if 'image_path' in flask.request.form and flask.request.form['image_path']:
        image_path = flask.request.form['image_path']
    elif 'image_file' in flask.request.files and flask.request.files['image_file']:
        outfile = tempfile.mkstemp(suffix='.png')
        flask.request.files['image_file'].save(outfile[1])
        image_path = outfile[1]
        os.close(outfile[0])
        remove_image_path = True
    else:
        raise werkzeug.exceptions.BadRequest('must provide image_path or image_file')

    epoch = None
    if 'snapshot_epoch' in flask.request.form:
        epoch = float(flask.request.form['snapshot_epoch'])

    layers = 'none'
    if 'show_visualizations' in flask.request.form and flask.request.form['show_visualizations']:
        layers = 'all'

    # create inference job
    inference_job = ImageInferenceJob(
        username=utils.auth.get_username(),
        name="Classify One Image",
        model=model_job,
        images=[image_path],
        epoch=epoch,
        layers=layers
    )

    # schedule tasks
    scheduler.add_job(inference_job)

    # wait for job to complete
    inference_job.wait_completion()

    # retrieve inference data
    inputs, outputs, visualizations = inference_job.get_data()

    # set return status code
    status_code = 500 if inference_job.status == 'E' else 200

    # delete job
    scheduler.delete_job(inference_job)

    if remove_image_path:
        os.remove(image_path)

    image = None
    predictions = []
    if inputs is not None and len(inputs['data']) == 1:
        image = utils.image.embed_image_html(inputs['data'][0])
        # convert to class probabilities for viewing
        last_output_name, last_output_data = outputs.items()[-1]

        if len(last_output_data) == 1:
            scores = last_output_data[0].flatten()
            indices = (-scores).argsort()
            labels = model_job.train_task().get_labels()
            predictions = []
            for i in indices:
                # ignore prediction if we don't have a label for the corresponding class
                # the user might have set the final fully-connected layer's num_output to
                # too high a value
                if i < len(labels):
                    predictions.append((labels[i], scores[i]))
            predictions = [(p[0], round(100.0 * p[1], 2)) for p in predictions[:5]]

    if request_wants_json():
        return flask.jsonify({'predictions': predictions}), status_code
    else:
        return flask.render_template('models/images/classification/classify_one.html',
                                     model_job=model_job,
                                     job=inference_job,
                                     image_src=image,
                                     predictions=predictions,
                                     visualizations=visualizations,
                                     total_parameters=sum(v['param_count']
                                                          for v in visualizations if v['vis_type'] == 'Weights'),
                                     ), status_code
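
A matching sketch for classify_one using a server-side image path instead of an upload. Note that the jsonify call above returns predictions as a list of [label, confidence] pairs (top 5, confidences in percent). The route and the job_id field name are assumptions.

    # Hypothetical client sketch for classify_one.
    import requests

    r = requests.post(
        'http://localhost:5000/models/images/classification/classify_one.json',  # assumed route
        data={
            'job_id': 'MODEL-JOB-ID',
            'image_path': '/data/images/cats/001.jpg',   # path as seen by the server
        },
    )
    for label, confidence in r.json()['predictions']:
        print('%s: %.2f%%' % (label, confidence))
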
Example #55
0
File: views.py Project: photon1976/DIGITS
def generic_image_model_create():
    """
    Create a new GenericImageModelJob

    Returns JSON when requested: {job_id,name,status} or {errors:[]}
    """
    form = GenericImageModelForm()
    form.dataset.choices = get_datasets()
    form.standard_networks.choices = []
    form.previous_networks.choices = get_previous_networks()

    prev_network_snapshots = get_previous_network_snapshots()

    if not form.validate_on_submit():
        if request_wants_json():
            return flask.jsonify({'errors': form.errors}), 400
        else:
            return flask.render_template(
                'models/images/generic/new.html',
                form=form,
                previous_network_snapshots=prev_network_snapshots,
                previous_networks_fullinfo=get_previous_networks_fulldetails(),
                multi_gpu=config_value('caffe_root')['multi_gpu'],
            ), 400

    datasetJob = scheduler.get_job(form.dataset.data)
    if not datasetJob:
        raise werkzeug.exceptions.BadRequest('Unknown dataset job_id "%s"' %
                                             form.dataset.data)

    job = None
    try:
        job = GenericImageModelJob(
            name=form.model_name.data,
            dataset_id=datasetJob.id(),
        )

        # get framework (hard-coded to caffe for now)
        fw = frameworks.get_framework_by_id('caffe')

        pretrained_model = None
        #if form.method.data == 'standard':
        if form.method.data == 'previous':
            old_job = scheduler.get_job(form.previous_networks.data)
            if not old_job:
                raise werkzeug.exceptions.BadRequest(
                    'Job not found: %s' % form.previous_networks.data)

            network = fw.get_network_from_previous(
                old_job.train_task().network)

            for choice in form.previous_networks.choices:
                if choice[0] == form.previous_networks.data:
                    epoch = float(
                        flask.request.form['%s-snapshot' %
                                           form.previous_networks.data])
                    if epoch != 0:
                        for filename, e in old_job.train_task().snapshots:
                            if e == epoch:
                                pretrained_model = filename
                                break

                        if pretrained_model is None:
                            raise werkzeug.exceptions.BadRequest(
                                "For the job %s, selected pretrained_model for epoch %d is invalid!"
                                % (form.previous_networks.data, epoch))
                        if not (os.path.exists(pretrained_model)):
                            raise werkzeug.exceptions.BadRequest(
                                "Pretrained_model for the selected epoch doesn't exists. May be deleted by another user/process. Please restart the server to load the correct pretrained_model details"
                            )
                    break

        elif form.method.data == 'custom':
            network = fw.get_network_from_desc(form.custom_network.data)
            pretrained_model = form.custom_network_snapshot.data.strip()
        else:
            raise werkzeug.exceptions.BadRequest('Unrecognized method: "%s"' %
                                                 form.method.data)

        policy = {'policy': form.lr_policy.data}
        if form.lr_policy.data == 'fixed':
            pass
        elif form.lr_policy.data == 'step':
            policy['stepsize'] = form.lr_step_size.data
            policy['gamma'] = form.lr_step_gamma.data
        elif form.lr_policy.data == 'multistep':
            policy['stepvalue'] = form.lr_multistep_values.data
            policy['gamma'] = form.lr_multistep_gamma.data
        elif form.lr_policy.data == 'exp':
            policy['gamma'] = form.lr_exp_gamma.data
        elif form.lr_policy.data == 'inv':
            policy['gamma'] = form.lr_inv_gamma.data
            policy['power'] = form.lr_inv_power.data
        elif form.lr_policy.data == 'poly':
            policy['power'] = form.lr_poly_power.data
        elif form.lr_policy.data == 'sigmoid':
            policy['stepsize'] = form.lr_sigmoid_step.data
            policy['gamma'] = form.lr_sigmoid_gamma.data
        else:
            raise werkzeug.exceptions.BadRequest(
                'Invalid learning rate policy')

        if config_value('caffe_root')['multi_gpu']:
            if form.select_gpu_count.data:
                gpu_count = form.select_gpu_count.data
                selected_gpus = None
            else:
                selected_gpus = [str(gpu) for gpu in form.select_gpus.data]
                gpu_count = None
        else:
            if form.select_gpu.data == 'next':
                gpu_count = 1
                selected_gpus = None
            else:
                selected_gpus = [str(form.select_gpu.data)]
                gpu_count = None

        job.tasks.append(
            fw.create_train_task(
                job_dir=job.dir(),
                dataset=datasetJob,
                train_epochs=form.train_epochs.data,
                snapshot_interval=form.snapshot_interval.data,
                learning_rate=form.learning_rate.data,
                lr_policy=policy,
                gpu_count=gpu_count,
                selected_gpus=selected_gpus,
                batch_size=form.batch_size.data,
                val_interval=form.val_interval.data,
                pretrained_model=pretrained_model,
                crop_size=form.crop_size.data,
                use_mean=bool(form.use_mean.data),
                network=network,
                random_seed=form.random_seed.data,
                solver_type=form.solver_type.data,
            ))

        scheduler.add_job(job)
        if request_wants_json():
            return flask.jsonify(job.json_dict())
        else:
            return flask.redirect(flask.url_for('models_show',
                                                job_id=job.id()))

    except:
        if job:
            scheduler.delete_job(job)
        raise
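
The long lr_policy chain above only copies form fields into a Caffe-style policy dict. Purely as an illustration (this is not how DIGITS writes it), the same mapping can be expressed in table-driven form, which makes it easier to see which fields each policy uses:

    # Illustrative, table-driven equivalent of the lr_policy branches above.
    LR_POLICY_FIELDS = {
        'fixed':     [],
        'step':      [('stepsize', 'lr_step_size'), ('gamma', 'lr_step_gamma')],
        'multistep': [('stepvalue', 'lr_multistep_values'), ('gamma', 'lr_multistep_gamma')],
        'exp':       [('gamma', 'lr_exp_gamma')],
        'inv':       [('gamma', 'lr_inv_gamma'), ('power', 'lr_inv_power')],
        'poly':      [('power', 'lr_poly_power')],
        'sigmoid':   [('stepsize', 'lr_sigmoid_step'), ('gamma', 'lr_sigmoid_gamma')],
    }

    def build_lr_policy(policy_name, form_values):
        """form_values: dict of already-extracted form field data, e.g. {'lr_step_size': 33}."""
        if policy_name not in LR_POLICY_FIELDS:
            raise ValueError('Invalid learning rate policy')
        policy = {'policy': policy_name}
        for policy_key, form_field in LR_POLICY_FIELDS[policy_name]:
            policy[policy_key] = form_values[form_field]
        return policy

    # build_lr_policy('step', {'lr_step_size': 33, 'lr_step_gamma': 0.1})
    # -> {'policy': 'step', 'stepsize': 33, 'gamma': 0.1}
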
Example #56
0
def create():
    """
    Create a new ImageClassificationModelJob

    Returns JSON when requested: {job_id,name,status} or {errors:[]}
    """
    form = ImageClassificationModelForm()
    form.dataset.choices = get_datasets()
    form.standard_networks.choices = get_standard_networks()
    form.standard_networks.default = get_default_standard_network()
    form.previous_networks.choices = get_previous_networks()
    form.pretrained_networks.choices = get_pretrained_networks()

    prev_network_snapshots = get_previous_network_snapshots()

    # Is there a request to clone a job with ?clone=<job_id>
    fill_form_if_cloned(form)

    if not form.validate_on_submit():
        if request_wants_json():
            return flask.jsonify({'errors': form.errors}), 400
        else:
            return flask.render_template('models/images/classification/new.html',
                                         form=form,
                                         frameworks=frameworks.get_frameworks(),
                                         previous_network_snapshots=prev_network_snapshots,
                                         previous_networks_fullinfo=get_previous_networks_fulldetails(),
                                         pretrained_networks_fullinfo=get_pretrained_networks_fulldetails(),
                                         multi_gpu=config_value('caffe')['multi_gpu'],
                                         ), 400

    datasetJob = scheduler.get_job(form.dataset.data)
    if not datasetJob:
        raise werkzeug.exceptions.BadRequest(
            'Unknown dataset job_id "%s"' % form.dataset.data)

    # sweeps will be a list of the permutations of swept fields
    # Get swept learning_rate
    sweeps = [{'learning_rate': v} for v in form.learning_rate.data]
    add_learning_rate = len(form.learning_rate.data) > 1

    # Add swept batch_size
    sweeps = [dict(s.items() + [('batch_size', bs)]) for bs in form.batch_size.data for s in sweeps[:]]
    add_batch_size = len(form.batch_size.data) > 1
    n_jobs = len(sweeps)

    jobs = []
    for sweep in sweeps:
        # Populate the form with swept data to be used in saving and
        # launching jobs.
        form.learning_rate.data = sweep['learning_rate']
        form.batch_size.data = sweep['batch_size']

        # Augment Job Name
        extra = ''
        if add_learning_rate:
            extra += ' learning_rate:%s' % str(form.learning_rate.data[0])
        if add_batch_size:
            extra += ' batch_size:%d' % form.batch_size.data[0]

        job = None
        try:
            job = ImageClassificationModelJob(
                username=utils.auth.get_username(),
                name=form.model_name.data + extra,
                group=form.group_name.data,
                dataset_id=datasetJob.id(),
            )
            # get handle to framework object
            fw = frameworks.get_framework_by_id(form.framework.data)

            pretrained_model = None
            if form.method.data == 'standard':
                found = False

                # can we find it in standard networks?
                network_desc = fw.get_standard_network_desc(form.standard_networks.data)
                if network_desc:
                    found = True
                    network = fw.get_network_from_desc(network_desc)

                if not found:
                    raise werkzeug.exceptions.BadRequest(
                        'Unknown standard model "%s"' % form.standard_networks.data)
            elif form.method.data == 'previous':
                old_job = scheduler.get_job(form.previous_networks.data)
                if not old_job:
                    raise werkzeug.exceptions.BadRequest(
                        'Job not found: %s' % form.previous_networks.data)

                use_same_dataset = (old_job.dataset_id == job.dataset_id)
                network = fw.get_network_from_previous(old_job.train_task().network, use_same_dataset)

                for choice in form.previous_networks.choices:
                    if choice[0] == form.previous_networks.data:
                        epoch = float(flask.request.form['%s-snapshot' % form.previous_networks.data])
                        if epoch == 0:
                            pass
                        elif epoch == -1:
                            pretrained_model = old_job.train_task().pretrained_model
                        else:
                            # verify snapshot exists
                            pretrained_model = old_job.train_task().get_snapshot(epoch, download=True)
                            if pretrained_model is None:
                                raise werkzeug.exceptions.BadRequest(
                                    "For the job %s, selected pretrained_model for epoch %d is invalid!"
                                    % (form.previous_networks.data, epoch))
                            # the first is the actual file if a list is returned, other should be meta data
                            if isinstance(pretrained_model, list):
                                pretrained_model = pretrained_model[0]

                            if not (os.path.exists(pretrained_model)):
                                raise werkzeug.exceptions.BadRequest(
                                    "Pretrained_model for the selected epoch doesn't exist. "
                                    "May be deleted by another user/process. "
                                    "Please restart the server to load the correct pretrained_model details.")
                            # get logical path
                            pretrained_model = old_job.train_task().get_snapshot(epoch)
                        break

            elif form.method.data == 'pretrained':
                pretrained_job = scheduler.get_job(form.pretrained_networks.data)
                model_def_path = pretrained_job.get_model_def_path()
                weights_path = pretrained_job.get_weights_path()

                network = fw.get_network_from_path(model_def_path)
                pretrained_model = weights_path

            elif form.method.data == 'custom':
                network = fw.get_network_from_desc(form.custom_network.data)
                pretrained_model = form.custom_network_snapshot.data.strip()
            else:
                raise werkzeug.exceptions.BadRequest(
                    'Unrecognized method: "%s"' % form.method.data)

            policy = {'policy': form.lr_policy.data}
            if form.lr_policy.data == 'fixed':
                pass
            elif form.lr_policy.data == 'step':
                policy['stepsize'] = form.lr_step_size.data
                policy['gamma'] = form.lr_step_gamma.data
            elif form.lr_policy.data == 'multistep':
                policy['stepvalue'] = form.lr_multistep_values.data
                policy['gamma'] = form.lr_multistep_gamma.data
            elif form.lr_policy.data == 'exp':
                policy['gamma'] = form.lr_exp_gamma.data
            elif form.lr_policy.data == 'inv':
                policy['gamma'] = form.lr_inv_gamma.data
                policy['power'] = form.lr_inv_power.data
            elif form.lr_policy.data == 'poly':
                policy['power'] = form.lr_poly_power.data
            elif form.lr_policy.data == 'sigmoid':
                policy['stepsize'] = form.lr_sigmoid_step.data
                policy['gamma'] = form.lr_sigmoid_gamma.data
            else:
                raise werkzeug.exceptions.BadRequest(
                    'Invalid learning rate policy')

            if config_value('caffe')['multi_gpu']:
                if form.select_gpus.data:
                    selected_gpus = [str(gpu) for gpu in form.select_gpus.data]
                    gpu_count = None
                elif form.select_gpu_count.data:
                    gpu_count = form.select_gpu_count.data
                    selected_gpus = None
                else:
                    gpu_count = 1
                    selected_gpus = None
            else:
                if form.select_gpu.data == 'next':
                    gpu_count = 1
                    selected_gpus = None
                else:
                    selected_gpus = [str(form.select_gpu.data)]
                    gpu_count = None

            # Set up data augmentation structure
            data_aug = {}
            data_aug['flip'] = form.aug_flip.data
            data_aug['quad_rot'] = form.aug_quad_rot.data
            data_aug['rot'] = form.aug_rot.data
            data_aug['scale'] = form.aug_scale.data
            data_aug['noise'] = form.aug_noise.data
            data_aug['contrast'] = form.aug_contrast.data
            data_aug['whitening'] = form.aug_whitening.data
            data_aug['hsv_use'] = form.aug_hsv_use.data
            data_aug['hsv_h'] = form.aug_hsv_h.data
            data_aug['hsv_s'] = form.aug_hsv_s.data
            data_aug['hsv_v'] = form.aug_hsv_v.data

            # Python Layer File may be on the server or copied from the client.
            fs.copy_python_layer_file(
                bool(form.python_layer_from_client.data),
                job.dir(),
                (flask.request.files[form.python_layer_client_file.name]
                 if form.python_layer_client_file.name in flask.request.files
                 else ''), form.python_layer_server_file.data)

            job.tasks.append(fw.create_train_task(
                job=job,
                dataset=datasetJob,
                train_epochs=form.train_epochs.data,
                snapshot_interval=form.snapshot_interval.data,
                learning_rate=form.learning_rate.data[0],
                lr_policy=policy,
                gpu_count=gpu_count,
                selected_gpus=selected_gpus,
                batch_size=form.batch_size.data[0],
                batch_accumulation=form.batch_accumulation.data,
                val_interval=form.val_interval.data,
                traces_interval=form.traces_interval.data,
                pretrained_model=pretrained_model,
                crop_size=form.crop_size.data,
                use_mean=form.use_mean.data,
                network=network,
                random_seed=form.random_seed.data,
                solver_type=form.solver_type.data,
                rms_decay=form.rms_decay.data,
                shuffle=form.shuffle.data,
                data_aug=data_aug,
            )
            )

            # Save form data with the job so we can easily clone it later.
            save_form_to_job(job, form)

            jobs.append(job)
            scheduler.add_job(job)
            if n_jobs == 1:
                if request_wants_json():
                    return flask.jsonify(job.json_dict())
                else:
                    return flask.redirect(flask.url_for('digits.model.views.show', job_id=job.id()))

        except:
            if job:
                scheduler.delete_job(job)
            raise

    if request_wants_json():
        return flask.jsonify(jobs=[j.json_dict() for j in jobs])

    # If there are multiple jobs launched, go to the home page.
    return flask.redirect('/')
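
The two list comprehensions near the top of create() (the sweeps assignments) rely on dict.items() returning a list, which only holds on Python 2; what they compute is simply the cartesian product of the swept learning rates and batch sizes, one job per combination. An equivalent, version-agnostic sketch (combination order may differ from the original):

    # Illustrative sketch of the sweep expansion in create() above.
    import itertools

    def expand_sweeps(learning_rates, batch_sizes):
        return [
            {'learning_rate': lr, 'batch_size': bs}
            for lr, bs in itertools.product(learning_rates, batch_sizes)
        ]

    # expand_sweeps([0.01, 0.001], [64, 128]) -> four jobs:
    #   {'learning_rate': 0.01,  'batch_size': 64}
    #   {'learning_rate': 0.01,  'batch_size': 128}
    #   {'learning_rate': 0.001, 'batch_size': 64}
    #   {'learning_rate': 0.001, 'batch_size': 128}
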
Example #57
0
File: views.py Project: yyyreal/DIGITS
def generic_image_model_infer_many():
    """
    Infer many images
    """
    job = job_from_request()

    image_list = flask.request.files.get('image_list')
    if not image_list:
        raise werkzeug.exceptions.BadRequest('image_list is a required field')

    epoch = None
    if 'snapshot_epoch' in flask.request.form:
        epoch = float(flask.request.form['snapshot_epoch'])

    paths = []
    images = []

    db_task = job.train_task().dataset.analyze_db_tasks()[0]
    height = db_task.image_height
    width = db_task.image_width
    if job.train_task().crop_size:
        height = job.train_task().crop_size
        width = job.train_task().crop_size
    channels = db_task.image_channels

    for line in image_list.readlines():
        line = line.strip()
        if not line:
            continue

        path = None
        # might contain a numerical label at the end
        match = re.match(r'(.*\S)\s+\d+$', line)
        if match:
            path = match.group(1)
        else:
            path = line

        try:
            image = utils.image.load_image(path)
            image = utils.image.resize_image(image, height, width,
                    channels = channels,
                    resize_mode = 'squash',
                    )
            paths.append(path)
            images.append(image)
        except utils.errors.LoadImageError as e:
            print e

    if not len(images):
        raise werkzeug.exceptions.BadRequest(
                'Unable to load any images from the file')

    outputs = job.train_task().infer_many(images, snapshot_epoch=epoch)
    if outputs is None:
        raise RuntimeError('An error occurred while processing the images')

    if request_wants_json():
        result = {}
        for i, path in enumerate(paths):
            result[path] = dict((name, blob[i].tolist()) for name,blob in outputs.iteritems())
        return flask.jsonify({'outputs': result})
    else:
        return flask.render_template('models/images/generic/infer_many.html',
                paths           = paths,
                network_outputs = outputs,
                )