# `app`, `db`, `current_project`, and the models (Image, ManualSegmentation)
# come from the surrounding Flask application.
import json
import os
import traceback
from gzip import GzipFile

import nibabel
from flask import flash, redirect, request
from nibabel.fileholders import FileHolder
from nibabel.nifti1 import Nifti1Image


def upload_case(project_id):
    """
    Central endpoint to upload images
    """

    # Make sure file is actually included
    if 'image' not in request.files:
        flash('No file appended', category='error')
        return redirect(request.referrer)

    # Make sure it is a valid nifti
    image_file = request.files['image']
    try:
        # Uploads are expected to be gzipped NIfTI (.nii.gz) streams
        fh = FileHolder(fileobj=GzipFile(fileobj=image_file))
        image_nifti = Nifti1Image.from_file_map({'header': fh, 'image': fh})
    except Exception:
        traceback.print_exc()
        flash('File is not a valid nifti', category='error')
        return json.dumps({'success': False}), 400, {
            'Content-Type': 'application/json'
        }

    # Save image to correct path
    image_path = os.path.join(app.config['DATA_PATH'],
                              current_project.short_name, "images",
                              image_file.filename)
    if os.path.exists(image_path):
        flash('File already exists', category="error")
        return json.dumps({'success': False}), 400, {
            'Content-Type': 'application/json'
        }
    nibabel.save(image_nifti, image_path)

    # Create entry for db (with empty segmentation)
    image = Image(project=current_project, name=image_file.filename)
    segmentation = ManualSegmentation(image=image, project=current_project)

    # Parse attributes
    attributes = json.loads(request.form['attributes'])
    for attribute, value in attributes.items():
        if hasattr(image, attribute) and value != "":
            setattr(image, attribute, value)

    db.session.add(image)
    db.session.add(segmentation)
    db.session.commit()
    return json.dumps({'success': True}), 200, {
        'Content-Type': 'application/json'
    }
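The piece worth isolating here is the in-memory NIfTI parse: Nifti1Image.from_file_map reads both header and image from a single file-like object, so a gzipped upload can be validated without ever touching disk. A minimal self-contained sketch of that trick, with the file name standing in for any .nii.gz stream:

import io
from gzip import GzipFile

from nibabel.fileholders import FileHolder
from nibabel.nifti1 import Nifti1Image

with open("example.nii.gz", "rb") as f:  # stands in for an upload stream
    fh = FileHolder(fileobj=GzipFile(fileobj=io.BytesIO(f.read())))
    img = Nifti1Image.from_file_map({'header': fh, 'image': fh})
print(img.shape)  # header parsed entirely in memory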
Example #2
# `user_service`, `file_service`, and `scan_service` are project-local modules.
import io
from gzip import GzipFile

from flask import make_response, request
from nibabel.fileholders import FileHolder
from nibabel.nifti1 import Nifti1Image


def process_file():
    if 'Authorization' not in request.headers:
        response = make_response('Missing authentication token', 400)
    else:
        full_token = request.headers['Authorization']

        if full_token is None:
            response = make_response('Unauthorized action! Token is invalid',
                                     401)

        else:
            # Expect "Authorization: Bearer <token>"; guard against a
            # malformed value with no space
            parts = full_token.strip().split(" ")
            token = parts[1] if len(parts) > 1 else ""
            user = user_service.get_user_by_token(token)

            if user is None:
                response = make_response(
                    'Unauthorized action! Token is invalid', 401)
            else:
                if 'file' not in request.files:
                    response = make_response('Invalid parameters', 400)
                else:
                    file = request.files['file']

                    if file is None:
                        response = make_response('File is missing!', 409)
                    elif file_service.check_file_type(file):
                        fh = FileHolder(fileobj=GzipFile(
                            fileobj=io.BytesIO(file.read())))
                        img = Nifti1Image.from_file_map({
                            'header': fh,
                            'image': fh
                        })
                        file_id = file_service.segment_image(img)
                        scan_name = file.filename.split(".")[0]
                        scan = scan_service.save_scan(user.username, file_id,
                                                      scan_name)
                        response = make_response(scan, 200)
                    else:
                        response = make_response('Invalid file type!', 415)

    response.headers['Content-Type'] = 'application/json'
    return response
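A hypothetical client call against this handler. The route URL and token value are placeholders, but the "Bearer" header format and the multipart "file" field mirror the checks above:

import requests

with open("scan.nii.gz", "rb") as f:
    resp = requests.post(
        "http://localhost:5000/process_file",  # assumed route
        headers={"Authorization": "Bearer <token>"},  # placeholder token
        files={"file": f},
    )
print(resp.status_code, resp.text)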
Example #3
# `dataloader_hist_norm`, `Zscore_normalize`, `power_law_transformation`, and
# `elastic_transform` are project-local helpers.
from io import BytesIO
from random import random

import numpy as np
import torch
from nibabel.fileholders import FileHolder
from nibabel.nifti1 import Nifti1Image


def col_img(batch,
            to_tensor=True,
            nyul_params=None,
            use_zscore=False,
            pl_prob=0,
            elastic_prob=0):
    """
    Collator function for a DataLoader. When passing this to the DataLoader,
    wrap it in a lambda so you can bind the keyword arguments you need.
    Args:
        batch: list of sample dicts supplied by the DataLoader; each dict's
            first value is a sample name ending in the modality tag and its
            second value is the raw NIfTI bytes.
        to_tensor (bool): If False, return NumPy arrays. If True, return torch tensors.
        nyul_params (dict): dict containing keys "percs" and "standard_scales" from the Nyul functions.
        use_zscore (bool): If True and nyul_params is None, use Z-score normalization.
        pl_prob (float): Probability of applying a power-law transformation.
        elastic_prob (float): Not currently available, but would be the probability of applying an elastic deformation.
    """
    # Each of the 5 samples in the batch is one modality of the same case;
    # the modality tag is the suffix of the sample name.
    bytes_data_list = [list(batch[i].items())[1][1] for i in range(5)]
    bytes_data_keys = [
        list(batch[i].items())[0][1].split('_')[-1] for i in range(5)
    ]
    bytes_data_dict = dict(zip(bytes_data_keys, bytes_data_list))

    def _load(modality):
        # Parse one modality's NIfTI volume straight from its in-memory bytes
        fh = FileHolder(fileobj=BytesIO(bytes_data_dict[modality]))
        return Nifti1Image.from_file_map({
            'header': fh,
            'image': fh
        }).get_fdata()

    f_flair = _load('flair')
    f_seg = _load('seg')
    f_t1 = _load('t1')
    f_t1ce = _load('t1ce')
    f_t2 = _load('t2')

    # Pad the last axis by 5 slices (e.g. 155 -> 160 for BraTS-sized volumes)
    padding = [(0, 0), (0, 0), (2, 3)]
    f_flair = np.expand_dims(np.pad(f_flair, padding), axis=0)
    f_t1 = np.expand_dims(np.pad(f_t1, padding), axis=0)
    f_t2 = np.expand_dims(np.pad(f_t2, padding), axis=0)
    f_t1ce = np.expand_dims(np.pad(f_t1ce, padding), axis=0)
    f_seg = np.pad(f_seg, padding)
    concat = np.concatenate([f_t1, f_t1ce, f_t2, f_flair], axis=0)
    f_seg = np.expand_dims(f_seg, axis=0)

    assert not ((nyul_params is not None) and use_zscore), \
        "Use either Nyul or Z-score normalization, not both"

    # Normalizations
    if nyul_params is not None:
        percss = nyul_params['percs']
        standard_scales = nyul_params['standard_scales']
        for i in range(concat.shape[0]):
            concat[i] = dataloader_hist_norm(concat[i],
                                             percss[i],
                                             standard_scales[i],
                                             f_seg,
                                             ignore_zero=True) / 100

    if use_zscore:
        for i in range(concat.shape[0]):
            concat[i] = Zscore_normalize(concat[i], floor=-3)

    # Augmentations - elastic transform is not implemented yet

    if random() < pl_prob:
        concat, f_seg = power_law_transformation(concat, f_seg)

    assert elastic_prob == 0
    if random() < elastic_prob:
        concat, f_seg = elastic_transform(concat, f_seg, sigma=5)

    if to_tensor:
        concat = torch.as_tensor(concat).half()
        f_seg = torch.as_tensor(f_seg)

    return [concat, f_seg]
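A hypothetical wiring of col_img into a DataLoader, following the docstring's lambda advice. The dataset name is made up, and the batch size of 5 (one sample per modality of a single case) is an assumption based on the indexing above:

from torch.utils.data import DataLoader

# `nifti_bytes_dataset` is a hypothetical Dataset yielding dicts whose first
# value is "<case>_<modality>" and whose second value is raw NIfTI bytes
loader = DataLoader(
    nifti_bytes_dataset,
    batch_size=5,  # flair, seg, t1, t1ce, t2 of one case
    collate_fn=lambda batch: col_img(batch, to_tensor=True, use_zscore=True),
)
for volumes, seg in loader:
    pass  # volumes: (4, H, W, D); seg: (1, H, W, D)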
Example #4
def upload_case_segmentation(project_id, case_id):
    """
    Central endpoint to upload segmentations
    """

    # Make sure file is actually included
    if 'segmentation' not in request.files:
        flash('No file appended', category="error")
        return redirect(request.referrer)

    # Make sure it is a valid nifti
    segmentation_file = request.files["segmentation"]
    try:
        fh = FileHolder(fileobj=GzipFile(fileobj=segmentation_file))
        segmentation_nifti = Nifti1Image.from_file_map({
            'header': fh,
            'image': fh
        })
    except Exception:
        traceback.print_exc()
        flash('File is not a valid nifti', category="error")
        return json.dumps({'success': False}), 400, {
            'Content-Type': 'application/json'
        }

    # Make sure corresponding image exists
    image_name = request.headers["image_name"]
    image_path = os.path.join(app.config['DATA_PATH'],
                              current_project.short_name, "images", image_name)

    if not os.path.exists(image_path):
        flash('No corresponding image found', category="error")
        return redirect(request.referrer)

    # Make sure that sizes match (all but the last axis are compared; the
    # last axis may be a channel or time dimension)
    image_nifti = nibabel.load(image_path)
    if image_nifti.shape[:-1] != segmentation_nifti.shape[:-1]:
        flash('Image dimensions do not match segmentation dimensions',
              category="error")
        return redirect(request.referrer)

    # Update database
    image = db.session.query(Image).filter(
        Image.name == segmentation_file.filename).first()
    if image is None:
        flash('No corresponding image found', category="error")
        return redirect(request.referrer)
    segmentation = db.session.query(ManualSegmentation).filter(
        ManualSegmentation.image == image).first()
    if segmentation is None:
        segmentation = ManualSegmentation(project=current_project, image=image)
        db.session.add(segmentation)

    # Save file
    segmentation_path = os.path.join(app.config['DATA_PATH'],
                                     current_project.short_name,
                                     "manual_segmentations",
                                     segmentation_file.filename)
    nibabel.save(segmentation_nifti, segmentation_path)

    db.session.commit()
    return json.dumps({'success': True}), 200, {
        'Content-Type': 'application/json'
    }
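A hypothetical client call for this endpoint. The route is an assumption, but the multipart "segmentation" field and the custom "image_name" header mirror what the handler reads:

import requests

with open("case_001_seg.nii.gz", "rb") as f:
    resp = requests.post(
        "http://localhost:5000/project/1/case/7/segmentation",  # assumed route
        headers={"image_name": "case_001.nii.gz"},  # companion image on the server
        files={"segmentation": ("case_001_seg.nii.gz", f)},
    )
print(resp.status_code, resp.text)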