Example No. 1
def send_message():
    import time
    from models import Patient, Doctor, Message
    from errors import ApiError, ErrorCodes
    from tools import ok

    # `request` (Flask) and `db` (SQLAlchemy) are module-level names
    # in the source file.
    request_body = request.json
    patient_id = request_body["patient_id"]
    doctor_id = request_body["doctor_id"]
    from_patient = request_body["from_patient"]
    message_text = request_body["message_text"]

    patient = db.session.query(Patient).filter(
        Patient.id == patient_id).first()
    doctor = db.session.query(Doctor).filter(Doctor.id == doctor_id).first()

    if not patient or not doctor:
        return ApiError(
            status_code=ErrorCodes.BAD_REQUEST,
            message="Patient or doctor does not exist. Unable to send message"
        ).to_json()

    message = Message()
    message.date_sent = int(time.time())
    message.from_patient = from_patient
    message.message_text = message_text
    message.doctor_id = doctor_id
    message.patient_id = patient_id

    patient.messages.append(message)
    db.session.commit()

    return ok(message.to_json())
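The `ok` and `ApiError` helpers imported above never appear in this listing. A minimal sketch of what they plausibly look like, assuming Flask and that `ok` wraps a serializable payload in an HTTP 200 response (hypothetical, not the project's actual code):

from flask import jsonify


def ok(body):
    # Hypothetical sketch: wrap a payload in an HTTP 200 JSON response.
    return jsonify(body), 200


class ApiError:
    # Hypothetical sketch: carry a status code and message, render as JSON.
    def __init__(self, status_code, message):
        self.status_code = status_code
        self.message = message

    def to_json(self):
        return jsonify({"error": self.message}), self.status_code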
Example No. 2
def get_patients():
    from models import Patient, HeartRate
    from tools import ok
    from sqlalchemy.sql import label

    max_timestamp = label("max_timestamp",
                          db.func.max(HeartRate.unix_timestamp))
    min_timestamp = label("min_timestamp",
                          db.func.min(HeartRate.unix_timestamp))
    patients = db.session\
        .query(
            Patient,
            max_timestamp,
            min_timestamp
        )\
        .filter(Patient.id == HeartRate.patient_id)\
        .with_entities(Patient.id,
                       Patient.first_name,
                       Patient.last_name,
                       Patient.birth_date,
                       min_timestamp,
                       max_timestamp)\
        .group_by(Patient.id)\
        .all()

    body = dict()
    body["patients"] = patients
    return ok(body)
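`with_entities` replaces whatever the query originally selected, so naming `Patient` and the two labels in the opening `query(...)` call is redundant. A trimmed query with identical semantics (same models, implicit join via the filter):

    patients = db.session\
        .query(Patient.id,
               Patient.first_name,
               Patient.last_name,
               Patient.birth_date,
               min_timestamp,
               max_timestamp)\
        .filter(Patient.id == HeartRate.patient_id)\
        .group_by(Patient.id)\
        .all()

(Strict SQL dialects would also want the non-aggregated columns listed in the GROUP BY.)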
Example No. 3
def get_patient(last_name, first_name):
    from models import Patient
    from sqlalchemy import func
    from tools import ok
    # Pass both case-insensitive conditions as separate filter() arguments
    # so they combine with SQL AND; chaining them with Python's `and`
    # keeps only the second expression.
    patient = Patient.query.filter(
        func.lower(Patient.last_name) == func.lower(last_name),
        func.lower(Patient.first_name) == func.lower(first_name)
    ).first_or_404()
    return ok(patient.to_json())
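Chaining SQLAlchemy expressions with Python's `and` is a classic pitfall: `a == b and c == d` evaluates to just `c == d`, silently dropping the first condition. Equivalent safe spellings of the filter above:

from sqlalchemy import and_, func

last = func.lower(Patient.last_name) == func.lower(last_name)
first = func.lower(Patient.first_name) == func.lower(first_name)

Patient.query.filter(last, first)        # positional arguments are ANDed
Patient.query.filter(and_(last, first))  # explicit and_()
Patient.query.filter(last & first)       # bitwise-operator form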
Example No. 4
def get_all_messages(patient_id):
    from models import Message
    from tools import ok

    messages = db.session.query(Message).filter(
        Message.patient_id == patient_id)
    messages = Message.serialize_list(messages)
    return ok(messages)
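`Message.serialize_list` is not shown in the listing. A plausible minimal implementation, assuming each model exposes the same `to_json()` used elsewhere in these examples (hypothetical):

class Message(db.Model):
    # ... columns elided ...

    @staticmethod
    def serialize_list(items):
        # Works for a Query as well as a list, since both are iterable.
        return [item.to_json() for item in items]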
Example No. 5
def get_patient_data_in_time_window(last_name, first_name):
    from models import Patient, ActivityType, HeartRate, Steps
    from sqlalchemy import func
    from errors import ApiError, ErrorCodes
    from tools import ok

    start_unix_time = request.args.get("start")
    end_unix_time = request.args.get("end")

    if not start_unix_time or not end_unix_time:
        return ApiError(
            status_code=ErrorCodes.BAD_REQUEST,
            message=
            "Must include unix timestamps in query parameters start and end."
        ).to_json()

    try:
        start_unix_time = int(start_unix_time)
        end_unix_time = int(end_unix_time)
    except ValueError:
        return ApiError(
            status_code=ErrorCodes.BAD_REQUEST,
            message="Unix timestamps given in start and end must be integers"
        ).to_json()
    else:
        # Query only when both timestamps parsed; the two name conditions
        # are separate filter() arguments so they combine with SQL AND.
        patient = db.session.query(Patient).filter(
            func.lower(Patient.last_name) == func.lower(last_name),
            func.lower(Patient.first_name) == func.lower(first_name)).first()

        if not patient:
            return ApiError(
                status_code=ErrorCodes.BAD_REQUEST,
                message="Patient does not exist."
            ).to_json()

        activity_measures = patient.activity_type_measures.filter(
            (ActivityType.unix_timestamp >= start_unix_time)
            & (ActivityType.unix_timestamp <= end_unix_time)).all()
        heart_rates = patient.heart_rate_measures.filter(
            (HeartRate.unix_timestamp >= start_unix_time)
            & (HeartRate.unix_timestamp <= end_unix_time)).all()
        steps = patient.step_measures.filter(
            (Steps.unix_timestamp >= start_unix_time)
            & (Steps.unix_timestamp <= end_unix_time)).all()

        patient.activity_type_measures = activity_measures
        patient.heart_rate_measures = heart_rates
        patient.step_measures = steps
        return ok(patient.to_json())
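One subtlety: assigning the filtered lists back onto `patient.activity_type_measures` and friends mutates ORM relationship state, so a later flush could orphan or delete the out-of-window rows, depending on cascade settings. A safer sketch that builds the response without touching the relationships (assuming each measure model has a `to_json()`):

        body = patient.to_json()
        body["activity_type_measures"] = [m.to_json() for m in activity_measures]
        body["heart_rate_measures"] = [m.to_json() for m in heart_rates]
        body["step_measures"] = [m.to_json() for m in steps]
        return ok(body)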
Example No. 6
def load(ctx, drop):
    """Load zones from a folder of zip files containing shapefiles"""
    title("Extracting zones from datasets")
    zones = DB()

    if drop:
        info("Drop existing collection")
        zones.drop()

    with ok("Creating index (level,code)"):
        zones.create_index([("level", ASCENDING), ("code", ASCENDING)])
    info("Creating index (level,keys)")
    zones.create_index([("level", ASCENDING), ("keys", ASCENDING)])
    info("Creating index (parents)")
    zones.create_index("parents")

    total = 0

    for level in ctx.obj["levels"]:
        info('Processing level "{0}"'.format(level.id))
        total += level.load(DL_DIR, zones)

    success("Done: Loaded {0} zones".format(total))
Example No. 7
def load(ctx, drop):
    '''Load zones from a folder of zip files containing shapefiles'''
    title('Extracting zones from datasets')
    zones = DB()

    if drop:
        info('Drop existing collection')
        zones.drop()

    with ok('Creating index (level,code)'):
        zones.create_index([('level', ASCENDING), ('code', ASCENDING)])
    info('Creating index (level,keys)')
    zones.create_index([('level', ASCENDING), ('keys', ASCENDING)])
    info('Creating index (parents)')
    zones.create_index('parents')

    total = 0

    for level in ctx.obj['levels']:
        info('Processing level "{0}"'.format(level.id))
        total += level.load(DL_DIR, zones)

    success('Done: Loaded {0} zones'.format(total))
Example No. 8
def dist(ctx, pretty, split, compress, serialization, keys):
    """Dump a distributable file"""
    keys = keys and keys.split(",")
    title("Dumping data to {serialization} with keys {keys}".format(serialization=serialization, keys=keys))
    geozones = DB()
    filenames = []

    if not exists(DIST_DIR):
        os.makedirs(DIST_DIR)

    os.chdir(DIST_DIR)
    level_ids = [l.id for l in ctx.obj["levels"]]

    if split:
        for level_id in level_ids:
            filename = "zones-{level}.{serialization}".format(
                level=level_id.replace("/", "-"), serialization=serialization
            )
            with ok("Generating {filename}".format(filename=filename)):
                zones = geozones.find({"level": level_id})
                if serialization == "json":
                    with open(filename, "w") as out:
                        geojson.dump(zones, out, pretty=pretty, keys=keys)
                else:
                    packer = msgpack.Packer(use_bin_type=True)
                    with open(filename, "wb") as out:
                        for zone in zones:
                            out.write(packer.pack(zone))
            filenames.append(filename)
    else:
        filename = "zones.{serialization}".format(serialization=serialization)
        with ok("Generating {filename}".format(filename=filename)):
            zones = geozones.find({"level": {"$in": level_ids}})
            if serialization == "json":
                with open(filename, "w") as out:
                    geojson.dump(zones, out, pretty=pretty, keys=keys)
            else:
                packer = msgpack.Packer(use_bin_type=True)
                with open(filename, "wb") as out:
                    for zone in zones:
                        out.write(packer.pack(zone))
        filenames.append(filename)

    filename = "levels.{serialization}".format(serialization=serialization)
    with ok("Generating {filename}".format(filename=filename)):
        data = [
            {"id": level.id, "label": level.label, "parents": [p.id for p in level.parents]}
            for level in ctx.obj["levels"]
        ]
        if serialization == "json":
            with open(filename, "w") as out:
                if pretty:
                    json.dump(data, out, indent=4)
                else:
                    json.dump(data, out)
        else:
            packer = msgpack.Packer(use_bin_type=True)
            with open(filename, "wb") as out:
                for item in data:
                    out.write(packer.pack(item))
        filenames.append(filename)

    if compress:
        filename = "geozones-translations.tar.xz"
        with ok("Compressing to {0}".format(filename)):
            with tarfile.open(filename, "w:xz") as txz:
                txz.add(join(ctx.obj["home"], "translations"), "translations")

        filename = "geozones-split.tar.xz" if split else "geozones.tar.xz"

        filename = "geozones{split}-{serialization}.tar.xz".format(
            split="-split" if split else "", serialization=serialization
        )
        with ok("Compressing to {0}".format(filename)):
            with tarfile.open(filename, "w:xz") as txz:
                for name in filenames:
                    txz.add(name)
                # Add translations
                txz.add(join(ctx.obj["home"], "translations"), "translations")

    os.chdir(ctx.obj["home"])
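The msgpack branch writes one packed object per zone, so consumers stream them back with an `Unpacker`; `raw=False` on the reading side pairs with the `use_bin_type=True` used when packing. A minimal read-back sketch:

import msgpack

with open("zones.msgpack", "rb") as f:
    for zone in msgpack.Unpacker(f, raw=False):
        # One dict per zone, in write order.
        print(zone["level"], zone.get("code"))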
Example No. 9
def compute_health_metrics_in_time_window(patient_id):
    import numpy as np
    from threading import Thread
    from models import HeartRate
    from errors import ApiError, ErrorCodes
    from tools import ok
    # The HRV helpers below (timeDomain, sampEn, scalingExponent,
    # correlation_coef, eclipse_fitting_methods), plus `request` and `db`,
    # are module-level names in the source file.

    start_unix_time = request.args.get("start")
    end_unix_time = request.args.get("end")

    if not start_unix_time or not end_unix_time:
        return ApiError(
            status_code=ErrorCodes.BAD_REQUEST,
            message=
            "Must include unix timestamps in query parameters start and end."
        ).to_json()

    try:
        start_unix_time = int(start_unix_time)
        end_unix_time = int(end_unix_time)
    except ValueError:
        return ApiError(
            status_code=ErrorCodes.BAD_REQUEST,
            message="Unix timestamps given in start and end must be integers"
        ).to_json()
    else:
        # Compute metrics only when both timestamps parsed cleanly.
        payload = dict()

        # Produce a list of RR intervals based on time window
        rr_list = db.session.query(HeartRate).filter(
            (HeartRate.patient_id == patient_id)
            & (HeartRate.unix_timestamp >= start_unix_time)
            & (HeartRate.unix_timestamp <= end_unix_time)).with_entities(
                HeartRate.rr).all()
        rrs = [rr for rr_sublist in rr_list for rr in rr_sublist]

        if len(rrs) < 1:
            return ApiError(
                status_code=ErrorCodes.BAD_REQUEST,
                message="Insufficient RR intervals in time window {} to {}".
                format(start_unix_time, end_unix_time)).to_json()

        time_domain_measures = dict()
        non_linear_measures = dict()
        non_linear_measures["poincare"] = dict()

        def time_domain_worker():
            [ann, sdnn, p_nn50, p_nn20, r_mssd] = timeDomain(rrs)
            time_domain_measures["ann"] = ann
            time_domain_measures["sdnn"] = sdnn
            time_domain_measures["pnn50"] = p_nn50
            time_domain_measures["pnn20"] = p_nn20
            time_domain_measures["rmssd"] = r_mssd

        def sample_entropy_worker():
            try:
                r = 0.2 * np.std(rrs)
                non_linear_measures["sample_entropy"] = sampEn(rrs, 2, r)
            except ValueError:
                non_linear_measures["sample_entropy"] = 0.0

        def dfa_worker():
            non_linear_measures["dfa"] = dict()

            if len(rrs) > 0:
                upper_scale_limit = min(1000, len(rrs))
                [scales, f, alpha] = scalingExponent(rrs, 5, upper_scale_limit,
                                                     20, 1, 2)

                non_linear_measures["dfa"]["scales"] = scales.tolist()
                non_linear_measures["dfa"][
                    "fluctuation_coefficients"] = f.tolist()
                non_linear_measures["dfa"]["alpha"] = alpha

        def poincare_coefficient():
            coefficient = correlation_coef(rrs)
            non_linear_measures["poincare"][
                "correlation_coefficient"] = coefficient

        def eclipse_fitting():
            # Poincaré ellipse fit: extract the SD1/SD2 standard deviations.
            standard_deviations = eclipse_fitting_methods(rrs)
            non_linear_measures["poincare"]["standard_deviations"] = dict()
            non_linear_measures["poincare"]["standard_deviations"][
                "sd1"] = standard_deviations["SD1"]
            non_linear_measures["poincare"]["standard_deviations"][
                "sd2"] = standard_deviations["SD2"]
            non_linear_measures["poincare"]["rr1"] = rrs[:-1]
            non_linear_measures["poincare"]["rr2"] = rrs[1:]

        threads = [
            Thread(target=time_domain_worker),
            Thread(target=sample_entropy_worker),
            Thread(target=dfa_worker),
            Thread(target=poincare_coefficient),
            Thread(target=eclipse_fitting),
        ]
        for thread in threads:
            thread.start()

        for thread in threads:
            thread.join()

        payload["time_domain_measures"] = time_domain_measures
        payload["non_linear_measures"] = non_linear_measures

        return ok(payload)
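A note on the fan-out above: plain `Thread` objects swallow any exception a worker raises, and CPython's GIL means the pure-Python parts of these workers do not truly run in parallel anyway. A sketch of the same fan-out with `concurrent.futures`, which at least surfaces worker exceptions:

from concurrent.futures import ThreadPoolExecutor

workers = [time_domain_worker, sample_entropy_worker, dfa_worker,
           poincare_coefficient, eclipse_fitting]
with ThreadPoolExecutor() as pool:
    futures = [pool.submit(worker) for worker in workers]
    for future in futures:
        future.result()  # re-raises any exception from the worker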
Example No. 10
def dist(ctx, pretty, split, compress, serialization):
    '''Dump a distributable file'''
    title('Dumping data to {serialization}'.format(
        serialization=serialization))
    geozones = DB()
    filenames = []

    if not exists(DIST_DIR):
        os.makedirs(DIST_DIR)

    os.chdir(DIST_DIR)
    level_ids = [l.id for l in ctx.obj['levels']]

    if split:
        for level_id in level_ids:
            filename = 'zones-{level}.{serialization}'.format(
                level=level_id.replace('/', '-'), serialization=serialization)
            with ok('Generating {filename}'.format(filename=filename)):
                zones = geozones.find({'level': level_id})
                if serialization == 'json':
                    with open(filename, 'w') as out:
                        geojson.dump(zones, out, pretty=pretty)
                else:
                    packer = msgpack.Packer(use_bin_type=True)
                    with open(filename, 'wb') as out:
                        for zone in zones:
                            out.write(packer.pack(zone))
            filenames.append(filename)
    else:
        filename = 'zones.{serialization}'.format(serialization=serialization)
        with ok('Generating {filename}'.format(filename=filename)):
            zones = geozones.find({'level': {'$in': level_ids}})
            if serialization == 'json':
                with open(filename, 'w') as out:
                    geojson.dump(zones, out, pretty=pretty)
            else:
                packer = msgpack.Packer(use_bin_type=True)
                with open(filename, 'wb') as out:
                    for zone in zones:
                        out.write(packer.pack(zone))
        filenames.append(filename)

    filename = 'levels.{serialization}'.format(serialization=serialization)
    with ok('Generating {filename}'.format(filename=filename)):
        data = [{
            'id': level.id,
            'label': level.label,
            'parents': [p.id for p in level.parents]
        } for level in ctx.obj['levels']]
        if serialization == 'json':
            with open(filename, 'w') as out:
                if pretty:
                    json.dump(data, out, indent=4)
                else:
                    json.dump(data, out)
        else:
            packer = msgpack.Packer(use_bin_type=True)
            with open(filename, 'wb') as out:
                for item in data:
                    out.write(packer.pack(item))
        filenames.append(filename)

    if compress:
        filename = 'geozones-translations.tar.xz'
        with ok('Compressing to {0}'.format(filename)):
            with tarfile.open(filename, 'w:xz') as txz:
                txz.add(join(ctx.obj['home'], 'translations'), 'translations')

        filename = 'geozones{split}-{serialization}.tar.xz'.format(
            split='-split' if split else '', serialization=serialization)
        with ok('Compressing to {0}'.format(filename)):
            with tarfile.open(filename, 'w:xz') as txz:
                for name in filenames:
                    txz.add(name)
                # Add translations
                txz.add(join(ctx.obj['home'], 'translations'), 'translations')

    os.chdir(ctx.obj['home'])