Example #1
def normalize_item(item):
    logging.debug("Normalize item")
    # convert from dict to data_object
    data_object = DataObject.from_dict(item)
    # check that insulin sensitivity factor and carb ratio are set
    if not (data_object.insulin_sensitivity > 0
            and data_object.carb_ratio > 0):
        raise ValueError(
            "Sensitivity factor and carb ratio must be set in the data object to normalize data"
        )

    # get carb data
    carbs = data_object.data['mealValue']
    # Normalize carb data with carb ratio
    normalized_carbs = carbs / data_object.carb_ratio
    # Save old values and change to new value
    data_object.data['non_normalized_carbs'] = data_object.data['mealValue']
    data_object.data['mealValue'] = normalized_carbs

    # get bolus and basal data
    bolus = data_object.data['bolusValue']
    basal = data_object.data['basalValue']
    # Normalize insulin data with insulin sensitivity
    normalized_bolus = bolus * data_object.insulin_sensitivity
    normalized_basal = basal * data_object.insulin_sensitivity
    # Save old values and replace with normalized
    data_object.data['non_normalized_bolus'] = data_object.data['bolusValue']
    data_object.data['non_normalized_basal'] = data_object.data['basalValue']

    data_object.data['bolusValue'] = normalized_bolus
    data_object.data['basalValue'] = normalized_basal

    return data_object.to_dict()
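The normalization itself is plain arithmetic: carbs are divided by the carb ratio, and insulin amounts are multiplied by the insulin sensitivity factor, so both end up on a comparable scale. A minimal sketch of that arithmetic with made-up values (the numbers and units are illustrative, not taken from the source data):

# Illustrative values only: a carb ratio of 10 g/U and a sensitivity factor of 2
carb_ratio = 10.0
insulin_sensitivity = 2.0

carbs = 45.0  # grams of carbohydrate
bolus = 3.5   # insulin units

normalized_carbs = carbs / carb_ratio            # 4.5, carbs expressed in insulin units
normalized_bolus = bolus * insulin_sensitivity   # 7.0, expected glucose-lowering effect

print(normalized_carbs, normalized_bolus)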
Example #2
def detect_outliers(item):
    data_object = DataObject.from_dict(item)
    cgm = data_object.data['cgmValue']
    cgm_max = max(cgm)
    cgm_min = min(cgm)
    # flag physiologically implausible CGM readings
    if cgm_max > 600 or cgm_max < 0 or cgm_min < 20 or cgm_min > 400:
        logging.debug(data_object.start_time)
        logging.debug(cgm_max)
        return True
    return False
Example #3
def plot(item):
    logging.info('plotting {}'.format(item.doc_id))
    data_object = DataObject.from_dict(item)
    cgm = data_object.data['cgmValue']
    cgm_max = max(cgm)
    if cgm_max > 600 or cgm_max < 0:
        logging.info(cgm_max)
        logging.info("CGM value out of plausible range")

    data_object.data['cgmValue'].plot()
    plt.scatter(data_object.data['cgmValue_original'].index,
                data_object.data['cgmValue_original'])

    plt.savefig('{}results/outliers/doc-{}.png'.format(path, item.doc_id))
    plt.close()
    logging.info("end")
Example #4
 def add_events(self):
     count = 0
     # Iterate through all items in db
     for item in self.db.search(~where('valid').exists()
                                & (~where('carb').exists()
                                   | ~where('bolus').exists()
                                   | ~where('basal').exists())):
         if 'carb' not in item:  # items are dict-like documents, so check the key
             data_object = DataObject.from_dict(item)
             logging.debug("doc id: {}".format(item.doc_id))
             data_object.add_events()
             di = data_object.to_dict()
             self.db.update(di, where('start_time') == di['start_time'])
             count += 1
     self.db.storage.flush()
     logging.info("updated {} items in DB".format(count))
Example #5
def add_gradient(db: TinyDB):
    logging.debug("Calculate Gradient for items")
    gradients = []
    items = db.search(~where('gradient-s').exists())
    for item in items:
        logging.debug("#: {}\tdoc id: {}".format(len(gradients), item.doc_id))
        data_object = DataObject.from_dict(item)
        data = data_object.data['cgmValue'].values
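        # lagged difference: each value minus the reading 5 samples earlier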
        d1 = data[630:690]
        d2 = data[635:695]
        max_gradient = max(d2 - d1)
        logging.debug("max gradient {}".format(max_gradient))
        item['gradient-s'] = float(max_gradient)  # cast numpy scalar for JSON storage
        gradients.append(max_gradient)
        db.write_back([item])
    db.storage.flush()
    logging.info("Added gradient to {} items".format(len(items)))
Example #6
def check_events(data_object: DataObject) -> bool:
    # check for events in the prediction time frame
    event_types = ['carb', 'bolus']  # basal events are fine
    # check that there is at least a carb or a bolus event
    if not any(map(lambda x: hasattr(data_object, x + '_events'),
                   event_types)):
        return False
    for key in event_types:
        if hasattr(data_object, key + '_events'):
            events = getattr(data_object, key + '_events')
            if key == 'bolus':
                events = events[events['units'] > 0]
            # Check that there are no events in the prediction window
            if not events.index[np.logical_and(events.index >= 600,
                                               events.index < 730)].empty:
                logging.debug("events in prediction ")
                return False
            # Check that there are events in the training window
            if events.index[events.index <= 600].empty:
                return False
    return True
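check_events assumes the event index is in minutes relative to the window start: minutes 600 to 730 form the prediction horizon, and everything at or before minute 600 is training data. A small pandas sketch of the same index filtering (the event times are invented):

import numpy as np
import pandas as pd

# invented carb events, indexed by minute within the window
events = pd.DataFrame({'grams': [30, 15]}, index=[120, 650])

in_prediction = events.index[np.logical_and(events.index >= 600,
                                            events.index < 730)]
in_training = events.index[events.index <= 600]
print(not in_prediction.empty)  # True: an event falls in the prediction window
print(not in_training.empty)    # True: training data is present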
Example #7
 def check_valid(self, early: bool = True):
     # get all items which are not flagged valid
     items = self.db.search(~where('valid').exists())
     #items = self.db.all()
     random.shuffle(items)
     logging.info("checking {} items".format(len(items)))
     remove_ids = []
     valid_ids = []
     valid_events = []
     valid_cgms = []
     issues = []
     for item in items:
         valid, v_events, v_cgm, issue = check_data_object(
             DataObject.from_dict(item), early)
         valid_events.append(v_events)
         valid_cgms.append(v_cgm)
         if not v_cgm:
             issues.append(issue)
         if valid:
             valid_ids.append(item.doc_id)
         else:
             remove_ids.append(item.doc_id)
     # invalid items are flagged below instead of being removed
     ##self.db.remove(doc_ids = remove_ids)
     # remember valid items
     if not early:
         logging.debug("not valid events {}".format(
             len(valid_events) - sum(valid_events)))
         logging.debug(
             "not valid cgms {}".format(len(valid_cgms) - sum(valid_cgms)))
         logging.debug("less than 10: {}".format(len(issues) - sum(issues)))
         logging.debug("max difference < 75: {}".format(sum(issues)))
         self.db.update({'valid': True}, doc_ids=valid_ids)
     self.db.update({'valid': False}, doc_ids=remove_ids)
     self.db.storage.flush()
     logging.info("removed {} items form db".format(len(remove_ids)))
     logging.info("Still {} items in db".format(
         len(self.db.search(where('valid') == True))))
Example #8
def add_to_db(data: pd.DataFrame, db: TinyDB,
              profile: Profile_reader) -> int:
    # split data into prediction windows
    # set start and end time
    start_time = data.index[0] + timedelta(hours=long_window_length -
                                           window_length)
    final_start_time = data.index[-1] - timedelta(hours=window_length)
    counter = 0
    # get all starting times
    times = list(map(lambda x: x['start_time'], db.all()))
    # Loop Through Data
    while start_time < final_start_time:
        # Check if time frame exists
        if start_time.isoformat() not in times:
            data_object = DataObject()
            data_object.id = profile.get_id()
            data_object.insulin_sensitivity = profile.get_insulin_sensitivity()
            data_object.carb_ratio = profile.get_carb_ratio()

            data_object.set_start_time(start_time)
            end_time = start_time + timedelta(hours=window_length)
            data_object.set_end_time(end_time)
            # select data for this window --- SHORT
            subset_short = data.loc[start_time <= data.index]
            subset_short = subset_short.loc[end_time >= subset_short.index]
            # set index to minutes
            subset_short.index = np.arange(0.0, len(subset_short))
            data_object.set_data_short(subset_short)
            # select data for this window --- LONG
            subset_long = data.loc[start_time - timedelta(
                hours=long_window_length - window_length) <= data.index]
            subset_long = subset_long.loc[end_time >= subset_long.index]
            # set index to minutes
            subset_long.index = np.arange(0.0, len(subset_long))
            data_object.set_data_long(subset_long)
            db.insert(data_object.to_dict())
            counter += 1
            #if counter >= 2:
            #    break
            print('Processing [%d]\r' % counter, end="")
        start_time += timedelta(minutes=delta_length)
    db.storage.flush()
    return counter
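The loop slides a window_length-hour window across the series in steps of delta_length minutes, skipping windows whose start time is already in the database. A stripped-down sketch of just the stepping logic (the three constants are placeholders for the module-level values used above):

from datetime import datetime, timedelta

window_length = 13       # hours, placeholder
long_window_length = 24  # hours, placeholder
delta_length = 15        # minutes, placeholder

first, last = datetime(2021, 1, 1), datetime(2021, 1, 3)
start_time = first + timedelta(hours=long_window_length - window_length)
final_start_time = last - timedelta(hours=window_length)

count = 0
while start_time < final_start_time:
    end_time = start_time + timedelta(hours=window_length)  # window under consideration
    count += 1
    start_time += timedelta(minutes=delta_length)
print(count)  # number of candidate windows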
Example #9
def rolling(db: TinyDB, user_data: UserData):
    check_directories()
    predictionWindow = PredictionWindow()

    results = []
    prediction_carb_optimized = []
    i = 0
    loop_start = datetime.now()

    # select valid items that already have a result (shuffled below)
    elements = db.search(where('result').exists() & (where('valid') == True))

    elements = list(filter(check_time, elements))

    #elements = list(filter(lambda x: any(list(map(lambda y: abs(y['errors'][0]) > 70, x['result']))), elements))

    random.shuffle(elements)
    logging.info("number of unprocessed items {}".format(len(elements)))

    last_save = 0
    for item in elements:
        # Break out of loop if enough results or it takes too long
        if len(results) >= config.PREDICTION_CONFIG['max_number_of_results'] or \
                (datetime.now() - loop_start).seconds > config.PREDICTION_CONFIG['runtime_in_minutes'] * 60:
            break
        logger.info("#:{} \t #R:{}\tdoc_id: {}".format(i, len(results),
                                                       item.doc_id))
        # Get element
        data_object = DataObject.from_dict(item)

        predictionWindow.startTime = data_object.start_time
        logger.info(predictionWindow.startTime.isoformat())
        predictionWindow.endTime = data_object.end_time
        # select data for this window
        predictionWindow.data = data_object.data
        predictionWindow.data_long = data_object.data_long
        predictionWindow.events = pd.concat([
            data_object.carb_events, data_object.basal_events,
            data_object.bolus_events
        ])
        predictionWindow.userData = user_data
        predictionWindow.plot = config.PREDICTION_CONFIG['create_plots']
        predictionWindow.features_90 = data_object.features_90

        # prediction_carb_optimized.append(checkOptimizer.check(predictionWindow))

        if checkData.check_window(predictionWindow.data, user_data):
            # Call to Predictors
            res, order, features = check.check_and_plot(predictionWindow, item)
            # Write result back into db
            if res is not None:
                results.append(res)
                if 'result' in item:
                    item['result'] = item['result'] + res
                    db.write_back([item])
                else:
                    db.update({'result': res}, doc_ids=[item.doc_id])
            if features is not None:
                if 'features-90' in item:
                    item['features-90'] = features
                    db.write_back([item])
                else:
                    db.update({'features-90': features}, doc_ids=[item.doc_id])
            if order is not None:
                if 'features' in item:
                    item['features'] = order
                    db.write_back([item])
                else:
                    db.update({'features': order}, doc_ids=[item.doc_id])

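        # flush to disk after roughly every 10 new results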
        if len(results) > 10 + last_save:
            last_save = len(results)
            db.storage.flush()
    db.storage.flush()
    logger.info("length of result {}".format(len(results)))
    # save all prediction carb optimized values to a json file
    to_file(prediction_carb_optimized)

    return results