Example #1
def main():
    logging.info("Calculating ISF")
    data_16 = read_data(data_csv_path_16)
    data_17 = read_data(data_csv_path_17)
    data = pd.concat([data_16, data_17])
    isf = eighteen_hundred_rule(data)
    logging.info("ISF calculated by 1800 rule: {}".format(isf))
Example #2
 def test_convert_glucose(self):
     data = readData.read_data(filename)
     data = convertData.select_columns(data)
     data = convertData.create_time_index(data)
     self.assertIsInstance(data['glucoseAnnotation'][0], str)
     data = convertData.convert_glucose_annotation(data)
     self.assertIsInstance(data['glucoseAnnotation'][0], float)
Example #3
 def test_drop_date_and_time(self):
     data = readData.read_data(filename)
     data = convertData.select_columns(data)
     data = convertData.create_time_index(data)
     self.assertEqual(7, data.shape[1])
     data = convertData.drop_date_and_time(data)
     self.assertEqual(5, data.shape[1])
Example #4
 def test_interpolate_cgm(self):
     data = readData.read_data(filename)
     data = convertData.select_columns(data)
     data = convertData.create_time_index(data)
     data = convertData.convert_glucose_annotation(data)
     self.assertGreater(data['cgmValue'].isna().sum(), 10)
     data = convertData.interpolate_cgm(data)
     self.assertEqual(0, data['cgmValue'].isna().sum())
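The test above only pins down the contract of convertData.interpolate_cgm: gaps in cgmValue before, no NaNs after. One plausible pandas implementation, assuming the time index created by create_time_index:

def interpolate_cgm(data):
    # Time-weighted interpolation over the DatetimeIndex, then fill the
    # leading/trailing NaNs that interpolation cannot reach.
    data['cgmValue'] = (data['cgmValue'].interpolate(method='time')
                        .ffill().bfill())
    return data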
Example #5
 def load(self, filename):
     # Read file as pandas DataFrame
     logging.debug("read data...")
     data_original = read_data(filename)
     # Prepare Data
     logging.debug("prepare data...")
     data = prepare_data(data_original)
     # Get Info from Profile.json
     profile_reader = Profile_reader(filename)
     # Add data to TinyDB
     logging.debug("add data to database...")
     count = add_to_db(data, self.db, profile_reader)
     logging.info("added {} new items to DB".format(count))
Example #6
def main():
    logger.info("Start Main!")
    # SELECT OPTIONS
    # Set to True if autotune should run; if the data set has been run
    # before, leave False to improve speed.
    run_autotune: bool = False
    # Set to True to get a plot for every prediction window.
    create_plots: bool = False

    # SET USER DATA
    user_data = UserData(bginitial=100.0, cratio=5, idur=4, inputeeffect=None,
                         sensf=41, simlength=13, predictionlength=180, stats=None)

    # LOAD DATA
    data = readData.read_data(filename)

    # CLEAN UP DATA FRAME
    data = convertData.convert(data)

    # INTERPOLATE CGM MEASURES
    data = convertData.interpolate_cgm(data)

    # GET SENSITIVITY FACTOR AND CARBRATIO FOR EVERY DAY
    logger.info("Run Autotune? " + str(run_autotune))
    if run_autotune:
        autotune_res = autotune.runAutotune(data)
    else:
        autotune_res = autotune.getAllSensAndCR(data)

    # MAKE A ROLLING PREDICTION
    logger.info("Start Prediciton")
    prediction_result = rolling.rolling(data, pd.Timedelta('15 minutes'), user_data, autotune_res, create_plots)
    logger.info("Finished prediction")

    # ANALYSE PREDICTION RESULTS
    summary, all_data = analyze.getSummary(prediction_result)

    # CREATE PLOTS FOR ANALYSIS SUMMARY
    analyze.createErrorPlots(summary, all_data)


    # CREATE A GIF OUT OF THE PREDICTION PLOTS
    #if create_plots:
    #    gifmaker.makeGif(path + "results/plots/", data)

    logger.info("Main finished!")
Example #7
File: train.py  Project: wgwangang/mycodes
def main():
    train_batch_sample, train_batch_label = readData.read_data(TRAIN_RECORDS,
                                                               batch_size=64)
    train_batch_sample = tf.add(
        train_batch_sample,
        tf.random_normal(shape=tf.shape(train_batch_sample), stddev=0.1))

    test_batch_sample, test_batch_label = readData.read_data(TEST_RECORDS,
                                                             batch_size=64)

    train_batch_label_one_hot = tf.one_hot(train_batch_label,
                                           depth=config_.NUM_CLASSES,
                                           name="train_one_hot")
    test_batch_label_one_hot = tf.one_hot(test_batch_label,
                                          depth=config_.NUM_CLASSES)

    # build TSCGAN
    model = basic.TSCGAN()
    d_loss, g_loss = model.graph(train_batch_sample, train_batch_label_one_hot)

    train_top1, train_top5 = model.accuracy(model.logits_,
                                            train_batch_label_one_hot)

    d_train_op = model.get_opt(d_loss)
    g_train_op = model.get_opt(g_loss, D_or_G='G')

    # d for test
    test_logits, _ = model.D(test_batch_sample, reuse=True, training=False)

    ###########################################################################
    test_batch_label_ = tf.identity(test_batch_label, name="test_batch_label")
    test_pred = tf.argmax(test_logits, axis=1, name="test_pred")
    ###########################################################################

    test_top1, test_top5 = model.accuracy(test_logits,
                                          test_batch_label_one_hot,
                                          phrase='test')

    d_var_list = tf.get_collection(
        tf.GraphKeys.SAVEABLE_OBJECTS, scope="D") + tf.get_collection(
            tf.GraphKeys.GLOBAL_VARIABLES, scope='D')

    d_saver = tf.train.Saver(var_list=d_var_list)
    with tf.Session() as sess:
        writer = tf.summary.FileWriter(logdir='./ckpt', graph=sess.graph)
        tf.local_variables_initializer().run()
        tf.global_variables_initializer().run()

        coord = tf.train.Coordinator()

        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        try:
            while not coord.should_stop():
                global GLOBAL_STEP
                # Run training steps or whatever
                train_num_iteration(sess=sess,
                                    d_train_op=d_train_op,
                                    g_train_op=g_train_op)
                test_num_iteration(sess=sess, test_logits=test_logits)
                d_saver.save(sess=sess,
                             save_path="ckpt/%s.ckpt" % DataSetName,
                             global_step=GLOBAL_STEP)
                GLOBAL_STEP += 1
        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        except KeyboardInterrupt:
            print('interrupted...')
        finally:
            print("cleaning...")
            # When done, ask the threads to stop.
            coord.request_stop()
            # Wait for threads to finish.
            coord.join(threads)
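Stripped of the model code, the session block above is the standard TF 1.x queue-runner lifecycle:

import tensorflow as tf

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        while not coord.should_stop():
            pass  # run training/eval ops here
    except tf.errors.OutOfRangeError:
        pass  # input queues exhausted
    finally:
        coord.request_stop()  # signal queue threads to exit
        coord.join(threads)   # block until they have finished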
Example #8
 def test_select_columns(self):
     data = readData.read_data(filename)
     self.assertEqual(28, data.shape[1])
     data = convertData.select_columns(data)
     self.assertEqual(7, data.shape[1])
Example #9
 def test_create_time_index(self):
     data = readData.read_data(filename)
     data = convertData.select_columns(data)
     self.assertEqual(type(data.index[0]), int)
     data = convertData.create_time_index(data)
     self.assertIsInstance(data.index[0], pd.Timestamp)
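The two tests imply that select_columns keeps separate date and time columns (dropped later by drop_date_and_time) and that create_time_index turns them into a Timestamp index. A possible implementation under that assumed schema; the column names are guesses:

import pandas as pd

def create_time_index(data):
    # Hypothetical: combine assumed 'date' and 'time' string columns
    # into a DatetimeIndex.
    data.index = pd.to_datetime(data['date'] + ' ' + data['time'])
    return data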
Example #10
def main():
    train_batch_sample, train_batch_label = readData.read_data(TRAIN_RECORDS,
                                                               config=config_)
    # train_batch_sample = tf.add(train_batch_sample,
    #                             tf.random_normal(shape=tf.shape(train_batch_sample), stddev=0.1))
    train_batch_label_one_hot = tf.one_hot(train_batch_label,
                                           depth=config_.NUM_CLASSES,
                                           name="train_one_hot")
    ########################## prepare test data
    _, _, _, labels, features = bulid_record.build_dataset(
        config_.TEST_RECORDS)
    test_batch_sample = tf.Variable(features,
                                    trainable=False,
                                    dtype=tf.float32)
    test_batch_label = tf.Variable(labels, trainable=False, dtype=tf.int64)
    test_batch_label_one_hot = tf.one_hot(test_batch_label,
                                          depth=config_.NUM_CLASSES)
    ############################################

    # build TSCGAN
    model = tsganv2.TSCGAN(config=config_)
    logits, _ = model.D(train_batch_sample)

    loss = tf.nn.softmax_cross_entropy_with_logits(
        labels=train_batch_label_one_hot, logits=logits)
    loss = tf.reduce_mean(loss, name='d_loss')
    train_top1, train_top5 = model.accuracy(logits, train_batch_label_one_hot)

    d_train_op = model.get_opt(loss, regulizer=False)
    # g_train_op = model.get_opt(g_loss, D_or_G='G')

    # d for test
    test_logits, _ = model.D(test_batch_sample, reuse=True, training=False)

    ###########################################################################
    test_batch_label_ = tf.identity(test_batch_label, name="test_batch_label")
    test_pred = tf.argmax(test_logits, axis=1, name="test_pred")
    ###########################################################################

    test_top1, test_top5 = model.accuracy(test_logits,
                                          test_batch_label_one_hot,
                                          phrase='test')

    d_var_list = tf.get_collection(
        tf.GraphKeys.SAVEABLE_OBJECTS, scope="D") + tf.get_collection(
            tf.GraphKeys.GLOBAL_VARIABLES, scope='D')

    d_saver = tf.train.Saver(var_list=d_var_list)
    with tf.Session() as sess:
        writer = tf.summary.FileWriter(logdir=os.path.join(
            current_dir, 'ckpt_cnn'),
                                       graph=sess.graph)
        tf.local_variables_initializer().run()
        tf.global_variables_initializer().run()

        coord = tf.train.Coordinator()

        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        try:
            while not coord.should_stop():
                global GLOBAL_STEP
                # Run training steps or whatever
                train_accu, train_loss = train_num_iteration(
                    sess=sess,
                    d_train_op=d_train_op,
                    num_iteration=NUM_TRAIN_ITERATION)
                model.decay_learning_rate()

                test_accu = test_num_iteration(
                    sess=sess, model=model, num_iteration=NUM_TEST_ITERATION)
                print(
                    "step %d, training loss %.6f , training accuracy %.6f , test accuracy %.6f."
                    % (GLOBAL_STEP * NUM_TRAIN_ITERATION, train_loss,
                       train_accu, test_accu))

                d_saver.save(sess=sess,
                             save_path=os.path.join(
                                 current_dir,
                                 "ckpt_cnn/%s.ckpt" % DataSetName),
                             global_step=GLOBAL_STEP)
                GLOBAL_STEP += 1
        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        except KeyboardInterrupt:
            print('interrupted...')
        finally:
            # When done, ask the threads to stop.
            coord.request_stop()
            # Wait for threads to finish.
            coord.join(threads)
        print("all threads are closed!")
Example #11
def main():

    train_batch_sample, train_batch_label = readData.read_data(TRAIN_RECORDS,
                                                               config=config_)
    # train_batch_sample = tf.add(train_batch_sample,
    #                             tf.random_normal(shape=tf.shape(train_batch_sample), stddev=0.1))
    ########################## prepare test data
    _, _, _, labels, features = bulid_record.build_dataset(
        config_.TEST_RECORDS)
    test_batch_sample = tf.Variable(features,
                                    trainable=False,
                                    dtype=tf.float32)
    test_batch_label = tf.Variable(labels, trainable=False, dtype=tf.int64)
    ############################################
    # test_batch_sample, test_batch_label = readData.read_data(TEST_RECORDS, config=config_, is_training=False)

    train_batch_label_one_hot = tf.one_hot(train_batch_label,
                                           depth=config_.NUM_CLASSES,
                                           name="train_one_hot")
    test_batch_label_one_hot = tf.one_hot(test_batch_label,
                                          depth=config_.NUM_CLASSES)

    # build TSCGAN

    model = tsganv2.TSCGAN(config=config_)
    d_loss, g_loss = model.graph(train_batch_sample, train_batch_label_one_hot)

    train_top1, train_top5 = model.accuracy(model.logits_,
                                            train_batch_label_one_hot)

    d_train_op = model.get_opt(d_loss, regulizer=False)
    g_train_op = model.get_opt(g_loss, D_or_G='G', regulizer=False)

    # g_train_op = model.get_opt(g_loss, D_or_G='G')

    # d for test
    test_logits, _ = model.D(test_batch_sample, reuse=True, training=False)

    ###########################################################################
    test_batch_label_ = tf.identity(test_batch_label, name="test_batch_label")
    test_pred = tf.argmax(test_logits, axis=1, name="test_pred")
    ###########################################################################

    test_top1, test_top5 = model.accuracy(test_logits,
                                          test_batch_label_one_hot,
                                          phrase='test')
    gpu_conf = tf.GPUOptions(allow_growth=True)
    sess_conf = tf.ConfigProto(gpu_options=gpu_conf)
    with tf.Session(config=sess_conf) as sess:
        writer = tf.summary.FileWriter(logdir=os.path.join(
            current_dir, 'ckpt'),
                                       graph=sess.graph)
        tf.local_variables_initializer().run()
        tf.global_variables_initializer().run()

        coord = tf.train.Coordinator()

        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        train_accu_track = []
        test_accu_track = []
        try:
            while not coord.should_stop():
                global GLOBAL_STEP
                # Run training steps or whatever
                train_accu, train_loss = train_num_iteration(
                    sess=sess,
                    d_train_op=d_train_op,
                    g_train_op=g_train_op,
                    num_iteration=NUM_TRAIN_ITERATION)
                model.decay_learning_rate()

                test_accu = test_num_iteration(sess=sess,
                                               model=model,
                                               num_iteration=1)

                train_accu_track.append(train_accu)
                test_accu_track.append(test_accu)

                print(
                    "step %d, training loss %.6f , training accuracy %.6f , test accuracy %.6f."
                    % (GLOBAL_STEP * NUM_TRAIN_ITERATION, train_loss,
                       train_accu, test_accu))
                if GLOBAL_STEP % 100 == 0:
                    model.save_ckpt(save_path=os.path.join(
                        current_dir, "ckpt/GAN%s.ckpt" % DataSetName),
                                    global_step=GLOBAL_STEP)

                GLOBAL_STEP += 1
        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        except KeyboardInterrupt:
            print('Keyboard Interrupt')
        finally:
            print('saving accuracy curve.')
            plt.plot(train_accu_track, label='train_accu')
            plt.plot(test_accu_track, label='test_accu')
            plt.legend(loc='lower right')
            plt.savefig('%s_gan_train_test.jpg' % config_.DATASET_NAME)
            print("max accuracy is ", max(test_accu_track))
            # When done, ask the threads to stop.
            coord.request_stop()
            # Wait for threads to finish.
            coord.join(threads)
        print("all threads are closed!")
Example #12
def prep_csv():
    data = read_data(filename)
    data = convert(data)
    data = interpolate_cgm(data)
    cgm = data['cgmValue']
    cgm.to_csv(path + 'data/csv/cgm_17_1-6.csv')
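Reading the exported series back is the mirror operation; a sketch, with path and the filename taken from the example above:

import pandas as pd

cgm = pd.read_csv(path + 'data/csv/cgm_17_1-6.csv',
                  index_col=0, parse_dates=True).squeeze('columns')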