示例#1
0
 def by_infile(self, infile):
     """Run inference on every event in *infile* and store the results.

     Pipeline: wipe the output directory, load raw events from the input
     file, preprocess them into stored partitions (all data goes to the
     testing partition), restore the ConvNet from ``self.INITIAL_WEIGHTS``,
     run the predictor once over the testing batch, and insert one result
     record per event into the database.

     Args:
         infile: Path of the input file handed to
             ``get_events_from_infile``.
     """
     # Start from a clean output directory. ignore_errors covers the
     # "directory does not exist" case without a bare except that would
     # also swallow unrelated failures (permissions, KeyboardInterrupt).
     shutil.rmtree(self.OUTPUT_DIR, ignore_errors=True)
     self.db_open()
     json_data = self.get_events_from_infile(infile)
     # Build preprocessor and turn the raw JSON into feature matrices.
     ppr = Preprocessor()
     X, Y, _events_found = ppr.get_from_json(self.DIMENSION, json_data)
     X, Y = ppr.remove_outliers(X, Y)
     X, Y = ppr.normalize(X, Y)
     # 0.0 / 1.0 split: nothing goes to training, everything to testing.
     trX, trY, teX, teY, vaX, vaY = ppr.partition_for_training(
         X, Y, 0.0, 1.0)
     ppr.store_training_partitions(trX, trY, teX, teY, vaX, vaY,
                                   self.INPUT_DIR)
     # Build adapter over the stored partitions.
     adapter = MACAdapter(self.INPUT_DIR, self.DIMENSION, self.FOLDS)
     # Build model.
     convnet = ConvNet(self.DIMENSION)
     # Build server in "use" mode so it serves the testing partition.
     server = ConvNetServer(adapter,
                            self.OUTPUT_DIR,
                            batch_size=self.BATCH_SIZE,
                            verbose=True,
                            use=True)
     x, durs, _ = server.get_testing_batch()
     with tf.Session() as sess:
         sess.run(tf.global_variables_initializer())
         # Overwrite the fresh variables with the trained checkpoint.
         convnet.restore(sess, self.INITIAL_WEIGHTS)
         predictions = sess.run(convnet.predictor,
                                feed_dict={
                                    convnet.x: x,
                                    convnet.durs: durs
                                })
     # Pair each event id with its prediction; ids come back in the
     # same order the adapter served the batch.
     _, _, ids = adapter.get_ids()
     results = [{
         "eventID": int(event_id),
         "ml": {
             # round() on the np.around result keeps the original
             # stored type/precision behavior.
             "aircraftProbability":
             round(np.around(prediction[0], decimals=4), 4),
             "model": self.MODEL
         }
     } for event_id, prediction in zip(ids, predictions)]
     for result in results:
         self.insert_result_for_event(result)
     self.db_close()
示例#2
0
'''
# Only run if this is the main module to be run
if __name__ == '__main__':

    # build preprocessor
    ppr = Preprocessor()

    # Process raw data
    X, Y, events_found = ppr.get_raw_data(DIMENSION, [RAW_FILE], bad)
    X, Y = ppr.remove_outliers(X, Y)
    X, Y = ppr.normalize(X, Y)
    trX, trY, teX, teY, vaX, vaY = ppr.partition_for_training(X, Y, 0.0, 1.0)
    ppr.store_training_partitions(trX, trY, teX, teY, vaX, vaY, INPUT_DIR)

    # build adapter
    adapter = MACAdapter(INPUT_DIR, DIMENSION, FOLDS)

    # build model
    convnet = ConvNet(DIMENSION)

    # build server
    server = ConvNetServer(adapter,
                           OUTPUT_DIR,
                           batch_size=BATCH_SIZE,
                           verbose=True,
                           use=True)

    x, durs, _ = server.get_testing_batch()

    with tf.Session() as sess:
        init = tf.global_variables_initializer()
示例#3
0
'''
Tester
'''
# Number of trials to run for each fold; the reported statistics are
# averaged over these trials.
TRIALS_PER_FOLD: int = 5


'''
SCRIPT
'''
# Only run if this is the main module to be run
if __name__ == '__main__':

    # build adapter
    adapter = MACAdapter(INPUT_DIR, DIMENSION, FOLDS)

    # build model
    convnet = ConvNet(DIMENSION)

    # build server
    server = ConvNetServer(adapter, OUTPUT_DIR,
                        batch_size = BATCH_SIZE,
                        verbose = False)

    # build trainer
    trainer = ConvNetTrainer(convnet, server, EPOCHS, STEPS_PER_EPOCH,
                          optimizer = OPTIMIZER,
                          opt_kwargs = OPT_KWARGS,
                          keep_prob = KEEP_PROB,
                          batch_size = BATCH_SIZE,
示例#4
0
# File location of the checkpoint to restore model weights from
# (e.g. weights/model1.ckpt).
INITIAL_WEIGHTS: str = './poor.ckpt'
# Probability of keeping a unit during dropout; 1.0 disables dropout.
KEEP_PROB: float = 1.0
# Number of examples per training batch.
BATCH_SIZE: int = 400
# Log training status whenever step % DISPLAY_STEP == 0.
DISPLAY_STEP: int = 5
'''
SCRIPT
'''
# Only run if this is the main module to be run
if __name__ == '__main__':

    # Build the dataset adapter over the input directory.
    # NOTE(review): sibling scripts call MACAdapter(INPUT_DIR, DIMENSION,
    # FOLDS) — confirm this one-argument form is intentional here.
    adapter = MACAdapter(INPUT_DIR)

    # Build the model.
    # NOTE(review): input dimension is hard-coded to 10, while sibling
    # scripts pass a DIMENSION constant — confirm this is deliberate.
    convnet = ConvNet(10)

    # Build the batch server that feeds data from the adapter.
    server = ConvNetServer(adapter, OUTPUT_DIR, batch_size=BATCH_SIZE)

    # Build the trainer that optimizes the model with batches from the
    # server (training itself presumably happens below this excerpt).
    trainer = ConvNetTrainer(convnet,
                             server,
                             EPOCHS,
                             STEPS_PER_EPOCH,
                             optimizer=OPTIMIZER,
                             opt_kwargs=OPT_KWARGS)