Example #1
def test_saving(self):
    data_lines = [
        'Temperature1: 56',
        'Temperature2=54 ',
        'Temperature3=53',
        'Temperature3=52',
        'Temperature1=666',
        'Temperature1',
        'Error',
        'Temperature1=666degrees',
        'Temperature4=No data',
        'Temperature1=54',
        'Temperature1=55',
        'Temperature4=40',
        'Temperature3=55',
    ]
    parser = DataParser()
    for line in data_lines:
        parser.parse(line)
    parser.save_results('mixed_json_results.json')
    self.assertTrue(
        filecmp.cmp('mixed_json_results.json', 'json_files/mixed_json_results.json'),
        'Files are different')
    os.remove('mixed_json_results.json')
Example #2
def test_good_data(self):
    data_lines = [
        'Temperature1=56',
        'Temperature2=54 ',
        'Temperature3=53',
        'Temperature3=52',
        'Temperature1=666',
    ]
    expected_result = {
        1: [56, 666],
        2: [54],
        3: [53, 52],
    }

    parser = DataParser()
    for line in data_lines:
        parser.parse(line)
    parsing_results = parser.get_results()
    self.assertEqual(expected_result, parsing_results)
Example #3
def test_bad_data(self):
    data_lines = [
        'Temperature1: 56',
        'Temperature2=54 ',
        'Temperature3=53',
        'Temperature3=52',
        'Temperature1=666',
        'Temperature1',
        'Error',
        'Temperature1=666degrees',
        'Temperature4=No data',
    ]
    expected_result = {
        1: [666],
        2: [54],
        3: [53, 52],
    }
    parser = DataParser()
    for line in data_lines:
        parser.parse(line)
    parsing_results = parser.get_results()
    self.assertEqual(expected_result, parsing_results)
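
Read together, the three tests pin down the parser's contract: parse() accepts only lines of the exact form 'Temperature<N>=<integer>' (trailing whitespace tolerated), silently drops everything else, get_results() returns a dict mapping each sensor number to its readings in arrival order, and save_results() dumps that dict to a JSON file. A minimal sketch that satisfies the two results tests follows; the internals, and the exact JSON layout the reference file in test_saving expects, are assumptions since the examples only show the public methods.

import json
import re


class DataParser:
    # Accept 'Temperature<N>=<integer>' with optional trailing whitespace;
    # anything else ('Temperature1: 56', 'Temperature1=666degrees', 'Error')
    # is silently skipped, as test_bad_data expects.
    LINE_RE = re.compile(r'^Temperature(\d+)=(-?\d+)\s*$')

    def __init__(self):
        self.results = {}

    def parse(self, line):
        match = self.LINE_RE.match(line)
        if match is None:
            return
        sensor_id, value = int(match.group(1)), int(match.group(2))
        self.results.setdefault(sensor_id, []).append(value)

    def get_results(self):
        return self.results

    def save_results(self, filename):
        # Note: JSON serialisation turns the integer keys into strings;
        # the reference file's exact formatting is not shown in the examples.
        with open(filename, 'w') as out_file:
            json.dump(self.results, out_file)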
Example #4
def main(args):
    data_parser = DataParser(data_prefix=args.data_prefix,
                             images_dirpath=args.images_dirpath,
                             masks_dirpath=args.masks_dirpath,
                             img_masks_filepath=args.img_masks_filepath,
                             contours_type=args.contours_type,
                             logs_prefix=args.logs_prefix,
                             visualize_contours=args.visualize_contours)
    data_parser.parse()
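
This main() presumably sits under an argparse entry point; a sketch of the flag wiring implied by the attribute names follows (defaults, required flags and the entry-point shape are assumptions, not part of the original):

import argparse

if __name__ == '__main__':
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--data_prefix', required=True)
    arg_parser.add_argument('--images_dirpath', required=True)
    arg_parser.add_argument('--masks_dirpath', required=True)
    arg_parser.add_argument('--img_masks_filepath', required=True)
    arg_parser.add_argument('--contours_type', required=True)
    arg_parser.add_argument('--logs_prefix', default='logs')  # default assumed
    arg_parser.add_argument('--visualize_contours', action='store_true')
    main(arg_parser.parse_args())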
Example #5
def main(_):
    feat_dict = FeatureDictionary()
    print("feature_size: %d" % feat_dict.feature_size)
    print("field_size: %d" % feat_dict.field_size)
    print(feat_dict.col2feat_id.keys())
    dataparser = DataParser(feat_dict, FLAGS.label)
    train_ids, train_vals, train_labels = dataparser.parse(infile="%s\\train_sample.csv" % FLAGS.data_dir)
    print("len of train: %d" % len(train_ids))
    test_ids, test_vals, test_labels = dataparser.parse(infile="%s\\test_sample.csv" % FLAGS.data_dir)
    print("len of test: %d" % len(test_ids))

    # ------build Tasks------
    model_params = {
        "field_size": feat_dict.field_size,
        "feature_size": feat_dict.feature_size,
        "embedding_size": FLAGS.embedding_size,
        "learning_rate": FLAGS.learning_rate,
        "l2_reg": FLAGS.l2_reg,
        "deep_layers": FLAGS.deep_layers,
        "dropout": FLAGS.dropout,
        "experts_num": 3,
        "experts_units": 32,
        "use_experts_bias": True,
        "use_gate_bias": True
    }
    print(model_params)
    DeepFM = build_model_estimator(model_params)
    # DeepFM = tf.contrib.estimator.add_metrics(DeepFM, my_auc)

    if FLAGS.task_type == 'train':
        train_spec = tf.estimator.TrainSpec(input_fn=lambda: input_fn(train_ids, train_vals, train_labels,
                                                                      num_epochs=FLAGS.num_epochs,
                                                                      batch_size=FLAGS.batch_size))
        eval_spec = tf.estimator.EvalSpec(input_fn=lambda: input_fn(test_ids, test_vals, test_labels,
                                                                    num_epochs=1,
                                                                    batch_size=FLAGS.batch_size),
                                          steps=None, start_delay_secs=1000, throttle_secs=1200)
        tf.estimator.train_and_evaluate(DeepFM, train_spec, eval_spec)
        results = DeepFM.evaluate(
            input_fn=lambda: input_fn(test_ids, test_vals, test_labels, num_epochs=1, batch_size=FLAGS.batch_size))
        for key in results:
            log.info("%s : %s" % (key, results[key]))
    elif FLAGS.task_type == 'eval':
        results = DeepFM.evaluate(input_fn=lambda: input_fn(test_ids, test_vals, test_labels,
                                                            num_epochs=1, batch_size=FLAGS.batch_size))
        for key in results:
            log.info("%s : %s" % (key, results[key]))
    elif FLAGS.task_type == 'infer':
        preds = DeepFM.predict(input_fn=lambda: input_fn(test_ids, test_vals, test_labels,
                                                         num_epochs=1, batch_size=FLAGS.batch_size),
                               predict_keys="prob")
        with open(FLAGS.data_dir+"/pred.txt", "w") as fo:
            for prob in preds:
                fo.write("%f\n" % (prob['prob']))
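
The example relies on an input_fn(ids, vals, labels, num_epochs, batch_size) helper that is not shown. A plausible tf.data-based sketch follows; the feature key names and the shuffle buffer size are assumptions, and the model_fn built by build_model_estimator would have to unpack the same keys:

import tensorflow as tf


def input_fn(feat_ids, feat_vals, labels, num_epochs=1, batch_size=256):
    # Pack the parsed id/value arrays into a features dict plus labels.
    dataset = tf.data.Dataset.from_tensor_slices(
        ({'feat_ids': feat_ids, 'feat_vals': feat_vals}, labels))
    # Shuffle, repeat for the requested number of epochs, then batch.
    dataset = dataset.shuffle(buffer_size=10000).repeat(num_epochs).batch(batch_size)
    return dataset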
Example #6
class DataSession(QObject):
    signal_start_background_job = pyqtSignal()

    def __init__(self, plot_painter, params, port_settings=DEFAULT_PORT_SETTINGS):
        super().__init__()

        self.data_parser = DataParser(params)
        self.plot_painter = plot_painter

        self.worker = SerialWorker(port_settings)
        self.thread = QThread()
        self.worker.moveToThread(self.thread)
        self.worker.read_data_signal.connect(self.add_data)
        self.signal_start_background_job.connect(self.worker.run)

    def run(self):
        self.thread.start()
        self.signal_start_background_job.emit()

    def add_data(self, sensor_data):
        logger.debug(f"Data was read: {sensor_data}")
        parser_data = self.data_parser.parse(sensor_data)
        if parser_data:
            self.plot_painter.add_data(parser_data)
            # TODO: save to DB

    def stop(self, additional_params):
        self.worker.stop()
        self.thread.quit()
        self.thread.wait(1000)
        json_file_name = (
                "data_"
                + datetime.datetime.now().strftime("%Y-%m-%d %H-%M-%S")
                + ".json"
        )
        self.data_parser.save_results(json_file_name, additional_params)
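
For context, a DataSession is typically driven from a Qt application like this; PlotPainter, the empty params and the stop-time metadata below are hypothetical placeholders, not part of the example:

import sys

from PyQt5.QtWidgets import QApplication

app = QApplication(sys.argv)
session = DataSession(plot_painter=PlotPainter(), params={})  # PlotPainter is hypothetical
session.run()             # starts the QThread and emits signal_start_background_job
exit_code = app.exec_()   # Qt event loop; add_data() fires on each read_data_signal
session.stop(additional_params={'note': 'shutdown'})  # stops the worker, saves timestamped JSON
sys.exit(exit_code)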
Example #7
    '''
    ccer.setCoordOrigin(0)
    ccer.convertCoord([0, 1, 2, 3, 4, 5, 6, 7, 8])
    v1 = Visualizer(ccer.converted_coord_dict)
    for frame_id in range(9):
        v1.create_image_for_1_frame(frame_id)
    '''

    p1 = DataParser("./data/0002.txt", 'lidar')
    p1.parse()
    # p2 = DataParser("./data/0.txt", 'mono-camera')
    # p2.parse()
    ccer = CoordCoverter(p1.result_dict, "./data_tracking_oxts/0002.txt")
    ccer.setCoordOrigin(0)
    ccer.convertCoord(list(range(60)))
    print(ccer.converted_coord_dict.keys())

    v1 = Visualizer(ccer.converted_coord_dict)

    predictor = Predictor(ccer.converted_coord_dict, current_frame_id=10, predictor_mode="AMM")
    predictor.predict(period_of_predict=0.0)
    v1.create_image_for_1_frame_predict(10, predictor.predict_dict, [846])

    predictor.predict(period_of_predict=0.5)
    v1.create_image_for_1_frame_predict(15, predictor.predict_dict, [846])
Example #8
import os

from batch_iterator import BatchIterator  # import path assumed; not shown in the original
from data_parser import DataParser
from verification_plotter import OverlappingPlot

""" demo for data parser and data loader pipeline """
if __name__ == '__main__':

    data_path = 'C:\\CodingChallengePhase1\\final_data'
    visual_result_path = 'visual_results'

    """ demo for data parser pipeline """
    data_parser = DataParser(data_path)
    # set verify to draw a verification image, i.e. the overlap of the
    # DICOM image, the contour and the mask
    data_parser.verify = 1
    data_parser.parse()

    """ demo for data loader pipeline """
    batch_size = 8
    epochs = 2
    batch_iterator = BatchIterator('data.h5', batch_size, epochs)
    while True:
        batch = batch_iterator.next()
        if batch is None:
            break

        # save the verification images
        for b in range(batch_size):
            overlap_file = 'loader_' + batch['name'][b] + '.png'
            overlap_file = os.path.join(visual_result_path, overlap_file)
            overlap_plt = OverlappingPlot(overlap_file, batch['img'][b], batch['mask'][b])
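
The loop above implies a small contract for BatchIterator: it wraps an HDF5 file, next() yields dicts with 'name', 'img' and 'mask' entries, and it returns None once the requested number of epochs is exhausted. A sketch under those assumptions (the real implementation is not shown in the example):

import h5py


class BatchIterator:
    def __init__(self, h5_path, batch_size, epochs):
        self.h5_file = h5py.File(h5_path, 'r')
        self.batch_size = batch_size
        self.epochs = epochs
        self.num_samples = self.h5_file['img'].shape[0]
        self.cursor = 0
        self.epoch = 0

    def next(self):
        end = self.cursor + self.batch_size
        if end > self.num_samples:  # epoch boundary; drop the remainder
            self.epoch += 1
            self.cursor, end = 0, self.batch_size
        if self.epoch >= self.epochs:
            return None  # tells the caller to break, as in the demo loop
        batch = {key: self.h5_file[key][self.cursor:end]
                 for key in ('name', 'img', 'mask')}
        self.cursor = end
        return batch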
Example #9
    '''
        RSME_mean += RSME_tmp

    RSME_mean = RSME_mean / len(frame_id_list)

    print("===============")
    print(RSME_mean)

    plot_RSME((x_camera, y_camera), (x_lidar, y_lidar))
    # predictor.predict(period_of_predict=0.5)
    '''

    p_gt = DataParser("./data/0008.txt", 'lidar')
    p_gt.parse()
    gt_ccer = CoordCoverter(p_gt.result_dict, "./data_tracking_oxts/0008.txt")
    gt_ccer.setCoordOrigin(0)
    gt_ccer.convertCoord(list(range(100)))

    p1 = DataParser("./data/8.txt", 'mono-camera')
    p1.parse()
    ccer = CoordCoverter(p1.result_dict, "./data_tracking_oxts/0008.txt")
    ccer.setCoordOrigin(0)
    ccer.convertCoord(list(range(100)))

    print('+++++++++')
    print(p1.result_dict[10][296])
    print('+++++++++')
    print(p_gt.result_dict[10][3702])