def main(argv):
    scenario_plotter = ScenarioPlotter()

    elbow = Elbow()
    elbow_controller = IntegralElbow()
    observer_elbow = IntegralElbow()

    # Test moving the elbow with constant separation.
    #initial_X = numpy.matrix([[0.0], [0.0]])
    initial_X = numpy.matrix([[0.707], [0.707]])
    R = numpy.matrix([[1.0], [3.0], [1.0]])
    scenario_plotter.run_test(elbow, goal=R, controller_elbow=elbow_controller,
                              observer_elbow=observer_elbow, iterations=2000)

    scenario_plotter.Plot(elbow)

    # Write the generated constants out to a file.
    print(len(argv))
    if len(argv) != 5:
        glog.fatal('Expected .h file name and .cc file name for the elbow and integral elbow.')
    else:
        namespaces = ['shit']
        elbow = Elbow('Elbow')
        loop_writer = control_loop.ControlLoopWriter(
            'Elbow', [elbow], namespaces=namespaces)
        loop_writer.Write(argv[1], argv[2])

        integral_elbow = IntegralElbow('IntegralElbow')
        integral_loop_writer = control_loop.ControlLoopWriter(
            'IntegralElbow', [integral_elbow], namespaces=namespaces)
        integral_loop_writer.Write(argv[3], argv[4])
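
The len(argv) != 5 check above expects the script name plus two header/source file pairs. A minimal invocation sketch (the script and output file names are hypothetical):

# Hypothetical command line; argv[0] is the script itself, so len(argv) == 5:
#   python elbow.py elbow.h elbow.cc integral_elbow.h integral_elbow.cc
# argv[1]/argv[2] then receive the Elbow loop, argv[3]/argv[4] the IntegralElbow loop.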
def main(argv):
  loaded_mass = 25
  #loaded_mass = 0
  elevator = Elevator(mass=13 + loaded_mass)
  elevator_controller = Elevator(mass=13 + 15)
  observer_elevator = Elevator(mass=13 + 15)
  #observer_elevator = None

  # Test moving the elevator with constant separation.
  initial_X = numpy.matrix([[0.0], [0.0], [0.01], [0.0]])
  #initial_X = numpy.matrix([[0.0], [0.0], [0.00], [0.0]])
  R = numpy.matrix([[1.0], [0.0], [0.0], [0.0]])
  run_test(elevator, initial_X, R, controller_elevator=elevator_controller,
           observer_elevator=observer_elevator)

  # Write the generated constants out to a file.
  if len(argv) != 3:
    glog.fatal('Expected .h file name and .cc file name for the elevator.')
  else:
    namespaces = ['y2015', 'control_loops', 'fridge']
    elevator = Elevator("Elevator")
    loop_writer = control_loop.ControlLoopWriter("Elevator", [elevator],
                                                 namespaces=namespaces)
    if argv[1][-3:] == '.cc':
      loop_writer.Write(argv[2], argv[1])
    else:
      loop_writer.Write(argv[1], argv[2])
Example #3
def main(argv):
  if FLAGS.plot:
    loaded_mass = 25
    #loaded_mass = 0
    arm = Arm(mass=13 + loaded_mass)
    #arm_controller = Arm(mass=13 + 15)
    #observer_arm = Arm(mass=13 + 15)
    #observer_arm = None

    integral_arm = IntegralArm(mass=13 + loaded_mass)
    integral_arm.X_hat[0, 0] += 0.02
    integral_arm.X_hat[2, 0] += 0.02
    integral_arm.X_hat[4] = 0

    # Test moving the arm with constant separation.
    initial_X = numpy.matrix([[0.0], [0.0], [0.0], [0.0]])
    R = numpy.matrix([[0.0], [0.0], [0.0], [0.0]])
    run_integral_test(arm, initial_X, R, integral_arm, disturbance=2)

  # Write the generated constants out to a file.
  if len(argv) != 5:
    glog.fatal('Expected .h file name and .cc file name for the arm and augmented arm.')
  else:
    namespaces = ['y2015', 'control_loops', 'fridge']
    arm = Arm('Arm', mass=13)
    loop_writer = control_loop.ControlLoopWriter('Arm', [arm],
                                                 namespaces=namespaces)
    loop_writer.Write(argv[1], argv[2])

    integral_arm = IntegralArm('IntegralArm', mass=13)
    loop_writer = control_loop.ControlLoopWriter('IntegralArm', [integral_arm],
                                                 namespaces=namespaces)
    loop_writer.Write(argv[3], argv[4])
Example #4
def main(argv):
  argv = FLAGS(argv)
  glog.init()

  scenario_plotter = ScenarioPlotter()

  intake = Intake()
  intake_controller = IntegralIntake()
  observer_intake = IntegralIntake()

  # Test moving the intake with constant separation.
  initial_X = numpy.matrix([[0.0], [0.0]])
  R = numpy.matrix([[numpy.pi/2.0], [0.0], [0.0]])
  scenario_plotter.run_test(intake, end_goal=R,
                            controller_intake=intake_controller,
                            observer_intake=observer_intake, iterations=200)

  if FLAGS.plot:
    scenario_plotter.Plot()

  # Write the generated constants out to a file.
  if len(argv) != 5:
    glog.fatal('Expected .h file name and .cc file name for the intake and integral intake.')
  else:
    namespaces = ['y2016_bot3', 'control_loops', 'intake']
    intake = Intake("Intake")
    loop_writer = control_loop.ControlLoopWriter('Intake', [intake],
                                                 namespaces=namespaces)
    loop_writer.Write(argv[1], argv[2])

    integral_intake = IntegralIntake("IntegralIntake")
    integral_loop_writer = control_loop.ControlLoopWriter("IntegralIntake", [integral_intake],
                                                          namespaces=namespaces)
    integral_loop_writer.Write(argv[3], argv[4])
Example #5
def main(argv):
  scenario_plotter = ScenarioPlotter()

  intake = Intake()
  intake_controller = IntegralIntake()
  observer_intake = IntegralIntake()

  # Test moving the intake with constant separation.
  initial_X = numpy.matrix([[0.0], [0.0]])
  R = numpy.matrix([[0.1], [0.0], [0.0]])
  scenario_plotter.run_test(intake, end_goal=R,
                            controller_intake=intake_controller,
                            observer_intake=observer_intake, iterations=400)

  if FLAGS.plot:
    scenario_plotter.Plot()

  # Write the generated constants out to a file.
  if len(argv) != 5:
    glog.fatal('Expected .h file name and .cc file name for the intake and integral intake.')
  else:
    namespaces = ['y2017', 'control_loops', 'superstructure', 'intake']
    intake = Intake('Intake')
    loop_writer = control_loop.ControlLoopWriter('Intake', [intake],
                                                 namespaces=namespaces)
    loop_writer.AddConstant(control_loop.Constant('kFreeSpeed', '%f',
                                                  intake.free_speed))
    loop_writer.AddConstant(control_loop.Constant('kOutputRatio', '%f',
                                                  intake.G * intake.r))
    loop_writer.Write(argv[1], argv[2])

    integral_intake = IntegralIntake('IntegralIntake')
    integral_loop_writer = control_loop.ControlLoopWriter('IntegralIntake', [integral_intake],
                                                          namespaces=namespaces)
    integral_loop_writer.Write(argv[3], argv[4])
def main(argv):
  argv = FLAGS(argv)

  scenario_plotter = ScenarioPlotter()

  shoulder = Shoulder()
  shoulder_controller = IntegralShoulder()
  observer_shoulder = IntegralShoulder()

  # Test moving the shoulder with constant separation.
  initial_X = numpy.matrix([[0.0], [0.0]])
  R = numpy.matrix([[numpy.pi / 2.0], [0.0], [0.0]])
  scenario_plotter.run_test(shoulder, goal=R, controller_shoulder=shoulder_controller,
                            observer_shoulder=observer_shoulder, iterations=200)

  if FLAGS.plot:
    scenario_plotter.Plot()

  # Write the generated constants out to a file.
  if len(argv) != 5:
    glog.fatal('Expected .h file name and .cc file name for the shoulder and integral shoulder.')
  else:
    namespaces = ['y2016', 'control_loops', 'superstructure']
    shoulder = Shoulder("Shoulder")
    loop_writer = control_loop.ControlLoopWriter('Shoulder', [shoulder],
                                                 namespaces=namespaces)
    loop_writer.Write(argv[1], argv[2])

    integral_shoulder = IntegralShoulder("IntegralShoulder")
    integral_loop_writer = control_loop.ControlLoopWriter("IntegralShoulder", [integral_shoulder],
                                                          namespaces=namespaces)
    integral_loop_writer.Write(argv[3], argv[4])
Example #7
def load(dpath):
    """
    Loads data from directory.

    Arguments:
    dpath -- directory

    Returns:
    train_set_x_orig -- train set features
    train_set_y_orig -- train set labels
    test_set_x_orig -- test set features
    test_set_y_orig -- test set labels
    classes -- list of classes
    """

    if not os.path.exists(dpath):
        log.fatal('{0} does not exist'.format(dpath))
        sys.exit(0)

    train_dataset = h5py.File(os.path.join(dpath, 'train_catvnoncat.h5'), "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:])
    train_set_y_orig = np.array(train_dataset["train_set_y"][:])

    test_dataset = h5py.File(os.path.join(dpath, 'test_catvnoncat.h5'), "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:])
    test_set_y_orig = np.array(test_dataset["test_set_y"][:])

    classes = np.array(test_dataset["list_classes"][:])

    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))

    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
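
A minimal usage sketch of load (the directory path is hypothetical); for the standard cat/non-cat dataset the shapes come out as below, matching the assertions in test_load further down:

train_x, train_y, test_x, test_y, classes = load('./datasets')
print(train_x.shape)   # (209, 64, 64, 3)
print(train_y.shape)   # (1, 209)
print(test_x.shape)    # (50, 64, 64, 3)
print(test_y.shape)    # (1, 50)
print(classes.shape)   # (2,)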
Example #8
def main(argv):
  scenario_plotter = ScenarioPlotter()

  wrist = Wrist()
  wrist_controller = IntegralWrist()
  observer_wrist = IntegralWrist()

  # Test moving the wrist with constant separation.
  initial_X = numpy.matrix([[0.0], [0.0]])
  R = numpy.matrix([[1.0], [0.0], [0.0]])
  scenario_plotter.run_test(wrist, goal=R, controller_wrist=wrist_controller,
                            observer_wrist=observer_wrist, iterations=200)

  if FLAGS.plot:
    scenario_plotter.Plot()

  # Write the generated constants out to a file.
  if len(argv) != 5:
    glog.fatal('Expected .h file name and .cc file name for the wrist and integral wrist.')
  else:
    namespaces = ['y2016', 'control_loops', 'superstructure']
    wrist = Wrist('Wrist')
    loop_writer = control_loop.ControlLoopWriter(
        'Wrist', [wrist], namespaces=namespaces)
    loop_writer.Write(argv[1], argv[2])

    integral_wrist = IntegralWrist('IntegralWrist')
    integral_loop_writer = control_loop.ControlLoopWriter(
        'IntegralWrist', [integral_wrist], namespaces=namespaces)
    integral_loop_writer.Write(argv[3], argv[4])
    def move_output_files(self, pipeline_type, dataset):
        """ Moves all output files for a particular pipeline and dataset
            from their temporary logging location during runtime to the evaluation location.

            Args:
                pipeline_type: a pipeline representing a set of parameters to use, as
                    defined in the experiments yaml file for the dataset in question.
                dataset: a dataset to run as defined in the experiments yaml file.
        """
        dataset_name = dataset["name"]
        dataset_results_dir = os.path.join(self.results_dir, dataset_name)
        dataset_pipeline_result_dir = os.path.join(dataset_results_dir,
                                                   pipeline_type)

        log.debug(
            "\033[1mMoving output dir:\033[0m \n %s \n \033[1m to destination:\033[0m \n %s"
            % (self.pipeline_output_dir, dataset_pipeline_result_dir))

        try:
            evt.move_output_from_to(self.pipeline_output_dir,
                                    dataset_pipeline_result_dir)
        except:
            log.fatal(
                "\033[1mFailed copying output dir: \033[0m\n %s \n \033[1m to destination: %s \033[0m\n"
                % (self.pipeline_output_dir, dataset_pipeline_result_dir))
Example #10
    def test_load(self):
        dpath = os.environ['DATASETS_DIR']
        if dpath == '':
            log.fatal('Got empty DATASETS_DIR')
            sys.exit(0)

        log.info('running test_load...')
        log.info('directory: {0}'.format(dpath))

        train_x_orig, train_y, test_x_orig, test_y, classes = load(dpath)
        log.info('train_x_orig.shape: {0}'.format(train_x_orig.shape))
        log.info('train_y.shape: {0}'.format(train_y.shape))
        log.info('test_x_orig.shape: {0}'.format(test_x_orig.shape))
        log.info('test_y.shape: {0}'.format(test_y.shape))
        log.info('classes.shape: {0}'.format(classes.shape))

        self.assertEqual(train_x_orig.shape, (209, 64, 64, 3))
        self.assertEqual(train_y.shape, (1, 209))
        self.assertEqual(test_x_orig.shape, (50, 64, 64, 3))
        self.assertEqual(test_y.shape, (1, 50))
        self.assertEqual(classes.shape, (2, ))

        index = 25
        # plt.imshow(train_x_orig[index])
        result = classes[train_y[0, index]].decode("utf-8")
        log.info('cat or non-cat?: {0}'.format(result))
        self.assertEqual(result, 'cat')
Example #11
def main(argv):
  scenario_plotter = ScenarioPlotter()

  shooter = Shooter()
  shooter_controller = IntegralShooter()
  observer_shooter = IntegralShooter()

  initial_X = numpy.matrix([[0.0], [0.0]])
  R = numpy.matrix([[0.0], [100.0], [0.0]])
  scenario_plotter.run_test(shooter, goal=R, controller_shooter=shooter_controller,
                            observer_shooter=observer_shooter, iterations=200)

  if FLAGS.plot:
    scenario_plotter.Plot()

  if len(argv) != 5:
    glog.fatal('Expected .h file name and .cc file name')
  else:
    namespaces = ['y2016', 'control_loops', 'shooter']
    shooter = Shooter('Shooter')
    loop_writer = control_loop.ControlLoopWriter('Shooter', [shooter],
                                                 namespaces=namespaces)
    loop_writer.Write(argv[1], argv[2])

    integral_shooter = IntegralShooter('IntegralShooter')
    integral_loop_writer = control_loop.ControlLoopWriter(
        'IntegralShooter', [integral_shooter], namespaces=namespaces)
    integral_loop_writer.Write(argv[3], argv[4])
def aggregate_all_results(results_dir, use_pgo=False):
    """ Aggregate APE results and draw APE boxplot as well as write latex table
    with results:
        Args:
            - result_dir: path to the directory with results ordered as follows:
               \* dataset_name:
               |___\* pipeline_type:
               |   |___results.yaml
               |___\* pipeline_type:
               |   |___results.yaml
               \* dataset_name:
               |___\* pipeline_type:
               |   |___results.yaml
               Basically all subfolders with a results.yaml will be examined.
            - use_pgo: whether to aggregate all results for VIO or for PGO trajectory.
                set to True for PGO and False (default) for VIO
        Returns:
            - stats: a nested dictionary with the statistics and results of all pipelines:
                * First level ordered with dataset_name as keys:
                * Second level ordered with pipeline_type as keys:
                * Each stats[dataset_name][pipeline_type] value has:
                    * absolute_errors: an evo Result type with trajectory and APE stats.
                    * relative_errors: RPE stats.
    """
    import fnmatch
    # Load results.
    log.info("Aggregate dataset results.")
    # Aggregate all stats for each pipeline and dataset
    yaml_filename = 'results_vio.yaml'
    if use_pgo:
        yaml_filename = 'results_pgo.yaml'
    stats = dict()
    for root, dirnames, filenames in os.walk(results_dir):
        for results_filename in fnmatch.filter(filenames, yaml_filename):
            results_filepath = os.path.join(root, results_filename)
            # Get pipeline name
            pipeline_name = os.path.basename(root)
            # Get dataset name
            dataset_name = os.path.basename(os.path.split(root)[0])
            # Collect stats
            if stats.get(dataset_name) is None:
                stats[dataset_name] = dict()

            try:
                stats[dataset_name][pipeline_name] = yaml.load(
                    open(results_filepath, 'r'), Loader=yaml.Loader)
            except yaml.YAMLError as e:
                raise Exception("Error in results file: ", e)
            except:
                log.fatal("\033[1mFailed opening file: \033[0m\n %s" %
                          results_filepath)

            log.debug("Check stats from: " + results_filepath)
            try:
                evt.check_stats(stats[dataset_name][pipeline_name])
            except Exception as e:
                log.warning(e)

    return stats
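
A minimal sketch of consuming the returned nested dictionary; only the two key levels (dataset name, then pipeline name) are assumed here, since the inner structure of each entry depends on what the pipeline wrote into results_vio.yaml / results_pgo.yaml:

stats = aggregate_all_results('/path/to/results')  # hypothetical results directory
for dataset_name, pipelines in stats.items():
    for pipeline_name, result in pipelines.items():
        print(dataset_name, pipeline_name, sorted(result.keys()))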
Example #13
def main(argv):
  argv = FLAGS(argv)

  claw = Claw()
  if FLAGS.plot:
    # Test moving the claw with constant separation.
    initial_X = numpy.matrix([[-1.0], [0.0], [0.0], [0.0]])
    R = numpy.matrix([[1.0], [0.0], [0.0], [0.0]])
    run_test(claw, initial_X, R)

    # Test just changing separation.
    initial_X = numpy.matrix([[0.0], [0.0], [0.0], [0.0]])
    R = numpy.matrix([[0.0], [1.0], [0.0], [0.0]])
    run_test(claw, initial_X, R)

    # Test changing both separation and position at once.
    initial_X = numpy.matrix([[0.0], [0.0], [0.0], [0.0]])
    R = numpy.matrix([[1.0], [1.0], [0.0], [0.0]])
    run_test(claw, initial_X, R)

    # Test a small separation error and a large position one.
    initial_X = numpy.matrix([[0.0], [0.0], [0.0], [0.0]])
    R = numpy.matrix([[2.0], [0.05], [0.0], [0.0]])
    run_test(claw, initial_X, R)

    # Test a small separation error and a large position one.
    initial_X = numpy.matrix([[0.0], [0.0], [0.0], [0.0]])
    R = numpy.matrix([[-0.5], [1.0], [0.0], [0.0]])
    run_test(claw, initial_X, R)

    # Test opening with the top claw at the limit.
    initial_X = numpy.matrix([[0.0], [0.0], [0.0], [0.0]])
    R = numpy.matrix([[-1.5], [1.5], [0.0], [0.0]])
    claw.hard_pos_limits = (-1.6, 0.1)
    claw.pos_limits = (-1.5, 0.0)
    run_test(claw, initial_X, R)
    claw.pos_limits = None

    # Test opening with the bottom claw at the limit.
    initial_X = numpy.matrix([[0.0], [0.0], [0.0], [0.0]])
    R = numpy.matrix([[0], [1.5], [0.0], [0.0]])
    claw.hard_pos_limits = (-0.1, 1.6)
    claw.pos_limits = (0.0, 1.6)
    run_test(claw, initial_X, R)
    claw.pos_limits = None

  # Write the generated constants out to a file.
  if len(argv) != 3:
    glog.fatal('Expected .h file name and .cc file name for the claw.')
  else:
    namespaces = ['y2014', 'control_loops', 'claw']
    claw = Claw('Claw')
    loop_writer = control_loop.ControlLoopWriter('Claw', [claw],
                                                 namespaces=namespaces)
    loop_writer.AddConstant(control_loop.Constant('kClawMomentOfInertiaRatio',
      '%f', claw.J_top / claw.J_bottom))
    loop_writer.AddConstant(control_loop.Constant('kDt', '%f',
          claw.dt))
    loop_writer.Write(argv[1], argv[2])
def create_full_path_if_not_exists(filename):
    if not os.path.exists(os.path.dirname(filename)):
        try:
            log.debug('Creating non-existent path: %s' % filename)
            os.makedirs(os.path.dirname(filename))
        except OSError as exc: # Guard against race condition
            if exc.errno != errno.EEXIST:
                log.fatal("Could not create inexistent filename: " + filename)
def main(argv):
    if FLAGS.plot:
        polydrivetrain.PlotPolyDrivetrainMotions(drivetrain.kDrivetrain)
    elif len(argv) != 5:
        glog.fatal('Expected .h file name and .cc file name')
    else:
        polydrivetrain.WritePolyDrivetrain(argv[1:3], argv[3:5], 'testbench',
                                           drivetrain.kDrivetrain)
Example #16
    def test_backend(self):
        exec_path = os.environ['SERVER_EXEC']
        if exec_path == '':
            log.fatal('Got empty backend-web-server path')
            sys.exit(0)
        if not os.path.exists(exec_path):
            log.fatal('{0} does not exist'.format(exec_path))
            sys.exit(0)

        log.info('Running {0}'.format(exec_path))
        backend_proc = BACKEND(exec_path)
        backend_proc.setDaemon(True)
        backend_proc.start()

        log.info('Sleeping...')
        time.sleep(5)

        endpoint = 'http://localhost:2200/cats-request/queue'

        log.info('Posting client requests...')
        # invalid item
        item = {
            'bucket': '/cats-request',
            'key': '/cats-request',
            'value': '',
            'request_id': '',
        }
        itemresp1 = post_item(endpoint, item)
        self.assertIsNot(itemresp1['error'], '')

        # valid item
        item['value'] = 'foo'
        item['request_id'] = 'id'
        itemresp2 = post_item(endpoint, item)
        self.assertEqual(itemresp2['error'], u'unknown request ID \"id\"')

        def cleanup():
            log.info('Killing backend-web-server...')
            backend_proc.kill()

            backend_proc.join()
            log.info('backend-web-server output: {0}'.format(
                backend_proc.stderr))

        self.addCleanup(cleanup)

        time.sleep(3)

        log.info('Fetching items...')
        try:
            fetch_item(endpoint, timeout=5)

        except requests.exceptions.ReadTimeout:
            log.info('Got expected timeout!')

        log.info('Done!')
Example #17
def main(argv):
  argv = FLAGS(argv)
  glog.init()

  scenario_plotter = ScenarioPlotter()

  J_accelerating = 18
  J_decelerating = 7

  arm = Arm(name='AcceleratingArm', J=J_accelerating)
  arm_integral_controller = IntegralArm(
      name='AcceleratingIntegralArm', J=J_accelerating)
  arm_observer = IntegralArm()

  # Test moving the shoulder with constant separation.
  initial_X = numpy.matrix([[0.0], [0.0], [0.0], [0.0], [0.0], [0.0]])
  R = numpy.matrix([[numpy.pi / 2.0],
                    [0.0],
                    [0.0], #[numpy.pi / 2.0],
                    [0.0],
                    [0.0],
                    [0.0]])
  arm.X = initial_X[0:4, 0]
  arm_observer.X = initial_X

  scenario_plotter.run_test(arm=arm,
                            end_goal=R,
                            iterations=300,
                            controller=arm_integral_controller,
                            observer=arm_observer)

  if len(argv) != 5:
    glog.fatal('Expected .h file name and .cc file name for the arm and integral arm.')
  else:
    namespaces = ['y2016', 'control_loops', 'superstructure']
    decelerating_arm = Arm(name='DeceleratingArm', J=J_decelerating)
    loop_writer = control_loop.ControlLoopWriter(
        'Arm', [arm, decelerating_arm], namespaces=namespaces)
    loop_writer.Write(argv[1], argv[2])

    decelerating_integral_arm_controller = IntegralArm(
        name='DeceleratingIntegralArm', J=J_decelerating)

    integral_loop_writer = control_loop.ControlLoopWriter(
        'IntegralArm',
        [arm_integral_controller, decelerating_integral_arm_controller],
        namespaces=namespaces)
    integral_loop_writer.AddConstant(control_loop.Constant("kV_shoulder", "%f",
          arm_integral_controller.shoulder_Kv))
    integral_loop_writer.Write(argv[3], argv[4])

  if FLAGS.plot:
    scenario_plotter.Plot()
Example #18
def open_db_ro(lmdb_file):
    """Open the lmdb in READ ONLY mode, if the db file not exist, return
    None
    """
    db = None
    if os.path.exists(lmdb_file):
        try:
            db = lmdb.open(lmdb_file, readonly=True)
        except:
            log.fatal('\033[0;31mOpen lmdb %s error\033[0m' % lmdb_file)
            return None
    return db
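
A minimal read-only usage sketch (the lmdb path is hypothetical); the same stat/cursor pattern appears in the download_image example further down:

db = open_db_ro('/data/photos_exif.lmdb')
if db is not None:
    print('entries: %d' % db.stat()['entries'])
    with db.begin(write=False) as txn:
        with txn.cursor() as cur:
            for key, val in cur:
                pass  # process each record here
    db.close()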
Example #19
def open_file(file_name, param_str):
    try:
        fp = open(file_name, param_str)
    except IOError:
        # Maybe the encoding of the file name is not right
        try:
            fp = open(file_name.decode('utf8'), param_str)
        except:
            log.fatal('\033[01;31mERROR:\033[0m Can not open %s' % file_name)
            sys.exit(2)

    return fp
Example #20
def main(argv):
    if FLAGS.plot:
        polydrivetrain.PlotPolyDrivetrainMotions(drivetrain.kDrivetrain)
    elif len(argv) != 5:
        glog.fatal('Expected .h file name and .cc file name')
    else:
        namespaces = [
            'third_party', 'frc971', 'control_loops', 'drivetrain', 'y2016'
        ]
        polydrivetrain.WritePolyDrivetrainFullName(argv[1:3], argv[3:5],
                                                   namespaces, namespaces,
                                                   drivetrain.kDrivetrain)
Example #21
def main(argv):
  scenario_plotter = ScenarioPlotter()

  indexer = Indexer()
  indexer_controller = IntegralIndexer()
  observer_indexer = IntegralIndexer()

  initial_X = numpy.matrix([[0.0], [0.0]])
  R = numpy.matrix([[0.0], [20.0], [0.0]])
  scenario_plotter.run_test(indexer, goal=R, controller_indexer=indexer_controller,
                            observer_indexer=observer_indexer, iterations=200)

  if FLAGS.plot:
    scenario_plotter.Plot()

  scenario_plotter = ScenarioPlotter()

  indexer = Indexer()
  indexer_controller = IntegralIndexer(voltage_error_noise=1.5)
  observer_indexer = IntegralIndexer(voltage_error_noise=1.5)

  initial_X = numpy.matrix([[0.0], [0.0]])
  R = numpy.matrix([[0.0], [20.0], [0.0]])
  scenario_plotter.run_test(indexer, goal=R, controller_indexer=indexer_controller,
                            observer_indexer=observer_indexer, iterations=200)

  if FLAGS.plot:
    scenario_plotter.Plot()

  if len(argv) != 7:
    glog.fatal('Expected .h file name and .cc file names')
  else:
    namespaces = ['y2017', 'control_loops', 'superstructure', 'indexer']
    indexer = Indexer('Indexer')
    loop_writer = control_loop.ControlLoopWriter('Indexer', [indexer],
                                                 namespaces=namespaces)
    loop_writer.AddConstant(control_loop.Constant(
        'kFreeSpeed', '%f', indexer.free_speed))
    loop_writer.AddConstant(control_loop.Constant(
        'kOutputRatio', '%f', indexer.G))
    loop_writer.Write(argv[1], argv[2])

    integral_indexer = IntegralIndexer('IntegralIndexer')
    integral_loop_writer = control_loop.ControlLoopWriter(
        'IntegralIndexer', [integral_indexer], namespaces=namespaces)
    integral_loop_writer.Write(argv[3], argv[4])

    stuck_integral_indexer = IntegralIndexer('StuckIntegralIndexer',
                                             voltage_error_noise=1.5)
    stuck_integral_loop_writer = control_loop.ControlLoopWriter(
        'StuckIntegralIndexer', [stuck_integral_indexer], namespaces=namespaces)
    stuck_integral_loop_writer.Write(argv[5], argv[6])
Example #22
    def _exec_bash_tool(tool_name):
        """Execute bash tool configured in config."""
        tool = config.Config.get_tool(tool_name)
        if tool is None:
            msg = 'Cannot find config for tool {}'.format(tool_name)
            glog.fatal(msg)
            return msg, httplib.BAD_REQUEST

        # Construct the command string by joining all components.
        tool.command[0] = config.Config.get_realpath(tool.command[0])
        cmd_str = ' '.join(tool.command)
        system_cmd.run_in_background(cmd_str, tool.stdout_file,
                                     tool.stderr_file)
def main(argv):
    src_file = None
    dst_file = None
    src_content = []
    help_msg = 'dataset_shuffle_index.py -i <infile> -o <shuffledfile>'

    try:
        opts, args = getopt.getopt(argv, 'hi:o:')
    except getopt.GetoptError:
        print help_msg
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            print help_msg
            sys.exit()
        elif opt == '-i':
            src_file = arg
        elif opt == '-o':
            dst_file = arg

    if src_file is None or dst_file is None:
        print help_msg
        sys.exit(2)

    # Open the files
    try:
        src_fp = open(src_file, 'r')
    except IOError:
        log.fatal('Can not open %s' % src_file)
        sys.exit(2)
    try:
        dst_fp = open(dst_file, 'w')
    except IOError:
        log.fatal('Can not open %s' % dst_file)
        sys.exit(2)

    # Load the src file
    log.info('Loading %s ...' % src_file)
    for line in src_fp.readlines():
        src_content.append(line)
    # shuffle the lines
    log.info('Shuffling lines ...')
    random.shuffle(src_content)
    log.info('Writing %s ...' % dst_file)
    # Write it the dst_file
    for line in src_content:
        dst_fp.writelines(line)
    src_fp.close()
    dst_fp.close()
    log.info('Finished')
Example #25
    def _get_endpoint_error_loss(net,
                                 loss_weights,
                                 groundtruths,
                                 weights=None,
                                 loss_type=None):
        """
        Returns endpoint error loss. Options are:
         - L2
         - Huber
         - L2 weighted by uncertainty, like eqn 8 in:
           https://arxiv.org/pdf/1703.04977.pdf
        """
        losses = []
        log.check_eq(len(groundtruths), len(loss_weights))
        log.check_eq(len(net.estimator_net) + 1, len(loss_weights), \
            ("You do not have an appropriate number of loss weights. "
             "Should have {}".format(1 + len(net.estimator_net))))
        with tf.name_scope('endpoint_loss'):
            for i, w in enumerate(loss_weights):
                if i < len(loss_weights) - 1:
                    prediction = net.estimator_net[i].get_flow()
                else:
                    if net.options.use_context_net is False:
                        log.warn(
                            'Context network is not set up, so there is no ' +
                            'need to penalize flow at the finest resolution.')
                        break
                    prediction = net.get_output_flow()

                dim = prediction.shape.as_list()[1]
                loss_name = '{}x{}'.format(dim, dim)

                gt_at_scale = groundtruths[dim]
                log.check_eq(gt_at_scale.shape.as_list()[1],
                             prediction.shape.as_list()[1])
                log.check_eq(gt_at_scale.shape.as_list()[2],
                             prediction.shape.as_list()[2])

                if loss_type == 'HUBER':
                    loss = tf_utils.endpoint_huber_loss_at_scale(
                        prediction, gt_at_scale, weights) * w
                elif loss_type == 'L2':
                    loss = tf_utils.endpoint_loss_at_scale(
                        prediction, gt_at_scale, weights) * w
                else:
                    log.fatal("Unrecognized loss type -- should specify "
                              "{'HUBER', 'L2' 'WEIGHTED'}.")
                tf.summary.scalar(loss_name, loss)
                losses.append(loss)
        return losses
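
The endpoint error being penalized is the per-pixel Euclidean distance between predicted and ground-truth flow vectors. A minimal numpy sketch of the plain, unweighted L2 variant (the tf_utils helpers above are assumed to compute a per-scale, optionally weighted version of this):

import numpy as np

def endpoint_error(pred_flow, gt_flow):
    # pred_flow, gt_flow: arrays of shape [H, W, 2] holding (u, v) flow components.
    diff = pred_flow - gt_flow
    return np.mean(np.sqrt(np.sum(diff ** 2, axis=-1)))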
Example #26
def main(argv):
  scenario_plotter = ScenarioPlotter()

  column = Column()
  column_controller = IntegralColumn()
  observer_column = IntegralColumn()

  initial_X = numpy.matrix([[0.0], [0.0], [0.0], [0.0]])
  R = numpy.matrix([[0.0], [10.0], [5.0], [0.0], [0.0], [0.0]])
  scenario_plotter.run_test(column, end_goal=R, controller_column=column_controller,
                            observer_column=observer_column, iterations=400)

  if FLAGS.plot:
    scenario_plotter.Plot()

  if len(argv) != 7:
    glog.fatal('Expected .h file name and .cc file names')
  else:
    namespaces = ['y2017', 'control_loops', 'superstructure', 'column']
    column = Column('Column')
    loop_writer = control_loop.ControlLoopWriter('Column', [column],
                                                 namespaces=namespaces)
    loop_writer.AddConstant(control_loop.Constant(
        'kIndexerFreeSpeed', '%f', column.indexer.free_speed))
    loop_writer.AddConstant(control_loop.Constant(
        'kIndexerOutputRatio', '%f', column.indexer.G))
    loop_writer.AddConstant(control_loop.Constant(
        'kTurretFreeSpeed', '%f', column.turret.free_speed))
    loop_writer.AddConstant(control_loop.Constant(
        'kTurretOutputRatio', '%f', column.turret.G))
    loop_writer.Write(argv[1], argv[2])

    # IntegralColumn controller 1 will disable the indexer.
    integral_column = IntegralColumn('IntegralColumn')
    disabled_integral_column = IntegralColumn('DisabledIntegralColumn',
                                              disable_indexer=True)
    integral_loop_writer = control_loop.ControlLoopWriter(
        'IntegralColumn', [integral_column, disabled_integral_column],
        namespaces=namespaces)
    integral_loop_writer.Write(argv[3], argv[4])

    stuck_integral_column = IntegralColumn('StuckIntegralColumn', voltage_error_noise=8.0)
    stuck_integral_loop_writer = control_loop.ControlLoopWriter(
        'StuckIntegralColumn', [stuck_integral_column], namespaces=namespaces)
    stuck_integral_loop_writer.Write(argv[5], argv[6])
def move_output_from_to(from_dir, to_dir):
    try:
        if (os.path.exists(to_dir)):
            rmtree(to_dir)
    except:
        log.info("Directory:" + to_dir + " does not exist, we can safely move output.")
    try:
        if (os.path.isdir(from_dir)):
            move(from_dir, to_dir)
        else:
            log.info("There is no output directory...")
    except:
        print("Could not move output from: " + from_dir + " to: " + to_dir)
        raise
    try:
        os.makedirs(from_dir)
    except:
        log.fatal("Could not mkdir: " + from_dir)
Example #28
    def execute_cmd(module_name, cmd_name):
        """"""
        # Run command on all modules if the module name is exactly 'all'.
        if module_name == 'all':
            for conf in config.Config.get_pb().modules:
                ModuleApi._run_command(conf, cmd_name)
            runtime_status.RuntimeStatus.broadcast_status_if_changed()
            return 'OK', httplib.OK

        # Or else, run command on the specified module.
        conf = config.Config.get_module(module_name)
        if conf is None:
            msg = 'Cannot find config for module {}'.format(module_name)
            glog.fatal(msg)
            return msg, httplib.BAD_REQUEST
        result = ModuleApi._run_command(conf, cmd_name)
        runtime_status.RuntimeStatus.broadcast_status_if_changed()
        return result
def main(argv):
  argv = FLAGS(argv)

  loaded_mass = 7+4.0
  #loaded_mass = 0
  #observer_elevator = None

  # Test moving the Elevator
  initial_X = numpy.matrix([[0.0], [0.0]])
  up_R = numpy.matrix([[0.4572], [0.0], [0.0]])
  down_R = numpy.matrix([[0.0], [0.0], [0.0]])
  totemass = 3.54
  scenario_plotter = ScenarioPlotter()

  elevator_controller = IntegralElevator(mass=4*totemass + loaded_mass)
  observer_elevator = IntegralElevator(mass=4*totemass + loaded_mass)

  for i in xrange(0, 7):
    elevator = Elevator(mass=i*totemass + loaded_mass)
    glog.info('Actual poles are %s', str(numpy.linalg.eig(elevator.A - elevator.B * elevator_controller.K[0, 0:2])[0]))

    elevator.X = initial_X
    scenario_plotter.run_test(elevator, goal=up_R, controller_elevator=elevator_controller,
                              observer_elevator=observer_elevator, iterations=200)
    scenario_plotter.run_test(elevator, goal=down_R, controller_elevator=elevator_controller,
                              observer_elevator=observer_elevator, iterations=200)

  if FLAGS.plot:
    scenario_plotter.Plot()

  # Write the generated constants out to a file.
  if len(argv) != 5:
    glog.fatal('Expected .h file name and .cc file name for the Elevator and integral elevator.')
  else:
    design_mass = 4*totemass + loaded_mass
    elevator = Elevator("Elevator", mass=design_mass)
    loop_writer = control_loop.ControlLoopWriter("Elevator", [elevator],
                                                 namespaces=['y2015_bot3', 'control_loops', 'elevator'])
    loop_writer.Write(argv[1], argv[2])

    integral_elevator = IntegralElevator("IntegralElevator", mass=design_mass)
    integral_loop_writer = control_loop.ControlLoopWriter("IntegralElevator", [integral_elevator],
                                                          namespaces=['y2015_bot3', 'control_loops', 'elevator'])
    integral_loop_writer.Write(argv[3], argv[4])
Example #31
    def test_etcd(self):
        """etcd test function
        """
        exec_path = os.environ['ETCD_EXEC']
        if exec_path == '':
            log.fatal('Got empty etcd path')
            sys.exit(0)
        if not os.path.exists(exec_path):
            log.fatal('{0} does not exist'.format(exec_path))
            sys.exit(0)

        log.info('Running {0}'.format(exec_path))
        etcd_proc = ETCD(exec_path)
        etcd_proc.setDaemon(True)
        etcd_proc.start()

        log.info('Sleeping...')
        time.sleep(5)

        log.info('Launching watch requests...')
        watch_thread = threading.Thread(target=self.watch_routine)
        watch_thread.setDaemon(True)
        watch_thread.start()

        time.sleep(3)

        log.info('Launching client requests...')
        log.info(put('http://localhost:2379', 'foo', 'bar'))

        # Python 2
        # self.assertEqual(get('http://localhost:2379', 'foo'), 'bar')
        # Python 3
        self.assertEqual(get('http://localhost:2379', 'foo'), b'bar')

        log.info('Waiting for watch...')
        watch_thread.join()

        log.info('Killing etcd...')
        etcd_proc.kill()

        etcd_proc.join()
        log.info('etcd output: {0}'.format(etcd_proc.stderr))

        log.info('Done!')
Example #32
def read_one_orbdif_file(file_path):
    glog.fatal("====> read : " + file_path)

    if not os.path.exists(file_path):
        glog.error("The file not exist : " + file_path)
        return

    PRN = ""
    RMS = ""
    LSQ = {}
    ORI = {}
    SAT = []
    try:
        with open(file_path) as file_object:
            # get the sat list
            lines = file_object.readlines()
            for line in lines:
                if "SAT" in line:
                    line = line[line.find("SAT") + 4:]
                    SAT = line.split()
                    for sat in SAT:
                        ORI[sat] = []
                    break
                else:
                    continue

            # get the ORI data
            for line in lines:
                line = line.rstrip()
                if "GPST" in line:
                    continue
                elif "SAT" in line:
                    break
                else:
                    tmp = line.split()
                    ORI[tmp[1]].append(line)
    except:
        glog.error("The file not open : " + file_path)
        sys.exit()
    else:
        glog.info("Os error : " + file_path)

    result = {"ORI": ORI, "SAT": SAT}
    return result
Example #33
    def __init__(self):
        super(Classifier, self).__init__()
        if cmd_args.gm == 'mean_field':
            model = EmbedMeanField
        elif cmd_args.gm == 'loopy_bp':
            model = EmbedLoopyBP
        elif cmd_args.gm == 'DGCNN':
            model = DGCNN
        else:
            log.fatal('unknown gm %s' % cmd_args.gm)
            sys.exit()

        if cmd_args.gm == 'DGCNN':
            self.s2v = model(latent_dim=cmd_args.latent_dim,
                             output_dim=cmd_args.out_dim,
                             num_node_feats=(cmd_args.feat_dim +
                                             cmd_args.attr_dim),
                             num_edge_feats=0,
                             k=cmd_args.sortpooling_k)
        else:
            self.s2v = model(latent_dim=cmd_args.latent_dim,
                             output_dim=cmd_args.out_dim,
                             num_node_feats=cmd_args.feat_dim,
                             num_edge_feats=0,
                             max_lv=cmd_args.max_lv)

        out_dim = cmd_args.out_dim
        if out_dim == 0:
            if cmd_args.gm == 'DGCNN':
                out_dim = self.s2v.dense_dim
            else:
                out_dim = cmd_args.latent_dim
        if False:
            self.mlp = MLPClassifier(input_size=out_dim,
                                     hidden_size=cmd_args.hidden,
                                     num_class=cmd_args.num_class,
                                     with_dropout=cmd_args.dropout)

        self.mlp = MaxRecallAtPrecision(input_size=out_dim,
                                        hidden_size=cmd_args.hidden,
                                        alpha=0.6,
                                        with_dropout=cmd_args.dropout)
Example #34
def main(argv):
  scenario_plotter = ScenarioPlotter()

  if FLAGS.plot:
    iterations = 200

    initial_X = numpy.matrix([[0.0], [0.0], [0.0]])
    R = numpy.matrix([[0.0], [100.0], [100.0], [0.0]])

    scenario_plotter_int = ScenarioPlotter()

    shooter = Shooter()
    shooter_controller = IntegralShooter()
    observer_shooter_hybrid = IntegralShooter()

    scenario_plotter_int.run_test(shooter, goal=R, controller_shooter=shooter_controller,
      observer_shooter=observer_shooter_hybrid, iterations=iterations,
      hybrid_obs = True)

    scenario_plotter_int.Plot()

    pylab.show()

  if len(argv) != 5:
    glog.fatal('Expected .h file name and .cc file name')
  else:
    namespaces = ['y2017', 'control_loops', 'superstructure', 'shooter']
    shooter = Shooter('Shooter')
    loop_writer = control_loop.ControlLoopWriter('Shooter', [shooter],
                                                 namespaces=namespaces)
    loop_writer.AddConstant(control_loop.Constant(
        'kFreeSpeed', '%f', shooter.free_speed))
    loop_writer.AddConstant(control_loop.Constant(
        'kOutputRatio', '%f', shooter.G))
    loop_writer.Write(argv[1], argv[2])

    integral_shooter = IntegralShooter('IntegralShooter')
    integral_loop_writer = control_loop.ControlLoopWriter(
        'IntegralShooter', [integral_shooter], namespaces=namespaces,
        plant_type='StateFeedbackHybridPlant',
        observer_type='HybridKalman')
    integral_loop_writer.Write(argv[3], argv[4])
Example #35
    def test_cats(self):
        dpath = os.environ['DATASETS_DIR']
        if dpath == '':
            log.fatal('Got empty DATASETS_DIR')
            sys.exit(0)

        param_path = os.environ['CATS_PARAM_PATH']
        if param_path == '':
            log.fatal('Got empty CATS_PARAM_PATH')
            sys.exit(0)

        log.info('running test_cats...')
        log.info('directory path: {0}'.format(dpath))
        log.info('parameters path: {0}'.format(param_path))

        img_path = os.path.join(dpath, 'gray-cat.jpeg')
        parameters = np.load(param_path).item()

        img_result = classify(img_path, parameters)

        log.info('img_result: {0}'.format(img_result))
        self.assertEqual(img_result, 'cat')
Example #36
def main(argv):

  scenario_plotter = ScenarioPlotter()

  hood = Hood()
  hood_controller = IntegralHood()
  observer_hood = IntegralHood()

  # Test moving the hood with constant separation.
  initial_X = numpy.matrix([[0.0], [0.0]])
  R = numpy.matrix([[numpy.pi/4.0], [0.0], [0.0]])
  scenario_plotter.run_test(hood, end_goal=R,
                            controller_hood=hood_controller,
                            observer_hood=observer_hood, iterations=200)

  if FLAGS.plot:
    scenario_plotter.Plot()

  # Write the generated constants out to a file.
  if len(argv) != 5:
    glog.fatal('Expected .h file name and .cc file name for the hood and integral hood.')
  else:
    namespaces = ['y2017', 'control_loops', 'superstructure', 'hood']
    hood = Hood('Hood')
    loop_writer = control_loop.ControlLoopWriter('Hood', [hood],
                                                 namespaces=namespaces)
    loop_writer.AddConstant(control_loop.Constant(
        'kFreeSpeed', '%f', hood.free_speed))
    loop_writer.AddConstant(control_loop.Constant(
        'kOutputRatio', '%f', hood.G))
    loop_writer.Write(argv[1], argv[2])

    integral_hood = IntegralHood('IntegralHood')
    integral_loop_writer = control_loop.ControlLoopWriter('IntegralHood', [integral_hood],
                                                          namespaces=namespaces)
    integral_loop_writer.AddConstant(control_loop.Constant('kLastReduction', '%f',
          integral_hood.last_G))
    integral_loop_writer.Write(argv[3], argv[4])
Example #37
    def _run_command(conf, cmd_name):
        """Implementation of running command on module."""
        cmd = next((cmd for cmd in conf.supported_commands
                    if cmd.name == cmd_name), None)
        if cmd is None:
            msg = 'Cannot find command {} for module {}'.format(
                cmd_name, conf.name)
            glog.fatal(msg)
            return msg, httplib.BAD_REQUEST

        # Construct the command string by joining all components.
        cmd.command[0] = config.Config.get_realpath(cmd.command[0])
        cmd_str = ' '.join(cmd.command)
        system_cmd.run_in_background(cmd_str, cmd.stdout_file, cmd.stderr_file)

        # Update module status.
        module_status = runtime_status.RuntimeStatus.get_module(conf.name)
        if cmd_name == 'start':
            module_status.status = runtime_status_pb2.ModuleStatus.STARTED
        elif cmd_name == 'stop':
            module_status.status = runtime_status_pb2.ModuleStatus.STOPPED

        return 'OK', httplib.OK
Example #38
def main(argv):
  loaded_mass = 0
  #loaded_mass = 0
  claw = Claw(mass=4 + loaded_mass)
  claw_controller = Claw(mass=5 + 0)
  observer_claw = Claw(mass=5 + 0)
  #observer_claw = None

  # Test moving the claw with constant separation.
  initial_X = numpy.matrix([[0.0], [0.0]])
  R = numpy.matrix([[1.0], [0.0]])
  run_test(claw, initial_X, R, controller_claw=claw_controller,
           observer_claw=observer_claw)

  # Write the generated constants out to a file.
  if len(argv) != 3:
    glog.fatal('Expected .h and .cc filename for claw.')
  else:
    namespaces = ['y2015', 'control_loops', 'claw']
    claw = Claw('Claw')
    loop_writer = control_loop.ControlLoopWriter('Claw', [claw],
                                                 namespaces=namespaces)
    loop_writer.Write(argv[1], argv[2])
Example #39
    def get_optimizer(self, loss):
        """ Returns the optimizer. """
        self.global_step = tf.Variable(0, trainable=False)
        self.learning_rate = self._get_exponential_decay_learning_rate(
            self.global_step)
        if self._config['optimizer'] == 'SGD':
            opt = tf.train.MomentumOptimizer(learning_rate=self.learning_rate,
                                             momentum=self._config['momentum'],
                                             use_nesterov=True)
        elif self._config['optimizer'] == 'ADAM':
            opt = tf.train.AdamOptimizer(learning_rate=self.learning_rate,
                                         beta1=self._config['adam_beta1'],
                                         beta2=self._config['adam_beta2'])
        else:
            log.fatal("UNRECOGNIZED OPTIMIZER TYPE")

        gradients, variables = zip(*opt.compute_gradients(loss))
        if 'max_gradient_norm' in self._config:
            gradients, _ = tf.clip_by_global_norm(
                gradients, self._config['max_gradient_norm'])

        with tf.name_scope('gradients'):
            variable_summary(tf.global_norm(gradients))
        return opt.apply_gradients(zip(gradients, variables))
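
A sketch of the configuration entries this method reads directly; the values shown are hypothetical, and _get_exponential_decay_learning_rate presumably reads its own decay-related keys as well:

config = {
    'optimizer': 'ADAM',        # or 'SGD'
    'momentum': 0.9,            # used only by the SGD branch
    'adam_beta1': 0.9,          # used only by the ADAM branch
    'adam_beta2': 0.999,
    'max_gradient_norm': 10.0,  # optional; enables gradient clipping
}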
Example #40
    def _run_command(conf, cmd_name):
        """Implementation of running command on module."""
        cmd = next(
            (cmd for cmd in conf.supported_commands if cmd.name == cmd_name),
            None)
        if cmd is None:
            msg = 'Cannot find command {} for module {}'.format(
                cmd_name, conf.name)
            glog.fatal(msg)
            return msg, httplib.BAD_REQUEST

        # Construct the command string by joining all components.
        cmd.command[0] = config.Config.get_realpath(cmd.command[0])
        cmd_str = ' '.join(cmd.command)
        system_cmd.run_in_background(cmd_str, cmd.stdout_file, cmd.stderr_file)

        # Update module status.
        module_status = runtime_status.RuntimeStatus.get_module(conf.name)
        if cmd_name == 'start':
            module_status.status = runtime_status_pb2.ModuleStatus.STARTED
        elif cmd_name == 'stop':
            module_status.status = runtime_status_pb2.ModuleStatus.STOPPED

        return 'OK', httplib.OK
Example #41
                    return None

            return item

        except requests.exceptions.ConnectionError as err:
            log.warning('Connection error: {0}'.format(err))
            time.sleep(5)

        except:
            log.warning('Unexpected error: {0}'.format(sys.exc_info()[0]))
            raise


if __name__ == "__main__":
    if len(sys.argv) == 1:
        log.fatal('Got empty endpoint: {0}'.format(sys.argv))
        sys.exit(1)

    EP = sys.argv[1]
    if EP == '':
        log.fatal('Got empty endpoint: {0}'.format(sys.argv))
        sys.exit(1)

    param_path = os.environ['CATS_PARAM_PATH']
    if param_path == '':
        log.fatal('Got empty CATS_PARAM_PATH')
        sys.exit(1)

    # 1. Initialize parameters / Define hyperparameters
    # 2. Loop for num_iterations:
    #     a. Forward propagation
Example #42
def try_log():
    log.debug('2333happy debugging....')
    log.info('it works')
    log.warn('something not ideal')
    log.error('something went wrong')
    log.fatal('AAAAAAAAAAA!')
doy_xml = ""
xml_dir = args.xml                      # xml file for each day, format is yyyydoy.xml
log_dir = args.dir
bin_dir = args.bin
grt_bin = "/project/jdhuang/GNSS_software/GREAT/great_pco_L3/build/Bin/"
scp_dir = "/workfs/jdhuang/great_projects/e_all/scripts/great"                              # scripts dir
mod_xml = "/workfs/jdhuang/great_projects/e_all/scripts/great/gnss_tb_ge_3_freq.xml"        # model xml
python_version = "python3.7"                                                                # python version

year = '%04s' % args.year
idoy = '%03s' % args.idoy

yeardoy = "%04s%03s" % (year, idoy)

# create the xml file
glog.fatal("==========> beg trimcor.")
doy_xml = os.path.join(xml_dir, "gns_trimcor_%4s%03d.xml" % (args.year, int(args.idoy)))
create_xml_py = os.path.join(scp_dir, "gns_xml_create_xml.py")
tool.run_py_cmd(python_version=python_version, python_name=create_xml_py, log_dir=log_dir,log_name="create_xml.cmd_log", year=year, idoy=idoy, ilen=1, idst=mod_xml, odst=doy_xml)


print(grt_bin)
print(doy_xml)
print(log_dir)
print(bin_dir)

work_dir = os.path.join(log_dir,year,idoy)
os.system("mkdir -p " + work_dir)
os.chdir(work_dir)

tool.run_grt_cmd(app_dir=grt_bin, app_name="great_trimcor", xml_path=doy_xml, log_dir="./", log_name="great_tbedit.cmd_log", bin=bin_dir)
Example #44
prj_dir = os.path.join(prj_dir, '%03s' % args.idoy)
prj_log = os.path.join(prj_log, '%03s' % args.idoy, "log_tb")
prj_log13 = os.path.join(prj_log13, '%03s' % args.idoy, "log_tb")
tool.check_path(prj_dir)
tool.check_path(xml_dir)
tool.check_path(prj_log)
tool.check_path(prj_log13)

os.chdir(prj_dir)
os.system("rm -r ./*")
os.system("mkdir ./log_tb")
os.system("cp -rf %s/*log   ./log_tb" % prj_log)  # 将统一存放的log文件粘贴到prj路径下
os.system("cp -rf %s/*log13 ./log_tb" % prj_log13)
tool.check_path(prd_dir)
tool.check_path(log_dir)
glog.fatal("crt dir is : " + os.getcwd())

# create the xml file
glog.fatal("=========>  beg GNS POD IF.")
glog.fatal("==========> beg create the xml file.")
doy_xml = os.path.join(xml_dir,
                       "gns_pod_%4s%03d.xml" % (args.year, int(args.idoy)))
create_xml_py = os.path.join(scp_dir, "gns_xml_create_xml.py")
update_xml_py = os.path.join(scp_dir, "gns_xml_update_editres.py")
update_ambfix_xml_py = os.path.join(scp_dir, "gns_xml_update_ambfix.py")
os.system("chmod +x " + create_xml_py)
os.system("chmod +x " + update_xml_py)
os.system("chmod +x " + update_ambfix_xml_py)

glog.fatal("==========> creat xml.")
tool.run_py_cmd(python_version=python_version,
Example #45
import glog as log

log.info("It works.")
log.warn("Something not ideal")
log.error("Something went wrong")
log.fatal("AAAAAAAAAAAAAAA!")

log.check(False)
Example #46
doy_xml = ""
xml_dir = args.xml  # xml file for each day, format is yyyydoy.xml
log_dir = args.dir
bin_dir = args.bin
grt_bin = "/project/jdhuang/GNSS_software/GREAT/great_pco_L3/build/Bin/"
scp_dir = "/workfs/jdhuang/great_projects/e_all/scripts/great"  # scripts dir
mod_xml = "/workfs/jdhuang/great_projects/e_all/scripts/great/gnss_tb_ge_3_freq.xml"  # model xml
python_version = "python3.7"  # python version

year = '%04s' % args.year
idoy = '%03s' % args.idoy

yeardoy = "%04s%03s" % (year, idoy)

# create the xml file
glog.fatal("==========> beg tb.")
doy_xml = os.path.join(xml_dir,
                       "gns_tb_%4s%03d.xml" % (args.year, int(args.idoy)))
create_xml_py = os.path.join(scp_dir, "gns_xml_create_xml.py")
tool.run_py_cmd(python_version=python_version,
                python_name=create_xml_py,
                log_dir=log_dir,
                log_name="create_xml.cmd_log",
                year=year,
                idoy=idoy,
                ilen=1,
                idst=mod_xml,
                odst=doy_xml)

print(grt_bin)
print(doy_xml)
Example #47
def main(argv):
    db_file = None
    skip_num = None
    data_path = '../data'
    overwrite = False
    help_msg = 'download_image.py -i <lmdbfile> -o[optional] <datapath>\
--overwrite[optional] --skip <num>\n\
-i <lmdbfile>       The input lmdb file contains the exif of photos\n\
-o <datapath>       The path where to store the downloaded photos\n\
--overwrite         If set, overwrite the exists photos, default not\n\
--skip <num>        Skip the first XX photos'

    try:
        opts, args = getopt.getopt(argv, 'hi:o:', ['overwrite', 'skip='])
    except getopt.GetoptError:
        print help_msg
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            print help_msg
            sys.exit()
        elif opt == '-i':
            db_file = arg
        elif opt == '-o':
            data_path = arg
        elif opt == '--overwrite':
            overwrite = True
        elif opt == '--skip':
            skip_num = int(arg)
        else:
            print help_msg
            sys.exit(2)

    if db_file is None:
        print help_msg
        sys.exit(2)

    # Try to open the database file
    db = lt.open_db_ro(db_file)
    if db is None:
        log.fatal('\033[0;31mCan not open %s\033[0m' % db_file)
        sys.exit(2)

    # Get the entries from the database
    entries = db.stat()['entries']
    # Entries counter
    counter = 0
    # Check the data path
    if not tb.check_path(data_path):
        log.info('Create new dir %s' % data_path)
    # Iter the data base
    if skip_num is not None:
        log.info('Skipping the first %d entries...' % skip_num)
    with db.begin(write=False) as txn:
        with txn.cursor() as cur:
            for key, val in cur:
                counter += 1
                if skip_num is not None and counter < skip_num:
                    continue
                # Parse the val into dict
                val_dic = yaml.load(val)
                # Get the available url to download the photo
                photo = myxml.parse_xml_to_etree(val_dic['photo'])
                url = tb.get_url(photo, val_dic['urls'], True)
                # Download the url and save image
                log.info('Download %s from %s [%d/%d]' %
                         (key, url, counter, entries))
                try:
                    tb.download_url_and_save(url, key, overwrite, data_path)
                except:
                    log.error('\033[0;31mFailed to download %s from %s\033[0m'
                              % (key, url))
                    continue

    db.close()
class SamplePNC(object):
    # Topics to copy into the sample bag (only the tail of the list survives
    # in this excerpt).
    TOPICS = [
        '/apollo/monitor/system_status',
        '/apollo/monitor/static_info',
    ]

    @staticmethod
    def ProcessBags(bags):
        for bag_file in bags:
            output_dir = os.path.join(os.path.dirname(bag_file), 'pnc_sample')
            output_bag = os.path.join(output_dir, os.path.basename(bag_file))
            if os.path.exists(output_bag):
                glog.info('Skip {} which has been processed'.format(bag_file))
                continue

            glog.info('Processing bag {}'.format(bag_file))
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            with rosbag.Bag(bag_file, 'r') as bag_in:
                with rosbag.Bag(output_bag, 'w') as bag_out:
                    for topic, msg, t in bag_in.read_messages(
                            topics=SamplePNC.TOPICS):
                        bag_out.write(topic, msg, t)


if __name__ == '__main__':
    if len(sys.argv) == 1:
        glog.fatal('Wrong arguments!')
        sys.exit(1)

    bags = sorted(sum([glob.glob(arg) for arg in sys.argv[1:]], []))
    SamplePNC.ProcessBags(bags)
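# The sorted(sum(...)) idiom above flattens the glob matches of every
# command-line pattern into a single ordered list. A small standalone
# illustration (the patterns are hypothetical):
import glob

patterns = ['2019-*/*.bag', 'extra/*.bag']
# Each pattern expands to a (possibly empty) list; sum(..., []) concatenates
# those lists, and sorted() gives a deterministic processing order.
bags = sorted(sum([glob.glob(p) for p in patterns], []))
print(bags)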
def main(argv):
  vdrivetrain = VelocityDrivetrain()

  if not FLAGS.plot:
    if len(argv) != 5:
      glog.fatal('Expected .h and .cc file names for the drivetrain and the CIM.')
    else:
      namespaces = ['y2014_bot3', 'control_loops', 'drivetrain']
      dog_loop_writer = control_loop.ControlLoopWriter(
          "VelocityDrivetrain", [vdrivetrain.drivetrain_low_low,
                         vdrivetrain.drivetrain_low_high,
                         vdrivetrain.drivetrain_high_low,
                         vdrivetrain.drivetrain_high_high],
                         namespaces=namespaces)

      dog_loop_writer.Write(argv[1], argv[2])

      cim_writer = control_loop.ControlLoopWriter("CIM", [CIM()])

      cim_writer.Write(argv[3], argv[4])
      return

  vl_plot = []
  vr_plot = []
  ul_plot = []
  ur_plot = []
  radius_plot = []
  t_plot = []
  left_gear_plot = []
  right_gear_plot = []
  vdrivetrain.left_shifter_position = 0.0
  vdrivetrain.right_shifter_position = 0.0
  vdrivetrain.left_gear = VelocityDrivetrain.LOW
  vdrivetrain.right_gear = VelocityDrivetrain.LOW

  glog.debug('K is %s', str(vdrivetrain.CurrentDrivetrain().K))

  if vdrivetrain.left_gear is VelocityDrivetrain.HIGH:
    glog.debug('Left is high')
  else:
    glog.debug('Left is low')
  if vdrivetrain.right_gear is VelocityDrivetrain.HIGH:
    glog.debug('Right is high')
  else:
    glog.debug('Right is low')

  for t in numpy.arange(0, 1.7, vdrivetrain.dt):
    if t < 0.5:
      vdrivetrain.Update(throttle=0.00, steering=1.0)
    elif t < 1.2:
      vdrivetrain.Update(throttle=0.5, steering=1.0)
    else:
      vdrivetrain.Update(throttle=0.00, steering=1.0)
    t_plot.append(t)
    vl_plot.append(vdrivetrain.X[0, 0])
    vr_plot.append(vdrivetrain.X[1, 0])
    ul_plot.append(vdrivetrain.U[0, 0])
    ur_plot.append(vdrivetrain.U[1, 0])
    left_gear_plot.append((vdrivetrain.left_gear is VelocityDrivetrain.HIGH) * 2.0 - 10.0)
    right_gear_plot.append((vdrivetrain.right_gear is VelocityDrivetrain.HIGH) * 2.0 - 10.0)

    fwd_velocity = (vdrivetrain.X[1, 0] + vdrivetrain.X[0, 0]) / 2
    turn_velocity = (vdrivetrain.X[1, 0] - vdrivetrain.X[0, 0])
    if abs(fwd_velocity) < 0.0000001:
      radius_plot.append(turn_velocity)
    else:
      radius_plot.append(turn_velocity / fwd_velocity)

  # TODO(austin):
  # Shifting compensation.

  # Tighten the turn.
  # Closed loop drive.

  pylab.plot(t_plot, vl_plot, label='left velocity')
  pylab.plot(t_plot, vr_plot, label='right velocity')
  pylab.plot(t_plot, ul_plot, label='left voltage')
  pylab.plot(t_plot, ur_plot, label='right voltage')
  pylab.plot(t_plot, radius_plot, label='radius')
  pylab.plot(t_plot, left_gear_plot, label='left gear high')
  pylab.plot(t_plot, right_gear_plot, label='right gear high')
  pylab.legend()
  pylab.show()
  return 0
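# Note: the "radius" curve plotted above is the turn component of the wheel
# speeds divided by the forward component, so it is proportional to curvature
# rather than being a geometric radius. A quick numeric check:
vl, vr = 1.0, 2.0                    # example left/right wheel velocities
fwd_velocity = (vr + vl) / 2         # 1.5
turn_velocity = vr - vl              # 1.0
print(turn_velocity / fwd_velocity)  # ~0.667, the value appended to radius_plot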
def main(argv):
  argv = FLAGS(argv)

  vdrivetrain = VelocityDrivetrain()

  if not FLAGS.plot:
    if len(argv) != 5:
      glog.fatal('Expected .h and .cc file names for the drivetrain and the CIM.')
    else:
      namespaces = ['c2017', 'subsystems', 'drivetrain']
      dog_loop_writer = control_loop.ControlLoopWriter(
          "VelocityDrivetrain", [vdrivetrain.drivetrain_low_low,
                         vdrivetrain.drivetrain_low_high,
                         vdrivetrain.drivetrain_high_low,
                         vdrivetrain.drivetrain_high_high],
                         namespaces=namespaces)

      dog_loop_writer.Write(argv[1], argv[2])

      cim_writer = control_loop.ControlLoopWriter(
          "CIM", [drivetrain.CIM()])

      cim_writer.Write(argv[3], argv[4])
      return

  vl_plot = []
  vr_plot = []
  ul_plot = []
  ur_plot = []
  radius_plot = []
  t_plot = []
  left_gear_plot = []
  right_gear_plot = []
  vdrivetrain.left_shifter_position = 0.0
  vdrivetrain.right_shifter_position = 0.0
  vdrivetrain.left_gear = VelocityDrivetrain.LOW
  vdrivetrain.right_gear = VelocityDrivetrain.LOW

  glog.debug('K is %s', str(vdrivetrain.CurrentDrivetrain().K))

  if vdrivetrain.left_gear is VelocityDrivetrain.HIGH:
    glog.debug('Left is high')
  else:
    glog.debug('Left is low')
  if vdrivetrain.right_gear is VelocityDrivetrain.HIGH:
    glog.debug('Right is high')
  else:
    glog.debug('Right is low')

  for t in numpy.arange(0, 1.7, vdrivetrain.dt):
    if t < 0.5:
      vdrivetrain.Update(throttle=0.00, steering=1.0)
    elif t < 1.2:
      vdrivetrain.Update(throttle=0.5, steering=1.0)
    else:
      vdrivetrain.Update(throttle=0.00, steering=1.0)
    t_plot.append(t)
    vl_plot.append(vdrivetrain.X[0, 0])
    vr_plot.append(vdrivetrain.X[1, 0])
    ul_plot.append(vdrivetrain.U[0, 0])
    ur_plot.append(vdrivetrain.U[1, 0])
    left_gear_plot.append((vdrivetrain.left_gear is VelocityDrivetrain.HIGH) * 2.0 - 10.0)
    right_gear_plot.append((vdrivetrain.right_gear is VelocityDrivetrain.HIGH) * 2.0 - 10.0)

    fwd_velocity = (vdrivetrain.X[1, 0] + vdrivetrain.X[0, 0]) / 2
    turn_velocity = (vdrivetrain.X[1, 0] - vdrivetrain.X[0, 0])
    if abs(fwd_velocity) < 0.0000001:
      radius_plot.append(turn_velocity)
    else:
      radius_plot.append(turn_velocity / fwd_velocity)

  # TODO(austin):
  # Shifting compensation.

  # Tighten the turn.
  # Closed loop drive.

  pylab.plot(t_plot, vl_plot, label='left velocity')
  pylab.plot(t_plot, vr_plot, label='right velocity')
  pylab.plot(t_plot, ul_plot, label='left voltage')
  pylab.plot(t_plot, ur_plot, label='right voltage')
  pylab.plot(t_plot, radius_plot, label='radius')
  pylab.plot(t_plot, left_gear_plot, label='left gear high')
  pylab.plot(t_plot, right_gear_plot, label='right gear high')
  pylab.legend()
  pylab.show()
  return 0
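# The only functional differences from the previous example are the namespaces
# and the FLAGS(argv) call. With python-gflags, FLAGS(argv) parses the defined
# flags out of argv and returns the remaining positional arguments. A minimal
# sketch of that idiom (the --plot flag definition is an assumption):
import sys
import gflags

FLAGS = gflags.FLAGS
gflags.DEFINE_bool('plot', False, 'If true, plot instead of writing sources.')

def main(argv):
    argv = FLAGS(argv)  # argv now holds only the non-flag arguments
    print(FLAGS.plot, argv)

if __name__ == '__main__':
    main(sys.argv)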
Exemple #51
0
    def test_fatal(self):
        log.fatal('test')
def main(argv):
    db_file = None
    list_file = None
    img_path = None
    ext = ".jpg"
    if_check = False
    help_msg = "dataset_create_imagelist.py -i <lmdb> -p <image path> -o <list>\
--check\n\
-i <lmdb>           The input lmdb database file\n\
-o <list>           The output image list file\n\
-p <image path>     The path where the downloaded images are stored\n\
--check [optional]  Force a check that each jpg image can actually be loaded,\n\
                    which slows down the process. Default: False"
    try:
        opts, args = getopt.getopt(argv, "hi:p:o:", ["check"])
    except getopt.GetoptError:
        print(help_msg)
        sys.exit(2)

    for opt, arg in opts:
        if opt == "-h":
            print(help_msg)
            sys.exit()
        elif opt == "-i":
            db_file = arg
        elif opt == "-o":
            list_file = arg
        elif opt == "-p":
            img_path = arg
        elif opt == "--check":
            if_check = True
        else:
            print(help_msg)
            sys.exit(2)
    # Check arguments
    if db_file is None or list_file is None or img_path is None:
        print(help_msg)
        sys.exit(2)

    # Check if the image path exists
    log.info("Check image path %s" % img_path)
    if not os.path.exists(img_path):
        log.fatal("Can not locate the image path %s" % img_path)
        sys.exit(2)
    # Create the text list file
    log.info("Open the image list file %s" % list_file)
    try:
        fp = open(list_file, "w")
    except IOError:
        log.fatal("Can not open %s for writing" % list_file)
        sys.exit(2)
    # open the lmdb file
    log.info("Open db file %s" % db_file)
    db = lt.open_db_ro(db_file)
    db_stat = db.stat()
    log.info("Total Entries: %d" % db_stat["entries"])
    bar = eb.EasyProgressBar()
    bar.set_end(db_stat["entries"])
    bar.start()
    counter = 0
    err_counter = 0
    # Iterate over the whole database
    with db.begin(write=False) as txn:
        with txn.cursor() as cur:
            for key, val in cur:
                counter += 1
                # Parse the record to get the photo id and 35 mm-equivalent focal length
                try:
                    val_dic = yaml.load(val)
                    photo = myxml.parse_xml_to_etree(val_dic["photo"])
                    photo_id = photo.get("id")
                    focal_in35 = int(val_dic["focal_in35"])
                except:
                    err_counter += 1
                    continue
                # Filter out obviously bad focal-length values
                if focal_in35 < 5 or focal_in35 > 200:
                    continue
                # Get the image full name
                if img_path[-1] == r"/":
                    img_name = img_path + photo_id + ext
                else:
                    img_name = img_path + r"/" + photo_id + ext
                img_name = os.path.abspath(img_name)
                # Check if the image exists
                if if_check:
                    # Load the image
                    try:
                        Image.open(img_name)
                    except:
                        err_counter += 1
                        continue
                else:
                    if not os.path.exists(img_name):
                        err_counter += 1
                        continue

                # Write the info to list file
                fp.writelines(img_name + " %d\n" % focal_in35)
                bar.update(counter)
    # Finish the loop
    db.close()
    fp.close()
    bar.finish()
    log.info("Finished. errors: %d" % err_counter)
def main(argv):
    src_file = None
    dst_file = None
    config_file = None
    result_dict = {}
    help_msg = 'dataset_split_class.py -i <indexfile> -o <output> -c <config>\n\
-i <file>           The input index text file\n\
-o <file>           The output index text file\n\
-c <file>           The configure xml file'

    try:
        opts, args = getopt.getopt(argv, 'hi:c:o:')
    except getopt.GetoptError:
        print(help_msg)
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            print(help_msg)
            sys.exit()
        elif opt == '-i':
            src_file = arg
        elif opt == '-o':
            dst_file = arg
        elif opt == '-c':
            config_file = arg
        else:
            print(help_msg)
            sys.exit(2)

    if src_file is None or dst_file is None or config_file is None:
        print(help_msg)
        sys.exit(2)

    # Check the config file
    log.info('Parsing configure file: %s' % config_file)
    config = myxml.parse_classifier_xml(config_file)
    if config is None:
        log.fatal('Failed to parse configure file %s' % config_file)
        sys.exit(2)
    result_dict = dict.fromkeys(config)
    # Check the src_file
    log.info('Opening %s' % src_file)
    try:
        src_fp = open(src_file, 'r')
    except IOError:
        log.fatal('Can not open %s' % src_file)
        sys.exit(2)
    # Open the dst file
    log.info('Opening %s' % dst_file)
    try:
        dst_fp = open(dst_file, 'w')
    except IOError:
        log.fatal('Can not open %s' % dst_file)
        sys.exit(2)

    # loop the src_file
    for line in src_fp.readlines():
        element = line.split(' ')
        if len(element) != 2:
            log.warn('\033[31mWARNING:\033[0m Extra space in %s' % line)
            continue
        focal_length = int(element[-1])
        image_path = element[0]
        # Get the label
        label = get_class(config, focal_length)
        if label is None:
            log.warn('\033[32mSKIP:\033[0m %s' % line)
            continue
        if result_dict[label] is None:
            result_dict[label] = 1
        else:
            result_dict[label] += 1
        # Write the new file
        dst_fp.writelines(image_path + ' %d\n' % label)

    src_fp.close()
    dst_fp.close()
    log.info('Final result: %s' % str(result_dict))
    log.info('Finished')
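# get_class is defined elsewhere in the script. A hypothetical sketch of what
# it might look like, assuming the parsed config maps an integer class label
# to an inclusive (min, max) range of 35 mm-equivalent focal lengths:
def get_class(config, focal_length):
    for label, (low, high) in config.items():
        if low <= focal_length <= high:
            return label
    # None tells the caller to skip this line.
    return None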
Exemple #54
0
prj_log = os.path.join(prj_log, '%04d' % int(args.year))
tool.check_path(prj_dir)
tool.check_path(prj_log)
prj_dir = os.path.join(prj_dir, '%03d' % int(args.idoy))
prj_log = os.path.join(prj_log, '%03d' % int(args.idoy), "log_tb")
tool.check_path(prj_dir)
tool.check_path(xml_dir)
tool.check_path(prj_log)
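# tool.check_path is another project-local helper. A hypothetical stand-in,
# assuming it simply ensures the directory exists (creating it when missing)
# and reports whether it was already there:
import os

def check_path(path):
    if os.path.isdir(path):
        return True
    os.makedirs(path)
    return False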

os.chdir(prj_dir)
os.system("rm -r ./*")  # 注意,会将工程目录清空
os.system("mkdir ./log_tb")
os.system("cp -rf %s/*log   ./log_tb" % prj_log)  # 将统一存放的log文件粘贴到prj路径下
tool.check_path(prd_dir)
tool.check_path(log_dir)
glog.fatal("crt dir is : " + os.getcwd())

# create the xml file
glog.fatal("==========> beg GNSS POD ALL MODE.")
glog.fatal("==========> beg create the xml file.")
doy_xml = os.path.join(xml_dir,
                       "gns_pod_%4s%03d.xml" % (args.year, int(args.idoy)))
create_xml_py = os.path.join(scp_dir, "gns_xml_create_xml.py")
update_xml_py = os.path.join(scp_dir, "gns_xml_update_editres.py")
update_ambfix_xml_py = os.path.join(scp_dir, "gns_xml_update_ambfix.py")

os.system("chmod +x " + create_xml_py)
os.system("chmod +x " + update_xml_py)
os.system("chmod +x " + update_ambfix_xml_py)

glog.fatal("==========> creat xml.")