Code example #1
def execute(gpu, exp_batch, exp_alias, city_name='Town01', memory_use=0.2, host='127.0.0.1'):
    # host,port,gpu_number,path,show_screen,resolution,noise_type,config_path,type_of_driver,experiment_name,city_name,game,drivers_name
    #drive_config.city_name = city_name
    # TODO Eliminate drive config.

    print("Running ", __file__, " On GPU ", gpu, "of experiment name ", exp_alias)
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu


    if not os.path.exists('_output_logs'):
        os.mkdir('_output_logs')


    sys.stdout = open(os.path.join('_output_logs',
                      g_conf.PROCESS_NAME + '_' + str(os.getpid()) + ".out"), "a", buffering=1)


    # vglrun -d :7.$GPU $CARLA_PATH/CarlaUE4/Binaries/Linux/CarlaUE4 /Game/Maps/$TOWN -windowed -benchmark -fps=10 -world-port=$PORT
    # sleep 100000

    carla_process, port = start_carla_simulator(gpu, exp_batch, exp_alias, city_name)


    merge_with_yaml(os.path.join('configs', exp_batch, exp_alias+'.yaml'))
    set_type_of_process('drive', city_name)



    log_level = logging.WARNING

    logging.StreamHandler(stream=None)
    logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)

    # TODO: we have an external class that controls the weather settings.

    """
    if city_name == 'Town01':
        experiment_suite = ECCVTrainingSuite()
    else:
        experiment_suite = ECCVGeneralizationSuite()
    """
    experiment_suite = TestSuite()

    coil_logger.add_message('Loading', {'Poses': experiment_suite._poses()})



    while True:
        try:
            coil_logger.add_message('Loading', {'CARLAClient': host+':'+str(port)})
            with make_carla_client(host, port) as client:


                # Now actually run the driving_benchmark

                latest = 0
                # While the checkpoint is not there
                while not maximun_checkpoint_reach(latest, g_conf.TEST_SCHEDULE):


                    # Get the correct checkpoint
                    if is_next_checkpoint_ready(g_conf.TEST_SCHEDULE):

                        latest = get_next_checkpoint(g_conf.TEST_SCHEDULE)
                        checkpoint = torch.load(os.path.join('_logs', exp_batch, exp_alias,
                                                             'checkpoints', str(latest) + '.pth'))

                        coil_agent = CoILAgent(checkpoint)
                        coil_logger.add_message('Iterating', {"Checkpoint": latest})
                        # TODO: Change alias to actual experiment name.
                        run_driving_benchmark(coil_agent, experiment_suite, city_name,
                                              exp_batch + '_' + exp_alias + '_' + str(latest),
                                              False, host, port)

                        # Read the resulting dictionary
                        #with open(os.path.join('_benchmark_results',
                        #                       exp_batch+'_'+exp_alias + 'iteration', 'metrics.json')
                        #          , 'r') as f:
                        #    summary_dict = json.loads(f.read())

                        # TODO: When you add the message you need to check if the experiment continues properly



                        # TODO: WRITE AN EFFICIENT PARAMETRIZED OUTPUT SUMMARY FOR TEST.

                        #test_agent.finish_model()

                        #test_agent.write(results)

                    else:
                        time.sleep(0.1)

                break


        except TCPConnectionError as error:
            logging.error(error)
            time.sleep(1)
            carla_process.kill()
            break
        except KeyboardInterrupt:
            carla_process.kill()
            coil_logger.add_message('Error', {'Message': 'Killed By User'})
            break
        except:
            traceback.print_exc()
            carla_process.kill()
            coil_logger.add_message('Error', {'Message': 'Something Happened'})
            break

    carla_process.kill()
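
The example above relies on the CARLA 0.8.x client's connect-and-retry idiom. A minimal self-contained sketch of that idiom, assuming only the carla Python package (run_with_retry and benchmark_fn are hypothetical names, not from the source):

import logging
import time

from carla.client import make_carla_client
from carla.tcp import TCPConnectionError


def run_with_retry(host, port, benchmark_fn, retries=5):
    # Re-attempt the TCP connection a bounded number of times, mirroring the
    # while/except structure of the example above, before giving up.
    for _ in range(retries):
        try:
            with make_carla_client(host, port) as client:
                return benchmark_fn(client)
        except TCPConnectionError as error:
            logging.error(error)
            time.sleep(1)
    raise RuntimeError('Could not connect to CARLA at %s:%s' % (host, port))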
Code example #2
    args = argparser.parse_args()
    if args.debug:
        log_level = logging.DEBUG
    elif args.verbose:
        log_level = logging.INFO
    else:
        log_level = logging.WARNING

    logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
    logging.info('listening to server %s:%s', args.host, args.port)

    # We instantiate the agent to be evaluated on this benchmark
    # (here an L5Agent policy)
    agent = L5Agent()

    # We instantiate an experiment suite. Basically a set of experiments
    # that are going to be evaluated on this benchmark.
    if args.corl_2017:
        experiment_suite = CoRL2017(args.city_name)
    else:
        print('WARNING: running the basic driving benchmark; to run the'
              ' CoRL 2017 experiment suite, use'
              ' python driving_benchmark_example.py --corl-2017')
        experiment_suite = BasicExperimentSuite(args.city_name)

    # Now actually run the driving_benchmark
    run_driving_benchmark(agent, experiment_suite, args.city_name,
                          args.log_name, args.continue_experiment,
                          args.host, args.port)
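
The snippet above begins at parse_args(), so the parser itself is not shown. A plausible reconstruction, with flag names inferred from the attributes used above (treat the defaults and help strings as assumptions):

import argparse

argparser = argparse.ArgumentParser(description='Driving benchmark example')
argparser.add_argument('-v', '--verbose', action='store_true',
                       help='print some extra status information')
argparser.add_argument('--debug', action='store_true',
                       help='print debug information')
argparser.add_argument('--host', default='localhost',
                       help='IP of the host server')
argparser.add_argument('-p', '--port', default=2000, type=int,
                       help='TCP port to listen to')
argparser.add_argument('-c', '--city-name', dest='city_name', default='Town01',
                       help='the town to be benchmarked')
argparser.add_argument('-n', '--log-name', dest='log_name', default='test',
                       help='name of the benchmark log')
argparser.add_argument('--corl-2017', action='store_true',
                       help='run the CoRL 2017 experiment suite')
argparser.add_argument('--continue-experiment', action='store_true',
                       help='continue the experiment with the same log name')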
Code example #3
def execute(gpu,
            exp_batch,
            exp_alias,
            city_name='Town01',
            memory_use=0.2,
            host='127.0.0.1'):
    # host,port,gpu_number,path,show_screen,resolution,noise_type,config_path,type_of_driver,experiment_name,city_name,game,drivers_name
    #drive_config.city_name = city_name
    # TODO Eliminate drive config.

    print("Running ", __file__, " On GPU ", gpu, "of experiment name ",
          exp_alias)
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu

    sys.stdout = open(str(os.getpid()) + ".out", "a", buffering=1)

    # vglrun -d :7.$GPU $CARLA_PATH/CarlaUE4/Binaries/Linux/CarlaUE4 /Game/Maps/$TOWN -windowed -benchmark -fps=10 -world-port=$PORT
    # sleep 100000

    carla_process, port = start_carla_simulator(gpu, exp_batch, exp_alias,
                                                city_name)

    merge_with_yaml(os.path.join(exp_batch, exp_alias + '.yaml'))
    set_type_of_process('test')

    #test_agent = CarlaDrive(experiment_name)

    # TODO: we have an external class that controls the weather settings.
    """
    if city_name == 'Town01':
        experiment_suite = ECCVTrainingSuite()
    else:
        experiment_suite = ECCVGeneralizationSuite()
    """
    experiment_suite = TestSuite()

    while True:
        try:

            with make_carla_client(host, port) as client:

                # Now actually run the driving_benchmark

                latest = 0
                # While the checkpoint is not there
                while not maximun_checkpoint_reach(latest,
                                                   g_conf.TEST_SCHEDULE):

                    # Get the correct checkpoint
                    if is_next_checkpoint_ready(g_conf.TEST_SCHEDULE):

                        latest = get_next_checkpoint(g_conf.TEST_SCHEDULE)
                        checkpoint = torch.load(
                            os.path.join('_logs', exp_batch, exp_alias,
                                         'checkpoints',
                                         str(latest) + '.pth'))

                        coil_agent = CoILAgent(checkpoint)
                        run_driving_benchmark(
                            coil_agent, experiment_suite, city_name,
                            exp_batch + '_' + exp_alias + 'iteration', False,
                            host, port)

                        # Read the resulting dictionary
                        with open(
                                os.path.join(
                                    '_benchmark_results',
                                    exp_batch + '_' + exp_alias + 'iteration',
                                    'metrics.json'), 'r') as f:
                            summary_dict = json.loads(f.read())

                        # TODO: When you add the message you need to check if the experiment continues properly
                        coil_logger.add_message(
                            {'Running': {
                                "DBSummary": summary_dict
                            }})

                        #test_agent.finish_model()

                        #test_agent.write(results)

                    else:
                        time.sleep(0.1)
                # TODO: is this really needed ??? I believe not.
                #monitorer.export_results(os.path.join('_benchmark_results',
                #                                      exp_batch + '_' +exp_alias +'iteration'))
                break

        except TCPConnectionError as error:
            # The simulator process is killed below, so reconnecting cannot
            # succeed; log the error and stop instead of looping forever.
            logging.error(error)
            time.sleep(1)
            carla_process.kill()
            break

        except KeyboardInterrupt:
            carla_process.kill()
            break
        except:
            traceback.print_exc()
            carla_process.kill()
            break

    carla_process.kill()
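
Examples #1 and #3 poll helpers such as is_next_checkpoint_ready and get_next_checkpoint whose implementations are not shown. An illustrative sketch of the semantics they appear to have (these are assumptions, not the project's real helpers): g_conf.TEST_SCHEDULE is a list of training iterations, and a checkpoint is ready once training has written '<iteration>.pth' to disk.

import os


def next_scheduled(latest, schedule):
    # First scheduled iteration strictly after the last one evaluated.
    return next((it for it in schedule if it > latest), None)


def checkpoint_ready(iteration, ckpt_dir):
    # A checkpoint is ready once its .pth file exists on disk.
    return iteration is not None and os.path.exists(
        os.path.join(ckpt_dir, str(iteration) + '.pth'))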
Code example #4
def execute(gpu,
            exp_batch,
            exp_alias,
            drive_conditions,
            memory_use=0.2,
            host='127.0.0.1',
            suppress_output=True,
            no_screen=False):

    try:

        print("Running ", __file__, " On GPU ", gpu, "of experiment name ",
              exp_alias)
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu

        if not os.path.exists('_output_logs'):
            os.mkdir('_output_logs')

        merge_with_yaml(os.path.join('configs', exp_batch,
                                     exp_alias + '.yaml'))

        print("drive cond", drive_conditions)
        exp_set_name, town_name = drive_conditions.split('_')

        if g_conf.USE_ORACLE:
            control_filename = 'control_output_auto.csv'
        else:
            control_filename = 'control_output.csv'

        if exp_set_name == 'ECCVTrainingSuite':
            experiment_set = ECCVTrainingSuite()
            set_type_of_process('drive', drive_conditions)
        elif exp_set_name == 'ECCVGeneralizationSuite':
            experiment_set = ECCVGeneralizationSuite()
            set_type_of_process('drive', drive_conditions)
        elif exp_set_name == 'TestT1':
            experiment_set = TestT1()
            set_type_of_process('drive', drive_conditions)
        elif exp_set_name == 'TestT2':
            experiment_set = TestT2()
            set_type_of_process('drive', drive_conditions)
        else:
            raise ValueError(
                "Experiment set name does not correspond to a known experiment suite")

        if suppress_output:
            sys.stdout = open(os.path.join(
                '_output_logs',
                g_conf.PROCESS_NAME + '_' + str(os.getpid()) + ".out"),
                              "a",
                              buffering=1)
            #sys.stderr = open(os.path.join('_output_logs',
            #                  'err_'+g_conf.PROCESS_NAME + '_' + str(os.getpid()) + ".out"),
            #                  "a", buffering=1)

        carla_process, port = start_carla_simulator(gpu, town_name, no_screen)

        coil_logger.add_message(
            'Loading', {'Poses': experiment_set.build_experiments()[0].poses})

        coil_logger.add_message('Loading',
                                {'CARLAClient': host + ':' + str(port)})

        # Now actually run the driving_benchmark

        latest = get_latest_evaluated_checkpoint()
        if latest is None:  # get_latest_evaluated_checkpoint() returns None when nothing has been tested yet
            latest = 0

            csv_outfile = open(
                os.path.join('_logs', exp_batch, exp_alias,
                             g_conf.PROCESS_NAME + '_csv', control_filename),
                'w')

            csv_outfile.write(
                "%s,%s,%s,%s,%s,%s,%s,%s\n" %
                ('step', 'episodes_completion', 'intersection_offroad',
                 'intersection_otherlane', 'collision_pedestrians',
                 'collision_vehicles', 'episodes_fully_completed',
                 'driven_kilometers'))
            csv_outfile.close()

        # The CSV header written above feeds the final summary.
        # Poll until the last checkpoint in the test schedule has been evaluated.

        while not maximun_checkpoint_reach(latest, g_conf.TEST_SCHEDULE):

            try:
                # Get the correct checkpoint
                if is_next_checkpoint_ready(g_conf.TEST_SCHEDULE):

                    latest = get_next_checkpoint(g_conf.TEST_SCHEDULE)
                    checkpoint = torch.load(
                        os.path.join('_logs', exp_batch, exp_alias,
                                     'checkpoints',
                                     str(latest) + '.pth'))

                    coil_agent = CoILAgent(checkpoint, town_name)

                    coil_logger.add_message('Iterating',
                                            {"Checkpoint": latest}, latest)

                    run_driving_benchmark(
                        coil_agent, experiment_set, town_name,
                        exp_batch + '_' + exp_alias + '_' + str(latest) +
                        '_drive_' + control_filename[:-4], True, host, port)

                    path = exp_batch + '_' + exp_alias + '_' + str(latest) \
                           + '_' + g_conf.PROCESS_NAME.split('_')[0] + '_' + control_filename[:-4] \
                           + '_' + g_conf.PROCESS_NAME.split('_')[1] + '_' + g_conf.PROCESS_NAME.split('_')[2]

                    print(path)
                    print("Finished")
                    benchmark_json_path = os.path.join(get_latest_path(path),
                                                       'metrics.json')
                    with open(benchmark_json_path, 'r') as f:
                        benchmark_dict = json.loads(f.read())

                    averaged_dict = compute_average_std(
                        [benchmark_dict], experiment_set.weathers,
                        len(experiment_set.build_experiments()))
                    print(averaged_dict)
                    csv_outfile = open(
                        os.path.join('_logs', exp_batch, exp_alias,
                                     g_conf.PROCESS_NAME + '_csv',
                                     control_filename), 'a')

                    csv_outfile.write(
                        "%d,%f,%f,%f,%f,%f,%f,%f\n" %
                        (latest, averaged_dict['episodes_completion'],
                         averaged_dict['intersection_offroad'],
                         averaged_dict['intersection_otherlane'],
                         averaged_dict['collision_pedestrians'],
                         averaged_dict['collision_vehicles'],
                         averaged_dict['episodes_fully_completed'],
                         averaged_dict['driven_kilometers']))

                    csv_outfile.close()

                    # TODO: When you add the message you need to check if the experiment continues properly

                    # TODO: WRITE AN EFFICIENT PARAMETRIZED OUTPUT SUMMARY FOR TEST.

                else:
                    time.sleep(0.1)

            except TCPConnectionError as error:
                logging.error(error)
                time.sleep(1)
                carla_process.kill()
                coil_logger.add_message('Error',
                                        {'Message': 'TCP serious Error'})
                exit(1)
            except KeyboardInterrupt:
                carla_process.kill()
                coil_logger.add_message('Error', {'Message': 'Killed By User'})
                exit(1)
            except:
                traceback.print_exc()
                carla_process.kill()
                coil_logger.add_message('Error',
                                        {'Message': 'Something Happened'})
                exit(1)

        coil_logger.add_message('Finished', {})

    except KeyboardInterrupt:
        traceback.print_exc()
        carla_process.kill()
        coil_logger.add_message('Error', {'Message': 'Killed By User'})

    except:
        traceback.print_exc()
        carla_process.kill()
        coil_logger.add_message('Error', {'Message': 'Something happened'})

    carla_process.kill()
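
Example #4 writes its per-checkpoint summary CSV by hand with format strings. A minimal equivalent using the standard csv module (column names copied from the header written above; append_summary_row is a hypothetical helper, not from the source):

import csv

FIELDS = ['step', 'episodes_completion', 'intersection_offroad',
          'intersection_otherlane', 'collision_pedestrians',
          'collision_vehicles', 'episodes_fully_completed',
          'driven_kilometers']


def append_summary_row(csv_path, latest, averaged_dict):
    # Append one benchmark summary row; assumes the header row already exists.
    with open(csv_path, 'a', newline='') as f:
        csv.writer(f).writerow([latest] + [averaged_dict[k] for k in FIELDS[1:]])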
Code example #5
File: gendata.py  Project: linjucs/carla
        help='If you want to continue the experiment with the same name'
    )

    args = argparser.parse_args()
    if args.debug:
        log_level = logging.DEBUG
    elif args.verbose:
        log_level = logging.INFO
    else:
        log_level = logging.WARNING

    logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
    logging.info('listening to server %s:%s', args.host, args.port)

    if args.hn:
        agent = AutoPilotAgent050()
    else:
        agent = AutoPilotAgent025()

    if args.experiment == 'C':
        corl = Fctl2018T(args.city_name, args.times)
    elif args.experiment == 'S':
        corl = Fctl2018S(args.city_name, args.times)
    else:
        corl = Fctl2018(args.city_name, args.times)

    # Now actually run the driving_benchmark
    run_driving_benchmark(agent, corl, args.city_name,
                          args.log_name, args.continue_experiment,
                          args.host, args.port, args.gd)
Code example #6
    args = argparser.parse_args()
    if args.debug:
        log_level = logging.DEBUG
    elif args.verbose:
        log_level = logging.INFO
    else:
        log_level = logging.WARNING

    logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
    logging.info('listening to server %s:%s', args.host, args.port)

    # We instantiate a forward agent, a simple policy that just sets
    # acceleration to 0.9 and steering to zero
    agent = ForwardAgent()

    # We instantiate an experiment suite. Basically a set of experiments
    # that are going to be evaluated on this benchmark.
    if args.corl_2017:
        experiment_suite = CoRL2017(args.city_name)
    else:
        print('WARNING: running the basic driving benchmark; to run the'
              ' CoRL 2017 experiment suite, use'
              ' python driving_benchmark_example.py --corl-2017')
        experiment_suite = BasicExperimentSuite(args.city_name)

    # Now actually run the driving_benchmark
    run_driving_benchmark(agent, experiment_suite, args.city_name,
                          args.log_name, args.continue_experiment,
                          args.host, args.port)
Code example #7
    def __init__(self,
                 town='Town01_nemesisA',
                 task='turn-right',
                 weather='ClearNoon',
                 port=2000,
                 save_images=False,
                 gpu_num=0,
                 experiment_name='baseline',
                 adversary_name="adversary_",
                 intersection="42_47"):
        """
        Adversary environment for Carla Simulator
        """
        print("Starting CARLA gym environment")
        print("Ensure that CARLA is running on port", port)
        self.town = town
        self.task = task
        self.weather = WEATHER_DICT[weather]
        self.port = port
        self.save_images = save_images
        self.gpu_num = gpu_num
        self.experiment_name = experiment_name
        self.intersection = intersection
        self.counter = 0  # counter used when more than one experiment is run

        self.agent = None
        self.avoid_stopping = False
        self.iterations = 1
        self.adversary_name = adversary_name

        if self.adversary_name == "adversary_":
            adversary_name_other = "adversarybeta_"
        else:
            adversary_name_other = "adversary_"
        image_label_other = adversary_name_other + self.town + ".png"
        self._load_agent()

        # defines what kinds of experiments are going to be run
        self.experiment_suite = AdversarySuite(self.town, self.task,
                                               self.weather, self.iterations,
                                               self.intersection)

        # load the adversary generator
        self.adversary = Shape(self.town, adversary_name=self.adversary_name)

        self.log_dir = '_benchmarks_results/' + self.town + '/'
        self.update_csv_file()

        print("Running the baseline scenario.")
        # runs the experiment for the baseline case (no attack)
        run_driving_benchmark(self.agent,
                              self.experiment_suite,
                              log_name=self.experiment_name,
                              city_name=self.town,
                              port=self.port,
                              save_images=save_images)

        # some metrics that are collected
        self.baseline_steer_grad = self.get_steer_gradient()
        self.baseline_steer = self.get_steer()
        self.positions = self.get_xy()
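
WEATHER_DICT in the constructor above is not shown. A plausible definition mapping preset names to CARLA 0.8.x weather ids (the ids follow the CARLA documentation; the exact dictionary used by this project is an assumption):

WEATHER_DICT = {
    'ClearNoon': 1,
    'CloudyNoon': 2,
    'WetNoon': 3,
    'HardRainNoon': 6,
    'ClearSunset': 8,
}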
Code example #8
    def step(self):
        # Render the next adversarial pattern and re-run the benchmark.
        self.experiment_name = 'adversary_{}'.format(self.opt)
        self.adversary.lines_rotate(opt=self.opt, color_option=self.color_option)
        self.adversary_other.lines_rotate(opt=self.opt, color_option="True")
        self.agent = ImitationLearning(
            self.town, self.task, self.intersection, self.save_choice,
            self.avoid_stopping, opt=self.opt, min_frames=self.min_frames,
            max_frames=self.max_frames, trajectory_no=self.trajectory_no)
        run_driving_benchmark(agent=self.agent,
                              experiment_suite=self.experiment_suite,
                              city_name=self.town,
                              log_name=self.experiment_name, port=self.port)
Code example #9
targetSteer = targetSteer[:MAX_LEN]  # trim steering angles to the maximum number of allowed frames

env.task = baseline_task
env.scene = baseline_scene
env.experiment_name = 'baseline'

# reset experiment suite with base task + scene
env.experiment_suite = AdversarySuite(env.town, env.task, env.weather,
                                      env.iterations, env.scene)

# run the baseline simulation
print("Running the simulation for the baseline path.")
run_driving_benchmark(env.agent,
                      env.experiment_suite,
                      log_name=env.experiment_name,
                      city_name=env.town,
                      port=env.port,
                      save_images=False)
print("Complete.")
baseSteer = env.get_steer()
MAX_LEN_B = int(len(baseSteer) * .8)  # keep the first 80% of the baseline trace
baseSteer = baseSteer[:MAX_LEN_B]
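
The snippet cuts off before showing how the two trimmed steering traces are used. A hypothetical illustration of one way they might be compared (steering_deviation is an invented name, not from the source):

import numpy as np


def steering_deviation(target_steer, base_steer):
    # Mean absolute deviation over the overlapping prefix of the two traces.
    n = min(len(target_steer), len(base_steer))
    return float(np.mean(np.abs(np.asarray(target_steer[:n], dtype=float) -
                                np.asarray(base_steer[:n], dtype=float))))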


def target(pos1,
           rot1,
           pos2=0,
           rot2=0,
           width=10,
           length=200,
           colorR=0,