Example #1
model.load_state_dict(checkpoint['state_dict'])

model.eval()
print(len(dataset))

save_dir = os.path.join(args.gradcam_path, args.type)
if not os.path.isdir(save_dir):
    os.mkdir(save_dir)

count = 0
for data in dataloader:

    for i in range(g_conf.BATCH_SIZE):
        controls = data['directions']
        output = model.forward_branch(
            torch.squeeze(data['rgb']).cuda(),
            dataset.extract_inputs(data).cuda(), controls)
        activations = model.get_perception_activations(
            torch.squeeze(data['rgb']).cuda())[4].detach()
        # Backprop from steer (0), throttle (1) or brake (2); the Grad-CAM
        # results in the suppmat are computed using brake.
        output[i, 2].backward()
        gradients = model.get_perception_gradients()
        # Global-average-pool the gradients over the batch and spatial dimensions.
        pooled_gradients = torch.mean(torch.mean(torch.mean(gradients, 3), 2), 0)

        # Weight each feature map by its pooled gradient
        # (number of feature maps = 512 for conv4, 256 for conv3).
        for j in range(512):
            activations[:, j, :, :] *= pooled_gradients[j]

        heatmap = torch.mean(activations, dim=1).squeeze()
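        # --- Sketch (not in the original script): typical Grad-CAM post-processing.
        # Assumes `heatmap` above is 2-D (single-image batch); matplotlib would
        # normally be imported at module level.
        import matplotlib.pyplot as plt
        heatmap = torch.relu(heatmap)                # keep positive evidence only
        heatmap = heatmap / (heatmap.max() + 1e-8)   # normalize to [0, 1]
        plt.imsave(os.path.join(save_dir, 'gradcam_%05d.png' % count),
                   heatmap.cpu().numpy(), cmap='jet')
        count += 1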
Example #2
class CoILBaselineCEXP(Agent):
    def setup(self, path_to_config_file):

        yaml_conf, checkpoint_number, agent_name, encoder_params = checkpoint_parse_configuration_file(
            path_to_config_file)

        # Take the checkpoint name and load it
        if encoder_params is not None:
            self.checkpoint = torch.load(
                os.path.join(
                    '/',
                    os.path.join(*os.path.realpath(__file__).split('/')[:-2]),
                    '_logs',
                    yaml_conf.split('/')[-2],
                    yaml_conf.split('/')[-1].split('.')[-2] + '_' +
                    str(encoder_params['encoder_checkpoint']), 'checkpoints',
                    str(checkpoint_number) + '.pth'))
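            # Resulting path, roughly (illustrative only):
            #   <repo_root>/_logs/<yaml_dir>/<yaml_name>_<encoder_checkpoint>/checkpoints/<N>.pth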

            # Once ENCODER_MODEL_CONFIGURATION is defined, we use the pre-trained
            # encoder model to extract the bottleneck z that drives the end-to-end agent.
            self.encoder_checkpoint = torch.load(
                os.path.join(
                    '/',
                    os.path.join(*os.path.realpath(__file__).split('/')[:-2]),
                    '_logs', encoder_params['encoder_folder'],
                    encoder_params['encoder_exp'], 'checkpoints',
                    str(encoder_params['encoder_checkpoint']) + '.pth'))

            self.encoder_model = CoILModel(g_conf.ENCODER_MODEL_TYPE,
                                           g_conf.ENCODER_MODEL_CONFIGURATION)
            self.encoder_model.load_state_dict(
                self.encoder_checkpoint['state_dict'])
            self.encoder_model.cuda()
            self.encoder_model.eval()

        else:
            self.checkpoint = torch.load(
                os.path.join(
                    '/',
                    os.path.join(*os.path.realpath(__file__).split('/')[:-2]),
                    '_logs',
                    yaml_conf.split('/')[-2],
                    yaml_conf.split('/')[-1].split('.')[-2], 'checkpoints',
                    str(checkpoint_number) + '.pth'))

        # do the merge here
        # TODO THE MERGE IS REQUIRED DEPENDING ON THE SITUATION
        g_conf.immutable(False)
        merge_with_yaml(
            os.path.join(
                '/', os.path.join(*os.path.realpath(__file__).split('/')[:-2]),
                yaml_conf), encoder_params)

        self._model = CoILModel(g_conf.MODEL_TYPE, g_conf.MODEL_CONFIGURATION,
                                g_conf.ENCODER_MODEL_CONFIGURATION)
        self.first_iter = True
        logging.info("Setup Model")
        # Load the model and set it up for evaluation
        self._model.load_state_dict(self.checkpoint['state_dict'])
        self._model.cuda()
        self._model.eval()
        self.latest_image = None
        self.latest_image_tensor = None
        # We make the curve commands last longer (see _expand_commands)
        self._expand_command_front = 5
        self._expand_command_back = 3

        # TODO: Merge with Felipe's code
        self._msn = None
        self._lat_ref = 0
        self._lon_ref = 0
        # Check the agent name
        self._name = agent_name

        self.count = 0

    def sensors(self):
        sensors = [{
            'type': 'sensor.camera.rgb',
            'x': 2.0,
            'y': 0.0,
            'z': 1.40,
            'roll': 0.0,
            'pitch': -15.0,
            'yaw': 0.0,
            'width': 800,
            'height': 600,
            'fov': 100,
            'id': 'rgb'
        }, {
            'type': 'sensor.can_bus',
            'reading_frequency': 25,
            'id': 'can_bus'
        }, {
            'type': 'sensor.other.gnss',
            'x': 0.7,
            'y': -0.4,
            'z': 1.60,
            'id': 'GPS'
        }]

        return sensors

    """
    def make_state(self, exp):
        # state is divided in three parts, the speed, the angle_error, the high level command
        # Get the closest waypoint

        #waypoint, _ = self._get_current_wp_direction(exp._ego_actor.get_transform().location, exp._route)
        #norm, angle = compute_magnitude_angle(waypoint.location, exp._ego_actor.get_transform().location,
        #                                      exp._ego_actor.get_transform().rotation.yaw)
        #return np.array([_get_forward_speed(exp._ego_actor) / 12.0,  # Normalize to by dividing by 12
        #                 angle / 180.0])
        self._global_plan = exp._route

        input_data = exp._sensor_interface.get_data()
        # TODO this should be capilarized
        #if 'scenario' in g_conf.MEASUREMENTS_INPUTS:
        #    scenario = convert_scenario_name_number(exp._environment_data['exp_measurements'])
        #    input_data.update({'scenario': scenario})
        return input_data
    """

    def make_state(self, exp):
        """
        This function also does the necessary processing of the state for the run_step function.
        :param exp:
        :return:
        """

        self._global_plan = exp._route

        # we also need to get the latitude / longitude reference
        # TODO this needs to be adapted for a CARLA challenge submission
        self._lat_ref = exp._lat_ref
        self._lon_ref = exp._lon_ref

        input_data = exp._sensor_interface.get_data()
        self._vehicle_pos = exp._ego_actor.get_transform().location
        # TODO this should be made more fine-grained

        input_data.update(
            {'sensor_input': self._process_sensors(input_data['rgb'][1])})
        if self._msn is not None:
            input_data.update(
                {'scenario': self._msn(input_data['sensor_input'])})

        #if 'scenario' in g_conf.MEASUREMENTS_INPUTS:
        #    scenario = convert_scenario_name_number(exp._environment_data['exp_measurements'])
        #    print (" SCENARIO NUMBER ", scenario)
        #    input_data.update({'scenario': scenario})

        return input_data

    def run_step(self, input_data):

        # Get the current directions for following the route

        directions = self._get_current_direction(self._vehicle_pos)
        logging.debug(" Current direction %f ", directions)

        # Take the forward speed and normalize it for it to go from 0-1
        network_input = input_data['can_bus'][1]['speed'] / g_conf.SPEED_FACTOR
        network_input = torch.cuda.FloatTensor([network_input]).unsqueeze(0)

        # TODO remove ifs
        #if 'scenario' in g_conf.MEASUREMENTS_INPUTS:
        #    network_input = torch.cat((torch.cuda.FloatTensor([input_data['scenario']]),
        #                               network_input), 1)

        # Compute the forward pass processing the sensors got from CARLA.
        # TODO we start with an if, but we could build a class hierarchy!

        if g_conf.MODEL_TYPE in [
                'coil-icra', 'coil-icra-KLD', 'separate-supervised'
        ]:
            directions_tensor = torch.cuda.LongTensor([directions])
            #print("       Directions ", int(directions))
            if False:  # debug image dump (disabled by default)
                save_path = os.path.join('temp', 'ete_baseline')
                if not os.path.exists(save_path):
                    os.mkdir(save_path)
                save_image(
                    input_data['sensor_input'],
                    os.path.join(
                        save_path,
                        'run_input_' + str(self.count).zfill(5) + ".png"))
                self.count += 1
            model_outputs = self._model.forward_branch(
                input_data['sensor_input'], network_input, directions_tensor)

        elif g_conf.MODEL_TYPE in ['coil-icra-VAE']:
            directions_tensor = torch.cuda.LongTensor([directions])
            if g_conf.ENCODER_MODEL_TYPE in ['VAE']:
                if g_conf.LABELS_SUPERVISED:
                    input = torch.cat(
                        (input_data['sensor_input'], torch.zeros(
                            1, 1, 88, 200).cuda()),
                        dim=1)
                    recon_x, mu, _, z = self.encoder_model(input)
                else:
                    recon_x, mu, _, z = self.encoder_model(
                        input_data['sensor_input'])

            elif g_conf.ENCODER_MODEL_TYPE in ['Affordances']:
                mu, _ = self.encoder_model(input_data['sensor_input'])

            if False:  # debug image dump (disabled by default)
                save_path = os.path.join('temp', 'affordances_upperbound')
                if not os.path.exists(save_path):
                    os.mkdir(save_path)
                if g_conf.LABELS_SUPERVISED:
                    save_image(
                        input_data['sensor_input'],
                        os.path.join(
                            save_path,
                            'run_input_' + str(self.count).zfill(5) + ".png"))
                    split = torch.split(torch.squeeze(recon_x, dim=1), [3, 1],
                                        dim=1)
                    save_image(
                        split[0],
                        os.path.join(
                            save_path, 'run_recon_rgb_' +
                            str(self.count).zfill(5) + ".png"))
                    save_image(
                        split[1],
                        os.path.join(
                            save_path, 'run_recon_labels_' +
                            str(self.count).zfill(5) + ".png"))
                else:
                    save_image(
                        input_data['sensor_input'],
                        os.path.join(
                            save_path,
                            'run_input_' + str(self.count).zfill(5) + ".png"))
                    #save_image(recon_x, os.path.join(save_path, 'run_recon_' + str(self.count).zfill(5) + ".png"))
                self.count += 1

            model_outputs = self._model.forward_branch(mu, network_input,
                                                       directions_tensor)

            #print(' frame', self.count)
            #print(' direction', directions_tensor)
            #print(' branch output', model_outputs)

        elif g_conf.MODEL_TYPE in [
                'separate-supervised-NoSpeed', 'coil-icra-NoSpeed'
        ]:
            directions_tensor = torch.cuda.LongTensor([directions])
            if False:  # debug image dump (disabled by default)
                save_path = os.path.join('temp', 'ETE_resnet34_6')
                if not os.path.exists(save_path):
                    os.mkdir(save_path)
                save_image(
                    input_data['sensor_input'],
                    os.path.join(
                        save_path,
                        'run_input_' + str(self.count).zfill(5) + ".png"))
                self.count += 1
            model_outputs = self._model.forward_branch(
                input_data['sensor_input'], directions_tensor)

        else:
            directions_tensor = torch.cuda.FloatTensor(
                encode_directions(directions))
            model_outputs = self._model.forward(
                self._process_sensors(input_data['rgb'][1]), network_input,
                directions_tensor)[0]

        steer, throttle, brake = self._process_model_outputs(model_outputs[0])
        control = carla.VehicleControl()
        control.steer = float(steer)
        control.throttle = float(throttle)
        control.brake = float(brake)
        logging.debug("Output %f %f %f " %
                      (control.steer, control.throttle, control.brake))

        if self.first_iter:
            coil_logger.add_message('Iterating', {
                "Checkpoint": self.checkpoint['iteration'],
                'Agent': str(steer)
            }, self.checkpoint['iteration'])
        # There is the possibility to replace some of the predictions with oracle predictions.
        self.first_iter = False

        #print(['steer: ', control.steer, 'throttle: ', control.throttle, 'brake: ', control.brake])

        return control

    def get_attentions(self, layers=None):
        """
        Returns
            The activations obtained from the first layers of the latest iteration.

        """
        if layers is None:
            layers = [0, 1, 2]
        if self.latest_image_tensor is None:
            raise ValueError(
                'No step has been run yet, so there is no image to compute '
                'the activations from. Try calling run_step first.')
        all_layers = self._model.get_perception_layers(
            self.latest_image_tensor)
        cmap = plt.get_cmap('inferno')
        attentions = []
        for layer in layers:
            y = all_layers[layer]
            att = torch.abs(y).mean(1)[0].data.cpu().numpy()
            att = att / att.max()
            att = cmap(att)
            att = np.delete(att, 3, 2)
            attentions.append(imresize(att, [88, 200]))
        return attentions
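    # Aside (not in the original): scipy.misc.imresize was removed in SciPy 1.3.
    # A Pillow-based replacement for the float RGB map above would be, roughly:
    #   att = np.array(Image.fromarray((att * 255).astype(np.uint8)).resize((200, 88))) / 255.0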

    def _process_sensors(self, sensor):
        sensor = sensor[:, :, 0:3]  # BGRA -> BGR: drop the alpha channel
        sensor = sensor[g_conf.IMAGE_CUT[0]:g_conf.IMAGE_CUT[1], :, :]  # crop
        sensor = scipy.misc.imresize(sensor,
                                     (g_conf.SENSORS['rgb_central'][1],
                                      g_conf.SENSORS['rgb_central'][2]))
        self.latest_image = sensor

        # HWC -> CHW (the original swapaxes + transpose pair collapses to one transpose)
        sensor = np.transpose(sensor, (2, 0, 1))
        sensor = torch.from_numpy(sensor / 255.0).type(
            torch.FloatTensor).cuda()
        image_input = sensor.unsqueeze(0)
        self.latest_image_tensor = image_input

        return image_input

    def _get_current_direction(self, vehicle_position):

        #print("      number of waypoints in global plan:", len(self._global_plan))

        # for the current position and orientation try to get the closest one from the waypoints
        closest_id = 0
        min_distance = float('inf')  # no waypoint seen yet
        for index in range(len(self._global_plan)):

            waypoint = self._global_plan[index][0]

            computed_distance = distance_vehicle(waypoint, vehicle_position)
            if computed_distance < min_distance:
                min_distance = computed_distance
                closest_id = index

        #print("      closest waypoint", closest_id)
        logging.debug("Closest waypoint {} dist {}".format(
            closest_id, min_distance))
        direction = self._global_plan[closest_id][1]

        if direction == RoadOption.LEFT:
            direction = 3.0
        elif direction == RoadOption.RIGHT:
            direction = 4.0
        elif direction == RoadOption.STRAIGHT:
            direction = 5.0
        else:
            direction = 2.0

        return direction

    def _process_model_outputs(self, outputs):
        """
         A bit of heuristics in the control, to eventually make car faster, for instance.
        Returns:

        """
        steer, throttle, brake = outputs[0], outputs[1], outputs[2]
        if brake < 0.05:
            brake = 0.0

        if throttle > brake:
            brake = 0.0

        return steer, throttle, brake
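    # Quick illustration (not in the original) of the heuristics above:
    #   _process_model_outputs([0.10, 0.50, 0.03]) -> (0.10, 0.50, 0.0)   # brake < 0.05 is dropped
    #   _process_model_outputs([0.00, 0.40, 0.30]) -> (0.00, 0.40, 0.0)   # throttle > brake wins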

    def _expand_commands(self, topological_plan):
        """ The idea is to make the intersection indications to last longer"""

        # O(2*N) algorithm , probably it is possible to do in O(N) with queues.

        # Get the index where curves start and end
        curves_start_end = []
        inside = False
        start = -1
        current_curve = RoadOption.LANEFOLLOW
        for index in range(len(topological_plan)):

            command = topological_plan[index][1]
            if command != RoadOption.LANEFOLLOW and not inside:
                inside = True
                start = index
                current_curve = command

            if command == RoadOption.LANEFOLLOW and inside:
                inside = False
                # End now is the index.
                curves_start_end.append([start, index, current_curve])
                if start == -1:
                    raise ValueError("End of curve without start")

                start = -1

        for start_end_index_command in curves_start_end:
            start_index = start_end_index_command[0]
            end_index = start_end_index_command[1]
            command = start_end_index_command[2]

            # Add the backwards curves (before the beginning)
            for index in range(1, self._expand_command_front + 1):
                changed_index = start_index - index
                if changed_index >= 0:  # index 0 is a valid waypoint too
                    topological_plan[changed_index] = (
                        topological_plan[changed_index][0], command)

            # add the ones after the end
            for index in range(0, self._expand_command_back):
                changed_index = end_index + index
                if changed_index < len(topological_plan):
                    topological_plan[changed_index] = (
                        topological_plan[changed_index][0], command)

        return topological_plan
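# A toy check of the command expansion above (a sketch, not part of the original
# agent; `agent` stands for a constructed CoILBaselineCEXP). With
# _expand_command_front = 2 and _expand_command_back = 1, one LEFT segment grows
# two waypoints backwards and one forwards:
#
#   plan = [(None, RoadOption.LANEFOLLOW)] * 4 + [(None, RoadOption.LEFT)] * 2 \
#          + [(None, RoadOption.LANEFOLLOW)] * 4
#   agent._expand_command_front, agent._expand_command_back = 2, 1
#   expanded = agent._expand_commands(plan)
#   # commands become: LANEFOLLOW x2, LEFT x5 (indices 2..6), LANEFOLLOW x3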
Example #3
def execute(gpu, exp_batch, exp_alias):
    # We set the visible cuda devices
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu

    # At this point the log file with the correct naming is created.
    merge_with_yaml(os.path.join(exp_batch, exp_alias + '.yaml'))
    set_type_of_process('validation')

    sys.stdout = open(str(os.getpid()) + ".out", "a", buffering=1)

    if monitorer.get_status(exp_batch, exp_alias,
                            g_conf.PROCESS_NAME)[0] == "Finished":
        # TODO: print some cool summary or not ?
        return

    # Define the dataset. This structure has __getitem__ redefined so that you
    # can access the HDF5 file positions from the root directory as in a vector.
    full_dataset = os.path.join(os.environ["COIL_DATASET_PATH"],
                                g_conf.DATASET_NAME)

    dataset = CoILDataset(full_dataset,
                          transform=transforms.Compose([transforms.ToTensor()
                                                        ]))

    # Creates the sampler. This part is responsible for managing the keys. It divides
    # all keys depending on the measurements and produces a set of keys for each batch.

    # The data loader is the multi-threaded module from pytorch that spawns a
    # number of workers to fetch all the data.
    # TODO: batch size and number of workers should go to some configuration file
    data_loader = torch.utils.data.DataLoader(dataset,
                                              batch_size=120,
                                              shuffle=False,
                                              num_workers=12,
                                              pin_memory=True)

    # TODO: here there is clearly a possibility to make a cool "conditioning" system.
    model = CoILModel(g_conf.MODEL_NAME)
    model.cuda()

    # TODO: The checkpoint will continue, so should the logs restart or continue where they were?

    latest = get_latest_evaluated_checkpoint()
    if latest is None:  # when nothing has been tested yet, get_latest returns None; we fix that
        latest = 0

    print(dataset.meta_data)

    while not maximun_checkpoint_reach(latest, g_conf.TEST_SCHEDULE):

        if is_next_checkpoint_ready(g_conf.TEST_SCHEDULE):

            latest = get_next_checkpoint(g_conf.TEST_SCHEDULE)

            checkpoint = torch.load(
                os.path.join('_logs', exp_batch, exp_alias, 'checkpoints',
                             str(latest) + '.pth'))
            checkpoint_iteration = checkpoint['iteration']
            print("Validation loaded ", checkpoint_iteration)

            for data in data_loader:

                input_data, labels = data
                control_position = np.where(
                    dataset.meta_data[:, 0] == 'control')[0][0]
                speed_position = np.where(
                    dataset.meta_data[:, 0] == 'speed_module')[0][0]
                print(torch.squeeze(input_data['rgb']).shape)

                print(control_position)
                print(speed_position)
                # Note: maybe we could also check the other branches?
                output = model.forward_branch(
                    torch.squeeze(input_data['rgb']).cuda(),
                    labels[:, speed_position, :].cuda(),
                    labels[:, control_position, :].cuda())
                # TODO: clean this squeeze and dimension things

                for i in range(input_data['rgb'].shape[0]):

                    coil_logger.write_on_csv(
                        checkpoint_iteration,
                        [output[i][0], output[i][1], output[i][2]])

                #loss = criterion(output, labels)

                #loss.backward()

                #optimizer.step()

                #shutil.copyfile(filename, 'model_best.pth.tar')
        else:
            time.sleep(1)
            print("Waiting for the next Validation")
Example #4
def execute(gpu, exp_batch, exp_alias, validation_dataset, suppress_output):
    latest = None
    try:
        # We set the visible cuda devices
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu

        # At this point the log file with the correct naming is created.
        merge_with_yaml(os.path.join('configs', exp_batch,
                                     f'{exp_alias}.yaml'))
        # The validation dataset is always fully loaded, so we fix a very high number of hours
        g_conf.NUMBER_OF_HOURS = 10000
        set_type_of_process(process_type='validation',
                            param=validation_dataset)

        # Save the output to a file if so desired
        if suppress_output:
            save_output(exp_alias)

        # Define the dataset. This structure has __getitem__ redefined so that you
        # can access the HDF5 file positions from the root directory as in a vector.
        full_dataset = os.path.join(os.environ["COIL_DATASET_PATH"],
                                    validation_dataset)
        augmenter = Augmenter(None)
        # Definition of the dataset to be used. Preload name is just the validation data name
        dataset = CoILDataset(full_dataset,
                              transform=augmenter,
                              preload_name=validation_dataset,
                              process_type='validation')

        # Creates the sampler. This part is responsible for managing the keys. It divides
        # all keys depending on the measurements and produces a set of keys for each batch.

        # The data loader is the multi-threaded module from pytorch that spawns a
        # number of workers to fetch all the data.
        data_loader = torch.utils.data.DataLoader(
            dataset,
            batch_size=g_conf.BATCH_SIZE,
            shuffle=False,
            num_workers=g_conf.NUMBER_OF_LOADING_WORKERS,
            pin_memory=True)

        model = CoILModel(g_conf.MODEL_TYPE, g_conf.MODEL_CONFIGURATION,
                          g_conf.SENSORS).cuda()
        # The window used to keep track of the trainings
        l1_window = []
        latest = get_latest_evaluated_checkpoint()
        if latest is not None:  # i.e. a checkpoint was already evaluated before
            l1_window = coil_logger.recover_loss_window(
                validation_dataset, None)

        # Keep track of the best loss and the iteration where it happens
        best_loss = 1000
        best_loss_iter = 0

        print(20 * '#')
        print('Starting validation!')
        print(20 * '#')

        # Check if the maximum checkpoint for validating has been reached
        while not maximum_checkpoint_reached(latest):
            # Wait until the next checkpoint is ready (assuming this is run whilst training the model)
            if is_next_checkpoint_ready(g_conf.TEST_SCHEDULE):
                # Get next checkpoint for validation according to the test schedule and load it
                latest = get_next_checkpoint(g_conf.TEST_SCHEDULE)
                checkpoint = torch.load(
                    os.path.join('_logs', exp_batch, exp_alias, 'checkpoints',
                                 f'{latest}.pth'))
                checkpoint_iteration = checkpoint['iteration']

                model.load_state_dict(checkpoint['state_dict'])
                model.eval()  # Turn off dropout and batchnorm (if any)
                print(f"Validation loaded, checkpoint {checkpoint_iteration}")

                # Main metric will be the used loss for training the network
                criterion = Loss(g_conf.LOSS_FUNCTION)
                checkpoint_average_loss = 0

                # Counter
                iteration_on_checkpoint = 0

                with torch.no_grad():  # save some computation/memory
                    for data in data_loader:
                        # Compute the forward pass on a batch from the validation dataset
                        controls = data['directions'].cuda()
                        img = torch.squeeze(data['rgb']).cuda()
                        speed = dataset.extract_inputs(
                            data).cuda()  # this might not always be speed

                        # For auxiliary metrics
                        output = model.forward_branch(img, speed, controls)

                        # For the loss function
                        branches = model(img, speed)
                        loss_function_params = {
                            'branches': branches,
                            'targets': dataset.extract_targets(data).cuda(),
                            'controls': controls,
                            'inputs': speed,
                            'branch_weights': g_conf.BRANCH_LOSS_WEIGHT,
                            'variable_weights': g_conf.VARIABLE_WEIGHT
                        }
                        # It could be either waypoints or direct control
                        if 'waypoint1_angle' in g_conf.TARGETS:
                            write_waypoints_output(checkpoint_iteration,
                                                   output)
                        else:
                            write_regular_output(checkpoint_iteration, output)

                        loss, _ = criterion(loss_function_params)
                        loss = loss.data.tolist()

                        # Log a random position
                        position = random.randint(
                            0,
                            len(output.data.tolist()) - 1)

                        coil_logger.add_message(
                            'Iterating', {
                                'Checkpoint':
                                latest,
                                'Iteration':
                                f'{iteration_on_checkpoint * g_conf.BATCH_SIZE}/{len(dataset)}',
                                f'Validation Loss ({g_conf.LOSS_FUNCTION})':
                                loss,
                                'Output':
                                output[position].data.tolist(),
                                'GroundTruth':
                                dataset.extract_targets(
                                    data)[position].data.tolist(),
                                'Inputs':
                                dataset.extract_inputs(data)
                                [position].data.tolist()
                            }, latest)

                        # We get the average with a growing list of values
                        # Thanks to John D. Cook: http://www.johndcook.com/blog/standard_deviation/
                        iteration_on_checkpoint += 1
                        checkpoint_average_loss += (
                            loss -
                            checkpoint_average_loss) / iteration_on_checkpoint
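                        # Aside (not in the original): with avg_0 = 0 the update
                        #   avg_n = avg_{n-1} + (x_n - avg_{n-1}) / n
                        # yields the exact mean of x_1..x_n without storing them.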

                        print(
                            f"\rProgress: {100 * iteration_on_checkpoint * g_conf.BATCH_SIZE / len(dataset):3.4f}% - "
                            f"Average Loss ({g_conf.LOSS_FUNCTION}): {checkpoint_average_loss:.16f}",
                            end='')
                """
                    ########
                    Finish a round of validation, write results, wait for the next
                    ########
                """

                coil_logger.add_scalar(
                    f'Validation Loss ({g_conf.LOSS_FUNCTION})',
                    checkpoint_average_loss, latest, True)

                # Let's visualize the distribution of the loss
                coil_logger.add_histogram(
                    f'Validation Checkpoint Loss ({g_conf.LOSS_FUNCTION})',
                    checkpoint_average_loss, latest)

                if checkpoint_average_loss < best_loss:
                    best_loss = checkpoint_average_loss
                    best_loss_iter = latest

                coil_logger.add_message(
                    'Iterating', {
                        'Summary': {
                            'Loss': checkpoint_average_loss,
                            'BestLoss': best_loss,
                            'BestLossCheckpoint': best_loss_iter
                        },
                        'Checkpoint': latest
                    }, latest)

                l1_window.append(checkpoint_average_loss)
                coil_logger.write_on_error_csv(validation_dataset,
                                               checkpoint_average_loss, latest)

                # If we are using the finish when validation stops, we check the current checkpoint
                if g_conf.FINISH_ON_VALIDATION_STALE is not None:
                    if dlib.count_steps_without_decrease(l1_window) > 3 and \
                            dlib.count_steps_without_decrease_robust(l1_window) > 3:
                        coil_logger.write_stop(validation_dataset, latest)
                        break

            else:
                latest = get_latest_evaluated_checkpoint()
                time.sleep(1)

                coil_logger.add_message('Loading',
                                        {'Message': 'Waiting Checkpoint'})
                print("Waiting for the next Validation")

        print('\n' + 20 * '#')
        print('Finished validation!')
        print(20 * '#')
        coil_logger.add_message('Finished', {})

    except KeyboardInterrupt:
        coil_logger.add_message('Error', {'Message': 'Killed By User'})
        # We erase the output that was unfinished due to some process stop.
        if latest is not None:
            coil_logger.erase_csv(latest)

    except RuntimeError as e:
        if latest is not None:
            coil_logger.erase_csv(latest)
        coil_logger.add_message('Error', {'Message': str(e)})

    except:
        traceback.print_exc()
        coil_logger.add_message('Error', {'Message': 'Something Happened'})
        # We erase the output that was unfinished due to some process stop.
        if latest is not None:
            coil_logger.erase_csv(latest)
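# Hypothetical invocation (a sketch; the argument values are illustrative only):
#   execute(gpu='0', exp_batch='nocrash', exp_alias='resnet34imnet',
#           validation_dataset='Town01W1', suppress_output=True)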
Example #5
class CoILAgent(Agent):
    def __init__(self, checkpoint):

        Agent.__init__(self)

        self.checkpoint = checkpoint  # We save the checkpoint for some interesting future use.
        self.model = CoILModel(g_conf.MODEL_NAME)

        self.model.load_state_dict(checkpoint['state_dict'])

        self.model.cuda()

    def run_step(self, measurements, sensor_data, directions, target):

        #control_agent = self._agent.run_step(measurements, None, target)

        speed = torch.cuda.FloatTensor(
            [measurements.player_measurements.forward_speed]).unsqueeze(0)
        print("Speed shape ", speed)
        directions_tensor = torch.cuda.LongTensor([directions])
        model_outputs = self.model.forward_branch(
            self._process_sensors(sensor_data), speed, directions_tensor)

        print(model_outputs)

        steer, throttle, brake = self._process_model_outputs(
            model_outputs[0], measurements.player_measurements.forward_speed)

        #control = self.compute_action(,
        #                              ,
        #                              directions)
        control = carla_protocol.Control()
        control.steer = steer
        control.throttle = throttle
        control.brake = brake
        # if self._auto_pilot:
        #    control.steer = control_agent.steer
        # TODO: adapt the client side agent for the new version. ( PROBLEM )
        #control.throttle = control_agent.throttle
        #control.brake = control_agent.brake

        # TODO: maybe change to a more meaningful message?
        return control

    def _process_sensors(self, sensors):

        iteration = 0
        for name, size in g_conf.SENSORS.items():

            sensor = sensors[name].data[
                g_conf.IMAGE_CUT[0]:g_conf.IMAGE_CUT[1], ...]
            if sensors[name].type == 'SemanticSegmentation':

                # TODO: the camera name has to be synchronized with what is in the experiment...
                sensor = join_classes(sensor)

                sensor = sensor[:, :, np.newaxis]

                image_transform = transforms.Compose([
                    transforms.ToTensor(),
                    transforms.Resize((size[1], size[2]),
                                      interpolation=Image.NEAREST),
                    iag.ToGPU(),
                    iag.Multiply((1 / (number_of_seg_classes - 1)))
                ])
            else:

                image_transform = transforms.Compose([
                    transforms.ToPILImage(),
                    transforms.Resize((size[1], size[2])),
                    transforms.ToTensor(),
                    transforms.Normalize((0, 0, 0), (255, 255, 255)),
                    iag.ToGPU()
                ])

            sensor = np.swapaxes(sensor, 0, 1)

            sensor = np.flip(sensor.transpose((2, 0, 1)), axis=0)

            if iteration == 0:
                image_input = image_transform(sensor)
            else:
                # the transform must also be applied to the later sensors
                # before concatenating them onto the batch tensor
                image_input = torch.cat(
                    (image_input, image_transform(sensor)), 0)

            iteration += 1

        image_input = image_input.unsqueeze(0)

        return image_input

    def _process_model_outputs(self, outputs, speed):
        """
         A bit of heuristics in the control, to eventually make car faster, for instance.
        Returns:

        """
        steer, throttle, brake = outputs[0], outputs[1], outputs[2]
        if brake < 0.2:
            brake = 0.0

        if throttle > brake:
            brake = 0.0
        else:
            throttle = throttle * 2
        if speed > 35.0 and brake == 0.0:
            throttle = 0.0

        return steer, throttle, brake
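    # Quick illustration (not in the original) of the heuristics above:
    #   outputs=[0.0, 0.6, 0.1], speed=40.0 -> (0.0, 0.0, 0.0)  # throttle cut above 35
    #   outputs=[0.0, 0.1, 0.3], speed=10.0 -> (0.0, 0.2, 0.3)  # weak throttle doubled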

    """

    def compute_action(self, sensors, speed, direction):

        capture_time = time.time()


        sensor_pack = []



        for i in range(len(sensors)):

            sensor = sensors[i]

            sensor = sensor[g_conf.IMAGE_CUT[0]:g_conf.IMAGE_CUT[1], :]

            if g_conf.param.SENSORS.keys()[i] == 'rgb':

                sensor = scipy.misc.imresize(sensor, [self._config.sensors_size[i][0],
                                                      self._config.sensors_size[i][1]])


            elif g_conf.param.SENSORS.keys()[i] == 'labels':

                sensor = scipy.misc.imresize(sensor, [self._config.sensors_size[i][0],
                                                      self._config.sensors_size[i][1]],
                                             interp='nearest')

                sensor = join_classes(sensor) * int(255 / (number_of_seg_classes - 1))

                sensor = sensor[:, :, np.newaxis]

            sensor_pack.append(sensor)

        if len(sensor_pack) > 1:

            image_input = np.concatenate((sensor_pack[0], sensor_pack[1]), axis=2)

        else:
            image_input = sensor_pack[0]

        image_input = image_input.astype(np.float32)
        image_input = np.multiply(image_input, 1.0 / 255.0)
        

        image_input = sensors[0]

        image_input = image_input.astype(np.float32)
        image_input = np.multiply(image_input, 1.0 / 255.0)
        # TODO: This will of course depend on the model , if it is based on sequences there are
        # TODO: different requirements
        #tensor = self.model(image_input)
        outputs = self.model.forward_branch(image_input, speed, direction)



        return control  # ,machine_output_functions.get_intermediate_rep(image_input,speed,self._config,self._sess,self._train_manager)

    """
    """
Example #6
class MPSCAgent(AutonomousAgent):
	def setup(self, path_to_config_file):

		yaml_conf, checkpoint_number = checkpoint_parse_configuration_file(path_to_config_file)

		# Take the checkpoint name and load it
		checkpoint = torch.load(os.path.join('/', os.path.join(*os.path.realpath(__file__).split('/')[:-2]),
											  '_logs',
											 yaml_conf.split('/')[-2], yaml_conf.split('/')[-1].split('.')[-2]
											 , 'checkpoints', str(checkpoint_number) + '.pth'))

		# merge the specific agent config with global config _g_conf
		merge_with_yaml(os.path.join('/', os.path.join(*os.path.realpath(__file__).split('/')[:-2]),
									 yaml_conf))

		self.checkpoint = checkpoint  # We save the checkpoint for some interesting future use.
		# TODO: retrain the model with MPSC
		self._model = CoILModel(g_conf.MODEL_TYPE, g_conf.MODEL_CONFIGURATION)
		self.first_iter = True
		logging.info("Setup Model")
		# Load the model and set it up for evaluation
		self._model.load_state_dict(checkpoint['state_dict'])
		self._model.cuda()
		self._model.eval()
		self.latest_image = None
		self.latest_image_tensor = None
		# We make the curve commands last longer
		self._expand_command_front = 5
		self._expand_command_back = 3
		# check map waypoint format => carla_data_provider & http://carla.org/2018/11/16/release-0.9.1/
		# e.g. from map.get_waypoint Waypoint(Transform(Location(x=338.763, y=226.453, z=0), Rotation(pitch=360, yaw=270.035, roll=0)))
		self.track = Track.ALL_SENSORS_HDMAP_WAYPOINTS # specify available track info, see autonomous_agent.py

	def sensors(self):
		# currently give the full suite of available sensors
		# check the config/installation of the sensors => https://carla.readthedocs.io/en/latest/cameras_and_sensors/
		sensors = [{'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'roll':0.0, 'pitch':0.0, 'yaw': 0.0,
					'width': 800, 'height': 600, 'fov':100, 'id': 'Center'},
				   {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0,
					'yaw': -45.0, 'width': 800, 'height': 600, 'fov': 100, 'id': 'Left'},
				   {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 45.0,
					'width': 800, 'height': 600, 'fov': 100, 'id': 'Right'},
				   {'type': 'sensor.lidar.ray_cast', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0,
					'yaw': -45.0, 'id': 'LIDAR'},
				   {'type': 'sensor.other.gnss', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'id': 'GPS'},
				   {'type': 'sensor.can_bus', 'reading_frequency': 25, 'id': 'can_bus'},
				   {'type': 'sensor.hd_map', 'reading_frequency': 1, 'id': 'hdmap'},
				  ]
		return sensors

	def run_step(self, input_data, timestamp):
		# the core method
		# TODO
		# 1. request current localization 
		# input_data is obtained from sensors. => autonomous_agent.py def __call__(self)
		for key, value in input_data.items():
			print("input_data ", key, value)


# Sample input_data (debug dump, trimmed to the recoverable structure):
# ======[Agent] Wallclock_time = 2019-07-08 14:26:54.522155 / Sim_time = 1.45
#   'GPS'     -> (3755, array([49.00202793, 8.00463308, 1.58916414]))
#   'can_bus' -> (43, {'speed': ..., 'transform': ..., 'wheels': [...], ...})
#   'rgb'     -> (3753, 600x800x4 uint8 BGRA array)
# Direction: RoadOption.LANEFOLLOW
# ego_trans: Transform(Location(x=338.763, y=226.453, z=-0.0109),
#            Rotation(pitch=0.0001, yaw=-89.9654, roll=-0.0003))

		localization = input_data['GPS']
		directions = self._get_current_direction(input_data['GPS'][1])
		logging.debug("Directions {}".format(directions))


		# 2. get recommended action from the NN controller (copy from CoILBaseline)
		# Take the forward speed and normalize it for it to go from 0-1
		norm_speed = input_data['can_bus'][1]['speed'] / g_conf.SPEED_FACTOR
		norm_speed = torch.cuda.FloatTensor([norm_speed]).unsqueeze(0)
		directions_tensor = torch.cuda.LongTensor([directions])
		# End-to-end part, feed in images from rgb sensor, then parse network output as controller
		# Compute the forward pass processing the sensors got from CARLA.
		model_outputs = self._model.forward_branch(self._process_sensors(input_data['rgb'][1]),
												   norm_speed,
												   directions_tensor)
		steer, throttle, brake = self._process_model_outputs(model_outputs[0])

		# 3. use inner-loop to simulate/approximate vehicle model
		# save the NN output as vehicle control
		sim_control = carla.VehicleControl()
		sim_control.steer = float(steer)
		sim_control.throttle = float(throttle)
		sim_control.brake = float(brake)
		logging.debug("inner loop for sim_control", sim_control)
		# TODO
		# copy a "parallel world" and create a "virtual agent" that has the same state with ego_vehicle
		sim_world = self.world # TODO: check how to copy the world, roads info are necessary, the rest optional
		sim_ego = sim_world.create_ego_vehicle(current_ego_states)

		sim_world.agent_instance = getattr(sim_world.module_agent, sim_world.module_agent.__name__)(args.config)
		correct_sensors, error_message = sim_world.valid_sensors_configuration(sim_world.sim_agent, sim_world.track)
		
		# pass the sim_control to virtual agent and run T timesteps
		sim_ego.apply_control(sim_control)
		# use current model to predict the following state-action series
		MPSC_controls = []  # TODO: check where this should be initialized
		for i in range(T):  # T: prediction horizon, assumed defined elsewhere
			sim_ego.run_step() # TODO def run_step, update for sim_ego
			sim_ego.update()
			# 4. use MPSC to check safety at each future timestep
			safe = MPSC.check_safety(sim_ego.state, safety_boundary)
			
			if not safe:
				# if not safe, obtain MPSC control output
				logging.debug("use MPSC controller")
				control = MPSC_control  # assumed to come from the MPSC solver (pseudocode)
				MPSC_controls.append(MPSC_control)  # collect all "safe" outputs
				# 7. execute MPSC control and add it to new dataset
				break
			else:
				if i < T-1:
					continue
				else: # final step
					# if safe within all T timesteps, proceed to  use NN control output
					logging.debug("use NN controller")
					control = sim_control
		# 8. retrain the network and/or do policy aggregation
		if len(MPSC_controls):
			self._model.train(self._model, MPSC_controls)  # pseudocode: retrain on the collected MPSC outputs

		logging.debug("Control output ", control)
		# There is the possibility to replace some of the predictions with oracle predictions.
		self.first_iter = False
		return control
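# A toy stand-in for the undefined MPSC.check_safety used above (pure
# illustration under assumed semantics, not the real MPSC controller): a state
# is considered safe when every component stays inside its [low, high] bound.
def check_safety(state, safety_boundary):
    """state: iterable of floats; safety_boundary: iterable of (low, high) pairs."""
    return all(lo <= s <= hi for s, (lo, hi) in zip(state, safety_boundary))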
Example #7
class CoILAgent(object):

    def __init__(self, checkpoint, town_name, carla_version='0.84'):

        # Set the carla version that is going to be used by the interface
        self._carla_version = carla_version 
        self.checkpoint = checkpoint  # We save the checkpoint for some interesting future use.
        self._model = CoILModel(g_conf.MODEL_TYPE, g_conf.MODEL_CONFIGURATION)
        self.first_iter = True
        # Load the model and set it up for evaluation
        self._model.load_state_dict(checkpoint['state_dict'])
        self._model.cuda()
        self._model.eval()

        # this entire segment is for loading models for ensemble evaluation - take care with the paths and checkpoints
        '''
        self.weights = [0.25, 0.25, 0.25, 0.25] # simple ensemble
        self.model_ids = ['660000', '670000', '1070000', '2640000'] # model checkpoints
        self.models_dir = '/is/sg2/aprakash/Projects/carla_autonomous_driving/code/coiltraine/_logs/ensemble'
        self._ensemble_model_list = []
        for i in range(len(self.model_ids)):
            curr_checkpoint = torch.load(self.models_dir+'/resnet34imnet10S1/checkpoints/'+self.model_ids[i]+'.pth')
            self._ensemble_model_list.append(CoILModel(g_conf.MODEL_TYPE, g_conf.MODEL_CONFIGURATION))
            self._ensemble_model_list[i].load_state_dict(curr_checkpoint['state_dict'])
            self._ensemble_model_list[i].cuda().eval()
        '''
        self.latest_image = None
        self.latest_image_tensor = None

        # for image corruptions
        self.corruption_number = None
        self.severity = None
        
        if g_conf.USE_ORACLE or g_conf.USE_FULL_ORACLE: # for evaluating expert
            self.control_agent = CommandFollower(town_name)

    def run_step(self, measurements, sensor_data, directions, target, **kwargs):
        """
            Run a step on the benchmark simulation
        Args:
            measurements: All the float measurements from CARLA ( Just speed is used)
            sensor_data: All the sensor data used on this benchmark
            directions: The directions, high level commands
            target: Final objective. Not used when the agent is predicting all outputs.

        Returns:
            Controls for the vehicle on the CARLA simulator.

        """
        # only required if using corruptions module
        # self.corruption_number = kwargs.get('corruption_number', None)
        # self.severity = kwargs.get('severity', None)

        # Take the forward speed and normalize it for it to go from 0-1
        norm_speed = measurements.player_measurements.forward_speed / g_conf.SPEED_FACTOR
        norm_speed = torch.cuda.FloatTensor([norm_speed]).unsqueeze(0)
        directions_tensor = torch.cuda.LongTensor([directions])
        # Compute the forward pass processing the sensors got from CARLA.
        model_outputs = self._model.forward_branch(self._process_sensors(sensor_data), norm_speed,
                                                 directions_tensor)
        # run forward pass using felipe model
        # model_outputs_felipe = self._model_felipe.forward_branch(self._process_sensors(sensor_data), norm_speed,
        #                                           directions_tensor)
        
        # model_outputs[0] = torch.FloatTensor([(model_outputs[0][i].item()+model_outputs_felipe[0][i].item())/2.0 for i in range(3)]).cuda()
        steer, throttle, brake = self._process_model_outputs(model_outputs[0])
        # steer_f, throttle_f, brake_f = self._process_model_outputs(model_outputs_felipe[0])

        # ensemble
        '''
        steer_c = []
        throttle_c = []
        brake_c = []
        for i in range(len(self.model_ids)):
            mo = self._ensemble_model_list[i].forward_branch(self._process_sensors(sensor_data), norm_speed,
                                                  directions_tensor)
            s, t, b = self._process_model_outputs(mo[0])
            steer_c.append(s)
            throttle_c.append(t)
            brake_c.append(b)
        '''
        if self._carla_version == '0.9':
            import carla
            control = carla.VehicleControl()
        else:
            control = VehicleControl()
        # single model
        control.steer = float(steer)
        control.throttle = float(throttle)
        control.brake = float(brake)

        # ensemble
        # control.steer = float(np.average(steer_c, weights=self.weights))
        # control.throttle = float(np.average(throttle_c, weights=self.weights))
        # control.brake = float(np.average(brake_c, weights=self.weights))

        # There is the possibility to replace some of the predictions with oracle predictions.
        if g_conf.USE_ORACLE:
            control.steer, control.throttle, control.brake = self._get_oracle_prediction(
                measurements, sensor_data, target)

        if self.first_iter:
            coil_logger.add_message('Iterating', {"Checkpoint": self.checkpoint['iteration'],
                                                  'Agent': str(control.steer)},
                                    self.checkpoint['iteration'])
        self.first_iter = False

        return control

    # define run step for carla 9
    def run_step_carla9(self, observations):
        norm_speed = np.linalg.norm(observations['velocity'])/g_conf.SPEED_FACTOR
        norm_speed = torch.cuda.FloatTensor([norm_speed]).unsqueeze(0)
        directions_tensor = torch.cuda.LongTensor([int(observations['command'])])
        # print ('rgb: ', observations['big_cam'].shape)
        # print ('velocity: ', observations['velocity'])
        # print ('norm velocity: ', np.linalg.norm(observations['velocity']))
        # print ('norm_speed: ', norm_speed.shape, norm_speed.item())
        # print ('directions_tensor: ', directions_tensor.shape, directions_tensor.item())

        model_outputs = self._model.forward_branch(self._process_sensors(observations), norm_speed,
                                                  directions_tensor)

        steer, throttle, brake = self._process_model_outputs(model_outputs[0])

        if self._carla_version == '0.9':
            import carla
            control = carla.VehicleControl()
        else:
            control = VehicleControl()
        # single model
        control.steer = float(steer)
        control.throttle = float(throttle)
        control.brake = float(brake)
        
        return control

    def get_attentions(self, layers=None):
        """

        Returns
            The activations obtained from the first layers of the latest iteration.

        """
        if layers is None:
            layers = [0, 1, 2]
        if self.latest_image_tensor is None:
            raise ValueError('No step has been run yet, so there is no image '
                             'to compute the activations from. Try calling run_step first.')
        all_layers = self._model.get_perception_layers(self.latest_image_tensor)
        cmap = plt.get_cmap('inferno')
        attentions = []
        for layer in layers:
            y = all_layers[layer]
            att = torch.abs(y).mean(1)[0].data.cpu().numpy()
            att = att / att.max()
            att = cmap(att)
            att = np.delete(att, 3, 2)
            attentions.append(imresize(att, [88, 200]))
            # attentions.append(np.array(Image.fromarray(sensor).resize((200, 88))))
        return attentions

    def _process_sensors(self, sensors):

        iteration = 0
        for name, size in g_conf.SENSORS.items():

            if self._carla_version == '0.9':
                sensor = sensors[name][g_conf.IMAGE_CUT[0]:g_conf.IMAGE_CUT[1], ...]
            else:
                sensor = sensors[name].data[g_conf.IMAGE_CUT[0]:g_conf.IMAGE_CUT[1], ...]

            sensor = scipy.misc.imresize(sensor, (size[1], size[2]))  # deprecated
            # sensor = np.array(Image.fromarray(sensor).resize((size[2], size[1]))) # for running corruptions
            '''
            # corrupt the image here
            # print ('out of corruption: ', self.corruption_number, self.severity)
            if self.corruption_number is not None and self.severity is not None:
                # print ('in corruption: ', self.corruption_number, self.severity)
                sensor = corrupt(sensor, corruption_number=self.corruption_number, 
                                        severity=self.severity+1)
            '''
            self.latest_image = sensor

            # HWC -> CHW (the original swapaxes + transpose pair collapses to one transpose)
            sensor = np.transpose(sensor, (2, 0, 1))

            sensor = torch.from_numpy(sensor / 255.0).type(torch.FloatTensor).cuda()

            if iteration == 0:
                image_input = sensor
            else:
                image_input = torch.cat((image_input, sensor), 0)

            iteration += 1
    
        image_input = image_input.unsqueeze(0)

        self.latest_image_tensor = image_input

        return image_input

    def _process_model_outputs(self, outputs):
        """
         A bit of heuristics in the control, to eventually make car faster, for instance.
        Returns:

        """
        steer, throttle, brake = outputs[0].item(), outputs[1].item(), outputs[2].item()
        # print ('steer: ', steer, 'throttle: ', throttle, 'brake: ', brake)
        
        # these heuristics are a part of the original benchmark, evaluation doesn't run properly without these
        if brake < 0.05:
            brake = 0.0

        if throttle > brake:
            brake = 0.0
        
        # print ('steer after heuristic: ', steer, 'throttle after heuristic: ', throttle, 'brake after heuristic: ', brake)
        return steer, throttle, brake


    def _process_model_outputs_wp(self, outputs):
        """
         A bit of heuristics in the control, to eventually make car faster, for instance.
        Returns:

        """
        wpa1, wpa2, throttle, brake = outputs[3], outputs[4], outputs[1], outputs[2]
        if brake < 0.2:
            brake = 0.0

        if throttle > brake:
            brake = 0.0

        steer = 0.7 * wpa2

        if steer > 0:
            steer = min(steer, 1)
        else:
            steer = max(steer, -1)

        return steer, throttle, brake

    def _get_oracle_prediction(self, measurements, sensor_data, target):
        # For the oracle, the current version of sensor data is not really relevant.
        control, _ = self.control_agent.run_step(measurements, sensor_data, [], target)

        return control.steer, control.throttle, control.brake
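
# A minimal, self-contained sketch of the control heuristic implemented by
# _process_model_outputs above, runnable outside the agent. The 0.05 brake
# threshold mirrors the method; the function name and example values are
# illustrative only.
def process_outputs_sketch(steer, throttle, brake, brake_threshold=0.05):
    """Suppress small brake values and never brake while accelerating."""
    if brake < brake_threshold:
        brake = 0.0
    if throttle > brake:
        brake = 0.0
    return steer, throttle, brake

# Example: a tiny predicted brake value is treated as noise and zeroed out.
assert process_outputs_sketch(0.1, 0.5, 0.03) == (0.1, 0.5, 0.0)
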
class CoILAgent(object):
    def __init__(self,
                 checkpoint,
                 town_name,
                 carla_version='0.84',
                 vae_params=None):

        # Set the carla version that is going to be used by the interface
        self._carla_version = carla_version
        self.checkpoint = checkpoint  # We save the checkpoint for some interesting future use.
        self._model = CoILModel(g_conf.MODEL_TYPE, g_conf.MODEL_CONFIGURATION)
        self.first_iter = True
        # Load the model and set it up for evaluation
        self._model.load_state_dict(checkpoint['state_dict'])
        self._model.cuda()
        self._model.eval()

        self._vae_params = vae_params
        if g_conf.VAE_MODEL_CONFIGURATION != {}:
            # adding VAE model
            self._VAE_model = CoILModel('VAE', g_conf.VAE_MODEL_CONFIGURATION)
            self._VAE_model.cuda()
            VAE_checkpoint = torch.load(
                os.path.join('_logs', vae_params['vae_folder'],
                             vae_params['vae_exp'], 'checkpoints',
                             str(vae_params['vae_checkpoint']) + '.pth'))
            print(
                "VAE model ", str(vae_params['vae_checkpoint']),
                " already loaded from ",
                os.path.join('_logs', vae_params['vae_folder'],
                             vae_params['vae_exp'], 'checkpoints'))
            self._VAE_model.load_state_dict(VAE_checkpoint['state_dict'])
            self._VAE_model.eval()

        self.latest_image = None
        self.latest_image_tensor = None

        if g_conf.USE_ORACLE or g_conf.USE_FULL_ORACLE:
            self.control_agent = CommandFollower(town_name)

    def run_step(self, measurements, sensor_data, directions, target):
        """
            Run a step on the benchmark simulation
        Args:
            measurements: All the float measurements from CARLA (just speed is used)
            sensor_data: All the sensor data used on this benchmark
            directions: The directions, high level commands
            target: Final objective. Not used when the agent is predicting all outputs.

        Returns:
            Controls for the vehicle on the CARLA simulator.

        """

        # Take the forward speed and normalize it to the range [0, 1]
        norm_speed = measurements.player_measurements.forward_speed / g_conf.SPEED_FACTOR
        norm_speed = torch.cuda.FloatTensor([norm_speed]).unsqueeze(0)
        directions_tensor = torch.cuda.LongTensor([directions])
        # Compute the forward pass, processing the sensors received from CARLA.
        if g_conf.VAE_MODEL_CONFIGURATION != {}:
            input_data = self._process_sensors(sensor_data)
            _, _, _, z = self._VAE_model(input_data)
            model_outputs = self._model.forward_branch(z, norm_speed,
                                                       directions_tensor)

        else:
            model_outputs = self._model.forward_branch(
                self._process_sensors(sensor_data), norm_speed,
                directions_tensor)

        steer, throttle, brake = self._process_model_outputs(model_outputs[0])
        if self._carla_version == '0.9':
            import carla
            control = carla.VehicleControl()
        else:
            control = VehicleControl()
        control.steer = float(steer)
        control.throttle = float(throttle)
        control.brake = float(brake)
        # There is the possibility of replacing some of the predictions with oracle predictions.
        if g_conf.USE_ORACLE:
            _, control.throttle, control.brake = self._get_oracle_prediction(
                measurements, target)

        if self.first_iter:
            coil_logger.add_message('Iterating', {
                "Checkpoint": self.checkpoint['iteration'],
                'Agent': str(steer)
            }, self.checkpoint['iteration'])
        self.first_iter = False

        return control

    def get_attentions(self, layers=None):
        """

        Returns
            The activations obtained from the first layers of the latest iteration.

        """
        if layers is None:
            layers = [0, 1, 2]
        if self.latest_image_tensor is None:
            raise ValueError(
                'No step has been run yet; '
                'there is no image to compute activations from. Try running a step first.')
        all_layers = self._model.get_perception_layers(
            self.latest_image_tensor)
        cmap = plt.get_cmap('inferno')
        attentions = []
        for layer in layers:
            y = all_layers[layer]
            att = torch.abs(y).mean(1)[0].data.cpu().numpy()
            att = att / att.max()
            att = cmap(att)
            att = np.delete(att, 3, 2)
            attentions.append(imresize(att, [88, 200]))
        return attentions

    def _process_sensors(self, sensors):

        iteration = 0
        for name, size in g_conf.SENSORS.items():

            if self._carla_version == '0.9':
                sensor = sensors[name][g_conf.IMAGE_CUT[0]:g_conf.IMAGE_CUT[1],
                                       ...]
            else:
                sensor = sensors[name].data[
                    g_conf.IMAGE_CUT[0]:g_conf.IMAGE_CUT[1], ...]

            sensor = scipy.misc.imresize(sensor, (size[1], size[2]))

            self.latest_image = sensor

            sensor = np.swapaxes(sensor, 0, 1)

            sensor = np.transpose(sensor, (2, 1, 0))

            sensor = torch.from_numpy(sensor / 255.0).type(
                torch.FloatTensor).cuda()

            if iteration == 0:
                image_input = sensor
            else:
                image_input = torch.cat((image_input, sensor), 0)

            iteration += 1

        image_input = image_input.unsqueeze(0)

        self.latest_image_tensor = image_input

        return image_input

    def _process_model_outputs(self, outputs):
        """
         A bit of heuristics in the control, to eventually make car faster, for instance.
        Returns:

        """
        steer, throttle, brake = outputs[0], outputs[1], outputs[2]
        if brake < 0.05:
            brake = 0.0

        if throttle > brake:
            brake = 0.0

        return steer, throttle, brake

    def _process_model_outputs_wp(self, outputs):
        """
         A bit of heuristics in the control, to eventually make car faster, for instance.
        Returns:

        """
        wpa1, wpa2, throttle, brake = outputs[3], outputs[4], outputs[1], outputs[2]
        if brake < 0.2:
            brake = 0.0

        if throttle > brake:
            brake = 0.0

        steer = 0.7 * wpa2

        if steer > 0:
            steer = min(steer, 1)
        else:
            steer = max(steer, -1)

        return steer, throttle, brake

    def _get_oracle_prediction(self, measurements, target):
        # For the oracle, the current version of sensor data is not really relevant.
        control, _, _, _, _ = self.control_agent.run_step(
            measurements, [], [], target)

        return control.steer, control.throttle, control.brake
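
# A hedged sketch of the image preprocessing done by _process_sensors above,
# assuming an HWC uint8 RGB frame and that Pillow is available. Pillow's
# resize stands in for the deprecated scipy.misc.imresize; the default crop
# bounds and target size are hypothetical placeholders for g_conf.IMAGE_CUT
# and g_conf.SENSORS. The swapaxes + transpose pair in the method is
# equivalent to the single (2, 0, 1) transpose used here.
import numpy as np
import torch
from PIL import Image

def preprocess_frame_sketch(frame, image_cut=(115, 510), size=(88, 200)):
    frame = frame[image_cut[0]:image_cut[1], ...]              # vertical crop
    frame = np.array(Image.fromarray(frame).resize((size[1], size[0])))
    frame = frame.transpose(2, 0, 1)                           # HWC -> CHW
    tensor = torch.from_numpy(frame / 255.0).float()           # scale to [0, 1]
    return tensor.unsqueeze(0)                                 # add batch dim

# Example: a dummy 600x800 RGB frame becomes a 1x3x88x200 float tensor.
dummy = np.zeros((600, 800, 3), dtype=np.uint8)
assert preprocess_frame_sketch(dummy).shape == (1, 3, 88, 200)
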
Example No. 9
def execute(gpu, exp_batch, exp_alias, dataset_name, architecture,
            suppress_output):

    try:
        # Fix the random seed and set the visible cuda devices
        torch.manual_seed(2)
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu

        # Validation available for:
        # coil_unit (UNIT + task combined)
        # coil_icra (Also used for finetuned models)
        # wgangp_lsd (Our architecture)

        architecture_name = architecture
        # At this point the log file with the correct naming is created.
        if architecture_name == 'coil_unit':
            pass
        elif architecture_name == 'wgangp_lsd':
            merge_with_yaml(
                os.path.join('/home/rohitrishabh/CoilWGAN/configs', exp_batch,
                             exp_alias + '.yaml'))
            set_type_of_process('validation', dataset_name)
        elif architecture_name == 'coil_icra':
            merge_with_yaml(
                os.path.join(
                    '/home/adas/CleanedCode/CoIL_Codes/coil_20-06/configs',
                    exp_batch, exp_alias + '.yaml'))
            set_type_of_process('validation', dataset_name)

            if monitorer.get_status(exp_batch, exp_alias + '.yaml',
                                    g_conf.PROCESS_NAME)[0] == "Finished":
                # TODO: print some cool summary or not ?
                return

        if not os.path.exists('_output_logs'):
            os.mkdir('_output_logs')

        if suppress_output:
            sys.stdout = open(os.path.join(
                '_output_logs',
                g_conf.PROCESS_NAME + '_' + str(os.getpid()) + ".out"),
                              "a",
                              buffering=1)

        # Define the dataset. This structure has __getitem__ redefined so that
        # the HDF5 file positions under the root directory can be accessed as a vector.
        if dataset_name:
            full_dataset = os.path.join(os.environ["COIL_DATASET_PATH"],
                                        dataset_name)
        else:
            full_dataset = os.environ["COIL_DATASET_PATH"]

        augmenter = Augmenter(None)

        dataset = CoILDataset(full_dataset, transform=augmenter)

        # Create the sampler; this part is responsible for managing the keys. It divides
        # all keys depending on the measurements and produces a set of keys for each batch.

        # The data loader is the multi-threaded module from pytorch that launches a number of
        # workers to get all the data.
        # TODO: batch size and number of workers should go to some configuration file
        batchsize = 30
        data_loader = torch.utils.data.DataLoader(dataset,
                                                  batch_size=batchsize,
                                                  shuffle=False,
                                                  num_workers=1,
                                                  pin_memory=True)

        # TODO: here there is clearly a possibility to make a cool "conditioning" system.

        if architecture_name == 'coil_unit':
            model_task, model_gen = CoILModel('coil_unit')
            model_task, model_gen = model_task.cuda(), model_gen.cuda()
        else:
            model = CoILModel(architecture_name)
            model.cuda()

        latest = 0

        # print (dataset.meta_data)
        best_loss = 1000
        best_error = 1000
        best_loss_mini = 1000
        best_loss_iter = 0
        best_error_iter = 0
        batch_size = 30
        best_loss_ckpt = ''

        if architecture_name == 'coil_unit':
            ckpts = glob.glob('/home/rohitrishabh/UNIT_DA/outputs/' +
                              exp_alias + '/checkpoints/gen*.pt')
        else:
            ckpts = glob.glob(
                os.path.join(
                    '/home/adas/CleanedCode/CoIL_Codes/coil_20-06/_logs',
                    exp_batch, exp_alias) + '/*.pth')

        if architecture_name == 'coil_unit':
            model_task.eval()
            model_gen.eval()
        else:
            model.eval()
        ckpts = sorted(ckpts)
        # TODO: refactor how checkpoints are retrieved and organized
        for ckpt in ckpts:

            # if is_next_checkpoint_ready(g_conf.TEST_SCHEDULE):

            # latest = get_next_checkpoint(g_conf.TEST_SCHEDULE)
            # ckpt = os.path.join('/datatmp/Experiments/rohitgan/_logs', exp_batch, exp_alias
            #                         , 'checkpoints', str(latest) + '.pth')
            checkpoint = torch.load(ckpt)
            print("Validation loaded ", ckpt)
            if architecture_name == 'wgangp_lsd':
                print(ckpt, checkpoint['best_loss_iter_F'])
                model.load_state_dict(checkpoint['stateF_dict'])
                model.eval()
            elif architecture_name == 'coil_unit':
                model_task.load_state_dict(checkpoint['task'])
                model_gen.load_state_dict(checkpoint['b'])
                model_task.eval()
                model_gen.eval()
            elif architecture_name == 'coil_icra':
                model.load_state_dict(checkpoint['state_dict'])
                model.eval()

            accumulated_loss = 0
            accumulated_error = 0
            iteration_on_checkpoint = 0
            datacount = 0
            for data in data_loader:

                input_data, float_data = data

                controls = float_data[:, dataset.controls_position(), :]

                camera_angle = float_data[:, 26, :]
                camera_angle = camera_angle.cuda()
                steer = float_data[:, 0, :]
                steer = steer.cuda()
                speed = float_data[:, 10, :]
                speed = speed.cuda()

                time_use = 1.0
                car_length = 3.0
                extra_factor = 2.5
                threshold = 1.0

                pos = camera_angle > 0.0
                pos = pos.type(torch.FloatTensor)
                neg = camera_angle <= 0.0
                neg = neg.type(torch.FloatTensor)
                pos = pos.cuda()
                neg = neg.cuda()

                rad_camera_angle = math.pi * (torch.abs(camera_angle)) / 180.0
                val = extra_factor * (torch.atan(
                    (rad_camera_angle * car_length) /
                    (time_use * speed + 0.05))) / math.pi
                steer -= pos * torch.min(val, torch.Tensor([0.6]).cuda())
                steer += neg * torch.min(val, torch.Tensor([0.6]).cuda())

                steer = steer.cpu()
                float_data[:, 0, :] = steer
                float_data[:, 0, :][float_data[:, 0, :] > 1.0] = 1.0
                float_data[:, 0, :][float_data[:, 0, :] < -1.0] = -1.0
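
                # The block above applies the three-camera label correction:
                # frames recorded by the laterally mounted cameras get their
                # steering label shifted by an atan-based term (a function of
                # camera angle, car length and current speed, capped at 0.6)
                # so the model learns to steer back toward the lane center,
                # and the corrected label is clamped to [-1, 1].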

                datacount += 1
                control_position = 24
                speed_position = 10

                if architecture_name == 'wgangp_lsd':
                    embed, output = model(
                        torch.squeeze(input_data['rgb']).cuda(),
                        float_data[:, speed_position, :].cuda())

                    loss = torch.sum(
                        (output[0] -
                         dataset.extract_targets(float_data).cuda()
                         )**2).data.tolist()
                    mean_error = torch.sum(
                        torch.abs(output[0] -
                                  dataset.extract_targets(float_data).cuda())
                    ).data.tolist()

                elif architecture_name == 'coil_unit':
                    embed, n_b = model_gen.encode(
                        torch.squeeze(input_data['rgb']).cuda())
                    output = model_task(
                        embed,
                        Variable(float_data[:, speed_position, :]).cuda())

                    loss = torch.sum(
                        (output[0].data -
                         dataset.extract_targets(float_data).cuda())**2)
                    mean_error = torch.sum(
                        torch.abs(output[0].data -
                                  dataset.extract_targets(float_data).cuda()))

                elif architecture_name == 'coil_icra':
                    output = model.forward_branch(
                        torch.squeeze(input_data['rgb']).cuda(),
                        float_data[:, speed_position, :].cuda(),
                        float_data[:, control_position, :].cuda())

                    loss = torch.sum(
                        (output - dataset.extract_targets(float_data).cuda()
                         )**2).data.tolist()
                    mean_error = torch.sum(
                        torch.abs(output -
                                  dataset.extract_targets(float_data).cuda())
                    ).data.tolist()

                if loss < best_loss_mini:
                    best_loss_mini = loss

                accumulated_error += mean_error
                accumulated_loss += loss
                # error = torch.abs(output[0] - dataset.extract_targets(float_data).cuda())

                # Log a random position
                position = random.randint(0, len(float_data) - 1)
                iteration_on_checkpoint += 1

            print(datacount, len(data_loader), accumulated_loss)
            checkpoint_average_loss = accumulated_loss / float(
                datacount * batchsize)
            checkpoint_average_error = accumulated_error / float(
                datacount * batchsize)

            if checkpoint_average_loss < best_loss:
                best_loss = checkpoint_average_loss
                best_loss_iter = latest
                best_loss_ckpt = ckpt

            if checkpoint_average_error < best_error:
                best_error = checkpoint_average_error
                best_error_iter = latest

            print("current loss", checkpoint_average_loss)
            print("best_loss", best_loss)

            coil_logger.add_message(
                'Iterating', {
                    'Summary': {
                        'Error': checkpoint_average_error,
                        'Loss': checkpoint_average_loss,
                        'BestError': best_error,
                        'BestLoss': best_loss,
                        'BestLossCheckpoint': best_loss_iter,
                        'BestErrorCheckpoint': best_error_iter
                    },
                    'Checkpoint': latest
                }, latest)
            latest += 2000

        coil_logger.add_message('Finished', {})
        print("Best Validation Loss ckpt:", best_loss_ckpt)

        # TODO: DO ALL THE AMAZING LOGGING HERE, as a way to verify the status in parallel.
        # THIS SHOULD BE AN ENTIRELY PARALLEL PROCESS

    except KeyboardInterrupt:
        coil_logger.add_message('Error', {'Message': 'Killed By User'})

    except Exception:
        traceback.print_exc()

        coil_logger.add_message('Error', {'Message': 'Something Happened'})
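
# A standalone, hedged sketch of the lateral-camera steer correction used in
# the validation loop above, written with plain Python floats. The constants
# mirror the loop (car_length, extra_factor, the 0.6 cap and the 0.05 speed
# stabilizer); the function name is hypothetical.
import math

def correct_steer_sketch(steer, camera_angle_deg, speed,
                         car_length=3.0, extra_factor=2.5, time_use=1.0):
    rad = math.pi * abs(camera_angle_deg) / 180.0
    val = extra_factor * math.atan((rad * car_length) /
                                   (time_use * speed + 0.05)) / math.pi
    val = min(val, 0.6)
    # shift against the camera offset, then clamp to the valid steer range
    steer = steer - val if camera_angle_deg > 0.0 else steer + val
    return max(-1.0, min(1.0, steer))
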
Example No. 10
def execute(gpu, exp_batch, exp_alias, dataset_name, suppress_output):
    latest = None
    try:
        # We set the visible cuda devices
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu

        # At this point the log file with the correct naming is created.
        merge_with_yaml(os.path.join('configs', exp_batch,
                                     exp_alias + '.yaml'))
        # The validation dataset is always fully loaded, so we fix a very high number of hours
        g_conf.NUMBER_OF_HOURS = 10000
        set_type_of_process('validation', dataset_name)

        if not os.path.exists('_output_logs'):
            os.mkdir('_output_logs')

        if suppress_output:
            sys.stdout = open(os.path.join(
                '_output_logs', exp_alias + '_' + g_conf.PROCESS_NAME + '_' +
                str(os.getpid()) + ".out"),
                              "a",
                              buffering=1)
            sys.stderr = open(os.path.join(
                '_output_logs', exp_alias + '_err_' + g_conf.PROCESS_NAME +
                '_' + str(os.getpid()) + ".out"),
                              "a",
                              buffering=1)

        # Define the dataset.
        full_dataset = [
            os.path.join(os.environ["COIL_DATASET_PATH"], dataset_name)
        ]
        augmenter = Augmenter(None)
        # Definition of the dataset to be used. Preload name is just the validation data name
        dataset = CoILDataset(full_dataset,
                              transform=augmenter,
                              preload_names=[dataset_name])

        # The data loader is the multi-threaded module from pytorch that launches a number of
        # workers to get all the data.
        data_loader = torch.utils.data.DataLoader(
            dataset,
            batch_size=g_conf.BATCH_SIZE,
            shuffle=False,
            num_workers=g_conf.NUMBER_OF_LOADING_WORKERS,
            pin_memory=True)

        # Create model.
        model = CoILModel(g_conf.MODEL_TYPE, g_conf.MODEL_CONFIGURATION)
        # The window used to keep track of the validation loss
        l1_window = []
        # If we have evaluated a checkpoint, get the validation losses of all the previously
        # evaluated checkpoints (validation loss is used for early stopping)
        latest = get_latest_evaluated_checkpoint()
        if latest is not None:  # A checkpoint has already been evaluated
            l1_window = coil_logger.recover_loss_window(dataset_name, None)

        model.cuda()

        best_mse = 1000
        best_error = 1000
        best_mse_iter = 0
        best_error_iter = 0

        # Loop to validate all checkpoints as they are saved during training
        while not maximun_checkpoint_reach(latest, g_conf.TEST_SCHEDULE):
            if is_next_checkpoint_ready(g_conf.TEST_SCHEDULE):
                with torch.no_grad():
                    # Get and load latest checkpoint
                    latest = get_next_checkpoint(g_conf.TEST_SCHEDULE)

                    checkpoint = torch.load(
                        os.path.join('_logs', exp_batch, exp_alias,
                                     'checkpoints',
                                     str(latest) + '.pth'))
                    checkpoint_iteration = checkpoint['iteration']
                    print("Validation loaded ", checkpoint_iteration)

                    model.load_state_dict(checkpoint['state_dict'])
                    model.eval()

                    accumulated_mse = 0
                    accumulated_error = 0
                    iteration_on_checkpoint = 0
                    if g_conf.USE_REPRESENTATION_LOSS:
                        accumulated_perception_rep_mse = 0
                        accumulated_speed_rep_mse = 0
                        accumulated_intentions_rep_mse = 0
                        accumulated_rep_mse = 0
                        accumulated_perception_rep_error = 0
                        accumulated_speed_rep_error = 0
                        accumulated_intentions_rep_error = 0
                        accumulated_rep_error = 0

                    # Validation loop
                    for data in data_loader:

                        # Compute the forward pass on a batch from the validation dataset
                        controls = data['directions']

                        # Run model forward and get outputs
                        # First case corresponds to squeeze network, second case corresponds to driving model without
                        # mimicking losses, last case corresponds to mimic network
                        if "seg" in g_conf.SENSORS.keys():
                            output = model.forward_branch(
                                data,
                                dataset.extract_inputs(data).cuda(), controls,
                                dataset.extract_intentions(data).cuda())
                        elif not g_conf.USE_REPRESENTATION_LOSS:
                            output = model.forward_branch(
                                data,
                                dataset.extract_inputs(data).cuda(), controls)
                        else:
                            output, intermediate_reps = model.forward_branch(
                                data,
                                dataset.extract_inputs(data).cuda(), controls)

                        write_regular_output(checkpoint_iteration, output)

                        # Compute control loss on current validation batch and accumulate it
                        targets_to_use = dataset.extract_targets(data)

                        mse = torch.mean(
                            (output - targets_to_use.cuda())**2).data.tolist()
                        mean_error = torch.mean(
                            torch.abs(output -
                                      targets_to_use.cuda())).data.tolist()

                        accumulated_error += mean_error
                        accumulated_mse += mse

                        error = torch.abs(output - targets_to_use.cuda())

                        # Compute mimicking losses on current validation batch and accumulate it
                        if g_conf.USE_REPRESENTATION_LOSS:
                            expert_reps = dataset.extract_representations(data)
                            # First L1 losses (seg mask, speed, intention mimicking losses)
                            if g_conf.USE_PERCEPTION_REP_LOSS:
                                perception_rep_loss = torch.sum(
                                    torch.abs(intermediate_reps[0] -
                                              expert_reps[0].cuda())
                                ).data.tolist() / (3 * output.shape[0])
                            else:
                                perception_rep_loss = 0
                            if g_conf.USE_SPEED_REP_LOSS:
                                speed_rep_loss = torch.sum(
                                    torch.abs(intermediate_reps[1] -
                                              expert_reps[1].cuda())
                                ).data.tolist() / (3 * output.shape[0])
                            else:
                                speed_rep_loss = 0
                            if g_conf.USE_INTENTION_REP_LOSS:
                                intentions_rep_loss = torch.sum(
                                    torch.abs(intermediate_reps[2] -
                                              expert_reps[2].cuda())
                                ).data.tolist() / (3 * output.shape[0])
                            else:
                                intentions_rep_loss = 0
                            rep_error = g_conf.REP_LOSS_WEIGHT * (
                                perception_rep_loss + speed_rep_loss +
                                intentions_rep_loss)
                            accumulated_perception_rep_error += perception_rep_loss
                            accumulated_speed_rep_error += speed_rep_loss
                            accumulated_intentions_rep_error += intentions_rep_loss
                            accumulated_rep_error += rep_error

                            # L2 losses now
                            if g_conf.USE_PERCEPTION_REP_LOSS:
                                perception_rep_loss = torch.sum(
                                    (intermediate_reps[0] -
                                     expert_reps[0].cuda())**
                                    2).data.tolist() / (3 * output.shape[0])
                            else:
                                perception_rep_loss = 0
                            if g_conf.USE_SPEED_REP_LOSS:
                                speed_rep_loss = torch.sum(
                                    (intermediate_reps[1] -
                                     expert_reps[1].cuda())**
                                    2).data.tolist() / (3 * output.shape[0])
                            else:
                                speed_rep_loss = 0
                            if g_conf.USE_INTENTION_REP_LOSS:
                                intentions_rep_loss = torch.sum(
                                    (intermediate_reps[2] -
                                     expert_reps[2].cuda())**
                                    2).data.tolist() / (3 * output.shape[0])
                            else:
                                intentions_rep_loss = 0
                            rep_mse = g_conf.REP_LOSS_WEIGHT * (
                                perception_rep_loss + speed_rep_loss +
                                intentions_rep_loss)
                            accumulated_perception_rep_mse += perception_rep_loss
                            accumulated_speed_rep_mse += speed_rep_loss
                            accumulated_intentions_rep_mse += intentions_rep_loss
                            accumulated_rep_mse += rep_mse

                        # Log a random position
                        position = random.randint(
                            0,
                            len(output.data.tolist()) - 1)

                        # Logging
                        if g_conf.USE_REPRESENTATION_LOSS:
                            total_mse = mse + rep_mse
                            total_error = mean_error + rep_error
                            coil_logger.add_message(
                                'Iterating', {
                                    'Checkpoint': latest,
                                    'Iteration': (str(iteration_on_checkpoint * 120) +
                                                  '/' + str(len(dataset))),
                                    'MeanError': mean_error,
                                    'MSE': mse,
                                    'RepMeanError': rep_error,
                                    'RepMSE': rep_mse,
                                    'MeanTotalError': total_error,
                                    'TotalMSE': total_mse,
                                    'Output': output[position].data.tolist(),
                                    'GroundTruth': targets_to_use[position].data.tolist(),
                                    'Error': error[position].data.tolist(),
                                    'Inputs': dataset.extract_inputs(data)[position].data.tolist()
                                }, latest)
                        else:
                            coil_logger.add_message(
                                'Iterating', {
                                    'Checkpoint': latest,
                                    'Iteration': (str(iteration_on_checkpoint * 120) +
                                                  '/' + str(len(dataset))),
                                    'MeanError': mean_error,
                                    'MSE': mse,
                                    'Output': output[position].data.tolist(),
                                    'GroundTruth': targets_to_use[position].data.tolist(),
                                    'Error': error[position].data.tolist(),
                                    'Inputs': dataset.extract_inputs(data)[position].data.tolist()
                                }, latest)
                        iteration_on_checkpoint += 1

                        if g_conf.USE_REPRESENTATION_LOSS:
                            print("Iteration %d  on Checkpoint %d : Error %f" %
                                  (iteration_on_checkpoint,
                                   checkpoint_iteration, total_error))
                        else:
                            print("Iteration %d  on Checkpoint %d : Error %f" %
                                  (iteration_on_checkpoint,
                                   checkpoint_iteration, mean_error))
                    """
                        ########
                        Finish a round of validation, write results, wait for the next
                        ########
                    """
                    # Compute average L1 and L2 losses over whole round of validation and log them
                    checkpoint_average_mse = accumulated_mse / (
                        len(data_loader))
                    checkpoint_average_error = accumulated_error / (
                        len(data_loader))
                    coil_logger.add_scalar('L2 Loss', checkpoint_average_mse,
                                           latest, True)
                    coil_logger.add_scalar('Loss', checkpoint_average_error,
                                           latest, True)

                    if g_conf.USE_REPRESENTATION_LOSS:
                        checkpoint_average_perception_rep_mse = accumulated_perception_rep_mse / (
                            len(data_loader))
                        checkpoint_average_speed_rep_mse = accumulated_speed_rep_mse / (
                            len(data_loader))
                        checkpoint_average_intentions_rep_mse = accumulated_intentions_rep_mse / (
                            len(data_loader))
                        checkpoint_average_rep_mse = accumulated_rep_mse / (
                            len(data_loader))
                        checkpoint_average_total_mse = checkpoint_average_mse + checkpoint_average_rep_mse

                        checkpoint_average_perception_rep_error = accumulated_perception_rep_error / (
                            len(data_loader))
                        checkpoint_average_speed_rep_error = accumulated_speed_rep_error / (
                            len(data_loader))
                        checkpoint_average_intentions_rep_error = accumulated_intentions_rep_error / (
                            len(data_loader))
                        checkpoint_average_rep_error = accumulated_rep_error / (
                            len(data_loader))
                        checkpoint_average_total_error = checkpoint_average_error + checkpoint_average_rep_error

                        # Log L1/L2 loss terms
                        coil_logger.add_scalar(
                            'Perception Rep Loss',
                            checkpoint_average_perception_rep_mse, latest,
                            True)
                        coil_logger.add_scalar(
                            'Speed Rep Loss', checkpoint_average_speed_rep_mse,
                            latest, True)
                        coil_logger.add_scalar(
                            'Intentions Rep Loss',
                            checkpoint_average_intentions_rep_mse, latest,
                            True)
                        coil_logger.add_scalar('Overall Rep Loss',
                                               checkpoint_average_rep_mse,
                                               latest, True)
                        coil_logger.add_scalar('Total L2 Loss',
                                               checkpoint_average_total_mse,
                                               latest, True)

                        coil_logger.add_scalar(
                            'Perception Rep Error',
                            checkpoint_average_perception_rep_error, latest,
                            True)
                        coil_logger.add_scalar(
                            'Speed Rep Error',
                            checkpoint_average_speed_rep_error, latest, True)
                        coil_logger.add_scalar(
                            'Intentions Rep Error',
                            checkpoint_average_intentions_rep_error, latest,
                            True)
                        coil_logger.add_scalar('Total Rep Error',
                                               checkpoint_average_rep_error,
                                               latest, True)
                        coil_logger.add_scalar('Total Loss',
                                               checkpoint_average_total_error,
                                               latest, True)
                    else:
                        checkpoint_average_total_mse = checkpoint_average_mse
                        checkpoint_average_total_error = checkpoint_average_error

                    if checkpoint_average_total_mse < best_mse:
                        best_mse = checkpoint_average_total_mse
                        best_mse_iter = latest

                    if checkpoint_average_total_error < best_error:
                        best_error = checkpoint_average_total_error
                        best_error_iter = latest

                    # Print validation results to the terminal / logs
                    if g_conf.USE_REPRESENTATION_LOSS:
                        coil_logger.add_message(
                            'Iterating', {
                                'Summary': {
                                    'Control Error': checkpoint_average_error,
                                    'Control Loss': checkpoint_average_mse,
                                    'Rep Error': checkpoint_average_rep_error,
                                    'Rep Loss': checkpoint_average_rep_mse,
                                    'Error': checkpoint_average_total_error,
                                    'Loss': checkpoint_average_total_mse,
                                    'BestError': best_error,
                                    'BestMSE': best_mse,
                                    'BestMSECheckpoint': best_mse_iter,
                                    'BestErrorCheckpoint': best_error_iter
                                },
                                'Checkpoint': latest
                            }, latest)
                    else:
                        coil_logger.add_message(
                            'Iterating', {
                                'Summary': {
                                    'Error': checkpoint_average_error,
                                    'Loss': checkpoint_average_mse,
                                    'BestError': best_error,
                                    'BestMSE': best_mse,
                                    'BestMSECheckpoint': best_mse_iter,
                                    'BestErrorCheckpoint': best_error_iter
                                },
                                'Checkpoint': latest
                            }, latest)

                    # Save validation loss history (validation loss is used for early stopping)
                    l1_window.append(checkpoint_average_total_error)
                    coil_logger.write_on_error_csv(
                        dataset_name, checkpoint_average_total_error)

                    # Early stopping
                    if g_conf.FINISH_ON_VALIDATION_STALE is not None:
                        if dlib.count_steps_without_decrease(l1_window) > 3 and \
                                dlib.count_steps_without_decrease_robust(l1_window) > 3:
                            coil_logger.write_stop(dataset_name, latest)
                            break

            else:

                latest = get_latest_evaluated_checkpoint()
                time.sleep(1)

                coil_logger.add_message('Loading',
                                        {'Message': 'Waiting Checkpoint'})
                print("Waiting for the next Validation")

        coil_logger.add_message('Finished', {})

    except KeyboardInterrupt:
        coil_logger.add_message('Error', {'Message': 'Killed By User'})
        # We erase the output that was unfinished due to some process stop.
        if latest is not None:
            coil_logger.erase_csv(latest)

    except RuntimeError as e:
        if latest is not None:
            coil_logger.erase_csv(latest)
        coil_logger.add_message('Error', {'Message': str(e)})

    except Exception:
        traceback.print_exc()
        coil_logger.add_message('Error', {'Message': 'Something Happened'})
        # We erase the output that was unfinished due to some process stop.
        if latest is not None:
            coil_logger.erase_csv(latest)
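
# A small sketch of the dlib-based early-stopping test used above, assuming
# dlib's Python bindings are installed. count_steps_without_decrease and its
# _robust variant estimate how long the loss window has gone without a
# probable decrease; validation stops once both exceed the patience.
import dlib

def should_stop_sketch(loss_window, patience=3):
    return (dlib.count_steps_without_decrease(loss_window) > patience and
            dlib.count_steps_without_decrease_robust(loss_window) > patience)

# Example: a long plateauing window should eventually trigger the stop.
print(should_stop_sketch([0.9, 0.8, 0.81, 0.82, 0.83, 0.84, 0.85]))
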
Example No. 11
class CoILAgent(object):

    def __init__(self, checkpoint, town_name, carla_version='0.84'):

        # Set the carla version that is going to be used by the interface
        self._carla_version = carla_version
        self.checkpoint = checkpoint  # We save the checkpoint for some interesting future use.
        # Create model
        self._model = CoILModel(g_conf.MODEL_TYPE, g_conf.MODEL_CONFIGURATION)
        self.first_iter = True
        # Load the model and set it up for evaluation
        self._model.load_state_dict(checkpoint['state_dict'])
        self._model.cuda()
        self._model.eval()

        # If we are evaluating squeeze model (so we are using ground truth seg mask), 
        # also run the autopilot to get its stop intentions
        if g_conf.USE_ORACLE or g_conf.USE_FULL_ORACLE or "seg" in g_conf.SENSORS.keys():
            self.control_agent = CommandFollower(town_name)

    def run_step(self, measurements, sensor_data, directions, target):
        """
            Run a step on the benchmark simulation
        Args:
            measurements: All the float measurements from CARLA (just speed is used)
            sensor_data: All the sensor data used on this benchmark
            directions: The directions, high level commands
            target: Final objective. Not used when the agent is predicting all outputs.

        Returns:
            Controls for the vehicle on the CARLA simulator.

        """
        # Get speed and high-level turning command
        # Take the forward speed and normalize it to the range [0, 1]
        norm_speed = measurements.player_measurements.forward_speed / g_conf.SPEED_FACTOR
        norm_speed = torch.cuda.FloatTensor([norm_speed]).unsqueeze(0)
        directions_tensor = torch.cuda.LongTensor([directions])

        # If we're evaluating squeeze network (so we are using ground truth seg mask)
        if "seg" in g_conf.SENSORS.keys():
            # Run the autopilot agent to get stop intentions
            _, state = self.control_agent.run_step(measurements, [], [], target)
            inputs_vec = []
            for input_name in g_conf.INTENTIONS:
                inputs_vec.append(float(state[input_name]))
            intentions = torch.cuda.FloatTensor(inputs_vec).unsqueeze(0)
            # Run squeeze network
            model_outputs = self._model.forward_branch(self._process_sensors(sensor_data), norm_speed,
                                                       directions_tensor, intentions, benchmark=True)
        else:
            # Run driving model
            model_outputs = self._model.forward_branch(self._process_sensors(sensor_data), norm_speed,
                                                       directions_tensor, benchmark=True)

        steer, throttle, brake = self._process_model_outputs(model_outputs[0])
        if self._carla_version == '0.9':
            import carla
            control = carla.VehicleControl()
        else:
            control = VehicleControl()
        control.steer = float(steer)
        control.throttle = float(throttle)
        control.brake = float(brake)
        # There is the possibility of replacing some of the predictions with oracle predictions.
        if g_conf.USE_ORACLE:
            _, control.throttle, control.brake = self._get_oracle_prediction(
                measurements, target)

        if self.first_iter:
            coil_logger.add_message('Iterating', {"Checkpoint": self.checkpoint['iteration'],
                                                  'Agent': str(steer)},
                                    self.checkpoint['iteration'])
        self.first_iter = False
        
        return control

    def _process_sensors(self, sensors):

        iteration = 0
        sensor_dict = {}
        for name, size in g_conf.SENSORS.items():

            if self._carla_version == '0.9':
                sensor = sensors[name][g_conf.IMAGE_CUT[0]:g_conf.IMAGE_CUT[1], ...]
            else:
                sensor = sensors[name].data[g_conf.IMAGE_CUT[0]:g_conf.IMAGE_CUT[1], ...]

            # Process RGB image or CARLA seg mask
            if name == 'rgb':
                # Resize image, convert it to [0, 1] BGR image
                sensor = scipy.misc.imresize(sensor, (size[1], size[2]))
                sensor = sensor[:, :, ::-1]
                sensor = np.swapaxes(sensor, 0, 1)
                sensor = np.transpose(sensor, (2, 1, 0))
                sensor = torch.from_numpy(sensor / 255.0).type(torch.FloatTensor)
            elif name == 'seg':
                seg = scipy.misc.imresize(sensor, (size[1], size[2]), 'nearest')
                # Re-map classes, mapping irrelevant classes to a "nuisance" class
                class_map = \
                    {0: 0, # None
                     1: 0, # Buildings -> None
                     2: 0, # Fences -> None
                     3: 0, # Other -> None
                     4: 1, # Pedestrians kept
                     5: 0, # Poles -> None
                     6: 2, # RoadLines kept
                     7: 3, # Roads kept
                     8: 2, # Sidewalks mapped to roadlines (both are boundaries of road)
                     9: 0,  # Vegetation -> None
                     10: 4, # Vehicles kept
                     11: 0, # Walls -> None
                     12: 5} # TrafficSigns kept (for traffic lights)
                new_seg = np.zeros((seg.shape[0], seg.shape[1]))
                # Remap classes
                for key, value in class_map.items():
                    new_seg[np.where(seg == key)] = value 
                # One hot encode seg mask, for now hardcode max of class map values + 1
                new_seg = np.eye(6)[new_seg.astype(np.int32)]
                new_seg = new_seg.transpose(2, 0, 1)
                new_seg = new_seg.astype(np.float64)  # the np.float alias was removed in NumPy >= 1.24
                sensor = torch.from_numpy(new_seg).type(torch.FloatTensor)

            sensor = sensor.unsqueeze(0)
            sensor_dict[name] = sensor

        return sensor_dict

    def _process_model_outputs(self, outputs):
        """
         A bit of heuristics in the control, to eventually make car faster, for instance.
        Returns:

        """
        steer, throttle, brake = outputs[0], outputs[1], outputs[2]
        if brake < 0.05:
            brake = 0.0

        if throttle > brake:
            brake = 0.0


        return steer, throttle, brake

    def _get_oracle_prediction(self, measurements, target):
        # For the oracle, the current version of sensor data is not really relevant.
        control, _ = self.control_agent.run_step(measurements, [], [], target)

        return control.steer, control.throttle, control.brake
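
# A compact sketch of the seg-mask remap + one-hot encoding performed in
# _process_sensors above, using a lookup table instead of the per-class loop.
# The class map and the 6-class count come straight from the method; the
# function name is illustrative.
import numpy as np

# CARLA class id -> reduced id (None/Pedestrian/RoadLine/Road/Vehicle/TrafficSign)
SEG_LUT = np.array([0, 0, 0, 0, 1, 0, 2, 3, 2, 0, 4, 0, 5])

def seg_to_onehot_sketch(seg):
    reduced = SEG_LUT[seg.astype(np.int32)]      # remap every pixel at once
    onehot = np.eye(6)[reduced]                  # H x W x 6
    return onehot.transpose(2, 0, 1)             # 6 x H x W, as the model expects

# Example: a dummy 88x200 mask of "Roads" (class 7) lands in channel 3.
dummy = np.full((88, 200), 7)
assert seg_to_onehot_sketch(dummy)[3].all()
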
Example No. 12
def execute(gpu,
            exp_batch='nocrash',
            exp_alias='resnet34imnet10S1',
            suppress_output=True,
            yaml_file=None):
    latest = None
    # try:
    # We set the visible cuda devices
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu

    # At this point the log file with the correct naming is created.
    path_to_yaml_file = os.path.join('configs', exp_batch, exp_alias + '.yaml')
    if yaml_file is not None:
        path_to_yaml_file = os.path.join(yaml_file, exp_alias + '.yaml')
    merge_with_yaml(path_to_yaml_file)
    # The validation dataset is always fully loaded, so we fix a very high number of hours
    # g_conf.NUMBER_OF_HOURS = 10000 # removed to simplify code
    """
    # commenting this segment to simplify code, uncomment if necessary
    set_type_of_process('validation', dataset_name)

    if not os.path.exists('_output_logs'):
        os.mkdir('_output_logs')

    if suppress_output:
        sys.stdout = open(os.path.join('_output_logs',
                                       exp_alias + '_' + g_conf.PROCESS_NAME + '_'
                                       + str(os.getpid()) + ".out"),
                          "a", buffering=1)
        sys.stderr = open(os.path.join('_output_logs',
                          exp_alias + '_err_' + g_conf.PROCESS_NAME + '_'
                                       + str(os.getpid()) + ".out"),
                          "a", buffering=1)
    """

    # Define the dataset. This structure has __getitem__ redefined so that
    # the HDF5 file positions under the root directory can be accessed as a vector.

    full_dataset = os.path.join(
        os.environ["COIL_DATASET_PATH"], g_conf.DART_COVMAT_DATA
    )  # dataset used for computing dart covariance matrix

    augmenter = Augmenter(None)

    # Definition of the dataset to be used. Preload name is just the validation data name
    print('full dataset path: ', full_dataset)
    dataset = CoILDataset(full_dataset,
                          transform=augmenter,
                          preload_name=g_conf.DART_COVMAT_DATA
                          )  # specify DART_COVMAT_DATA in the config file

    # The data loader is the multi-threaded module from pytorch that launches a number of
    # workers to get all the data.
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=g_conf.BATCH_SIZE,
        shuffle=False,
        num_workers=g_conf.NUMBER_OF_LOADING_WORKERS,
        pin_memory=True)

    model = CoILModel(g_conf.MODEL_TYPE, g_conf.MODEL_CONFIGURATION)
    """ removing this segment to simplify code
    # The window used to keep track of the trainings
    l1_window = []
    latest = get_latest_evaluated_checkpoint()
    if latest is not None:  # A checkpoint has already been evaluated
        l1_window = coil_logger.recover_loss_window(g_conf.DART_COVMAT_DATA, None)
    """

    model.cuda()

    best_mse = 1000
    best_error = 1000
    best_mse_iter = 0
    best_error_iter = 0

    # modified validation code from here to run a single model checkpoint
    # used for computing the covariance matrix with the DART model checkpoint
    checkpoint = torch.load(
        g_conf.DART_MODEL_CHECKPOINT
    )  # specify DART_MODEL_CHECKPOINT in the config file
    checkpoint_iteration = checkpoint['iteration']
    print("Validation loaded ", checkpoint_iteration)
    model.load_state_dict(checkpoint['state_dict'])

    model.eval()
    accumulated_mse = 0
    accumulated_error = 0
    iteration_on_checkpoint = 0

    # considering steer, throttle & brake, so a 3x3 matrix
    normalized_covariate_shift = torch.zeros(3, 3)

    print('data_loader size: ', len(data_loader))
    for data in data_loader:

        # Compute the forward pass on a batch from the validation dataset
        controls = data['directions']
        output = model.forward_branch(
            torch.squeeze(data['rgb']).cuda(),
            dataset.extract_inputs(data).cuda(), controls)
        """ removing this segment to simplify code
        # It could be either waypoints or direct control
        if 'waypoint1_angle' in g_conf.TARGETS:
            write_waypoints_output(checkpoint_iteration, output)
        else:
            write_regular_output(checkpoint_iteration, output)
        """

        mse = torch.mean(
            (output - dataset.extract_targets(data).cuda())**2).data.tolist()
        mean_error = torch.mean(
            torch.abs(output -
                      dataset.extract_targets(data).cuda())).data.tolist()

        accumulated_error += mean_error
        accumulated_mse += mse
        error = torch.abs(output -
                          dataset.extract_targets(data).cuda()).data.cpu()

        ### covariate shift segment starts
        error = error.unsqueeze(dim=2)
        error_transpose = torch.transpose(error, 1, 2)
        # compute covariate shift
        covariate_shift = torch.matmul(error, error_transpose)
        # expand the trajectory-length tensor to Bx3x3 (steer, throttle & brake)
        traj_lengths = torch.stack(
            [torch.stack([data['current_traj_length'].squeeze(dim=1)] * 3, dim=1)] * 3,
            dim=2)
        covariate_shift = covariate_shift / traj_lengths
        covariate_shift = torch.sum(covariate_shift, dim=0)
        # print ('current covariate shift: ', covariate_shift.shape)

        normalized_covariate_shift += covariate_shift
        ### covariate shift segment ends

        total_episodes = data['episode_count'][-1].data
        iteration_on_checkpoint += 1
        if iteration_on_checkpoint % 50 == 0:
            print('iteration: ', iteration_on_checkpoint)

    print('total episodes: ', total_episodes)
    normalized_covariate_shift = normalized_covariate_shift / total_episodes
    print('normalized covariate shift: ', normalized_covariate_shift.shape,
          normalized_covariate_shift)

    # save the matrix to restart directly from the mat file
    # np.save(os.path.join(g_conf.COVARIANCE_MATRIX_PATH, 'covariance_matrix_%s.npy' % g_conf.DART_COVMAT_DATA), normalized_covariate_shift)
    return normalized_covariate_shift.numpy()
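
# A hedged sketch of the covariate-shift accumulation above: per sample, the
# outer product of the 3-dim control error with itself is normalized by the
# trajectory length and summed over the batch, yielding a 3x3 matrix. The
# function name and example tensors are illustrative.
import torch

def covariate_shift_sketch(error, traj_length):
    # error: B x 3 (steer, throttle, brake residuals); traj_length: B
    outer = error.unsqueeze(2) @ error.unsqueeze(1)    # B x 3 x 3 outer products
    outer = outer / traj_length.view(-1, 1, 1)         # per-trajectory normalization
    return outer.sum(dim=0)                            # accumulate over the batch

# Example: two samples with unit trajectory lengths.
err = torch.tensor([[0.1, 0.0, 0.0], [0.0, 0.2, 0.0]])
print(covariate_shift_sketch(err, torch.ones(2)))
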
Example No. 13
class CoILAgent(object):
    def __init__(self, checkpoint, town_name, carla_version='0.84'):

        # Set the carla version that is going to be used by the interface
        self._carla_version = carla_version
        self.checkpoint = checkpoint  # We save the checkpoint for some interesting future use.
        self._model = CoILModel(g_conf.MODEL_TYPE, g_conf.MODEL_CONFIGURATION)
        self.first_iter = True
        # Load the model and set it up for evaluation
        self._model.load_state_dict(checkpoint['state_dict'])
        self._model.cuda()
        self._model.eval()

        self.latest_image = None
        self.latest_image_tensor = None

        if g_conf.USE_ORACLE or g_conf.USE_FULL_ORACLE:
            self.control_agent = CommandFollower(town_name)

    def run_step(self, measurements, sensor_data, directions, target):
        """
            Run a step on the benchmark simulation
        Args:
            measurements: All the float measurements from CARLA (just speed is used)
            sensor_data: All the sensor data used on this benchmark
            directions: The directions, high level commands
            target: Final objective. Not used when the agent is predicting all outputs.

        Returns:
            Controls for the vehicle on the CARLA simulator.

        """

        # Take the forward speed and normalize it to the range [0, 1]
        norm_speed = measurements.player_measurements.forward_speed / g_conf.SPEED_FACTOR
        norm_speed = torch.cuda.FloatTensor([norm_speed]).unsqueeze(0)
        directions_tensor = torch.cuda.LongTensor([directions])
        # Compute the forward pass, processing the sensors received from CARLA.
        model_outputs = self._model.forward_branch(
            self._process_sensors(sensor_data), norm_speed, directions_tensor)

        steer, throttle, brake = self._process_model_outputs(model_outputs[0])
        if self._carla_version == '0.9':
            import carla
            control = carla.VehicleControl()
        else:
            control = VehicleControl()
        control.steer = float(steer)
        control.throttle = float(throttle)
        control.brake = float(brake)
        # There is the possibility of replacing some of the predictions with oracle predictions.
        if g_conf.USE_ORACLE:
            _, control.throttle, control.brake = self._get_oracle_prediction(
                measurements, target)

        if self.first_iter:
            coil_logger.add_message('Iterating', {
                "Checkpoint": self.checkpoint['iteration'],
                'Agent': str(steer)
            }, self.checkpoint['iteration'])
        self.first_iter = False

        return control

    def get_attentions(self, layers=None):
        """

        Returns
            The activations obtained from the first layers of the latest iteration.

        """
        if layers is None:
            layers = [0, 1, 2]
        if self.latest_image_tensor is None:
            raise ValueError(
                'No step has been run yet; '
                'there is no image to compute activations from. Try running a step first.')
        all_layers = self._model.get_perception_layers(
            self.latest_image_tensor)
        cmap = plt.get_cmap('inferno')
        attentions = []
        for layer in layers:
            y = all_layers[layer]
            att = torch.abs(y).mean(1)[0].data.cpu().numpy()
            att = att / att.max()
            att = cmap(att)
            att = np.delete(att, 3, 2)
            attentions.append(imresize(att, [150, 200]))
        return attentions

    def _process_sensors(self, sensors):

        colors = [[0, 0, 0], [70, 70, 70], [190, 153, 153], [250, 170, 160],
                  [220, 20, 60], [153, 153, 153], [157, 234, 50],
                  [128, 64, 128], [244, 35, 232], [107, 142, 35], [0, 0, 142],
                  [102, 102, 156], [220, 220, 0]]

        def label_to_color_0(e):
            return colors[e][0]

        def label_to_color_1(e):
            return colors[e][1]

        def label_to_color_2(e):
            return colors[e][2]

        iteration = 0
        for name, size in g_conf.SENSORS.items():

            sensor = sensors[name].data

            if sensor.shape == (600, 800):
                labels = sensor
                tmp = np.zeros((600, 800, 3))
                f0 = np.vectorize(label_to_color_0)
                f1 = np.vectorize(label_to_color_1)
                f2 = np.vectorize(label_to_color_2)
                tmp[:, :, 0] = f0(sensor)
                tmp[:, :, 1] = f1(sensor)
                tmp[:, :, 2] = f2(sensor)
                sensor = tmp

            sensor = scipy.misc.imresize(sensor, (size[1], size[2]))

            # Keep the latest processed frame for visualization/debugging

            self.latest_image = sensor

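            # swapaxes + transpose below rearrange the image from HWC to CHW (channels first) for PyTorch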
            sensor = np.swapaxes(sensor, 0, 1)

            sensor = np.transpose(sensor, (2, 1, 0))

            sensor = torch.from_numpy(sensor / 255.0).type(
                torch.FloatTensor).cuda()

            if iteration == 0:
                image_input = sensor

            else:
                image_input = torch.cat((image_input, sensor), 0)

            iteration += 1

        image_input = image_input.unsqueeze(0)

        self.latest_image_tensor = image_input

        return image_input

    def _process_model_outputs(self, outputs):
        """
         A bit of heuristics in the control, to eventually make car faster, for instance.
        Returns:

        """
        steer, throttle, brake = outputs[0], outputs[1], outputs[2]
        if brake < 0.05:
            brake = 0.0

        if throttle > brake:
            brake = 0.0

        return steer, throttle, brake

    def _process_model_outputs_wp(self, outputs):
        """
         A bit of heuristics in the control, to eventually make car faster, for instance.
        Returns:

        """
        wpa1, wpa2, throttle, brake = outputs[3], outputs[4], outputs[
            1], outputs[2]
        if brake < 0.2:
            brake = 0.0

        if throttle > brake:
            brake = 0.0

        steer = 0.7 * wpa2

        if steer > 0:
            steer = min(steer, 1)
        else:
            steer = max(steer, -1)

        return steer, throttle, brake

    def _get_oracle_prediction(self, measurements, target):
        # For the oracle, the current version of sensor data is not really relevant.
        control, _, _, _, _ = self.control_agent.run_step(
            measurements, [], [], target)

        return control.steer, control.throttle, control.brake
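
A minimal usage sketch for the CoILAgent defined above. The checkpoint path,
town name and the benchmark-provided objects (measurements, sensor_data,
directions, target) are hypothetical placeholders for what a CARLA 0.8.4
driving benchmark supplies each frame.

import torch

checkpoint = torch.load('_logs/some_exp_batch/some_exp_alias/checkpoints/500000.pth')
agent = CoILAgent(checkpoint, town_name='Town01', carla_version='0.84')

# Inside the benchmark loop, one control command is produced per frame:
# control = agent.run_step(measurements, sensor_data, directions, target)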
Example No. 14
0
def execute(gpu, exp_batch, exp_alias, dataset_name, suppress_output):
    latest = None
    try:
        # We set the visible cuda devices
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu

        # At this point the log file with the correct naming is created.
        merge_with_yaml(os.path.join('configs', exp_batch,
                                     exp_alias + '.yaml'))
        # The validation dataset is always fully loaded, so we fix a very high number of hours
        g_conf.NUMBER_OF_HOURS = 10000
        set_type_of_process('validation', dataset_name)

        if not os.path.exists('_output_logs'):
            os.mkdir('_output_logs')

        if suppress_output:
            sys.stdout = open(os.path.join(
                '_output_logs', exp_alias + '_' + g_conf.PROCESS_NAME + '_' +
                str(os.getpid()) + ".out"),
                              "a",
                              buffering=1)
            sys.stderr = open(os.path.join(
                '_output_logs', exp_alias + '_err_' + g_conf.PROCESS_NAME +
                '_' + str(os.getpid()) + ".out"),
                              "a",
                              buffering=1)

        # Define the dataset. Its __getitem__ is redefined so that the HDF5 file
        # positions under the root directory can be accessed as a flat vector.
        full_dataset = os.path.join(os.environ["COIL_DATASET_PATH"],
                                    dataset_name)
        augmenter = Augmenter(None)
        # Definition of the dataset to be used. Preload name is just the validation data name
        dataset = CoILDataset(full_dataset,
                              transform=augmenter,
                              preload_name=dataset_name)

        # Create the sampler; it manages the keys, dividing them according to the
        # measurements and producing a set of keys for each batch.

        # The data loader is the multi-threaded PyTorch module that spawns a number
        # of workers to fetch the data.
        data_loader = torch.utils.data.DataLoader(
            dataset,
            batch_size=g_conf.BATCH_SIZE,
            shuffle=False,
            num_workers=g_conf.NUMBER_OF_LOADING_WORKERS,
            pin_memory=True)

        model = CoILModel(g_conf.MODEL_TYPE, g_conf.MODEL_CONFIGURATION)

        # Set ERFnet for segmentation
        model_erf = ERFNet(20)
        model_erf = torch.nn.DataParallel(model_erf)
        model_erf = model_erf.cuda()

        print("LOAD ERFNet - validate")

        def load_my_state_dict(model, state_dict):
            # Custom loader: copy only the parameters whose names exist in the
            # model, so checkpoints with missing or extra keys still load.
            own_state = model.state_dict()
            for name, param in state_dict.items():
                if name not in own_state:
                    continue
                own_state[name].copy_(param)
            return model

        model_erf = load_my_state_dict(
            model_erf,
            torch.load(os.path.join('trained_models/erfnet_pretrained.pth')))
        model_erf.eval()
        print("ERFNet and weights LOADED successfully")

        # The window used to keep track of the trainings
        l1_window = []
        latest = get_latest_evaluated_checkpoint()
        if latest is not None:  # A previous validation run exists; recover its loss window
            l1_window = coil_logger.recover_loss_window(dataset_name, None)

        model.cuda()

        best_mse = 1000
        best_error = 1000
        best_mse_iter = 0
        best_error_iter = 0

        while not maximun_checkpoint_reach(latest, g_conf.TEST_SCHEDULE):

            if is_next_checkpoint_ready(g_conf.TEST_SCHEDULE):

                latest = get_next_checkpoint(g_conf.TEST_SCHEDULE)

                checkpoint = torch.load(
                    os.path.join('_logs', exp_batch, exp_alias, 'checkpoints',
                                 str(latest) + '.pth'))
                checkpoint_iteration = checkpoint['iteration']
                print("Validation loaded ", checkpoint_iteration)

                model.load_state_dict(checkpoint['state_dict'])

                model.eval()
                accumulated_mse = 0
                accumulated_error = 0
                iteration_on_checkpoint = 0
                for data in data_loader:

                    # Compute the forward pass on a batch from the validation dataset
                    controls = data['directions']

                    # Segment the RGB batch with ERFNet
                    rgbs = data['rgb']
                    with torch.no_grad():
                        outputs = model_erf(rgbs)
                    labels = outputs.max(1)[1].byte().cpu().data

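                    # Build a two-channel (road / not-road) input from the ERFNet argmax labels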
                    seg_road = (labels == 0)
                    seg_not_road = (labels != 0)
                    seg = torch.stack((seg_road, seg_not_road), 1).float()

                    output = model.forward_branch(
                        torch.squeeze(seg).cuda(),
                        dataset.extract_inputs(data).cuda(), controls)

                    #                    output = model.foward_branch(torch.squeeze(rgbs).cuda(),
                    #                                                 dataset.extract_inputs(data).cuda(),controls)
                    # It could be either waypoints or direct control
                    if 'waypoint1_angle' in g_conf.TARGETS:
                        write_waypoints_output(checkpoint_iteration, output)
                    else:
                        write_regular_output(checkpoint_iteration, output)

                    mse = torch.mean(
                        (output - dataset.extract_targets(data).cuda()
                         )**2).data.tolist()
                    mean_error = torch.mean(
                        torch.abs(output -
                                  dataset.extract_targets(data).cuda())
                    ).data.tolist()

                    accumulated_error += mean_error
                    accumulated_mse += mse
                    error = torch.abs(output -
                                      dataset.extract_targets(data).cuda())

                    # Log a random position
                    position = random.randint(0, len(output.data.tolist()) - 1)

                    coil_logger.add_message(
                        'Iterating', {
                            'Checkpoint':
                            latest,
                            'Iteration': (str(iteration_on_checkpoint * g_conf.BATCH_SIZE) +
                                          '/' + str(len(dataset))),
                            'MeanError':
                            mean_error,
                            'MSE':
                            mse,
                            'Output':
                            output[position].data.tolist(),
                            'GroundTruth':
                            dataset.extract_targets(
                                data)[position].data.tolist(),
                            'Error':
                            error[position].data.tolist(),
                            'Inputs':
                            dataset.extract_inputs(data)
                            [position].data.tolist()
                        }, latest)
                    iteration_on_checkpoint += 1
                    print("Iteration %d  on Checkpoint %d : Error %f" %
                          (iteration_on_checkpoint, checkpoint_iteration,
                           mean_error))
                """
                    ########
                    Finish a round of validation, write results, wait for the next
                    ########
                """

                checkpoint_average_mse = accumulated_mse / (len(data_loader))
                checkpoint_average_error = accumulated_error / (
                    len(data_loader))
                coil_logger.add_scalar('Loss', checkpoint_average_mse, latest,
                                       True)
                coil_logger.add_scalar('Error', checkpoint_average_error,
                                       latest, True)

                if checkpoint_average_mse < best_mse:
                    best_mse = checkpoint_average_mse
                    best_mse_iter = latest

                if checkpoint_average_error < best_error:
                    best_error = checkpoint_average_error
                    best_error_iter = latest

                coil_logger.add_message(
                    'Iterating', {
                        'Summary': {
                            'Error': checkpoint_average_error,
                            'Loss': checkpoint_average_mse,
                            'BestError': best_error,
                            'BestMSE': best_mse,
                            'BestMSECheckpoint': best_mse_iter,
                            'BestErrorCheckpoint': best_error_iter
                        },
                        'Checkpoint': latest
                    }, latest)

                l1_window.append(checkpoint_average_error)
                coil_logger.write_on_error_csv(dataset_name,
                                               checkpoint_average_error)

                # If finishing on validation staleness is enabled, check whether the error has stopped decreasing
                if g_conf.FINISH_ON_VALIDATION_STALE is not None:
                    if dlib.count_steps_without_decrease(l1_window) > 3 and \
                            dlib.count_steps_without_decrease_robust(l1_window) > 3:
                        coil_logger.write_stop(dataset_name, latest)
                        break

            else:

                latest = get_latest_evaluated_checkpoint()
                time.sleep(1)

                coil_logger.add_message('Loading',
                                        {'Message': 'Waiting Checkpoint'})
                print("Waiting for the next Validation")

        coil_logger.add_message('Finished', {})

    except KeyboardInterrupt:
        coil_logger.add_message('Error', {'Message': 'Killed By User'})
        # We erase the output that was unfinished due to some process stop.
        if latest is not None:
            coil_logger.erase_csv(latest)

    except RuntimeError as e:
        if latest is not None:
            coil_logger.erase_csv(latest)
        coil_logger.add_message('Error', {'Message': str(e)})

    except:
        traceback.print_exc()
        coil_logger.add_message('Error', {'Message': 'Something Happened'})
        # We erase the output that was unfinished due to some process stop.
        if latest is not None:
            coil_logger.erase_csv(latest)
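
A minimal launch sketch for the execute() validation entry point above, run in
its own process so it can poll for new checkpoints while training continues.
The experiment batch, alias and dataset names are hypothetical placeholders.

import multiprocessing

p = multiprocessing.Process(
    target=execute,
    # gpu, exp_batch, exp_alias, dataset_name, suppress_output
    args=('0', 'eccv', 'experiment_1', 'SmallValidation', True))
p.start()
p.join()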
Example No. 15
0
class CoILAgent(Agent):

	def __init__(self, checkpoint):



		#experiment_name='None', driver_conf=None, memory_fraction=0.18,
		#image_cut=[115, 510]):

		# use_planner=False,graph_file=None,map_file=None,augment_left_right=False,image_cut = [170,518]):

		Agent.__init__(self)
		# This should likely come from global
		#config_gpu = tf.ConfigProto()
		#config_gpu.gpu_options.visible_device_list = '0'

		#config_gpu.gpu_options.per_process_gpu_memory_fraction = memory_fraction
		#self._sess = tf.Session(config=config_gpu)

		# THIS DOES NOT WORK FOR FUSED PLUS LSTM
		#if self._config.number_frames_sequenced > self._config.number_frames_fused:
		#    self._config_train.batch_size = self._config.number_frames_sequenced
		#else:
		#    self._config_train.batch_size = self._config.number_frames_fused

		#self._train_manager = load_system(self._config_train)
		#self._config.train_segmentation = False
		self.model = CoILModel(g_conf.MODEL_NAME)

		self.model.load_state_dict(checkpoint['state_dict'])

		self.model.cuda()

		self.model.eval()


		#self.model.load_network(checkpoint)

		#self._sess.run(tf.global_variables_initializer())

		#self._control_function = getattr(machine_output_functions,
		#                                 self._train_manager._config.control_mode)
		# More elegant way to merge with autopilot
		#self._agent = Autopilot(ConfigAutopilot(driver_conf.city_name))

		#self._image_cut = driver_conf.image_cut
		#self._auto_pilot = driver_conf.use_planner

		#self._recording = False
		#self._start_time = 0


	def run_step(self, measurements, sensor_data, directions, target):


		self.model.eval()
		#control_agent = self._agent.run_step(measurements, None, target)
		print (" RUnning STEP ")
		speed = torch.cuda.FloatTensor([measurements.player_measurements.forward_speed]).unsqueeze(0)
		print("Speed is", speed)
		print ("Speed shape ", speed)
		directions_tensor = torch.cuda.LongTensor([directions])
		model_outputs = self.model.forward_branch(self._process_sensors(sensor_data), speed,
												  directions_tensor)

		print (model_outputs)

		steer, throttle, brake = self._process_model_outputs(model_outputs[0],
										 measurements.player_measurements.forward_speed)



		#control = self.compute_action(,
		#                              ,
		#                              directions)
		control = carla_protocol.Control()
		control.steer = steer
		control.throttle = throttle
		control.brake = brake
		# if self._auto_pilot:
		#    control.steer = control_agent.steer
		# TODO: adapt the client side agent for the new version. ( PROBLEM )
		#control.throttle = control_agent.throttle
		#control.brake = control_agent.brake

		# TODO: maybe change to a more meaningful message?
		return control


	def _process_sensors(self, sensors):


		iteration = 0
		for name, size in g_conf.SENSORS.items():

			sensor = sensors[name].data[175:375, ...] #300*800*3

			image_input = transform.resize(sensor, (size[1], size[2]))

			# transforms.Normalize([ 0.5315,  0.5521,  0.5205], [ 0.1960,  0.1810,  0.2217])

			print ("Image pixL ", image_input[:10][0][0])
			image_input = np.transpose(image_input, (2, 0, 1))
			image_input = torch.from_numpy(image_input).type(torch.FloatTensor).cuda()
			print ("torch size", image_input.size())

			img_np = np.uint8(np.transpose(image_input.cpu().numpy() * 255, (1 , 2, 0)))

			# plt.figure(1)
			# plt.subplot(1, 2, 1)
			# plt.imshow(sensor)

			# plt.subplot(1,2,2)
			# plt.imshow(img_np)
			# #
			# plt.show()

			# if  sensors[name].type == 'SemanticSegmentation':
			#     # TODO: the camera name has to be synchronized with what is in the experiment...
			#     sensor = join_classes(sensor)
			#
			#     sensor = sensor[:, :, np.newaxis]
			#
			#     image_transform = transforms.Compose([transforms.ToTensor(),
			#                        transforms.Resize((size[1], size[2]), interpolation=Image.NEAREST),
			#                        iag.ToGPU(), iag.Multiply((1 / (number_of_seg_classes - 1)))])
			# else:
			#
			#     image_transform = transforms.Compose([transforms.ToPILImage(),
			#                        transforms.Resize((size[1], size[2])),
			#                        transforms.ToTensor(), transforms.Normalize((0, 0 ,0), (255, 255, 255)),
			#                        iag.ToGPU()])
			# sensor = np.swapaxes(sensor, 0, 1)
			# print ("Sensor Previous SHape")
			# print (sensor.shape)
			# sensor = np.flip(sensor.transpose((2, 0, 1)), axis=0)
			# print ("Sensor Previous SHape PT2")
			# print (sensor.shape)
			# if iteration == 0:
			#     image_input = image_transform(sensor)
			# else:
			#     image_input = torch.cat((image_input, sensor), 0)

			iteration += 1

		# print (image_input.shape)
		image_input  = image_input.unsqueeze(0)
		print (image_input.shape)

		return image_input


	def _process_model_outputs(self, outputs, speed):
		"""
		Apply simple heuristics to the raw network outputs (e.g. suppress weak braking).
		Returns:
			steer, throttle, brake
		"""
		steer, throttle, brake = outputs[0], outputs[1], outputs[2]
		# if steer > 0.5:
		# 	throttle *= (1 - steer + 0.3)
		# 	steer += 0.3
		# 	if steer > 1:
		# 		steer = 1
		# if steer < -0.5:
		# 	throttle *= (1 + steer + 0.3)
		# 	steer -= 0.3
		# 	if steer < -1:
		# 		steer = -1

		# if brake < 0.2:
		# 	brake = 0.0
		#
		if throttle > brake:
			brake = 0.0
		# else:
		# 	throttle = throttle * 2
		# if speed > 35.0 and brake == 0.0:
		# 	throttle = 0.0

		print ("Steer", steer, "Throttle", throttle)


		return steer, throttle, brake


	"""

	def compute_action(self, sensors, speed, direction):

		capture_time = time.time()


		sensor_pack = []



		for i in range(len(sensors)):

			sensor = sensors[i]

			sensor = sensor[g_conf.IMAGE_CUT[0]:g_conf.IMAGE_CUT[1], :]

			if g_conf.param.SENSORS.keys()[i] == 'rgb':

				sensor = scipy.misc.imresize(sensor, [self._config.sensors_size[i][0],
													  self._config.sensors_size[i][1]])


			elif g_conf.param.SENSORS.keys()[i] == 'labels':

				sensor = scipy.misc.imresize(sensor, [self._config.sensors_size[i][0],
													  self._config.sensors_size[i][1]],
											 interp='nearest')

				sensor = join_classes(sensor) * int(255 / (number_of_seg_classes - 1))

				sensor = sensor[:, :, np.newaxis]

			sensor_pack.append(sensor)

		if len(sensor_pack) > 1:

			image_input = np.concatenate((sensor_pack[0], sensor_pack[1]), axis=2)

		else:
			image_input = sensor_pack[0]

		image_input = image_input.astype(np.float32)
		image_input = np.multiply(image_input, 1.0 / 255.0)


		image_input = sensors[0]

		image_input = image_input.astype(np.float32)
		image_input = np.multiply(image_input, 1.0 / 255.0)
		# TODO: This will of course depend on the model , if it is based on sequences there are
		# TODO: different requirements
		#tensor = self.model(image_input)
		outputs = self.model.forward_branch(image_input, speed, direction)



		return control  # ,machine_output_functions.get_intermediate_rep(image_input,speed,self._config,self._sess,self._train_manager)

	"""
	"""
Example No. 16
0
def execute(gpu, exp_batch, exp_alias, dataset_name):
    # We set the visible cuda devices

    os.environ["CUDA_VISIBLE_DEVICES"] = '0'

    # At this point the log file with the correct naming is created.
    merge_with_yaml(os.path.join('configs', exp_batch, exp_alias + '.yaml'))
    set_type_of_process('validation', dataset_name)

    if not os.path.exists('_output_logs'):
        os.mkdir('_output_logs')

    sys.stdout = open(os.path.join(
        '_output_logs', g_conf.PROCESS_NAME + '_' + str(os.getpid()) + ".out"),
                      "a",
                      buffering=1)

    if monitorer.get_status(exp_batch, exp_alias + '.yaml',
                            g_conf.PROCESS_NAME)[0] == "Finished":
        # TODO: print some cool summary or not ?
        return

    # Define the dataset. Its __getitem__ is redefined so that the HDF5 file
    # positions under the root directory can be accessed as a flat vector.
    full_dataset = os.path.join(os.environ["COIL_DATASET_PATH"], dataset_name)

    print(full_dataset)
    dataset = CoILDataset(full_dataset,
                          transform=transforms.Compose([transforms.ToTensor()
                                                        ]))

    # Create the sampler; it manages the keys, dividing them according to the
    # measurements and producing a set of keys for each batch.

    # The data loader is the multi-threaded PyTorch module that spawns a number
    # of workers to fetch the data.
    # TODO: batch size an number of workers go to some configuration file
    data_loader = torch.utils.data.DataLoader(dataset,
                                              batch_size=120,
                                              shuffle=False,
                                              num_workers=12,
                                              pin_memory=True)

    # TODO: here there is clearly a possibility to make a cool "conditioning" system.
    model = CoILModel(g_conf.MODEL_NAME)
    model.cuda()
    model.eval()

    criterion = Loss()

    latest = get_latest_evaluated_checkpoint()
    if latest is None:  # When nothing was tested yet, get_latest returns None; start from 0.
        latest = 0

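    # NOTE: hard-coded starting point; the glob loop below evaluates every checkpoint it finds.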
    latest = 200000

    best_loss = 1000.0
    best_error = 1000.0
    best_loss_iter = 0
    best_error_iter = 0
    print(dataset.meta_data[0][0])
    for k in dataset.meta_data:
        k[0] = str(k[0], 'utf-8')

    print(dataset.meta_data[0][0])
    cpts = glob.glob(
        '/home-local/rohitrishabh/coil_20-06/_logs/eccv/experiment_1/checkpoints/*.pth'
    )
    # while not maximun_checkpoint_reach(latest, g_conf.TEST_SCHEDULE):
    for ckpt in cpts:

        # if is_next_checkpoint_ready(g_conf.TEST_SCHEDULE):

        # latest = get_next_checkpoint(g_conf.TEST_SCHEDULE)
        latest = int(ckpt[-10:-4])

        # checkpoint = torch.load(os.path.join('_logs', exp_batch, exp_alias
        #                         , 'checkpoints', str(latest) + '.pth'))
        checkpoint = torch.load(ckpt)
        checkpoint_iteration = checkpoint['iteration']
        print("Validation loaded ", checkpoint_iteration)

        accumulated_loss = 0.0
        accumulated_error = 0.0
        iteration_on_checkpoint = 0
        for data in data_loader:

            input_data, float_data = data
            control_position = np.where(
                dataset.meta_data[:, 0] == 'control')[0][0]
            speed_position = np.where(
                dataset.meta_data[:, 0] == 'speed_module')[0][0]
            # print (torch.squeeze(input_data['rgb']).shape)
            # print (control_position)
            # print (speed_position)
            # Obs : Maybe we could also check for other branches ??

            output = model.forward_branch(
                torch.squeeze(input_data['rgb']).cuda(),
                float_data[:, speed_position, :].cuda(),
                float_data[:, control_position, :].cuda())

            for i in range(input_data['rgb'].shape[0]):

                coil_logger.write_on_csv(
                    checkpoint_iteration,
                    [output[i][0], output[i][1], output[i][2]])

            # TODO: Change this a functional standard using the loss functions.

            loss = torch.mean(
                (output -
                 dataset.extract_targets(float_data).cuda())**2).data.tolist()
            mean_error = torch.mean(
                torch.abs(
                    output -
                    dataset.extract_targets(float_data).cuda())).data.tolist()
            accumulated_error += mean_error
            accumulated_loss += loss
            error = torch.abs(output -
                              dataset.extract_targets(float_data).cuda())

            # Log a random position
            position = random.randint(0, len(float_data) - 1)
            #print (output[position].data.tolist())
            coil_logger.add_message(
                'Iterating in Validation', {
                    'Checkpoint':
                    latest,
                    'Iteration': (str(iteration_on_checkpoint * 120) + '/' +
                                  str(len(dataset))),
                    'MeanError':
                    mean_error,
                    'Loss':
                    loss,
                    'Output':
                    output[position].data.tolist(),
                    'GroundTruth':
                    dataset.extract_targets(float_data)
                    [position].data.tolist(),
                    'Error':
                    error[position].data.tolist(),
                    'Inputs':
                    dataset.extract_inputs(float_data)[position].data.tolist()
                }, latest)
            iteration_on_checkpoint += 1

        # Average the accumulated per-batch means over the number of batches.
        checkpoint_average_loss = accumulated_loss / len(data_loader)
        checkpoint_average_error = accumulated_error / len(data_loader)
        coil_logger.add_scalar('Loss', checkpoint_average_loss, latest)
        coil_logger.add_scalar('Error', checkpoint_average_error, latest)
        print('Loss: ', checkpoint_average_loss, "----Error: ",
              checkpoint_average_error)

        if checkpoint_average_loss < best_loss:
            best_loss = checkpoint_average_loss
            best_loss_iter = latest

            state = {
                'state_dict': model.state_dict(),
                'best_loss': best_loss,
                'best_loss_iter': best_loss_iter
            }
            # TODO : maybe already summarize the best model ???
            torch.save(
                state,
                os.path.join('_logs', exp_batch, exp_alias,
                             'best_model_l2' + '.pth'))

        if checkpoint_average_error < best_error:
            best_error = checkpoint_average_error
            best_error_iter = latest

            state = {
                'state_dict': model.state_dict(),
                'best_error': best_error,
                'best_error_iter': best_error_iter
            }
            # TODO : maybe already summarize the best model ???
            torch.save(
                state,
                os.path.join('_logs', exp_batch, exp_alias,
                             'best_model_l1' + '.pth'))

        print('Best Loss: ', best_loss, "Checkpoint", best_loss_iter)
        print('Best Error: ', best_error, "Checkpoint", best_error_iter)

        coil_logger.add_message(
            'Iterating in Validation', {
                'Summary': {
                    'Error': checkpoint_average_error,
                    'Loss': checkpoint_average_loss,
                    'BestError': best_error,
                    'BestLoss': best_loss,
                    'BestLossCheckpoint': best_loss_iter,
                    'BestErrorCheckpoint': best_error_iter
                },
                'Checkpoint': latest
            })
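
The loop above recovers the iteration with int(ckpt[-10:-4]), which silently
assumes six-digit, zero-padded checkpoint filenames. A sketch of a
filename-agnostic variant:

import os
import re

def checkpoint_iteration(path):
    # '500000.pth' -> 500000, regardless of zero padding or digit count
    match = re.match(r'(\d+)\.pth$', os.path.basename(path))
    if match is None:
        raise ValueError('Not a checkpoint file: %s' % path)
    return int(match.group(1))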
Example No. 17
0
class CoILAgent(Agent):
    def __init__(self, checkpoint, architecture_name):

        #experiment_name='None', driver_conf=None, memory_fraction=0.18,
        #image_cut=[115, 510]):

        # use_planner=False,graph_file=None,map_file=None,augment_left_right=False,image_cut = [170,518]):

        Agent.__init__(self)
        # This should likely come from global
        #config_gpu = tf.ConfigProto()
        #config_gpu.gpu_options.visible_device_list = '0'

        #config_gpu.gpu_options.per_process_gpu_memory_fraction = memory_fraction
        #self._sess = tf.Session(config=config_gpu)

        # THIS DOES NOT WORK FOR FUSED PLUS LSTM
        #if self._config.number_frames_sequenced > self._config.number_frames_fused:
        #    self._config_train.batch_size = self._config.number_frames_sequenced
        #else:
        #    self._config_train.batch_size = self._config.number_frames_fused

        #self._train_manager = load_system(self._config_train)
        #self._config.train_segmentation = False
        self.architecture_name = architecture_name

        if architecture_name == 'coil_unit':
            self.model_task, self.model_gen = CoILModel('coil_unit')
            self.model_task = self.model_task.cuda()
            self.model_gen = self.model_gen.cuda()
        elif architecture_name == 'unit_task_only':
            self.model_task, self.model_gen = CoILModel('unit_task_only')
            self.model_task = self.model_task.cuda()
            self.model_gen = self.model_gen.cuda()
        else:
            self.model = CoILModel(architecture_name)
            self.model.cuda()

        if architecture_name == 'wgangp_lsd':
            # print(ckpt, checkpoint['best_loss_iter_F'])
            self.model.load_state_dict(checkpoint['stateF_dict'])
            self.model.eval()
        elif architecture_name == 'coil_unit':
            self.model_task.load_state_dict(checkpoint['task'])
            self.model_gen.load_state_dict(checkpoint['b'])
            self.model_task.eval()
            self.model_gen.eval()
        elif architecture_name == 'coil_icra':
            self.model.load_state_dict(checkpoint['state_dict'])
            self.model.eval()
        elif architecture_name == 'unit_task_only':
            self.model_task.load_state_dict(checkpoint['task_state_dict'])
            self.model_gen.load_state_dict(checkpoint['enc_state_dict'])
            self.model_task.eval()
            self.model_gen.eval()

        #self.model.load_network(checkpoint)

        #self._sess.run(tf.global_variables_initializer())

        #self._control_function = getattr(machine_output_functions,
        #                                 self._train_manager._config.control_mode)
        # More elegant way to merge with autopilot
        #self._agent = Autopilot(ConfigAutopilot(driver_conf.city_name))

        #self._image_cut = driver_conf.image_cut
        #self._auto_pilot = driver_conf.use_planner

        #self._recording = False
        #self._start_time = 0

    def run_step(self, measurements, sensor_data, directions, target):

        #control_agent = self._agent.run_step(measurements, None, target)
        print(" RUnning STEP ")
        speed = torch.cuda.FloatTensor(
            [measurements.player_measurements.forward_speed]).unsqueeze(0)
        print("Speed is", speed)
        print("Speed shape ", speed)
        directions_tensor = torch.cuda.LongTensor([directions])

        # model_outputs = self.model.forward_branch(self._process_sensors(sensor_data), speed,
        # 										  directions_tensor)
        if self.architecture_name == 'wgangp_lsd':
            embed, model_outputs = self.model(
                self._process_sensors(sensor_data), speed)

        elif self.architecture_name == 'coil_unit':
            embed, n_b = self.model_gen.encode(
                self._process_sensors(sensor_data))
            model_outputs = self.model_task(embed, speed)

        elif self.architecture_name == 'unit_task_only':
            embed, n_b = self.model_gen.encode(
                self._process_sensors(sensor_data))
            model_outputs = self.model_task(embed, speed)

        elif self.architecture_name == 'coil_icra':
            model_outputs = self.model.forward_branch(
                self._process_sensors(sensor_data), speed, directions_tensor)

        print(model_outputs)

        if self.architecture_name == 'coil_icra':
            steer, throttle, brake = self._process_model_outputs(
                model_outputs[0],
                measurements.player_measurements.forward_speed)
        else:
            steer, throttle, brake = self._process_model_outputs(
                model_outputs[0][0],
                measurements.player_measurements.forward_speed)

        control = carla_protocol.Control()
        control.steer = steer
        control.throttle = throttle
        control.brake = brake
        # if self._auto_pilot:
        #    control.steer = control_agent.steer
        # TODO: adapt the client side agent for the new version. ( PROBLEM )
        #control.throttle = control_agent.throttle
        #control.brake = control_agent.brake

        # TODO: maybe change to a more meaningful message?
        return control

    def _process_sensors(self, sensors):

        iteration = 0
        for name, size in g_conf.SENSORS.items():

            sensor = sensors[name].data[140:260, ...]  #300*800*3

            image_input = transform.resize(sensor, (128, 128))

            # transforms.Normalize([ 0.5315,  0.5521,  0.5205], [ 0.1960,  0.1810,  0.2217])

            image_input = np.transpose(image_input, (2, 0, 1))
            image_input = torch.from_numpy(image_input).type(
                torch.FloatTensor).cuda()
            # NOTE: no further normalization is applied (resize already yields [0, 1])
            print("torch size", image_input.size())

            img_np = np.uint8(
                np.transpose(image_input.cpu().numpy() * 255, (1, 2, 0)))

            # plt.figure(1)
            # plt.subplot(1, 2, 1)
            # plt.imshow(sensor)
            #
            # plt.subplot(1,2,2)
            # plt.imshow(img_np)
            # #
            # plt.show()

            iteration += 1

        # print (image_input.shape)
        image_input = image_input.unsqueeze(0)
        print(image_input.shape)

        return image_input

    def _process_model_outputs(self, outputs, speed):
        """
         A bit of heuristics in the control, to eventually make car faster, for instance.
        Returns:

        """

        print("OUTPUTS", outputs)
        steer, throttle, brake = outputs[0], outputs[1], outputs[2]
        # if steer > 0.5:
        # 	throttle *= (1 - steer + 0.3)
        # 	steer += 0.3
        # 	if steer > 1:
        # 		steer = 1
        # if steer < -0.5:
        # 	throttle *= (1 + steer + 0.3)
        # 	steer -= 0.3
        # 	if steer < -1:
        # 		steer = -1

        # if brake < 0.2:
        # 	brake = 0.0
        #
        if throttle > brake:
            brake = 0.0
        # else:
        # 	throttle = throttle * 2
        # if speed > 35.0 and brake == 0.0:
        # 	throttle = 0.0

        return steer, throttle, brake
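
The __init__ and run_step branches above key on the same architecture names; a
sketch of a table-driven alternative for the checkpoint-key lookup (the keys
mirror the ones the class already handles; None means no generator weights):

CHECKPOINT_KEYS = {
    'wgangp_lsd': ('stateF_dict', None),
    'coil_unit': ('task', 'b'),
    'coil_icra': ('state_dict', None),
    'unit_task_only': ('task_state_dict', 'enc_state_dict'),
}

task_key, gen_key = CHECKPOINT_KEYS['coil_unit']  # e.g. ('task', 'b')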
Example No. 18
0
class CoILBaseline(AutonomousAgent):

    def setup(self, path_to_config_file):

        yaml_conf, checkpoint_number = checkpoint_parse_configuration_file(path_to_config_file)

        # Take the checkpoint name and load it
        checkpoint = torch.load(
            os.path.join(os.sep,
                         os.path.join(*os.path.realpath(__file__).split(os.sep)[:-2]),
                         '_logs',
                         yaml_conf.split(os.sep)[-2],
                         yaml_conf.split(os.sep)[-1].split('.')[-2],
                         'checkpoints', str(checkpoint_number) + '.pth'))

        # do the merge here
        merge_with_yaml(os.path.join(os.sep, os.path.join(*os.path.realpath(__file__).split(os.sep)[:-2]),
                                     yaml_conf))

        self.checkpoint = checkpoint  # We save the checkpoint for some interesting future use.
        self._model = CoILModel(g_conf.MODEL_TYPE, g_conf.MODEL_CONFIGURATION)
        self.first_iter = True
        logging.info("Setup Model")
        # Load the model and prepare set it for evaluation
        self._model.load_state_dict(checkpoint['state_dict'])
        self._model.cuda()
        self._model.eval()
        self.latest_image = None
        self.latest_image_tensor = None
        # Expand the intersection commands so they cover more of the route
        self._expand_command_front = 5
        self._expand_command_back = 3
        self.track = 2  # Track.CAMERAS

    def sensors(self):
        sensors = [{'type': 'sensor.camera.rgb',
                   'x': 2.0, 'y': 0.0,
                    'z': 1.40, 'roll': 0.0,
                    'pitch': 0.0, 'yaw': 0.0,
                    'width': 800, 'height': 600,
                    'fov': 100,
                    'id': 'rgb'},
                   {'type': 'sensor.can_bus',
                    'reading_frequency': 25,
                    'id': 'can_bus'
                    },
                   {'type': 'sensor.other.gnss',
                    'x': 0.7, 'y': -0.4, 'z': 1.60,
                    'id': 'GPS'}
                   ]

        return sensors

    def run_step(self, input_data, timestamp):
        # Get the current directions for following the route
        directions = self._get_current_direction(input_data['GPS'][1])
        logging.debug("Directions {}".format(directions))

        # Take the forward speed and normalize it for it to go from 0-1
        norm_speed = input_data['can_bus'][1]['speed'] / g_conf.SPEED_FACTOR
        norm_speed = torch.cuda.FloatTensor([norm_speed]).unsqueeze(0)
        directions_tensor = torch.cuda.LongTensor([directions])
        # Compute the forward pass processing the sensors got from CARLA.
        model_outputs = self._model.forward_branch(self._process_sensors(input_data['rgb'][1]),
                                                   norm_speed,
                                                   directions_tensor)

        steer, throttle, brake = self._process_model_outputs(model_outputs[0])
        control = carla.VehicleControl()
        control.steer = float(steer)
        control.throttle = float(throttle)
        control.brake = float(brake)
        logging.debug("Output ", control)
        # There is the posibility to replace some of the predictions with oracle predictions.
        self.first_iter = False
        return control

    def get_attentions(self, layers=None):
        """
        Returns
            The activations obtained from the first layers of the latest iteration.

        """
        if layers is None:
            layers = [0, 1, 2]
        if self.latest_image_tensor is None:
            raise ValueError('No step has been run yet; there is no image '
                             'to compute activations from. Run a step first.')
        all_layers = self._model.get_perception_layers(self.latest_image_tensor)
        cmap = plt.get_cmap('inferno')
        attentions = []
        for layer in layers:
            y = all_layers[layer]
            att = torch.abs(y).mean(1)[0].data.cpu().numpy()
            att = att / att.max()
            att = cmap(att)
            att = np.delete(att, 3, 2)
            attentions.append(scipy.misc.imresize(att, [88, 200]))
        return attentions

    def _process_sensors(self, sensor):
        sensor = sensor[:, :, 0:3]  # BGRA -> BGR: drop the alpha channel
        sensor = sensor[:, :, ::-1]  # BGR->RGB
        # sensor = sensor[g_conf.IMAGE_CUT[0]:g_conf.IMAGE_CUT[1], :, :]  # crop  # TODO: don't cut
        sensor = scipy.misc.imresize(sensor, (g_conf.SENSORS['rgb'][1], g_conf.SENSORS['rgb'][2]))
        self.latest_image = sensor

        sensor = np.swapaxes(sensor, 0, 1)
        sensor = np.transpose(sensor, (2, 1, 0))
        sensor = torch.from_numpy(sensor / 255.0).type(torch.FloatTensor).cuda()
        image_input = sensor.unsqueeze(0)
        self.latest_image_tensor = image_input

        return image_input

    def _get_current_direction(self, vehicle_position):

        # For the current position, find the closest waypoint in the global plan
        closest_id = 0
        min_distance = 100000
        for index in range(len(self._global_plan)):

            waypoint = self._global_plan[index][0]

            computed_distance = distance_vehicle(waypoint, vehicle_position)
            if computed_distance < min_distance:
                min_distance = computed_distance
                closest_id = index

        print(f'Closest waypoint {closest_id} dist {min_distance}')
        direction = self._global_plan[closest_id][1]
        print("Direction ", direction)
        if direction == RoadOption.LEFT:
            direction = 3.0
        elif direction == RoadOption.RIGHT:
            direction = 4.0
        elif direction == RoadOption.STRAIGHT:
            direction = 5.0
        else:
            direction = 2.0

        return direction

    @staticmethod
    def _process_model_outputs(outputs):
        """
         A bit of heuristics in the control, to eventually make car faster, for instance.
        Returns:

        """
        steer, throttle, brake = outputs[0], outputs[1], outputs[2]
        if brake < 0.05:
            brake = 0.0

        if throttle > brake:
            brake = 0.0

        return steer, throttle, brake

    def _expand_commands(self, topological_plan):
        """ The idea is to make the intersection indications to last longer"""

        # O(2N) algorithm; an O(N) version using queues is probably possible.

        # Get the index where curves start and end
        curves_start_end = []
        inside = False
        start = -1
        current_curve = RoadOption.LANEFOLLOW
        for index in range(len(topological_plan)):

            command = topological_plan[index][1]
            if command != RoadOption.LANEFOLLOW and not inside:
                inside = True
                start = index
                current_curve = command

            if command == RoadOption.LANEFOLLOW and inside:
                inside = False
                # End now is the index.
                curves_start_end.append([start, index, current_curve])
                if start == -1:
                    raise ValueError("End of curve without start")

                start = -1

        for start_end_index_command in curves_start_end:
            start_index = start_end_index_command[0]
            end_index = start_end_index_command[1]
            command = start_end_index_command[2]

            # Extend the command backwards (before the beginning of the curve)
            for index in range(1, self._expand_command_front + 1):
                changed_index = start_index - index
                if changed_index > 0:
                    topological_plan[changed_index] = (topological_plan[changed_index][0], command)

            # Add the ones after the end
            for index in range(0, self._expand_command_back):
                changed_index = end_index + index
                if changed_index < len(topological_plan):
                    topological_plan[changed_index] = (topological_plan[changed_index][0], command)

        return topological_plan
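
A table-driven sketch of the RoadOption-to-command mapping used by
_get_current_direction above, with identical semantics (LANEFOLLOW and any
unknown option fall back to 2.0). It reuses the RoadOption enum the example
already relies on.

ROAD_OPTION_TO_COMMAND = {
    RoadOption.LEFT: 3.0,
    RoadOption.RIGHT: 4.0,
    RoadOption.STRAIGHT: 5.0,
}

def direction_from_option(option):
    return ROAD_OPTION_TO_COMMAND.get(option, 2.0)
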
class CoILAgent(Agent):
    def __init__(self, checkpoint):

        #experiment_name='None', driver_conf=None, memory_fraction=0.18,
        #image_cut=[115, 510]):

        # use_planner=False,graph_file=None,map_file=None,augment_left_right=False,image_cut = [170,518]):

        Agent.__init__(self)
        # This should likely come from global
        #config_gpu = tf.ConfigProto()
        #config_gpu.gpu_options.visible_device_list = '0'

        #config_gpu.gpu_options.per_process_gpu_memory_fraction = memory_fraction
        #self._sess = tf.Session(config=config_gpu)

        # THIS DOES NOT WORK FOR FUSED PLUS LSTM
        #if self._config.number_frames_sequenced > self._config.number_frames_fused:
        #    self._config_train.batch_size = self._config.number_frames_sequenced
        #else:
        #    self._config_train.batch_size = self._config.number_frames_fused

        #self._train_manager = load_system(self._config_train)
        #self._config.train_segmentation = False
        self.model = CoILModel(g_conf.MODEL_NAME)

        self.model.load_state_dict(checkpoint['state_dict'])

        self.model.cuda()

        #self.model.load_network(checkpoint)

        #self._sess.run(tf.global_variables_initializer())

        #self._control_function = getattr(machine_output_functions,
        #                                 self._train_manager._config.control_mode)
        # More elegant way to merge with autopilot
        #self._agent = Autopilot(ConfigAutopilot(driver_conf.city_name))

        #self._image_cut = driver_conf.image_cut
        #self._auto_pilot = driver_conf.use_planner

        #self._recording = False
        #self._start_time = 0

    def run_step(self, measurements, sensor_data, directions, target):

        # pos = (rewards.player_x,rewards.player_y,22)
        # ori =(rewards.ori_x,rewards.ori_y,rewards.ori_z)
        # pos,point = self.planner.get_defined_point(pos,ori,(target[0],target[1],22),(1.0,0.02,-0.001),self._select_goal)
        # direction = convert_to_car_coord(point[0],point[1],pos[0],pos[1],ori[0],ori[1])
        # image_filename_format = '_images/episode_{:0>3d}/{:s}/image_{:0>5d}.png'

        # sys.stdout = open(str("direction" + ".out", "a", buffering=1))
        #control_agent = self._agent.run_step(measurements, None, target)
        print(" RUnning STEP ")
        speed = torch.cuda.FloatTensor(
            [measurements.player_measurements.forward_speed]).unsqueeze(0)
        print("Speed shape ", speed)
        directions_tensor = torch.cuda.LongTensor([2])
        print("dir", directions_tensor)
        model_outputs = self.model.forward_branch(
            self._process_sensors(sensor_data), speed, directions_tensor)

        print(model_outputs)

        steer, throttle, brake = self._process_model_outputs(
            model_outputs[0], measurements.player_measurements.forward_speed)
        #control = self.compute_action(,
        #                              ,
        #                              directions)
        control = carla_protocol.Control()
        control.steer = steer
        control.throttle = throttle
        control.brake = brake
        # if self._auto_pilot:
        #    control.steer = control_agent.steer
        # TODO: adapt the client side agent for the new version. ( PROBLEM )
        #control.throttle = control_agent.throttle
        #control.brake = control_agent.brake

        # TODO: maybe change to a more meaningful message?
        return control

    def _process_sensors(self, sensors):

        iteration = 0
        for name, size in g_conf.SENSORS.items():

            sensor = sensors[name].data[
                g_conf.IMAGE_CUT[0]:g_conf.IMAGE_CUT[1], ...]
            # if  sensors[name].type == 'SemanticSegmentation':
            #
            #
            # 	# TODO: the camera name has to be synchronized with what is in the experiment...
            # 	sensor = join_classes(sensor)
            #
            # 	sensor = sensor[:, :, np.newaxis]
            #
            # 	image_transform = transforms.Compose([transforms.ToTensor(),
            # 					   transforms.Resize((size[1], size[2]), interpolation=Image.NEAREST),
            # 					   iag.ToGPU(), iag.Multiply((1 / (number_of_seg_classes - 1)))])
            # else:

            plt.figure(1)
            plt.subplot(1, 2, 1)
            plt.imshow(sensor)

            print("Sensor size:", sensor.shape)  #300 x 800 x 3
            sensor = np.transpose(sensor, (2, 0, 1))
            # image = resize(image,[self._image_size2,self._image_size1])
            print("begin transform Sensor size:", sensor.shape)  #300 x 800 x 3
            print("orginal sensor", sensor[0][0][:10])
            image_transform = transforms.Compose([
                transforms.ToPILImage(),
                transforms.Resize((size[1], size[2])),
                transforms.ToTensor(),
                iag.ToGPU()
            ])

            # print ("Sensor Olala")
            # sensor = np.transpose(sensor, (2, 0, 1))
            # print (sensor.shape)

            # sensor = np.swapaxes(sensor, 0, 1) #800 x 300 x 3
            # print ("Sensor Previous SHape")
            # print (sensor.shape)
            # sensor = np.transpose(sensor, (2, 1, 0)) #3 x 300 x 800
            # print ("Sensor Previous SHape PT2")
            # print (sensor.shape)
            # if iteration == 0:
            image_input = image_transform(sensor)
            print("After transform", image_input.size())

            # print("After making to numpy", img_np.shape)
            # image_input = np.transpose(img_np, (2, 0, 1))
            # print("1st transform", image_input.shape)

            img_np = image_input.cpu().numpy()
            print("img np pix", img_np[0][0][:10])
            img_np = np.uint8(np.transpose(img_np, (1, 2, 0)))
            print("2nd transform", img_np.shape)
            plt.subplot(1, 2, 2)
            plt.imshow(img_np)
            print("Before div by 255", image_input[0][0][:10])

            image_input = image_input / 255.0
            print("After div by 255", image_input[0][0][:10])
            # else:
            # 	image_input = torch.cat((image_input, sensor), 0)

            # iteration += 1
            # print("New shape", image_input.size())
            # img_np = image_input.cpu()
            # img_np = img_np.numpy()*255
            # print("Newew shape", img_np.shape)
            # img_np = np.uint8(np.transpose(img_np, (1,2,0)))
            # # img_np = np.uint8((image_input[0].cpu()).numpy() * 255)
            # # print (img_np.shape)
            # plt.subplot(1, 3, 3)
            # plt.imshow(img_np)
            plt.show()

        print(image_input.shape)
        image_input = image_input.unsqueeze(0)
        print(image_input.shape)

        return image_input

    def _process_model_outputs(self, outputs, speed):
        """
		 A bit of heuristics in the control, to eventually make car faster, for instance.
		Returns:

		"""
        steer, throttle, brake = outputs[0], outputs[1], outputs[2]
        # if brake < 0.2:
        # 	brake = 0.0
        #
        # if throttle > brake:
        # 	brake = 0.0
        # else:
        # 	throttle = throttle * 2
        # if speed > 35.0 and brake == 0.0:
        # 	throttle = 0.0

        return steer, throttle, brake

    """

	def compute_action(self, sensors, speed, direction):

		capture_time = time.time()


		sensor_pack = []



		for i in range(len(sensors)):

			sensor = sensors[i]

			sensor = sensor[g_conf.IMAGE_CUT[0]:g_conf.IMAGE_CUT[1], :]

			if g_conf.param.SENSORS.keys()[i] == 'rgb':

				sensor = scipy.misc.imresize(sensor, [self._config.sensors_size[i][0],
													  self._config.sensors_size[i][1]])


			elif g_conf.param.SENSORS.keys()[i] == 'labels':

				sensor = scipy.misc.imresize(sensor, [self._config.sensors_size[i][0],
													  self._config.sensors_size[i][1]],
											 interp='nearest')

				sensor = join_classes(sensor) * int(255 / (number_of_seg_classes - 1))

				sensor = sensor[:, :, np.newaxis]

			sensor_pack.append(sensor)

		if len(sensor_pack) > 1:

			image_input = np.concatenate((sensor_pack[0], sensor_pack[1]), axis=2)

		else:
			image_input = sensor_pack[0]

		image_input = image_input.astype(np.float32)
		image_input = np.multiply(image_input, 1.0 / 255.0)


		image_input = sensors[0]

		image_input = image_input.astype(np.float32)
		image_input = np.multiply(image_input, 1.0 / 255.0)
		# TODO: This will of course depend on the model , if it is based on sequences there are
		# TODO: different requirements
		#tensor = self.model(image_input)
		outputs = self.model.forward_branch(image_input, speed, direction)



		return control  # ,machine_output_functions.get_intermediate_rep(image_input,speed,self._config,self._sess,self._train_manager)

	"""
    """