Example #1
def run():
    wrapper = Wrapper.Wrapper(Robot2I013())
    stratIA = StrategieIA.StrategieIA(wrapper,400.)
    fps = 60

    t2 = threading.Thread(target=updateStrats, args=(stratIA, fps))
    t2.start()
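
Example #1 (and Examples #4–#6 below) hands a strategy object and a frame rate to an `updateStrats` helper running on a background thread. That helper is defined elsewhere in the project and is not shown here; a purely illustrative sketch of such a fixed-rate loop, with the `start`/`step`/`stop` method names being assumptions, could be:

import time

def updateStrats(strat, fps):
    # Illustrative only: the real updateStrats lives elsewhere in this project
    # and its strategy interface may differ from the start/stop/step assumed here.
    strat.start()                # assumed: initialise the strategy
    while not strat.stop():      # assumed: stop() reports whether the strategy is done
        strat.step()             # assumed: advance the strategy by one tick
        time.sleep(1. / fps)     # pace the loop at roughly `fps` iterations per second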
Example #2
    def test_rotation(self):
        degreParSecondeVoulu = random.uniform(0, 50)
        vitesse = random.randint(0, 500)
        rIRL = RobotIRLInterface.RobotIRLInterface(None)
        w = Wrapper.Wrapper(rIRL)
        w._rotation = degreParSecondeVoulu
        self.assertTrue(w._rotation == degreParSecondeVoulu)
Example #3
    def test_contruct_Wrapper(self):
        rIRL = RobotIRLInterface.RobotIRLInterface(None)
        w = Wrapper.Wrapper(rIRL)

        self.assertTrue(w.robotIRL == rIRL)
        self.assertTrue(w._vitesse == 0)
        self.assertTrue(w.rayon_roue == w.robotIRL.WHEEL_DIAMETER * 10e-1 / 2.)
        self.assertTrue(w.rayon_robot == w.robotIRL.WHEEL_BASE_WIDTH * 10e-1 / 2.)
        self.assertTrue(w.lastRotation == (None, None))
Example #4
def run(cote):
    wrapper = Wrapper.Wrapper(Robot2I013())
    startAvancer = StrategieAvancerDroitIRL.StrategieAvancerDroitIRL(wrapper, 70., 15.)
    startTourner = StrategieTournerIRL.StrategieTournerIRL(wrapper, 0., 0.)
    stratCarre = StrategiePolygone.StrategiePolygone(startAvancer, startTourner, int(cote))

    fps = 60

    t2 = threading.Thread(target=updateStrats, args=(stratCarre, fps))
    t2.start()
Example #5
def run():
    wrapper = Wrapper.Wrapper(Robot2I013())
    vitessemax = 600.
    stratADM = StrategieAvancerDroitMaxIRL2.StrategieAvancerDroitMaxIRL2(
        wrapper, vitessemax)

    fps = 60

    t2 = threading.Thread(target=updateStrats, args=(stratADM, fps))
    t2.start()
Example #6
def run():
    wrapper = Wrapper.Wrapper(Robot2I013())
    time.sleep(3)
    startAvancer = StrategieAvancerDroitMaxIRL2.StrategieAvancerDroitMaxIRL2(wrapper, 600.)
    startTourner = StrategieTournerIRL.StrategieTournerIRL(wrapper, 40., 40.)
    stratBalise = StrategieSuivreBalise.StrategieSuivreBalise(startAvancer, startTourner)

    fps = 60

    t2 = threading.Thread(target=updateStrats, args=(stratBalise, fps))
    t2.start()
Example #7
def main_func(cfg: DictConfig) -> None:
  print("Testing model {}".format(cfg.model_artifact))

  model_artifact = wandb_logger.experiment.use_artifact(cfg.model_artifact)
  model_path = model_artifact.download()

  model = Wrapper.load_from_checkpoint(os.path.join(model_path, cfg.model_filename))

  trainer = pl.Trainer(gpus=1, logger=wandb_logger)

  test_data_artifact = wandb_logger.experiment.use_artifact(cfg.test_data_artifact)
  test_dataset = test_data_artifact.download()
  test_data_module = VernierDataModule(
      os.path.join(test_dataset, cfg.test_data_filename),
      cfg.batch_size,
      head_n=cfg.head_n,
      test_data_path=os.path.join(test_dataset, cfg.test_data_filename),
      ds_transform=[TimeShuffle(), ToTensor(cfg.is_channels_last)])

  trainer.test(model, datamodule=test_data_module)
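
Examples #7, #8 and #10 all assume a module-level `wandb_logger` and a Hydra-managed `cfg`; neither is created inside the functions shown. A minimal wiring sketch (the project name, config path and config name are placeholders, not taken from the original code) would be:

import hydra
from omegaconf import DictConfig
from pytorch_lightning.loggers import WandbLogger

# Placeholder names: the real project/config values are not shown in these examples.
wandb_logger = WandbLogger(project="sqm-experiments")

@hydra.main(config_path="conf", config_name="test_config")
def main_func(cfg: DictConfig) -> None:
    ...  # body as in Example #7 or #8

if __name__ == "__main__":
    main_func()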
Example #8
def main_func(cfg: DictConfig) -> None:
    print("Testing model {}".format(cfg.model_artifact))

    model_artifact = wandb_logger.experiment.use_artifact(cfg.model_artifact)
    model_path = model_artifact.download()

    #dataset_artifact = wandb_logger.experiment.use_artifact('videos_sqm_V-AV3:v0')
    #dataset_path = dataset_artifact.download()
    #print("Download done!")

    #data_module = ExperimentDataModule(os.path.join(dataset_path, 'video_data.hdf5'))

    model = Wrapper.load_from_checkpoint(
        os.path.join(model_path, cfg.model_filename))

    pv_conditions = ['V-PV{}'.format(n) for n in range(1, 13)]
    av_conditions = ['V-AV{}'.format(n) for n in range(1, 13)]
    conditions = pv_conditions + av_conditions

    pv_accuracy = []
    av_accuracy = []
    pv_cross_entropy = []
    av_cross_entropy = []

    def ig_attribution():
        test_batch_maker = BatchMaker('sqm',
                                      1,
                                      1,
                                      13, (64, 64, 3),
                                      'V-PV1',
                                      random_start_pos=False,
                                      random_size=False)
        batches_frames, batches_label = test_batch_maker.generate_batch()
        batches_frames = [
            torch.from_numpy(
                np.moveaxis(batch_frames, -1, 1).astype('float32'))
            for batch_frames in batches_frames
        ]
        images = torch.stack(batches_frames, 2)
        model.eval()
        baseline = torch.zeros_like(images)
        ig = IntegratedGradients(model)
        batches_label = batches_label.tolist()
        attributions = ig.attribute(images, baseline, target=batches_label)
        print('IG attributions:', attributions)
        for frame in range(13):
            display_image = images[0, :, frame, :, :].int()
            display_image = torch.transpose(display_image, 0, 2)
            print(display_image)
            plt.axis('off')
            plt.imshow(display_image)
            plt.show()
            frame_attrib = attributions[0, :, frame, :, :]
            max_attrib = torch.max(frame_attrib)
            min_attrib = torch.min(frame_attrib)
            frame_attrib = (frame_attrib - min_attrib) / (max_attrib -
                                                          min_attrib)
            frame_attrib = torch.transpose(frame_attrib, 0, 2)
            plt.imshow(frame_attrib)
            #plt.hist(frame_attrib.numpy().flatten())
            plt.show()
        frame_attr = np.sum(attributions.numpy(), axis=(0, 1, 3, 4))
        print("Frame attr", type(frame_attr), frame_attr)

    #ig_attribution()

    def test_batch(batch_maker, log_input=False, n_seq_log=4):
        batches_frames, batches_label = batch_maker.generate_batch()

        batches_frames = [
            torch.from_numpy(
                np.moveaxis(batch_frames, -1, 1).astype('float32'))
            for batch_frames in batches_frames
        ]

        images = torch.stack(batches_frames, 2)

        # B x C x T x H x W
        model_predictions = model(images)

        if log_input:
            # Log the test images
            video_sample = images.detach().cpu()[:n_seq_log].transpose(
                1, 2).numpy().astype('uint8')
            wandb_logger.experiment.log(
                {'video sample': wandb.Video(video_sample)})  # commit = False

        # If pro-vernier, should be reinforced toward ground truth
        # If anti-vernier, should be reinforced toward opposite of ground truth

        softmaxed = torch.nn.functional.softmax(model_predictions, dim=1)
        softmaxed = softmaxed.detach().numpy()
        prediction_label = np.argmax(softmaxed, axis=1)

        accuracy = sum(
            prediction_label == batches_label) / len(prediction_label)

        cross_entropy = float(
            torch.nn.functional.cross_entropy(
                model_predictions,
                torch.from_numpy(batches_label).type(torch.LongTensor)))

        return accuracy, cross_entropy

    # Test baseline conditions (only one vernier in each frame)
    baseline_accuracies = []
    baseline_cross_entropies = []
    for frame in range(13):
        baseline_accuracy = 0
        baseline_cross_entropy = 0
        batch_maker = BatchMaker('sqm',
                                 1,
                                 cfg.batch_size,
                                 13, (64, 64, 3),
                                 'V{}'.format(frame),
                                 random_start_pos=cfg.random_start_pos,
                                 random_size=cfg.random_size)
        for batch in range(cfg.n_batches):
            batch_accuracy, batch_cross_entropy = test_batch(
                batch_maker, log_input=cfg.log_test_data)
            baseline_accuracy += batch_accuracy
            baseline_cross_entropy += batch_cross_entropy
        baseline_accuracy = baseline_accuracy / cfg.n_batches
        baseline_cross_entropy = baseline_cross_entropy / cfg.n_batches

        baseline_accuracies.append(baseline_accuracy)
        baseline_cross_entropies.append(baseline_cross_entropy)

    for condition in pv_conditions:
        condition_accuracy = 0
        condition_cross_entropy = 0
        batch_maker = BatchMaker('sqm',
                                 1,
                                 cfg.batch_size,
                                 13, (64, 64, 3),
                                 condition,
                                 random_start_pos=cfg.random_start_pos,
                                 random_size=cfg.random_size)
        for batch in range(cfg.n_batches):
            batch_accuracy, batch_cross_entropy = test_batch(
                batch_maker, log_input=cfg.log_test_data)
            condition_accuracy += batch_accuracy
            condition_cross_entropy += batch_cross_entropy
        pv_accuracy.append(condition_accuracy / cfg.n_batches)
        pv_cross_entropy.append(condition_cross_entropy / cfg.n_batches)

    for condition in av_conditions:
        condition_accuracy = 0
        condition_cross_entropy = 0
        batch_maker = BatchMaker('sqm',
                                 1,
                                 cfg.batch_size,
                                 13, (64, 64, 3),
                                 condition,
                                 random_start_pos=cfg.random_start_pos,
                                 random_size=cfg.random_size)
        for batch in range(cfg.n_batches):
            batch_accuracy, batch_cross_entropy = test_batch(
                batch_maker, log_input=cfg.log_test_data)
            condition_accuracy += batch_accuracy
            condition_cross_entropy += batch_cross_entropy
        av_accuracy.append(condition_accuracy / cfg.n_batches)
        av_cross_entropy.append(condition_cross_entropy / cfg.n_batches)

    log_michael_plot(pv_accuracy, av_accuracy, baseline_accuracies)
    log_michael_plot_ce(pv_cross_entropy, av_cross_entropy,
                        baseline_cross_entropies)

    display_plot(pv_accuracy, av_accuracy, baseline_accuracies)
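
The `BatchMaker` constructor above is always called with the same positional arguments. Its definition is not part of these examples, so the annotations below are inferences from how the values are used here, not its real parameter names:

batch_maker = BatchMaker(
    'sqm',             # stimulus family used throughout these examples
    1,                 # second positional argument; its meaning is not visible here
    cfg.batch_size,    # sequences generated per batch
    13,                # frames per sequence (matches the range(13) loops above)
    (64, 64, 3),       # frame shape: height, width, channels
    condition,         # condition label, e.g. 'V-PV1' or 'V-AV1'
    random_start_pos=cfg.random_start_pos,
    random_size=cfg.random_size)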
Example #9
    def test_vitesse(self):
        vitesse = random.randint(0, 500)
        rIRL = RobotIRLInterface.RobotIRLInterface(None)
        w = Wrapper.Wrapper(rIRL)
        w._vitesse = vitesse
        self.assertTrue(w._vitesse == vitesse)
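
Examples #2, #3 and #9 are methods taken out of a unittest test class; the surrounding class and runner are not shown. A minimal harness that would execute them, with the class name and import form being assumptions, is:

import random
import unittest

# Assumed import form; the real test module may import these differently.
import Wrapper
import RobotIRLInterface

class TestWrapper(unittest.TestCase):
    # test_rotation and test_contruct_Wrapper (Examples #2 and #3) would sit
    # alongside this method.
    def test_vitesse(self):
        vitesse = random.randint(0, 500)
        rIRL = RobotIRLInterface.RobotIRLInterface(None)
        w = Wrapper.Wrapper(rIRL)
        w._vitesse = vitesse
        self.assertTrue(w._vitesse == vitesse)

if __name__ == "__main__":
    unittest.main()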
Example #10
def main_func(cfg: DictConfig) -> None:
    #print(OmegaConf.to_yaml(cfg, resolve=True))

    print("Training model {} on task {} for maximum {} epochs".format(
        cfg.model.arch_id, cfg.rc.task, cfg.rc.n_epochs))

    wandb_logger.experiment.config.update({
        "num_epochs": cfg.rc.n_epochs,
        "batch_size": cfg.rc.batch_size
    })
    # TODO log the cfg.rc dictionary!

    train_data_artifact = wandb_logger.experiment.use_artifact(
        cfg.rc.train_data_artifact)
    train_dataset = train_data_artifact.download()

    if cfg.rc.separate_val:
        val_data_artifact = wandb_logger.experiment.use_artifact(
            cfg.rc.val_data_artifact)
        val_dataset = val_data_artifact.download()
        data_module = VernierDataModule(
            os.path.join(train_dataset, cfg.rc.train_data_filename),
            cfg.rc.batch_size,
            head_n=cfg.head_n,
            val_data_path=os.path.join(val_dataset, cfg.rc.val_data_filename),
            ds_transform=[ToTensor(cfg.rc.is_channels_last)],
            num_workers=cfg.num_workers)
    else:
        data_module = VernierDataModule(
            os.path.join(train_dataset, cfg.rc.train_data_filename),
            cfg.rc.batch_size,
            head_n=cfg.head_n,
            ds_transform=[ToTensor(cfg.rc.is_channels_last)],
            num_workers=cfg.num_workers)

    do_train = cfg.rc.do_train

    if cfg.load_model:
        input_model_artifact = wandb_logger.experiment.use_artifact(
            cfg.input_model_artifact)
        print("Loading model", cfg.input_model_artifact)
        model_path = input_model_artifact.download()
        model = Wrapper.load_from_checkpoint(
            os.path.join(model_path, cfg.model_filename),
            train_conv=do_train.train_conv,
            train_encoder=do_train.train_encoder,
            train_decoder=do_train.train_decoder)
    else:
        model = Wrapper(cfg.model.conv_module,
                        cfg.model.encoder_module,
                        cfg.model.decoder_module,
                        train_conv=do_train.train_conv,
                        train_encoder=do_train.train_encoder,
                        train_decoder=do_train.train_decoder)

    output_model_identifier = "{}_{}_{}".format(cfg.model.arch_id, cfg.rc.task,
                                                cfg.model_uuid)
    output_model_artifact = wandb.Artifact(
        "model_{}".format(output_model_identifier),
        type='model',
        metadata=OmegaConf.to_container(cfg, resolve=True))

    trainer = train_model(model, data_module, cfg.rc.n_epochs,
                          cfg.rc.val_every_n)

    trainer.save_checkpoint(cfg.model_filename)
    output_model_artifact.add_file(cfg.model_filename)

    wandb_logger.experiment.log_artifact(output_model_artifact)
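
Example #10 delegates the actual training loop to a `train_model` helper that is not included in these examples. A plausible minimal version built only from standard PyTorch Lightning calls (the argument mapping is an assumption) could be:

import pytorch_lightning as pl

def train_model(model, data_module, n_epochs, val_every_n):
    # Sketch only: the real helper may add callbacks, GPU settings, etc.
    trainer = pl.Trainer(max_epochs=n_epochs,
                         check_val_every_n_epoch=val_every_n,
                         logger=wandb_logger)   # module-level logger, as in Example #7
    trainer.fit(model, datamodule=data_module)
    return trainer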