Example #1
def main():
    "Loads the config file in sys.argv[1], and runs the experiment."
    config = get_config()
    experiment = ExperimentHandler(config)

    # for tmux sessions: set the pane name
    print("\033]2;%s\033\\" % config['experiment_name'])

    experiment.run()
    print("Experiment concluded.")
Example #2
def run_or_continue(experiment_name):
    config = get_config(experiment_name=experiment_name)

    # check whether there are already instances
    try:
        instance_pkl = get_latest_instance(config['output_folder'] + config['experiment_name'] + '/')
        print("Continuing experiment %s" % instance_pkl)
        continue_experiment_pkl(instance_pkl)
    except Exception:
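        # no previous instance was found: start a new one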
        experiment = ExperimentHandler(config)
        print("Starting new instance of experiment %s" % config['experiment_name'])
        experiment.run()
    print("Experiment concluded.")
Example #3
def continue_experiment_pkl(filename):
    experiment = ExperimentHandler.load_experiment_from_file(filename)

    # # just for debugging now
    # with experiment:
    #     experiment._logger.print("[ WARNING | DEBUG ] single-element train/test sets", Logger.MESSAGE_WARNING )
    # loader = experiment._data_loader

    # loader.split_limits['train'] = 1
    # loader.split_limits['test'] = 1

    experiment.run()
Example #4
def continue_experiment(folder, instance=None):
    if instance is None:
        instance = get_latest_instance(folder)
    experiment = ExperimentHandler.load_experiment_from_file(
        os.path.join(folder, instance))

    # # just for debugging now
    # with experiment:
    #     experiment._logger.print("[ WARNING | DEBUG ] single-element train/test sets", Logger.MESSAGE_WARNING )
    # loader = experiment._data_loader

    # loader.split_limits['train'] = 1
    # loader.split_limits['test'] = 1

    experiment.run()
Example #5
    def __init__(self, F=8, depth_scale=1, scale_augmentation=2.0, file=None):
        super().__init__()
        gpu = torch.device('cuda')

        self.depth_scale = depth_scale
        self.scale_augmentation = scale_augmentation

        if file is not None:
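            # reuse the UNet and sigmoid from the saved experiment instead of building new ones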
            pt = ExperimentHandler.load_experiment_from_file(file).network
            # classification between good/bad pixels using a UNet
            self.process = pt.process
            self.sigmoid = pt.sigmoid
            return

        # classification between good/bad pixels using a UNet
        self.process = UNet(4, F, 1, depth=2, batchnorms=False).to(gpu)
        self.sigmoid = torch.nn.Sigmoid().to(gpu)
Example #6
elements = Adapter().split['test']

combos = list(itertools.product(experiment_list, elements))

if len(sys.argv) > 1:
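    # a single combo index was passed on the command line: run only that combination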
    idx = int(sys.argv[1])
    indices = [idx, ]
    combos = combos[idx:idx+1]
else:
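    # no index given: run all combinations in a random order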
    from random import shuffle
    indices = list(range(len(combos)))
    shuffle(indices)
    combos = [combos[number] for number in indices]

for number, combo in enumerate(combos):
    base_experiment = combo[0]
    element = combo[1]
    print("Doing %d/%d: %s -- %d" % (indices[number], len(indices), base_experiment, element))

    try:
        experiment_filename = get_latest_instance_base(base_experiment)
    except ValueError:
        print("Error getting updated instance for %s" % base_experiment)
        continue

    handler = ExperimentHandler.load_experiment_from_file(experiment_filename)
    # turn off scale augmentation if it is turned on
    handler.network.scale_augmentation = 1

    evaluate_experiment(handler, base_experiment, element)
Example #7
if args.dataset == 'reddit':
    dataset = RedditDataset(data_root=data_root,
                            conversation_length=args.conversation_length)
elif args.dataset == 'cran':
    dataset = CranDataset(data_root=data_root,
                          conversation_length=args.conversation_length)
else:
    raise Exception('Invalid dataset selected.')

dataset.init()
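# build an index over the dataset and a searcher over the resulting index directory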
indexer = Indexer(data_root=data_root,
                  encoder=encoder,
                  dataset=dataset,
                  window_size=args.window_size)
searcher = Searcher(indexer.indexer_dir,
                    encoder=encoder,
                    top_results=5,
                    comparison_func=compare_func)
experiment = ExperimentHandler(data_root=data_root,
                               dataset=dataset,
                               indexer=indexer,
                               searcher=searcher)

run_result = experiment.run()
results_dir = os.path.join(data_root, 'results')
os.makedirs(results_dir, exist_ok=True)

filename = '{}_{}_{}_{}_{}.pkl'.format(dataset.name, encoder.name,
                                       args.conversation_length.lower(),
                                       args.window_size, args.compare_func)
with open(os.path.join(results_dir, filename), 'wb') as results_file:
    pickle.dump(run_result, results_file)
Example #8
def main():
    with Logger("", log_to_file=False) as debug_log:
        handlers = {}
        debug_log.print("Loading experiments...")
        for ablation in ablations:
            pkl_file = get_latest_instance(os.path.join(base_folder, ablation))
            handlers[ablation] = ExperimentHandler.load_experiment_from_file(
                pkl_file)
            handlers[ablation].network.scale_augmentation = 1.0
        debug_log.print("Experiments loaded.")

        # we'll assume for now that basic parameters are shared
        handler = handlers[ablations[0]]
        center_views = handler._data_loader.adapter.valid_centerviews
        elements = sorted(handler._data_loader.adapter._all_elements())

        random.shuffle(elements)

        skip_element = False

        with torch.no_grad():
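            # inference only: no gradients are needed while generating the depth maps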
            for idx, element in enumerate(tqdm(elements, miniters=1)):
                print(element)
                for ablation in ablations:
                    if skip_element:
                        skip_element = False
                        break
                    print(f"  {ablation}")
                    data_fcn = handlers[ablation]._config[
                        'data_loader_options']['data_function']
                    for center_view in center_views:
                        output_folder = os.path.join(
                            handlers[ablation]._data_loader.adapter.datapath,
                            ablation, "depth", "Depth", "%.2f" %
                            handlers[ablation]._data_loader.adapter.im_scale,
                            "scan%d" % element)
                        out_name = os.path.join(
                            output_folder, "rect_%03d_points" % (center_view))
                        if os.path.exists(out_name + ".trust.png"):
                            continue
                        data = data_fcn(handlers[ablation]._data_loader,
                                        element,
                                        center_view=center_view)
                        in_data = [x.cuda() for x in data[0]]
                        try:
                            ensure_dir(output_folder)
                        except FileExistsError:
                            skip_element = True
                            print(
                                "Skipping due to directory creation collision")
                            break

                        out = handlers[ablation].network(*in_data)
                        refined_depth = out[0].detach().cpu().numpy().squeeze()
                        depth_trust = out[1].detach().cpu().numpy().squeeze()
                        np.save(out_name + ".npy", refined_depth)
                        cv2.imwrite(out_name + ".png",
                                    color_depth_map(refined_depth))
                        np.save(out_name + ".trust.npy", depth_trust)
                        cv2.imwrite(
                            out_name + ".trust.png",
                            color_error_image(
                                np.power(2.0, 4 - depth_trust * 8)))
                    handlers[ablation]._data_loader.cache.clear()
                print("...finished")
Example #9
for depth_scale in depth_scales:
    print("Scaling world by a factor %f" % depth_scale)
    for experiment in experiments:
        run_results = [
            [images0, geo_estimates0, geo_estimates0, cameras0],
        ]
        runs = experiments[experiment]
        for run_idx, run in enumerate(runs):
            run = get_latest_instance_base(run)
            images = run_results[run_idx][0]
            estimates = run_results[run_idx][1]
            trusts = run_results[run_idx][2]
            cameras = run_results[run_idx][3]
            results = [images, [], [], cameras]
            with torch.no_grad():
                handler = ExperimentHandler.load_experiment_from_file(run)
                handler.network.depth_scale = depth_scale
                handler.network.scale_augmentation = 1.0

                for image_idx in views:
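                    # reorder the inputs so that the current view comes first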
                    order = list(range(len(images)))
                    order[0], order[image_idx] = order[image_idx], order[0]
                    oimages = [images[idx] for idx in order]
                    oestimates = [estimates[idx] for idx in order]
                    otrusts = [trusts[idx] for idx in order]
                    ocameras = [cameras[idx] for idx in order]
                    image_tensor = torch.cat(oimages, dim=1)
                    estimate_tensor = torch.cat(oestimates, dim=1)
                    trust_tensor = torch.cat(otrusts, dim=1)
                    camera_tensor = torch.cat(ocameras, dim=1)
                    if run_idx == 0:
Example #10
    def __init__(self,
                 F=8,
                 local_network=None,
                 depth_scale=1,
                 scale_augmentation=1.0,
                 file=None,
                 refinement_only=False,
                 reset_trust=True):
        super().__init__()

        gpu = torch.device('cuda')

        if file is not None:
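            # restore F and all sub-networks from the experiment saved in 'file'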
            pt = ExperimentHandler.load_experiment_from_file(file).network
            self.F = pt.F
            self.depth_scale = depth_scale
            self.scale_augmentation = scale_augmentation

            if local_network is not None:
                self.local_network = ExperimentHandler.load_experiment_from_file(
                    local_network).network
                self.local_network.scale_augmentation = 1
            else:
                self.local_network = pt.local_network

            self.shared_feature_network = pt.shared_feature_network
            self.inpainting_network = pt.inpainting_network

            # the refinement part: a relatively local (small depth) network used as a residual on the input depth
            self.refinement_network = pt.refinement_network

            # classification between the two options: refinement and inpainting
            self.choice_network = pt.choice_network

            if reset_trust:
                self.trust_network = UNet(2 * self.F + 17,
                                          2 * self.F,
                                          1,
                                          depth=4,
                                          batchnorms=False).to(gpu)
            else:
                # the trust network: how much do we trust our proposed refinement?
                self.trust_network = pt.trust_network

            self.softmax = pt.softmax
            self.sigmoid = pt.sigmoid

            # for curriculum learning: when do we turn the classification on?
            self.do_classification = pt.do_classification

            # for curriculum learning: when do we turn the refinement on?
            self.do_refinement = pt.do_refinement

            try:
                self.refinement_only = pt.refinement_only
            except AttributeError:
                self.refinement_only = False

            return

        if local_network is None:
            raise UserWarning("Please pass a pre-trained local trust network!")

        self.local_network = ExperimentHandler.load_experiment_from_file(
            local_network).network
        self.local_network.eval()
        self.local_network.to(gpu)
        self.local_network.scale_augmentation = 1
        self.scale_augmentation = scale_augmentation
        self.F = F
        self.depth_scale = depth_scale

        # just calculating some shared features for the various heads
        self.shared_feature_network = UNet(16,
                                           F,
                                           2 * F,
                                           depth=3,
                                           batchnorms=False).to(gpu)

        # the inpainting part: large depth, for huge spatial support so we can easily figure out exactly how to inpaint
        self.inpainting_network = UNet(2 * F + 16,
                                       2 * F,
                                       1,
                                       depth=5,
                                       batchnorms=False).to(gpu)

        # the refinement part: a relatively local (small depth) network used as a residual on the input depth
        self.refinement_network = UNet(2 * F + 16,
                                       2 * F,
                                       1,
                                       depth=1,
                                       batchnorms=False).to(gpu)

        # classification between the two options: refinement and inpainting
        self.choice_network = UNet(2 * F + 18,
                                   2 * F,
                                   2,
                                   depth=3,
                                   batchnorms=False).to(gpu)

        # the trust network: how much do we trust our proposed refinement?
        self.trust_network = UNet(2 * F + 17,
                                  2 * F,
                                  1,
                                  depth=4,
                                  batchnorms=False).to(gpu)

        self.softmax = torch.nn.Softmax2d().to(gpu)
        self.sigmoid = torch.nn.Sigmoid().to(gpu)

        # for curriculum learning: when do we turn the classification on?
        self.do_classification = False

        # for curriculum learning: when do we turn the refinement on?
        self.do_refinement = False

        # for ablations: only refinement
        self.refinement_only = refinement_only
Example #11
            module = 'network_architectures.DepthMapInitialTrust'
        elif module == 'experiments.local_depthtrust':
            module = 'experiments.initial_depthtrust'
        if name == 'DepthMapRefinement_v2_successive':
            name = 'DepthMapSuccessiveRefinement'
        elif name == 'DepthMapRefinement_v2':
            name = 'DepthMapInitialRefinement'
        elif name == 'DepthTrustLocal_v3':
            name = 'DepthMapInitialTrust'

        return super().find_class(module, name)


models = sorted(glob(os.path.join(input_folder, "*/*/experiment_state_*.pkl")))

for model in models:
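    # load with the custom unpickler so that legacy module and class names resolve to their current equivalents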
    handler = ExperimentHandler.load_experiment_from_file(
        model, folder_override=input_folder, unpickler=MYTHUnpickler)
    handler._config['output_folder'] = output_folder
    handler.output_path = os.path.join(output_folder,
                                       handler.get_experiment_identifier())
    handler._logger.log_path = handler._logger.log_path.replace(
        input_folder, output_folder)
    # we also clear the optimization state, to save on space (~factor 3)
    handler.optimizer.state = defaultdict(dict)
    handler.save_state_to_file()

    # quick test: load this new one
    handler = ExperimentHandler.load_experiment_from_file(
        model.replace(input_folder, output_folder))

    print("processed %s" % (model))