def main():
    """CLI entry point: parse arguments and run the full inference pipeline."""
    argp = argparse.ArgumentParser()
    argp.add_argument("--data", required=True, help="Path to the inference folder.")

    # The four per-network weight options only differ in flag and help text.
    weight_options = [
        ("-s", "--shape_weights", "Path to the shape network weights."),
        ("-i", "--illumination_weights", "Path to the illumination network weights."),
        ("-b", "--brdf_weights", "Path to the brdf network weights."),
        ("-j", "--joint_weights", "Path to the joint refinement network weights."),
    ]
    for short_flag, long_flag, help_text in weight_options:
        argp.add_argument(short_flag, long_flag, required=True, help=help_text)

    argp.add_argument(
        "--gpu",
        help="Comma separated list of GPU(s) to use. -1 Runs training/inference on CPU.",
        default="-1",
        type=str,
    )
    argp.add_argument(
        "--linear",
        help="The images are in a linear color space. Otherwise sRGB is assumed",
        action="store_true",
    )
    argp.add_argument(
        "--hdr",
        help="The images are in a HDR format such as .exr or .hdr. Otherwise LDR PNG images are assumed",
        action="store_true",
    )
    opts = argp.parse_args()

    with change_gpu(opts.gpu):
        # NOTE(review): the negations suggest fullInference takes sRGB/LDR
        # flags rather than linear/HDR ones — confirm against its signature.
        fullInference(
            opts.data,
            opts.shape_weights,
            opts.illumination_weights,
            opts.brdf_weights,
            opts.joint_weights,
            not opts.linear,
            not opts.hdr,
        )
def get_parallel_runner_1(path):
    """Build two single-image predictors from the weights stored at *path*.

    Returns a tuple ``(func1, func2)``:
      * ``func1`` enqueues an image to a background
        ``MultiProcessQueuePredictWorker`` pinned to GPU 0; results arrive
        asynchronously on ``func1.outque``.
      * ``func2`` predicts synchronously in-process on GPU 1 via an
        ``OfflinePredictor`` and returns the ``resized_map`` output directly.
    """
    # allow_pickle=True is required on NumPy >= 1.16.3 to load a pickled
    # object array such as this parameter dict; without it np.load raises.
    param_dict = np.load(path, encoding='latin1', allow_pickle=True).item()
    cfg = PredictConfig(
        model=Model(),
        session_init=ParamRestore(param_dict),
        session_config=get_default_sess_config(0.99),
        input_names=['input'],
        output_names=['resized_map'],
    )
    inque = mp.Queue()
    outque = mp.Queue()
    with change_gpu(0):
        proc = MultiProcessQueuePredictWorker(1, inque, outque, cfg)
        proc.start()
    with change_gpu(1):
        pred1 = OfflinePredictor(cfg)

    def func1(img):
        # Task id 0, single datapoint wrapped as a batch of one input tensor.
        inque.put((0, [[img]]))
    func1.outque = outque

    def func2(img):
        return pred1([[img]])[0][0]
    return func1, func2
def train(self, args):
    """Run training with the parsed CLI *args*.

    Builds the dataflow and trainer, launches training, and on success
    exports a compact frozen model into ``args.save``.
    """
    self.args = args

    # Ensure the save path exists; exist_ok avoids the check-then-create
    # race of the previous os.path.exists() guard.
    os.makedirs(self.args.save, exist_ok=True)

    with change_gpu(self.args.gpu):
        train_df = self._dataflow()
        # Single-GPU (or CPU) runs use the simple trainer; otherwise
        # replicate synchronously across all visible GPUs.
        trainer = (SimpleTrainer()
                   if get_num_gpu() <= 1
                   else SyncMultiGPUTrainerReplicated(get_num_gpu()))
        print("Found %d gpus. Using trainer:" % get_num_gpu(), trainer)

        # Setup callbacks
        self._default_callbacks()
        try:
            launch_train_with_config(
                self.pred_config(self.args, train_df, self.callbacks),
                trainer)
        except Exception:
            # Best-effort: report the failure, but skip the model export.
            traceback.print_exc()
        else:
            # If everything worked, save a compact frozen model.
            self.export(os.path.join(self.args.save, "compact.pb"))
plt.title('Embedding using %s-loss' % algo_name) plt.savefig('%s.jpg' % algo_name) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.') parser.add_argument('--load', help='load model') parser.add_argument('-a', '--algorithm', help='used algorithm', required=True, choices=["siamese", "cosine", "triplet", "softtriplet", "center"]) parser.add_argument('--visualize', help='export embeddings into an image', action='store_true') args = parser.parse_args() ALGO_CONFIGS = {"siamese": SiameseModel, "cosine": CosineModel, "triplet": TripletModel, "softtriplet": SoftTripletModel, "center": CenterModel} logger.auto_set_dir(name=args.algorithm) with change_gpu(args.gpu): if args.visualize: visualize(args.load, ALGO_CONFIGS[args.algorithm], args.algorithm) else: config = get_config(ALGO_CONFIGS[args.algorithm], args.algorithm) if args.load: config.session_init = SaverRestore(args.load) else: launch_train_with_config(config, SimpleTrainer())
def run():
    """Start the Training/Testing"""
    import os
    # Log directory name is built from whichever of cmd/depth/out are set.
    log_name = [str(el) for el in [config.cmd, config.depth, config.out] if el]
    # "k" = keep an existing log dir without asking; None prompts the user.
    log_action = "k" if not config.log_ask else None
    # NOTE(review): auto_set_dir receives a *list* as name — confirm it
    # accepts one (or joins it) rather than expecting a plain string.
    auto_set_dir(name=log_name, action=log_action)
    with change_gpu(config.gpu):
        if config.npz_input_folder:
            # setup dataflow and get some data to know the height and width
            ds = setup_npz_dataflow(config.npz_input_folder)
            ds.reset_state()
            ds_iter = iter(ds)
            datapoint = next(ds_iter)
            # NOTE(review): height from datapoint[1][0] but width from
            # datapoint[0].shape[2] — asymmetric; presumed intentional given
            # the npz layout, but verify against setup_npz_dataflow.
            config.C_HEIGHT = datapoint[1][0]
            config.C_WIDTH = datapoint[0].shape[2]
            # Rebuild the dataflow so training starts from a fresh iterator.
            del ds, ds_iter
            ds = setup_npz_dataflow(config.npz_input_folder)
        elif config.imagereader_image:
            ds = setup_imagereader_dataflow()
        else:
            ds = setup_dataflow(config.input_folder, config.range,
                                config.random_data)
        # setup validation dataflow if requested
        valid_ds = setup_npz_dataflow(
            config.npz_validation_input_folder,
            prefetch=False) if config.npz_validation_input_folder else None
        if config.validation_input_folder:
            # A plain-folder validation set overrides the npz one.
            valid_ds = setup_dataflow(config.validation_input_folder)
            valid_ds.reset_state()
        model = None
        # A model object is needed except when testing a frozen graph.
        if (config.cmd != "test") or not config.load_frozen:
            model = Model(config.depth, config.mode)
        # SAVE MODEL
        if config.cmd == "save":
            cm.save_model(config.model, model, config.to, config.compact)
        # TEST MODEL
        elif config.cmd == "test":
            # TODO Fix this. this is not working anymore atm.
            # if config.imgs:
            if False:
                cm.visualize(config.model, Model, config.visualize)
            else:
                cm.test(ds, config.model, model)
        # TRAIN MODEL
        else:
            # write config to model directory
            config_file = os.path.join(tp.logger.get_logger_dir(),
                                       "settings.conf")
            with open(config_file, "w") as f:
                config.write(f)
            tp_config = get_config(ds, model, valid_ds=valid_ds)
            if config.load:
                # Resume/warm-start from the given checkpoint.
                tp_config.session_init = tp.SaverRestore(config.load)
            # trainer = tp.SyncMultiGPUTrainerReplicated(max(get_num_gpu(), 1))
            trainer = tp.SimpleTrainer()
            tp.launch_train_with_config(tp_config, trainer)
def main():
    """CLI entry point: run one translation stage (and its render step)."""
    cli = argparse.ArgumentParser()
    cli.add_argument(
        "stage",
        choices=["shape", "illumination", "brdf", "joint"],
        help="Step selector",
    )
    cli.add_argument("--data", required=True, help="Path to the trainings data folder.")
    cli.add_argument(
        "-w",
        "--weights",
        required=True,
        help="Path to the corresponding network weights.",
    )
    cli.add_argument(
        "--gpu",
        help=
        "Comma separated list of GPU(s) to use. -1 Runs training/inference on CPU.",
        default="-1",
        type=str,
    )
    opts = cli.parse_args()

    # Per-stage dispatch: (network class, dataflow, inference stage, optional
    # render stage). Classes are stored uninstantiated so only the selected
    # network is ever constructed.
    stage_table = {
        "shape": (ShapeNet, Dataflows.SHAPE, InferenceStage.SHAPE, None),
        "illumination": (IllumNet, Dataflows.ILLUMINATION,
                         InferenceStage.ILLUMINATION, None),
        "brdf": (BrdfNet, Dataflows.BRDF, InferenceStage.BRDF,
                 InferenceStage.INITIAL_RENDERING),
        "joint": (JointNet, Dataflows.JOINT, InferenceStage.JOINT,
                  InferenceStage.FINAL_RENDERING),
    }

    with change_gpu(opts.gpu):
        net_cls, dataflow, step, render_step = stage_table[opts.stage]
        translate(opts.data, opts.weights, net_cls(), dataflow, step)
        # brdf/joint stages are followed by a rendering pass.
        if render_step is not None:
            infer.stepRender(opts.data, render_step)