def test_param_intersection(self):
    '''
    Test that intersecting parameters and input/output blobs are rejected
    '''
    m = self._create_model()
    with self.assertRaises(Exception):
        pe.PredictorExportMeta(
            predict_net=m.net,
            parameters=m.params,
            inputs=["data"] + m.params,
            outputs=["y"],
            shapes={"y": (1, 10), "data": (1, 5)},
        )

    with self.assertRaises(Exception):
        pe.PredictorExportMeta(
            predict_net=m.net,
            parameters=m.params,
            inputs=["data"],
            outputs=["y"] + m.params,
            shapes={"y": (1, 10), "data": (1, 5)},
        )
def SaveModel(args, train_model, epoch): prefix = "[]_{}".format(train_model._device_prefix, train_model._devices[0]) predictor_export_meta = pred_exp.PredictorExportMeta( predict_net=train_model.net.Proto(), parameters=data_parallel_model.GetCheckpointParams(train_model), inputs=[prefix + "/data"], outputs=[prefix + "/softmax"], shapes={ prefix + "/softmax": (1, args.num_labels), prefix + "/data": (args.num_channels, args.image_size, args.image_size) } ) # save the train_model for the current epoch model_path = "%s/%s_%d.mdl" % ( args.file_store_path, args.save_model_name, epoch, ) # set db_type to be "minidb" instead of "log_file_db", which breaks # the serialization in save_to_db. Need to switch back to log_file_db # after migration pred_exp.save_to_db( db_type="minidb", db_destination=model_path, predictor_export_meta=predictor_export_meta, )
def test_meta_net_def_net_runs(self):
    for param, value in self.params.items():
        workspace.FeedBlob(param, value)

    extra_init_net = core.Net('extra_init')
    extra_init_net.ConstantFill('data', 'data', value=1.0)

    pem = pe.PredictorExportMeta(
        predict_net=self.predictor_export_meta.predict_net,
        parameters=self.predictor_export_meta.parameters,
        inputs=self.predictor_export_meta.inputs,
        outputs=self.predictor_export_meta.outputs,
        shapes=self.predictor_export_meta.shapes,
        extra_init_net=extra_init_net,
    )

    db_type = 'minidb'
    db_file = tempfile.NamedTemporaryFile(
        delete=False, suffix=".{}".format(db_type))
    pe.save_to_db(
        db_type=db_type,
        db_destination=db_file.name,
        predictor_export_meta=pem)

    workspace.ResetWorkspace()

    meta_net_def = pe.load_from_db(
        db_type=db_type,
        filename=db_file.name,
    )

    self.assertTrue("data" not in workspace.Blobs())
    self.assertTrue("y" not in workspace.Blobs())

    init_net = pred_utils.GetNet(meta_net_def, pc.PREDICT_INIT_NET_TYPE)

    # 0-fills external input blobs and runs extra_init_net
    workspace.RunNetOnce(init_net)

    self.assertTrue("data" in workspace.Blobs())
    self.assertTrue("y" in workspace.Blobs())

    print(workspace.FetchBlob("data"))
    np.testing.assert_array_equal(
        workspace.FetchBlob("data"), np.ones(shape=(1, 5)))
    np.testing.assert_array_equal(
        workspace.FetchBlob("y"), np.zeros(shape=(1, 10)))

    # Load parameters from DB
    global_init_net = pred_utils.GetNet(meta_net_def, pc.GLOBAL_INIT_NET_TYPE)
    workspace.RunNetOnce(global_init_net)

    # Run the net with a reshaped input and verify we are
    # producing good numbers (with our custom implementation)
    workspace.FeedBlob("data", np.random.randn(2, 5).astype(np.float32))
    predict_net = pred_utils.GetNet(meta_net_def, pc.PREDICT_NET_TYPE)
    workspace.RunNetOnce(predict_net)
    np.testing.assert_array_almost_equal(
        workspace.FetchBlob("y"),
        workspace.FetchBlob("data").dot(self.params["y_w"].T) +
        self.params["y_b"])
def SaveModel(args, train_model, epoch): prefix = "gpu_{}".format(train_model._devices[0]) predictor_export_meta = pred_exp.PredictorExportMeta( predict_net=train_model.net.Proto(), parameters=GetCheckpointParams(train_model), inputs=[prefix + "/data"], outputs=[prefix + "/softmax"], shapes={ prefix + "/softmax": (1, args.num_labels), prefix + "/data": (args.num_channels, args.clip_length_of if args.input_type else args.clip_length_rgb, args.crop_size, args.crop_size) }) # save the train_model for the current epoch model_path = "%s/%s_%d.mdl" % ( args.file_store_path, args.save_model_name, epoch, ) # save the model pred_exp.save_to_db( db_type='minidb', db_destination=model_path, predictor_export_meta=predictor_export_meta, )
def _export_to_logfiledb(args, net, init_net, inputs, out_file,
                         extra_out_tensors=None):
    out_tensors = list(net.Proto().external_output)
    if extra_out_tensors is not None:
        out_tensors += extra_out_tensors

    # Every external input that is not fed as an input is a parameter.
    params = list(set(net.Proto().external_input) - set(inputs))
    net_type = None
    predictor_export_meta = predictor_exporter.PredictorExportMeta(
        predict_net=net,
        parameters=params,
        inputs=inputs,
        outputs=out_tensors,
        net_type=net_type,
    )
    logger.info("Exporting Caffe2 model to {}".format(out_file))
    predictor_exporter.save_to_db(
        db_type="log_file_db",
        db_destination=out_file,
        predictor_export_meta=predictor_export_meta,
    )
def save_model(deploy_model):
    """Write layer weights to files. These are used in Chapter 2.
    Also, write the model to file.
    """
    del_create_dir(WEIGHTS_DIR)

    # Iterate over the weights of the model
    for i, blob in enumerate(deploy_model.params):
        blob_vals = workspace.FetchBlob(blob)
        wfpath = "{}/{}.npy".format(WEIGHTS_DIR, str(i))
        # Write weights to file.
        # These are the weights we imported to our model in Chapter 2.
        print("Writing weights file:", wfpath)
        with open(wfpath, "wb") as ofile:
            np.save(ofile, blob_vals, allow_pickle=False)

    # Create model for export.
    # Specify input and output blobs.
    pe_meta = pe.PredictorExportMeta(
        predict_net=deploy_model.net.Proto(),
        parameters=[str(b) for b in deploy_model.params],
        inputs=["input_images"],
        outputs=["softmax"],
    )

    # Save model to file in minidb format.
    del_create_dir(MODEL_DIR)
    pe.save_to_db("minidb", os.path.join(MODEL_DIR, "mnist_model.minidb"), pe_meta)
    print("Deploy model written to: " + MODEL_DIR + "/mnist_model.minidb")
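The .npy files written above can be read back later with NumPy. A minimal sketch, assuming the same WEIGHTS_DIR layout with files named 0.npy, 1.npy, ... in the order of deploy_model.params; the helper name is hypothetical and not part of the original code:

import numpy as np

# Hypothetical helper: load the weight arrays saved by save_model() above,
# in the same index order as deploy_model.params.
def load_saved_weights(weights_dir, num_blobs):
    weights = []
    for i in range(num_blobs):
        with open("{}/{}.npy".format(weights_dir, i), "rb") as ifile:
            weights.append(np.load(ifile))
    return weights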
def save_caffe2_rep_to_db(
    caffe2_backend_rep,
    output_path,
    input_names,
    output_names,
    num_workers,
):
    # netdef external_input includes internally produced blobs
    actual_external_inputs = set()
    produced = set()
    for operator in caffe2_backend_rep.predict_net.op:
        for blob in operator.input:
            if blob not in produced:
                actual_external_inputs.add(blob)
        for blob in operator.output:
            produced.add(blob)
    for blob in output_names:
        if blob not in produced:
            actual_external_inputs.add(blob)
    param_names = [
        blob for blob in actual_external_inputs
        if blob not in input_names
    ]

    init_net = core.Net(caffe2_backend_rep.init_net)
    predict_net = core.Net(caffe2_backend_rep.predict_net)

    # predictor_exporter requires disjoint params, inputs and outputs
    for i, param in enumerate(param_names):
        if param in output_names:
            saved_name = param + '_PARAM'
            init_net.Copy(param, saved_name)
            predict_net.Copy(saved_name, param)
            param_names[i] = saved_name

    output_shapes = {}
    for blob in output_names:
        output_shapes[blob] = (0,)

    # Required because of https://github.com/pytorch/pytorch/pull/6456/files
    with caffe2_backend_rep.workspace._ctx:
        workspace.RunNetOnce(init_net)
        predictor_export_meta = predictor_exporter.PredictorExportMeta(
            predict_net=predict_net,
            parameters=param_names,
            inputs=input_names,
            outputs=output_names,
            shapes=output_shapes,
            net_type='dag',
            num_workers=num_workers,
        )
        predictor_exporter.save_to_db(
            db_type='minidb',
            db_destination=output_path,
            predictor_export_meta=predictor_export_meta,
        )
    logger.info('Caffe2 predictor net saved as: {}'.format(output_path))
def test_meta_constructor(self):
    '''
    Test that passing the net itself instead of its proto works
    '''
    m = self._create_model()
    pe.PredictorExportMeta(
        predict_net=m.net,
        parameters=m.params,
        inputs=["data"],
        outputs=["y"],
        shapes={"y": (1, 10), "data": (1, 5)},
    )
def export_nets_to_predictor_file(c2_prepared, input_names, output_names,
                                  predictor_path, extra_params=None):
    # netdef external_input includes internally produced blobs
    actual_external_inputs = set()
    produced = set()
    for operator in c2_prepared.predict_net.op:
        for blob in operator.input:
            if blob not in produced:
                actual_external_inputs.add(blob)
        for blob in operator.output:
            produced.add(blob)
    for blob in output_names:
        if blob not in produced:
            actual_external_inputs.add(blob)
    param_names = [
        blob for blob in actual_external_inputs
        if blob not in input_names and blob not in output_names
    ]
    if extra_params is not None:
        param_names += extra_params

    init_net = core.Net(c2_prepared.init_net)
    predict_net = core.Net(c2_prepared.predict_net)

    # Required because of https://github.com/pytorch/pytorch/pull/6456/files
    with c2_prepared.workspace._ctx:
        workspace.RunNetOnce(init_net)
        predictor_export_meta = pe.PredictorExportMeta(
            predict_net=predict_net,
            parameters=param_names,
            inputs=input_names,
            outputs=output_names,
            shapes={x: () for x in input_names + output_names},
            net_type="simple",
        )
        pe.save_to_db(
            db_type=CAFFE2_DB_TYPE,
            db_destination=predictor_path,
            predictor_export_meta=predictor_export_meta,
        )
def setUp(self):
    np.random.seed(1)
    m = self._create_model()

    self.predictor_export_meta = pe.PredictorExportMeta(
        predict_net=m.net.Proto(),
        parameters=[str(b) for b in m.params],
        inputs=["data"],
        outputs=["y"],
        shapes={"y": (1, 10), "data": (1, 5)},
    )
    workspace.RunNetOnce(m.param_init_net)

    self.params = {
        param: workspace.FetchBlob(param)
        for param in self.predictor_export_meta.parameters}
    # Reset the workspace, to ensure net creation proceeds as expected.
    workspace.ResetWorkspace()
def SaveModel(train_model, save_dir, epoch):
    predictor_export_meta = pred_exp.PredictorExportMeta(
        predict_net=train_model.net.Proto(),
        parameters=GetCheckpointParams(train_model),
        inputs=['data_uint8'],
        outputs=['softmax'],
        shapes={
            'data': (1, 9600),
            'softmax': (1, 40),
        })

    model_path = '%s/%s_%d.mdl' % (
        save_dir,
        train_model.net.Proto().name,
        epoch,
    )

    pred_exp.save_to_db(
        db_type='minidb',
        db_destination=model_path,
        predictor_export_meta=predictor_export_meta,
    )
def test_load_device_scope(self):
    for param, value in self.params.items():
        workspace.FeedBlob(param, value)

    pem = pe.PredictorExportMeta(
        predict_net=self.predictor_export_meta.predict_net,
        parameters=self.predictor_export_meta.parameters,
        inputs=self.predictor_export_meta.inputs,
        outputs=self.predictor_export_meta.outputs,
        shapes=self.predictor_export_meta.shapes,
        net_type='dag',
    )

    db_type = 'minidb'
    db_file = tempfile.NamedTemporaryFile(
        delete=False, suffix=".{}".format(db_type))
    pe.save_to_db(
        db_type=db_type,
        db_destination=db_file.name,
        predictor_export_meta=pem)

    workspace.ResetWorkspace()
    with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU, 1)):
        meta_net_def = pe.load_from_db(
            db_type=db_type,
            filename=db_file.name,
        )

    init_net = core.Net(
        pred_utils.GetNet(meta_net_def, pc.GLOBAL_INIT_NET_TYPE))
    predict_init_net = core.Net(
        pred_utils.GetNet(meta_net_def, pc.PREDICT_INIT_NET_TYPE))

    # check device options
    for op in list(init_net.Proto().op) + list(
            predict_init_net.Proto().op):
        self.assertEqual(1, op.device_option.device_id)
        self.assertEqual(caffe2_pb2.CPU, op.device_option.device_type)
for i in range(total_iters):
    workspace.RunNet(train_model.net)
    accuracy[i] = workspace.blobs['accuracy']
    loss[i] = workspace.blobs['loss']
    if i % 25 == 0:
        print("Iter: {}, loss: {}, accuracy: {}".format(
            i, loss[i], accuracy[i]))

plt.plot(loss, 'b')
plt.plot(accuracy, 'r')
plt.title("Summary of Training Run")
plt.xlabel("Iteration")
plt.legend(("Loss", "Accuracy"), loc="upper right")
plt.show()

pe_meta = pe.PredictorExportMeta(
    predict_net=deploy_model.net.Proto(),
    parameters=[str(b) for b in deploy_model.params],
    inputs=["data"],
    outputs=["softmax"],
)

pe.save_to_db("minidb", os.path.join(root_folder, "mnist_model.minidb"), pe_meta)
print("Deploy model saved to: " + root_folder + "/mnist_model.minidb")

blob = workspace.FetchBlob("data")
plt.figure()
plt.title("Batch of Testing Data")
_ = visualize.NCHW.ShowMultiple(blob)

workspace.ResetWorkspace(root_folder)
print("The blobs in the workspace after reset: {}".format(workspace.Blobs()))

predict_net = pe.prepare_prediction_net(
    os.path.join(root_folder, "mnist_model.minidb"), "minidb")
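Once the predictor is loaded, it can be exercised like any other Caffe2 net. A minimal sketch, assuming `blob` still holds the NCHW test batch fetched above and that the exported net reads "data" and writes "softmax" as declared in pe_meta:

# Feed the test batch into the loaded predictor and run it once.
workspace.FeedBlob("data", blob)
workspace.RunNetOnce(predict_net)

# Fetch the class probabilities and take the argmax per example.
softmax = workspace.FetchBlob("softmax")
predictions = np.argmax(softmax, axis=1)
print("Predicted classes: {}".format(predictions))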