def build_torch_model(save_params=True):
    model = TorchModel()
    model.eval()
    d = torch.from_numpy(reader())
    y = model(d)
    logger.info(y.tolist())
    if save_params:
        torch.save(model.state_dict(), "torch_model.params")
def build_dynamic_network(load_params=None, save_params=False, use_structured_name=False):
    with fluid.dygraph.guard(place):
        model = TestModel("test")
        if load_params:
            model_state_dict, _ = fluid.load_dygraph(load_params)
            model.load_dict(model_state_dict, use_structured_name=use_structured_name)
        model.eval()
        d = fluid.dygraph.to_variable(reader())
        p = model(d)
        logger.info(p.numpy())
        if save_params:
            fluid.save_dygraph(model.state_dict(), "dynamic_params")
def log_arguments(self):
    logger.info("----------- Configuration Arguments -----------")
    for arg, value in sorted(self.arg_config.items()):
        logger.info("%s: %s" % (arg, value))
    for arg, value in sorted(self.custom_config.items()):
        logger.info("%s: %s" % (arg, value))
    logger.info("------------------------------------------------")
def send(self, title, content, retry=3):
    for _ in range(retry):
        try:
            msg = MIMEText(content, "plain", "utf-8")
            msg['From'] = Header(self.sender)
            msg['To'] = Header(self.receiver)
            msg['Subject'] = Header(title)
            if not self.use_ssl:
                server = smtplib.SMTP(self.server, self.port)
            else:
                server = smtplib.SMTP_SSL(self.server, self.port)
            server.login(self.sender, self.password)
            server.sendmail(self.sender, [self.receiver], msg.as_string())
            server.quit()
            logger.info("email reminder sent successfully!")
            break
        except Exception as e:
            logger.error("email send error! {}".format(e))
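
# A minimal usage sketch for the email send() above. Only the attribute names
# (sender, receiver, password, server, port, use_ssl) are taken from the method
# body; the class name, constructor, and all example values are assumptions,
# not the original implementation.
import smtplib
from email.header import Header
from email.mime.text import MIMEText


class MailSender(object):
    """Hypothetical holder for the SMTP settings that send() reads from self."""

    def __init__(self, sender, receiver, password,
                 server="smtp.example.com", port=465, use_ssl=True):
        self.sender = sender
        self.receiver = receiver
        self.password = password
        self.server = server
        self.port = port
        self.use_ssl = use_ssl

    send = send  # attach the module-level send() defined above as a method


# Example (placeholder credentials):
# MailSender("me@example.com", "me@example.com", "app-password").send(
#     "training finished", "final loss: 0.01")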
def build_static_network(save_params=False, load_pretrain=None):
    main_prog = fluid.Program()
    startup_prog = fluid.Program()
    exe = fluid.Executor(place)
    with fluid.program_guard(main_prog, startup_prog):
        data = fluid.layers.data(
            name="img", shape=[1, 3, 16, 16], append_batch_size=False)
        conv = fluid.layers.conv2d(
            input=data,
            num_filters=16,
            filter_size=3,
            param_attr=fluid.ParamAttr(name='conv.weight'),
            bias_attr=fluid.ParamAttr(name='conv.bias'))
        bn = fluid.layers.batch_norm(
            input=conv,
            act="relu",
            param_attr=fluid.ParamAttr(name='bn.scale'),
            bias_attr=fluid.ParamAttr(name='bn.offset'),
            moving_mean_name='bn.mean',
            moving_variance_name='bn.variance')
        batch_size = bn.shape[0]
        f = fluid.layers.reshape(bn, [batch_size, -1])
        fc = fluid.layers.fc(
            input=f,
            size=3,
            param_attr=fluid.ParamAttr(name='fc.w_0'),
            bias_attr=fluid.ParamAttr(name='fc.b_0'))
        logits = fluid.layers.softmax(fc)
    eval_prog = main_prog.clone(True)
    exe.run(startup_prog)
    if load_pretrain:
        fluid.io.load_persistables(exe, load_pretrain, main_prog)
    d = {"img": reader()}
    result = exe.run(eval_prog, feed=d, fetch_list=[logits.name])
    logger.info(result[0])
    if save_params:
        if not os.path.exists("params"):
            os.mkdir("params")
        fluid.io.save_persistables(exe, "params", main_prog)
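
# Note (assumed default behaviour of fluid.io.save_persistables, with no
# filename argument): the "params" directory should end up containing one file
# per persistable variable, named after the variable itself (conv.weight,
# conv.bias, bn.scale, bn.offset, bn.mean, bn.variance, fc.w_0, fc.b_0).
# static2dynamic() below relies on exactly this layout when it walks the
# directory file by file.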
def _make_dynamic_state_dict(state_dict, data_type="float32"):
    with fluid.dygraph.guard(place):
        layer_helper = fluid.dygraph.layer_object_helper.LayerObjectHelper(
            "transform")
        model_state_dict = {}
        for name, value in state_dict.items():
            temp_attr = fluid.ParamAttr(name=name)
            shape = value.shape
            if len(shape) < 1:
                # skip scalar entries; they cannot be created as parameters here
                continue
            is_bias = 'bias' in name
            initializer = fluid.initializer.NumpyArrayInitializer(value)
            logger.debug("[ToDynamic] param: {}, shape: {}".format(name, shape))
            param = layer_helper.create_parameter(temp_attr, shape, data_type,
                                                  is_bias, initializer)
            model_state_dict[name] = param
        logger.info("finished building dynamic parameters!")
        return model_state_dict
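
def _demo_make_dynamic_state_dict():
    # Hypothetical demo (not part of the original code): feed a hand-made numpy
    # state dict through _make_dynamic_state_dict() and save it with
    # save_dygraph, mirroring what torch2dynamic()/static2dynamic() below do.
    # The parameter names and shapes are made up purely for illustration.
    import numpy as np
    toy_state = {
        "toy.weight": np.random.rand(8, 4).astype("float32"),
        "toy.bias": np.random.rand(4).astype("float32"),
    }
    toy_dynamic_state = _make_dynamic_state_dict(toy_state)
    with fluid.dygraph.guard(place):
        fluid.save_dygraph(toy_dynamic_state, "toy_params")  # writes toy_params.pdparams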
def send(self, title, content="", retry=3): if len(title) > 256: logger.warning("The title should no longer than 256 words!") logger.warning("The title will be truncated!") title = title[:256] if len(content) > 65536: logger.warning("The content should no longer than 65536 words!") content = content[:65536] data = {"text": title, "desp": content} for _ in range(retry): try: response = self.sess.get(self.url, params=data, timeout=(2, 2)) code = response.status_code assert code == 200, "request error! status code: {}".format( code) res = response.text if "success" in res: logger.info("message send successfully!") else: logger.warning("something wrong! response: {}".format(res)) break except Exception as e: logger.error("request error happend! {}".format(e))
def pdparams2static(param_file, filename):
    assert os.path.exists(
        param_file + ".pdparams"), "{}.pdparams does not exist!".format(param_file)
    if not os.path.exists(filename):
        os.makedirs(filename)
    assert len(
        os.listdir(filename)) == 0, "dir {} should be empty!".format(filename)
    logger.info("start to read pdparams params...")
    static_dict = _read_pdparams(param_file)
    logger.info("found {} parameters. start to save to {}...".format(
        len(static_dict), filename))
    for name, data in static_dict.items():
        _make_static_output(filename, name, data)
    logger.info("finished!")
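
def _demo_pdparams2static():
    # Hypothetical demo (not part of the original code): take the
    # dynamic_params.pdparams file written by build_dynamic_network(save_params=True)
    # and dump it as one static-graph persistable file per parameter.
    # "static_from_dynamic" is an arbitrary output directory name.
    build_dynamic_network(save_params=True)
    pdparams2static("dynamic_params", "static_from_dynamic")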
def torch2dynamic(param_file, save_path=None):
    assert os.path.exists(param_file), "{} does not exist!".format(param_file)
    logger.info("start to read torch params...")
    state_dict = _read_torch_dict(param_file)
    logger.info("found {} parameters. start to transform...".format(
        len(state_dict)))
    dynamic_state_dict = _make_dynamic_state_dict(state_dict)
    if save_path:
        with fluid.dygraph.guard(place):
            fluid.save_dygraph(dynamic_state_dict, save_path)
        logger.info("dynamic parameters have been saved to {}.pdparams.".format(
            save_path))
    else:
        return dynamic_state_dict
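
def _demo_torch2dynamic():
    # Hypothetical demo (not part of the original code): export the torch model's
    # weights, convert them, and load them into the dygraph network. This assumes
    # the parameter names produced by _read_torch_dict() already match the names
    # TestModel expects; no renaming or fc-weight transposition is shown here.
    build_torch_model(save_params=True)                 # writes torch_model.params
    torch2dynamic("torch_model.params", "from_torch")   # writes from_torch.pdparams
    build_dynamic_network(load_params="from_torch")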
def static2dynamic(params_dir, save_path=None):
    params = os.listdir(params_dir)
    logger.info("found {} parameters. start to read.".format(len(params)))
    state_dict = {}
    dtype = ""
    for param in params:
        param_path = os.path.join(params_dir, param)
        if os.path.isdir(param_path):
            continue
        data, data_type, lod_info = _read_static_params(param_path)
        logger.debug("param: {}, shape: {}, data type: {}".format(
            param, data.shape, data_type))
        state_dict[param] = data
        dtype = data_type
    logger.info("finished reading parameters! start to transform to dynamic!")
    dynamic_state_dict = _make_dynamic_state_dict(state_dict, dtype)
    if save_path:
        with fluid.dygraph.guard(place):
            fluid.save_dygraph(dynamic_state_dict, save_path)
        logger.info("dynamic parameters have been saved to {}.pdparams.".format(
            save_path))
    else:
        return dynamic_state_dict
        x = self.fc(x)
        return F.softmax(x)


if __name__ == "__main__":
    logger.info(">>> build static network & save params...")
    build_static_network(save_params=True)
    logger.info(">>> read static params & build dynamic network...")
    static2dynamic("params", "dynamic")
    build_dynamic_network(load_params="dynamic")
    print("\n<========================>\n")

    logger.info(">>> build dynamic network & save params...")
    build_dynamic_network(save_params=True)
    logger.info(">>> read dynamic params & build static network...")
    dynamic2static("dynamic_params", "static_params")
    build_static_network(load_pretrain="static_params")
    print("\n<========================>\n")