def save2():
    x = nd.ones(3)
    y = nd.zeros(4)
    dt = {'x': x, 'y': y}
    nd.save('C_4_5_xy', [x, y])
    nd.save('C_4_5_dt', dt)
    x2, y2 = nd.load('C_4_5_xy')
    print(x2, y2)
    dt2 = nd.load('C_4_5_dt')
    print(dt2)
def get(root, suffix):
    # Load each shard onto its own GPU and L2-normalize the rows in place.
    arrays_on_gpu = [0] * FILE_NUM
    for i in range(FILE_NUM):
        file_i = os.path.join(root, str(i) + suffix)
        arrays_on_gpu[i] = torch.Tensor(nd.load(file_i)[0].asnumpy()).to(device[i])
    for i in range(FILE_NUM):
        F.normalize(arrays_on_gpu[i], p=2, dim=1, out=arrays_on_gpu[i])
    print('finish load')
    print(sum(arr.shape[0] for arr in arrays_on_gpu))
    pbar = tqdm(total=sum(arr.shape[0] for arr in arrays_on_gpu))
    # Score each shard against all shards, BATCH_SIZE rows at a time.
    similar_matrices = []
    for i in range(FILE_NUM):
        result = []
        num_classes = arrays_on_gpu[i].size()[0]
        idx = 0
        while idx < num_classes:
            indices = score(arrays_on_gpu[i][idx:min(idx + BATCH_SIZE, num_classes)],
                            arrays_on_gpu)
            result.append(indices)
            idx += BATCH_SIZE
            pbar.update(BATCH_SIZE)
        similar_matrices.append(torch.cat(result, dim=0))
    pbar.close()
    # Drop the first column (presumably each row's self-match) and stack the shards.
    result = []
    for i in range(len(similar_matrices)):
        result.append(similar_matrices[i][:, 1:].cpu().numpy())
    result = np.concatenate(result, axis=0)
    result = result.astype(np.int32)
    # np.save('/anxiang/tmp/xj600w_largeFC.param.npy', result)
    np.save(os.path.join(hard_config.PREFIX, '%s_largeFC.param.npy' % name), result)
def initialize_inference(inference, pretrained, start_epoch):
    if pretrained:
        print('Loading the pretrained model')
        vggface_weights = nd.load('ckpt/VGG-FACE/VGG_FACE-0000.params')
        # change the name
        checkpoint = {}
        vgg_face_layers = [2, 2, 3, 3, 3]
        for k, v in vggface_weights.items():
            if 'conv' in k:
                ind1, ind2, sub_name = k.split('_')
                ind1 = int(ind1.replace('arg:conv', '')) - 1
                ind2 = int(ind2[-1]) - 1
                ind = sum(vgg_face_layers[:ind1]) + ind2
                key = inference.name + '_conv' + str(ind) + '_' + sub_name
                checkpoint[key] = v
        # load the weights
        for k in inference.collect_params().keys():
            if k in checkpoint:
                inference.collect_params()[k]._load_init(checkpoint[k], ctx)
                print('Loaded %s weights from checkpoints' % k)
            else:
                inference.collect_params()[k].initialize(ctx=ctx)
                print('Initialize %s weights' % k)
        print('Done')
    elif start_epoch > 0:
        print('Loading the weights from [%d] epoch' % start_epoch)
        inference.load_params(
            os.path.join(args.ckpt_dir, args.prefix,
                         '%s-%d.params' % (args.prefix, start_epoch)), ctx)
    else:
        inference.collect_params().initialize(ctx=ctx)
    return inference
def load_from_sym_params(self, f, ctx=mx.cpu(), with_dense=False):
    """f is a parameter file saved by mxnet."""
    if not os.path.exists(f):
        print("parameter file does not exist:", f)
        return
    sym_dict = nd.load(f)
    trans_dict = {}
    print(type(sym_dict))
    for k, v in sym_dict.items():
        trans_dict[k.split(':')[-1]] = v
    for cld in self._children.values():
        if cld.name == 'pool0':
            continue
        params = cld.collect_params()
        model_layer_keys = params.keys()
        layer_name = cld.name + '_name'
        try:
            stored_keys = self.__getattribute__(layer_name)
        except AttributeError as e:
            print(e)
            continue  # does not have corresponding params
        for model_k, connect_k in zip(model_layer_keys, stored_keys):
            # print(model_k, connect_k)
            if model_k.startswith('dense0') and not with_dense:
                continue
            params[model_k]._load_init(trans_dict[connect_k], ctx=ctx)
def load_checkpoint(prefix, epoch):
    """Load model checkpoint from file.

    Parameters
    ----------
    prefix : str
        Prefix of model name.
    epoch : int
        Epoch number of model we would like to load.

    Returns
    -------
    symbol : Symbol
        The symbol configuration of computation network.
    arg_params : dict of str to NDArray
        Model parameter, dict of name to NDArray of net's weights.
    aux_params : dict of str to NDArray
        Model parameter, dict of name to NDArray of net's auxiliary states.

    Notes
    -----
    - symbol will be loaded from ``prefix-symbol.json``.
    - parameters will be loaded from ``prefix-epoch.params``.
    """
    symbol = sym.load('%s-symbol.json' % prefix)
    save_dict = nd.load('%s-%04d.params' % (prefix, epoch))
    arg_params = {}
    aux_params = {}
    for k, v in save_dict.items():
        tp, name = k.split(':', 1)
        if tp == 'arg':
            arg_params[name] = v
        if tp == 'aux':
            aux_params[name] = v
    return (symbol, arg_params, aux_params)
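# A minimal usage sketch for load_checkpoint above. The 'mymodel' prefix, epoch 10,
# and the input shape are hypothetical, not taken from the source; the Module calls
# are the standard mx.mod API.
def _example_use_load_checkpoint():
    import mxnet as mx
    symbol, arg_params, aux_params = load_checkpoint('mymodel', 10)
    # Bind the symbol into a Module and install the loaded weights.
    mod = mx.mod.Module(symbol=symbol, context=mx.cpu())
    mod.bind(data_shapes=[('data', (1, 3, 224, 224))], for_training=False)
    mod.set_params(arg_params, aux_params)
    return mod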
def load_pretrained_model_only_same_shape(net, filename, ctx, prefix=''):
    pretrained = nd.load(filename)
    pretrained_keys = pretrained.keys()
    params = net._collect_params_with_prefix()
    params_keys = params.keys()
    for p in params_keys:
        if p in pretrained_keys:
            try:
                params[p]._load_init(pretrained[p], ctx)
                print('[*] Parameter {} is loaded successfully'.format(p))
            except Exception:
                print('[!] Warning: shape of parameter {} {} is not consistent '
                      'with the one {} in the pretrained model'
                      .format(p, params[p].shape, pretrained[p].shape))
        else:
            print('[!] Warning: parameter {} does not exist in the pretrained model'
                  .format(p))
    return 0, 0
def _load_model_gluon(model_path, load_epoch=0):
    sym = mx.sym.load(model_path + '-symbol.json')
    save_dict = nd.load('%s-%04d.params' % (model_path, load_epoch))
    arg_params = dict()
    for k, v in save_dict.items():
        arg_params[k] = v
    return sym, arg_params
def load_targets(training_set_dir, batch_no):
    """
    :type training_set_dir: str
    :type batch_no: int
    """
    return nd.load(training_set_dir + TARGETS_FILENAME + str(batch_no) +
                   MXNET_NDARRAY_FILENAME_EXTENSION)[0]
def load_features(training_set_dir, batch_no):
    """
    :type training_set_dir: str
    :type batch_no: int
    """
    return nd.load(training_set_dir + FEATURES_FILENAME + str(batch_no) +
                   MXNET_NDARRAY_FILENAME_EXTENSION)[0]
def __getitem__(self, idx):
    if (os.path.splitext(self.items[idx][0])[1]).lower() == '.ndarray':
        data = nd.load(self.items[idx][0])[0]
    else:
        data = image.imread(self.items[idx][0], self._flag)
    label = self.items[idx][1]
    if self._transform is not None:
        return self._transform(data, label)
    return data, label
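# For context, a minimal sketch of the dataset class the __getitem__ above
# presumably belongs to. The class name and constructor are assumptions; only
# the fields the method actually reads are defined.
class _MixedNDArrayImageDataset:
    """Hypothetical container: items is a list of (path, label) pairs; paths
    ending in '.ndarray' are loaded with nd.load, anything else with
    image.imread."""

    def __init__(self, items, flag=1, transform=None):
        self.items = items
        self._flag = flag
        self._transform = transform

    def __len__(self):
        return len(self.items)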
def test_load_save():
    x = create_2d_tensor(SMALL_Y, LARGE_X)
    tmp = tempfile.mkdtemp()
    tmpfile = os.path.join(tmp, 'large_tensor')
    nd.save(tmpfile, [x])
    y = nd.load(tmpfile)
    y = y[0]
    assert x[0][0] == y[0][0]
    assert x[-1][-1] == y[-1][-1]
def test_load_save():
    x = create_vector(size=LARGE_X)
    tmp = tempfile.mkdtemp()
    tmpfile = os.path.join(tmp, 'large_vector')
    nd.save(tmpfile, [x])
    y = nd.load(tmpfile)
    y = y[0]
    assert x[0] == y[0]
    assert x[-1] == y[-1]
def check_load_save():
    x = create_vector(size=LARGE_X)
    with TemporaryDirectory() as tmp:
        tmpfile = os.path.join(tmp, 'large_vector')
        nd.save(tmpfile, [x])
        y = nd.load(tmpfile)
        y = y[0]
        assert x[0] == y[0]
        assert x[-1] == y[-1]
def test_nnvm_pass(iter_num=10):
    logger = logging.getLogger("log.test.nnvm")
    logger.info("=== Log Test NNVM ===")
    dump_sym, dump_params, dump_ext = load_fname(version, "sym.quantize", True)
    sym, params = mx.sym.load(dump_sym), nd.load(dump_params)
    (inputs_ext,) = sim.load_ext(dump_ext)
    data_iter = iter(val_loader)
    data, _ = next(data_iter)
    _mrt.std_dump(sym, params, inputs_ext, data, "cvm_mnist")
def load_memory(self, path):
    mem_dict = nd.load(path)
    self.value_memory = []
    self.label_memory = []
    for key in sorted(mem_dict.keys()):
        if key == "keys":
            self.key_memory = mem_dict[key]
        elif key.startswith("values_"):
            self.value_memory.append(mem_dict[key])
        elif key.startswith("labels_"):
            self.label_memory.append(mem_dict[key])
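# A hedged sketch of the saver that load_memory above implies; the function name
# save_memory is an assumption, not shown in the source. It writes one "keys"
# array plus numbered "values_*" / "labels_*" arrays. Caveat: load_memory uses
# sorted(), which compares strings lexicographically, so with ten or more slots
# the indices need zero-padding (e.g. "values_02") to be restored in order.
def save_memory(path, key_memory, value_memory, label_memory):
    mem_dict = {"keys": key_memory}
    for i, v in enumerate(value_memory):
        mem_dict["values_%02d" % i] = v
    for i, l in enumerate(label_memory):
        mem_dict["labels_%02d" % i] = l
    nd.save(path, mem_dict)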
def load_all_data_label(pathes):
    all_data, all_label = None, None
    for path in pathes:
        data, label = nd.load(path)
        label = label.reshape((-1,)).astype('float32')
        if all_data is None:
            all_data, all_label = data, label
        else:
            all_data = nd.concat(all_data, data, dim=0)
            all_label = nd.concat(all_label, label, dim=0)
    return all_data, all_label
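# A minimal usage sketch for load_all_data_label. The file names and the
# convention that each file stores a [data, label] pair are assumptions implied
# by the two-element unpacking above.
def _example_save_and_load_parts():
    data, label = nd.ones((4, 8)), nd.zeros((4, 1))
    nd.save('part0.params', [data, label])   # each file holds [data, label]
    nd.save('part1.params', [data, label])
    all_data, all_label = load_all_data_label(['part0.params', 'part1.params'])
    return all_data.shape, all_label.shape   # (8, 8) and (8,)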
def _prepare_module(task_id, symbol, ctx_config, data_names, label_names, resume_config):
    if resume_config['is_resume'] != '0':
        return Module(symbol=symbol,
                      context=Executor._prepare_ctx(ctx_config),
                      data_names=data_names,
                      label_names=label_names,
                      logger=get_logger('mxnet_logger[tid=%s]' % task_id,
                                        log_to_console=False,
                                        log_to_file=True))
    else:
        ckp = resume_config['ckp']
        prefix = ckp['prefix']
        epoch = ckp['epoch']
        params_path = osp.join(params_root_path, '%s-%04d.params' % (prefix, epoch))
        # Copied from MXNet
        # Licensed to the Apache Software Foundation (ASF) under one
        # or more contributor license agreements. See the NOTICE file
        # distributed with this work for additional information
        # regarding copyright ownership. The ASF licenses this file
        # to you under the Apache License, Version 2.0 (the
        # "License"); you may not use this file except in compliance
        # with the License. You may obtain a copy of the License at
        #
        #   http://www.apache.org/licenses/LICENSE-2.0
        #
        # Unless required by applicable law or agreed to in writing,
        # software distributed under the License is distributed on an
        # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
        # KIND, either express or implied. See the License for the
        # specific language governing permissions and limitations
        # under the License.
        save_dict = nd.load(params_path)
        arg_params = {}
        aux_params = {}
        for k, v in save_dict.items():
            tp, name = k.split(':', 1)
            if tp == 'arg':
                arg_params[name] = v
            if tp == 'aux':
                aux_params[name] = v
        mod = Module(symbol=symbol,
                     context=Executor._prepare_ctx(ctx_config),
                     logger=get_logger('mxnet_logger[tid=%s]' % task_id,
                                       log_to_console=False,
                                       log_to_file=True))
        mod._arg_params = arg_params
        mod._aux_params = aux_params
        mod.params_initialized = True
        # TODO: there is a parameter named load_optimizer_states in Module.load
        return mod
def _act_max_list(self):
    gluon_params = nd.load(self.gluon_params_filename)
    act_max_list = OrderedDict()
    for k in gluon_params.keys():
        *others, attr_name = k.split(".")
        if attr_name == "act_max":
            atom_block = functools.reduce(
                lambda b, n: b[int(n)] if self._is_number(n) else getattr(b, n),
                others, self.origin_net)
            act_max_list[f'{atom_block.name}'] = gluon_params[k].asscalar()
    return act_max_list
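# The functools.reduce call above walks the network attribute-by-attribute along
# the dotted parameter name, indexing when a path segment is numeric. A standalone
# sketch of the same traversal on toy objects (the toy classes and the key string
# are hypothetical, and str.isdigit stands in for self._is_number):
def _example_walk_dotted_key():
    import functools

    class Leaf:
        name = "leaf0"

    class Branch:
        def __init__(self):
            self.blocks = [Leaf()]

    class Net:
        def __init__(self):
            self.features = Branch()

    *others, attr_name = "features.blocks.0.act_max".split(".")
    block = functools.reduce(
        lambda b, n: b[int(n)] if n.isdigit() else getattr(b, n),
        others, Net())
    return block.name, attr_name   # ("leaf0", "act_max")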
def process(content_path, style_path, output_shape, style_save_path):
    content_img, style_img = image.imread(content_path), image.imread(style_path)
    content_X, contents_Y = get_contents(content_img, output_shape, ctx)
    if style_save_path:
        styles_npy = os.path.join(style_save_path, "styles.npy")
        if os.path.exists(styles_npy):
            styles_Y = nd.load(styles_npy)
        else:
            _, styles_Y = get_styles(style_img, output_shape, ctx)
            nd.save(styles_npy, styles_Y)
    else:
        _, styles_Y = get_styles(style_img, output_shape, ctx)
    return train(content_X, contents_Y, styles_Y, ctx, lr, max_epochs, lr_decay_epoch)
def init_sphere(mnet, loaded_model=None, ctx=mxnet.cpu()):
    for k, v in mnet.collect_params().items():
        if 'bias' in k:
            v.initialize(mxnet.initializer.Constant(0.0), ctx=ctx)
        elif 'batchnorm' in k:
            if 'gamma' in k or 'var' in k:
                v.initialize(mxnet.initializer.Constant(1.0), ctx=ctx)
            elif 'beta' in k or 'mean' in k:
                v.initialize(mxnet.initializer.Constant(0.0), ctx=ctx)
        else:
            v.initialize(mxnet.initializer.Xavier(magnitude=3), ctx=ctx)
    print(':')
    print(':')
    if loaded_model is not None:
        data = nd.load(loaded_model)
        for k, v in data.items():
            print(k)  # , v.shape
def load_params(prefix, epoch):
    """Load params from a file."""
    save_dict = nd.load("%s-%04d.params" % (prefix, epoch))
    arg_params = {}
    aux_params = {}
    if not save_dict:
        logging.warning('params file "%s" is empty', '%s-%04d.params' % (prefix, epoch))
        return (arg_params, aux_params)
    for k, v in save_dict.items():
        tp, name = k.split(":", 1)
        if tp == "arg":
            arg_params[name] = v
        if tp == "aux":
            aux_params[name] = v
    logging.info('params file "%s" is loaded', '%s-%04d.params' % (prefix, epoch))
    return (arg_params, aux_params)
def load_parameters(net, filename, ctx=None, allow_missing=False,
                    ignore_extra=False, ignore_different=False, verbose=False):
    loaded = nd.load(filename)
    params = net._collect_params_with_prefix()
    if not loaded and not params:
        return
    if not any('.' in i for i in loaded.keys()):
        # legacy loading
        del loaded
        net.collect_params().load(filename, ctx, allow_missing,
                                  ignore_extra, net.prefix)
        return

    if not allow_missing:
        for name in params.keys():
            assert name in loaded, \
                "Parameter '%s' is missing in file '%s', which contains parameters: %s. " \
                "Set allow_missing=True to ignore missing parameters." % (
                    name, filename, gluon.block._brief_print_list(loaded.keys()))
    elif verbose:
        for name in params.keys():
            if name not in loaded:
                print('missing key {} in model file'.format(name))
    for name in loaded:
        if name not in params:
            if not ignore_extra:
                raise ValueError(
                    "Parameter '%s' loaded from file '%s' is not present in ParameterDict, "
                    "which contains parameters %s. Set ignore_extra=True to ignore." % (
                        name, filename,
                        gluon.block._brief_print_list(net._params.keys())))
            elif verbose:
                print('ignoring extra key {} in model file'.format(name))
        if name in params:
            # if ignore_different and params[name].shape != loaded[name].shape:
            #     print(name, params[name].shape, loaded[name].shape)
            #     continue
            try:
                params[name]._load_init(loaded[name], ctx)
            except BaseException as e:
                if verbose:
                    print('ignoring key {} due to different shape {} vs {}'.format(
                        name, params[name].shape, loaded[name].shape))
                if not ignore_different:
                    raise e
def load_check_point(sym_json_path, params_path, ctx_config_tuple, task_id):
    ctx_config = list(ctx_config_tuple)
    # Copied from MXNet
    # Licensed to the Apache Software Foundation (ASF) under one
    # or more contributor license agreements. See the NOTICE file
    # distributed with this work for additional information
    # regarding copyright ownership. The ASF licenses this file
    # to you under the Apache License, Version 2.0 (the
    # "License"); you may not use this file except in compliance
    # with the License. You may obtain a copy of the License at
    #
    #   http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing,
    # software distributed under the License is distributed on an
    # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    # KIND, either express or implied. See the License for the
    # specific language governing permissions and limitations
    # under the License.
    if not isinstance(sym_json_path, sym.Symbol):
        symbol = sym.load(sym_json_path)
    else:
        # sym_json_path is already an instance of mxnet.sym.Symbol
        symbol = sym_json_path
    save_dict = nd.load(params_path)
    arg_params = {}
    aux_params = {}
    for k, v in save_dict.items():
        tp, name = k.split(':', 1)
        if tp == 'arg':
            arg_params[name] = v
        if tp == 'aux':
            aux_params[name] = v
    mod = Module(symbol=symbol,
                 context=generate_ctx(ctx_config),
                 logger=get_logger('mxnet_logger[tid=%s]' % task_id,
                                   log_to_console=False,
                                   log_to_file=True))
    mod._arg_params = arg_params
    mod._aux_params = aux_params
    mod.params_initialized = True
    # TODO: there is a parameter named load_optimizer_states in Module.load
    return mod
def test_sym_pass(iter_num=10):
    inputs_ext = {
        'data': {
            'shape': (batch_size, 1, 28, 28),
        }
    }
    inputs = [mx.sym.var(n) for n in inputs_ext]

    data_iter = iter(val_loader)

    def data_iter_func():
        return next(data_iter)

    data, _ = data_iter_func()

    net1 = utils.load_model(*load_fname(version), inputs, ctx=ctx)

    def graph_func(data):
        return net1.forward(data.as_in_context(ctx))

    sym_file, param_file = load_fname(version)
    sym, params = mx.sym.load(sym_file), nd.load(param_file)
    sym, params = spass.sym_quant_prepare(sym, params, inputs_ext)
    if True:
        mrt = _mrt.MRT(sym, params, inputs_ext)
        mrt.set_data('data', data)
        mrt.calibrate(ctx=ctx)
        mrt.set_output_prec(8)
        qsym, qparams, inputs_ext = mrt.quantize()
    else:
        inputs_ext['data']['data'] = data
        th_dict = calib.sym_calibrate(sym, params, inputs_ext, ctx=ctx)
        qsym, qparams, precs, _ = calib.sym_simulate(sym, params, inputs_ext, th_dict)
        qsym, qparams = calib.sym_realize(qsym, qparams, inputs_ext, precs, "cvm")
    dump_sym, dump_params, dump_ext = load_fname(version, "sym.quantize", True)
    sim.save_ext(dump_ext, inputs_ext)
    nd.save(dump_params, qparams)
    open(dump_sym, "w").write(qsym.tojson())

    dump_sym, dump_params, dump_ext = load_fname(version, "sym.quantize", True)
    (inputs_ext,) = sim.load_ext(dump_ext)
    inputs = [mx.sym.var(n) for n in inputs_ext]
    net2 = utils.load_model(dump_sym, dump_params, inputs, ctx=ctx)

    def cvm_quantize(data):
        data = sim.load_real_data(data, 'data', inputs_ext)
        return net2.forward(data.as_in_context(ctx))

    utils.multi_eval_accuracy(graph_func, data_iter_func, cvm_quantize,
                              iter_num=iter_num)
def load_params(self, params_filename):
    if os.path.isfile(params_filename):
        (self.Wdense1, self.bdense1,
         self.Wdense2, self.bdense2,
         self.Wdense3, self.bdense3,
         self.Wdense4, self.bdense4,
         self.Wdense5, self.bdense5) = nd.load(params_filename)
        self.params = [
            self.Wdense1, self.bdense1, self.Wdense2, self.bdense2,
            self.Wdense3, self.bdense3, self.Wdense4, self.bdense4,
            self.Wdense5, self.bdense5
        ]
        for param in self.params:
            param.attach_grad()
        self.moments = [
            nd.zeros(param.shape, ctx=self.ctx) for param in self.params
        ]
def load_model(_symbol_file, _param_file):
    """Load an existing symbol model."""
    cur_path = os.path.dirname(os.path.realpath(__file__))
    symbol_file_path = os.path.join(cur_path, _symbol_file)
    symbol = mx.sym.load(symbol_file_path)
    param_file_path = os.path.join(cur_path, _param_file)
    save_dict = nd.load(param_file_path)
    _arg_params = {}
    _aux_params = {}
    for k, v in save_dict.items():
        tp, name = k.split(':', 1)
        if tp == 'arg':
            _arg_params[name] = v
        if tp == 'aux':
            _aux_params[name] = v
    return symbol, _arg_params, _aux_params
def load_model(symbol_file, param_file, logger=None):
    cur_path = os.path.dirname(os.path.realpath(__file__))
    symbol_file_path = os.path.join(cur_path, symbol_file)
    if logger is not None:
        logger.info('Loading symbol from file %s' % symbol_file_path)
    symbol = mx.sym.load(symbol_file_path)
    param_file_path = os.path.join(cur_path, param_file)
    if logger is not None:
        logger.info('Loading params from file %s' % param_file_path)
    save_dict = nd.load(param_file_path)
    arg_params = {}
    aux_params = {}
    for k, v in save_dict.items():
        tp, name = k.split(':', 1)
        if tp == 'arg':
            arg_params[name] = v
        if tp == 'aux':
            aux_params[name] = v
    return symbol, arg_params, aux_params
def special_init(net, ctx=mxnet.cpu()):
    """Special for 'vgg16-0000.params'."""
    loaded_model = 'vgg16-0000.params'
    key = [
        'arg:conv1_1_weight', 'arg:conv1_1_bias',
        'arg:conv1_2_weight', 'arg:conv1_2_bias',
        'arg:conv2_1_weight', 'arg:conv2_1_bias',
        'arg:conv2_2_weight', 'arg:conv2_2_bias',
        'arg:conv3_1_weight', 'arg:conv3_1_bias',
        'arg:conv3_2_weight', 'arg:conv3_2_bias',
        'arg:conv3_3_weight', 'arg:conv3_3_bias',
        'arg:conv4_1_weight', 'arg:conv4_1_bias',
        'arg:conv4_2_weight', 'arg:conv4_2_bias',
        'arg:conv4_3_weight', 'arg:conv4_3_bias',
        'arg:conv5_1_weight', 'arg:conv5_1_bias',
        'arg:conv5_2_weight', 'arg:conv5_2_bias',
        'arg:conv5_3_weight', 'arg:conv5_3_bias',
        'arg:fc6_weight', 'arg:fc6_bias',
        'arg:fc7_weight', 'arg:fc7_bias',
        'arg:fc8_weight', 'arg:fc8_bias'
    ]
    loaded = nd.load(loaded_model)
    params = net.net.collect_params()
    for i, k in enumerate(params.keys()):
        name = key[i]
        params[k]._load_init(loaded[name], ctx)
def load_model(_symbol_file, _param_file, _logger=None):
    """Load an existing symbol model."""
    cur_path = os.path.dirname(os.path.realpath(__file__))
    symbol_file_path = os.path.join(cur_path, _symbol_file)
    if _logger is not None:
        _logger.info('Loading symbol from file %s' % symbol_file_path)
    symbol = mx.sym.load(symbol_file_path)
    param_file_path = os.path.join(cur_path, _param_file)
    if _logger is not None:
        _logger.info('Loading params from file %s' % param_file_path)
    save_dict = nd.load(param_file_path)
    _arg_params = {}
    _aux_params = {}
    for k, v in save_dict.items():
        tp, name = k.split(':', 1)
        if tp == 'arg':
            _arg_params[name] = v
        if tp == 'aux':
            _aux_params[name] = v
    return symbol, _arg_params, _aux_params
def special_initMX(net, ctx=mxnet.cpu(), skip=(0, 0), layers=0):
    """Special for 'vgg16-0000.params'.

    skip: (start, end) range of parameter keys to skip when initializing.
    Note: this is for the variant without BN layers.
    """
    loaded_model = "/home1/caffemodel/VGG16.mnt"
    key = [
        'features.0.weight', 'features.0.bias', 'features.2.weight', 'features.2.bias',
        'features.5.weight', 'features.5.bias', 'features.7.weight', 'features.7.bias',
        'features.10.weight', 'features.10.bias', 'features.12.weight', 'features.12.bias',
        'features.14.weight', 'features.14.bias', 'features.17.weight', 'features.17.bias',
        'features.19.weight', 'features.19.bias', 'features.21.weight', 'features.21.bias',
        'features.24.weight', 'features.24.bias', 'features.26.weight', 'features.26.bias',
        'features.28.weight', 'features.28.bias', 'features.31.weight', 'features.31.bias',
        'features.33.weight', 'features.33.bias', 'output.weight', 'output.bias'
    ]
    loaded = nd.load(loaded_model)
    params = net.net.collect_params()
    start, end = skip
    key[start:end] = []
    this_keys = list(params.keys())  # list() so the slice assignments below work on Python 3
    this_end = start + layers * 2
    key_weight = []
    if layers > 0:
        for k in this_keys[start:this_end]:
            if 'weight' in k:
                params[k].initialize(mxnet.initializer.Xavier(magnitude=3), ctx=ctx)
                key_weight.append(k)
            else:
                params[k].initialize(mxnet.initializer.Constant(0.0), ctx=ctx)
        # using compressed convolution in layers
        global_param.set_param(key_weight, ctx=ctx)
    this_keys[start:this_end] = []
    # print('global_param.netMask.keys: ', global_param.netMask.keys())
    for name, k in zip(key, this_keys):
        params[k]._load_init(loaded[name], ctx)