Example #1
def handle(srcfl, rsf):

    # h5load, h5save, h5zipargs and secure_type_map come from the
    # surrounding project; secure_type_map maps each lossy dtype to a
    # wider accumulation dtype (e.g. float16 -> float64).
    rsm = h5load(srcfl[0])

    # Remember the original dtypes so the result can be cast back later.
    src_type = [para.dtype for para in rsm]
    map_type = [secure_type_map.get(para.dtype) for para in rsm]
    # Upcast the first model's parameters to the accumulation dtypes.
    sec_rsm = [
        para if typ is None else para.to(typ)
        for para, typ in zip(rsm, map_type)
    ]

    # Sum the remaining models in place, then divide by the model count
    # to obtain the element-wise mean.
    nmodel = 1
    for modelf in srcfl[1:]:
        for basep, mpload, typ in zip(sec_rsm, h5load(modelf), map_type):
            basep.add_(mpload if typ is None else mpload.to(typ))
        nmodel += 1
    nmodel = float(nmodel)
    for basep in sec_rsm:
        basep.div_(nmodel)

    # Cast the averaged parameters back to their original dtypes.
    rsm = [
        para if mtyp is None else para.to(styp)
        for para, mtyp, styp in zip(sec_rsm, map_type, src_type)
    ]

    h5save(rsm, rsf, h5args=h5zipargs)
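
A minimal usage sketch for the averaging helper above; the checkpoint paths below are placeholders, not files from the original project:

# Hypothetical call: average three saved checkpoints element-wise
# and write the result to a new file.
checkpoints = ["model_1.h5", "model_2.h5", "model_3.h5"]
handle(checkpoints, "model_avg.h5")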
Example #2
def handle(srcf, rsf, h5args=h5zipargs):

    if srcf == rsf:
        # Rewriting the same file: load everything first, then overwrite
        # it with the new h5args.
        h5save(h5load(srcf, restore_list=False), rsf, h5args=h5args)
    else:
        # Copy group by group; the context managers close both files
        # even on error.
        with h5File(srcf, "r") as sfg, h5File(rsf, "w") as rfg:
            handle_group(sfg, rfg, h5args=h5args)
Example #3
def load_model_cpu(modf, base_model):

	# h5load returns the saved tensors in the same order as
	# base_model.parameters(); rebinding .data avoids an extra copy.
	mpg = h5load(modf)

	for para, mp in zip(base_model.parameters(), mpg):
		para.data = mp.data

	return base_model
Example #4
def handle(srcf, rsf, h5args=h5zipargs):

	if srcf == rsf:
		h5save(h5load(srcf, restore_list=False), rsf, h5args=h5args)
	else:
		# Same copy as Example #2, but with manual open/close instead of
		# context managers; a safer variant is sketched below.
		sfg, rfg = h5py.File(srcf, "r"), h5py.File(rsf, "w")
		handle_group(sfg, rfg, h5args=h5args)
		sfg.close()
		rfg.close()
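
Because Example #4 closes the files manually, an exception inside handle_group would leak both handles. A minimal safer sketch under the same assumptions (h5py and handle_group as above; the name handle_safe is illustrative):

def handle_safe(srcf, rsf, h5args=h5zipargs):

	# Ensure both files are closed even if handle_group raises.
	sfg, rfg = h5py.File(srcf, "r"), h5py.File(rsf, "w")
	try:
		handle_group(sfg, rfg, h5args=h5args)
	finally:
		sfg.close()
		rfg.close()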
Example #5
def handle(srcfl, rsf):

    # torch must be imported at module level; mask_tensor_type, h5load,
    # h5save and h5zipargs come from the surrounding project.
    # Map each lossy dtype to a wider dtype for accurate accumulation.
    type_map = {
        torch.float16: torch.float64,
        torch.float32: torch.float64,
        torch.uint8: torch.int64,
        torch.int8: torch.int64,
        torch.int16: torch.int64,
        torch.int32: torch.int64,
    }
    type_map[mask_tensor_type] = torch.int64

    rsm = h5load(srcfl[0])

    # Remember the original dtypes so the result can be cast back later.
    src_type = [para.dtype for para in rsm]
    map_type = [type_map.get(para.dtype) for para in rsm]
    sec_rsm = [
        para if typ is None else para.to(typ)
        for para, typ in zip(rsm, map_type)
    ]

    # Sum the remaining models in place, then divide for the mean.
    nmodel = 1
    for modelf in srcfl[1:]:
        for basep, mpload, typ in zip(sec_rsm, h5load(modelf), map_type):
            basep.add_(mpload if typ is None else mpload.to(typ))
        nmodel += 1
    nmodel = float(nmodel)
    for basep in sec_rsm:
        basep.div_(nmodel)

    # Cast back to the original dtypes before saving.
    rsm = [
        para if mtyp is None else para.to(styp)
        for para, mtyp, styp in zip(sec_rsm, map_type, src_type)
    ]

    h5save(rsm, rsf, h5args=h5zipargs)
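
The upcast in Examples #1 and #5 is what keeps the average accurate: summing many low-precision tensors directly accumulates rounding error at every addition, while accumulating in float64 and casting back at the end does not. A standalone illustration:

import torch

# 1000 copies of 0.1 stored in float16.
parts = [torch.full((4,), 0.1, dtype=torch.float16) for _ in range(1000)]

# Naive mean: every addition rounds to float16, so error accumulates.
naive = sum(parts) / 1000.0

# Accumulate in float64, then cast back, as the handlers above do.
precise = (sum(p.to(torch.float64) for p in parts) / 1000.0).to(torch.float16)

print(naive[0].item(), precise[0].item())  # the naive mean drifts from 0.1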
Example #6
def load_emb(embf, embt, nword, scale_down_emb, freeze_emb):

    # Load a pretrained embedding matrix into the model weight embt;
    # sqrt comes from math, h5load from the surrounding project.
    _emb = h5load(embf)
    # Truncate to the target vocabulary size if the file holds more rows.
    if nword < _emb.size(0):
        _emb = _emb.narrow(0, 0, nword).contiguous()
    # Divide by sqrt(embedding dim), typically to compensate for the
    # Transformer's sqrt(d_model) embedding scaling at run time.
    if scale_down_emb:
        _emb.div_(sqrt(embt.size(-1)))
    with torch.no_grad():
        embt.copy_(_emb)
    embt.requires_grad_(not freeze_emb)

    return embt
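
A hypothetical call of load_emb; the attribute path model.enc.wemb.weight and the vocabulary size are illustrative, not names from the original project:

# Copy the first 30000 rows of a pretrained table into the encoder
# embedding, rescale it, and freeze it against further training.
emb = load_emb("pretrained_emb.h5", model.enc.wemb.weight, 30000,
               scale_down_emb=True, freeze_emb=True)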
Example #7
def load_model_cpu_old(modf, base_model):

	# Older variant: relies on h5load restoring a state_dict-compatible
	# mapping rather than a plain parameter list.
	base_model.load_state_dict(h5load(modf))

	return base_model
Example #8
if multi_gpu:
    # Replicate the model and the criterion across the configured devices
    # once up front; gather_output=False keeps per-device outputs in place
    # so the parallel criterion can consume them without a gather step.
    mymodel = DataParallelMT(mymodel,
                             device_ids=cuda_devices,
                             output_device=cuda_device.index,
                             host_replicate=True,
                             gather_output=False)
    lossf = DataParallelCriterion(lossf,
                                  device_ids=cuda_devices,
                                  output_device=cuda_device.index,
                                  replicate_once=True)

# Optionally resume the optimizer state when fine-tuning.
fine_tune_state = cnfg.fine_tune_state
if fine_tune_state is not None:
    logger.info("Load optimizer state from: " + fine_tune_state)
    optimizer.load_state_dict(h5load(fine_tune_state))

lrsch = GoogleLR(optimizer, cnfg.isize, cnfg.warm_step, scale=cnfg.lr_scale)

num_checkpoint = cnfg.num_checkpoint
cur_checkid = 0

tminerr = inf_default

# Evaluate once before training to record the initial dev loss/error.
minloss, minerr = eva(vd, nvalid, mymodel, lossf, cuda_device, multi_gpu,
                      use_amp)
logger.info("".join(("Init lr: ", ",".join(tostr(getlr(optimizer))),
                     ", Dev Loss/Error: %.3f %.2f" % (minloss, minerr))))

if fine_tune_m is None:
    save_model(mymodel, wkdir + "init.h5", multi_gpu, logger)
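
For context on the schedule built in Example #8: GoogleLR presumably implements the warmup-then-decay rule from "Attention Is All You Need". A minimal sketch of that rule under this assumption:

def noam_lr(step, isize, warm_step, scale=1.0):
    # Linear warmup for warm_step steps, then inverse square-root decay,
    # both scaled by isize ** -0.5 (isize is the model dimension).
    return scale * isize ** -0.5 * min(step ** -0.5, step * warm_step ** -1.5)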