import pickle as pkl
import sys
import time

import numpy as np
import torch

# Project-local helpers assumed to be importable in the original repo:
# load_model, get_freer_gpu, pad_data, accuracy_fun, accuracy_fun_torch,
# and the global `options` configuration dict.


def set_model(mdl_file, model_type):
    """Load a model file (full pathname) and return the loaded model.

    The model is placed on the CPU or a GPU depending on its type: a GMM-HMM
    model is loaded on the CPU, whereas flow-based HMM models such as NVP-HMM
    and Glow-HMM have to be loaded on a GPU.
    ----
    Args:
    - mdl_file : model file (.mdl extension) with full pathname
    - model_type : string indicating the type of model:
                   "gaus" - GMM, "gen" - NVP, "glow" - Glow

    Returns:
    - mdl : the loaded model, placed on the appropriate device
    """
    if model_type == 'gaus':
        # Loading the GMM-HMM model on the CPU
        with open(mdl_file, "rb") as handle:
            mdl = pkl.load(handle)
        mdl.device = 'cpu'

    elif model_type == 'gen' or model_type == 'glow':
        # In case the model is NVP-HMM ("gen") or Glow-HMM ("glow")
        mdl = load_model(mdl_file)
        if torch.cuda.is_available():
            if not options["Mul_gpu"]:
                # default case, only one gpu
                device = torch.device('cuda')
                mdl.device = device
                mdl.pushto(mdl.device)
            else:
                # With Mul_gpu set, retry up to four times, each time pushing
                # the model to whichever available GPU has the most free memory
                for i in range(4):
                    try:
                        # Random delay so concurrent jobs do not all probe
                        # the same GPU at once
                        time.sleep(np.random.randint(10))
                        device = torch.device('cuda:{}'.format(
                            int(get_freer_gpu())))
                        mdl.device = device
                        mdl.pushto(mdl.device)
                        break
                    except Exception:
                        # Push failed (e.g. GPU out of memory); try another GPU
                        continue
        else:
            # In case no GPU device is available, the model is pushed to CPU
            mdl.device = 'cpu'
            mdl.pushto(mdl.device)

        # Switch the model to eval mode for the testing phase so layers such
        # as dropout and batch norm use their inference behaviour
        mdl.eval()

    else:
        raise ValueError("Unknown model_type: {!r}".format(model_type))

    # Return the model, now resident on the appropriate device
    return mdl
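
A minimal usage sketch, assuming the imports above and a trained model on disk; the paths and class labels below are illustrative, not from the original code:

# Hypothetical call sites; the .mdl paths are placeholders.
gmm_mdl = set_model('/path/to/models/class0_gmm.mdl', 'gaus')  # loaded on the CPU
nvp_mdl = set_model('/path/to/models/class0_nvp.mdl', 'gen')   # pushed to a GPU if available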
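The GPU-selection loop relies on a project-local helper, get_freer_gpu(), whose definition is not included in these examples. A plausible sketch of such a helper, assuming nvidia-smi is installed (this implementation is an assumption, not the project's actual code):

import subprocess
import numpy as np

def get_freer_gpu():
    # Hypothetical sketch: query each GPU's free memory (in MiB) through
    # nvidia-smi and return the index of the GPU with the most headroom.
    out = subprocess.check_output(
        ['nvidia-smi', '--query-gpu=memory.free',
         '--format=csv,noheader,nounits']).decode()
    free_mib = [int(v) for v in out.strip().splitlines()]
    return int(np.argmax(free_mib))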
Example 3
def set_model(mdl_file, model_type):
    # Variant of the function above that also builds an accuracy closure `f`
    if model_type == 'gaus':
        with open(mdl_file, "rb") as handle:
            mdl = pkl.load(handle)
        mdl.device = 'cpu'
        f = lambda x: accuracy_fun(x, mdl=mdl)
    elif model_type == 'gen':
        mdl = load_model(mdl_file)
        if torch.cuda.is_available():
            if not options["Mul_gpu"]:
                # default case, only one gpu
                device = torch.device('cuda')
                mdl.device = device
                mdl.pushto(mdl.device)
            else:
                for i in range(4):
                    try:
                        # Random delay so concurrent jobs do not all probe
                        # the same GPU at once
                        time.sleep(np.random.randint(10))
                        device = torch.device('cuda:{}'.format(
                            int(get_freer_gpu())))
                        mdl.device = device
                        mdl.pushto(mdl.device)
                        break
                    except Exception:
                        # Push failed (e.g. GPU out of memory); try another GPU
                        continue
        # set model into eval mode
        mdl.eval()

        f = lambda x: accuracy_fun_torch(
            x, mdl=mdl, batch_size_=options["Train"]["eval_batch_size"])

    # print("[Acc:] epoch:{}\tclass:{}\tPush model to {}. Done.".format(epoch,iclass, mdl.device), file=sys.stdout)
Example 4
    mdl.iclass = iclass_str

    mdl.device = 'cpu'
    if torch.cuda.is_available():
        if not options["Mul_gpu"]:
            # default case, only one gpu
            device = torch.device('cuda')
            mdl.device = device
            mdl.pushto(mdl.device)

        else:
            for i in range(4):
                try:
                    # Random delay so concurrent jobs do not all probe
                    # the same GPU at once
                    time.sleep(np.random.randint(20))
                    device = torch.device('cuda:{}'.format(int(
                        get_freer_gpu())))
                    print("Trying to push to device: {}".format(device))
                    mdl.device = device
                    mdl.pushto(mdl.device)
                    break
                except Exception:
                    # Push failed (e.g. GPU out of memory); try another GPU
                    print("Push to device failed, trying again ...")
                    continue
    print("epoch:{}\tclass:{}\tPush model to {}. Done.".format(
        epoch_str, iclass_str, mdl.device),
          file=sys.stdout)

    # Zero-pad every sequence to the longest length in the batch so the
    # sequences can be stacked for batch training
    max_len_ = max(x.shape[0] for x in xtrain)
    xtrain_padded = pad_data(xtrain, max_len_)
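
pad_data is another project-local helper that is not shown on this page. A minimal sketch of the usual zero-padding step, assuming each element of xtrain is a (T_i, D) NumPy array (the layout and implementation are assumptions):

import numpy as np

def pad_data(xs, max_len):
    # Hypothetical sketch: right-pad each (T_i, D) sequence with zero rows
    # and stack the results into a single (N, max_len, D) batch array.
    n, d = len(xs), xs[0].shape[1]
    out = np.zeros((n, max_len, d), dtype=xs[0].dtype)
    for i, x in enumerate(xs):
        out[i, :x.shape[0], :] = x
    return out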