import numpy as np

# Options and Model are the project's option parser and model classes;
# their imports are not part of this snippet.
def eval_proposed(weight, use_haptic, use_audio, use_virbo):
    opt = Options().parse()
    opt.use_behavior = True
    opt.use_haptic = use_haptic
    opt.use_audio = use_audio
    opt.use_vibro = use_virbo
    opt.aux = True
    opt.sequence_length = 20
    print("Model Config: ", opt)
    model = Model(opt)
    model.load_weight(weight)
    return model.evaluate(0, keep_batch=True, ssim=False)
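
# Hypothetical usage sketch (the checkpoint path and modality flags below are
# placeholders, not taken from the original source):
#
#     scores = eval_proposed('checkpoints/proposed_best.pth',
#                            use_haptic=True, use_audio=True, use_virbo=False)
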
# Example #2

# Evaluate the proposed model on the data for a single behavior,
# keeping per-frame results.
def eval_proposed(weight, use_haptic, use_audio, use_virbo, behavior):
    opt = Options().parse()
    opt.use_behavior = True
    opt.use_haptic = use_haptic
    opt.use_audio = use_audio
    # note: vibro input is disabled here regardless of the use_virbo argument
    opt.use_vibro = False
    opt.behavior_layer = 1
    opt.aux = True
    opt.sequence_length = 20
    # point the data loader at the subdirectory for the requested behavior
    opt.data_dir = '../data/' + behavior
    print("Model Config: ", opt)
    model = Model(opt)
    model.load_weight(weight)
    return model.evaluate(0, keep_frame=True)
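
# Hypothetical usage: evaluating one checkpoint per behavior subdirectory.
# The behavior names and checkpoint path below are placeholders:
#
#     for behavior in ['push', 'shake']:
#         results = eval_proposed('checkpoints/proposed_best.pth',
#                                 True, True, False, behavior)
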
# Run prediction over a list of files and collect the generated and
# ground-truth feature arrays.
def predict_proposed(weight, use_haptic, use_audio, use_virbo, filelist):
    opt = Options().parse()
    opt.use_behavior = True
    opt.use_haptic = use_haptic
    opt.use_audio = use_audio
    opt.use_vibro = use_virbo
    opt.aux = True
    opt.sequence_length = 20
    print("Model Config: ", opt)
    model = Model(opt)
    model.load_weight(weight)
    resultlist, gt = model.predict(filelist)
    # Stack the trailing three feature dimensions of each squeezed tensor
    # (predictions sliced along axis 0, ground truth along axis 1) per sequence.
    gen_audios = [np.vstack([hp.cpu().numpy().squeeze()[-3:] for hp in haptic]) for _, haptic, _, _ in resultlist]
    gt_audios = [np.vstack([hp.cpu().numpy().squeeze()[:, -3:] for hp in haptic]) for _, haptic, _, _ in gt]
    return gen_audios, gt_audios
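
# A minimal, hypothetical driver for the helpers above; the checkpoint path
# and file names are placeholders, not taken from the original source.
if __name__ == '__main__':
    files = ['../data/test/sequence_000']   # placeholder file list
    gen_audios, gt_audios = predict_proposed('checkpoints/proposed_best.pth',
                                             True, True, False, files)
    # Each entry is one stacked numpy array per predicted / ground-truth sequence.
    print(len(gen_audios), gen_audios[0].shape if gen_audios else None)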