import os

# NOTE: the following imports are assumed from the surrounding project:
# `main_cl` exposes the training/evaluation entry point `run()`, `utils`
# provides `load_object()`, and `get_param_stamp_from_args()` builds the
# parameter-stamp used in the result file names.
import main_cl
import utils
from param_stamp import get_param_stamp_from_args

def get_gen_results(args):
    # -get param-stamp
    param_stamp = get_param_stamp_from_args(args)
    # -check whether already run, and if not do so
    eval_tag = "" if args.eval_tag == "none" else "-{}".format(args.eval_tag)
    if not os.path.isfile("{}/prec-{}.txt".format(args.r_dir, param_stamp)):
        print("{}: ...running...".format(param_stamp))
        args.train = True
        main_cl.run(args)
    elif (os.path.isfile("{}/ll-{}.txt".format(args.r_dir, param_stamp))
          and os.path.isfile("{}/is{}-{}.txt".format(args.r_dir, eval_tag,
                                                     param_stamp))):
        print("{}: already run".format(param_stamp))
    else:
        print("{}: ...running evaluation only...".format(param_stamp))
        args.train = False
        main_cl.run(args)
    # -get average precision
    file_name = '{}/prec-{}.txt'.format(args.r_dir, param_stamp)
    with open(file_name) as f:
        ave = float(f.readline())
    # -get log-likelihood
    file_name = '{}/ll-{}.txt'.format(args.r_dir, param_stamp)
    with open(file_name) as f:
        ll = float(f.readline())
    # -get reconstruction error (per input unit)
    file_name = '{}/re-{}.txt'.format(args.r_dir, param_stamp)
    with open(file_name) as f:
        re = float(f.readline())
    # -get inception score
    file_name = '{}/is{}-{}.txt'.format(args.r_dir, eval_tag, param_stamp)
    with open(file_name) as f:
        IS = float(f.readline())
    # -get Frechet inception distance
    file_name = '{}/fid{}-{}.txt'.format(args.r_dir, eval_tag, param_stamp)
    with open(file_name) as f:
        FID = float(f.readline())
    # -get precision and recall curves (one value per line)
    file_name = '{}/precision{}-{}.txt'.format(args.r_dir, eval_tag,
                                               param_stamp)
    with open(file_name) as f:
        precision = [float(line) for line in f]
    file_name = '{}/recall{}-{}.txt'.format(args.r_dir, eval_tag, param_stamp)
    with open(file_name) as f:
        recall = [float(line) for line in f]
    # -return tuple with the results
    return (ave, ll, re, IS, FID, precision, recall)
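
# Hypothetical usage sketch: collect the generative-model metrics returned by
# get_gen_results() over several random seeds. The `seed` attribute on `args`
# is an assumption about the surrounding experiment script.
def collect_gen_results_over_seeds(args, seeds=(1, 2, 3)):
    per_seed = {}
    for seed in seeds:
        args.seed = seed
        ave, ll, re, IS, FID, precision, recall = get_gen_results(args)
        per_seed[seed] = {'prec': ave, 'll': ll, 're': re, 'is': IS, 'fid': FID,
                          'precision-curve': precision, 'recall-curve': recall}
    return per_seed
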
def get_results(args, model_name, shift, slot):
    # -get param-stamp
    param_stamp = get_param_stamp_from_args(args)
    # -check whether already run; if not do so
    if os.path.isfile('{}/dict-{}-{}-{}.pkl'.format(args.r_dir, param_stamp,
                                                    args.slot, args.shift)):
        print("{}: already run".format(param_stamp))
    else:
        print("{}: ...running...".format(param_stamp))
        args.metrics = True
        main_cl.run(args, model_name=model_name, shift=shift, slot=slot)
    # -get metrics-dict and return it
    file_name = '{}/dict-{}-{}-{}'.format(args.r_dir, param_stamp,
                                          args.slot, args.shift)
    metrics_dict = utils.load_object(file_name)
    return metrics_dict
def get_result(args):
    # -get param-stamp
    param_stamp = get_param_stamp_from_args(args)
    # -check whether already run, and if not do so
    if os.path.isfile('{}/prec-{}.txt'.format(args.r_dir, param_stamp)):
        print("{}: already run".format(param_stamp))
    else:
        print("{}: ...running...".format(param_stamp))
        main_cl.run(args)
    # -get average precision
    file_name = '{}/prec-{}.txt'.format(args.r_dir, param_stamp)
    with open(file_name) as f:
        ave = float(f.readline())
    # -return it
    return ave
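
# Hypothetical usage sketch: average the precision returned by get_result()
# over several random seeds; assumes `args` has a `seed` attribute as in the
# surrounding experiment script.
def mean_precision_over_seeds(args, seeds=(1, 2, 3)):
    precs = []
    for seed in seeds:
        args.seed = seed
        precs.append(get_result(args))
    return sum(precs) / len(precs), precs
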
def get_results(args):
    # -get param-stamp
    param_stamp = get_param_stamp_from_args(args)
    # -check whether already run, and if not do so
    if os.path.isfile("{}/dict-{}.pkl".format(args.r_dir, param_stamp)):
        print("{}: already run".format(param_stamp))
    else:
        print("{}: ...running...".format(param_stamp))
        main_cl.run(args)
    # -get average precision
    file_name = '{}/prec-{}.txt'.format(args.r_dir, param_stamp)
    with open(file_name) as f:
        ave = float(f.readline())
    # -results-dict (named so it does not shadow the built-in `dict`)
    results_dict = utils.load_object("{}/dict-{}".format(args.r_dir, param_stamp))
    # -return tuple with the results
    return (results_dict, ave)
def get_results(args):
    # -get param-stamp
    param_stamp = get_param_stamp_from_args(args)
    # -check whether already run; if not do so
    if os.path.isfile('{}/dict-{}.pkl'.format(args.r_dir, param_stamp)):
        print("{}: already run".format(param_stamp))
    else:
        print("{}: ...running...".format(param_stamp))
        args.metrics = True
        main_cl.run(args)
    # -get average precision
    file_name = '{}/prec-{}.txt'.format(args.r_dir, param_stamp)
    with open(file_name) as f:
        ave = float(f.readline())
    # -get metrics-dict
    file_name = '{}/dict-{}'.format(args.r_dir, param_stamp)
    metrics_dict = utils.load_object(file_name)
    # -print average precision on screen
    print("--> average precision: {}".format(ave))
    # -return average precision & metrics-dict
    return (ave, metrics_dict)
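
# Hypothetical usage sketch: compare two settings of one hyper-parameter by
# deep-copying `args`, letting get_results() run (or load) each configuration,
# and printing the resulting average precisions. The attribute name 'si_c' is
# only an illustration and depends on the surrounding argument parser.
import copy

def compare_two_settings(args, attr='si_c', values=(0.1, 1.0)):
    for value in values:
        run_args = copy.deepcopy(args)
        setattr(run_args, attr, value)
        ave, metrics_dict = get_results(run_args)
        print("{}={}: average precision = {}".format(attr, value, ave))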