Example #1
def run():

    parser = argparse.ArgumentParser(description='Evaluate a solution to a problem instance')
    
    parser.add_argument('raw', help='raw')
    parser.add_argument('rop', help='rop')
    parser.add_argument('con', help='con')
    parser.add_argument('inl', help='inl')
    parser.add_argument('sol1', help='sol1')
    parser.add_argument('sol2', help='sol2')
    parser.add_argument('summary', help='summary')
    parser.add_argument('detail', help='detail')
    
    args = parser.parse_args()
    
    try:
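        # hand the eight command-line paths straight to evaluation.run and
        # unpack its six result values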
        (obj, cost, penalty, max_obj_viol, max_nonobj_viol, infeas) = evaluation.run(
            args.raw,
            args.rop,
            args.con,
            args.inl,
            args.sol1,
            args.sol2,
            args.summary,
            args.detail,
        )
    except:
        print("exception in evaluation.run")
        raise
    else:
        """process obj, cost, penalty, max_obj_viol, max_nonobj_viol, infeas
Example #2
 def batch_iter(self, batch_size, num_epochs, shuffle=True):
     '''Iterator'''
     # num = 1
     # data = np.array(data)
     # data_size = len(data)
     # num_batches_per_epoch = int((data_size - 1) / batch_size) + 1
     echo_part_num = len(self.all_text_path) // normal_param.num_database
     for epoch in range(num_epochs):
         print("epoch:", epoch, "/", num_epochs)
         for part_n in range(normal_param.num_database):
             is_save = False
             # train_data, train_label, dev_data, dev_label, vocal_size_train = self.deal_data(part=echo_part_num,n_part=part_n)
             train_data, train_label, vocal_size_train = self.deal_data(
                 part=echo_part_num, n_part=part_n)
             dev_data, dev_label = evaluation.run()
             data = list(zip(train_data, train_label))
             data = np.array(data)
             data_size = len(data)
             num_batches_per_epoch = int((data_size - 1) / batch_size) + 1
             if shuffle:
                 shuffle_indices = np.random.permutation(
                     np.arange(data_size))
                 shuffle_data = data[shuffle_indices]
             else:
                 shuffle_data = data
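             # walk the (optionally shuffled) data in batch_size chunks; is_save
             # becomes True on the last batch of the current partition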
             for batch_num in range(num_batches_per_epoch):
                 start_idx = batch_num * batch_size
                 end_idx = min((batch_num + 1) * batch_size, data_size)
                 if batch_num + 1 == num_batches_per_epoch:
                     is_save = True
                 yield shuffle_data[
                     start_idx:end_idx], dev_data, dev_label, is_save
Example #3
def automata_fitness(individual):

    initial_lattice = np.zeros(shape=(40, 40))
    initial_lattice[10][10] = 1
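    # seed the 40x40 lattice with a single live cell at (10, 10)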

    rules = [individual[0:9], individual[9:]]
    #import pdb; pdb.set_trace()

    max_t = 40

    dead_list = automaton.run(initial_lattice, rules, max_t)
    fitness = evaluation.run(dead_list, real_data)

    return ([fitness])
Example #4
def automata_fitness(individual):

    initial_lattice = np.zeros(shape=(40, 40))
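    # seed ten random cells inside the 15-20 block (repeats may land on the same cell)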
    for i in range(10):
        initial_lattice[random.randint(15, 20)][random.randint(15, 20)] = 1
    # for i in range(13, 19):
    #     for j in range(13, 18):
    #         initial_lattice[i][j] = 1

    rules = [individual[0:9], individual[9:]]
    #import pdb; pdb.set_trace()

    max_t = 40

    dead_list = automaton.run(initial_lattice, rules, max_t)
    fitness = evaluation.run(dead_list, real_data)

    return ([fitness])
Example #5
def run():

    raw = './examples/case2/case.raw'
    rop = './examples/case2/case.rop'
    con = './examples/case2/case.con'
    inl = './examples/case2/case.inl'
    sol1 = './examples/case2/sol1.txt'
    sol2 = './examples/case2/sol2.txt'
    summary = './summary.csv'
    detail = './detail.csv'
    
    try:
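        # evaluate the bundled case2 example with fixed input and output paths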
        (obj, cost, penalty, max_obj_viol, max_nonobj_viol, infeas) = evaluation.run(
            raw, rop, con, inl, sol1, sol2, summary, detail)
    except:
        print("exception in evaluation.run")
        raise
    else:
        """process obj, cost, penalty, max_obj_viol, max_nonobj_viol, infeas
Example #6
def run_sol1():
    '''read data, evaluate solution1'''

    parser = argparse.ArgumentParser(
        description='Evaluate a solution to a problem instance')

    parser.add_argument('raw', help='raw')
    parser.add_argument('rop', help='rop')
    parser.add_argument('con', help='con')
    parser.add_argument('inl', help='inl')
    parser.add_argument('sol1', help='sol1')
    parser.add_argument('sol2', help='sol2')
    parser.add_argument('summary', help='summary')
    parser.add_argument('detail', help='detail')

    args = parser.parse_args()
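    # sol2 is accepted on the command line, but only sol1 is checked and evaluated below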

    # Check files exist
    for f in [args.raw, args.rop, args.con, args.inl, args.sol1]:
        if not os.path.isfile(f):
            raise Exception("Can't find {}".format(f))
            #raise FileNotFoundError("Can't find {}".format(f)) # not in Python 2

    try:
        #(obj, cost, penalty, max_obj_viol, max_nonobj_viol, infeas) = evaluation.run( # eval return value is a tuple here
        eval_return_value = evaluation.run(  # eval_return_value is a bool here
            args.raw,
            args.rop,
            args.con,
            args.inl,
            sol1_name=args.sol1,
            sol2_name=None,
            summary_name=args.summary,
            detail_name=args.detail,
        )
    except:
        print("exception in evaluation.run")
        raise
    else:
        """process obj, cost, penalty, max_obj_viol, max_nonobj_viol, infeas
Example #7
def run():

    parser = argparse.ArgumentParser(
        description='Evaluate a solution to a problem instance')

    parser.add_argument('raw', help='raw')
    parser.add_argument('rop', help='rop')
    parser.add_argument('con', help='con')
    parser.add_argument('inl', help='inl')
    parser.add_argument('sol1', help='sol1')
    parser.add_argument('sol2', help='sol2')
    parser.add_argument('summary', help='summary')
    parser.add_argument('detail', help='detail')

    args = parser.parse_args()

    # Check files exist
    for f in [args.raw, args.rop, args.con, args.inl, args.sol1, args.sol2]:
        if not os.path.isfile(f):
            raise Exception("Can't find {}".format(f))
            #raise FileNotFoundError("Can't find {}".format(f)) # not in Python 2

    try:
        (obj, cost, penalty, max_obj_viol, max_nonobj_viol,
         infeas) = evaluation.run(
             args.raw,
             args.rop,
             args.con,
             args.inl,
             args.sol1,
             args.sol2,
             args.summary,
             args.detail,
         )
    except:
        print("exception in evaluation.run")
        raise
    else:
        """process obj, cost, penalty, max_obj_viol, max_nonobj_viol, infeas
Example #8
def run(X, classes, k, runs, distance_measure, n_converged=5):
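    # repeat k-Means `runs` times, scoring each clustering with evaluation.run
    # and collecting per-run precision, recall, f-score, ri, and epoch counts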

    precisions = []
    recalls = []
    fscores = []
    ris = []
    epochs = []

    for _ in range(runs):
        # Run k-Means algorithm
        clusters, epoch = algorithm(X, k, distance_measure, n_converged)

        # Evaluate our classifier
        p, r, f, ri = evaluation.run(classes, clusters)

        # Append metrics for this run
        precisions.append(p)
        recalls.append(r)
        fscores.append(f)
        ris.append(ri)
        epochs.append(epoch)

    return precisions, recalls, fscores, ris, epochs
Example #9
        description="Evaluation runner for the RESCAL-based matchmakers")
    parser.add_argument("-g",
                        "--ground-truth",
                        required=True,
                        type=parse_matrix_market,
                        help="Matrix with the ground truth relation")
    parser.add_argument("-s",
                        "--slices",
                        nargs="*",
                        type=parse_matrix_market,
                        default=[],
                        help="Matrices for tensor slices")
    parser.add_argument("-c",
                        "--config",
                        type=parse_config,
                        default={},
                        help="EDN configuration")
    args = parser.parse_args()

    # Validate enumerations
    matchmaker_type = args.config["matchmaker"]["type"]
    if matchmaker_type not in {"random", "rescal"}:
        raise ValueError(
            "Matchmaker type {} is not supported!".format(matchmaker_type))
    evaluation_type = args.config["evaluation"]["type"]
    if evaluation_type not in {"n-folds", "time-series"}:
        raise ValueError(
            "Evaluation type {} is not supported!".format(evaluation_type))

    evaluation.run(args)
Example #10
    if len(args.learning_rate) > 0:
        learning_rate = float(args.learning_rate)
    else:
        learning_rate = 0.1

    if len(args.n_epochs) > 0:
        n_epochs = int(args.n_epochs)
    else:
        n_epochs = 5

    if len(args.embedding_size) > 0:
        embedding_size = int(args.embedding_size)
    else:
        embedding_size = 50

    if len(args.num_neg_samples) > 0:
        num_neg_samples = int(args.num_neg_samples)
    else:
        num_neg_samples = 10

    log = open("time_log.txt", "a")

    for fn in ["weibo", "digg", "mag"]:
        extract_feats_and_trainset.run(fn, sampling_perc, log)
        preprocess_for_imm.run(fn, log)
        rank_nodes.run(fn)
        infector.run(fn, learning_rate, n_epochs, embedding_size, num_neg_samples, log)
        iminfector.run(fn, embedding_size, log)
        evaluation.run(fn, log)
    log.close()
Example #11
import statistical_tests as stat
import evaluation as data
import numpy as np
import pandas as pd
from scipy import stats

(rg_turn_counter, rg_score_1, rg_score_2, rg_composite_score, rg_glue_icebreaker,
 rg_alana_bot, rg_duration, rg_time_per_glue_turn, rg_coherence_metric_1,
 rg_coherence_metric_2, rg_coherence_metric_3, rg_coherence_metric_3_percent,
 cc_turn_counter, cc_score_1, cc_score_2, cc_composite_score, cc_glue_icebreaker,
 cc_alana_bot, cc_duration, cc_time_per_glue_turn, cc_coherence_metric_1,
 cc_coherence_metric_2, cc_coherence_metric_3, cc_coherence_metric_3_percent,
 rg_avg_rating, rg_response_rating, cc_avg_rating, cc_response_rating,
 likert_rating) = data.run()
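# likert_rating rows 1-6 hold the RG-condition answers and rows 7 onward the
# CC-condition answers; columns 1-6 are the six questionnaire items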

rg_q1 = likert_rating[1:7][1].to_numpy()
rg_q2 = likert_rating[1:7][2].to_numpy()
rg_q3 = likert_rating[1:7][3].to_numpy()
rg_q4 = likert_rating[1:7][4].to_numpy()
rg_q5 = likert_rating[1:7][5].to_numpy()
rg_q6 = likert_rating[1:7][6].to_numpy()

# rg_q2 = np.asarray(likert_rating[1:7][2]).reshape(0,6)
# rg_q3 = np.asarray(likert_rating[1:7][3]).reshape(0,6)
# rg_q4 = np.asarray(likert_rating[1:7][4]).reshape(0,6)
# rg_q5 = np.asarray(likert_rating[1:7][5]).reshape(0,6)
# rg_q6 = np.asarray(likert_rating[1:7][6]).reshape(0,6)
# cc_q1 = np.asarray(likert_rating[7:][1]).reshape(0,6)
cc_q1 = likert_rating[7:][1].to_numpy()
cc_q2 = likert_rating[7:][2].to_numpy()
cc_q3 = likert_rating[7:][3].to_numpy()
cc_q4 = likert_rating[7:][4].to_numpy()
cc_q5 = likert_rating[7:][5].to_numpy()
cc_q6 = likert_rating[7:][6].to_numpy()

rg_score = np.concatenate((rg_score_1, rg_score_2), axis=0)
cc_score = np.concatenate((cc_score_1, cc_score_2), axis=0)
Example #12
def run():

    # change the arguments as desired
    parser = argparse.ArgumentParser(
        description='Evaluate a solution to a problem instance')

    parser.add_argument('scoring_method',
                        help='scoring method / division: 1/2/3/4')
    parser.add_argument('solutionpath',
                        help='path of the folder containing solution')
    parser.add_argument('datapath',
                        help='path of the folder containing input files')
    parser.add_argument('network_model', help='name of the network model')
    parser.add_argument(
        'model_scenario_number',
        help='scenario number being evaluated relative to single model')
    parser.add_argument(
        'dataset_scenario_number',
        help='scenario number being evaluated relative to a multi-model dataset'
    )
    parser.add_argument(
        'slack_objective',
        help='slack objective for empty/infeasible/bad solutions')
    parser.add_argument('code1_runtime', help='code1 runtime')
    parser.add_argument('code2_runtime',
                        help='code2 runtime',
                        action="store",
                        nargs='?')
    parser.add_argument('contingency_count',
                        help='contingency count',
                        action="store",
                        nargs='?')
    parser.add_argument('sec_per_contingency',
                        help='seconds per contingency',
                        action="store",
                        nargs='?')
    parser.add_argument('code2_runtime_goal_sec',
                        help='code2 runtime goal',
                        action="store",
                        nargs='?')
    parser.add_argument('is_sensitive',
                        help='Is the dataset sensitive? yes=1, no=0',
                        action="store",
                        nargs='?')

    #parser.add_argument('out', help='path name of the .csv file to write')

    args = parser.parse_args()

    model_path = "{}/{}/".format(args.datapath, args.network_model)
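    # scenario folders are zero-padded two-digit directories (scenario_01, scenario_02, ...)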
    scenario_path = "%s/scenario_%02d" % (model_path,
                                          int(args.model_scenario_number))

    print('Model Path: ', model_path)
    print('Scenario Path: ', scenario_path)

    args_raw = getInputPath(
        model_path, "case.raw", scenario_path
    ) + "/case.raw"  #"{}/case.raw".format(scenario_path, args.model_scenario_number)
    args_sup = getInputPath(model_path, "case.json",
                            scenario_path) + "/case.json"
    args_con = getInputPath(model_path, "case.con",
                            scenario_path) + "/case.con"

    #/${NETWORKMODEL}_output${SCENARIO_LOCAL}
    output_path = "{}/{}_output{}".format(args.solutionpath,
                                          args.network_model,
                                          args.model_scenario_number)
    args_sol1 = "{}".format(output_path)
    args_sol2 = "{}/solution2.txt".format(output_path)

    args_summary = "{}/GOCFeasibility.csv".format(output_path)
    args_detail = "{}/{}_DetailedSolution.csv".format(
        output_path, args.model_scenario_number)

    line_switching_allowed = True if args.scoring_method == '3' or args.scoring_method == '4' else None
    xfmr_switching_allowed = True if args.scoring_method == '3' or args.scoring_method == '4' else None

    obj = MAXOBJ

    try:
        contingency_count = int(args.contingency_count)
    except:
        contingency_count = 0

    try:
        sec_per_contingency = float(args.sec_per_contingency)
    except:
        sec_per_contingency = "N/A"

    try:
        code2_runtime_goal_sec = float(args.code2_runtime_goal_sec)
    except:
        code2_runtime_goal_sec = 999999.0

    print("\tScoring Method:%s" % (args.scoring_method))
    print("\tModel:%s" % (model_path))
    print("\tScenario:%s" % (scenario_path))
    print("\tOutput:%s" % (output_path))
    print("\tRAW:%s" % (args_raw))
    print("\tSUP:%s" % (args_sup))
    print("\tCON:%s" % (args_con))

    #Check if solution1 is valid
    if args.slack_objective == "0" and args.code1_runtime == "0" and args.code2_runtime == "0":
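        # when the slack objective and both runtimes are passed as "0", only the
        # base case (solution1) is scored, *_base.csv outputs are written, and the
        # script exits immediately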
        try:
            args_summary = "{}/GOCFeasibility_base.csv".format(output_path)
            args_detail = "{}/{}_DetailedSolution_base.csv".format(
                output_path, args.model_scenario_number)

            evaluation.run(args_raw, args_con, args_sup, args_sol1, None,
                           args_summary, args_detail, line_switching_allowed,
                           xfmr_switching_allowed)
        except:
            traceback.print_exc()
            errfile_path = output_path + '/solution_BASECASE.err'
            with open(errfile_path, 'w'):
                os.utime(errfile_path, None)
        sys.exit(0)

    #solutions_exist = os.path.isfile(args_sol1) and os.path.isfile(args_sol2)
    missing_solution = 'FALSE'  #if solutions_exist else 'TRUE'

    obj = None
    infeas = None

    start_time = time.time()

    slack_objective = float(args.slack_objective)
    score = slack_objective
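    # the slack objective is the fallback score; it is kept whenever the solution
    # is missing, infeasible, or yields a NaN objective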

    code1_runtime = ""
    code2_runtime = ""
    infeasibility_text = "N/A"

    try:
        code1_runtime = float(args.code1_runtime) * 1.0
    except:
        code1_runtime = args.code1_runtime

    try:
        code2_runtime = float(args.code2_runtime) * 1.0
    except:
        code2_runtime = args.code2_runtime

    solutions_exist = False
    try:

        #if solutions_exist:
        (obj, infeas, solutions_exist) = evaluation.run(
            args_raw, args_con, args_sup, args_sol1, args_sol2, args_summary,
            args_detail, line_switching_allowed, xfmr_switching_allowed)

        if process_rank == 0:

            if not solutions_exist:
                raise Exception("All solutions do not exist")

            if obj > slack_objective and infeas == 0:  # objective exceeds slack and solution is feasible
                print("obj > slack_objective and infeas == 0")
                score = obj
            if abs(slack_objective - MAXOBJ) < 1:
                # slack objective equals the MAXOBJ sentinel, i.e. no real slack
                # objective is available, so capture the worst-case score
                print(
                    "slack_objective - 9876543210 < 1 i.e. there is slack available to set worst score"
                )
                score = obj
            if obj != obj:  # NaN check: float('nan') never compares equal, not even to itself
                score = slack_objective
            if infeas == 1:
                score = slack_objective

            eval_runtime = time.time() - start_time

            infeasibility_text = 'TRUE' if (
                infeas == 1) else 'FALSE' if infeas == 0 else 'N/A'

            with open(args_summary, 'w') as summaryfile:
                csvwriter = csv.writer(summaryfile,
                                       delimiter=',',
                                       quoting=csv.QUOTE_MINIMAL)
                csvwriter.writerow([
                    'Scenario', 'Score', 'Objective', 'Infeasibility',
                    'Bad/Missing Solution', 'Slack Objective',
                    'Evaluation Duration (sec)', 'Code 1 Duration (sec)',
                    'Code 2 Duration (sec)', 'Contingency Count',
                    'Seconds per Contingency', 'Code 2 Runtime Goal (sec)'
                ])
                csvwriter.writerow([
                    '{} Scenario {} (output{})'.format(
                        args.network_model, args.model_scenario_number,
                        args.model_scenario_number), score, obj,
                    infeasibility_text, missing_solution, slack_objective,
                    eval_runtime, code1_runtime, code2_runtime,
                    contingency_count, sec_per_contingency,
                    code2_runtime_goal_sec
                ])

            print("\tSolutions generated:{}".format(solutions_exist))

            try:
                if args.is_sensitive == "1":
                    obj -= slack_objective
                    score -= slack_objective
            except:
                pass

            if args.is_sensitive == "1":
                contingency_count = 0
                code2_runtime_goal_sec = 0
                slack_objective = 0

            if solutions_exist:
                print("\tModel:{}".format(args.network_model))
                print("\tSlack Objective:%f" % (slack_objective))
                print("\tObjective:%f" % (obj))
                print("\tInfeasibility:%d" % (infeas))
                print("\tEval runtime:%f" % (eval_runtime))
                print("\tCode 1 runtime:{}".format(code1_runtime))
                print("\tCode 2 runtime:{}".format(code2_runtime))
                print("\tsec_per_contingency:{}".format(sec_per_contingency))
                print("\tScore:%f" % (score))
                print("\tContingency Count:%d" % (contingency_count))
                print("\tcode2_runtime_goal_sec:%f" % (code2_runtime_goal_sec))

    except Exception as e:
        traceback.print_exc()
        print(e)
        missing_solution = 'TRUE' if solutions_exist else missing_solution
        with open(args_summary, 'w') as summaryfile:
            csvwriter = csv.writer(summaryfile,
                                   delimiter=',',
                                   quoting=csv.QUOTE_MINIMAL)
            csvwriter.writerow([
                'Scenario', 'Score', 'Objective', 'Infeasibility',
                'Bad/Missing Solution', 'Slack Objective',
                'Evaluation Duration (sec)', 'Code 1 Duration (sec)',
                'Code 2 Duration (sec)', 'Contingency Count',
                'Seconds per Contingency', 'Code 2 Runtime Goal (sec)'
            ])
            csvwriter.writerow([
                '{} Scenario {} (output{})'.format(args.network_model,
                                                   args.model_scenario_number,
                                                   args.model_scenario_number),
                score, 'N/A', infeasibility_text, missing_solution,
                slack_objective, '', code1_runtime, code2_runtime,
                contingency_count, 'N/A', code2_runtime_goal_sec
            ])
Example #13
from evaluation import load_model, process_data, run
from data import load_validation_data, pair_superset


# Load Model and Training Data
model = load_model()

(x_val, y_val) = load_validation_data()

# Save Predictions
predictions = []

# create test positive and negative pairs
te_pairs, te_y = pair_superset(x_val, y_val)


for i in range(len(te_pairs[:1000])):
    print(f'Running Test {i}')

    data = process_data(te_pairs[i][0], te_pairs[i][1])
 
    # print(f'data Shape : {data.shape}')

    result = run(data, model)
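    # run() returns the model's prediction for this pair; collect it for scoring below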
    predictions.append(result)
    

    
correct = [1 if predictions[i] == te_y[i] else 0 for i in range(len(predictions))]
print(f'Test Accuracy: {sum(correct)/len(correct)}')
Example #14
def load_data(filename):
    raw_data = csv.DictReader(open(filename))
    return parse_all_records(raw_data, keep_missing=False)


if __name__ == '__main__':
    args = parser.parse_args()

    output_path = "results/{}/".format(
        datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
    # directory for outputs
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    log_path = output_path + "log.txt"
    logging.basicConfig(filename=log_path,
                        format='%(asctime)s:%(levelname)s: %(message)s',
                        level=logging.INFO)
    datafile = "data/warfarin.csv"
    patients = load_data(datafile)
    models = []
    logging.info(f"Initializing recommender model(s): {args.algo}")

    if args.algo == "all":  # run all models
        models += [get_recommender(algo, output_path) for algo in ALGOS]
    else:  # run a single model
        models += [get_recommender(args.algo, output_path)]

    iters = args.iter if args.iter else 1
    train_ratio = args.train_ratio if args.train_ratio is not None else 0.8

    evaluation.run(patients, models, iters, train_ratio, verbose=True)
Example #15
            track.extract_matches(FLAGS, PATHS)
    else:
        # Extract features.
        if not os.path.exists(PATHS.feature_path):
            feature.run(FLAGS, PATHS)

        # Match features.
        if not os.path.exists(PATHS.matches_file):
            match.run(FLAGS, PATHS)

    if FLAGS.use_gt_orientations:
        # Generate ground truth object information if the option is true.
        ground_truth.run(FLAGS, PATHS)

    # Build reconstruction.
    if not os.path.exists(PATHS.reconstruction_file + '-0'):
        reconstruction.run(FLAGS, PATHS)

    # Extract additional information.
    # if PATHS.ground_truth_path:
    #     if not os.path.exists(PATHS.ground_truth_camera_param_path):
    #         orientation.convert_ground_truth(FLAGS, PATHS)
    #     match_info.run(FLAGS, PATHS)
    #     orientation.run(FLAGS, PATHS)

    # Compare with ground truth if exists.
    if os.path.exists(PATHS.ground_truth_pose_path):
        evaluation.run(FLAGS, PATHS)
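        # additionally dump match info when the output path is the data_dir/sfm_track folder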
        if PATHS.output_path == os.path.join(FLAGS.data_dir, 'sfm_track'):
            match_info.run(FLAGS, PATHS)