# Example 1
 def __init__(self, ide, params):
     """Set up the basic, resource-allocation, and results attributes.

     Parameters
     ----------
     ide :
         Identifier stored as ``self.id`` (type not visible here — presumably
         an int or str; confirm against callers).
     params : dict
         Run configuration. Keys read here: 'ALGORITHM', 'L', 'JUDGMENT_TYPE',
         'T', 'NUM_ITERATIONS', 'OUTPUT_DIR', 'MAGNITUDE'; the full dict is
         also forwarded to ``set_network``, ``JudgmentModel`` and
         ``ResourceModel``.
     """
     #: Basic attributes
     self.id = ide
     # NOTE(review): set_algo/set_network/set_time_steps/set_out_dir are
     # defined elsewhere in this class; their exact semantics are not
     # visible from this chunk.
     self.algo = self.set_algo(params['ALGORITHM'])
     self.layers = params['L']
     self.judge_type = params['JUDGMENT_TYPE']
     self.net = self.set_network(params)
     self.time_steps = self.set_time_steps(params['T'],
                                           params['NUM_ITERATIONS'])
     self.judgments = JudgmentModel(params, self.time_steps)
     #: Resource allocation attributes
     self.resource = ResourceModel(params, self.time_steps)
     # Convenience aliases into the resource model (must come after it).
     self.res_alloc_type = self.resource.type
     self.v_r = self.resource.v_r
     #: Results attributes
     self.output_dir = self.set_out_dir(params['OUTPUT_DIR'],
                                        params['MAGNITUDE'])
     self.results_judge = indputils.INDPResults(self.layers)
     self.results_real = indputils.INDPResults(self.layers)
# Example 2
def read_results(combinations,
                 optimal_combinations,
                 cost_types,
                 root_result_dir='../results/',
                 deaggregate=False,
                 rslt_dir_lyr='/agents'):
    '''
    This function reads the results of analyses (INDP, JC, etc.) and the corresponding
    objects from file and aggregates the results in a dictionary.

    Parameters
    ----------
    combinations : dict
        All combinations of magnitude, sample, judgment type, resource allocation type
        involved in the JC (or any other decentralized results) collected by
        :func:`generate_combinations`.
    optimal_combinations : dict
        All combinations of magnitude, sample, judgment type, resource allocation type
        involved in the INDP (or any other optimal results) collected by :func:`generate_combinations`.
    cost_types : str
        Cost types that should be read from results and will be shown in the plots.
    root_result_dir : 'str', optional
        Root directory where the results are stored. The default is '../results/'.
    deaggregate : bool, optional
        Should the deaggregated results (for separate layers) be read. The default is False.
    rslt_dir_lyr : str, optional
        Directory inside the :func:`root result directory <read_results>` where
        the deaggregated results (for separate layers) are. The default is '/agents'.

    Returns
    -------
    cmplt_results : dict
        Dictionary that contains the read results.
    objs : dict
        Dictionary that contains the objects corresponding to the read results.

    '''
    columns = [
        't', 'Magnitude', 'cost_type', 'decision_type', 'judgment_type',
        'auction_type', 'valuation_type', 'no_resources', 'sample', 'cost',
        'normalized_cost', 'layer'
    ]
    # Work on a copy: the original `cost_types += [...]` mutated the caller's
    # list in place, so repeated calls kept appending 'Under Supply Perc'.
    cost_types = list(cost_types) + ['Under Supply Perc']
    # Accumulate rows in a plain list and build the DataFrame once at the end:
    # DataFrame.append in a loop was O(n^2) and was removed in pandas >= 2.0.
    rows = []
    objs = {}
    print("\nAggregating Results")
    joinedlist = combinations + optimal_combinations
    for idx, x in enumerate(joinedlist):
        #: Build the result-directory suffix for this combination.
        full_suffix = '_L' + str(x[2]) + '_m' + str(x[0]) + '_v' + str(x[3])
        if x[4][:2] in ['jc', 'ng', 'bg']:
            full_suffix += '_' + x[5]
            if x[6] in ["MDA", "MAA", "MCA"]:
                full_suffix += '_AUCTION_' + x[6] + '_' + x[7]
            else:
                full_suffix += '_' + x[6]
        result_dir = root_result_dir + x[4] + '_results' + full_suffix
        # Guard clause: a missing folder is fatal, exactly as before.
        if not os.path.exists(result_dir + '/actions_' + str(x[1]) + '_.csv'):
            sys.exit('Error: The combination or folder does not exist' +
                     str(x))
        # Save all results to Pandas dataframe
        sample_result = indputils.INDPResults()
        sam_rslt_lyr = {
            l + 1: indputils.INDPResults()
            for l in range(x[2])
        }
        ### !!! Assume the layer name is l+1
        sample_result = sample_result.from_csv(result_dir,
                                               x[1],
                                               suffix=x[8])
        if deaggregate:
            for l in range(x[2]):
                sam_rslt_lyr[l + 1] = sam_rslt_lyr[l + 1].from_csv(
                    result_dir + rslt_dir_lyr,
                    x[1],
                    suffix='L' + str(l + 1) + '_' + x[8])
        # Costs at t=0 are the normalization baseline.
        initial_cost = {c: sample_result[0]['costs'][c] for c in cost_types}
        for t in sample_result.results:
            for c in cost_types:
                if initial_cost[c] != 0.0:
                    norm_cost = sample_result[t]['costs'][c] / initial_cost[c]
                else:
                    norm_cost = -1.0  # sentinel: baseline cost was zero
                values = [
                    t, x[0], c, x[4], x[5], x[6], x[7], x[3], x[1],
                    float(sample_result[t]['costs'][c]), norm_cost, 'nan'
                ]
                rows.append(dict(zip(columns, values)))
        if deaggregate:
            for l in range(x[2]):
                initial_cost = {
                    c: sam_rslt_lyr[l + 1][0]['costs'][c]
                    for c in cost_types
                }
                for t in sam_rslt_lyr[l + 1].results:
                    for c in cost_types:
                        if initial_cost[c] != 0.0:
                            norm_cost = sam_rslt_lyr[
                                l + 1][t]['costs'][c] / initial_cost[c]
                        else:
                            norm_cost = -1.0
                        values = [
                            t, x[0], c, x[4], x[5], x[6], x[7], x[3], x[1],
                            float(sam_rslt_lyr[l + 1][t]['costs'][c]),
                            norm_cost, l + 1
                        ]
                        rows.append(dict(zip(columns, values)))
        #: Getting back the JuCModel objects:
        if x[4][:2] in ['jc', 'ng', 'bg']:
            with open(result_dir + '/objs_' + str(x[1]) + '.pkl',
                      'rb') as f:
                objs[str(x)] = pickle.load(f)
        if idx % (len(joinedlist) // 100 + 1) == 0:
            update_progress(idx + 1, len(joinedlist))
    update_progress(len(joinedlist), len(joinedlist))
    cmplt_results = pd.DataFrame(rows, columns=columns)
    return cmplt_results, objs
# Example 3
def predict_resotration(pred_dict, fail_sce_param, params, real_rep_sequence):
    """Predict restoration plans, write them to file, and score them.

    Parameters
    ----------
    pred_dict : dict
        Prediction settings. Keys read here: 'num_pred', 'param_folder',
        'output_dir'.
    fail_sce_param : dict
        Failure-scenario settings. Keys read here: 'mags', 'sample_range'.
    params : dict
        Run settings. Keys read here: 'NUM_ITERATIONS', 'L', 'N', 'V'.
    real_rep_sequence : dict
        Observed repair sequence per element name; each value is indexable
        over time steps (assumed 0/1 repair states -- TODO confirm).

    Returns
    -------
    pred_results : dict
        Predicted restoration results (INDPResults) per prediction sample.
    pred_error : pd.DataFrame
        Per-element predicted vs. real repair time and their difference.
    rep_prec : pd.DataFrame
        Predicted vs. real repair precision per time step and sample.
    """
    print('\nMagnitude '+str(fail_sce_param['mags'])+' sample '+str(fail_sce_param['sample_range'])+\
          ' Rc '+str(params['V']))
    ### Define a few vars and lists ###
    num_pred = pred_dict['num_pred']
    T = params['NUM_ITERATIONS']
    pred_results = {
        x: indputils.INDPResults(params['L'])
        for x in range(num_pred)
    }
    # run_times[t] = [wall-clock time, model-prediction time] per time step.
    run_times = {x: [0, 0] for x in range(T + 1)}
    ### Initialize element and network objects ###
    objs = import_initial_data(params, fail_sce_param)
    net_obj = {x: copy.deepcopy(params['N']) for x in range(num_pred)}
    for key, val in objs.items():
        val.initialize_state_matrices(T, num_pred)
        val.check_model_exist(pred_dict['param_folder'])
    ### Initialize cost vectors ###
    indp_results = indp.indp(params['N'],
                             v_r=0,
                             T=1,
                             layers=params['L'],
                             controlled_layers=params['L'],
                             print_cmd=False,
                             time_limit=None)
    run_times[0][0] = indp_results[1][0]['run_time']
    for pred_s in range(num_pred):
        pred_results[pred_s].extend(indp_results[1], t_offset=0)
    # costs[0] holds overall costs; costs[l] holds per-layer costs.
    costs = {x: {} for x in params['L'] + [0]}
    for h in list(indp_results[1][0]['costs'].keys()):
        hkey = h.replace(' ', '_')  # cost names use '_' internally
        costs[0][hkey] = np.zeros((T + 1, num_pred))
        costs[0][hkey][0, :] = indp_results[1].results[0]['costs'][h]
        for l in params['L']:
            costs[l][hkey] = np.zeros((T + 1, num_pred))
            costs[l][hkey][0, :] = \
                indp_results[1].results_layer[l][0]['costs'][h]
    ### Output folders are loop-invariant: build/create them once, not per
    ### (t, pred_s) iteration as before.
    subfolder_results = 'stm_results_L' + str(len(
        params['L'])) + '_m' + str(
            fail_sce_param['mags']) + '_v' + str(params['V'])
    output_dir = check_folder(pred_dict['output_dir'] + '/' +
                              subfolder_results)
    output_dir_l = check_folder(pred_dict['output_dir'] + '/' +
                                subfolder_results + '/agents')
    ###Predict restoration plans###
    print('Predicting, time step:')
    for t in range(T):  # t is the time index for previous time step
        print(str(t + 1) + '.', end=" ")
        start_time = time.time()
        ### Feature extraction###
        layer_dict = extract_features(objs, net_obj, t, params['L'], num_pred)
        ###  Cost normalization ###
        costs_normed = {}
        for c in list(costs[0].keys()):
            costs_normed[c] = STAR_utils.normalize_costs(costs[0][c], c)
        run_times[t + 1][1] = predict_next_step(t,
                                                T,
                                                objs,
                                                pred_dict,
                                                costs_normed,
                                                params['V'],
                                                layer_dict,
                                                print_cmd=True)
        run_times[t + 1][0] = time.time() - start_time
        ### Calculate the cost of scenario ###
        for pred_s in range(num_pred):
            start_time = time.time()
            decision_vars = {0: {}}  #0 because iINDP
            for key, val in objs.items():
                decision_vars[0][val.name] = val.state_hist[t + 1, pred_s]
                if val.type == 'a':
                    decision_vars[0][val.dupl_name] = val.state_hist[t + 1,
                                                                     pred_s]
            flow_results = indpalt.flow_problem(net_obj[pred_s],
                                                v_r=0,
                                                layers=params['L'],
                                                controlled_layers=params['L'],
                                                decision_vars=decision_vars,
                                                print_cmd=True,
                                                time_limit=None)
            pred_results[pred_s].extend(flow_results[1], t_offset=t + 1)
            indp.apply_recovery(net_obj[pred_s], flow_results[1], 0)

            ### Update the cost dict for the next time step and run times ###
            equiv_run_time = 0
            if run_times[t + 1][1] != 0:
                equiv_run_time = run_times[t + 1][0] / run_times[t + 1][1] + (
                    time.time() - start_time)
            for h in list(costs[0].keys()):
                costs[0][h][t + 1,
                            pred_s] = flow_results[1][0]['costs'][h.replace(
                                '_', ' ')]
                pred_results[pred_s].results[t +
                                             1]['run_time'] = equiv_run_time
                for l in params['L']:
                    costs[l][h][t + 1, pred_s] = flow_results[1].results_layer[
                        l][0]['costs'][h.replace('_', ' ')]
                    pred_results[pred_s].results_layer[l][
                        t + 1]['run_time'] = equiv_run_time
            ### Write (incremental) results to file each time step ###
            pred_results[pred_s].to_csv(output_dir,
                                        fail_sce_param["sample_range"],
                                        suffix='ps' + str(pred_s))
            pred_results[pred_s].to_csv_layer(output_dir_l,
                                              fail_sce_param["sample_range"],
                                              suffix='ps' + str(pred_s))
            ####Write models to file###
            # indp.save_INDP_model_to_file(flow_results[0], './models',t, l = 0)

    # Extract prediction results. Rows are accumulated in plain lists and
    # turned into DataFrames once: DataFrame.append in a loop was O(n^2)
    # and was removed in pandas >= 2.0.
    pred_error_rows = []
    sum_pred_rep_prec = np.zeros((T + 1, num_pred))
    sum_real_rep_prec = np.zeros((T + 1))
    for key, val in objs.items():
        sum_real_rep_prec += real_rep_sequence[key][:T + 1]
        for pred_s in range(num_pred):
            if real_rep_sequence[key][0] != val.state_hist[0, pred_s]:
                sys.exit('Error: Unmatched initial state')
            real_rep_time = T + 1 - sum(real_rep_sequence[key][:T + 1])
            pred_rep_time = T + 1 - sum(val.state_hist[:, pred_s])
            pred_error_rows.append({
                'name': key,
                'pred sample': pred_s,
                'real rep time': real_rep_time,
                'pred rep time': pred_rep_time,
                'prediction error': real_rep_time - pred_rep_time
            })
            sum_pred_rep_prec[:, pred_s] += val.state_hist[:, pred_s]
    pred_error = pd.DataFrame(pred_error_rows,
                              columns=[
                                  'name', 'pred sample', 'pred rep time',
                                  'real rep time', 'prediction error'
                              ])
    no_elements = len(objs)  # loop-invariant: hoisted out of the loops below
    rep_prec_rows = []
    for pred_s in range(num_pred):
        for t in range(T + 1):
            rep_prec_rows.append({
                't': t,
                'pred sample': pred_s,
                'pred rep prec': sum_pred_rep_prec[t, pred_s] / no_elements,
                'real rep prec': sum_real_rep_prec[t] / no_elements
            })
    rep_prec = pd.DataFrame(
        rep_prec_rows,
        columns=['t', 'pred sample', 'pred rep prec', 'real rep prec'])
    return pred_results, pred_error, rep_prec