Example #1
    def _archive_fetch(self, fh, header, archive, from_time, until_time):
        from_time = roundup(from_time, archive['sec_per_point'])
        until_time = roundup(until_time, archive['sec_per_point'])
        tag_cnt = len(header['tag_list'])
        null_point = (None,) * tag_cnt

        base_point = self._read_base_point(fh, archive, header)
        base_ts = base_point[0]

        if base_ts == 0:
            step = archive['sec_per_point']
            cnt = (until_time - from_time) / step
            time_info = (from_time, until_time, step)
            val_list = [null_point] * cnt
            return (header, time_info, val_list)

        from_offset = self._timestamp2offset(from_time, base_ts, header, archive)
        until_offset = self._timestamp2offset(until_time, base_ts, header, archive)

        fh.seek(from_offset)
        if from_offset < until_offset:
            series_str = fh.read(until_offset - from_offset)
        else:
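            # the requested range wraps past the end of this archive's
            # circular buffer, so read the tail and then the head in two chunks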
            archive_end = archive['offset'] + archive['size']
            series_str = fh.read(archive_end - from_offset)
            fh.seek(archive['offset'])
            series_str += fh.read(until_offset - archive['offset'])

        ## unpack series string
        point_format = header['point_format']
        byte_order, point_type = point_format[0], point_format[1:]
        cnt = len(series_str) / header['point_size']
        series_format = byte_order + point_type * cnt
        unpacked_series = struct.unpack(series_format, series_str)

        ## construct value list
        # pre-allocate entire list for speed
        val_list = [null_point] * cnt
        step = tag_cnt + 1
        sec_per_point = archive['sec_per_point']
        for i in xrange(0, len(unpacked_series), step):
            point_ts = unpacked_series[i]
            if from_time <= point_ts < until_time:
                val = unpacked_series[i+1: i+step]
                idx = (point_ts - from_time) / sec_per_point
                val_list[idx] = self._conver_null_value(val)

        time_info = (from_time, until_time, sec_per_point)
        return header, time_info, val_list
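
Every example in this listing calls a project-specific roundup helper whose definition is not shown. Purely as an illustrative sketch inferred from the call sites (two-argument calls such as roundup(from_time, archive['sec_per_point']) and one-argument calls such as roundup(final_stat)), such a helper might look like the following; each project ships its own implementation, and the assumed default multiple of 1 may not match any of them:

import math

def roundup(value, multiple=1):
    # Hypothetical sketch only: round `value` up to the nearest whole
    # multiple of `multiple`. The one-argument default is an assumption
    # made to cover calls like roundup(final_stat) in later examples.
    return int(math.ceil(float(value) / multiple)) * multiple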
Example #2
def informed_t_pile_audit(contest):
  piles = [[] for n in range(NPILES)]

  start_idx = 0
  last_idx = SAMPLE_0
  shuffle(contest.ballots)

  # Initial Random Ballots
  for i in xrange(start_idx, last_idx):
      piles[i % NPILES].append(contest.ballots[i])

  start_idx = last_idx
  last_idx = roundup(last_idx * GROWTH_RATE, SAMPLE_DIVISOR)

  while last_idx <= len(contest.ballots):
    combined_piles = list(chain(*piles))
    total_score = contest.aggregator(contest.candidates, combined_piles)[1]
    
    remaining_piles = set(range(NPILES))
    round_pile_size = last_idx / SAMPLE_DIVISOR

    for i in xrange(start_idx, last_idx):
      ballot = contest.ballots[i]
      max_index, max_value = None, None

      random_remaining_piles = list(remaining_piles)
      shuffle(random_remaining_piles)
      for pile_index in random_remaining_piles:
        current_score = contest.aggregator(contest.candidates, piles[pile_index])[1]
        score_with_new_ballot = contest.aggregator(contest.candidates, piles[pile_index] + [ballot])[1]
        improvement = distance(current_score,total_score) - distance(score_with_new_ballot,total_score)
        if max_value is None or improvement > max_value:
          max_value, max_index = improvement, pile_index
      if max_index is None:
        raise Exception("ERROR: Max index is None")
      piles[max_index].append(ballot)
      if len(piles[max_index]) == round_pile_size:
        remaining_piles.remove(max_index)
    length = [len(pile) for pile in piles]
    if length.count(length[0]) != len(length):
      raise Exception("Piles non uniform size")
    outcome = agreement(contest.candidates, piles, contest.aggregator)
    if outcome:
      return (outcome, last_idx)
    start_idx = last_idx
    last_idx = roundup(last_idx * GROWTH_RATE, SAMPLE_DIVISOR)
  return (contest.outcome[0], last_idx)
Example #3
def bayes_audit(contest):
  s_size = SAMPLE_0
  shuffle(contest.ballots)
  nballots = len(contest.ballots)
  while s_size < nballots:
    sample = contest.ballots[:s_size]
    winner, win_prob = compute_win_probs(contest.candidates, sample, nballots)
    if win_prob >= (1 - ALPHA):
      return (winner, s_size)
    s_size = roundup(s_size * GROWTH_RATE, SAMPLE_DIVISOR)
  return (contest.outcome[0], nballots)
Example #4
def diffsumscore_audit(contest):
  s_size = SAMPLE_0
  shuffle(contest.ballots)
  nballots = len(contest.ballots)
  while s_size < nballots:
    sample = contest.ballots[:s_size]
    outcome = diffsumscore(contest, sample)
    if outcome:
      return (outcome, s_size)
    s_size = roundup(s_size * GROWTH_RATE, SAMPLE_DIVISOR)
  return (contest.outcome[0], nballots)
Example #5
def subsim_audit(contest):
  s_size = SAMPLE_0
  shuffle(contest.ballots)
  nballots = len(contest.ballots)
  while s_size < nballots:
    sample = contest.ballots[:s_size]
    winner, win_ratio = subsample(contest, sample)
    if win_ratio >= 1 - ALPHA:
      return (winner, s_size)
    s_size = roundup(s_size * GROWTH_RATE, SAMPLE_DIVISOR) 
  return (contest.outcome[0], nballots)
Example #6
def get_stat_detail(name, unit, percent=False):

	coef = 1
	percent_sign = ''
	if percent is True:
		coef = 100
		percent_sign = '%'

	final_stat = name in unit['stats']['final'] and unit['stats']['final'][name] * coef or 0

	if percent is True:

		#return '**`%.02g%%`** (`%s`)' % (round(final_stat, 3), string_stat)
		return '%s%%' % roundup(final_stat)

	else:
		#return '**`%d`** (`%s`)' % (final_stat, string_stat)
		return '%d' % final_stat
Example #7
def bootstrap_audit(contest):
  piles = [[] for n in range(NPILES)]
  start_idx = 0
  last_idx = SAMPLE_0
  shuffle(contest.ballots)
  while last_idx <= len(contest.ballots):
    s = contest.ballots[start_idx:last_idx]
    i = 0
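    # for each newly drawn index, append one ballot sampled with replacement from s to every pile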
    while i < last_idx - start_idx:
      for pile in piles:
        pile.append(choice(s))
      i += 1
    outcome = agreement(contest.candidates, piles + [contest.ballots[:last_idx]], \
                        contest.aggregator)
    if outcome:      
      return (outcome, last_idx)
    start_idx = last_idx
    last_idx = roundup(last_idx * GROWTH_RATE, SAMPLE_DIVISOR)
  return (contest.outcome[0], len(contest.ballots))
Example #8
def plot_error_by_lambda(errors, lambdas, checkpoints, path=None):
    path_org = path
    for i, episode_num in enumerate(checkpoints):
        plt.clf()

        episode_num = roundup(episode_num)

        plt.plot(lambdas, errors[i], label=f"episode={roundup(episode_num)}")

        plt.xlabel("λ", fontsize=16)
        plt.ylabel("MSE", fontsize=14, rotation="horizontal", ha="right")

        plt.xticks(lambdas)

        plt.legend()
        plt.tight_layout()

        if path:
            path = path_org.replace(".", f"({episode_num:.0e}).")
            plt.savefig(path)
        else:
            plt.show()
Example #9
        df_pvt = df.pivot_table(index=['site', 'date'],
                                columns='trt',
                                values=dat,
                                aggfunc='mean',
                                fill_value=0.0)
        df_err = df.pivot_table(index=['site', 'date'],
                                columns='trt',
                                values=dat,
                                aggfunc=np.std,
                                fill_value=0.0)
        df_pvt.reset_index(inplace=True)
        df_err.reset_index(inplace=True)

        df_pvt.to_csv(constants.base_dir + constants.soil_dir + 'df_pvt.csv')
        # Determine maximum value on y axis
        y_max = utils.roundup(df[dat].max(), 10.0)

        # Start plotting
        utils.set_matplotlib_params()
        colors = utils.get_colors()

        # Set up the axes and figure
        fig, axis = plt.subplots(nrows=nrow,
                                 ncols=ncol,
                                 figsize=(5 * ncol, 5 * nrow))

        ctr = [(x, y) for x in np.arange(nrow) for y in np.arange(ncol)]
        site_ctr = 0

        for i in ctr:
            if (nrow > 1):
Example #10
    def _increase_dot1q_tunnel_vlan(self) -> None:
        self.cur_dot1q_tunnel_vlan = roundup(self.cur_dot1q_tunnel_vlan)
Example #11
    ###df.loc[df['rep'] != 'pre', 'rep'] = 'nonpre'    
    # Convert all site names:
    df.site.replace(constants.SITE_NAMES,inplace=True)
    # Get subset of sites we want to draw
    df     = df[df.site.isin(constants.SITE_NAMES.values())]
    
    for dat in constants.DATA_COLS:
        print dat        
        df_pvt = df.pivot_table(index=['site','date'],columns='trt',values=dat,aggfunc='mean',fill_value=0.0)
        df_err = df.pivot_table(index=['site','date'],columns='trt',values=dat,aggfunc=np.std,fill_value=0.0)
        df_pvt.reset_index(inplace=True)
        df_err.reset_index(inplace=True)

        df_pvt.to_csv(constants.base_dir+constants.soil_dir+'df_pvt.csv')
        # Determine maximum value on y axis
        y_max = utils.roundup(df[dat].max(),10.0)
            
        # Start plotting                               
        utils.set_matplotlib_params()
        colors = utils.get_colors()
              
        # Set up the axes and figure
        fig, axis = plt.subplots(nrows=nrow, ncols=ncol, figsize=(5*ncol,5*nrow))

        ctr       = [(x, y) for x in np.arange(nrow) for y in np.arange(ncol)]
        site_ctr  = 0
        
        for i in ctr:
            if(nrow > 1):
                ax = axis[i[0],i[1]]
            else:
Example #12
def makeFigureS18_FactorMaps_User3():

    sns.set_style("white")
    
    # constants, vectors
    design = 'LHsamples_wider_1000_AnnQonly'
    structure = '3704614'
    short_idx = np.arange(2,22,2)
    demand_idx = np.arange(1,21,2)
    percentiles = [40, 90]
    nrealizations = 10
    
    # plotting characteristics
    probability_cmap = mpl.cm.get_cmap('RdBu')
    success_cmap = mpl.colors.ListedColormap(np.array([[227,26,28],[166,206,227]])/255.0)
    contour_levels = np.arange(0.0, 1.05,0.1)
              
    # find which samples are still in param_bounds after flipping misidentified wet and dry states
    param_bounds, param_names, params_no, problem = setupProblem(design)
    samples, rows_to_keep = getSamples(design, params_no, param_bounds)
    nsamples = len(rows_to_keep)
    
    # load historical shortage data and convert acre-ft to m^3
    hist_short = np.loadtxt('../Simulation_outputs/' + structure + '_info_hist.txt')[:,2]*1233.48
    hist_demand = np.loadtxt('../Simulation_outputs/' + structure + '_info_hist.txt')[:,1]*1233.48
    # replace failed runs with np.nan (currently -999.9)
    hist_short[hist_short < 0] = np.nan
    
    # load shortage data for this experimental design
    SYN = np.load('../Simulation_outputs/' + design + '/' + structure + '_info.npy')
    # extract columns for year shortage and demand and convert acre-ft to m^3
    SYN_short = SYN[:,short_idx,:]*1233.48
    SYN_demand = SYN[:,demand_idx,:]*1233.48
    # use just the samples within the experimental design
    SYN_short = SYN_short[:,:,rows_to_keep]
    SYN_demand = SYN_demand[:,:,rows_to_keep]
    # replace failed runs with np.nan (currently -999.9)
    SYN_short[SYN_short < 0] = np.nan
    # reshape synthetic shortage data into 12*nyears x nsamples*nrealizations
    SYN_short = SYN_short.reshape([np.shape(SYN_short)[0],np.shape(SYN_short)[1]*np.shape(SYN_short)[2]])
    SYN_demand = SYN_demand.reshape([np.shape(SYN_demand)[0],np.shape(SYN_demand)[1]*np.shape(SYN_demand)[2]])
    
    # create data frames of shortage and SOWs
    dta = pd.DataFrame(data = np.repeat(samples, nrealizations, axis = 0), columns=param_names)
    
    
    fig, axes = plt.subplots(2,4,figsize=(24.3,9.1))
    fig.subplots_adjust(hspace=0.5,right=0.8,wspace=0.5)        
    # plot shortage distribution for this structure under all-encompassing experiment
    ax1 = axes[0,0]
    handles, labels = plotSDC(ax1, SYN_short, SYN_demand, hist_short, hist_demand, nsamples, nrealizations, True)
    ax1.set_ylim([0,1])
    ax1.tick_params(axis='both',labelsize=14)
    ax1.set_ylabel('Shortage/Demand',fontsize=14)
    ax1.set_xlabel('Shortage Percentile',fontsize=14)
    # add lines at percentiles
    for percentile in percentiles:
        ax1.plot([percentile, percentile],[0,1],c='k')
    
    # plot failure heatmap for this structure under all-encompassing experiment
    ax2 = axes[1,0]
    allSOWs, historic_percents, frequencies, magnitudes, gridcells, im = plotFailureHeatmap(ax2, design, structure)
    addPercentileBlocks(historic_percents, gridcells, percentiles, ax2)
    allSOWsperformance = allSOWs/100
    historic_percents = [roundup(x) for x in historic_percents]
    #all_pseudo_r_scores = calcPseudoR2(frequencies, magnitudes, params_no, allSOWsperformance, dta, structure, design)
    all_pseudo_r_scores = pd.read_csv("../Simulation_outputs/" + design + "/" + structure + "_pseudo_r_scores.csv")
    
    for i in range(len(percentiles)):
        for j in range(3):
            # magnitude of shortage at this percentile to plot
            h = np.where(np.array(historic_percents) == 100 - percentiles[i])[0][0]
            if j == 0:
                h -= 2
            elif j == 2:
                h += 2
            # find out if each realization was a success or failure at this magnitude/frequency combination
            dta['Success'] = allSOWsperformance[list(frequencies).index(100-percentiles[i]),h,:]
            # consider each SOW a success if 50% or more realizations were a success
            avg_dta = dta.groupby(['mu0','mu1','sigma0','sigma1','p00','p11'],as_index=False)[['Success']].mean()
            avg_dta.loc[np.where(avg_dta['Success']>=0.5)[0],'Success'] = 1
            avg_dta.loc[np.where(avg_dta['Success']<0.5)[0],'Success'] = 0
            # load pseudo R2 of predictors for this magnitude/frequency combination
            pseudo_r_scores = all_pseudo_r_scores[str((100-percentiles[i]))+'yrs_'+str(magnitudes[h])+'prc'].values
            if pseudo_r_scores.any():
                top_predictors = np.argsort(pseudo_r_scores)[::-1][:2]
                ranges = param_bounds[top_predictors]
                # define grid of x (1st predictor), and y (2nd predictor) dimensions
                # to plot contour map over
                xgrid = np.arange(param_bounds[top_predictors[0]][0], 
                                  param_bounds[top_predictors[0]][1], np.around((ranges[0][1]-ranges[0][0])/100,decimals=4))
                ygrid = np.arange(param_bounds[top_predictors[1]][0], 
                                  param_bounds[top_predictors[1]][1], np.around((ranges[1][1]-ranges[1][0])/100,decimals=4))
                all_predictors = [ dta.columns.tolist()[k] for k in top_predictors]
                # fit logistic regression model with top two predictors of success and their interaction
                avg_dta['Interaction'] = avg_dta[all_predictors[0]]*dta[all_predictors[1]]
                result = fitLogit_interact(avg_dta, [all_predictors[k] for k in [0,1]])
                
                # plot success/failure for each SOW on top of logistic regression estimate of probability of success
                contourset = plotFactorMap(axes[i,j+1], result, avg_dta, probability_cmap, success_cmap, contour_levels, xgrid, ygrid, \
                              all_predictors[0], all_predictors[1])
                axes[i,j+1].set_title("Success if " + str(magnitudes[h]) + "% shortage\n<" + str((100-percentiles[i])) + "% of the time", fontsize=16)
                fig.savefig('FigureS18_FactorMaps_User3.pdf')
                
    cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
    cbar = fig.colorbar(contourset, cax=cbar_ax)
    cbar.ax.set_ylabel("Predicted Probability of Success", rotation=-90, va="bottom",fontsize=16)
    cbar.ax.tick_params(labelsize=16)
    fig.savefig("FigureS18_FactorMaps_User3.pdf")
    fig.clf()
    
    return None
Example #13
def rr_plot(list_of_processes, timestamp):

    # every file will be saved to given dir
    target = dest + "/" + timestamp
    os.makedirs(target, exist_ok=True)
    os.path.join(target)

    # setting size of the plot according to the biggest termination time
    width_xaxis = []
    for process in list_of_processes:
        width_xaxis.append(process.termination_time)
    width_value = max(width_xaxis)
    height_value = cfg["RR"]["PROCESS_RANGE"] * 5 + 5

    # scalable plot
    WIDTH = math.ceil((width_value + 150) / 50)
    HEIGHT = math.ceil((height_value + 150) / 50)
    BAR_HEIGHT = 2

    fig, gnt = plt.subplots(figsize=(WIDTH, HEIGHT))

    plt.title(f'GANTT PLOT FOR {cfg["RR"]["PROCESS_RANGE"]} PROCESSES, RR, '
              f'QUANTUM {cfg["RR"]["QUANTUM"]} [MS]')

    # setting size of graph
    gnt.set_ylim(0, height_value)
    gnt.set_xlim(0, roundup(width_value + 15))

    gnt.set_xlabel('Time[ms]')
    gnt.set_ylabel('Process')

    gnt.set_yticks(
        [i for i in range(5, cfg["RR"]["PROCESS_RANGE"] * 5 + 5, 5)])
    gnt.invert_yaxis()

    # grid settings and visibility
    gnt.grid(True)
    gnt.set_axisbelow(True)

    names = []
    # mark results
    for index, process in enumerate(list_of_processes):
        names.append("P" + str(process.name))
        # show waiting times from lists
        for interval in process.waiting_list:
            gnt.broken_barh([(interval[0], interval[1] - interval[0])],
                            ((index * 5 + 5) - BAR_HEIGHT / 2, BAR_HEIGHT),
                            facecolors='#8e8888',
                            label='waiting time')
        # show working times from lists
        for interval in process.working_list:
            gnt.broken_barh([(interval[0], interval[1] - interval[0])],
                            ((index * 5 + 5) - BAR_HEIGHT / 2, BAR_HEIGHT),
                            facecolors='#00ff00',
                            label='burst time')
        # add termination time annotation to every process
        gnt.annotate(f'{process.termination_time}',
                     xy=(process.termination_time,
                         index * 5 + 5 + BAR_HEIGHT / 2),
                     xytext=(BAR_HEIGHT * 10, 5),
                     xycoords='data',
                     textcoords='offset pixels',
                     ha='center',
                     va='center')

    gnt.set_yticklabels(names)

    # fix legend visibility
    hand, labl = gnt.get_legend_handles_labels()
    handout = []
    lablout = []
    for h, l in zip(hand, labl):
        if l not in lablout:
            lablout.append(l)
            handout.append(h)
    gnt.legend(handout, lablout, prop={'size': 6})

    # save to file
    title = "RR_CHART_" + timestamp + ".png"
    sub = timestamp + "/"
    plt.savefig(sub + title)
Example #14
    def _propagate(self, fh, header, higher, lower, timestamp_range, lower_idx):
        """
        propagate update to low precision archives.
        """
        from_time, until_time = timestamp_range
        timeunit = Storage.get_propagate_timeunit(lower['sec_per_point'],
                                                  higher['sec_per_point'],
                                                  header['x_files_factor'])
        from_time_boundary = from_time / timeunit
        until_time_boundary = until_time / timeunit
        if (from_time_boundary == until_time_boundary) and (from_time % timeunit) != 0:
            return False

        if lower['sec_per_point'] <= timeunit:
            lower_interval_end = until_time_boundary * timeunit
            lower_interval_start = min(lower_interval_end-timeunit, from_time_boundary*timeunit)
        else:
            lower_interval_end = roundup(until_time, lower['sec_per_point'])
            lower_interval_start = from_time - from_time % lower['sec_per_point']

        fh.seek(higher['offset'])
        packed_base_interval = fh.read(LONG_SIZE)
        higher_base_interval = struct.unpack(LONG_FORMAT, packed_base_interval)[0]

        if higher_base_interval == 0:
            higher_first_offset = higher['offset']
        else:
            higher_first_offset = self._timestamp2offset(lower_interval_start,
                                                         higher_base_interval,
                                                         header,
                                                         higher)

        higher_point_num = (lower_interval_end - lower_interval_start) / higher['sec_per_point']
        higher_size = higher_point_num * header['point_size']
        relative_first_offset = higher_first_offset - higher['offset']
        relative_last_offset = (relative_first_offset + higher_size) % higher['size']
        higher_last_offset = relative_last_offset + higher['offset']

        # get unpacked series str
        # TODO: abstract this to a function
        fh.seek(higher_first_offset)
        if higher_first_offset < higher_last_offset:
            series_str = fh.read(higher_last_offset - higher_first_offset)
        else:
            higher_end = higher['offset'] + higher['size']
            series_str = fh.read(higher_end - higher_first_offset)
            fh.seek(higher['offset'])
            series_str += fh.read(higher_last_offset - higher['offset'])

        # now we unpack the series data we just read
        point_format = header['point_format']
        byte_order, point_type = point_format[0], point_format[1:]
        point_num = len(series_str) / header['point_size']
        # assert point_num == higher_point_num
        series_format = byte_order + (point_type * point_num)
        unpacked_series = struct.unpack(series_format, series_str)

        # and finally we construct a list of values
        point_cnt = (lower_interval_end - lower_interval_start) / lower['sec_per_point']
        tag_cnt = len(header['tag_list'])
        step = tag_cnt + 1
        agg_cnt = lower['sec_per_point'] / higher['sec_per_point']
        step = (tag_cnt + 1) * agg_cnt
        lower_points = [None] * point_cnt

        unpacked_series = unpacked_series[::-1]
        ts = lower_interval_end
        for i in xrange(0, len(unpacked_series), step):
            higher_points = unpacked_series[i: i+step]
            ts -= higher['sec_per_point'] * agg_cnt
            agg_value = self._get_agg_value(higher_points, tag_cnt, header['agg_id'],
                                            lower_interval_start, lower_interval_end)
            lower_points[i/step] = (ts, agg_value)

        lower_points = [x for x in lower_points if x and x[0]]  # filter out empty slots and zero timestamps
        timestamp_range = (lower_interval_start, max(lower_interval_end, until_time))
        self._update_archive(fh, header, lower, lower_points, lower_idx,
                             timestamp_range)
Example #15
def get_field_modset_stats(config):

    modsets = {}
    spacer = EMOJIS['']
    sources = ModRecommendation.objects.all().values('source').distinct()
    sources = [x['source'] for x in sources]
    src_emojis = []
    for source in sources:
        emoji_key = source.replace(' ', '').lower()
        if emoji_key in EMOJIS:
            src_emojis.append(EMOJIS[emoji_key])

    headers = ['%s%s' % (spacer, '|'.join(src_emojis))]

    result = {}
    recos = ModRecommendation.objects.all()
    for reco in recos:
        source = reco.source
        if source not in result:
            result[source] = {}

        char_id = reco.character_id
        if char_id not in result[source]:
            result[source][char_id] = []

        result[source][char_id].append(reco)

    for source, names in result.items():

        new_modsets = {}
        for name, recos in names.items():

            amount = len(recos)
            for reco in recos:

                sets = [reco.set1, reco.set2, reco.set3]

                for aset in sets:
                    if aset not in new_modsets:
                        new_modsets[aset] = 0.0
                    new_modsets[aset] += 1.0 / amount

        lines = []
        for aset, count in new_modsets.items():

            if aset not in modsets:
                modsets[aset] = {}

            if source not in modsets[aset]:
                modsets[aset][source] = 0.0

            modsets[aset][source] += count

        for aset_name in MODSETS.values():
            if aset_name not in modsets:
                continue

            counts = []
            sources = modsets[aset_name]
            for source, count in sorted(sources.items()):
                modset_emoji = EMOJIS[aset_name.replace(' ', '').lower()]
                count = roundup(count)
                pad = pad_numbers(count)
                counts.append('%s%d' % (pad, count))

            if len(counts) < 3:
                pad = pad_numbers(0)
                counts.append('%s%d' % (pad, 0))

            lines.append('%s `|%s`' % (modset_emoji, '|'.join(counts)))

    lines.append(config['separator'])

    lines = headers + lines

    return {
        'name': '== Needed Mod Sets ==',
        'value': '\n'.join(lines),
        'inline': True,
    }
Example #16
def get_field_primary_stats(config, profile, selected_slots,
                            selected_primaries):

    spacer = EMOJIS['']

    if not selected_slots:
        selected_slots = MODSLOTS.values()

    if not selected_primaries:
        selected_primaries = MODSPRIMARIES

    result = {}
    recos = ModRecommendation.objects.all().values()
    for reco in recos:
        char_id = reco['character_id']
        if char_id not in result:
            result[char_id] = []

        result[char_id].append(reco)

    stats = {}

    for unit, recos in result.items():
        extract_modstats(stats, recos)

    player_stats = {}
    extract_modstats_from_roster(player_stats, profile['roster'])

    lines = []
    for slot in selected_slots:
        slot = slot.lower()
        slot_emoji = EMOJIS[slot]
        if slot not in stats:
            continue

        sublines = []
        for primary in sorted(selected_primaries):
            if primary in stats[slot]:
                cg_count = 0
                if 'Capital Games' in stats[slot][primary]:
                    cg_count = roundup(stats[slot][primary]['Capital Games'])

                cr_count = 0
                if 'Crouching Rancor' in stats[slot][primary]:
                    cr_count = roundup(
                        stats[slot][primary]['Crouching Rancor'])

                gg_count = 0
                if 'swgoh.gg' in stats[slot][primary]:
                    gg_count = roundup(stats[slot][primary]['swgoh.gg'])

                ally_count = 0
                if slot in player_stats and primary in player_stats[slot]:
                    ally_count = player_stats[slot][primary]

                pad1 = pad_numbers(cg_count)
                pad2 = pad_numbers(cr_count)
                pad3 = pad_numbers(gg_count)
                pad4 = pad_numbers(ally_count)

                sublines.append('%s `|%s%d|%s%d|%s%d|%s%d|%s`' %
                                (slot_emoji, pad1, cg_count, pad2, cr_count,
                                 pad3, gg_count, pad4, ally_count, primary))

        if sublines:
            lines += [config['separator']] + sublines

    sources = ModRecommendation.objects.all().values('source').distinct()
    sources = [x['source'] for x in sources]

    emojis = []
    for source in sources:
        emoji_key = source.replace(' ', '').lower()
        if emoji_key in EMOJIS:
            emojis.append(EMOJIS[emoji_key])

    guild_logo = 'guildBannerLogo' in profile and profile[
        'guildBannerLogo'] or None
    emojis.append(get_banner_emoji(guild_logo))

    lines = [
        '%s\u202F\u202F\u202F%s`|Primary Stats`' % (spacer, '|'.join(emojis)),
    ] + lines

    return '\n'.join(lines)
Example #17
def fcfs_plot(list_of_processes, timestamp):

    # every file will be saved to given dir
    target = dest + "/" + timestamp
    os.makedirs(target, exist_ok=True)
    os.path.join(target)

    # setting size of the plot according to the biggest termination time
    width_xaxis = []
    for process in list_of_processes:
        width_xaxis.append(process.termination_time)
    width_value = max(width_xaxis)

    # resetting to default
    plt.style.use('default')

    # scalable plot
    WIDTH = math.ceil((width_value + 150) / 50)
    HEIGHT = math.ceil((len(list_of_processes) * 16) / 100)
    fig, gnt = plt.subplots(figsize=(WIDTH, max(HEIGHT, 4)))

    # title
    processes = cfg["FCFS"]["PROCESS_RANGE"]
    if cfg["SUB"]["USE_RR_TO_FCFS"]:
        processes = cfg["RR"]["PROCESS_RANGE"]
    plt.title(f'GANTT PLOT FOR {processes} PROCESSES, FCFS')

    # Y axis height
    gnt.set_ylim(0, 4)

    # X axis width according to the rounded biggest value of all termination
    # times
    gnt.set_xlim(0, roundup(width_value))

    # X, Y axis labels
    gnt.set_xlabel('Time[ms]')
    # I decided to leave the Y axis without a label
    gnt.set_yticks([2])
    gnt.set_yticklabels([' '])

    gnt.invert_yaxis()

    # grid tools and visibility
    gnt.grid(True)
    gnt.set_axisbelow(True)

    # show working times of each process from the list
    for index, process in enumerate(list_of_processes):
        # working list consists of one sub-list
        res_0 = process.working_list[0]
        res_1 = process.working_list[1]
        gnt.broken_barh(
            [(res_0, res_1 - res_0)],
            (1, 2),
            # pick random color of process on graph
            # disclaimer: sometimes it is difficult to read values
            # on a blending-color background
            facecolors=f'#{random.choice(randomhex)}'
            f'{random.choice(randomhex)}'
            f'{random.choice(randomhex)}'
            f'{random.choice(randomhex)}'
            f'{random.choice(randomhex)}'
            f'{random.choice(randomhex)}',
            label=f'P{process.name}')
        # termination time annotation arrangement scheme
        if index % 3 == 0:
            # place lower (the lower value the lower placement)
            value = -30
        elif index % 3 == 1:
            # place even
            value = 0
        else:
            # place higher (the higher value the higher placement)
            value = 30
        gnt.annotate(f'{process.termination_time}',
                     xy=(res_1, 2),
                     xytext=(0, value),
                     xycoords='data',
                     textcoords='offset pixels',
                     ha='center',
                     va='center')

    # fix legend
    hand, labl = gnt.get_legend_handles_labels()
    handout = []
    lablout = []
    for h, l in zip(hand, labl):
        if l not in lablout:
            lablout.append(l)
            handout.append(h)
    gnt.legend(handout,
               lablout,
               prop={'size': 7 - int(processes / 20)},
               bbox_to_anchor=(1.07, 1))

    # save to file
    title = "FCFS_CHART_" + timestamp + ".png"
    sub = timestamp + "/"
    plt.savefig(sub + title)