示例#1
0
def graph(df_aggregate, label_tail, fig, i):
    """Draw one styled case-count subplot onto slot *i* of a 3x3 grid.

    df_aggregate supplies the 'Confirmed'/'Active'/'Deaths'/'Recovered'
    columns; label_tail is appended to the y-axis label.  Returns the
    created Axes.
    """
    axes = fig.add_subplot(3, 3, i)

    # Pastel theme: background, grid, and spine colors.
    axes.set_facecolor('mistyrose')
    plt.grid(True, color='lightcoral')
    for child in axes.get_children():
        if isinstance(child, spines.Spine):
            child.set_color('#CD5C5C')

    # Human-friendly tick labels: no scientific notation, thousands
    # separators on the y axis, compact month/day dates on the x axis.
    y_axis = axes.get_yaxis()
    y_axis.get_major_formatter().set_scientific(False)
    y_axis.set_major_formatter(
        tick.FuncFormatter(lambda x, p: format(int(x), ',')))
    axes.get_xaxis().set_major_formatter(dates.DateFormatter('%-m/%-d'))

    # Axis titles.
    axes.set_ylabel(f'Cases {label_tail}')
    axes.set_xlabel('Date')

    # x axis: major ticks weekly, minor ticks daily; y axis: automatic.
    weekly = dates.RRuleLocator(dates.rrulewrapper(dates.DAILY, interval=7))
    daily = dates.RRuleLocator(dates.rrulewrapper(dates.DAILY, interval=1))
    axes.xaxis.set_minor_locator(daily)
    axes.yaxis.set_minor_locator(tick.AutoMinorLocator())
    axes.xaxis.set_major_locator(weekly)
    axes.yaxis.set_major_locator(tick.AutoLocator())

    # One translucent line per series; deaths/recovered get fixed colors.
    plt.plot('Confirmed', data=df_aggregate, alpha=0.65)
    plt.plot('Active', data=df_aggregate, alpha=0.65)
    plt.plot('Deaths', data=df_aggregate, color='tab:red', alpha=0.65)
    plt.plot('Recovered', data=df_aggregate, color='tab:green', alpha=0.65)

    # Legend styled to match the pastel theme.
    plt.legend(facecolor='lavenderblush', edgecolor='#f0a0a0')

    return axes
示例#2
0
def test_RRuleLocator():
    """Check RRuleLocator clamps padded limits at the date boundaries."""
    import pylab
    import matplotlib.dates as mpldates
    import matplotlib.testing.jpl_units as units
    from datetime import datetime
    import dateutil
    units.register()

    # An extreme span (years 1000-6000) makes the locator's limit padding
    # run out of bounds unless it caps at the correct boundary values.
    start = datetime(1000, 1, 1)
    end = datetime(6000, 1, 1)

    fig = pylab.figure()
    ax = pylab.subplot(111)
    ax.set_autoscale_on(True)
    ax.plot([start, end], [0.0, 1.0], marker='o')

    # One major tick every 500 years, auto-formatted.
    wrapper = mpldates.rrulewrapper(dateutil.rrule.YEARLY, interval=500)
    major_locator = mpldates.RRuleLocator(wrapper)
    ax.xaxis.set_major_locator(major_locator)
    ax.xaxis.set_major_formatter(mpldates.AutoDateFormatter(major_locator))

    ax.autoscale_view()
    fig.autofmt_xdate()

    fig.savefig('RRuleLocator_bounds')
示例#3
0
    def test_RRuleLocator(self):
        """RRuleLocator must cap out-of-range padded limits at the boundaries."""
        fname = self.outFile("RRuleLocator_bounds.png")

        # Spanning years 1000-6000 forces the locator's limit padding out
        # of bounds; it should clamp at the supported boundary values.
        start = datetime(1000, 1, 1)
        end = datetime(6000, 1, 1)

        fig = pylab.figure()
        ax = pylab.subplot(111)
        ax.set_autoscale_on(True)
        ax.plot([start, end], [0.0, 1.0], marker='o')

        # Major ticks every 500 years with automatic date formatting.
        wrapper = mpldates.rrulewrapper(dateutil.rrule.YEARLY, interval=500)
        major_locator = mpldates.RRuleLocator(wrapper)
        ax.xaxis.set_major_locator(major_locator)
        ax.xaxis.set_major_formatter(mpldates.AutoDateFormatter(major_locator))

        ax.autoscale_view()
        fig.autofmt_xdate()

        fig.savefig(fname)
        self.checkImage(fname)
示例#4
0
def plot_data(data, fit, predicts, ylabel, saveName):
    """Plot observed data, a fitted curve and predictions on a date axis.

    Each of data/fit/predicts is either None or an indexable pair of
    (dates, values).  ylabel, when given, labels the y axis; saveName is
    used both as the plot title and as the saved file's prefix.
    """
    # Major ticks every 15 days, labelled day/month/year.
    rule = mdt.rrulewrapper(mdt.DAILY, interval=15)
    loc = mdt.RRuleLocator(rule)
    formatter = mdt.DateFormatter('%d/%m/%y')
    fig, ax = plt.subplots()

    # Compare against None with "is not": "!= None" is non-idiomatic and
    # breaks for array-likes, which compare elementwise and then fail the
    # truth-value test.
    if data is not None:
        plt.plot_date(data[0], data[1], ls='solid')
    if fit is not None:
        plt.plot_date(fit[0], fit[1], ls='solid')
    if predicts is not None:
        plt.plot_date(predicts[0], predicts[1], ls='dashed')

    ax.xaxis.set_major_locator(loc)
    ax.xaxis.set_major_formatter(formatter)
    ax.xaxis.set_tick_params(rotation=30, labelsize=10)
    ax.set_xlabel('Data')
    if ylabel is not None:
        ax.set_ylabel(ylabel)
    plt.title(saveName)
    plt.savefig('{}_data_plot.png'.format(saveName))
示例#5
0
def convert_date_breaks(breaks_str: str) -> mdates.DateLocator:
    """Convert a conversational period description (e.g. "2 weeks") into a
    matplotlib date tick Locator.

    Args:
        breaks_str: A period description of the form "{interval} {period}".

    Returns:
        An mdates.RRuleLocator ticking once every ``interval`` periods.
    """
    # Singular period name -> rrule frequency constant.
    frequency_by_period = {
        "year": YEARLY,
        "month": MONTHLY,
        "week": WEEKLY,
        "day": DAILY,
        "hour": HOURLY,
        "minute": MINUTELY,
        "second": SECONDLY,
    }

    interval_text, period_text = breaks_str.split()
    # Normalize: lowercase and strip a plural "s" ("Weeks" -> "week").
    period_key = period_text.lower()
    if period_key.endswith("s"):
        period_key = period_key[:-1]
    frequency = frequency_by_period[period_key]

    wrapper = mdates.rrulewrapper(frequency, interval=int(interval_text))
    return mdates.RRuleLocator(wrapper)
def test_RRuleLocator_close_minmax():
    # With limits only a microsecond apart, rrule cannot build reasonable
    # tick intervals; the locator should simply return the two endpoints.
    wrapper = mdates.rrulewrapper(dateutil.rrule.SECONDLY, interval=5)
    locator = mdates.RRuleLocator(wrapper)
    start = datetime.datetime(year=2020, month=1, day=1)
    end = datetime.datetime(year=2020, month=1, day=1, microsecond=1)
    expected = [
        '2020-01-01 00:00:00+00:00', '2020-01-01 00:00:00.000001+00:00'
    ]
    ticks = mdates.num2date(locator.tick_values(start, end))
    assert [str(t) for t in ticks] == expected
示例#7
0
 def _configure_xaxis(self, _ax):
     """Configure *_ax* as a date axis with one labelled tick per day."""
     # Interpret x values as date/time values.
     _ax.xaxis_date()
     # Major tick every day, labelled like "05 Mar".
     rule = mdates.rrulewrapper(mdates.DAILY, interval=1)
     loc = mdates.RRuleLocator(rule)
     formatter = mdates.DateFormatter('%d %b')
     _ax.xaxis.set_major_locator(loc)
     _ax.xaxis.set_major_formatter(formatter)
     # Rotate the labels so neighbouring dates do not overlap.
     xlabels = _ax.get_xticklabels()
     plt.setp(xlabels, rotation=30, fontsize=9)
     # Let the figure adapt the date-label layout to its own size.
     self.fig.autofmt_xdate()
示例#8
0
    def _configure_xaxis(self):
        """Configure the x axis as a date axis with 3-hourly major ticks."""
        # make x axis date axis
        self._axes.xaxis_date()

        # Major tick every 3 hours, labelled with the full timestamp.
        rule = mdates.rrulewrapper(mdates.HOURLY, interval=3)
        loc = mdates.RRuleLocator(rule)
        formatter = mdates.DateFormatter("%y-%m-%d %H:%M:%S")

        self._axes.xaxis.set_major_locator(loc)
        self._axes.xaxis.set_major_formatter(formatter)
        # Rotate the long timestamp labels so they do not overlap.
        xlabels = self._axes.get_xticklabels()
        plt.setp(xlabels, rotation=30, size='x-small')
示例#9
0
    def _configure_xaxis(self):
        """Configure the x axis as a date axis with weekly major ticks."""
        self._ax.xaxis_date()

        # One labelled tick every 7 days, formatted like "05 Mar".
        weekly_rule = mdates.rrulewrapper(mdates.DAILY, interval=7)
        self._ax.xaxis.set_major_locator(mdates.RRuleLocator(weekly_rule))
        self._ax.xaxis.set_major_formatter(mdates.DateFormatter("%d %b"))

        # Rotate the labels so adjacent dates stay legible.
        plt.setp(self._ax.get_xticklabels(), rotation=30, fontsize=9)
示例#10
0
    def do_plot(self, dates, values, x_fit, y_fit, min_level, save_as):
        """Plot helium fill readings with a linear fit and minimum level,
        annotate the fit slope and estimated refill date, then save the
        figure next to the data file."""
        week_rule = mdates.rrulewrapper(mdates.WEEKLY)
        week_locator = mdates.RRuleLocator(week_rule)
        date_fmt = mdates.DateFormatter('%d/%m/%y')

        fig, ax = plt.subplots()

        # Raw readings, fitted trend, and the minimum acceptable level.
        plt.plot_date(dates, values)
        plt.plot(x_fit, y_fit, 'b-')
        plt.plot([x_fit[0], x_fit[-1]], [min_level, min_level], 'r-')

        plt.ylim(300, 700)
        plt.grid(True)

        # Weekly date ticks, rotated to stay readable.
        plt.xticks(rotation=90)
        ax.xaxis.set_major_locator(week_locator)
        ax.xaxis.set_major_formatter(date_fmt)

        plt.title('Helium Fill Level')

        # Annotate the fitted slope and the estimated refill date inside
        # the axes, anchored just after the first date.
        plt.text(dates.min() + 1,
                 425,
                 'by fit: ' +
                 str("{:3.2f}".format(self.slope) + str(' units/day')),
                 horizontalalignment='left',
                 verticalalignment='center',
                 fontsize=12)
        plt.text(dates.min() + 1,
                 375,
                 'est. date: ' + self.fill_date_str,
                 horizontalalignment='left',
                 verticalalignment='center',
                 fontsize=12)

        # File name: first 10 characters of the data file + suffix.
        fig.savefig(self.data_file[0:10] + '_' + save_as,
                    bbox_inches='tight',
                    dpi=400)
def set_axes_to_plot(data, ax, period_string):
    """Plot one dated series per entry of *data* on *ax* and style the axes.

    Each entry is indexable as (points, label, dates); period_string names
    the aggregation period used in the title and x label.
    """
    markers = ["o", "v", "^", "<", ">", "1", "2", "s", "p", "P", "*", "h", "H", "x", "X", "D", "d", ".", "+"]
    for series_idx, series in enumerate(data):
        # Cycle through the marker list so every series stays distinguishable.
        marker = markers[series_idx % len(markers)]
        ax.plot_date(series[2], series[0], marker=marker,
                      label=series[1] + ' (' + str(series[0]) + ') ')
    ax.legend(prop={'size': 8})
    ax.set_title("Max points per 1 " + period_string)

    # Major date ticks every other month, day-month-year labels.
    bimonthly = mdates.rrulewrapper(mdates.MONTHLY, interval=2)
    ax.xaxis.set_major_locator(mdates.RRuleLocator(bimonthly))
    ax.xaxis.set_major_formatter(formatter=mdates.DateFormatter('%d-%m-%y'))
    ax.xaxis.set_tick_params(rotation=30, labelsize=8)

    # Faint dotted minor grid behind the data.
    ax.set_axisbelow(True)
    ax.minorticks_on()
    ax.grid(which='minor', axis='y', linestyle=':', linewidth='0.5', c='k', alpha=0.1)

    if period_string == "day":
        x_label = "Date"
    else:
        x_label = 'Start date of the recording period'
    ax.set_xlabel(x_label)
    ax.set_ylabel('Points')
    ax.grid()
示例#12
0
def main():
    """Read a tab-separated table of per-page category counts, group the
    rows by namespace, and plot every category as a time series into a
    multi-page PDF.

    Usage: prog [options] input_file output_file.  See the option help
    strings below for the available filters and transformations.
    """
    import optparse
    from sonet.lib import SonetOption
    p = optparse.OptionParser(
        usage="usage: %prog [options] input_file output_file",
        option_class=SonetOption)
    p.add_option('-v',
                 action="store_true",
                 dest="verbose",
                 default=False,
                 help="Verbose output")
    p.add_option('-i',
                 '--ignorecols',
                 action="store",
                 dest="ignorecols",
                 help="Columns numbers of the source file to ignore"
                 "(comma separated and starting from 0)")
    p.add_option('-I',
                 '--id',
                 action="store",
                 dest="id_col",
                 type="int",
                 help="Id column number (starting from 0)",
                 default=0)
    p.add_option('-o', '--onlycols', action="store", dest="onlycols",
                 help="Select only this set of columns" + \
                      "(comma separated and starting from 0)")
    p.add_option('-p',
                 '--percentages',
                 action="store_true",
                 dest="perc",
                 help="Use percentages instead of absolute value")
    p.add_option('-w',
                 '--window',
                 action="store",
                 dest="window",
                 type=int,
                 help="Collapse days")
    p.add_option('-g',
                 '--group',
                 action="store",
                 dest="group",
                 help="Group by weekday/month")
    p.add_option('-S',
                 '--sliding',
                 action="store",
                 dest="smooth",
                 type=int,
                 help="Sliding window")
    p.add_option('--exclude-less-than',
                 action="store",
                 dest="excludelessthan",
                 type=int,
                 help=("Exclude lines with totals (or dic if -d option is "
                       "used) smaller than this parameter"))
    p.add_option('--exclude-more-than',
                 action="store",
                 dest="excludemorethan",
                 type=int,
                 help=("Exclude lines with totals (or dic if -d option is "
                       "used) greater than this parameter"))
    p.add_option('-s',
                 '--start',
                 action="store",
                 dest='start',
                 type="yyyymmdd",
                 metavar="YYYYMMDD",
                 default=None,
                 help="Look for revisions starting from this date")
    p.add_option('-e',
                 '--end',
                 action="store",
                 dest='end',
                 type="yyyymmdd",
                 metavar="YYYYMMDD",
                 default=None,
                 help="Look for revisions until this date")
    p.add_option('-d',
                 '--dic',
                 action="store_true",
                 dest="dic",
                 default=False,
                 help="Calculate percentage over dic column instead of total")
    p.add_option('-n',
                 '--namespaces',
                 action="store",
                 dest="namespaces",
                 help="Output only selected namespaces (comma separated)")
    opts, files = p.parse_args()

    if len(files) != 2:
        p.error("Wrong parameters")
    if opts.verbose:
        logging.basicConfig(stream=sys.stderr,
                            level=logging.DEBUG,
                            format='%(asctime)s %(levelname)s %(message)s',
                            datefmt='%Y-%m-%d %H:%M:%S')
    csv_reader = csv.reader(open(files[0]), delimiter="\t")
    onlycols = None
    ignorecols = None
    if opts.onlycols:
        onlycols = [int(x) for x in opts.onlycols.split(",")]
    if opts.ignorecols:
        ignorecols = [int(x) for x in opts.ignorecols.split(",")]

    # content contains all the csv file
    content = list(csv_reader)

    # columns to skip (namespace, text, total, ...)
    ns_index = 1
    len_line = len(content[0])
    to_skip = [ns_index, len_line - 1, len_line - 2, len_line - 3, opts.id_col]

    # CSV header, only of interesting columns
    header = [x for x in _gen_data(content[0], to_skip, ignorecols, onlycols)]
    namespaces = {}
    for row in content[1:]:
        try:
            namespaces[row[ns_index]].append(row)
        except KeyError:
            namespaces[row[ns_index]] = [row]

    pdf_pag = PdfPages(files[1])
    opts.namespaces = opts.namespaces.split(",") if opts.namespaces else None
    for ns in namespaces:
        if (not opts.namespaces) or (ns in opts.namespaces):
            logging.info("Processing namespace: %s", ns)
            # Creates a matrix (list) with percentages of the occurrencies of every
            # category. Don't count id, total, text, ignore columns. If onlycols is set
            # consider only them.
            mat = []
            timestamps = []
            totals = []
            tot_index = -3
            if opts.dic:
                tot_index = -5

            for line in namespaces[ns]:
                #filter only pages with total (or dic is -d) greater or smaller than X
                if opts.excludemorethan:
                    if float(line[tot_index]) > opts.excludemorethan:
                        continue
                if opts.excludelessthan:
                    if float(line[tot_index]) < opts.excludelessthan:
                        continue

                mat.append([
                    x for x in _gen_data(line, to_skip, ignorecols, onlycols)
                ])
                totals.append(float(line[tot_index]))
                timestamps.append(dt.strptime(line[opts.id_col], "%Y/%m/%d"))

            # NOTE: np.float was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin float is the documented replacement.
            mat = np.array(mat, dtype=float).transpose()
            logging.info("Input file read. Ready to plot")

            with Timr("Plotting"):
                for i, series in enumerate(mat):
                    logging.info("Plotting page %d", i + 1)

                    # Restrict every series to the [opts.start, opts.end]
                    # window (each bound optional).
                    ser = [x for k, x in enumerate(series) \
                           if (not opts.start or timestamps[k] >= opts.start) and \
                              (not opts.end or timestamps[k] <= opts.end)]
                    time = [x for k, x in enumerate(timestamps) \
                            if (not opts.start or x >= opts.start) and \
                               (not opts.end or x <= opts.end)]
                    tot = [x for k, x in enumerate(totals) \
                           if (not opts.start or timestamps[k] >= opts.start) and \
                              (not opts.end or timestamps[k] <= opts.end)]

                    if opts.smooth and len(time) and len(ser) and len(tot):
                        time, ser, tot = smooth_values(time, ser, tot,
                                                       opts.smooth)

                    if opts.window and len(time) and len(ser) and len(tot):
                        time, ser, tot = collapse_values(
                            time, ser, tot, opts.window)

                    if opts.group and len(time) and len(ser) and len(tot):
                        time, ser, tot = group_values(time, ser, tot,
                                                      opts.group)

                    try:
                        mean = float(sum(series)) / len(series)
                    except ZeroDivisionError:
                        continue
                    #rel_mean is the mean for the period [opts.end, opts.start]
                    try:
                        rel_mean = float(sum(ser)) / len(ser)
                    except ZeroDivisionError:
                        continue

                    if opts.perc:
                        try:
                            mean = float(sum(series)) / sum(totals)
                            rel_mean = float(sum(ser)) / sum(tot)
                        except ZeroDivisionError:
                            mean = 0
                            rel_mean = 0
                        # Calculate percentages
                        ser = [calc_perc(x, tot[k]) for k, x in enumerate(ser)]
                        plt.ylabel("%")

                    first_time = time[0].date()
                    last_time = time[-1].date()
                    plt.clf()
                    plt.subplots_adjust(bottom=0.25)
                    plt.xticks(rotation=90)
                    fig = plt.gcf()
                    fig.set_size_inches(11.7, 8.3)
                    axis = plt.gca()
                    axis.xaxis.set_major_formatter(
                        md.DateFormatter('%Y-%m-%d'))
                    axis.set_xlim(matplotlib.dates.date2num(first_time),
                                  matplotlib.dates.date2num(last_time))
                    # Short ranges get daily ticks; longer ones get monthly
                    # minor ticks and a major tick every 4 months.
                    if last_time - first_time < timedelta(days=30):
                        axis.xaxis.set_major_locator(md.DayLocator(interval=1))
                        axis.xaxis.set_minor_locator(md.DayLocator(interval=1))
                    else:
                        axis.xaxis.set_minor_locator(
                            md.MonthLocator(interval=1))
                        rule = md.rrulewrapper(md.MONTHLY, interval=4)
                        auto_loc = md.RRuleLocator(rule)
                        axis.xaxis.set_major_locator(auto_loc)
                    axis.tick_params(labelsize='x-small')
                    plt.xlabel("Revisions Timestamp")

                    if len(time) and len(ser):
                        if opts.window:
                            time = [t.date() for t in time]
                        logging.info("Mean: %f", mean)
                        logging.info("Relative Mean: %f", rel_mean)
                        plt.plot(matplotlib.dates.date2num(time), ser, "b.-")
                        plt.axhline(y=mean, color="r")
                        plt.title("%s - %s - Mean: %.5f - Relative mean: %.5f" % \
                                  (ns, header[i], round(mean, 5),
                                   round(rel_mean, 5)))
                    pdf_pag.savefig()
    pdf_pag.close()
    for row in reader:
        map_points[row[0]] = int(row[2])

# for deleted maps: ensure every map in race.csv has an entry, defaulting
# to 0 points when it was not seen earlier.
with open('../ddnet-stats/race.csv', 'r', encoding='utf-8') as fp:
    reader = csv.reader(fp, delimiter=',')
    for row in reader:
        if row[0] not in map_points.keys():
            map_points[row[0]] = 0

# Three figures, each with two vertically stacked axes.
fig1, (ax11, ax12) = plt.subplots(nrows=2, ncols=1)
fig2, (ax21, ax22) = plt.subplots(nrows=2, ncols=1)
fig3, (ax31, ax32) = plt.subplots(nrows=2, ncols=1)

# Figure 2 gets monthly date ticks, labelled year-month and rotated.
rule = mdates.rrulewrapper(mdates.MONTHLY, interval=1)
ax21.xaxis.set_major_locator(mdates.RRuleLocator(rule))
ax21.xaxis.set_major_formatter(formatter=mdates.DateFormatter('%Y-%m'))
ax21.xaxis.set_tick_params(rotation=75, labelsize=9)

ax22.xaxis.set_major_locator(mdates.RRuleLocator(rule))
ax22.xaxis.set_major_formatter(formatter=mdates.DateFormatter('%Y-%m'))
ax22.xaxis.set_tick_params(rotation=75, labelsize=9)

nick_list = nick_name_list_to_compare

# Draw the per-player comparison stats onto all six axes.
personal_info = set_axes_for_comp_stats(nick_list,
                                        [ax11, ax12, ax21, ax22, ax31, ax32])

ax11.set_title("Total points")
ax11.legend()
ax11.set_ylabel('Total points')
示例#14
0
# Secondary x axis sharing the top plot's y axis; used purely to draw an
# extra row of day/weekday labels inside the plot area.
axes_top = axes_arr[0].twiny()
axes_top.set_xlim(xmin, xmax)
axes_top.annotate(args.tz, xy=(5,12), xycoords='axes points', fontsize=8)
axes_top.xaxis.set_tick_params(which='both', direction='in',
        top=False, bottom=True, labeltop=False, labelbottom=True)

# Major ticks at day boundaries (site timezone), unlabelled.
axes_top.xaxis.set_major_locator(mdates.DayLocator(tz=tz_site))
axes_top.xaxis.set_major_formatter(matplotlib.ticker.NullFormatter())
axes_top.xaxis.set_tick_params(which='major', length=8)

# Label every day with its weekday abbreviation for short ranges
# (<= 120 hours); only weekends for longer ranges.
if (args.hours <= 120):
    labeled_days=(MO, TU, WE, TH, FR, SA, SU)
else:
    labeled_days=(SA, SU)
# Minor ticks at noon carry the weekday label, drawn inside the axes
# (zero length, negative pad).
rule = mdates.rrulewrapper(DAILY, byweekday=labeled_days, byhour=12)
loc  = mdates.RRuleLocator(rule, tz=tz_site)
axes_top.xaxis.set_minor_locator(loc)
axes_top.xaxis.set_minor_formatter(mdates.DateFormatter("%a", tz=tz_site))
axes_top.xaxis.set_tick_params(which='minor', length=0, labelsize=8, pad=-8)

#
# Tweak to tau225 y-axis to ensure we always get at least one
# full log decade
#
tau_max = axes_arr[0].get_ylim()[1]
if (tau_max < 0.1):
    tau_max = 0.1
axes_arr[0].set_ylim(bottom=0.01, top=tau_max)
#
# Tweak to PWV y axis to always start from pwv = 0, with a
# small offset.
示例#15
0
import matplotlib.pyplot as plt
# NOTE(review): matplotlib.finance was removed from matplotlib (2.2+);
# this script needs an old matplotlib or the mplfinance package — confirm.
import matplotlib.finance as finance
import matplotlib.dates as dates
from matplotlib.dates import YEARLY,MONDAY
import datetime

# Fetch one month of IBM quotes and draw a candlestick chart.
begtime=datetime.date(2016,1,1)
endtime=datetime.date(2016,2,1)
p1=finance.quotes_historical_yahoo('IBM',begtime,endtime)
fig,ax = plt.subplots()
# Major ticks on Mondays; minor ticks via a yearly Easter-based rrule.
ax.xaxis.set_major_locator(dates.WeekdayLocator(dates.MONDAY))
rule = dates.rrulewrapper(YEARLY,byeaster=1,interval=1)
ax.xaxis.set_minor_locator(dates.RRuleLocator(rule))
ax.xaxis.set_major_formatter(dates.DateFormatter("%d/%m/%y %A"))
ax.xaxis.set_minor_formatter(dates.DateFormatter("%b %d"))
#ax.xaxis_date()
#ax.autoscale_view()
#plt.setp(plt.gca().get_xticklabels(),rotation=80,ha='right')
finance.candlestick(ax,p1,width=0.6)

# Slant the date labels so they stay readable.
plt.setp(ax.get_xticklabels(),rotation=80,ha='right')
#ax.xaxis_date()

        points = int(re.split(',".*",', line)[1][:-1])
        nick = (re.findall(',".*",', line))[0].split(',')[1][1:-1]
        if nick not in players_dict.keys():
            players_dict[nick] = [[dt], [points]]
        else:
            players_dict[nick][0].append(dt)
            players_dict[nick][1].append(points)

# One dotted series per player; the legend shows each nick with the
# number of days it appears on.
fig, ax = plt.subplots()
for k in players_dict.keys():
    ax.plot_date(players_dict[k][0], players_dict[k][1], '.', label=k + ' (' + str(len(players_dict[k][0])) + ')')
ax.legend(title="Nicks (days on top)")
ax.set_title("Top 1 Points")

# Major date ticks every other month, day-month-year labels, rotated.
rule = mdates.rrulewrapper(mdates.MONTHLY, interval=2)
ax.xaxis.set_major_locator(mdates.RRuleLocator(rule))
ax.xaxis.set_major_formatter(formatter=mdates.DateFormatter('%d-%m-%y'))
ax.xaxis.set_tick_params(rotation=30, labelsize=8)

# Faint dotted minor grid drawn behind the data.
ax.set_axisbelow(True)
ax.minorticks_on()
ax.grid(which='minor', axis='y', linestyle=':', linewidth='0.5', c='k', alpha=0.1)
ax.grid()

ax.set_xlabel('Date')
ax.set_ylabel('Points')

# Save at A4 landscape size.
fig.set_size_inches((11.69, 8.27), forward=False)  # A4
fig.savefig('output_files/top1points_plot.pdf', dpi=1000)
# *** PLOT TOP 1 POINTS ***