Example 1
0
def create_lc(
        log,
        cacheDirectory,
        epochs):
    """*create the atlas lc for one transient*

    Epochs are split per ATLAS filter (c = cyan, o = orange, I), binned
    nightly and plotted as flux (``fnu``) against MJD. Sub-3-sigma points
    are shown as open-circle non-detection limits; the finished figure is
    written to ``<cacheDirectory>/atlas_fp_lightcurve.png``.

    **Key Arguments**

    - ``cacheDirectory`` -- the directory to add the lightcurve to
    - ``log`` -- logger
    - ``epochs`` -- dictionary of lightcurve data-points

    **Return**

    - None
    """
    log.debug('starting the ``create_lc`` function')

    from astrocalc.times import conversions
    # CONVERTER TO CONVERT MJD TO DATE
    converter = conversions(
        log=log
    )

    # c = cyan, o = orange, I = I-band
    magnitudes = {
        'c': {'mjds': [], 'mags': [], 'magErrs': []},
        'o': {'mjds': [], 'mags': [], 'magErrs': []},
        'I': {'mjds': [], 'mags': [], 'magErrs': []},
    }

    # NIGHTLY-BINNED VERSION OF `magnitudes` (FILLED BELOW)
    summedMagnitudes = {
        'c': {'mjds': [], 'mags': [], 'magErrs': []},
        'o': {'mjds': [], 'mags': [], 'magErrs': []},
        'I': {'mjds': [], 'mags': [], 'magErrs': []},
    }

    # SUB-3-SIGMA POINTS ARE TREATED AS NON-DETECTION LIMITS
    limits = {
        'c': {'mjds': [], 'mags': [], 'magErrs': []},
        'o': {'mjds': [], 'mags': [], 'magErrs': []},
        'I': {'mjds': [], 'mags': [], 'magErrs': []},
    }

    # SORT EPOCHS INTO DETECTIONS & LIMITS AND FIND THE DISCOVERY EPOCH
    discoveryMjd = False
    for epoch in epochs:
        objectName = epoch["atlas_designation"]
        if not epoch["fnu"]:
            continue

        # SKIP OBVIOUSLY BOGUS EPOCHS (MJD 50000 IS LONG PRE-ATLAS)
        if epoch["mjd_obs"] < 50000.:
            continue

        # DISCOVERY EPOCH = EARLIEST > 5-SIGMA DETECTION
        if not epoch["snr"] <= 5 and (not discoveryMjd or discoveryMjd > epoch["mjd_obs"]):
            discoveryMjd = epoch["mjd_obs"]

        if epoch["snr"] <= 3 and epoch["filter"] in ["c", "o", "I"]:
            limits[epoch["filter"]]["mjds"].append(epoch["mjd_obs"])
            limits[epoch["filter"]]["mags"].append(epoch["fnu"])
            limits[epoch["filter"]]["magErrs"].append(epoch["fnu_error"])
        elif epoch["filter"] in ["c", "o", "I"]:
            magnitudes[epoch["filter"]]["mjds"].append(epoch["mjd_obs"])
            magnitudes[epoch["filter"]]["mags"].append(epoch["fnu"])
            magnitudes[epoch["filter"]]["magErrs"].append(epoch["fnu_error"])

    # BIN THE DETECTIONS NIGHTLY: ONE STACKED POINT PER FILTER PER INTEGER MJD
    for fil, d in list(magnitudes.items()):
        distinctMjds = {}
        for m, f, e in zip(d["mjds"], d["mags"], d["magErrs"]):
            # KEY = FLOOR OF THE MJD, I.E. ONE BIN PER NIGHT
            key = str(int(math.floor(m)))
            if key not in distinctMjds:
                distinctMjds[key] = {
                    "mjds": [m],
                    "mags": [f],
                    "magErrs": [e]
                }
            else:
                distinctMjds[key]["mjds"].append(m)
                distinctMjds[key]["mags"].append(f)
                distinctMjds[key]["magErrs"].append(e)

        # MEAN MJD & MEAN FLUX PER NIGHT; ERROR = MEAN ERROR / sqrt(N)
        for k, v in list(distinctMjds.items()):
            summedMagnitudes[fil]["mjds"].append(
                old_div(sum(v["mjds"]), len(v["mjds"])))
            summedMagnitudes[fil]["mags"].append(
                old_div(sum(v["mags"]), len(v["mags"])))
            summedMagnitudes[fil]["magErrs"].append(
                sum(v["magErrs"]) / len(v["magErrs"]) / math.sqrt(len(v["magErrs"])))

    # NO >5-SIGMA DETECTION - NOTHING TO PLOT
    if not discoveryMjd:
        return

    # COMMENT THIS LINE OUT TO PLOT ALL MAGNITUDE MEASUREMENTS INSTEAD OF
    # SUMMED
    magnitudes = summedMagnitudes

    discoveryUT = converter.mjd_to_ut_datetime(
        mjd=discoveryMjd, datetimeObject=True)
    discoveryUT = discoveryUT.strftime("%Y %m %d %H:%M")

    # GENERATE THE FIGURE FOR THE PLOT
    fig = plt.figure(
        num=None,
        figsize=(10, 10),
        dpi=100,
        facecolor=None,
        edgecolor=None,
        frameon=True)

    mpl.rc('ytick', labelsize=20)
    mpl.rc('xtick', labelsize=20)
    mpl.rcParams.update({'font.size': 22})

    # FORMAT THE AXES
    ax = fig.add_axes(
        [0.1, 0.1, 0.8, 0.8],
        polar=False,
        frameon=True)
    ax.set_xlabel('MJD', labelpad=20)
    ax.set_ylabel('Apparent Magnitude', labelpad=15)

    # ATLAS OBJECT NAME LABEL AS TITLE
    fig.text(0.1, 1.02, objectName, ha="left", fontsize=40)

    # RHS AXIS TICKS
    plt.setp(ax.xaxis.get_majorticklabels(),
             rotation=45, horizontalalignment='right')
    import matplotlib.ticker as mtick
    ax.xaxis.set_major_formatter(mtick.FormatStrFormatter('%5.0f'))

    # LEGEND HANDLES, COLLECTED AS EACH DATASET IS PLOTTED
    handles = []

    # TRACK THE LARGEST PLOTTED FLUX (+ERROR) TO SET THE Y-RANGE
    upperMag = -99

    # DETERMINE THE TIME-RANGE OF DETECTION FOR THE SOURCE
    mjdList = magnitudes['o']['mjds'] + \
        magnitudes['c']['mjds'] + magnitudes['I']['mjds']
    if len(mjdList) == 0:
        return

    lowerDetectionMjd = min(mjdList)
    upperDetectionMjd = max(mjdList)

    # CLASSIFY PRE-DISCOVERY LIMITS:
    # flavour 1 = a limit exists within 30 days before the first detection;
    # flavour 2 = only older limits exist - plot from the latest of those.
    mjdLimitList = limits['o']['mjds'] + \
        limits['c']['mjds'] + limits['I']['mjds']
    priorLimitsFlavour = None
    for l in sorted(mjdLimitList):
        if l < lowerDetectionMjd and l > lowerDetectionMjd - 30.:
            priorLimitsFlavour = 1
    if not priorLimitsFlavour:
        # FIX: iterate in sorted order so `lowerMJDLimit` ends up as the
        # LATEST limit older than 30 days before discovery (the original
        # walked the unsorted concatenated list, picking an arbitrary one)
        for l in sorted(mjdLimitList):
            if l < lowerDetectionMjd - 30.:
                priorLimitsFlavour = 2
                lowerMJDLimit = l - 2

    if not priorLimitsFlavour:
        fig.text(0.1, -0.08, "* no recent pre-discovery detection limit > $5\\sigma$",
                 ha="left", fontsize=16)

    # CLASSIFY POST-DETECTION LIMITS (MIRROR OF THE ABOVE, 10-DAY WINDOW)
    postLimitsFlavour = None
    for l in sorted(mjdLimitList):
        if l > upperDetectionMjd and l < upperDetectionMjd + 10.:
            postLimitsFlavour = 1
    if not postLimitsFlavour:
        # FIX: sort before reversing so `upperMJDLimit` is the EARLIEST
        # limit more than 10 days after the last detection
        for l in reversed(sorted(mjdLimitList)):
            if l > upperDetectionMjd + 10.:
                postLimitsFlavour = 2
                upperMJDLimit = l + 2

    # REBUILD THE LIMITS, KEEPING ONLY THOSE INSIDE THE WINDOW OF INTEREST
    if priorLimitsFlavour or postLimitsFlavour:
        limits = {
            'c': {'mjds': [], 'mags': [], 'magErrs': []},
            'o': {'mjds': [], 'mags': [], 'magErrs': []},
            'I': {'mjds': [], 'mags': [], 'magErrs': []},
        }
        for epoch in epochs:
            if epoch["filter"] not in ["c", "o", "I"]:
                continue
            objectName = epoch["atlas_designation"]
            if not epoch["fnu"]:
                continue
            if epoch["mjd_obs"] < 50000.:
                continue
            if (epoch["snr"] <= 3 and ((priorLimitsFlavour == 1 and epoch["mjd_obs"] > lowerDetectionMjd - 30.) or (priorLimitsFlavour == 2 and epoch["mjd_obs"] > lowerMJDLimit) or priorLimitsFlavour is None) and ((postLimitsFlavour == 1 and epoch["mjd_obs"] < upperDetectionMjd + 10.) or (postLimitsFlavour == 2 and epoch["mjd_obs"] < upperMJDLimit) or postLimitsFlavour is None)):
                limits[epoch["filter"]]["mjds"].append(epoch["mjd_obs"])
                limits[epoch["filter"]]["mags"].append(epoch["fnu"])
                limits[epoch["filter"]]["magErrs"].append(epoch["fnu_error"])

    # FIX: include I-band so the y-range covers everything actually plotted
    # (previously max()/min() raised ValueError when only I-band data existed)
    allMags = magnitudes['o']['mags'] + \
        magnitudes['c']['mags'] + magnitudes['I']['mags']
    magRange = max(allMags) - min(allMags)

    # 10% PADDING AROUND THE PLOTTED FLUX RANGE
    deltaMag = magRange * 0.1

    # PLOT THE NON-DETECTION LIMITS (OPEN CIRCLES) PER FILTER
    if len(limits['o']['mjds']):
        limitLeg = ax.errorbar(limits['o']['mjds'], limits['o']['mags'], yerr=limits[
            'o']['magErrs'], color='#FFA500', fmt='o', mfc='white', mec='#FFA500', zorder=1, ms=12., alpha=0.8, linewidth=0.4,  label='<3$\\sigma$ ', capsize=10, markeredgewidth=1.2)

        # ERROBAR CAP THICKNESS
        handles.append(limitLeg)
        limitLeg[1][0].set_markeredgewidth('0.4')
        limitLeg[1][1].set_markeredgewidth('0.4')

    if len(limits['c']['mjds']):
        limitLeg = ax.errorbar(limits['c']['mjds'], limits['c']['mags'], yerr=limits[
            'c']['magErrs'], color='#2aa198', fmt='o', mfc='white', mec='#2aa198', zorder=1, ms=12., alpha=0.8, linewidth=0.4, label='<3$\\sigma$ ', capsize=10, markeredgewidth=1.2)
        # ERROBAR CAP THICKNESS
        limitLeg[1][0].set_markeredgewidth('0.4')
        limitLeg[1][1].set_markeredgewidth('0.4')
        # ONLY ONE "<3 sigma" ENTRY IN THE LEGEND
        if not len(handles):
            handles.append(limitLeg)

    if len(limits['I']['mjds']):
        limitLeg = ax.errorbar(limits['I']['mjds'], limits['I']['mags'], yerr=limits[
            'I']['magErrs'], color='#dc322f', fmt='o', mfc='white', mec='#dc322f', zorder=1, ms=12., alpha=0.8, linewidth=0.4, label='<3$\\sigma$ ', capsize=10, markeredgewidth=1.2)
        # ERROBAR CAP THICKNESS
        limitLeg[1][0].set_markeredgewidth('0.4')
        limitLeg[1][1].set_markeredgewidth('0.4')
        if not len(handles):
            handles.append(limitLeg)

    # PLOT THE DETECTIONS (FILLED CIRCLES) PER FILTER, TRACKING THE PEAK FLUX
    if len(magnitudes['o']['mjds']):
        orangeMag = ax.errorbar(magnitudes['o']['mjds'], magnitudes['o']['mags'], yerr=magnitudes[
            'o']['magErrs'], color='#FFA500', fmt='o', mfc='#FFA500', mec='#FFA500', zorder=1, ms=12., alpha=0.8, linewidth=1.2,  label='o-band mag ', capsize=10)

        # ERROBAR CAP THICKNESS
        orangeMag[1][0].set_markeredgewidth('0.7')
        orangeMag[1][1].set_markeredgewidth('0.7')
        handles.append(orangeMag)
        filterPeak = max(
            np.array(magnitudes['o']['mags']) + np.array(magnitudes['o']['magErrs']))
        if filterPeak > upperMag:
            upperMag = filterPeak

    if len(magnitudes['c']['mjds']):
        cyanMag = ax.errorbar(magnitudes['c']['mjds'], magnitudes['c']['mags'], yerr=magnitudes[
            'c']['magErrs'], color='#2aa198', fmt='o', mfc='#2aa198', mec='#2aa198', zorder=1, ms=12., alpha=0.8, linewidth=1.2, label='c-band mag ', capsize=10)
        # ERROBAR CAP THICKNESS
        cyanMag[1][0].set_markeredgewidth('0.7')
        cyanMag[1][1].set_markeredgewidth('0.7')
        handles.append(cyanMag)
        filterPeak = max(
            np.array(magnitudes['c']['mags']) + np.array(magnitudes['c']['magErrs']))
        if filterPeak > upperMag:
            upperMag = filterPeak

    if len(magnitudes['I']['mjds']):
        iMag = ax.errorbar(magnitudes['I']['mjds'], magnitudes['I']['mags'], yerr=magnitudes[
            'I']['magErrs'], color='#dc322f', fmt='o', mfc='#dc322f', mec='#dc322f', zorder=1, ms=12., alpha=0.8, linewidth=1.2, label='I-band mag ', capsize=10)
        # ERROBAR CAP THICKNESS
        iMag[1][0].set_markeredgewidth('0.7')
        iMag[1][1].set_markeredgewidth('0.7')
        handles.append(iMag)
        filterPeak = max(
            np.array(magnitudes['I']['mags']) + np.array(magnitudes['I']['magErrs']))
        if filterPeak > upperMag:
            upperMag = filterPeak

    plt.legend(handles=handles, prop={
               'size': 13.5}, bbox_to_anchor=(0.95, 1.2), loc=0, borderaxespad=0., ncol=4, scatterpoints=1)

    # SET THE TEMPORAL X-RANGE
    # FIX: include I-band mjds (previously min()/max() crashed when only
    # I-band detections were present)
    allMjd = magnitudes['o']['mjds'] + \
        magnitudes['c']['mjds'] + magnitudes['I']['mjds']
    xmin = min(allMjd) - 5.
    xmax = max(allMjd) + 5.
    ax.set_xlim([xmin, xmax])

    ax.set_ylim([0. - deltaMag, upperMag + deltaMag])
    y_formatter = mpl.ticker.FormatStrFormatter("%2.1f")
    ax.yaxis.set_major_formatter(y_formatter)

    # RELABEL THE FLUX AXIS WITH THE EQUIVALENT AB MAGNITUDES
    ax.set_yticks([2.2])
    import matplotlib.ticker as ticker

    magLabels = [20., 19.5, 19.0, 18.5,
                 18.0, 17.5, 17.0, 16.5, 16.0, 15.5, 15.0]
    # AB mag -> fnu (scaled by 1e27): m = -2.5*log10(fnu) - 48.6
    magFluxes = [pow(10, old_div(-(m + 48.6), 2.5)) * 1e27 for m in magLabels]

    ax.yaxis.set_major_locator(ticker.FixedLocator((magFluxes)))
    ax.yaxis.set_major_formatter(ticker.FixedFormatter((magLabels)))

    # ADD SECOND Y-AXIS (RAW FLUX)
    ax2 = ax.twinx()
    ax2.set_ylim([0. - deltaMag, upperMag + deltaMag])
    ax2.yaxis.set_major_formatter(y_formatter)

    # ADD SECOND X-AXIS (UT DATE)
    lower, upper = ax.get_xlim()
    utLower = converter.mjd_to_ut_datetime(mjd=lower, datetimeObject=True)
    utUpper = converter.mjd_to_ut_datetime(mjd=upper, datetimeObject=True)
    ax3 = ax.twiny()
    ax3.set_xlim([utLower, utUpper])
    ax3.grid(True)
    ax.xaxis.grid(False)
    plt.setp(ax3.xaxis.get_majorticklabels(),
             rotation=45, horizontalalignment='left')
    ax3.xaxis.set_major_formatter(dates.DateFormatter('%b %d'))

    ax2.set_ylabel('$F_{nu} \\times 1e^{27}$', rotation=-90.,  labelpad=27)

    # ANNOTATE THE DISCOVERY EPOCH
    discoveryText = "discovery epoch\nmjd %(discoveryMjd)2.2f\n%(discoveryUT)s UT" % locals(
    )
    ax.text(0.05, 0.95, discoveryText,
            verticalalignment='top', horizontalalignment='left',
            transform=ax.transAxes,
            color='black', fontsize=12, linespacing=1.5)

    ax2.grid(False)

    # SAVE PLOT TO FILE
    # Recursively create missing directories
    if not os.path.exists(cacheDirectory):
        os.makedirs(cacheDirectory)
    fileName = cacheDirectory + "/atlas_fp_lightcurve.png"
    plt.savefig(fileName, bbox_inches='tight', transparent=False,
                pad_inches=0.1)

    # CLEAR FIGURE
    plt.clf()
    # FIX: also close the figure, otherwise matplotlib keeps every figure
    # alive and memory grows when plotting many transients in one process
    plt.close(fig)

    log.debug('completed the ``create_lc`` function')
    return None
Example 2
0
def doit(output_h5_filename=None,
         kalman_filename=None,
         data2d_filename=None,
         start=None,
         stop=None,
         gate_angle_threshold_degrees=40.0,
         area_threshold_for_orientation=0.0,
         obj_only=None,
         options=None):
    gate_angle_threshold_radians = gate_angle_threshold_degrees * D2R

    if options.show:
        import matplotlib.pyplot as plt
        import matplotlib.ticker as mticker

    M = SymobolicModels()
    x = sympy.DeferredVector('x')
    G_symbolic = M.get_observation_model(x)
    dx_symbolic = M.get_process_model(x)

    if 0:
        print 'G_symbolic'
        sympy.pprint(G_symbolic)
        print

    G_linearized = [G_symbolic.diff(x[i]) for i in range(7)]
    if 0:
        print 'G_linearized'
        for i in range(len(G_linearized)):
            sympy.pprint(G_linearized[i])
        print

    arg_tuple_x = (M.P00, M.P01, M.P02, M.P03, M.P10, M.P11, M.P12, M.P13,
                   M.P20, M.P21, M.P22, M.P23, M.Ax, M.Ay, M.Az, x)

    xm = sympy.DeferredVector('xm')
    arg_tuple_x_xm = (M.P00, M.P01, M.P02, M.P03, M.P10, M.P11, M.P12, M.P13,
                      M.P20, M.P21, M.P22, M.P23, M.Ax, M.Ay, M.Az, x, xm)

    eval_G = lambdify(arg_tuple_x, G_symbolic, 'numpy')
    eval_linG = lambdify(arg_tuple_x, G_linearized, 'numpy')

    # coord shift of observation model
    phi_symbolic = M.get_observation_model(xm)

    # H = G - phi
    H_symbolic = G_symbolic - phi_symbolic

    # We still take derivative wrt x (not xm).
    H_linearized = [H_symbolic.diff(x[i]) for i in range(7)]

    eval_phi = lambdify(arg_tuple_x_xm, phi_symbolic, 'numpy')
    eval_H = lambdify(arg_tuple_x_xm, H_symbolic, 'numpy')
    eval_linH = lambdify(arg_tuple_x_xm, H_linearized, 'numpy')

    if 0:
        print 'dx_symbolic'
        sympy.pprint(dx_symbolic)
        print

    eval_dAdt = drop_dims(lambdify(x, dx_symbolic, 'numpy'))

    debug_level = 0
    if debug_level:
        np.set_printoptions(linewidth=130, suppress=True)

    if os.path.exists(output_h5_filename):
        raise RuntimeError("will not overwrite old file '%s'" %
                           output_h5_filename)

    ca = core_analysis.get_global_CachingAnalyzer()
    with open_file_safe(output_h5_filename, mode='w') as output_h5:

        with open_file_safe(kalman_filename, mode='r') as kh5:
            with open_file_safe(data2d_filename, mode='r') as h5:
                for input_node in kh5.root._f_iter_nodes():
                    # copy everything from source to dest
                    input_node._f_copy(output_h5.root, recursive=True)

                try:
                    dest_table = output_h5.root.ML_estimates
                except tables.exceptions.NoSuchNodeError, err1:
                    # backwards compatibility
                    try:
                        dest_table = output_h5.root.kalman_observations
                    except tables.exceptions.NoSuchNodeError, err2:
                        raise err1
                for colname in ['hz_line%d' % i for i in range(6)]:
                    clear_col(dest_table, colname)
                dest_table.flush()

                if options.show:
                    fig1 = plt.figure()
                    ax1 = fig1.add_subplot(511)
                    ax2 = fig1.add_subplot(512, sharex=ax1)
                    ax3 = fig1.add_subplot(513, sharex=ax1)
                    ax4 = fig1.add_subplot(514, sharex=ax1)
                    ax5 = fig1.add_subplot(515, sharex=ax1)
                    ax1.xaxis.set_major_formatter(
                        mticker.FormatStrFormatter("%d"))

                    min_frame_range = np.inf
                    max_frame_range = -np.inf

                reconst = reconstruct.Reconstructor(kh5)

                camn2cam_id, cam_id2camns = result_utils.get_caminfo_dicts(h5)
                fps = result_utils.get_fps(h5)
                dt = 1.0 / fps

                used_camn_dict = {}

                # associate framenumbers with timestamps using 2d .h5 file
                data2d = h5.root.data2d_distorted[:]  # load to RAM
                if start is not None:
                    data2d = data2d[data2d['frame'] >= start]
                if stop is not None:
                    data2d = data2d[data2d['frame'] <= stop]
                data2d_idxs = np.arange(len(data2d))
                h5_framenumbers = data2d['frame']
                h5_frame_qfi = result_utils.QuickFrameIndexer(h5_framenumbers)

                ML_estimates_2d_idxs = (kh5.root.ML_estimates_2d_idxs[:])

                all_kobs_obj_ids = dest_table.read(field='obj_id')
                all_kobs_frames = dest_table.read(field='frame')
                use_obj_ids = np.unique(all_kobs_obj_ids)
                if obj_only is not None:
                    use_obj_ids = obj_only

                if hasattr(kh5.root.kalman_estimates.attrs,
                           'dynamic_model_name'):
                    dynamic_model = kh5.root.kalman_estimates.attrs.dynamic_model_name
                    if dynamic_model.startswith('EKF '):
                        dynamic_model = dynamic_model[4:]
                else:
                    dynamic_model = 'mamarama, units: mm'
                    warnings.warn(
                        'could not determine dynamic model name, using "%s"' %
                        dynamic_model)

                for obj_id_enum, obj_id in enumerate(use_obj_ids):
                    # Use data association step from kalmanization to load potentially
                    # relevant 2D orientations, but discard previous 3D orientation.
                    if obj_id_enum % 100 == 0:
                        print 'obj_id %d (%d of %d)' % (obj_id, obj_id_enum,
                                                        len(use_obj_ids))
                    if options.show:
                        all_xhats = []
                        all_ori = []

                    output_row_obj_id_cond = all_kobs_obj_ids == obj_id

                    obj_3d_rows = ca.load_dynamics_free_MLE_position(
                        obj_id, kh5)
                    if start is not None:
                        obj_3d_rows = obj_3d_rows[
                            obj_3d_rows['frame'] >= start]
                    if stop is not None:
                        obj_3d_rows = obj_3d_rows[obj_3d_rows['frame'] <= stop]

                    try:
                        smoothed_3d_rows = ca.load_data(
                            obj_id,
                            kh5,
                            use_kalman_smoothing=True,
                            frames_per_second=fps,
                            dynamic_model_name=dynamic_model)
                    except core_analysis.NotEnoughDataToSmoothError:
                        continue

                    smoothed_frame_qfi = result_utils.QuickFrameIndexer(
                        smoothed_3d_rows['frame'])

                    slopes_by_camn_by_frame = collections.defaultdict(dict)
                    x0d_by_camn_by_frame = collections.defaultdict(dict)
                    y0d_by_camn_by_frame = collections.defaultdict(dict)
                    pt_idx_by_camn_by_frame = collections.defaultdict(dict)
                    min_frame = np.inf
                    max_frame = -np.inf

                    start_idx = None
                    for this_idx, this_3d_row in enumerate(obj_3d_rows):
                        # iterate over each sample in the current camera
                        framenumber = this_3d_row['frame']

                        if not np.isnan(this_3d_row['hz_line0']):
                            # We have a valid initial 3d orientation guess.
                            if framenumber < min_frame:
                                min_frame = framenumber
                                assert start_idx is None, "frames out of order?"
                                start_idx = this_idx

                        max_frame = max(max_frame, framenumber)
                        h5_2d_row_idxs = h5_frame_qfi.get_frame_idxs(
                            framenumber)

                        frame2d = data2d[h5_2d_row_idxs]
                        frame2d_idxs = data2d_idxs[h5_2d_row_idxs]

                        obs_2d_idx = this_3d_row['obs_2d_idx']
                        kobs_2d_data = ML_estimates_2d_idxs[int(obs_2d_idx)]

                        # Parse VLArray.
                        this_camns = kobs_2d_data[0::2]
                        this_camn_idxs = kobs_2d_data[1::2]

                        # Now, for each camera viewing this object at this
                        # frame, extract images.
                        for camn, camn_pt_no in zip(this_camns,
                                                    this_camn_idxs):
                            # find 2D point corresponding to object
                            cam_id = camn2cam_id[camn]

                            cond = ((frame2d['camn'] == camn) &
                                    (frame2d['frame_pt_idx'] == camn_pt_no))
                            idxs = np.nonzero(cond)[0]
                            if len(idxs) == 0:
                                continue
                            assert len(idxs) == 1
                            ## if len(idxs)!=1:
                            ##     raise ValueError('expected one (and only one) frame, got %d'%len(idxs))
                            idx = idxs[0]

                            orig_data2d_rownum = frame2d_idxs[idx]
                            frame_timestamp = frame2d[idx]['timestamp']

                            row = frame2d[idx]
                            assert framenumber == row['frame']
                            if ((row['eccentricity'] <
                                 reconst.minimum_eccentricity)
                                    or (row['area'] <
                                        area_threshold_for_orientation)):
                                slopes_by_camn_by_frame[camn][
                                    framenumber] = np.nan
                                x0d_by_camn_by_frame[camn][
                                    framenumber] = np.nan
                                y0d_by_camn_by_frame[camn][
                                    framenumber] = np.nan
                                pt_idx_by_camn_by_frame[camn][
                                    framenumber] = camn_pt_no
                            else:
                                slopes_by_camn_by_frame[camn][
                                    framenumber] = row['slope']
                                x0d_by_camn_by_frame[camn][framenumber] = row[
                                    'x']
                                y0d_by_camn_by_frame[camn][framenumber] = row[
                                    'y']
                                pt_idx_by_camn_by_frame[camn][
                                    framenumber] = camn_pt_no

                    if start_idx is None:
                        warnings.warn("skipping obj_id %d: "
                                      "could not find valid start frame" %
                                      obj_id)
                        continue

                    obj_3d_rows = obj_3d_rows[start_idx:]

                    # now collect in a numpy array for all cam

                    assert int(min_frame) == min_frame
                    assert int(max_frame + 1) == max_frame + 1
                    frame_range = np.arange(int(min_frame), int(max_frame + 1))
                    if debug_level >= 1:
                        print 'frame range %d-%d' % (frame_range[0],
                                                     frame_range[-1])
                    camn_list = slopes_by_camn_by_frame.keys()
                    camn_list.sort()
                    cam_id_list = [camn2cam_id[camn] for camn in camn_list]
                    n_cams = len(camn_list)
                    n_frames = len(frame_range)

                    save_cols = {}
                    save_cols['frame'] = []
                    for camn in camn_list:
                        save_cols['dist%d' % camn] = []
                        save_cols['used%d' % camn] = []
                        save_cols['theta%d' % camn] = []

                    # NxM array with rows being frames and cols being cameras
                    slopes = np.ones((n_frames, n_cams), dtype=np.float)
                    x0ds = np.ones((n_frames, n_cams), dtype=np.float)
                    y0ds = np.ones((n_frames, n_cams), dtype=np.float)
                    for j, camn in enumerate(camn_list):

                        slopes_by_frame = slopes_by_camn_by_frame[camn]
                        x0d_by_frame = x0d_by_camn_by_frame[camn]
                        y0d_by_frame = y0d_by_camn_by_frame[camn]

                        for frame_idx, absolute_frame_number in enumerate(
                                frame_range):

                            slopes[frame_idx, j] = slopes_by_frame.get(
                                absolute_frame_number, np.nan)
                            x0ds[frame_idx,
                                 j] = x0d_by_frame.get(absolute_frame_number,
                                                       np.nan)
                            y0ds[frame_idx,
                                 j] = y0d_by_frame.get(absolute_frame_number,
                                                       np.nan)

                        if options.show:
                            frf = np.array(frame_range, dtype=np.float)
                            min_frame_range = min(np.min(frf), min_frame_range)
                            max_frame_range = max(np.max(frf), max_frame_range)

                            ax1.plot(frame_range,
                                     slope2modpi(slopes[:, j]),
                                     '.',
                                     label=camn2cam_id[camn])

                    if options.show:
                        ax1.legend()

                    if 1:
                        # estimate orientation of initial frame
                        row0 = obj_3d_rows[:
                                           1]  # take only first row but keep as 1d array
                        hzlines = np.array([
                            row0['hz_line0'], row0['hz_line1'],
                            row0['hz_line2'], row0['hz_line3'],
                            row0['hz_line4'], row0['hz_line5']
                        ]).T
                        directions = reconstruct.line_direction(hzlines)
                        q0 = PQmath.orientation_to_quat(directions[0])
                        assert not np.isnan(
                            q0.x), "cannot start with missing orientation"
                        w0 = 0, 0, 0  # no angular rate
                        init_x = np.array(
                            [w0[0], w0[1], w0[2], q0.x, q0.y, q0.z, q0.w])

                        Pminus = np.zeros((7, 7))

                        # angular rate part of state variance is .5
                        for i in range(0, 3):
                            Pminus[i, i] = .5

                        # quaternion part of state variance is 1
                        for i in range(3, 7):
                            Pminus[i, i] = 1

                    if 1:
                        # setup of noise estimates
                        Q = np.zeros((7, 7))

                        # angular rate part of state variance
                        for i in range(0, 3):
                            Q[i, i] = Q_scalar_rate

                        # quaternion part of state variance
                        for i in range(3, 7):
                            Q[i, i] = Q_scalar_quat

                    preA = np.eye(7)

                    ekf = kalman_ekf.EKF(init_x, Pminus)
                    previous_posterior_x = init_x
                    if options.show:
                        _save_plot_rows = []
                        _save_plot_rows_used = []
                    for frame_idx, absolute_frame_number in enumerate(
                            frame_range):
                        # Evaluate the Jacobian of the process update
                        # using previous frame's posterior estimate. (This
                        # is not quite the same as this frame's prior
                        # estimate. The difference this frame's prior
                        # estimate is _after_ the process update
                        # model. Which we need to get doing this.)

                        if options.show:
                            _save_plot_rows.append(np.nan * np.ones(
                                (n_cams, )))
                            _save_plot_rows_used.append(np.nan * np.ones(
                                (n_cams, )))

                        this_dx = eval_dAdt(previous_posterior_x)
                        A = preA + this_dx * dt
                        if debug_level >= 1:
                            print
                            print 'frame', absolute_frame_number, '-' * 40
                            print 'previous posterior', previous_posterior_x
                            if debug_level > 6:
                                print 'A'
                                print A

                        xhatminus, Pminus = ekf.step1__calculate_a_priori(A, Q)
                        if debug_level >= 1:
                            print 'new prior', xhatminus

                        # 1. Gate per-camera orientations.

                        this_frame_slopes = slopes[frame_idx, :]
                        this_frame_theta_measured = slope2modpi(
                            this_frame_slopes)
                        this_frame_x0d = x0ds[frame_idx, :]
                        this_frame_y0d = y0ds[frame_idx, :]
                        if debug_level >= 5:
                            print 'this_frame_slopes', this_frame_slopes

                        save_cols['frame'].append(absolute_frame_number)
                        for j, camn in enumerate(camn_list):
                            # default to no detection, change below
                            save_cols['dist%d' % camn].append(np.nan)
                            save_cols['used%d' % camn].append(0)
                            save_cols['theta%d' % camn].append(
                                this_frame_theta_measured[j])

                        all_data_this_frame_missing = False
                        gate_vector = None

                        y = []  # observation (per camera)
                        hx = []  # expected observation (per camera)
                        C = []  # linearized observation model (per camera)
                        N_obs_this_frame = 0
                        cams_without_data = np.isnan(this_frame_slopes)
                        if np.all(cams_without_data):
                            all_data_this_frame_missing = True

                        smoothed_pos_idxs = smoothed_frame_qfi.get_frame_idxs(
                            absolute_frame_number)
                        if len(smoothed_pos_idxs) == 0:
                            all_data_this_frame_missing = True
                            smoothed_pos_idx = None
                            smooth_row = None
                            center_position = None
                        else:
                            try:
                                assert len(smoothed_pos_idxs) == 1
                            except:
                                print 'obj_id', obj_id
                                print 'absolute_frame_number', absolute_frame_number
                                if len(frame_range):
                                    print 'frame_range[0],frame_rang[-1]', frame_range[
                                        0], frame_range[-1]
                                else:
                                    print 'no frame range'
                                print 'len(smoothed_pos_idxs)', len(
                                    smoothed_pos_idxs)
                                raise
                            smoothed_pos_idx = smoothed_pos_idxs[0]
                            smooth_row = smoothed_3d_rows[smoothed_pos_idx]
                            assert smooth_row['frame'] == absolute_frame_number
                            center_position = np.array(
                                (smooth_row['x'], smooth_row['y'],
                                 smooth_row['z']))
                            if debug_level >= 2:
                                print 'center_position', center_position

                        if not all_data_this_frame_missing:
                            if expected_orientation_method == 'trust_prior':
                                state_for_phi = xhatminus  # use a priori
                            elif expected_orientation_method == 'SVD_line_fits':
                                # construct matrix of planes
                                P = []
                                for camn_idx in range(n_cams):
                                    this_x0d = this_frame_x0d[camn_idx]
                                    this_y0d = this_frame_y0d[camn_idx]
                                    slope = this_frame_slopes[camn_idx]
                                    plane, ray = reconst.get_3D_plane_and_ray(
                                        cam_id, this_x0d, this_y0d, slope)
                                    if np.isnan(plane[0]):
                                        continue
                                    P.append(plane)
                                if len(P) < 2:
                                    # not enough data to do SVD... fallback to prior
                                    state_for_phi = xhatminus  # use a priori
                                else:
                                    Lco = reconstruct.intersect_planes_to_find_line(
                                        P)
                                    q = PQmath.pluecker_to_quat(Lco)
                                    state_for_phi = cgtypes_quat2statespace(q)

                            cams_with_data = ~cams_without_data
                            possible_cam_idxs = np.nonzero(cams_with_data)[0]
                            if debug_level >= 6:
                                print 'possible_cam_idxs', possible_cam_idxs
                            gate_vector = np.zeros((n_cams, ), dtype=np.bool)
                            ## flip_vector = np.zeros( (n_cams,), dtype=np.bool)
                            for camn_idx in possible_cam_idxs:
                                cam_id = cam_id_list[camn_idx]
                                camn = camn_list[camn_idx]

                                # This ignores distortion. To incorporate
                                # distortion, this would require
                                # appropriate scaling of orientation
                                # vector, which would require knowing
                                # target's size. In which case we should
                                # track head and tail separately and not
                                # use this whole quaternion mess.

                                ## theta_measured=slope2modpi(
                                ##     this_frame_slopes[camn_idx])
                                theta_measured = this_frame_theta_measured[
                                    camn_idx]
                                if debug_level >= 6:
                                    print 'cam_id %s, camn %d' % (cam_id, camn)
                                if debug_level >= 3:
                                    a = reconst.find2d(cam_id, center_position)
                                    other_position = get_point_on_line(
                                        xhatminus, center_position)
                                    b = reconst.find2d(cam_id, other_position)
                                    theta_expected = find_theta_mod_pi_between_points(
                                        a, b)
                                    print('  theta_expected,theta_measured',
                                          theta_expected * R2D,
                                          theta_measured * R2D)

                                P = reconst.get_pmat(cam_id)
                                if 0:
                                    args_x = (P[0, 0], P[0, 1], P[0, 2],
                                              P[0, 3], P[1, 0], P[1, 1], P[1,
                                                                           2],
                                              P[1, 3], P[2, 0], P[2, 1], P[2,
                                                                           2],
                                              P[2, 3], center_position[0],
                                              center_position[1],
                                              center_position[2], xhatminus)
                                    this_y = theta_measured
                                    this_hx = eval_G(*args_x)
                                    this_C = eval_linG(*args_x)
                                else:
                                    args_x_xm = (P[0, 0], P[0, 1], P[0, 2],
                                                 P[0, 3], P[1, 0], P[1,
                                                                     1], P[1,
                                                                           2],
                                                 P[1, 3], P[2, 0], P[2,
                                                                     1], P[2,
                                                                           2],
                                                 P[2, 3], center_position[0],
                                                 center_position[1],
                                                 center_position[2], xhatminus,
                                                 state_for_phi)
                                    this_phi = eval_phi(*args_x_xm)
                                    this_y = angle_diff(theta_measured,
                                                        this_phi,
                                                        mod_pi=True)
                                    this_hx = eval_H(*args_x_xm)
                                    this_C = eval_linH(*args_x_xm)
                                    if debug_level >= 3:
                                        print('  this_phi,this_y',
                                              this_phi * R2D, this_y * R2D)

                                save_cols['dist%d' % camn][-1] = this_y  # save

                                # gate
                                if abs(this_y) < gate_angle_threshold_radians:
                                    save_cols['used%d' % camn][-1] = 1
                                    gate_vector[camn_idx] = 1
                                    if debug_level >= 3:
                                        print '    good'
                                    if options.show:
                                        _save_plot_rows_used[-1][
                                            camn_idx] = this_y
                                    y.append(this_y)
                                    hx.append(this_hx)
                                    C.append(this_C)
                                    N_obs_this_frame += 1

                                    # Save which camn and camn_pt_no was used.
                                    if absolute_frame_number not in used_camn_dict:
                                        used_camn_dict[
                                            absolute_frame_number] = []
                                    camn_pt_no = (pt_idx_by_camn_by_frame[camn]
                                                  [absolute_frame_number])
                                    used_camn_dict[
                                        absolute_frame_number].append(
                                            (camn, camn_pt_no))
                                else:
                                    if options.show:
                                        _save_plot_rows[-1][camn_idx] = this_y
                                    if debug_level >= 6:
                                        print '    bad'
                            if debug_level >= 1:
                                print 'gate_vector', gate_vector
                                #print 'flip_vector',flip_vector
                            all_data_this_frame_missing = not bool(
                                np.sum(gate_vector))

                        # 3. Construct observations model using all
                        # gated-in camera orientations.

                        if all_data_this_frame_missing:
                            C = None
                            R = None
                            hx = None
                        else:
                            C = np.array(C)
                            R = R_scalar * np.eye(N_obs_this_frame)
                            hx = np.array(hx)
                            if 0:
                                # crazy observation error scaling
                                for i in range(N_obs_this_frame):
                                    beyond = abs(y[i]) - 10 * D2R
                                    beyond = max(0, beyond)  # clip at zero
                                    R[i:i] = R_scalar * (1 + 10 * beyond)
                            if debug_level >= 6:
                                print 'full values'
                                print 'C', C
                                print 'hx', hx
                                print 'y', y
                                print 'R', R

                        if debug_level >= 1:
                            print 'all_data_this_frame_missing', all_data_this_frame_missing
                        xhat, P = ekf.step2__calculate_a_posteriori(
                            xhatminus,
                            Pminus,
                            y=y,
                            hx=hx,
                            C=C,
                            R=R,
                            missing_data=all_data_this_frame_missing)
                        if debug_level >= 1:
                            print 'xhat', xhat
                        previous_posterior_x = xhat
                        if center_position is not None:
                            # save
                            output_row_frame_cond = all_kobs_frames == absolute_frame_number
                            output_row_cond = output_row_frame_cond & output_row_obj_id_cond
                            output_idxs = np.nonzero(output_row_cond)[0]
                            if len(output_idxs) == 0:
                                pass
                            else:
                                assert len(output_idxs) == 1
                                idx = output_idxs[0]
                                hz = state_to_hzline(xhat, center_position)
                                for row in dest_table.iterrows(start=idx,
                                                               stop=(idx + 1)):
                                    for i in range(6):
                                        row['hz_line%d' % i] = hz[i]
                                    row.update()
                        ## xhat_results[ obj_id ][absolute_frame_number ] = (
                        ##     xhat,center_position)
                        if options.show:
                            all_xhats.append(xhat)
                            all_ori.append(state_to_ori(xhat))

                    # save to H5 file
                    names = [colname for colname in save_cols]
                    names.sort()
                    arrays = []
                    for name in names:
                        if name == 'frame':
                            dtype = np.int64
                        elif name.startswith('dist'):
                            dtype = np.float32
                        elif name.startswith('used'):
                            dtype = np.bool
                        elif name.startswith('theta'):
                            dtype = np.float32
                        else:
                            raise NameError('unknown name %s' % name)
                        arr = np.array(save_cols[name], dtype=dtype)
                        arrays.append(arr)
                    save_recarray = np.rec.fromarrays(arrays, names=names)
                    h5group = core_analysis.get_group_for_obj(obj_id,
                                                              output_h5,
                                                              writeable=True)
                    output_h5.create_table(h5group,
                                           'obj%d' % obj_id,
                                           save_recarray,
                                           filters=tables.Filters(
                                               1, complib='lzo'))

                    if options.show:
                        all_xhats = np.array(all_xhats)
                        all_ori = np.array(all_ori)
                        _save_plot_rows = np.array(_save_plot_rows)
                        _save_plot_rows_used = np.array(_save_plot_rows_used)

                        ax2.plot(frame_range, all_xhats[:, 0], '.', label='p')
                        ax2.plot(frame_range, all_xhats[:, 1], '.', label='q')
                        ax2.plot(frame_range, all_xhats[:, 2], '.', label='r')
                        ax2.legend()

                        ax3.plot(frame_range, all_xhats[:, 3], '.', label='a')
                        ax3.plot(frame_range, all_xhats[:, 4], '.', label='b')
                        ax3.plot(frame_range, all_xhats[:, 5], '.', label='c')
                        ax3.plot(frame_range, all_xhats[:, 6], '.', label='d')
                        ax3.legend()

                        ax4.plot(frame_range, all_ori[:, 0], '.', label='x')
                        ax4.plot(frame_range, all_ori[:, 1], '.', label='y')
                        ax4.plot(frame_range, all_ori[:, 2], '.', label='z')
                        ax4.legend()

                        colors = []
                        for i in range(n_cams):
                            line, = ax5.plot(frame_range,
                                             _save_plot_rows_used[:, i] * R2D,
                                             'o',
                                             label=cam_id_list[i])
                            colors.append(line.get_color())
                        for i in range(n_cams):
                            # loop again to get normal MPL color cycling
                            ax5.plot(frame_range,
                                     _save_plot_rows[:, i] * R2D,
                                     'o',
                                     mec=colors[i],
                                     ms=1.0)
                        ax5.set_ylabel('observation (deg)')
                        ax5.legend()
Esempio n. 3
0
__author__ = 'Rusty Gentile'

matplotlib.rcParams.update({'font.size': 10})
n_sims = 10
n_years = 5


def _plot_hive_counts(panel_ax, results_prefix, title):
    """Plot yearly hive counts for each simulation run on *panel_ax*.

    Reads ``./data/results/results_<prefix>_sim_<k>.csv`` for each of the
    ``n_sims`` runs, groups rows by year, and plots the per-year row count
    (number of hive records) as one labelled line per run.

    Parameters
    ----------
    panel_ax : matplotlib.axes.Axes
        Axes to draw on.
    results_prefix : str
        File-name infix selecting the scenario ('cons' or 'aggr').
    title : str
        Panel title.
    """
    for k in range(n_sims):
        df_sim = pd.read_csv(f'./data/results/results_{results_prefix}_sim_{k}.csv')
        gb_sim = df_sim.groupby('year')
        panel_ax.plot(gb_sim['year'].count(), label=f'sim #{k+1}')

    # Integer year ticks, capped at n_years major ticks.
    panel_ax.xaxis.set_major_locator(ticker.MaxNLocator(n_years))
    panel_ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%d'))
    panel_ax.set_xlabel('Year')
    panel_ax.set_ylabel('N Hives')
    panel_ax.set_title(title)
    panel_ax.legend()


fig, ax = plt.subplots(1, 2, figsize=(10, 4))

# The two panels previously duplicated the same plotting code, and only the
# first one drew a legend; the shared helper applies it to both consistently.
_plot_hive_counts(ax[0], 'cons', 'Conservative Assumptions')
_plot_hive_counts(ax[1], 'aggr', 'Aggressive Assumptions')
Esempio n. 4
0
        # Compare input notes against measured notes from a frequency-sweep
        # test. Presumably process_test splits results into input (i_*),
        # correctly measured (c_*) and incorrectly measured (inc_*)
        # time/note pairs — TODO confirm against process_test's definition.
        i_t, i_n, c_t, c_n, inc_t, inc_n = process_test(
            args.data_input, args.data_result, args.b)

        fig, ax = plt.subplots()

        ax.set_title(f'Frequency Sweep at {args.b} BPM, {VOLS[args.v]} Volume')

        # Note frequencies are divided by 1e3 so the y-axis reads in kHz
        # (matches the 'Frequency (kHz)' label below).
        ax.scatter(i_t, i_n / 1e3, marker='o', color='k', label='Input Notes')
        ax.scatter(c_t,
                   c_n / 1e3,
                   marker='o',
                   color='b',
                   label='Correctly Measured Notes')
        ax.scatter(inc_t,
                   inc_n / 1e3,
                   marker='x',
                   color='r',
                   label='Incorrectly Measured Notes')
        ax.legend(loc='best')

        ax.set_xlabel('Time (s)')

        # Log frequency axis with minor ticks every 0.1 kHz; both major and
        # minor tick labels are formatted with one decimal place.
        ax.set_yscale('log')
        ax.yaxis.set_minor_locator(ticker.MultipleLocator(0.1))
        ax.yaxis.set_minor_formatter(ticker.FormatStrFormatter('%.1f'))
        ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%.1f'))
        ax.set_ylabel('Frequency (kHz)')

        fig.tight_layout()
        plt.show()
# Overlay the sigma_eta = 0.7 curve: dashed blue line with square markers.
# Raw strings avoid the invalid escape sequences ('\s') the plain-string
# LaTeX labels produced.
plt.plot(roots7,
         Ksigma2p7,
         linestyle='--',
         linewidth=2,
         color='b',
         markersize=10,
         marker='s',
         markerfacecolor='b',
         markeredgecolor='b',
         label=r'$\sigma_\eta=0.7$')

ax.tick_params(axis='both', which='major', labelsize=14)

# X axis: major ticks every 50 GeV; tick text is rendered by `sformatter`.
# The original additionally installed set_xticklabels(np.arange(0, 250, 25))
# (a label array that did not even match the step-50 tick positions) and a
# FormatStrFormatter('%d'), both of which were immediately overridden by
# sformatter below — those dead, conflicting calls are removed.
ax.set_xticks(np.arange(0, 250, 50), minor=False)
ax.set_xticks(np.arange(0, 250, 50), minor=True)  # NOTE(review): same step as major — confirm intended minor spacing
ax.xaxis.set_major_formatter(sformatter)
plt.xlim(0, 210)

# Y axis: major ticks every 0.2, minor every 0.05; sformatter renders the
# labels. (A FormatStrFormatter('%1f') — a malformed spec, presumably
# meant as '%.1f' — was previously installed and immediately overridden;
# removed as dead code.)
ax.set_yticks(np.arange(-1, 2.5, 0.2), minor=False)
ax.set_yticks(np.arange(-1, 2.5, 0.05), minor=True)
plt.ylim(0.25, 1.05)
ax.yaxis.set_major_formatter(sformatter)

ax.legend(loc=(0.6, 0.01), fontsize=18)

plt.xlabel(r'$\sqrt{s}_{NN}$ (GeV)', fontsize=22, weight='normal')
plt.ylabel(r'$K\sigma^2=C_4/C_2$', fontsize=22, weight='normal')
Esempio n. 6
0
    def Planewave2Dviz(
        self,
        x1,
        y1,
        x2,
        y2,
        npts2D,
        npts,
        sig,
        t,
        srcLoc=0.0,
        orientation="x",
        view="x",
        normal="Z",
        functype="E_from_ED",
        loc=0.0,
        scale="log",
        dx=50.0,
    ):
        """Plot a 2D slice of a time-domain EM field plus a 1D line profile.

        Draws two panels in one figure: a pseudo-color map of the field
        component selected by ``view`` on the plane normal to ``normal``
        (via ``self.dataview.plot2D_TD``), and a 1D profile of the same
        component evaluated along the straight line from (x1, y1) to
        (x2, y2) within that plane (via ``self.dataview.eval_TD``).

        Parameters
        ----------
        x1, y1, x2, y2 : float
            End points of the profile line in the plotting plane.
        npts2D : int
            Grid points per axis of the 2D map (nx = ny = npts2D).
        npts : int
            Sample points along the profile line.
        sig : float
            Conductivity, forwarded to ``SetDataview`` / ``eval_TD``.
        t : float
            Time at which the field is evaluated.
        srcLoc : float, optional
            Source location, forwarded to the data view.
        orientation : str, optional
            Source orientation, forwarded to the data view.
        view : str, optional
            Field component to display: "x", "y" or "z" (case-insensitive).
        normal : str, optional
            Normal of the slice plane: "X", "Y" or "Z" (case-insensitive).
        functype : str, optional
            Descriptor of the form "<field>_from_<source>" (e.g.
            "E_from_ED"); the prefix selects axis labels and units.
        loc : float, optional
            Slice location, forwarded to ``SetDataview``.
        scale : {"log", "linear"}, optional
            Amplitude scaling used in both panels.
        dx : float, optional
            Unused in this method's body (kept for interface compatibility).

        Raises
        ------
        NotImplementedError
            If ``scale`` or the ``functype`` field prefix is unrecognized.
        """
        nx, ny = npts2D, npts2D
        x, y = linefun(x1, x2, y1, y2, npts)
        # Validate scale up front so later branches may assume one of the
        # two supported values.
        if scale == "log":
            logamp = True
        elif scale == "linear":
            logamp = False
        else:
            raise NotImplementedError()
        self.SetDataview(srcLoc,
                         sig,
                         t,
                         orientation,
                         normal,
                         functype,
                         na=nx,
                         nb=ny,
                         loc=loc)
        plot1D = True  # NOTE(review): always True, so the profile panel is always drawn
        # Embed the 2D profile line into 3D coordinates: the out-of-plane
        # coordinate is filled with the slice position (self.x / .y / .z).
        if normal == "X" or normal == "x":
            xyz_line = np.c_[np.ones_like(x) * self.x, x, y]
            self.dataview.xyz_line = xyz_line
        if normal == "Y" or normal == "y":
            xyz_line = np.c_[x, np.ones_like(x) * self.y, y]
            self.dataview.xyz_line = xyz_line
        if normal == "Z" or normal == "z":
            xyz_line = np.c_[x, y, np.ones_like(x) * self.z]
            self.dataview.xyz_line = xyz_line

        plt.figure(figsize=(18 * 1.5, 3.4 * 1.5))
        gs1 = gridspec.GridSpec(2, 7)
        gs1.update(left=0.05, right=0.48, wspace=0.05)
        ax1 = plt.subplot(gs1[:2, :3])
        ax1.axis("equal")

        # Left panel: 2D map of the selected field component.
        ax1, dat1 = self.dataview.plot2D_TD(ax=ax1,
                                            view=view,
                                            colorbar=False,
                                            logamp=logamp)
        vmin, vmax = dat1.cvalues.min(), dat1.cvalues.max()
        if scale == "log":
            # With logamp the contour values are log10 amplitudes, so the
            # colorbar ticks are labelled as powers of ten.
            cb = plt.colorbar(dat1,
                              ax=ax1,
                              ticks=np.linspace(vmin, vmax, 5),
                              format="$10^{%.1f}$")
        elif scale == "linear":
            cb = plt.colorbar(dat1,
                              ax=ax1,
                              ticks=np.linspace(vmin, vmax, 5),
                              format="%.1e")

        # functype is "<field>_from_<source>"; the prefix picks labels/units.
        tempstr = functype.split("_")

        title = tempstr[0] + view

        if tempstr[0] == "E":
            unit = " (V/m)"
            fieldname = "Electric field"
        elif tempstr[0] == "H":
            unit = " (A/m)"
            fieldname = "Magnetic field"
        else:
            raise NotImplementedError()

        label = fieldname + unit
        cb.set_label(label)
        ax1.set_title(title)

        if plot1D:
            # Overlay the profile line on the 2D map, then draw the 1D
            # profile in the right-hand panel.
            ax1.plot(x, y, "r.", ms=4)
            ax2 = plt.subplot(gs1[:, 4:6])
            val_line_x, val_line_y, val_line_z = self.dataview.eval_TD(
                xyz_line, srcLoc, np.r_[sig], np.r_[t], orientation, self.func)

            # Pick the component matching `view`; an unrecognized view would
            # leave val_line unbound (NameError below) — TODO confirm callers
            # only pass x/y/z.
            if view == "X" or view == "x":
                val_line = val_line_x
            elif view == "Y" or view == "y":
                val_line = val_line_y
            elif view == "Z" or view == "z":
                val_line = val_line_z

            # NOTE(review): always column 2 (the z coordinate) is used as the
            # profile axis, even when the slice is a z-plane where that
            # column is constant — confirm intended.
            distance = xyz_line[:, 2]

            if scale == "log":
                # Plot positive values solid and |negative| values dashed so
                # both signs remain visible on a log axis.
                temp = val_line.copy() * np.nan
                temp[val_line > 0.0] = val_line[val_line > 0.0]
                ax2.plot(temp, distance, "k.-")
                temp = val_line.copy() * np.nan
                temp[val_line < 0.0] = -val_line[val_line < 0.0]
                ax2.plot(temp, distance, "k.--")
                ax2.set_xlim(abs(val_line).min(), abs(val_line).max())
                ax2.set_xscale(scale)

            elif scale == "linear":
                ax2.plot(val_line, distance, "k.-")
                ax2.set_xlim(val_line.min(), val_line.max())
                ax2.set_xscale(scale)
                # Symmetric ticks about zero plus a vertical zero line.
                xticks = np.linspace(-abs(val_line).max(),
                                     abs(val_line).max(), 3)
                plt.plot(np.r_[0.0, 0.0],
                         np.r_[distance.min(), distance.max()],
                         "k-",
                         lw=2)
                ax2.xaxis.set_ticks(xticks)
                ax2.xaxis.set_major_formatter(
                    ticker.FormatStrFormatter("%.0e"))
                ax2.set_xlim(-abs(val_line).max(), abs(val_line).max())

            ax2.set_ylim(distance.min(), distance.max())
            ax2.set_ylabel("Profile (m)")

            if tempstr[0] == "E":
                label = "(" + tempstr[0] + view + ")-field (V/m) "
            elif tempstr[0] == "H":
                label = "(" + tempstr[0] + view + ")-field (A/m) "
            elif tempstr[0] == "J":
                label = "(" + tempstr[0] + view + ")-field (A/m$^2$) "
            else:
                raise NotImplementedError()

            ax2.set_title("EM data")
            ax2.set_xlabel(label)
            ax2.grid(True)
        plt.show()
        pass  # NOTE(review): redundant trailing statement
Esempio n. 7
0
def main():
    """Interactively size a hybrid wind/PV/battery/diesel system and plot it.

    Reads the inverter efficiency and the LOLP / dummy-power constraints
    from stdin, searches candidate capacities with ``Function1``, keeps the
    configurations flagged "Optimal", selects the one with the lowest
    initial capital (IC) cost, re-simulates it with ``Function2`` and plots
    the 24-hour power/SOC/LOLP profiles.

    NOTE(review): relies on module-level names defined elsewhere in this
    file: ``Function1``, ``Function2``, ``C_bat``, ``P_dgr``, ``pd``,
    ``tabulate``, ``plt``, ``np`` and ``ticker`` — confirm against the
    full module.
    """
    # Read the inverter efficiency and the LOLP / dummy-power constraints.
    Ef_inv = float(input('Enter the efficiency of inverter: '))
    Con_LOLP = float(input('Enter the constraints of LOLP: '))
    Con_dummy = float(input('Enter the constraints of dummy: '))
    prompt3 = int(input('choice a Types of Load PatternP_load: ')
                  )  # prompt3 is 1..5 (the user only selects the P_load profile)
    want_iteration = int(input('input want iteration: '))

    prompt1_result = []
    prompt2_result = []
    state_result = []
    dummy_result = []
    LOLP_result = []

    # Run the sizing search; each returned list is indexed by candidate.
    prompt1_result, prompt2_result, state_result, dummy_result, LOLP_result, P_dg_result, DAE = Function1(
        Ef_inv, Con_LOLP, Con_dummy, prompt3, want_iteration)

    cost = []

    # Compute the Initial Capital (IC) cost of every candidate.
    for prompt1, prompt2 in zip(prompt1_result, prompt2_result):
        IC = 1.4 * 3000 * prompt2 + 1.2 * 2290 * prompt1 * round(
            prompt1 / 1) + C_bat * 213 + P_dgr * 850
        cost.append(IC)

    # photovoltaic (pv) = prompt2, wind = prompt1
    data = {
        'pv(kw)': prompt2_result,
        'wind(kw)': prompt1_result,
        'dummy': dummy_result,
        'LOLP_sum': LOLP_result,
        'P_dg': P_dg_result,
        'cost($)': cost,
        'state': state_result
    }

    df = pd.DataFrame(data)
    # print(tabulate(df, headers='keys', tablefmt='psql'))

    # Keep only the feasible ("Optimal") candidates, cheapest first.
    opt_df = df.loc[df['state'] == '***** Optimal *****', :]
    opt_df = opt_df.sort_values(by='cost($)')
    print(tabulate(opt_df, headers='keys', tablefmt='psql'))

    opt_prompt1 = opt_df['wind(kw)']
    opt_prompt2 = opt_df['pv(kw)']

    min_prompt1 = 0
    min_prompt2 = 0
    min_cost = 99999999

    # Re-compute IC for the optimal candidates and track the cheapest one.
    for prompt1, prompt2 in zip(opt_prompt1, opt_prompt2):
        IC = 1.4 * 3000 * prompt2 + 1.2 * 2290 * prompt1 * round(
            prompt1 / 1) + C_bat * 213 + P_dgr * 850
        if min_cost >= IC:
            min_cost = IC
            min_prompt1 = prompt1
            min_prompt2 = prompt2

    optimal_prompt1 = min_prompt1
    optimal_prompt2 = min_prompt2

    # Re-simulate the winning configuration to obtain the hourly profiles.
    P_wind, P_dummy, P_pv, P_bc, P_bd, P_load, SOC, P_dg, LOLP, LOLP_sum = Function2(
        Ef_inv, Con_LOLP, Con_dummy, optimal_prompt1, optimal_prompt2, prompt3)

    # save excel file
    # make_excel(y_pos1, y_pos2, y_pos3, y_pos4, y_pos5, y_pos6)

    # Plot the simulation results: 24 hourly samples, rounded for display.
    x_pos = [x for x in range(24)]
    y_pos1 = [round(P_wind[idx], 3) for idx, y in enumerate(P_wind)]
    y_pos2 = [round(P_dummy[idx], 3) for idx, y in enumerate(P_dummy)]
    y_pos3 = [round(P_pv[idx], 3) for idx, y in enumerate(P_pv)]
    y_pos4 = [round(bc + bd, 3) for bc, bd in zip(P_bc, P_bd)]  # net battery power
    y_pos5 = [round(P_load[idx], 3) for idx, y in enumerate(P_load)]
    y_pos6 = [round(SOC[idx], 3) for idx, y in enumerate(SOC)]
    y_pos7 = [round(P_dg[idx], 3) for idx, y in enumerate(P_dg)]
    y_pos8 = [round(LOLP[idx], 3) for idx, y in enumerate(LOLP)]

    fig = plt.figure()

    # P_wind subplot
    ax1 = fig.add_subplot(4, 2, 1)
    ax1.plot(x_pos, y_pos1, label='P_wind', c='b', ls='--')
    ax1.xaxis.set_ticks(np.arange(0, 24, 2))
    ax1.xaxis.set_major_formatter(ticker.FormatStrFormatter('%d'))
    # ax1.set_xlabel('t(h)')
    # ax1.set_ylabel('Power of Wind Turbine(kWh)')
    plt.legend(loc='best')

    # P_dummy subplot
    ax2 = fig.add_subplot(4, 2, 2)
    ax2.plot(x_pos, y_pos2, label='Dummy', c='g', ls='--')
    ax2.xaxis.set_ticks(np.arange(0, 24, 2))
    ax2.xaxis.set_major_formatter(ticker.FormatStrFormatter('%d'))
    # ax2.set_xlabel('t(h)')
    # ax2.set_ylabel('Power of Dummy(kWh)')
    plt.legend(loc='best')

    # P_pv subplot
    ax3 = fig.add_subplot(4, 2, 3)
    ax3.plot(x_pos, y_pos3, label='P_pv', c='r', ls='--')
    ax3.xaxis.set_ticks(np.arange(0, 24, 2))
    ax3.xaxis.set_major_formatter(ticker.FormatStrFormatter('%d'))
    # ax3.set_xlabel('t(h)')
    # ax3.set_ylabel('Power of Photovoltaic(kWh)')
    plt.legend(loc='best')

    # Pbat (battery charge + discharge) subplot
    ax4 = fig.add_subplot(4, 2, 4)
    ax4.plot(x_pos, y_pos4, label='Pbat', c='c', ls='--')
    ax4.xaxis.set_ticks(np.arange(0, 24, 2))
    ax4.xaxis.set_major_formatter(ticker.FormatStrFormatter('%d'))
    # ax4.set_xlabel('t(h)')
    # ax4.set_ylabel('Power of Battery(kWh)')
    plt.legend(loc='best')

    # P_load subplot
    ax5 = fig.add_subplot(4, 2, 5)
    ax5.plot(x_pos, y_pos5, label='P_load', c='m', ls='--')
    ax5.xaxis.set_ticks(np.arange(0, 24, 2))
    ax5.xaxis.set_major_formatter(ticker.FormatStrFormatter('%d'))
    # ax5.set_xlabel('t(h)')
    # ax5.set_ylabel('Power of Load(kWh)')
    plt.legend(loc='best')

    # SOC (battery state of charge) subplot
    ax6 = fig.add_subplot(4, 2, 6)
    ax6.plot(x_pos, y_pos6, label='SOC', c='y', ls='--')
    ax6.xaxis.set_ticks(np.arange(0, 24, 2))
    ax6.xaxis.set_major_formatter(ticker.FormatStrFormatter('%d'))
    # ax6.set_xlabel('t(h)')
    # ax6.set_ylabel('SOC(%)')
    plt.legend(loc='best')

    # P_dg (diesel generator) subplot
    ax7 = fig.add_subplot(4, 2, 7)
    ax7.plot(x_pos, y_pos7, label='P_dg', c='k', ls='--')
    ax7.xaxis.set_ticks(np.arange(0, 24, 2))
    ax7.xaxis.set_major_formatter(ticker.FormatStrFormatter('%d'))
    # ax7.set_xlabel('t(h)')
    # ax7.set_ylabel('Power of DG(kWh)')
    plt.legend(loc='best')

    # LOLP (loss of load probability) subplot
    ax8 = fig.add_subplot(4, 2, 8)
    ax8.plot(x_pos, y_pos8, label='LOLP', c='tomato', ls='--')
    ax8.xaxis.set_ticks(np.arange(0, 24, 2))
    ax8.xaxis.set_major_formatter(ticker.FormatStrFormatter('%d'))
    # ax8.set_xlabel('t(h)')
    # ax8.set_ylabel('Number of LOLP')

    plt.legend(loc='best')
    plt.tight_layout()
    plt.show()
# NOTE(review): fragment of a matplotlib tick-formatter demo script.
# ``fig``, ``n``, ``setup`` and ``major_formatter`` are defined earlier in
# the file, outside this excerpt -- confirm against the full script.

# Func formatter: fixed major/minor tick spacing, labels rendered by the
# custom ``major_formatter`` (a FuncFormatter, per the caption text below).
setup(ax)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1.00))
ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.25))
ax.xaxis.set_major_formatter(major_formatter)
# Caption describing the formatter used on this axis.
ax.text(0.0,
        0.1,
        'FuncFormatter(lambda x, pos: "[%.2f]" % x)',
        fontsize=15,
        transform=ax.transAxes)

# FormatStr formatter: printf-style label ">%d<" on every major tick.
ax = fig.add_subplot(n, 1, 4)
setup(ax)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1.00))
ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.25))
ax.xaxis.set_major_formatter(ticker.FormatStrFormatter(">%d<"))
ax.text(0.0,
        0.1,
        "FormatStrFormatter('>%d<')",
        fontsize=15,
        transform=ax.transAxes)

# Scalar formatter: matplotlib's default numeric formatter (math-text
# scientific notation) paired with automatic tick locators.
ax = fig.add_subplot(n, 1, 5)
setup(ax)
ax.xaxis.set_major_locator(ticker.AutoLocator())
ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
ax.xaxis.set_major_formatter(ticker.ScalarFormatter(useMathText=True))
ax.text(0.0, 0.1, "ScalarFormatter()", fontsize=15, transform=ax.transAxes)

# StrMethod formatter
    def solve_transient(self, steps):
        """March the coupled flux / delayed-neutron-precursor system in time.

        Runs ``steps`` time steps of size ``self.dt``.  Each step iterates a
        low-order linear solve against a high-order step-characteristic
        (MOC) sweep until flux, current, precursor concentration, Eddington
        factors and the time eigenvalue ``alpha`` are all converged, then
        stores that step's solution.  Finally the flux and precursor
        histories are plotted and written to CSV files under ``output/``.

        NOTE(review): uses ``xrange``, so this method is Python 2 code;
        ``moc``, ``numpy``, ``plt`` and ``ticker`` are module-level imports.
        """

        # Transient history arrays: one column per time step
        # (column 0 holds the initial condition).
        flux_t = numpy.zeros([self.core_mesh_length, steps + 1])
        precursor_t = numpy.zeros([self.core_mesh_length, steps + 1])

        # High-order transport solver used to close the low-order system.
        test_moc = moc.StepCharacteristic(self.input_file_name)

        # Record initial conditions (column index 1 of the solver arrays is
        # the "previous step" slot -- see the promotion at the end of a step).
        flux_t[:, 0] = test_moc.flux[:, 1]
        precursor_t[:, 0] = self.delayed_neutron_precursor_concentration[:, 1]

        self.update_variables(test_moc.flux[:, 1], test_moc.current, test_moc.eddington_factors,
                                   test_moc.delayed_neutron_precursor_concentration)

        for iteration in xrange(steps):
            converged = False
            while not converged:
                self.update_eddington(test_moc.eddington_factors)
                # Store previous solutions to evaluate convergence
                last_flux = numpy.array(self.flux[:, 0])
                last_current = numpy.array(self.current[:, 0])
                last_dnpc = numpy.array(self.delayed_neutron_precursor_concentration[:, 0])

                self.solve_linear_system()

                # Calculate difference between previous and present solutions
                flux_diff = abs(last_flux - self.flux[:, 0])
                # Boundary currents are excluded from the convergence check.
                current_diff = abs(last_current[1:-1] - self.current[1:-1, 0])
                dnpc_diff = abs(last_dnpc - self.delayed_neutron_precursor_concentration[:, 0])
                eddington_diff = abs(test_moc.eddington_factors - test_moc.eddington_factors_old)

                # Relative tolerance for flux and Eddington factors,
                # absolute tolerance for current and precursors.
                if numpy.max(flux_diff / abs(self.flux[:, 0])) < 1E-6 \
                        and numpy.max(current_diff) < 1E-10 \
                        and numpy.max(dnpc_diff) < 1E-10\
                        and numpy.max(eddington_diff / test_moc.eddington_factors) < 1E-6:

                    # Field quantities converged; update alpha and require it
                    # to be converged as well before accepting the step.
                    test_moc.iterate_alpha()

                    # Calculate difference between previous and present alpha
                    alpha_diff = abs(test_moc.alpha - test_moc.alpha_old)/abs(test_moc.alpha_old)

                    if numpy.max(alpha_diff) < 1E-4:
                        converged = True
                        test_moc.flux_t = numpy.array(self.flux[:, 0])

                else:
                    # Not converged: feed the low-order solution back into the
                    # high-order solver and sweep again.
                    test_moc.update_variables(self.flux[:, 0],
                                              self.delayed_neutron_precursor_concentration[:, 0])
                    #test_moc.iterate_alpha()
                    test_moc.solve(False, True)

            # Promote the converged step into the "previous step" slot.
            self.flux[:, 1] = numpy.array(self.flux[:, 0])
            self.current[:, 1] = numpy.array(self.current[:, 0])
            self.delayed_neutron_precursor_concentration[:, 1] = numpy.array(self.delayed_neutron_precursor_concentration[
                                                                      :, 0])

            flux_t[:, iteration + 1] = numpy.array(self.flux[:, 1])
            precursor_t[:, iteration + 1] = numpy.array(self.delayed_neutron_precursor_concentration[:, 0])

        # plot flux at each time step
        x = numpy.arange(0, self.core_mesh_length)
        ax = plt.subplot(111)
        for iteration in xrange(steps + 1):
            ax.plot(x, flux_t[:, iteration], label= "t = " + "{:.1E}".format(self.dt * iteration))
        ax.grid(True)
        plt.xlabel('Position [cm]')
        plt.ylabel('Flux' + r'$\left[\frac{1}{s cm^{2}}\right]$')
        #plt.title('Neutron Flux')
        plt.tight_layout()

        # Shrink current axis by 20%
        box = ax.get_position()
        ax.set_position([box.x0, box.y0, box.width * 0.75, box.height])
        # Put a legend to the right of the current axis
        ax.legend(loc='center left', bbox_to_anchor=(1, -0.1))
        ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.0e'))
        plt.show()

        # plot precursor concentration at each time step
        ax = plt.subplot(111)
        for iteration in xrange(steps + 1):
            ax.plot(x, precursor_t[:, iteration], label="t = " + "{:.1E}".format(self.dt * iteration))
        ax.grid(True)
        plt.xlabel('Position'+r'[cm]')
        plt.ylabel('DNPC' + r'$\left[\frac{1}{cm^3}\right]$')
        plt.title('Precursor Concentration')
        # Shrink current axis by 20%
        box = ax.get_position()
        ax.set_position([box.x0, box.y0, box.width * 0.75, box.height])
        # Put a legend to the right of the current axis
        ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
        plt.show()

        # Persist the full transient histories for post-processing.
        dnpc_filename = "output/precursor_concentration_"+ str(self.input_file_name) +"_N=" + str(self.core_mesh_length) + "_dt=" + str(self.dt) + ".csv"
        flux_filename = "output/flux_" + str(self.input_file_name) +"_N=" + str(self.core_mesh_length) + "_dt=" + str(self.dt) + ".csv"
        numpy.savetxt(dnpc_filename, precursor_t, delimiter=",")
        numpy.savetxt(flux_filename, flux_t, delimiter=",")
Esempio n. 10
0
def plotNdQuantityPerSolver(nRows,
                            nCols,
                            quantity,
                            title,
                            solver_names,
                            line_styles,
                            ax=None,
                            boundUp=None,
                            boundLow=None,
                            yscale='linear',
                            subplot_titles=None,
                            ylabels=None,
                            sharey=False,
                            margins=None,
                            x=None):
    """Plot a multi-component quantity, one subplot per component.

    ``quantity`` has shape (time, n_solvers, n_components); component ``k``
    goes in subplot ``(i, j)`` (column-major order), with one line per
    solver drawn in ``line_styles[s]`` and labelled ``solver_names[s]``.

    Parameters
    ----------
    nRows, nCols : subplot grid size (nRows * nCols >= n_components).
    quantity : 3-d array indexed (time, solver, component).
    title : figure title, used for the saved file name / shared ylabel.
    solver_names : per-solver legend labels, or None for no labels.
    line_styles : per-solver matplotlib style strings.
    ax : optional pre-created array of axes; created when None.
    boundUp, boundLow : optional bounds, either 1-d (constant per
        component) or 2-d (time-varying, shared by all solvers).
    yscale : matplotlib y-axis scale ('linear', 'log', ...).
    subplot_titles, ylabels : optional per-component titles / y labels.
    sharey : share the y axis across subplots (disables per-axis limits).
    margins : margin array (or [upper, lower] pair) shaded around each line.
    x : optional common abscissa; defaults to the sample index.

    Returns the 2-d array of axes.

    NOTE(review): relies on module-level constants defined elsewhere in
    this file (BOUNDS_COLOR, LINE_ALPHA, DEFAULT_LINE_WIDTH, LINE_WIDTH_MIN,
    LINE_WIDTH_RED, SAVE_FIGURES, FILE_EXTENSIONS, FIGURE_PATH, FIGURES_DPI,
    SHOW_LEGENDS).
    """
    # Use identity tests for None: '== None' / '!= None' on numpy arrays
    # broadcasts elementwise and raises ValueError when used in 'if'.
    if ax is None:
        f, ax = plt.subplots(nRows, nCols, sharex=True, sharey=sharey)
    ax = ax.reshape(nRows, nCols)
    k = 0
    if x is None:
        x = list(range(quantity.shape[0]))
    for j in range(nCols):
        for i in range(nRows):
            if k < quantity.shape[2]:
                if subplot_titles is not None:
                    ax[i, j].set_title(subplot_titles[k])
                elif i == 0:
                    # Default titles on the first row only.
                    ax[i, j].set_title(str(k))
                if ylabels is not None:
                    ax[i, j].set_ylabel(ylabels[k])

                ymin = np.min(quantity[:, :, k])
                ymax = np.max(quantity[:, :, k])
                if boundUp is not None:
                    if len(boundUp.shape) == 1:  # constant bound
                        # Only widen the y range when the bound is nearby.
                        if boundUp[k] < 2 * ymax:
                            ymax = np.max([ymax, boundUp[k]])
                        ax[i, j].plot([0, quantity.shape[0] - 1],
                                      [boundUp[k], boundUp[k]],
                                      '--',
                                      color=BOUNDS_COLOR,
                                      alpha=LINE_ALPHA)
                    elif (
                            len(boundUp.shape) == 2
                    ):  # bound variable in time but constant for each solver
                        if np.max(boundUp[:, k]) < 2 * ymax:
                            ymax = np.max(
                                np.concatenate(([ymax], boundUp[:, k])))
                        ax[i, j].plot(boundUp[:, k],
                                      '--',
                                      color=BOUNDS_COLOR,
                                      label='Upper bound',
                                      alpha=LINE_ALPHA)
                if boundLow is not None:
                    if len(boundLow.shape) == 1:  # constant bound
                        if boundLow[k] > 2 * ymin:
                            ymin = np.min([ymin, boundLow[k]])
                        ax[i, j].plot([0, quantity.shape[0] - 1],
                                      [boundLow[k], boundLow[k]],
                                      '--',
                                      color=BOUNDS_COLOR,
                                      alpha=LINE_ALPHA)
                    else:
                        if np.min(boundLow[:, k]) > 2 * ymin:
                            ymin = np.min(
                                np.concatenate(([ymin], boundLow[:, k])))
                        ax[i, j].plot(boundLow[:, k],
                                      '--',
                                      color=BOUNDS_COLOR,
                                      label='Lower bound',
                                      alpha=LINE_ALPHA)
                # Successive solvers are drawn progressively thinner so
                # overlapping lines remain distinguishable.
                lw = DEFAULT_LINE_WIDTH
                for s in range(quantity.shape[1]):
                    p, = ax[i, j].plot(x,
                                       quantity[:, s, k],
                                       line_styles[s],
                                       alpha=LINE_ALPHA,
                                       linewidth=lw)
                    if margins is not None:
                        if type(margins) is list:
                            mp = margins[0]
                            mn = margins[1]
                        else:
                            mp = margins
                            mn = margins
                        ymax = np.max(
                            np.concatenate(
                                ([ymax], quantity[:, s, k] + mp[:, s, k])))
                        ymin = np.min(
                            np.concatenate(
                                ([ymin], quantity[:, s, k] - mn[:, s, k])))
                        ax[i, j].fill_between(x,
                                              quantity[:, s, k] + mp[:, s, k],
                                              quantity[:, s, k] - mn[:, s, k],
                                              alpha=0.15,
                                              linewidth=0,
                                              facecolor='green')
                    if solver_names is not None:
                        p.set_label(solver_names[s])
                    lw = max(LINE_WIDTH_MIN, lw - LINE_WIDTH_RED)
                ax[i, j].set_yscale(yscale)
                ax[i, j].xaxis.set_ticks(np.arange(0, x[-1], x[-1] / 2))
                ax[i, j].yaxis.set_ticks([ymin, ymax])
                # Pick a tick precision that matches the data range.
                if ymax - ymin > 5.0:
                    ax[i, j].yaxis.set_major_formatter(
                        ticker.FormatStrFormatter('%0.0f'))
                elif ymax - ymin > 0.5:
                    ax[i, j].yaxis.set_major_formatter(
                        ticker.FormatStrFormatter('%0.1f'))
                else:
                    ax[i, j].yaxis.set_major_formatter(
                        ticker.FormatStrFormatter('%0.2f'))
                if not sharey:
                    # 10% headroom above and below the data range.
                    ax[i, j].set_ylim([
                        ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin)
                    ])
                k += 1
            else:
                # Unused subplot cells still get a sane tick format.
                ax[i, j].yaxis.set_major_formatter(
                    ticker.FormatStrFormatter('%0.0f'))

    if SAVE_FIGURES:
        for ext in FILE_EXTENSIONS:
            plt.gcf().savefig(FIGURE_PATH + title.replace(' ', '_') + '.' +
                              ext,
                              format=ext,
                              dpi=FIGURES_DPI,
                              bbox_inches='tight')
    else:
        # Integer floor division: 'nRows / 2' is a float in Python 3 and
        # would raise TypeError when used as an index.
        ax[nRows // 2, 0].set_ylabel(title)
    if SHOW_LEGENDS:
        #        leg = ax[0,0].legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
        leg = ax[0, 0].legend(loc='best')


#        leg.get_frame().set_alpha(LEGEND_ALPHA)
    return ax
Esempio n. 11
0
		# turn on grid
		if opts.is_grid == True:
			plt.grid(True)

		if not opts.is_no_top_axis:
			if not opts.is_events:
                		axtop = plt.twiny()
                		axtop.xaxis.tick_top()
                		axtop.xaxis.set_label_position("top")
				axtop.set_xlim(xmin=start_sample, xmax=start_sample+window-1)
                		for label in axtop.get_xticklabels(): label.set_fontsize(opts.fs)
                		plt.xlabel("Samples", fontsize=opts.fs)
                		plt.gca().minorticks_on()

		ax.set_xlim(xmin=time[0], xmax=time[-1])
		ax.yaxis.set_major_formatter(ticker.FormatStrFormatter("%g"))
		ax.xaxis.set_major_formatter(ticker.FormatStrFormatter("%g"))
		ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
		ax.yaxis.set_minor_locator(ticker.AutoMinorLocator())
        	for label in ax.get_xticklabels(): label.set_fontsize(opts.fs)
        	for label in ax.get_yticklabels(): label.set_fontsize(opts.fs)

	elif opts.is_profileonly:   # plotting only the profile

		flux = profile		
		phase = [float(n)/opts.nbins for n in range(bin_start, bin_end + 1)]
		if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1:
			flux = np.append(flux, profile)
			phase = np.append(phase, [float(opts.nbins + n)/opts.nbins for n in range(bin_start, bin_end + 1)])

        	ax = fig.add_subplot(111)
Esempio n. 12
0
def set_ytickformat(ax, fmt="%.4g"):
    """Apply printf-style *fmt* to the y-axis major tick labels of *ax*.

    Returns *ax* so the call can be chained.
    """
    formatter = mticker.FormatStrFormatter(fmt)
    ax.get_yaxis().set_major_formatter(formatter)
    return ax
Esempio n. 13
0
    def __init__(
        self,
        ax,
        cmap=None,
        norm=None,
        alpha=1.0,
        values=None,
        boundaries=None,
        orientation='vertical',
        extend='neither',
        spacing='uniform',  # uniform or proportional
        ticks=None,
        format=None,
        drawedges=False,
        filled=True,
    ):
        """Configure a colorbar drawn inside *ax*.

        Parameters
        ----------
        ax : axes the colorbar is drawn into.
        cmap, norm : colormap and normalization; default to the current
            colormap and a plain ``colors.Normalize`` when omitted.
        alpha : opacity applied to the drawn artists.
        values, boundaries : explicit color values / boundary levels.
        orientation : 'vertical' or 'horizontal'; selects which axis of
            *ax* carries the ticks.
        extend : out-of-range arrow style ('neither', 'min', 'max', 'both').
        spacing : 'uniform' or 'proportional' boundary spacing.
        ticks : iterable of tick locations, or a Locator instance.
        format : None, a printf-style format string, or a Formatter
            instance used for the tick labels.
        drawedges, filled : whether to draw dividers / filled solids.
        """
        self.ax = ax

        # Fall back to the active colormap and a linear normalization.
        if cmap is None:
            cmap = cm.get_cmap()
        if norm is None:
            norm = colors.Normalize()
        self.alpha = alpha
        cm.ScalarMappable.__init__(self, cmap=cmap, norm=norm)
        self.values = values
        self.boundaries = boundaries
        self.extend = extend
        self.spacing = spacing
        self.orientation = orientation
        self.drawedges = drawedges
        self.filled = filled

        # Artist placeholders; the actual artists are created later (see
        # the update_artists call at the end of __init__).
        self.solids = None
        self.lines = None
        self.dividers = None
        self.extension_patch1 = None
        self.extension_patch2 = None

        # The axis object that carries the colorbar's ticks and labels.
        if orientation == "vertical":
            self.cbar_axis = self.ax.yaxis
        else:
            self.cbar_axis = self.ax.xaxis

        # Resolve the tick formatter from the *format* argument.
        if format is None:
            if isinstance(self.norm, colors.LogNorm):
                # change both axis for proper aspect
                self.ax.set_xscale("log")
                self.ax.set_yscale("log")
                self.cbar_axis.set_minor_locator(ticker.NullLocator())
                formatter = ticker.LogFormatter()
            else:
                formatter = None
        elif isinstance(format, str):
            formatter = ticker.FormatStrFormatter(format)
        else:
            formatter = format  # Assume it is a Formatter

        # None means "keep whatever formatter the axis already has".
        if formatter is None:
            formatter = self.cbar_axis.get_major_formatter()
        else:
            self.cbar_axis.set_major_formatter(formatter)

        # *ticks* may be explicit locations or a Locator; otherwise a
        # locator is chosen automatically.
        if np.iterable(ticks):
            self.cbar_axis.set_ticks(ticks)
        elif ticks is not None:
            self.cbar_axis.set_major_locator(ticks)
        else:
            self._select_locator(formatter)

        self._config_axes()

        self.update_artists()

        self.set_label_text('')
Esempio n. 14
0
def create_lc_depreciated(
        log,
        cacheDirectory,
        epochs):
    """*create the atlas lc for one transient*

    **Key Arguments**

    - ``cacheDirectory`` -- the directory to add the lightcurve to
    - ``log`` -- logger
    - ``epochs`` -- dictionary of lightcurve data-points
    

    **Return**

    - None
    

    **Usage**

    .. todo::

        add usage info
        create a sublime snippet for usage

    ```python
    usage code
    ```
    
    """
    log.debug('starting the ``create_lc`` function')

    # c = cyan, o = arange
    magnitudes = {
        'c': {'mjds': [], 'mags': [], 'magErrs': [], 'flux': [], 'zp': []},
        'o': {'mjds': [], 'mags': [], 'magErrs': [], 'flux': [], 'zp': []},
        'I': {'mjds': [], 'mags': [], 'magErrs': [], 'flux': [], 'zp': []},
    }

    limits = {
        'c': {'mjds': [], 'mags': [], 'magErrs': [], 'flux': [], 'zp': []},
        'o': {'mjds': [], 'mags': [], 'magErrs': [], 'flux': [], 'zp': []},
        'I': {'mjds': [], 'mags': [], 'magErrs': [], 'flux': [], 'zp': []},
    }

    discoveryMjd = False
    for epoch in epochs:
        if epoch["filter"] not in ["c", "o", "I"]:
            continue
        objectName = epoch["atlas_designation"]
        if epoch["limiting_mag"] == 1:
            limits[epoch["filter"]]["mjds"].append(epoch["mjd_obs"])
            limits[epoch["filter"]]["mags"].append(epoch["mag"])
            limits[epoch["filter"]]["magErrs"].append(epoch["dm"])
            limits[epoch["filter"]]["zp"].append(epoch["zp"])
            flux = 10**(old_div((float(epoch["zp"]) -
                                 float(epoch["mag"])), 2.5))
            limits[epoch["filter"]]["flux"].append(flux)
        else:
            if not discoveryMjd or discoveryMjd > epoch["mjd_obs"]:
                discoveryMjd = epoch["mjd_obs"]
            magnitudes[epoch["filter"]]["mjds"].append(epoch["mjd_obs"])
            magnitudes[epoch["filter"]]["mags"].append(epoch["mag"])
            magnitudes[epoch["filter"]]["magErrs"].append(epoch["dm"])
            magnitudes[epoch["filter"]]["zp"].append(epoch["zp"])
            flux = 10**(old_div((float(epoch["zp"]) -
                                 float(epoch["mag"])), 2.5))
            magnitudes[epoch["filter"]]["flux"].append(flux)

    # GENERATE THE FIGURE FOR THE PLOT
    fig = plt.figure(
        num=None,
        figsize=(10, 10),
        dpi=100,
        facecolor=None,
        edgecolor=None,
        frameon=True)

    mpl.rc('ytick', labelsize=20)
    mpl.rc('xtick', labelsize=20)
    mpl.rcParams.update({'font.size': 22})

    # FORMAT THE AXES
    ax = fig.add_axes(
        [0.1, 0.1, 0.8, 0.8],
        polar=False,
        frameon=True)
    ax.set_xlabel('MJD', labelpad=20)
    ax.set_ylabel('Apparent Magnitude', labelpad=15)

    # fig.text(0.1, 1.0, "ATLAS", ha="left", color="#2aa198", fontsize=40)
    # fig.text(0.275, 1.0, objectName.replace("ATLAS", ""),
    #          color="#FFA500", ha="left", fontsize=40)
    fig.text(0.1, 1.02, objectName, ha="left", fontsize=40)

    # ax.set_title(objectName, y=1.10, ha='left', position=(0, 1.11))
    plt.setp(ax.xaxis.get_majorticklabels(),
             rotation=45, horizontalalignment='right')
    import matplotlib.ticker as mtick
    ax.xaxis.set_major_formatter(mtick.FormatStrFormatter('%5.0f'))

    # ADD MAGNITUDES AND LIMITS FOR EACH FILTER
    # plt.scatter(magnitudes['o']['mjds'], magnitudes['o']['mags'], s=20., c=None, alpha=0.9,
    # edgecolors='#FFA500', linewidth=1.0, facecolors='#FFA500')
    handles = []

    # SET AXIS LIMITS FOR MAGNTIUDES
    upperMag = -99
    lowerMag = 99

    # DETERMINE THE TIME-RANGE OF DETECTION FOR THE SOURCE
    mjdList = magnitudes['o']['mjds'] + \
        magnitudes['c']['mjds'] + magnitudes['I']['mjds']

    if len(mjdList) == 0:
        return

    lowerDetectionMjd = min(mjdList)
    upperDetectionMjd = max(mjdList)
    mjdLimitList = limits['o']['mjds'] + \
        limits['c']['mjds'] + limits['I']['mjds']
    priorLimitsFlavour = None
    for l in sorted(mjdLimitList):
        if l < lowerDetectionMjd and l > lowerDetectionMjd - 30.:
            priorLimitsFlavour = 1
    if not priorLimitsFlavour:
        for l in mjdLimitList:
            if l < lowerDetectionMjd - 30.:
                priorLimitsFlavour = 2
                lowerMJDLimit = l - 2

    if not priorLimitsFlavour:
        fig.text(0.1, -0.08, "* no recent pre-discovery detection limit > $5\\sigma$",
                 ha="left", fontsize=16)

    postLimitsFlavour = None

    for l in sorted(mjdLimitList):
        if l > upperDetectionMjd and l < upperDetectionMjd + 10.:
            postLimitsFlavour = 1
    if not postLimitsFlavour:
        for l in reversed(mjdLimitList):
            if l > upperDetectionMjd + 10.:
                postLimitsFlavour = 2
                upperMJDLimit = l + 2

    if priorLimitsFlavour or postLimitsFlavour:
        limits = {
            'c': {'mjds': [], 'mags': [], 'magErrs': [], 'flux': [], 'zp': []},
            'o': {'mjds': [], 'mags': [], 'magErrs': [], 'flux': [], 'zp': []},
            'I': {'mjds': [], 'mags': [], 'magErrs': [], 'flux': [], 'zp': []},
        }
        for epoch in epochs:
            objectName = epoch["atlas_designation"]
            if (epoch["limiting_mag"] == 1 and ((priorLimitsFlavour == 1 and epoch["mjd_obs"] > lowerDetectionMjd - 30.) or (priorLimitsFlavour == 2 and epoch["mjd_obs"] > lowerMJDLimit) or priorLimitsFlavour == None) and ((postLimitsFlavour == 1 and epoch["mjd_obs"] < upperDetectionMjd + 10.) or (postLimitsFlavour == 2 and epoch["mjd_obs"] < upperMJDLimit) or postLimitsFlavour == None)):
                limits[epoch["filter"]]["mjds"].append(epoch["mjd_obs"])
                limits[epoch["filter"]]["mags"].append(epoch["mag"])
                limits[epoch["filter"]]["magErrs"].append(epoch["dm"])
                limits[epoch["filter"]]["zp"].append(epoch["zp"])
                flux = 10**(old_div((float(epoch["zp"]) -
                                     float(epoch["mag"])), 2.5))
                limits[epoch["filter"]]["flux"].append(flux)

    allMags = limits['o']['mags'] + limits['c']['mags'] + \
        magnitudes['o']['mags'] + magnitudes['c']['mags']
    magRange = max(allMags) - min(allMags)
    if magRange < 4.:
        deltaMag = 0.1
    else:
        deltaMag = magRange * 0.08

    if len(limits['o']['mjds']):
        limitLeg = plt.scatter(limits['o']['mjds'], limits['o']['mags'], s=170., c=None, alpha=0.8,
                               edgecolors='#FFA500', linewidth=1.0, facecolors='none', label="$5\\sigma$ limit  ")
        handles.append(limitLeg)
        if max(limits['o']['mags']) > upperMag:
            upperMag = max(limits['o']['mags'])
            upperMagIndex = np.argmax(limits['o']['mags'])
            # MAG PADDING
            upperFlux = limits['o']['flux'][
                upperMagIndex] - 10**(old_div(deltaMag, 2.5))

        # if min(limits['o']['mags']) < lowerMag:
        #     lowerMag = min(limits['o']['mags'])
    if len(limits['c']['mjds']):
        limitLeg = plt.scatter(limits['c']['mjds'], limits['c']['mags'], s=170., c=None, alpha=0.8,
                               edgecolors='#2aa198', linewidth=1.0, facecolors='none', label="$5\\sigma$ limit  ")
        if len(handles) == 0:
            handles.append(limitLeg)
        if max(limits['c']['mags']) > upperMag:
            upperMag = max(limits['c']['mags'])
            upperMagIndex = np.argmax(limits['c']['mags'])
            # MAG PADDING
            upperFlux = limits['c']['flux'][
                upperMagIndex] - 10**(old_div(deltaMag, 2.5))
        # if min(limits['c']['mags']) < lowerMag:
        #     lowerMag = min(limits['c']['mags'])

    if len(limits['I']['mjds']):
        limitLeg = plt.scatter(limits['I']['mjds'], limits['I']['mags'], s=170., c=None, alpha=0.8,
                               edgecolors='#dc322f', linewidth=1.0, facecolors='none', label="$5\\sigma$ limit  ")
        if len(handles) == 0:
            handles.append(limitLeg)
        if max(limits['I']['mags']) > upperMag:
            upperMag = max(limits['I']['mags'])
            upperMagIndex = np.argmax(limits['I']['mags'])
            # MAG PADDING
            upperFlux = limits['I']['flux'][
                upperMagIndex] - 10**(old_div(deltaMag, 2.5))
    if len(magnitudes['o']['mjds']):
        orangeMag = plt.errorbar(magnitudes['o']['mjds'], magnitudes['o']['mags'], yerr=magnitudes[
            'o']['magErrs'], color='#FFA500', fmt='o', mfc='#FFA500', mec='#FFA500', zorder=1, ms=12., alpha=0.8, linewidth=1.2,  label='o-band mag ', capsize=10)

        # ERROBAR STYLE
        orangeMag[-1][0].set_linestyle('--')
        # ERROBAR CAP THICKNESS
        orangeMag[1][0].set_markeredgewidth('0.7')
        orangeMag[1][1].set_markeredgewidth('0.7')
        handles.append(orangeMag)
        if max(np.array(magnitudes['o']['mags']) + np.array(magnitudes['o']['magErrs'])) > upperMag:
            upperMag = max(
                np.array(magnitudes['o']['mags']) + np.array(magnitudes['o']['magErrs']))
            upperMagIndex = np.argmax((
                magnitudes['o']['mags']) + np.array(magnitudes['o']['magErrs']))
            # MAG PADDING
            upperFlux = magnitudes['o']['flux'][
                upperMagIndex] - 10**(old_div(deltaMag, 2.5))

        if min(np.array(magnitudes['o']['mags']) - np.array(magnitudes['o']['magErrs'])) < lowerMag:
            lowerMag = min(
                np.array(magnitudes['o']['mags']) - np.array(magnitudes['o']['magErrs']))
            lowerMagIndex = np.argmin((
                magnitudes['o']['mags']) - np.array(magnitudes['o']['magErrs']))
            # MAG PADDING
            lowerFlux = magnitudes['o']['flux'][
                lowerMagIndex] + 10**(old_div(deltaMag, 2.5))
    if len(magnitudes['c']['mjds']):
        cyanMag = plt.errorbar(magnitudes['c']['mjds'], magnitudes['c']['mags'], yerr=magnitudes[
            'c']['magErrs'], color='#2aa198', fmt='o', mfc='#2aa198', mec='#2aa198', zorder=1, ms=12., alpha=0.8, linewidth=1.2, label='c-band mag ', capsize=10)
        # ERROBAR STYLE
        cyanMag[-1][0].set_linestyle('--')
        # ERROBAR CAP THICKNESS
        cyanMag[1][0].set_markeredgewidth('0.7')
        cyanMag[1][1].set_markeredgewidth('0.7')
        handles.append(cyanMag)
        if max(np.array(magnitudes['c']['mags']) + np.array(magnitudes['c']['magErrs'])) > upperMag:
            upperMag = max(
                np.array(magnitudes['c']['mags']) + np.array(magnitudes['c']['magErrs']))
            upperMagIndex = np.argmax((
                magnitudes['c']['mags']) + np.array(magnitudes['c']['magErrs']))
            # MAG PADDING
            upperFlux = magnitudes['c']['flux'][
                upperMagIndex] - 10**(old_div(deltaMag, 2.5))
        if min(np.array(magnitudes['c']['mags']) - np.array(magnitudes['c']['magErrs'])) < lowerMag:
            lowerMag = min(
                np.array(magnitudes['c']['mags']) - np.array(magnitudes['c']['magErrs']))
            lowerMagIndex = np.argmin(
                (magnitudes['c']['mags']) - np.array(magnitudes['c']['magErrs']))
            # MAG PADDING
            lowerFlux = magnitudes['c']['flux'][
                lowerMagIndex] + 10**(old_div(deltaMag, 2.5))
    if len(magnitudes['I']['mjds']):
        cyanMag = plt.errorbar(magnitudes['I']['mjds'], magnitudes['I']['mags'], yerr=magnitudes[
            'I']['magErrs'], color='#dc322f', fmt='o', mfc='#dc322f', mec='#dc322f', zorder=1, ms=12., alpha=0.8, linewidth=1.2, label='I-band mag ', capsize=10)
        # ERROBAR STYLE
        cyanMag[-1][0].set_linestyle('--')
        # ERROBAR CAP THICKNESS
        cyanMag[1][0].set_markeredgewidth('0.7')
        cyanMag[1][1].set_markeredgewidth('0.7')
        handles.append(cyanMag)
        if max(np.array(magnitudes['I']['mags']) + np.array(magnitudes['I']['magErrs'])) > upperMag:
            upperMag = max(
                np.array(magnitudes['I']['mags']) + np.array(magnitudes['I']['magErrs']))
            upperMagIndex = np.argmax((
                magnitudes['I']['mags']) + np.array(magnitudes['I']['magErrs']))
            # MAG PADDING
            upperFlux = magnitudes['I']['flux'][
                upperMagIndex] - 10**(old_div(deltaMag, 2.5))
        if min(np.array(magnitudes['I']['mags']) - np.array(magnitudes['I']['magErrs'])) < lowerMag:
            lowerMag = min(
                np.array(magnitudes['I']['mags']) - np.array(magnitudes['I']['magErrs']))
            lowerMagIndex = np.argmin(
                (magnitudes['I']['mags']) - np.array(magnitudes['I']['magErrs']))
            # MAG PADDING
            lowerFlux = magnitudes['I']['flux'][
                lowerMagIndex] + 10**(old_div(deltaMag, 2.5))

    plt.legend(handles=handles, prop={
               'size': 13.5}, bbox_to_anchor=(1., 1.2), loc=0, borderaxespad=0., ncol=4, scatterpoints=1)

    # SET THE TEMPORAL X-RANGE
    allMjd = limits['o']['mjds'] + limits['c']['mjds'] + \
        magnitudes['o']['mjds'] + magnitudes['c']['mjds']
    xmin = min(allMjd) - 2.
    xmax = max(allMjd) + 2.
    ax.set_xlim([xmin, xmax])

    ax.set_ylim([lowerMag - deltaMag, upperMag + deltaMag])
    # FLIP THE MAGNITUDE AXIS
    plt.gca().invert_yaxis()

    # ADD SECOND Y-AXIS
    ax2 = ax.twinx()
    ax2.set_yscale('log')
    ax2.set_ylim([upperFlux, lowerFlux])
    y_formatter = mpl.ticker.FormatStrFormatter("%d")
    ax2.yaxis.set_major_formatter(y_formatter)

    # RELATIVE TIME SINCE DISCOVERY
    lower, upper = ax.get_xlim()
    from astrocalc.times import conversions
    # CONVERTER TO CONVERT MJD TO DATE
    converter = conversions(
        log=log
    )
    utLower = converter.mjd_to_ut_datetime(mjd=lower, datetimeObject=True)
    utUpper = converter.mjd_to_ut_datetime(mjd=upper, datetimeObject=True)

    # ADD SECOND X-AXIS
    ax3 = ax.twiny()
    ax3.set_xlim([utLower, utUpper])
    ax3.grid(True)
    ax.xaxis.grid(False)
    plt.setp(ax3.xaxis.get_majorticklabels(),
             rotation=45, horizontalalignment='left')
    ax3.xaxis.set_major_formatter(dates.DateFormatter('%b %d'))
    # ax3.set_xlabel('Since Discovery (d)',  labelpad=10,)

    # # Put a legend on plot
    # box = ax.get_position()
    # ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
    # ax.legend(loc='top right', bbox_to_anchor=(1.1, 0.5), prop={'size': 8})

    from matplotlib.ticker import LogLocator
    minorLocator = LogLocator(base=10, subs=[2.0, 5.0])
    if magRange < 1.5:
        minorLocator = LogLocator(
            base=10, subs=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0])
    ax2.yaxis.set_minor_locator(minorLocator)
    ax2.yaxis.set_minor_formatter(y_formatter)
    ax2.tick_params(axis='y', which='major', pad=5)
    ax2.tick_params(axis='y', which='minor', pad=5)
    ax2.set_ylabel('Approx. Counts', rotation=-90.,  labelpad=27)

    ax2.grid(False)
    # SAVE PLOT TO FILE
    pathToOutputPlotFolder = ""
    title = objectName + " forced photometry lc"
    # Recursively create missing directories
    if not os.path.exists(cacheDirectory):
        os.makedirs(cacheDirectory)
    fileName = cacheDirectory + "/atlas_fp_lightcurve.png"
    plt.savefig(fileName, bbox_inches='tight', transparent=False,
                pad_inches=0.1)

    # CLEAR FIGURE
    plt.clf()

    log.debug('completed the ``create_lc`` function')
    return None
Esempio n. 15
0
def combined_learning_plot_patternwise(weights: list,
                                       times: list,
                                       dweights: list,
                                       neurons_t: list,
                                       neuralstates: list,
                                       spp: int,
                                       rot: int,
                                       file: str = None):
    """Render a 3-row summary grid of a pattern-wise learning run.

    Row 0: active-neuron maps per shown step; row 1: weight-matrix
    heatmaps over time (shared color scale, colorbar in the last
    column); row 2: per-neuron weight bars.

    - ``weights`` / ``times`` / ``neurons_t``: full histories,
      sub-sampled every ``spp`` entries for display.
    - ``neuralstates``: one NeuralState per pattern; its length sets
      the number of plot columns.
    - ``dweights``, ``rot``: accepted but unused here — presumably kept
      for signature compatibility with sibling plot helpers; confirm.
    - ``file``: if given, save to the first free ``<file>_<i>.png``;
      otherwise show the figure interactively.
    """
    c_pat = len(neuralstates)
    # columns: one per pattern + title column + trailing colorbar column
    l_ax = c_pat + 2

    # Sub-sample the histories (every spp-th entry)
    w = weights[0::spp]
    t = times[0::spp]
    n = neurons_t[0::spp]
    metadata = ""

    #
    # Prepare plot

    fig, axes = plt.subplots(ncols=l_ax, nrows=3)
    size = 5
    fig.set_size_inches(l_ax * size, 3 * size)

    #
    # Title

    ax = axes[0][0]
    ax.set_title(metadata, fontsize=14, fontweight='bold')
    ax.set_axis_off()

    #
    # Plots

    state_0: NeuralState = neuralstates[0]

    # Shared color scale across all weight heatmaps
    weight_min = np.min(w)
    weight_max = np.max(w)

    major_locator_w = tik.MultipleLocator(state_0.N // 2)
    major_formatter_w = tik.FormatStrFormatter('%d')
    minor_locator_w = tik.MultipleLocator(state_0.N // 4)

    for i in range(l_ax - 1):
        #
        # Neuron Map

        if 0 < i < len(n) + 1:
            ax = axes[0][i]
            state: NeuralState = n[i - 1]

            z = state.as_matrix()

            if i == 1:
                # Only the first map carries the row title / labeling
                neural_map(ax, z, True)
                ax.set_title("Active Neurons")
            else:
                neural_map(ax, z, False)

        #
        # Weights
        ax_w = axes[1][i]
        # 'z' is reassigned here: from this point on it is the weight matrix
        z = w[i]

        im_w = ax_w.imshow(z,
                           cmap="hot",
                           interpolation='none',
                           vmin=weight_min,
                           vmax=weight_max)

        ax_w.invert_yaxis()
        ax_w.set_aspect('equal')
        if i == 0:
            # First column keeps labeled axes; later columns are bare images
            ax_w.yaxis.set_major_locator(major_locator_w)
            ax_w.yaxis.set_major_formatter(major_formatter_w)
            ax_w.yaxis.set_minor_locator(minor_locator_w)

            ax_w.xaxis.set_major_locator(major_locator_w)
            ax_w.xaxis.set_major_formatter(major_formatter_w)
            ax_w.xaxis.set_minor_locator(minor_locator_w)
            ax_w.set_title("Weights: t = " + '% 4.2f' % 0)
        else:
            ax_w.set_axis_off()
            ax_w.set_title("t = " + '% 4.2f' % t[i])

        #
        # Weights per neuron
        ax = axes[2][i]
        if i == 0:
            ax.spines['top'].set_color('none')
            ax.spines['right'].set_color('none')
            ax.set_title("Weight per neuron (colored: only active):")
            wpn_n = np.zeros(state_0.N)
        else:
            ax.set_axis_off()
            # NOTE(review): for i > len(n) this reuses 'state' from an
            # earlier iteration (or raises NameError if the neuron-map
            # branch never ran) — confirm intended.
            wpn_n = state.vec

        weight_per_neuron(ax, z, wpn_n)

        #
        # Colorbar
        if i == l_ax - 2:
            ax = axes[1][-1]
            ax.set_aspect(8)

            fig.colorbar(im_w, orientation='vertical', cax=ax, extend='both')

    #
    # Empty axes
    ax = axes[0][-1]
    fig.delaxes(ax)

    ax = axes[2][-1]
    fig.delaxes(ax)

    #
    # Finish

    fig.tight_layout()

    if not file:
        plt.show()
    else:
        # Never overwrite: probe for the first free numeric suffix
        i = 0
        while os.path.exists('{}_{:d}.png'.format(file, i)):
            i += 1
        file = '{}_{:d}.png'.format(file, i)
        print("Saving results to: " + file)
        plt.savefig(file, dpi=100)

    plt.close()
Esempio n. 16
0
    def _set_ticker(self, a):
        """Install major tick locator and formatter on axis object ``a``.

        Behaviour is driven by instance state: ``self.ticks`` (explicit
        tick positions, a tick count, or None), ``self.scale``
        ('linear'/'log'/'symlog'), ``self.format`` (a named preset or a
        printf-style format string), ``self.base``, ``self.symloglin``
        and ``self.symloglinscale``.  Errors are printed and swallowed so
        a bad ticker setting cannot abort figure rendering.
        """
        try:
            if not isiterable(self.ticks):
                # --- locator: chosen by scale ---------------------------
                if self.scale == 'linear':
                    a.set_major_locator(mticker.AutoLocator())
                elif self.scale == 'log':
                    a.set_major_locator(mticker.LogLocator(self.base))
                elif self.scale == 'symlog':
                    from matplotlib.scale import SymmetricalLogScale
                    scale = SymmetricalLogScale(a,
                                                basex=self.base,
                                                linthreshx=self.symloglin,
                                                linscalex=self.symloglinscale)
                    a.set_major_locator(
                        mticker.SymmetricalLogLocator(scale.get_transform()))
                else:
                    a.set_major_locator(mticker.AutoLocator())

                # --- desired number of ticks ----------------------------
                if self.ticks is not None:
                    value = self.ticks
                else:
                    # Fall back to the figure page's per-axis nticks setting
                    figpage = a.axes.figobj.get_figpage()
                    if self.name[0] == 'x':
                        value = figpage.getp('nticks')[0]
                    elif self.name[0] == 'y':
                        value = figpage.getp('nticks')[1]
                    elif self.name[0] == 'z':
                        value = figpage.getp('nticks')[2]
                    else:
                        # NOTE(review): 'value' stays unbound on this path;
                        # the resulting NameError is absorbed by the outer
                        # handler — confirm whether a default is wanted.
                        pass
                try:
                    # locator_params only works for MaxNLocator-style locators
                    a.axes.locator_params(self.name[0], nbins=value)
                except Exception:
                    # SymmetricalLogLocator / LogLocator: set numticks directly
                    a.get_major_locator().numticks = value
            else:
                # Explicit tick positions were supplied
                a.set_ticks(self.ticks)

            # --- formatter ----------------------------------------------
            if self.format == 'default':
                if self.scale == 'linear':
                    a.set_major_formatter(mticker.ScalarFormatter())
                elif self.scale == 'log':
                    a.set_major_formatter(
                        mticker.LogFormatterMathtext(self.base))
                elif self.scale == 'symlog':
                    a.set_major_formatter(
                        mticker.LogFormatterMathtext(self.base))
                else:
                    a.set_major_formatter(mticker.ScalarFormatter())
            elif self.format == 'scalar':
                a.set_major_formatter(mticker.ScalarFormatter())
            elif self.format == 'scalar(mathtext)':
                a.set_major_formatter(
                    mticker.ScalarFormatter(useOffset=True, useMathText=True))
                a.get_major_formatter().get_offset()
            elif self.format == 'log':
                a.set_major_formatter(mticker.LogFormatter(self.base))
            elif self.format == 'log(mathtext)':
                a.set_major_formatter(mticker.LogFormatterMathtext(self.base))
            elif self.format == 'log(exp)':
                a.set_major_formatter(mticker.LogFormatterExponent(self.base))
            elif self.format == 'none':
                a.set_major_formatter(mticker.NullFormatter())
            else:
                # Any other value is treated as a printf-style format string
                a.set_major_formatter(mticker.FormatStrFormatter(self.format))
        except Exception:
            # Was a bare 'except:', which also trapped KeyboardInterrupt and
            # SystemExit; report the problem instead of hiding it entirely.
            import traceback
            traceback.print_exc()
Esempio n. 17
0
def ShowEnergySpectra():
    """Histogram per-module energy spectra and calibrate via a Landau fit.

    For every file/module in the global ``g_fileNames``, draws the full
    spectrum plus the coincidence-only spectrum from the global ``data``
    rows, fits a Landau to the coincidences when there are enough events,
    and appends a ch/MeV calibration factor to ``g_EnergyCalib``
    (250000 fallback when statistics are too low).  Returns -1 when
    there is nothing to plot.
    """
    import matplotlib.ticker as mtick
    import pylandau
    from scipy.optimize import curve_fit

    print("Producing energy spectra: ")
    if not data:
        print("No data read!")
        return -1
    if not g_fileNames:
        print("No files found!")
        return -1

    fig, axes = plt.subplots(nrows=g_nHistsPerRow, ncols=g_nHistsPerRow)

    print("Energy calibration: ")
    for idx, moduleName in enumerate(g_fileNames):
        panel = axes.flat[idx]

        # Full spectrum for this module
        panel.hist([row[0] for row in data if row[5] == moduleName],
                   1000, (0, 1000000),
                   histtype="step",
                   label="All data")

        # Coincidence-only spectrum (row[6] > 2)
        yhist, bins, patches = panel.hist(
            [row[0] for row in data
             if row[5] == moduleName and (row[6] > 2)],
            1000, (0, 1000000),
            color='red',
            histtype="step",
            label='Coincidences')

        # Reserve a slot for this module's calibration factor
        g_EnergyCalib.append(0)

        nMuonEvents = sum(yhist)
        if nMuonEvents > 100:
            x = np.linspace(0, 1000000, 1000)
            # Seed the fit at the histogram peak
            coeff, pcov = curve_fit(
                pylandau.landau, x, yhist,
                p0=(bins[yhist.argmax()],
                    bins[yhist.argmax()] / 10,
                    yhist.max()),
                bounds=(0, 1000000))
            g_EnergyCalib[idx] = coeff[0] / 2
            print("\tModule{:3.0f}: {:10.1f} ch/MeV {:8.0f} events".format(
                moduleName, g_EnergyCalib[idx], nMuonEvents))
            panel.plot(x, pylandau.landau(x, *coeff), "g-", label='Fit')

            # Fit-parameter box in the top-right corner
            x_coordinate = int(panel.get_xlim()[1]) - 60
            y_coordinate = int(panel.get_ylim()[1]) - 5
            panel.text(
                x_coordinate,
                y_coordinate,
                'mpv: {:10.3f}\nsigma: {:10.3f}\nA :{:10.3f}'.format(
                    coeff[0], coeff[1], coeff[2]),
                horizontalalignment='right',
                verticalalignment='top',
                bbox=dict(facecolor='white',
                          edgecolor='black',
                          boxstyle='square'))
        else:
            # Too few events for a reliable fit: use a default calibration
            g_EnergyCalib[idx] = 250000
            print("\tModule{:3.0f}: {:10.1f} ch/MeV {:8.0f} events".format(
                moduleName, g_EnergyCalib[idx], nMuonEvents))

        panel.set(title="Module: " + str(moduleName),
                  xlabel="Energy [Channels]",
                  ylabel="NoE [#]")
        panel.xaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))

    fig.canvas.mpl_connect('button_press_event', on_click)
    plt.show()
def set_title_and_labels(ax, plt, baseFontSize, title, xDataMin, xDataMax,
                         xAxisLabel, yAxisLabel, yAxisFormatString, r_label,
                         l_label):
    """Apply title, axis limits, labels and tick formatting to a plot.

    Long titles are shortened by trimming characters from the middle.
    Y limits get TIC/BPI-specific or padding-based adjustment; the x
    maximum is pinned to the data maximum (with padding only when there
    is a single x value).  ``r_label``/``l_label`` are placed in the
    figure corners.
    """

    MAX_TITLE_LENGTH = 68

    # Shorten an over-long title by dropping characters from its middle:
    # keep the start of the first word plus everything after the first
    # space; fall back to the title's tail when that is not possible.
    titleToUse = title
    if len(title) > MAX_TITLE_LENGTH:
        titleToUse = title[-MAX_TITLE_LENGTH:]
        spaceIndex = title.find(' ')
        if spaceIndex > 0:
            titleSuffix = title[spaceIndex:]
            charsToKeep = MAX_TITLE_LENGTH - len(titleSuffix)
            if charsToKeep > 0:
                titleToUse = title[:charsToKeep] + titleSuffix

    plt.title(titleToUse, fontsize=baseFontSize + 1)

    # Never show a negative x-axis minimum
    xmin, xmax = plt.xlim()
    if xmin < 0:
        plt.xlim(xmin=0)

    # TIC/BPI plots: anchor y at 0 with 5% headroom.
    # Other plots: pad the y range so it is never too narrow.
    ymin, ymax = plt.ylim()
    if "- TIC -" in title or "- BPI -" in title:
        ymin = 0
        ymax += ymax * 0.05
    else:
        yRange = ymax - ymin
        if yRange < ymax * 0.20:
            # Range below 20% of the max: pad with 5% of the midpoint
            yPadding = (ymin + ymax) / 2.0 * 0.05
        else:
            # Otherwise pad with 5% of the range itself
            yPadding = yRange * 0.05
        ymin -= yPadding
        ymax += yPadding
        if ymin < 0:
            ymin = 0

    plt.ylim(ymin=ymin, ymax=ymax)

    # No padding on the x maximum — unless there is only one x value,
    # in which case pad one unit on each side.
    if xDataMin == xDataMax:
        plt.xlim(xmin=xDataMax - 1)
        plt.xlim(xmax=xDataMax + 1)
    else:
        plt.xlim(xmax=xDataMax)

    plt.xlabel(xAxisLabel, fontsize=baseFontSize)
    plt.ylabel(yAxisLabel, fontsize=baseFontSize)

    plt.xticks(fontsize=baseFontSize - 2)
    plt.yticks(fontsize=baseFontSize - 2)

    ax.yaxis.set_major_formatter(mtick.FormatStrFormatter(yAxisFormatString))
    ax.yaxis.set_minor_locator(mtick.AutoMinorLocator())

    # Narrow x range: keep at least one unit between major ticks
    if xDataMax - xDataMin <= 5:
        ax.xaxis.set_major_locator(mtick.MultipleLocator(1))

    # Thousands-separated integer labels on the x axis
    ax.xaxis.set_major_formatter(
        mtick.FuncFormatter(lambda x, p: format(int(x), ',')))
    ax.xaxis.set_minor_locator(mtick.AutoMinorLocator())

    plt.gcf().text(0.88, 0.02, r_label, fontsize=baseFontSize - 1)

    if len(l_label) > 0:
        plt.gcf().text(0.01, 0.02, l_label, fontsize=baseFontSize - 1)
Esempio n. 19
0
# Enlarge y tick labels, then place roughly five evenly spaced x ticks.
ax.tick_params(axis='y', labelsize=30)
start, end = ax.get_xlim()
# NOTE(review): int(...)/100*100 floors the step to a multiple of 100 only
# under Python 2 integer division; under Python 3 it is a no-op — confirm
# which interpreter this script targets.
ax.xaxis.set_ticks(
    np.arange(start, end, (int((end - start) / 5.1)) / 100 * 100))


def kilos(x, pos):
    """Matplotlib FuncFormatter callback: render *x* in thousands ('K').

    ``pos`` (the tick position) is required by the formatter protocol but
    unused.  Zero gets a plain '0' rather than '0.0K'.
    """
    return '0' if x == 0 else '%0.1fK' % (x * 1e-3)


# X axis shows feature cost in thousands via the kilos() formatter
ax.xaxis.set_major_formatter(ticker.FuncFormatter(kilos))

# ~5 evenly spaced y ticks, shown with two decimals
start, end = ax.get_ylim()
ax.yaxis.set_ticks(np.arange(start, end, ((end - start) / 5.1)))
ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.2f'))

plt.xlabel('Feature Cost', fontsize=32)
plt.ylabel('Explained Variance', fontsize=32)

# Experiment 5 plots ranking quality instead of explained variance
if exp_id == 5:
    plt.ylabel('NDCG@5', fontsize=32)

# File name encodes the experiment configuration (set/group-size/exp id)
plt.savefig('yahoo_results/set%d_size%d_exp%d.png' %
            (set_id, group_size, exp_id),
            bbox_inches='tight',
            dpi=plt.gcf().dpi)

plt.show()
Esempio n. 20
0
def PlotPerf(testResultFile,
             var_column,
             activated=list(optNames.values()),
             plot_bars=True,
             plot_time=True,
             show_legend=True,
             rotateDegree=0):
    """Plot per-optimisation benchmark results from a TSV result file.

    For each optimisation named in ``activated``, draws (depending on the
    flags) a bar series of explored contexts (in millions) and/or a line
    series of mean execution time versus ``var_column``.  The figure is
    written next to ``testResultFile`` as ``<name>.pdf`` and optionally
    shown.

    NOTE: the default for ``activated`` is evaluated once at function
    definition time; later changes to the module-level ``optNames`` are
    not reflected in the default.
    """
    if not plot_bars and not plot_time: raise Exception("Are you kidding me ?")
    fileparent = dirname(testResultFile)
    filename = splitext(basename(testResultFile))[0]
    exportPath = (fileparent +
                  "/" if len(fileparent) > 0 else "") + filename + ".pdf"
    basedf = pd.read_csv(testResultFile, sep='\t', header=0)
    # Map each distinct x value to a 1-based slot so bars are evenly spaced
    xAxis = np.array(sorted(set(basedf[var_column])))
    xAxisFixed = range(1, len(xAxis) + 1)
    xAxisMapping = {x: i for (x, i) in zip(xAxis, xAxisFixed)}
    optCount = len(activated)
    # Bars of all optimisations share 0.7 of a slot, centred on it
    barWidth = np.float64(0.7 / optCount)
    offset = -barWidth * (optCount - 1) / 2
    fig, baseAx = plt.subplots(figsize=FIGSIZE)
    baseAx.set_xlabel(dict_map.get(var_column, var_column), fontsize=FONTSIZE)
    baseAx.set_xlim([0, max(xAxisFixed) + 1])
    baseAx.tick_params(axis='x', labelsize=FONTSIZE)
    baseAx.tick_params(axis='y', labelsize=FONTSIZE)
    plt.xticks(rotation=rotateDegree)

    if plot_bars:
        barsAx = baseAx
        barsAx.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.0f'))
        barsAx.set_ylabel("#Explored x 10^6", fontsize=FONTSIZE)
        if BAR_LOG_SCALE: barsAx.set_yscale("log", basey=10)
        barsAx.set_xlabel(dict_map.get(var_column, var_column),
                          fontsize=FONTSIZE)
        barsAx.yaxis.set_major_formatter(ticker.FormatStrFormatter('%.1f'))

    if plot_time:
        if plot_bars:
            # When both series are drawn: time axis on the left,
            # explored-count axis on the right
            timeAx = baseAx.twinx()
            timeAx.yaxis.tick_left()
            timeAx.yaxis.set_label_position("left")
            barsAx.yaxis.tick_right()
            barsAx.yaxis.set_label_position("right")
        else:
            timeAx = baseAx
        timeAx.set_ylabel("Execution time (s)", fontsize=FONTSIZE)
        if TIME_LOG_SCALE: timeAx.set_yscale("log", basey=10)
        timeAx.tick_params(axis='y', labelsize=FONTSIZE)
        timeAx.set_xlim([0, max(xAxisFixed) + 1])

    plt.xticks(xAxisFixed, xAxis, rotation='vertical')

    for optName in activated:
        # Select the rows matching this optimisation's constraint tuple
        optConstraint = optNamesReversed[optName]
        df = basedf[basedf["closed"] == optConstraint[0]]
        df = df[df["prune"] == optConstraint[1]]
        if len(optConstraint) > 2:
            df = df[df["upperbound_type"] == optConstraint[2]]
        varVector = np.array(df[var_column])

        distinctVarVector = sorted(set(varVector))
        distinctVarVectorFixed = [xAxisMapping[x] for x in distinctVarVector]
        # list(...) keeps this correct on Python 3, where map() returns a
        # lazy iterator and np.array(<map object>) would yield a useless
        # 0-d object array instead of a numeric vector.
        nbVisitedVector = np.array(
            list(map(np.mean, [
                df[df[var_column] == element]["#all_visited_context"]
                for element in distinctVarVector
            ])))
        execTimeVector = np.array(
            list(map(np.mean, [
                df[df[var_column] == element]["#timespent"]
                for element in distinctVarVector
            ])))
        execMeanTimeVector = execTimeVector
        execErrorTimeVector = 0
        if len(distinctVarVectorFixed) > 0:
            if plot_bars:
                barsAx.bar(distinctVarVectorFixed + offset,
                           np.array([x / 10**6 for x in nbVisitedVector]),
                           hatch=hatchTypeByOpt[optName],
                           width=barWidth,
                           align='center',
                           color=colorByOptBars[optName],
                           label=optName,
                           edgecolor=colorByOptEdge[optName])
            if plot_time:
                timeAx.errorbar(distinctVarVectorFixed,
                                execMeanTimeVector,
                                yerr=execErrorTimeVector,
                                fmt=lineTypeByOpt[optName] +
                                markerByOpt[optName],
                                linewidth=LINEWIDTH,
                                markersize=MARKERSIZE,
                                label=optName,
                                color=colorByOptLines[optName])
            if show_legend:
                legend = timeAx.legend(
                    loc='upper right', shadow=True,
                    fontsize=LEGENDFONTSIZE) if plot_time else barsAx.legend(
                        loc='upper right',
                        shadow=True,
                        fontsize=LEGENDFONTSIZE)
        # Shift the next optimisation's bars one bar-width to the right
        offset += barWidth

    fig.tight_layout()

    plt.savefig(exportPath)
    if SHOWPLOT: plt.show()
def visualize_tss_info(api, dataset, vis_save_dir):
    """Scatter-plot accuracy vs #params and #FLOPs for a topology search space.

    Queries ``api`` (a NATS-Bench-style benchmark API — confirm) for every
    architecture's cost and accuracy on ``dataset``, caches the collected
    lists in ``<vis_save_dir>/<dataset>-cache-tss-info.pth`` via torch, and
    saves a 4-panel figure as ``tss-<dataset>.png`` in the same directory.
    ``vis_save_dir`` is expected to be a ``pathlib.Path``.
    """
    vis_save_dir = vis_save_dir.resolve()
    print('{:} start to visualize {:} information'.format(
        time_string(), dataset))
    vis_save_dir.mkdir(parents=True, exist_ok=True)
    cache_file_path = vis_save_dir / '{:}-cache-tss-info.pth'.format(dataset)
    if not cache_file_path.exists():
        # Cold start: pull cost/accuracy for every architecture and cache it
        print('Do not find cache file : {:}'.format(cache_file_path))
        params, flops, train_accs, valid_accs, test_accs = [], [], [], [], []
        for index in range(len(api)):
            cost_info = api.get_cost_info(index, dataset, hp='12')
            params.append(cost_info['params'])
            flops.append(cost_info['flops'])
            # accuracy
            info = api.get_more_info(index, dataset, hp='200', is_random=False)
            train_accs.append(info['train-accuracy'])
            test_accs.append(info['test-accuracy'])
            if dataset == 'cifar10':
                # cifar10's validation numbers live under 'cifar10-valid'
                info = api.get_more_info(index,
                                         'cifar10-valid',
                                         hp='200',
                                         is_random=False)
                valid_accs.append(info['valid-accuracy'])
            else:
                valid_accs.append(info['valid-accuracy'])
            print('')
        info = {
            'params': params,
            'flops': flops,
            'train_accs': train_accs,
            'valid_accs': valid_accs,
            'test_accs': test_accs
        }
        torch.save(info, cache_file_path)
    else:
        # Warm start: reload the previously collected lists
        print('Find cache file : {:}'.format(cache_file_path))
        info = torch.load(cache_file_path)
        params, flops, train_accs, valid_accs, test_accs = info[
            'params'], info['flops'], info['train_accs'], info[
                'valid_accs'], info['test_accs']
    print('{:} collect data done.'.format(time_string()))

    # Reference architectures to highlight: a ResNet-like cell and the
    # all-3x3-conv (largest) candidate
    resnet = [
        '|nor_conv_3x3~0|+|none~0|nor_conv_3x3~1|+|skip_connect~0|none~1|skip_connect~2|'
    ]
    resnet_indexes = [api.query_index_by_arch(x) for x in resnet]
    largest_indexes = [
        api.query_index_by_arch(
            '|nor_conv_3x3~0|+|nor_conv_3x3~0|nor_conv_3x3~1|+|nor_conv_3x3~0|nor_conv_3x3~1|nor_conv_3x3~2|'
        )
    ]

    indexes = list(range(len(params)))
    dpi, width, height = 250, 8500, 1300
    figsize = width / float(dpi), height / float(dpi)
    LabelSize, LegendFontsize = 24, 24
    # resnet_scale, resnet_alpha = 120, 0.5
    # Marker size / alpha for the highlighted reference architectures
    xscale, xalpha = 120, 0.8

    fig, axs = plt.subplots(1, 4, figsize=figsize)
    # ax1, ax2, ax3, ax4, ax5 = axs
    for ax in axs:
        for tick in ax.xaxis.get_major_ticks():
            tick.label.set_fontsize(LabelSize)
        ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%.0f'))
        for tick in ax.yaxis.get_major_ticks():
            tick.label.set_fontsize(LabelSize)
    ax2, ax3, ax4, ax5 = axs
    # ax1.xaxis.set_ticks(np.arange(0, max(indexes), max(indexes)//5))
    # ax1.scatter(indexes, test_accs, marker='o', s=0.5, c='tab:blue')
    # ax1.set_xlabel('architecture ID', fontsize=LabelSize)
    # ax1.set_ylabel('test accuracy (%)', fontsize=LabelSize)

    # Panel 1: train accuracy vs #parameters
    ax2.scatter(params, train_accs, marker='o', s=0.5, c='tab:blue')
    ax2.scatter([params[x] for x in resnet_indexes],
                [train_accs[x] for x in resnet_indexes],
                marker='*',
                s=xscale,
                c='tab:orange',
                label='ResNet',
                alpha=xalpha)
    ax2.scatter([params[x] for x in largest_indexes],
                [train_accs[x] for x in largest_indexes],
                marker='x',
                s=xscale,
                c='tab:green',
                label='Largest Candidate',
                alpha=xalpha)
    ax2.set_xlabel('#parameters (MB)', fontsize=LabelSize)
    ax2.set_ylabel('train accuracy (%)', fontsize=LabelSize)
    ax2.legend(loc=4, fontsize=LegendFontsize)

    # Panel 2: test accuracy vs #parameters
    ax3.scatter(params, test_accs, marker='o', s=0.5, c='tab:blue')
    ax3.scatter([params[x] for x in resnet_indexes],
                [test_accs[x] for x in resnet_indexes],
                marker='*',
                s=xscale,
                c='tab:orange',
                label='ResNet',
                alpha=xalpha)
    ax3.scatter([params[x] for x in largest_indexes],
                [test_accs[x] for x in largest_indexes],
                marker='x',
                s=xscale,
                c='tab:green',
                label='Largest Candidate',
                alpha=xalpha)
    ax3.set_xlabel('#parameters (MB)', fontsize=LabelSize)
    ax3.set_ylabel('test accuracy (%)', fontsize=LabelSize)
    ax3.legend(loc=4, fontsize=LegendFontsize)

    # Panel 3: train accuracy vs FLOPs
    ax4.scatter(flops, train_accs, marker='o', s=0.5, c='tab:blue')
    ax4.scatter([flops[x] for x in resnet_indexes],
                [train_accs[x] for x in resnet_indexes],
                marker='*',
                s=xscale,
                c='tab:orange',
                label='ResNet',
                alpha=xalpha)
    ax4.scatter([flops[x] for x in largest_indexes],
                [train_accs[x] for x in largest_indexes],
                marker='x',
                s=xscale,
                c='tab:green',
                label='Largest Candidate',
                alpha=xalpha)
    ax4.set_xlabel('#FLOPs (M)', fontsize=LabelSize)
    ax4.set_ylabel('train accuracy (%)', fontsize=LabelSize)
    ax4.legend(loc=4, fontsize=LegendFontsize)

    # Panel 4: test accuracy vs FLOPs
    ax5.scatter(flops, test_accs, marker='o', s=0.5, c='tab:blue')
    ax5.scatter([flops[x] for x in resnet_indexes],
                [test_accs[x] for x in resnet_indexes],
                marker='*',
                s=xscale,
                c='tab:orange',
                label='ResNet',
                alpha=xalpha)
    ax5.scatter([flops[x] for x in largest_indexes],
                [test_accs[x] for x in largest_indexes],
                marker='x',
                s=xscale,
                c='tab:green',
                label='Largest Candidate',
                alpha=xalpha)
    ax5.set_xlabel('#FLOPs (M)', fontsize=LabelSize)
    ax5.set_ylabel('test accuracy (%)', fontsize=LabelSize)
    ax5.legend(loc=4, fontsize=LegendFontsize)

    save_path = vis_save_dir / 'tss-{:}.png'.format(dataset)
    fig.savefig(save_path, dpi=dpi, bbox_inches='tight', format='png')
    print('{:} save into {:}'.format(time_string(), save_path))
    plt.close('all')
Esempio n. 22
0
#  + https://stackoverflow.com/questions/38152356/matplotlib-dollar-sign-with-thousands-comma-tick-labels
#  + https://pbpython.com/effective-matplotlib.html#customizing-the-plot
#  + https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots.html#matplotlib.pyplot.subplots

# Horizontal bar chart of top-selling products, best seller at the top.
chart_title = "Top Selling Products (March 2018)" #TODO: get month and year

# Parallel name/sales lists, reversed so barh draws top sellers on top
sorted_products = [d["name"] for d in top_sellers][::-1]
sorted_sales = [d["monthly_sales"] for d in top_sellers][::-1]

# Create the figure/axes first so the dollar formatter is installed
# before the bars are drawn
fig, ax = plt.subplots()
usd_formatter = ticker.FormatStrFormatter('$%1.0f')
ax.xaxis.set_major_formatter(usd_formatter)

# Chart construction
plt.barh(sorted_products, sorted_sales)
plt.title(chart_title)
plt.ylabel("Product")
plt.xlabel("Monthly Sales (USD)")

# Keep labels from getting clipped at the figure edges
plt.tight_layout()
plt.show()
Esempio n. 23
0
# Log-log comparison of relative error vs noise level for three distances
# (Wasserstein, SRW, PRW).  For each, the mean curve is drawn with shaded
# 25-75 and 10-90 percentile bands in the line's own color.
plt.figure(figsize=(16, 8))

plotW, = plt.loglog(ind[1:], W_mean, 'o-', label='Wasserstein', lw=3, ms=10)
col_W = plotW.get_color()
plt.fill_between(ind[1:], W_25, W_75, facecolor=col_W, alpha=0.3)
plt.fill_between(ind[1:], W_10, W_90, facecolor=col_W, alpha=0.2)

plotSRW, = plt.loglog(ind[1:], SRW_mean, 'o-', label='SRW', lw=3, ms=10)
col_SRW = plotSRW.get_color()
plt.fill_between(ind[1:], SRW_25, SRW_75, facecolor=col_SRW, alpha=0.3)
plt.fill_between(ind[1:], SRW_10, SRW_90, facecolor=col_SRW, alpha=0.2)

plotPRW, = plt.loglog(ind[1:], PRW_mean, 'o-', label='PRW', lw=3, ms=10)
col_PRW = plotPRW.get_color()
plt.fill_between(ind[1:], PRW_25, PRW_75, facecolor=col_PRW, alpha=0.3)
plt.fill_between(ind[1:], PRW_10, PRW_90, facecolor=col_PRW, alpha=0.2)

plt.xlabel('Noise level (log scale)', fontsize=25)
plt.ylabel('Relative error (log scale)', fontsize=25)

# Ticks at the actual noise levels, formatted compactly
plt.yticks(fontsize=20)
plt.xticks(ind[1:], fontsize=20)
plt.gca().xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.2g'))

plt.legend(loc=2, fontsize=18)
plt.grid(ls=':')
plt.savefig('figs/exp2_noise_level.png')

plt.show()
Esempio n. 24
0
def plot_timeseries(subplot=None, options=None):
    """Plot per-object Kalman-tracked timeseries into the supplied axes.

    For every obj_id selected by *options*, draws whichever panels are
    present in *subplot*: position ('x', 'y', 'z'), orientation
    ('dx', 'dy', 'dz'), speed ('vel', 'xy_vel'), acceleration ('accel'),
    raw frame number ('frame'), covariance ('P55'), and histograms
    ('z_hist', 'vel_hist'). Data are split into 'flying' and 'walking'
    segments using walking bouts from the stimulus XML, if given.

    NOTE(review): this is Python 2 code (print statements,
    dict.itervalues); numpy.bool is also removed in modern NumPy.

    :param subplot: dict mapping panel name -> matplotlib axes; only the
        panels present are drawn.
    :param options: options object providing kalman_filename, start, stop,
        obj_only, fps, dynamic_model, use_kalman_smoothing, stim_xml, fuse,
        smooth_orientations, up_dir, show_3d_orientations; optional
        attributes missing from it are defaulted below.
    :returns: dict mapping each plotted matplotlib line -> its obj_id
        (useful for pick-event handlers).
    :raises ValueError: if kalman_filename is None, or a frame cannot be
        found in the timestamp file.
    """
    kalman_filename = options.kalman_filename

    # Default any optional attributes the caller did not set.
    if not hasattr(options, 'frames'):
        options.frames = False

    if not hasattr(options, 'show_landing'):
        options.show_landing = False

    if not hasattr(options, 'unicolor'):
        options.unicolor = False

    if not hasattr(options, 'show_obj_id'):
        options.show_obj_id = True

    if not hasattr(options, 'show_track_ends'):
        options.show_track_ends = False

    start = options.start
    stop = options.stop
    obj_only = options.obj_only
    fps = options.fps
    dynamic_model = options.dynamic_model
    use_kalman_smoothing = options.use_kalman_smoothing

    if not use_kalman_smoothing:
        if (dynamic_model is not None):
            print >> sys.stderr, (
                'WARNING: disabling Kalman smoothing '
                '(--disable-kalman-smoothing) is incompatable '
                'with setting dynamic model options (--dynamic-model)')

    ca = core_analysis.get_global_CachingAnalyzer()

    if kalman_filename is None:
        raise ValueError('No kalman_filename given. Nothing to do.')

    # MD5 of the whole data file, reported alongside the filename below.
    m = hashlib.md5()
    m.update(open(kalman_filename, mode='rb').read())
    actual_md5 = m.hexdigest()
    (obj_ids, use_obj_ids, is_mat_file, data_file,
     extra) = ca.initial_file_load(kalman_filename)
    print 'opened kalman file %s %s, %d obj_ids' % (
        kalman_filename, actual_md5, len(use_obj_ids))

    # Restrict to obj_ids that have data inside [start, stop], if requested.
    if 'frames' in extra:
        if (start is not None) or (stop is not None):
            valid_frames = np.ones((len(extra['frames']), ), dtype=np.bool)
            if start is not None:
                valid_frames &= extra['frames'] >= start
            if stop is not None:
                valid_frames &= extra['frames'] <= stop
            this_use_obj_ids = np.unique(obj_ids[valid_frames])
            use_obj_ids = list(set(use_obj_ids).intersection(this_use_obj_ids))

    include_obj_ids = None
    exclude_obj_ids = None
    do_fuse = False
    if options.stim_xml:
        # The stimulus XML determines which obj_ids to include/exclude and
        # when the animal was walking rather than flying.
        file_timestamp = data_file.filename[4:19]
        fanout = xml_stimulus.xml_fanout_from_filename(options.stim_xml)
        include_obj_ids, exclude_obj_ids = fanout.get_obj_ids_for_timestamp(
            timestamp_string=file_timestamp)
        walking_start_stops = fanout.get_walking_start_stops_for_timestamp(
            timestamp_string=file_timestamp)
        if include_obj_ids is not None:
            use_obj_ids = include_obj_ids
        if exclude_obj_ids is not None:
            use_obj_ids = list(set(use_obj_ids).difference(exclude_obj_ids))
        if options.fuse:
            do_fuse = True
    else:
        walking_start_stops = []

    if dynamic_model is None:
        # Fall back to the model recorded in the file; strip any 'EKF '
        # prefix before using it for smoothing.
        dynamic_model = extra['dynamic_model_name']
        print 'detected file loaded with dynamic model "%s"' % dynamic_model
        if dynamic_model.startswith('EKF '):
            dynamic_model = dynamic_model[4:]
        print '  for smoothing, will use dynamic model "%s"' % dynamic_model

    if not is_mat_file:
        mat_data = None

        if fps is None:
            fps = result_utils.get_fps(data_file, fail_on_error=False)

        if fps is None:
            fps = 100.0
            import warnings
            warnings.warn('Setting fps to default value of %f' % fps)

        tz = result_utils.get_tz(data_file)

    dt = 1.0 / fps  # seconds per frame

    all_vels = []

    if obj_only is not None:
        use_obj_ids = [i for i in use_obj_ids if i in obj_only]

    allX = {}
    frame0 = None  # first frame plotted; anchors the time axis

    line2obj_id = {}  # return value: plotted line -> obj_id
    Xz_all = []

    fuse_did_once = False

    if not hasattr(options, 'timestamp_file'):
        options.timestamp_file = None

    if not hasattr(options, 'ori_qual'):
        options.ori_qual = None

    if options.timestamp_file is not None:
        # Load frame -> wall-clock timestamp mapping once, up front.
        h5 = tables.open_file(options.timestamp_file, mode='r')
        print 'reading timestamps and frames'
        table_data2d_frames = h5.root.data2d_distorted.read(field='frame')
        table_data2d_timestamps = h5.root.data2d_distorted.read(
            field='timestamp')
        print 'done'
        h5.close()
        table_data2d_frames_find = utils.FastFinder(table_data2d_frames)

    if len(use_obj_ids) == 0:
        print 'No obj_ids to plot, quitting'
        sys.exit(0)

    time0 = 0.0  # set default value

    for obj_id in use_obj_ids:
        if not do_fuse:
            try:
                kalman_rows = ca.load_data(
                    obj_id,
                    data_file,
                    use_kalman_smoothing=use_kalman_smoothing,
                    dynamic_model_name=dynamic_model,
                    return_smoothed_directions=options.smooth_orientations,
                    frames_per_second=fps,
                    up_dir=options.up_dir,
                    min_ori_quality_required=options.ori_qual,
                )
            except core_analysis.ObjectIDDataError:
                continue
            #kobs_rows = ca.load_dynamics_free_MLE_position( obj_id, data_file )
        else:
            # Fusing merges all obj_ids into one trajectory; do it only once.
            if options.show_3d_orientations:
                raise NotImplementedError('orientation data is not supported '
                                          'when fusing obj_ids')
            if fuse_did_once:
                break
            fuse_did_once = True
            kalman_rows = flydra_analysis.a2.flypos.fuse_obj_ids(
                use_obj_ids,
                data_file,
                dynamic_model_name=dynamic_model,
                frames_per_second=fps)
        frame = kalman_rows['frame']

        # Trim this object's rows to the requested frame range.
        if (start is not None) or (stop is not None):
            valid_cond = numpy.ones(frame.shape, dtype=numpy.bool)

            if start is not None:
                valid_cond = valid_cond & (frame >= start)

            if stop is not None:
                valid_cond = valid_cond & (frame <= stop)

            kalman_rows = kalman_rows[valid_cond]
            if not len(kalman_rows):
                continue

        walking_and_flying_kalman_rows = kalman_rows  # preserve original data

        for flystate in ['flying', 'walking']:
            frame = walking_and_flying_kalman_rows['frame']  # restore
            if flystate == 'flying':
                # assume flying unless we're told it's walking
                state_cond = numpy.ones(frame.shape, dtype=numpy.bool)
            else:
                state_cond = numpy.zeros(frame.shape, dtype=numpy.bool)

            if len(walking_start_stops):
                for walkstart, walkstop in walking_start_stops:
                    frame = walking_and_flying_kalman_rows['frame']  # restore

                    # handle each bout of walking
                    walking_bout = numpy.ones(frame.shape, dtype=numpy.bool)
                    if walkstart is not None:
                        walking_bout &= (frame >= walkstart)
                    if walkstop is not None:
                        walking_bout &= (frame <= walkstop)
                    if flystate == 'flying':
                        state_cond &= ~walking_bout
                    else:
                        state_cond |= walking_bout

                kalman_rows = np.take(walking_and_flying_kalman_rows,
                                      np.nonzero(state_cond)[0])
                assert len(kalman_rows) == np.sum(state_cond)
                frame = kalman_rows['frame']

            if frame0 is None:
                frame0 = int(frame[0])

            time0 = 0.0
            if options.timestamp_file is not None:
                frame_idxs = table_data2d_frames_find.get_idxs_of_equal(frame0)
                if len(frame_idxs):
                    time0 = table_data2d_timestamps[frame_idxs[0]]
                else:
                    # (sic: "fine" is a typo for "find" in this message)
                    raise ValueError(
                        'could not fine frame %d in timestamp file' % frame0)

            Xx = kalman_rows['x']
            Xy = kalman_rows['y']
            Xz = kalman_rows['z']

            # Orientation vectors: smoothed if requested, raw if available.
            Dx = Dy = Dz = None
            if options.smooth_orientations:
                Dx = kalman_rows['dir_x']
                Dy = kalman_rows['dir_y']
                Dz = kalman_rows['dir_z']
            elif 'rawdir_x' in kalman_rows.dtype.fields:
                Dx = kalman_rows['rawdir_x']
                Dy = kalman_rows['rawdir_y']
                Dz = kalman_rows['rawdir_z']

            # x axis is either seconds (converted from frames) or raw frames.
            if not options.frames:
                f2t = Frames2Time(frame0, fps, time0)
            else:

                def identity(x):
                    return x

                f2t = identity

            kws = {
                'linewidth': 2,
                'picker': 5,  # enable pick events within 5 points
            }
            if options.unicolor:
                kws['color'] = 'k'

            line = None

            if 'frame' in subplot:
                subplot['frame'].plot(f2t(frame), frame)

            if 'P55' in subplot:
                subplot['P55'].plot(f2t(frame), kalman_rows['P55'])

            if 'x' in subplot:
                line, = subplot['x'].plot(f2t(frame),
                                          Xx,
                                          label='obj %d (%s)' %
                                          (obj_id, flystate),
                                          **kws)
                line2obj_id[line] = obj_id
                # reuse this line's color for the object's other panels
                kws['color'] = line.get_color()

            if 'y' in subplot:
                line, = subplot['y'].plot(f2t(frame),
                                          Xy,
                                          label='obj %d (%s)' %
                                          (obj_id, flystate),
                                          **kws)
                line2obj_id[line] = obj_id
                kws['color'] = line.get_color()

            if 'z' in subplot:
                frame_data = numpy.ma.getdata(
                    frame)  # works if frame is masked or not

                # plot landing time
                if options.show_landing:
                    if flystate == 'flying':  # only do this once
                        for walkstart, walkstop in walking_start_stops:
                            if walkstart in frame_data:
                                landing_dix = numpy.nonzero(
                                    frame_data == walkstart)[0][0]
                                subplot['z'].plot([f2t(walkstart)],
                                                  [Xz.data[landing_dix]],
                                                  'rD',
                                                  ms=10,
                                                  label='landing')

                if options.show_track_ends:
                    if flystate == 'flying':  # only do this once
                        subplot['z'].plot(f2t([frame_data[0], frame_data[-1]]),
                                          [
                                              numpy.ma.getdata(Xz)[0],
                                              numpy.ma.getdata(Xz)[-1]
                                          ],
                                          'cd',
                                          ms=6,
                                          label='track end')

                line, = subplot['z'].plot(f2t(frame),
                                          Xz,
                                          label='obj %d (%s)' %
                                          (obj_id, flystate),
                                          **kws)
                kws['color'] = line.get_color()
                line2obj_id[line] = obj_id

                if flystate == 'flying':
                    # only do this once
                    if options.show_obj_id:
                        subplot['z'].text(f2t(frame_data[0]),
                                          numpy.ma.getdata(Xz)[0],
                                          '%d' % (obj_id, ))
                        line2obj_id[line] = obj_id

            if flystate == 'flying':
                # collect z samples for the z_hist panel below
                Xz_all.append(np.ma.array(Xz).compressed())
                #bins = np.linspace(0,.8,30)
                #print 'Xz.shape',Xz.shape
                #pylab.hist(Xz, bins=bins)

            for (dir_var, Dd) in [('dx', Dx), ('dy', Dy), ('dz', Dz)]:
                if dir_var in subplot:
                    line, = subplot[dir_var].plot(f2t(frame),
                                                  Dd,
                                                  label='obj %d (%s)' %
                                                  (obj_id, flystate),
                                                  **kws)
                    line2obj_id[line] = obj_id
                    kws['color'] = line.get_color()

            if numpy.__version__ >= '1.2.0':
                X = numpy.ma.array((Xx, Xy, Xz))
            else:
                # See http://scipy.org/scipy/numpy/ticket/820
                X = numpy.ma.vstack(
                    (Xx[numpy.newaxis, :], Xy[numpy.newaxis, :],
                     Xz[numpy.newaxis, :]))

            # Speed via central differences (loses one sample at each end).
            dist_central_diff = (X[:, 2:] - X[:, :-2])
            vel_central_diff = dist_central_diff / (2 * dt)

            vel2mag = numpy.ma.sqrt(numpy.ma.sum(vel_central_diff**2, axis=0))
            xy_vel2mag = numpy.ma.sqrt(
                numpy.ma.sum(vel_central_diff[:2, :]**2, axis=0))

            frames2 = frame[1:-1]

            # Acceleration: central difference of the speed magnitude.
            accel4mag = (vel2mag[2:] - vel2mag[:-2]) / (2 * dt)
            frames4 = frames2[1:-1]

            if 'vel' in subplot:
                line, = subplot['vel'].plot(f2t(frames2),
                                            vel2mag,
                                            label='obj %d (%s)' %
                                            (obj_id, flystate),
                                            **kws)
                line2obj_id[line] = obj_id
                kws['color'] = line.get_color()

            if 'xy_vel' in subplot:
                line, = subplot['xy_vel'].plot(f2t(frames2),
                                               xy_vel2mag,
                                               label='obj %d (%s)' %
                                               (obj_id, flystate),
                                               **kws)
                line2obj_id[line] = obj_id
                kws['color'] = line.get_color()

            if len(accel4mag.compressed()) and 'accel' in subplot:
                line, = subplot['accel'].plot(f2t(frames4),
                                              accel4mag,
                                              label='obj %d (%s)' %
                                              (obj_id, flystate),
                                              **kws)
                line2obj_id[line] = obj_id
                kws['color'] = line.get_color()

            if flystate == 'flying':
                valid_vel2mag = vel2mag.compressed()
                all_vels.append(valid_vel2mag)
    if len(all_vels):
        all_vels = numpy.hstack(all_vels)
    else:
        all_vels = numpy.array([], dtype=float)

    if 1:
        # Drop implausibly large speeds before histogramming.
        cond = all_vels < 2.0
        if numpy.ma.sum(cond) != len(all_vels):
            all_vels = all_vels[cond]
            import warnings
            warnings.warn('clipping all velocities > 2.0 m/s')

    if not options.frames:
        xlabel = 'time (s)'
    else:
        xlabel = 'frame'

    # NOTE(review): dict.itervalues is Python 2 only.
    for ax in subplot.itervalues():
        ax.xaxis.set_major_formatter(ticker.FormatStrFormatter("%d"))
        ax.yaxis.set_major_formatter(ticker.FormatStrFormatter("%s"))

    fixup_ax = FixupAxesWithTimeZone(tz).fixup_ax

    # Per-panel axis limits/labels; fixup_ax applies timezone-aware time
    # formatting whenever a real start time (time0) is known.
    if 'frame' in subplot:
        if time0 != 0.0:
            fixup_ax(subplot['frame'])
        else:
            subplot['frame'].set_xlabel(xlabel)

    if 'x' in subplot:
        subplot['x'].set_ylim([-1, 1])
        subplot['x'].set_ylabel(r'x (m)')
        if time0 != 0.0:
            fixup_ax(subplot['x'])
        else:
            subplot['x'].set_xlabel(xlabel)

    if 'y' in subplot:
        subplot['y'].set_ylim([-0.5, 1.5])
        subplot['y'].set_ylabel(r'y (m)')
        if time0 != 0.0:
            fixup_ax(subplot['y'])
        else:
            subplot['y'].set_xlabel(xlabel)

    # Height of the shortest post in the stimulus, drawn as a magenta line.
    max_z = None
    if options.stim_xml:
        file_timestamp = options.kalman_filename[4:19]
        stim_xml = xml_stimulus.xml_stimulus_from_filename(
            options.stim_xml, timestamp_string=file_timestamp)
        post_max_zs = []
        for post_num, post in enumerate(stim_xml.iterate_posts()):
            post_max_zs.append(max(post['verts'][0][2],
                                   post['verts'][1][2]))  # max post height
        if len(post_max_zs):
            max_z = min(post_max_zs)  # take shortest of posts

    if 'z' in subplot:
        subplot['z'].set_ylim([0, 1])
        subplot['z'].set_ylabel(r'z (m)')
        if max_z is not None:
            subplot['z'].axhline(max_z, color='m')
        if time0 != 0.0:
            fixup_ax(subplot['z'])
        else:
            subplot['z'].set_xlabel(xlabel)

    for dir_var in ['dx', 'dy', 'dz']:
        if dir_var in subplot:
            subplot[dir_var].set_ylabel(dir_var)
            if time0 != 0.0:
                fixup_ax(subplot[dir_var])
            else:
                subplot[dir_var].set_xlabel(xlabel)

    if 'z_hist' in subplot:  # and flystate=='flying':
        Xz_all = np.hstack(Xz_all)
        bins = np.linspace(0, .8, 30)
        ax = subplot['z_hist']
        ax.hist(Xz_all, bins=bins, orientation='horizontal')
        ax.set_xticks([])
        ax.set_yticks([])
        xlim = tuple(ax.get_xlim())  # matplotlib 0.98.3 returned np.array view
        ax.set_xlim((xlim[1], xlim[0]))
        # NOTE(review): axhline(None) if no posts -- assumes stim_xml posts
        # exist whenever z_hist is requested; confirm.
        ax.axhline(max_z, color='m')

    if 'vel' in subplot:
        subplot['vel'].set_ylim([0, 2])
        subplot['vel'].set_ylabel(r'vel (m/s)')
        subplot['vel'].set_xlabel(xlabel)
        if time0 != 0.0:
            fixup_ax(subplot['vel'])
        else:
            subplot['vel'].set_xlabel(xlabel)

    if 'xy_vel' in subplot:
        #subplot['xy_vel'].set_ylim([0,2])
        subplot['xy_vel'].set_ylabel(r'horiz vel (m/s)')
        subplot['xy_vel'].set_xlabel(xlabel)
        if time0 != 0.0:
            fixup_ax(subplot['xy_vel'])
        else:
            subplot['xy_vel'].set_xlabel(xlabel)

    if 'accel' in subplot:
        subplot['accel'].set_ylabel(r'acceleration (m/(s^2))')
        subplot['accel'].set_xlabel(xlabel)
        if time0 != 0.0:
            fixup_ax(subplot['accel'])
        else:
            subplot['accel'].set_xlabel(xlabel)

    if 'vel_hist' in subplot:
        ax = subplot['vel_hist']
        bins = numpy.linspace(0, 2, 50)
        ax.set_title('excluding walking')
        pdf, bins, patches = ax.hist(all_vels, bins=bins, normed=True)
        ax.set_xlim(0, 2)
        ax.set_ylabel('probability density')
        ax.set_xlabel('velocity (m/s)')

    return line2obj_id
    # NOTE(review): everything in this span is unreachable -- it follows the
    # ``return line2obj_id`` above and appears to be an unrelated bar-chart
    # snippet (it uses undefined names: product_list, sales_price_list,
    # month, year, bar_label) pasted in at function indentation. Kept
    # verbatim; consider deleting it.
    plt.rcdefaults()
    fig, ax = plt.subplots()

    y_pos = np.arange(len(product_list))

    # random horizontal error bars, one per product
    error = np.random.rand(len(product_list))

    ax.barh(y_pos, sales_price_list, xerr=error, align='center')
    ax.set_yticks(y_pos)
    ax.set_yticklabels(product_list)

    ax.invert_yaxis()  # labels read top-to-bottom
    ax.set_xlabel('Sales (USD)')
    ax.set_ylabel('Product')
    ax.set_title('Top-selling Products (' + month + ' ' + year + ')')
    formatter = ticker.FormatStrFormatter('$%1.2f')
    ax.xaxis.set_major_formatter(formatter)

    totals = []

    # annotate each bar; the top three get their label pulled further left
    for i in ax.patches:
        totals.append(i.get_width())
    total = sum(totals)
    for j, i in enumerate(ax.patches):
        if j in [0, 1, 2]:
            ax.text(i.get_width()-700, i.get_y()+.5, \
                  str(bar_label[j]), fontsize=7, color='black')
        else:
            ax.text(i.get_width()-.2, i.get_y()+.5, \
                  str(bar_label[j]), fontsize=7, color='black')
Esempio n. 26
0
        # NOTE(review): fragment -- the enclosing loop/def that defines
        # ``i``, ``current_lr``, ``lr_l`` and ``N_iter`` is not visible here.
        lr_l[i] = current_lr

    # Plot the learning-rate schedule collected in lr_l over N_iter steps.
    import matplotlib as mpl
    from matplotlib import pyplot as plt
    import matplotlib.ticker as mtick
    mpl.style.use('default')
    import seaborn
    seaborn.set(style='whitegrid')
    seaborn.set_context('paper')

    plt.figure(1)
    plt.subplot(111)
    plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
    plt.title('Title', fontsize=16, color='k')
    plt.plot(list(range(N_iter)),
             lr_l,
             linewidth=1.5,
             label='learning rate scheme')
    legend = plt.legend(loc='upper right', shadow=False)
    ax = plt.gca()
    # Relabel x ticks in thousands of iterations, e.g. 50000 -> '50K'.
    labels = ax.get_xticks().tolist()
    for k, v in enumerate(labels):
        labels[k] = str(int(v / 1000)) + 'K'
    ax.set_xticklabels(labels)
    ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.1e'))

    ax.set_ylabel('Learning rate')
    ax.set_xlabel('Iteration')
    fig = plt.gcf()
    plt.show()
Esempio n. 27
0
 def test_basic(self):
     """A printf-style spec such as '%05d' is applied verbatim to values."""
     formatter = mticker.FormatStrFormatter('%05d')
     assert formatter(2) == '00002'
Esempio n. 28
0
def combined_plot1(weights: list,
                   times: list,
                   dweights: list,
                   stepsize: int,
                   neurons: np.ndarray,
                   hopfield: np.ndarray,
                   file: str = None,
                   metadata: str = ""):
    """Render a combined summary figure of a weight-evolution run.

    The figure is a 4-row grid: row 0 holds the Hopfield weight matrix
    (left), the active-neuron map (right) and their colorbars; row 1 shows
    weight-matrix snapshots taken every *stepsize* steps; row 2 shows the
    weight deviations summed over each snapshot window; row 3 shows the
    per-neuron weight distribution for each snapshot.

    :param weights: list of weight matrices, one per timestep
    :param times: timestamps parallel to *weights* (used in panel titles)
    :param dweights: per-step weight changes; summed within each window
    :param stepsize: plot every stepsize-th snapshot
    :param neurons: 2-D array marking active neurons
    :param hopfield: Hopfield weight matrix to display in row 0
    :param file: base path for saving; a running index plus ``.png`` is
        appended to avoid overwriting. If falsy, the figure is shown
        interactively instead.
    :param metadata: text for the figure's bold suptitle
    :return: None
    """

    l = len(weights)

    # Subsample snapshots, and sum the deviations inside each window.
    w = weights[0::stepsize]
    c_w = len(w)
    dw = [sum(dweights[i:i + stepsize]) for i in range(0, l - 1, stepsize)]
    c_dw = len(dw)

    l_ax = max(4, c_w + 1)  # at least 4 columns so row 0 fits its 4 panels

    # Build the 4-row grid of axes (l_ax columns); figure scales with it.
    fig, axes = plt.subplots(ncols=l_ax, nrows=4)
    size = 5
    fig.set_size_inches(l_ax * size, 3 * size)

    #
    # Title

    fig.suptitle(metadata, fontsize=14, fontweight='bold')

    # Row 0 only uses its two leftmost and two rightmost axes; drop the rest.
    for i in range(2, l_ax - 2):
        fig.delaxes(axes[0][i])

    #
    # Neuron Map

    major_locator_n = tik.MultipleLocator(neurons.shape[0] // 2)
    major_formatter_n = tik.FormatStrFormatter('%d')
    minor_locator_n = tik.MultipleLocator(1)

    ax = axes[0][-1]
    z = neurons
    im = ax.imshow(z, cmap="hot", interpolation='none')
    ax.set_aspect('equal')
    ax.set_title("Active Neurons")

    ax.yaxis.set_major_locator(major_locator_n)
    ax.yaxis.set_major_formatter(major_formatter_n)
    ax.yaxis.set_minor_locator(minor_locator_n)

    ax.xaxis.set_major_locator(major_locator_n)
    ax.xaxis.set_major_formatter(major_formatter_n)
    ax.xaxis.set_minor_locator(minor_locator_n)

    # Colorbar for the neuron map, in the second-to-last column.
    ax = axes[0][-2]
    ax.set_aspect(8)
    fig.colorbar(im, orientation='vertical', cax=ax)

    #
    # Hopfield

    major_locator_w = tik.MultipleLocator(hopfield.shape[0] // 2)
    major_formatter_w = tik.FormatStrFormatter('%d')
    minor_locator_w = tik.MultipleLocator(hopfield.shape[0] // 4)

    ax = axes[0][0]
    z = hopfield
    im = ax.imshow(z, cmap="hot", interpolation='none')
    ax.invert_yaxis()
    ax.set_aspect('equal')
    ax.set_title("Hopfield weights")
    ax.yaxis.tick_right()

    ax.yaxis.set_major_locator(major_locator_w)
    ax.yaxis.set_major_formatter(major_formatter_w)
    ax.yaxis.set_minor_locator(minor_locator_w)

    ax.xaxis.set_major_locator(major_locator_w)
    ax.xaxis.set_major_formatter(major_formatter_w)
    ax.xaxis.set_minor_locator(minor_locator_w)

    # Colorbar for the Hopfield matrix.
    ax = axes[0][1]
    ax.set_aspect(8)
    fig.colorbar(im, orientation='vertical', cax=ax)
    ax.yaxis.tick_left()

    #
    # Weights & Weights per neuron

    # Shared color scale across all weight snapshots.
    weight_min = np.min(w)
    weight_max = np.max(w)

    for i in range(c_w):
        ax = axes[1][i]
        z = w[i]

        im = ax.imshow(z,
                       cmap="hot",
                       interpolation='none',
                       vmin=weight_min,
                       vmax=weight_max)

        ax.invert_yaxis()
        ax.set_aspect('equal')
        # Only the first snapshot keeps full axes; the rest are bare images.
        if i == 0:
            ax.yaxis.set_major_locator(major_locator_w)
            ax.yaxis.set_major_formatter(major_formatter_w)
            ax.yaxis.set_minor_locator(minor_locator_w)

            ax.xaxis.set_major_locator(major_locator_w)
            ax.xaxis.set_major_formatter(major_formatter_w)
            ax.xaxis.set_minor_locator(minor_locator_w)
            ax.set_title("Weights: t = " + '% 4.2f' % times[i * stepsize])
        else:
            ax.set_axis_off()
            ax.set_title("t = " + '% 4.2f' % times[i * stepsize])

        # Row 3: per-neuron weight distribution for the same snapshot.
        ax = axes[3][i]
        weight_per_neuron(ax, z, neurons.flatten())

        if i != 0:
            ax.set_axis_off()
        else:
            ax.spines['top'].set_color('none')
            ax.spines['right'].set_color('none')
            ax.set_title("Weight per neuron (colored: only active):")

    # Shared colorbar for the weight snapshots.
    ax = axes[1][-1]

    ax.set_aspect(8)
    fig.colorbar(im, orientation='vertical', cax=ax, extend='both')

    fig.delaxes(axes[3][-1])

    #
    # dWeights

    # Shared color scale across all deviation windows.
    dweight_min = np.min(dw)
    dweight_max = np.max(dw)

    for i in range(c_dw):
        ax = axes[2][i]
        z = dw[i]

        im = ax.imshow(z,
                       cmap="hot",
                       interpolation='none',
                       vmin=dweight_min,
                       vmax=dweight_max)
        ax.invert_yaxis()
        ax.set_aspect('equal')
        if i == 0:
            ax.yaxis.set_major_locator(major_locator_w)
            ax.yaxis.set_major_formatter(major_formatter_w)
            ax.yaxis.set_minor_locator(minor_locator_w)

            ax.xaxis.set_major_locator(major_locator_w)
            ax.xaxis.set_major_formatter(major_formatter_w)
            ax.xaxis.set_minor_locator(minor_locator_w)
            ax.set_title("Deviations:")
        else:
            ax.set_axis_off()

    fig.delaxes(axes[2][-2])

    # Colorbar for the deviations row.
    ax = axes[2][-1]
    ax.set_aspect(8)
    fig.colorbar(im, orientation='vertical', cax=ax, extend='both')

    #
    # Finish

    fig.tight_layout()

    if not file:
        plt.show()
    else:
        # Append the first unused running index so reruns never overwrite.
        i = 0
        while os.path.exists('{}_{:d}.png'.format(file, i)):
            i += 1
        file = '{}_{:d}.png'.format(file, i)
        print("Saving results to: " + file)
        plt.savefig(file, dpi=100)

    plt.close()
Esempio n. 29
0
                 # NOTE(review): continuation of a plotting call (likely
                 # seaborn.barplot) whose opening line is in an earlier chunk.
                 palette=cores,
                 zorder=2)

# Chart title
plt.title('Intenção de voto', fontsize=20, color='darkslategray')

# Axis labels and tick styling
plt.xlabel('Porcentagem da intenção de voto (%)',
           fontsize=12,
           labelpad=20,
           color='slategray')
plt.ylabel('', fontsize=0, labelpad=0, color='slategray')
plt.xticks(fontsize=12, color='slategray')
plt.yticks(fontsize=14, color='darkslategray')
# Format x ticks as whole percentages, e.g. 25 -> '25%'.
fmt = '%.0f%%'
xticks = mtick.FormatStrFormatter(fmt)
ax.xaxis.set_major_formatter(xticks)

# Padding away from the chart borders
plt.ylim(bottom=15.6, top=-1.0)
plt.xlim(0, 100)

# Light background grid to ease reading without clashing with the bars.
plt.grid(alpha=0.15, color='silver', zorder=-1)
voto1 = intencao_cand['voto1']

# Percentage labels next to each bar
# NOTE(review): fragment ends mid-loop; the ax.text call that uses
# ``percentage``/``x`` is not visible here.
for p in ax.patches:
    percentage = '{:,.1f}%'.format(p.get_width())
    width, height = p.get_width(), p.get_height()
    x = p.get_x() + width + 0.5
Esempio n. 30
0
    def plot_throughput_delay(self):
        """Create the throughput-vs-delay summary scatter plots.

        Draws one point per run per scheme in a 'raw' figure and one mean
        point per scheme in a 'mean' figure, then saves them as
        pantheon_summary.png and pantheon_summary_mean.png in self.data_dir.

        Reads self.data (scheme -> list of (throughput, delay, _) tuples),
        self.cc_schemes, self.friendly_names, self.worst_abs_ofst,
        self.experiment_title and self.data_dir.
        """
        min_delay = None
        color_names = get_color_names(self.cc_schemes)
        marker_names = get_marker_names(self.cc_schemes)

        fig_raw, ax_raw = plt.subplots()
        fig_mean, ax_mean = plt.subplots()

        for cc in self.data:
            if not self.data[cc]:
                continue

            value = self.data[cc]
            cc_name = self.friendly_names[cc]
            color = color_names[cc]
            marker = marker_names[cc]
            # value holds (throughput, delay, _) tuples
            y_data, x_data, _ = zip(*value)

            # find min and max delay
            cc_min_delay = min(x_data)
            if not min_delay or cc_min_delay < min_delay:
                min_delay = cc_min_delay

            # plot raw values
            ax_raw.scatter(x_data,
                           y_data,
                           color=color,
                           marker=marker,
                           label=cc_name,
                           clip_on=False)

            # plot the average of raw values
            x_mean = sum(x_data) / len(x_data)
            y_mean = sum(y_data) / len(y_data)
            ax_mean.scatter(x_mean,
                            y_mean,
                            color=color,
                            marker=marker,
                            clip_on=False)
            ax_mean.annotate(cc_name, (x_mean, y_mean))

        for fig, ax in [(fig_raw, ax_raw), (fig_mean, ax_mean)]:
            # NOTE(review): if no scheme had data, min_delay is still None
            # and ``min_delay > 0`` raises TypeError on Python 3 -- confirm
            # the intended Python version / guard.
            if min_delay > 0:
                ax.set_xscale('log', basex=2)
                ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%d'))
            # lower delay to the right (better is right/up in this layout)
            ax.invert_xaxis()

            yticks = ax.get_yticks()
            if yticks[0] < 0:
                ax.set_ylim(bottom=0)

            xlabel = '95th percentile of per-packet one-way delay (ms)'
            if self.worst_abs_ofst:
                xlabel += ('\n(worst absolute clock offset: %s ms)' %
                           self.worst_abs_ofst)
            ax.set_xlabel(xlabel)
            ax.set_ylabel('Average throughput (Mbit/s)')
            ax.grid()

        # save pantheon_summary.png
        ax_raw.set_title(self.experiment_title, y=1.02, fontsize=12)
        lgd = ax_raw.legend(scatterpoints=1,
                            bbox_to_anchor=(1, 0.5),
                            loc='center left',
                            fontsize=12)
        raw_summary = path.join(self.data_dir, 'pantheon_summary.png')
        fig_raw.savefig(raw_summary,
                        dpi=300,
                        bbox_extra_artists=(lgd, ),
                        bbox_inches='tight',
                        pad_inches=0.2)

        # save pantheon_summary_mean.png
        ax_mean.set_title(self.experiment_title +
                          '\nmean of all runs by scheme',
                          fontsize=12)
        mean_summary = path.join(self.data_dir, 'pantheon_summary_mean.png')
        fig_mean.savefig(mean_summary,
                         dpi=300,
                         bbox_inches='tight',
                         pad_inches=0.2)