Example #1
def plot_ctxcov(num_cg, num_chg, num_chh, cov_cg, cov_chg, cov_chh):
    """
    Plot the CG/CHG/CHH coverage distribution
    """
    colors = { 'CG': (38/255, 173/255, 84/255),
              'CHG': (44/255, 180/255, 234/255),
              'CHH': (249/255, 42/255, 54/255)}

    plt.switch_backend('Agg')
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['bottom'].set_position(('outward', 8))
    ax.spines['left'].set_position(('outward', 8))
    ax.spines['bottom'].set_linewidth(2)
    ax.spines['left'].set_linewidth(2)
    maxval = int(max([cov_cg.mean(), cov_chg.mean(), cov_chh.mean()]) +
                 2 * max([cov_cg.std(), cov_chg.std(), cov_chh.std()]))
    n, bins = np.histogram(cov_cg, bins=np.linspace(0, maxval, maxval+1))
    n = np.cumsum(n[::-1])[::-1]
    ax.plot(np.arange(0.5, maxval, 1), n/num_cg*100, linewidth=2, color=colors['CG'], label='CG')
    n, bins = np.histogram(cov_chg, bins=np.linspace(0, maxval, maxval+1))
    n = np.cumsum(n[::-1])[::-1]
    ax.plot(np.arange(0.5, maxval, 1), n/num_chg*100, linewidth=2, color=colors['CHG'], label='CHG')
    n, bins = np.histogram(cov_chh, bins=np.linspace(0, maxval, maxval+1))
    n = np.cumsum(n[::-1])[::-1]
    ax.plot(np.arange(0.5, maxval, 1), n/num_chh*100, linewidth=2, color=colors['CHH'], label='CHH')
    ax.set_xlim(0, maxval)
    ax.set_ylim(0, 100)
    ax.tick_params(direction='out', top=False, right=False, length=5, width=2, labelsize='large')
    ax.set_xlabel('Coverage (x)', size='large', weight='bold')
    ax.set_ylabel('Percentage (%)', size='large', weight='bold')
    ax.legend(loc='upper right', prop={'size': 'small'})
    plt.tight_layout()
    return ax
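Example #1 turns a plain histogram into a reverse cumulative one: np.cumsum(n[::-1])[::-1] converts per-bin counts into "number of sites with at least this coverage", which is what the percentage curves show. A minimal sketch of the same trick on synthetic data (all names below are illustrative, not from the project above):

import numpy as np

cov = np.random.poisson(10, size=1000)   # synthetic per-site coverage
maxval = 30
n, _ = np.histogram(cov, bins=np.linspace(0, maxval, maxval + 1))
at_least = np.cumsum(n[::-1])[::-1]      # at_least[k] = sites with coverage >= k (within range)
pct = at_least / len(cov) * 100          # the quantity Example #1 plots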
Example #2
def set_mpl_backend():

    import os
    from matplotlib import rcParams, rcdefaults

    # Standardize mpl setup
    rcdefaults()

    # Set default backend to Agg. The Qt and Jupyter glue applications don't
    # use the default backend, so this is just to make sure that importing
    # matplotlib doesn't cause errors related to the MacOSX or Qt backend.
    rcParams['backend'] = 'Agg'

    # Disable key bindings in matplotlib
    for setting in list(rcParams.keys()):
        if setting.startswith('keymap'):
            rcParams[setting] = ''

    # Set the MPLBACKEND variable explicitly, because ipykernel uses the lack of
    # MPLBACKEND variable to indicate that it should use its own backend, and
    # this in turn causes some rcParams to be changed, causing test failures
    # etc.
    os.environ['MPLBACKEND'] = 'Agg'

    # Explicitly switch backend
    from matplotlib.pyplot import switch_backend
    switch_backend('agg')
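A hedged usage sketch for set_mpl_backend: call it before anything else imports pyplot, so later imports cannot pull in an interactive backend (the plotting lines are illustrative):

set_mpl_backend()                        # force Agg and clear key bindings first
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])
fig.savefig('smoke.png')                 # renders headless, no display needed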
Example #3
def drawMean(ctype, filename, titulo, yaxis, comp, *args):
    plt.switch_backend('Qt4Agg')
    fig = plt.figure(figsize=(28, 5))
    ax = fig.add_subplot(1,2,1)
    means = []
    for v in comp:
        means.append(np.mean(v))
    col = ['r', 'b', 'g', 'm', 'c', 'y', 'r', 'b', 'g', 'm', 'c', 'y']
    if len(comp) == 16:
        col = ['r', 'r', 'g', 'g', 'g', 'b', 'b', 'b', 'c', 'c', 'c', 'm', 'm', 'm', 'y', 'y', 'y' ]
    if len(comp) == 12:
        col = ['r', 'r' ,'b','b','g', 'g','m', 'm', 'c', 'c', 'y' ,'y']
    if len(comp) == 13:
        col = ['r', 'r', 'r','b','b','g', 'g','m', 'm', 'c', 'c', 'y' ,'y']
    if len(comp) == 11:
        col = ['r' ,'b','b','g', 'g','m', 'm', 'c', 'c', 'y' ,'y']
    ax.grid(b=True, linestyle='-', axis='y', linewidth=1, zorder=1)
    if len(comp) < 7:
        ax.bar(range(1, int(ctype) + 1), means, align='center',
               color=col[0:int(ctype) + 1], zorder=10, width=0.6)
    else:
        ax.bar(range(1, int(ctype) + 1), means, align='center',
               color=col[0:int(ctype) + 1], zorder=10)
    plt.title(titulo)
    labels = [i for i in args]
    plt.ylabel(yaxis+' (promedio)')
    plt.tick_params(labelsize = 7)
    plt.xticks(range(1, (int(ctype)) + 1), labels)
    plt.savefig(filename + ' (promedio).png', bbox_inches='tight')
    val_strings = ["%10.10f" % x for x in means ]
    with open(filename + '_promedio.txt', 'w') as file_:
        for k in range(0, len(comp)):
            file_.write(labels[k] + " " + val_strings[k] + "\n")
Example #4
File: asc.py Project: cyrobot/soinn
def test_asc():
    mean1 = [0.5, 0.5]
    mean2 = [3.0, 0.5]
    conv1 = [[1, 0], [0, 1]]
    conv2 = [[1.5, 0], [0, 1.2]]
    samples = 2000
    class1_data = np.random.multivariate_normal(mean1, conv1, samples)
    class1_label = np.ones(samples, dtype=np.int32) * 0
    class2_data = np.random.multivariate_normal(mean2, conv2, samples)
    class2_label = np.ones(samples, dtype=np.int32) * 1
    train_data = np.concatenate((class1_data, class2_data))
    train_label = np.concatenate((class1_label, class2_label))

    prototypes, labels = asc(train_data, train_label, 20, 20, 3)
    plt.switch_backend('Qt4Agg')
    plt.figure(1)
    # plt.hold() was removed in matplotlib 3.0; repeated plot() calls overlay by default
    plt.plot(class1_data[:, 0], class1_data[:, 1], 'r*')
    plt.plot(class2_data[:, 0], class2_data[:, 1], 'b.')
    plt.figure(2)
    for i in range(labels.shape[0]):  # xrange -> range for Python 3
        if labels[i] == 0:
            plt.plot(prototypes[i, 0], prototypes[i, 1], 'r*')
        else:
            plt.plot(prototypes[i, 0], prototypes[i, 1], 'b.')
    plt.show()
Example #5
def has_matplotlib(version=None, op=">="):
    """
    True if matplotlib_ is installed.
    If version is None, the result of matplotlib.__version__ `op` version is returned.
    """
    try:
        import matplotlib
        # have_display = "DISPLAY" in os.environ
    except ImportError:
        print("Skipping matplotlib test")
        return False

    matplotlib.use("Agg")
    #matplotlib.use("Agg", force=True)  # Use non-graphical display backend during test.
    import matplotlib.pyplot as plt
    # http://stackoverflow.com/questions/21884271/warning-about-too-many-open-figures
    plt.close("all")

    backend = matplotlib.get_backend()
    if backend.lower() != "agg":
        #raise RuntimeError("matplotlib backend now is %s" % backend)
        #matplotlib.use("Agg", warn=True, force=False)
        # Switch the default backend.
        # This feature is experimental, and is only expected to work switching to an image backend.
        plt.switch_backend("Agg")

    if version is None: return True
    return cmp_version(matplotlib.__version__, version, op=op)
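A usage sketch for has_matplotlib (cmp_version is assumed to come from the same utility module):

if has_matplotlib():                     # matplotlib importable; backend forced to Agg
    import matplotlib.pyplot as plt      # now safe to import for plotting tests
if has_matplotlib("2.0", op=">="):       # additionally require matplotlib >= 2.0
    pass                                 # version-dependent tests would go here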
Example #6
def createPowerCurve(data, cols, currentAnalysis):
    plt.switch_backend('agg')
    fg, ax = plt.subplots()
    plt.title('Power curve scatter')
    plotting.powerCurve(data.data, cols[0], cols[1], ax)
    createDirForPlot(currentAnalysis)
    plt.savefig(STATIC_DIR + '/plots/' + currentAnalysis.title +'/powerCurve.png')
Example #7
    def switch_backend(backend, sloppy=True):
        """
        Switch matplotlib backend.

        :type backend: str
        :param backend: Name of matplotlib backend to switch to.
        :type sloppy: bool
        :param sloppy: If ``True``, only uses
            :func:`matplotlib.pyplot.switch_backend` and no warning will be
            shown if the backend was not switched successfully. If ``False``,
            additionally tries to use :func:`matplotlib.use` first and also
            shows a warning if the backend was not switched successfully.
        """
        import matplotlib
        # sloppy. only do a `plt.switch_backend(..)`
        if sloppy:
            import matplotlib.pyplot as plt
            plt.switch_backend(backend)
        else:
            # check if `matplotlib.use(..)` is emitting a warning
            try:
                with warnings.catch_warnings(record=True):
                    warnings.simplefilter("error", UserWarning)
                    matplotlib.use(backend)
            # if that's the case, follow up with `plt.switch_backend(..)`
            except UserWarning:
                import matplotlib.pyplot as plt
                plt.switch_backend(backend)
            # finally check if the switch was successful,
            # show a warning if not
            if matplotlib.get_backend().upper() != backend.upper():
                msg = "Unable to change matplotlib backend to '%s'" % backend
                warnings.warn(msg)
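A usage sketch for the strict mode of this helper (the backend name is just an example): with sloppy=False it tries matplotlib.use() first, falls back to pyplot's switch if that warns, and warns itself if the backend still did not change:

import matplotlib
switch_backend("Agg", sloppy=False)
assert matplotlib.get_backend().lower() == "agg"   # holds unless the warning above fired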
Example #8
def createDistribution(data, cols, currentAnalysis):
    plt.switch_backend('agg')
    fg, ax = plt.subplots()
    plotting.distribution(data.data, cols[0])
    plt.title('Distribution - ' + cols[0])
    createDirForPlot(currentAnalysis)
    plt.savefig(STATIC_DIR + '/plots/' + currentAnalysis.title +'/distribution.png')
Example #9
def createFFT(data, cols, currentAnalysis):
    plt.switch_backend('agg')
    fg, ax = plt.subplots()
    plt.title('FFT - ' + cols[0])
    plotting.fft(data.data, cols[0], ax)
    createDirForPlot(currentAnalysis)
    plt.savefig(STATIC_DIR + '/plots/' + currentAnalysis.title +'/fft.png')
Example #10
def plot(rr_file, tag_file):
    """
    Paint results of acquisition
    @param rr_file: Path to file that contains rr values
    @param tag_file: Path to file that contains tag values
    """

    import matplotlib.pyplot as plt
    plt.switch_backend("WXAgg")

    colors = ['orange', 'green', 'lightblue', 'grey', 'brown', 'red', 'yellow', 'black', 'magenta', 'purple']
    shuffle(colors)
    rr_values = parse_rr_file(rr_file)
    hr_values = [60 / (float(rr) / 1000) for rr in rr_values]  # a list (a Py3 map object would be exhausted before min/max below)
    tag_values = parse_tag_file(tag_file)
    x = [x / 1000 for x in cumsum(rr_values)]
    y = hr_values
    plt.plot(x, y)

    for tag in tag_values:
        c = colors.pop()
        plt.axvspan(tag[0], tag[0] + tag[2], facecolor=c, alpha=.8, label=tag[1])

    plt.ylabel('Heart rate (bpm)')
    plt.xlabel('Time (s)')
    plt.title('Acquisition results')
    plt.ylim(bottom=min(min(y) - 10, 40), top=max(max(y) + 10, 150))  # ymin/ymax were renamed in matplotlib 3.0
    plt.legend()
    plt.show()
Example #11
    def plot(self, filedir=None, file_format='pdf'):
        if filedir is None:
            filedir = self.workdir
        import matplotlib.pyplot as plt
        plt.switch_backend('agg')

        plt.figure(figsize=(8, 6))
        plt.subplots_adjust(left=0.1, bottom=0.08, right=0.95, top=0.95, wspace=None, hspace=None)
        forces = np.array(self.output['forces'])
        maxforce = [np.max(np.apply_along_axis(np.linalg.norm, 1, x)) for x in forces]
        avgforce = [np.mean(np.apply_along_axis(np.linalg.norm, 1, x)) for x in forces]

        if np.max(maxforce) > 0.0 and np.max(avgforce) > 0.0:
            plt.semilogy(maxforce, 'b.-', label='Max force')
            plt.semilogy(avgforce, 'r.-', label='Mean force')
        else:
            plt.plot(maxforce, 'b.-', label='Max force')
            plt.plot(avgforce, 'r.-', label='Mean force')
        plt.xlabel('Ion movement iteration')
        plt.ylabel('Max Force')
        plt.savefig(filedir + os.sep + 'forces.' + file_format)
        plt.clf()

        plt.figure(figsize=(8, 6))
        plt.subplots_adjust(left=0.1, bottom=0.08, right=0.95, top=0.95, wspace=None, hspace=None)
        stress = np.array(self.output['stress'])
        diag_stress = [np.trace(np.abs(x)) for x in stress]
        offdiag_stress = [np.sum(np.abs(np.triu(x, 1).flatten())) for x in stress]
        plt.semilogy(diag_stress, 'b.-', label='diagonal')
        plt.semilogy(offdiag_stress, 'r.-', label='off-diagonal')
        plt.legend()
        plt.xlabel('Ion movement iteration')
        plt.ylabel(r'$\sum |stress|$ (diag, off-diag)')
        plt.savefig(filedir + os.sep + 'stress.' + file_format)
Example #12
        def wrapped(*args, **kwargs):
            orig_backend = plt.get_backend()
            plt.switch_backend('agg')
            mpl_setup()

            if pyplot_helpers.Gcf.figs:
                warnings.warn('Figures existed before running the %s %s test.'
                              ' All figures should be closed after they run. '
                              'They will be closed automatically now.' %
                              (mod_name, test_name))
                pyplot_helpers.Gcf.destroy_all()

            if MPL_VERSION >= '2':
                style_context = mpl.style.context
            else:
                @contextlib.contextmanager
                def style_context(style, after_reset=False):
                    yield

            with style_context(self.style):
                r = test_func(*args, **kwargs)

                fig_managers = pyplot_helpers.Gcf._activeQue
                figures = [manager.canvas.figure for manager in fig_managers]

                try:
                    self.run_figure_comparisons(figures, test_name=mod_name)
                finally:
                    for figure in figures:
                        pyplot_helpers.Gcf.destroy_fig(figure)
                    plt.switch_backend(orig_backend)
            return r
Example #13
    def testPlot(self):
        """Test plotting of spectrum

        Not easy to test the actual result, but we can test that the API hasn't
        been broken.
        """
        import matplotlib.pyplot as plt
        plt.switch_backend("agg")  # In case someone has loaded a different backend that will cause trouble
        ext = ".png"  # Extension to use for plot filenames
        spectrum = self.makeSpectrum()
        # Write directly to file
        with lsst.utils.tests.getTempFilePath(ext) as filename:
            spectrum.plot(numRows=4, doBackground=True, doReferenceLines=True, filename=filename)
        # Check return values
        with lsst.utils.tests.getTempFilePath(ext) as filename:
            numRows = 4  # Must be > 1 for len(axes) to work
            figure, axes = spectrum.plot(numRows=numRows)
            self.assertEqual(len(axes), numRows)
            figure.savefig(filename)
        # Test one row, write directly to file
        with lsst.utils.tests.getTempFilePath(ext) as filename:
            figure, axes = spectrum.plot(numRows=1, filename=filename)
        # Test one row, check return values
        with lsst.utils.tests.getTempFilePath(ext) as filename:
            figure, axes = spectrum.plot(numRows=1)
            with self.assertRaises(TypeError):
                axes[0]
            figure.savefig(filename)
Example #14
def mpl_test_settings(request):
    from matplotlib.testing.decorators import _do_cleanup

    original_units_registry = matplotlib.units.registry.copy()
    original_settings = matplotlib.rcParams.copy()

    backend = None
    backend_marker = request.keywords.get('backend')
    if backend_marker is not None:
        assert len(backend_marker.args) == 1, \
            "Marker 'backend' must specify 1 backend."
        backend = backend_marker.args[0]
        prev_backend = matplotlib.get_backend()

    style = '_classic_test'  # Default of cleanup and image_comparison too.
    style_marker = request.keywords.get('style')
    if style_marker is not None:
        assert len(style_marker.args) == 1, \
            "Marker 'style' must specify 1 style."
        style = style_marker.args[0]

    matplotlib.testing.setup()
    if backend is not None:
        # This import must come after setup() so it doesn't load the default
        # backend prematurely.
        import matplotlib.pyplot as plt
        plt.switch_backend(backend)
    matplotlib.style.use(style)
    try:
        yield
    finally:
        if backend is not None:
            plt.switch_backend(prev_backend)
        _do_cleanup(original_units_registry,
                    original_settings)
Example #15
def ecg2rri(x):
    global rri, t, ax2, pos_c
    pos_c = []
    plt.switch_backend('qt4Agg')
    plt.ion()
    msg = "RRi Detection Parameters"
    title = "Parameters Dialog"
    fieldNames = ["Threshold", "Refractory Period", "Low Cuttof Freq.",
                  "Upper Cuttof Freq.", "Sampling Frequency"]
    fieldValues = ["0.5", "200", "5", "40", "1000"]
    fieldValues = multenterbox(msg, title, fieldNames, fieldValues)
    fieldValues = [float(k) for k in fieldValues]
    thr = fieldValues[0]
    Fs = fieldValues[4]
    lC = fieldValues[2] / (0.5 * Fs)  #normalized lower cutoff frequency.
    uC = fieldValues[3] / (0.5 * Fs)  #normalized upper cutoff frequency.
    B, A = butter(4, [lC, uC], 'band')
    xf = filtfilt(B, A, x)  #filtered ecg.
    xd = diff(xf)  #first derivative of the ecg.
    peaks = array([k + 1 for k in range(len(xd) - 1) if xd[k] > 0 and
                   xd[k + 1] < 0 and xf[k] > thr or xd[k] == 0])  # find RR peaks above threshold

    rri = diff(peaks)  # RRi in milliseconds
    t = cumsum(rri) / 1000.0
    t_ecg = arange(0, len(xf)) / Fs
    fig = plt.figure()
    ax1 = fig.add_subplot(2, 1, 1)
    ax2 = fig.add_subplot(2, 1, 2)
    ax1.plot(t_ecg, xf)
    ax1.plot(t_ecg[peaks], xf[peaks], 'g.-')
    ax2.plot(t, rri, 'k.-')
    fig.canvas.mpl_connect('button_press_event', onclick)
    return t, rri
Example #16
def plot_bar(dataframe, bulk, ctx):
    colors = { 'CG': ( 38/255, 173/255,  84/255),
              'CHG': ( 44/255, 180/255, 234/255),
              'CHH': (249/255,  42/255,  54/255)}
    dataframe = dataframe*100
    plt.switch_backend('Agg')
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax = dataframe.plot(ax=ax, kind='bar', grid=False, rot=0, color=colors[ctx], ylim=(0, 100))
    ax.set_ylabel('Methylation Level (%)', fontsize='xx-large', fontweight='bold')
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['bottom'].set_linewidth(2)
    ax.spines['left'].set_linewidth(2)
    #ax.spines['bottom'].set_position(('outward', 5))
    #ax.spines['left'].set_position(('outward', 5))
    ax.tick_params(direction='out', length=6, width=2, labelsize='xx-large', top=False, right=False)
    for label in ax.xaxis.get_ticklabels():
        label.set_fontweight('bold')
    for label in ax.yaxis.get_ticklabels():
        label.set_fontweight('bold')
    ax.set_title(ctx, fontsize='xx-large', weight='bold')
    #ax.axhline(y=np.mean(bulk[ctx])*100, linewidth=2, linestyle='--', color='k')
    fig.tight_layout()
    return ax
Example #17
def main():
    parser = get_parser()
    args = parser.parse_args()
    root = os.path.splitext(os.path.basename(args.cgmap))[0]
    ctxstr = const_ctxstr(args.fasta)
    cgmap = const_cgmap(ctxstr, args.cgmap, args.depth)
    gtftree = const_gtftree(args.gtf)
    bulk = calc_bulk(ctxstr, cgmap)
    plt.switch_backend('Agg')
    bulk_ax = plot_bulkmean(bulk)
    fig = bulk_ax.get_figure()
    fig.savefig('{}.bulk.mean.png'.format(root), dpi=300)
    plt.close(fig)
    bulk_fig = plot_bulkhist(bulk)
    bulk_fig.savefig('{}.bulk.hist.png'.format(root), dpi=300)
    plt.close(bulk_fig)  # was plt.close(fig), which pointed at the already-closed figure
    ign, cg_table, chg_table, chh_table = calc_mlevel(ctxstr, cgmap, gtftree, args.pmtsize)
    cg_table.to_csv('{}.feature.CG.txt'.format(root), sep='\t', float_format='%.3f')
    chg_table.to_csv('{}.feature.CHG.txt'.format(root), sep='\t', float_format='%.3f')
    chh_table.to_csv('{}.feature.CHH.txt'.format(root), sep='\t', float_format='%.3f')
    cg_ax, chg_ax, chh_ax = plot_feature_mlevel(bulk, ign, cg_table, chg_table, chh_table)
    fig = cg_ax.get_figure()
    fig.savefig('{}.feature.CG.png'.format(root), dpi=300)
    plt.close(fig)
    fig = chg_ax.get_figure()
    fig.savefig('{}.feature.CHG.png'.format(root), dpi=300)
    plt.close(fig)
    fig = chh_ax.get_figure()
    fig.savefig('{}.feature.CHH.png'.format(root), dpi=300)
    plt.close(fig)
    gpos, gmlevel = calc_genomewide(ctxstr, cgmap)
    gax = plot_genomewide(ctxstr, gpos, gmlevel)
    fig = gax.get_figure()
    fig.savefig('{}.genomewide.png'.format(root), dpi=300)
    plt.close(fig)
Example #18
def mpl_test_settings(request):
    from matplotlib.testing.decorators import _cleanup_cm

    with _cleanup_cm():

        backend = None
        backend_marker = request.keywords.get('backend')
        if backend_marker is not None:
            assert len(backend_marker.args) == 1, \
                "Marker 'backend' must specify 1 backend."
            backend = backend_marker.args[0]
            prev_backend = matplotlib.get_backend()

        style = '_classic_test'  # Default of cleanup and image_comparison too.
        style_marker = request.keywords.get('style')
        if style_marker is not None:
            assert len(style_marker.args) == 1, \
                "Marker 'style' must specify 1 style."
            style = style_marker.args[0]

        matplotlib.testing.setup()
        if backend is not None:
            # This import must come after setup() so it doesn't load the
            # default backend prematurely.
            import matplotlib.pyplot as plt
            plt.switch_backend(backend)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", MatplotlibDeprecationWarning)
            matplotlib.style.use(style)
        try:
            yield
        finally:
            if backend is not None:
                plt.switch_backend(prev_backend)
Example #19
    def setBackend(cls, value):
        """ Possible values are:
        - TkAgg for Tkinter
        - Agg for non-interactive plots.
        """
        plt.switch_backend(value)
        cls.backend = value
Example #20
def grafica_tasa_de_reservacion(request):
    
    # Retrieve the dictionary used to build the chart
    try:
        datos_ocupacion = request.GET.dict()
        datos_ocupacion = OrderedDict(sorted((k, float(v)) for k, v in datos_ocupacion.items()))
        response = HttpResponse(content_type='image/png')
    except:
        return HttpResponse(status=400) # Bad request

    # If the request carries no dictionary at all
    if (not datos_ocupacion):
        return HttpResponse(status=400) # Bad request

    # Configure and build the bar chart with the pyplot library
    pyplot.switch_backend('Agg') # So it does not use Tk and run into threading problems
    pyplot.bar(range(len(datos_ocupacion)), datos_ocupacion.values(), color='#6495ed')  # 'hold' kwarg removed in matplotlib 3.0
    pyplot.ylim([0,100])
    pyplot.title('Distribución de los porcentajes por fecha')
    pyplot.xticks(range(len(datos_ocupacion)), list(datos_ocupacion.keys()), rotation=20)
    pyplot.ylabel('Porcentaje (%)')
    pyplot.grid(True, 'major', 'both')
    pyplot.savefig(response, format='png') # Write the rendered image into the HttpResponse
    pyplot.close()
    
    return response
Example #21
    def movie(self, fldname, jd1=None, jd2=None, jdvec=None, fps=10, **kwargs):
        from matplotlib.dates import num2date  # the original used a pylab alias `pl` for this
        curr_backend = plt.get_backend()
        plt.switch_backend('Agg')
        FFMpegWriter = animation.writers['ffmpeg']
        metadata = dict(title='%s' % (self.projname),
                        artist=self.projname,
                        comment='https://github.com/brorfred/njord')
        writer = FFMpegWriter(fps=fps, metadata=metadata,
            extra_args=['-vcodec', 'libx264',"-pix_fmt", "yuv420p"])

        jdvec = self.get_tvec(jd1, jd2) if jdvec is None else jdvec
        fig = plt.figure()
        with writer.saving(fig, "%s.mp4" % self.projname, 200):
            for jd in jdvec:
                plt.clf()
                print(num2date(jd).strftime("%Y-%m-%d %H:%M load "), end="")
                sys.stdout.flush()
                try:
                    fld = self.get_field(fldname, jd=jd)
                except Exception:
                    print("%s not downloaded" % jd)  # original string lacked the %s placeholder
                    continue
                print("plot ", end="")
                sys.stdout.flush()
                self.pcolor(fld, **kwargs)
                plt.title(num2date(jd).strftime("%Y-%m-%d %H:%M"))
                print("write")
                writer.grab_frame()  # bbox_inches="tight", pad_inches=0
        plt.switch_backend(curr_backend)
Example #22
def test(target=None, show=False, onlydoctests=False, coverage=False, htmlreport=False):
    """Run docstring examples and additional tests.

    Examples
    --------
    >>> from pygimli.utils import boxprint
    >>> test(target=boxprint)

    Parameters
    ----------
    target : function, optional
        Function or method to test. By default everything is tested.
    show : boolean, optional
        Show matplotlib windows during test run. They will be closed
        automatically.
    onlydoctests : boolean, optional
        Run only the docstring examples, skipping the test files in ../tests.
    coverage : boolean, optional
        Create a coverage report. Requires the pytest-cov plugin.
    htmlreport : str, optional
        Filename for HTML report such as www.pygimli.org/build_tests.html.
        Requires pytest-html plugin.
    """
    if target:
        import doctest
        doctest.run_docstring_examples(target, globals())
        return

    try:
        import pytest
    except ImportError:
        raise ImportError("pytest is required to run test suite. " + \
                          "Try 'sudo pip install pytest'.")

    from matplotlib import pyplot as plt
    from pygimli.utils import opt_import
    pc = opt_import("pytest_cov", "create a code coverage report")
    ph = opt_import("pytest_html", "create a html report")

    old_backend = plt.get_backend()
    if not show:
        plt.switch_backend("Agg")
    cwd = os.path.realpath(__path__[0])
    cfg = os.path.join(cwd, "../tests/setup.cfg")
    cmd = ""
    if os.path.exists(cfg):
        cmd += "-c %s " % cfg
    if pc and coverage:
        cmd += "--cov pygimli --cov-report term " + \
               "--cov-config %s " % cfg.replace("setup.cfg", ".coveragerc")
    if ph and htmlreport:
        cmd += "--html %s " % htmlreport
    cmd += "%s " % cwd
    if not onlydoctests and os.path.exists(cfg):
        cmd += os.path.join(cwd, "../tests")

    exitcode = pytest.main(cmd)
    plt.switch_backend(old_backend)
    plt.close('all')
    sys.exit(exitcode)
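Example #22 restores the previous backend by hand at the end of the run; wrapping the switch in try/finally guarantees the restore even when the wrapped work raises. A minimal sketch of that idiom (not pygimli's API):

import matplotlib.pyplot as plt

old_backend = plt.get_backend()
plt.switch_backend("Agg")
try:
    pass                                 # run the headless plotting work here
finally:
    plt.switch_backend(old_backend)      # always restore the caller's backend
    plt.close("all")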
Example #23
    def test_chinese_restaurant_process(self):
        print(sys.path)
        from matplotlib import pyplot
        import matplotlib
        from scipy import stats
        alpha = 20
        test_size = 1000
        tests = 1000
        data = [0]
        for j in range(0, tests):
            cr = ChineseRestaurant(alpha, Numbers())
            for i in range(0, test_size):
                new_sample = cr.draw()
                if new_sample >= len(data):
                    data.append(0)
                data[new_sample] += 1
            assert cr.heap[1] == test_size
        pyplot.switch_backend('Qt5Agg')
        #data=sorted(data, reverse=True)
        print(len(data))
        actual_plot, = pyplot.plot(range(1, len(data)), data[1:], label='actual avg')
        expected = [0]
        remain = test_size * tests
        for i in range(1, len(data)):
            break_ = stats.beta.mean(1.0, float(alpha)) * remain
            expected.append(break_)
            remain -= break_
        #print est
        expected_plot, = pyplot.plot(range(1, len(data)), expected[1:], 'r', linewidth=1, label='expected')
        matplotlib.interactive(True)
        pyplot.ylabel("People at Table")
        pyplot.xlabel("Table Number")
        pyplot.title("Chinese Restaurant Process Unit Test")
        pyplot.legend()
        pyplot.show(block=True)
Example #24
    def movie(self,di=10, coord='latlon',land="nice", heatmap=False):
        curr_backend = plt.get_backend()
        plt.switch_backend('Agg')

        FFMpegWriter = animation.writers['ffmpeg']
        metadata = dict(title='%s %s' % (self.projname, self.casename),
                        artist='pytraj',
                        comment='https://github.com/TRACMASS/pytraj')
        writer = FFMpegWriter(fps=15, metadata=metadata)

        fig = plt.figure()
        with writer.saving(fig, "traj_test.mp4", 200):
            for part in self.partvec:
                self.load(part=part)
                jdvec = np.sort(self.jdvec)                
                for jd in jdvec:
                    print(part, jdvec[-1] - jd, len(self.jd[self.jd == jd]))
                    if len(self.jd[self.jd == jd]) <= 1: continue
                    if jd % di == 0:  # divisibility check; was `jd/di == float(jd)/di` under Python 2
                        if heatmap == True:
                            self.heatmap(log=True, jd=jd)
                        else:
                            self.scatter(jd=jd, coord=coord, land=land)
                        writer.grab_frame()
        plt.switch_backend(curr_backend)
Example #25
def _setup():
    # The baseline images are created in this locale, so we should use
    # it during all of the tests.
    try:
        locale.setlocale(locale.LC_ALL, str('en_US.UTF-8'))
    except locale.Error:
        try:
            locale.setlocale(locale.LC_ALL, str('English_United States.1252'))
        except locale.Error:
            warnings.warn(
                "Could not set locale to English/United States. "
                "Some date-related tests may fail")

    plt.switch_backend('Agg')  # use Agg backend for these tests
    if mpl.get_backend().lower() != "agg":
        msg = ("Using a wrong matplotlib backend ({0}), "
               "which will not produce proper images")
        raise Exception(msg.format(mpl.get_backend()))

    # These settings *must* be hardcoded for running the comparison
    # tests
    mpl.rcdefaults()  # Start with all defaults
    mpl.rcParams['text.hinting'] = True
    mpl.rcParams['text.antialiased'] = True
    mpl.rcParams['text.hinting_factor'] = 8

    # make sure we don't carry over bad plots from former tests
    msg = ("no of open figs: {} -> find the last test with ' "
           "python tests.py -v' and add a '@cleanup' decorator.")
    assert len(plt.get_fignums()) == 0, msg.format(plt.get_fignums())
Example #26
def test_plot_connectome():
    import matplotlib.pyplot as plt
    plt.switch_backend('template')
    node_color = ['green', 'blue', 'k', 'cyan']
    # symmetric up to 1e-3 relative tolerance
    adjacency_matrix = np.array([[1., -2., 0.3, 0.],
                                 [-2.002, 1, 0., 0.],
                                 [0.3, 0., 1., 0.],
                                 [0., 0., 0., 1.]])
    node_coords = np.arange(3 * 4).reshape(4, 3)

    args = adjacency_matrix, node_coords
    kwargs = dict(edge_threshold=0.38,
                  title='threshold=0.38',
                  node_size=10, node_color=node_color)
    plot_connectome(*args, **kwargs)

    # used to speed-up tests for the next plots
    kwargs['display_mode'] = 'x'

    # node_coords not an array but a list of tuples
    plot_connectome(adjacency_matrix,
                    [tuple(each) for each in node_coords],
                    **kwargs)
    # saving to file
    with tempfile.NamedTemporaryFile(suffix='.png') as fp:
        display = plot_connectome(*args, output_file=fp.name,
                                  **kwargs)
        assert_true(display is None)
        assert_true(os.path.isfile(fp.name) and
                    os.path.getsize(fp.name) > 0)

    # with node_kwargs, edge_kwargs and edge_cmap arguments
    plot_connectome(*args,
                    edge_threshold='70%',
                    node_size=[10, 20, 30, 40],
                    node_color=np.zeros((4, 3)),
                    edge_cmap='RdBu',
                    node_kwargs={
                        'marker': 'v'},
                    edge_kwargs={
                        'linewidth': 4})

    # masked array support
    masked_adjacency_matrix = np.ma.masked_array(
        adjacency_matrix, np.abs(adjacency_matrix) < 0.5)
    plot_connectome(masked_adjacency_matrix, node_coords,
                    **kwargs)

    # sparse matrix support
    sparse_adjacency_matrix = sparse.coo_matrix(adjacency_matrix)
    plot_connectome(sparse_adjacency_matrix, node_coords,
                    **kwargs)

    # NaN matrix support
    nan_adjacency_matrix = np.array([[1., np.nan, 0.],
                                     [np.nan, 1., 2.],
                                     [np.nan, 2., 1.]])
    nan_node_coords = np.arange(3 * 3).reshape(3, 3)
    plot_connectome(nan_adjacency_matrix, nan_node_coords, **kwargs)
Example #27
def main():

    dataDir = sys.argv[1]
    dataType = "train2014"

    plt.switch_backend("TkAgg")
    pylab.rcParams['figure.figsize'] = (15.0, 10.0)

    ct = coco_text.COCO_Text(sys.argv[2])
    ct.info()

    # get all images containing at least one instance of legible text
    imgIds = ct.getImgIds(imgIds=ct.train, 
                                catIds=[('legibility','legible')])

    while True:
        # pick one at random
        img = ct.loadImgs(imgIds[np.random.randint(0,len(imgIds))])[0]

        I = io.imread('%s/images/%s/%s'%(dataDir,dataType,img['file_name']))
        print('/images/%s/%s' % (dataType, img['file_name']))
        plt.figure()
        annIds = ct.getAnnIds(imgIds=img['id'])
        anns = ct.loadAnns(annIds)
        ct.showAnns(anns)
        plt.imshow(I)
        plt.show()
Example #28
    def test_QueueNetwork_animate(self):
        if not HAS_MATPLOTLIB:
            with mock.patch('queueing_tool.network.queue_network.plt.show'):
                self.qn.animate(frames=5)
        else:
            plt.switch_backend('Agg')
            self.qn.animate(frames=5)
Example #29
def test_plot_stat_map():
    mp.use('template')  # the `warn` kwarg was removed in matplotlib 3.1
    import matplotlib.pyplot as plt
    plt.switch_backend('template')
    img = _generate_img()

    plot_stat_map(img, cut_coords=(80, -120, -60))

    # Smoke test coordinate finder, with and without mask
    masked_img = nibabel.Nifti1Image(
        np.ma.masked_equal(img.get_data(), 0),
        mni_affine)
    plot_stat_map(masked_img, display_mode='x')
    plot_stat_map(img, display_mode='y', cut_coords=2)

    # 'yx' display_mode
    plot_stat_map(img, display_mode='yx')

    # regression test #510
    data = np.zeros((91, 109, 91))
    aff = np.eye(4)
    new_img = nibabel.Nifti1Image(data, aff)
    plot_stat_map(new_img, threshold=1000, colorbar=True)

    rng = np.random.RandomState(42)
    data = rng.randn(91, 109, 91)
    new_img = nibabel.Nifti1Image(data, aff)
    plot_stat_map(new_img, threshold=1000, colorbar=True)
Example #30
def matplotlib_backend():
    """Figure out which matplotlib backend to use"""
    if platform.system().startswith('Linux'):
        # note: platform.linux_distribution() was removed in Python 3.8 (use the `distro` package there)
        if platform.linux_distribution()[0] == 'arch':
            plt.switch_backend('Qt4Agg')
    elif platform.system().startswith('Darwin'):
        plt.switch_backend('MacOSX')
Example #31
    def __exit__(self, *args):
        plt.switch_backend(self._old_backend)
        plt.ion()
Example #32
from bidder import *
from copy import deepcopy
from master_file import regret_winexp, regret_exp3
from auction_parameters import set_auction_params
import matplotlib
import matplotlib.pyplot as plt
plt.style.use('ggplot')
plt.switch_backend('agg')

num_repetitions = 30
winexp = []
exp3 = []
min_num_rounds = 0
max_num_rounds = 5000
step = 5
num_adaptive = 4
rounds = [T for T in range(min_num_rounds, max_num_rounds)]

#initialize the bidders once for the maximum number of rounds
T = max_num_rounds
(num_bidders, num_slots, outcome_space, rank_scores, ctr, reserve, values,
 threshold, noise) = set_auction_params(T, num_repetitions)
# bids of the "adversaries" are considered fixed
# bids size now: num_auctions x T x num_bidders
bids = []
for t in range(0, T):
    bids.append([np.random.uniform(0, 1) for i in range(0, num_bidders)])

# Preferred Discretizations for the learner
epsilon = 0.01
Example #33
    def visualize():
        """Plot six audio features for two given song IDs

        This function takes a request for two song IDs and two labels for
        those IDs to display on the chart. The chosen audio features to
        display are:

            acousticness, danceability, energy,
            instrumentalness, liveness, valence

        The values are plotted on a radar chart and were chosen because they
        all range from 0 to 1. Only tracks in the AWS database can currently
        be searched, but extra work could be done to take in any track ID.

        Returns
        -------
        png
            a png image file of the radar chart for the given IDs and labels
        """
        plt.switch_backend('Agg')  # avoid error from matplotlib

        id_a = request.args.get('id_a',
                                default='06w9JimcZu16KyO3WXR459',
                                type=str)
        id_b = request.args.get('id_b',
                                default='6XzyAQs2jU4PMWmnFlEQLW',
                                type=str)
        label_a = request.args.get('label_a', default=id_a, type=str)
        label_b = request.args.get('label_b', default=id_b, type=str)

        track_a = Track.query.filter(Track.track_id == id_a).first()
        track_b = Track.query.filter(Track.track_id == id_b).first()

        track_df = pd.DataFrame([track_a.to_dict(), track_b.to_dict()])

        if label_a == id_a:
            label_a = f"{track_df.loc[0]['track_name'][:30]}"

        if label_b == id_b:
            label_b = f"{track_df.loc[1]['track_name'][:30]}"

        vis_labels = [label_a, label_b]

        labels = [
            'acousticness', 'danceability', 'energy', 'instrumentalness',
            'liveness', 'valence'
        ]

        num_vals = len(labels)
        angles = [n / float(num_vals) * 2 * np.pi for n in range(num_vals)]
        angles += angles[:1]  # make cyclic to connect vertices in polygon

        # Set figure settings
        fig, ax = plt.subplots(figsize=(6, 6), subplot_kw=dict(polar=True))
        ax.set_theta_offset(np.pi / 2)
        ax.set_theta_direction(-1)
        ax.set_thetagrids(np.degrees(angles), labels)
        ax.set_rlabel_position(0)
        ax.set_yticks([0.20, 0.40, 0.60, 0.80])
        ax.set_yticklabels(['0.20', '0.40', '0.60', '0.80'])
        ax.set_ylim(0, 1)

        # Plot and fill the radar polygons
        feature_df = track_df[labels]
        colors = ['#EF019F', '#780150']
        for i, color in enumerate(colors):
            values = feature_df.loc[i].values.flatten().tolist()
            values += values[:1]  # make cyclic to connect vertices in polygon
            ax.plot(angles,
                    values,
                    color=color,
                    linewidth=1,
                    linestyle='solid',
                    label=vis_labels[i])
            ax.fill(angles, values, color=color, alpha=0.25)

            # Set feature labels so they don't overlap the chart
            for label, angle in zip(ax.get_xticklabels(), angles):
                if angle in [0, np.pi]:
                    label.set_horizontalalignment('center')
                elif 0 < angle < np.pi:
                    label.set_horizontalalignment('left')
                else:
                    label.set_horizontalalignment('right')
        ax.legend(loc='best')

        # Save the figure as an image to output on the app
        pic_bytes = io.BytesIO()
        plt.savefig(pic_bytes, format='png')
        pic_bytes.seek(0)
        data = base64.b64encode(pic_bytes.read()).decode('ascii')
        plt.clf()
        return f"<img src='data:image/png;base64,{data}'>"
Example #34
def do_evaluation(model, data_loader, device, types, output_dir, iteration=None, viz=False):
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    dataset = data_loader.dataset
    header = 'Testing {}:'.format(dataset.dataset_name)
    results_dict = {}
    has_mask = False
    for images, img_metas, targets in metric_logger.log_every(data_loader, 10, header):
        assert len(targets) == 1
        images = images.to(device)

        model_time = time.time()
        det = model(images, img_metas)[0]
        boxes, scores, labels = det['boxes'], det['scores'], det['labels']

        model_time = time.time() - model_time

        img_meta = img_metas[0]
        scale_factor = img_meta['scale_factor']
        img_info = img_meta['img_info']

        if viz:
            import matplotlib.pyplot as plt
            import matplotlib.patches as patches
            plt.switch_backend('TkAgg')
            image = de_normalize(images[0], img_meta)
            plt.subplot(122)
            plt.imshow(image)
            plt.title('Predict')
            for i, ((x1, y1, x2, y2), label) in enumerate(zip(boxes.tolist(), labels.tolist())):
                if scores[i] > 0.65:
                    rect = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, facecolor='none', edgecolor='g')
                    category_id = dataset.label2cat[label]
                    plt.text(x1, y1, '{}:{:.2f}'.format(dataset.CLASSES[category_id], scores[i]), color='r')
                    plt.gca().add_patch(rect)

            plt.subplot(121)
            plt.imshow(image)
            plt.title('GT')
            for i, ((x1, y1, x2, y2), label) in enumerate(zip(targets[0]['boxes'].tolist(), targets[0]['labels'].tolist())):
                category_id = dataset.label2cat[label]
                rect = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, facecolor='none', edgecolor='g')
                plt.text(x1, y1, '{}'.format(dataset.CLASSES[category_id]))
                plt.gca().add_patch(rect)
            plt.show()

        boxes /= scale_factor
        result = {}

        if 'masks' in det:
            has_mask = True
            (w, h) = img_meta['origin_img_shape']
            masks = paste_masks_in_image(det['masks'], boxes, (h, w))
            rles = []
            for mask in masks.cpu().numpy():
                mask = mask >= 0.5
                mask = mask_util.encode(np.array(mask[0][:, :, None], order='F', dtype='uint8'))[0]
                # "counts" is an array encoded by mask_util as a byte-stream. Python3's
                # json writer which always produces strings cannot serialize a bytestream
                # unless you decode it. Thankfully, utf-8 works out (which is also what
                # the pycocotools/_mask.pyx does).
                mask['counts'] = mask['counts'].decode('utf-8')
                rles.append(mask)
            result['masks'] = rles

        boxes = boxes.tolist()
        labels = labels.tolist()
        labels = [dataset.label2cat[label] for label in labels]
        scores = scores.tolist()

        result['boxes'] = boxes
        result['scores'] = scores
        result['labels'] = labels

        # save_visualization(dataset, img_meta, result, output_dir)

        results_dict.update({
            img_info['id']: result
        })
        metric_logger.update(model_time=model_time)

    if get_world_size() > 1:
        dist.barrier()

    predictions = _accumulate_predictions_from_multiple_gpus(results_dict)
    if not is_main_process():
        return {}
    results = {}
    if has_mask:
        result = coco_evaluation(dataset, predictions, output_dir, iteration=iteration)
        results.update(result)
    if 'voc' in types:
        result = voc_evaluation(dataset, predictions, output_dir, iteration=iteration, use_07_metric=False)
        results.update(result)
    return results
Example #35
def plotView():

    plt.switch_backend('Agg')

    url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
    cc_url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/web-data/data/cases_country.csv'

    df = pd.read_csv(url)
    dfc = pd.read_csv(cc_url)

    #Filter by Australia
    aus_df = df.loc[(df['Country/Region'] == 'Australia')]

    aus_dfc = dfc.loc[(dfc['Country_Region'] == 'Australia')]
    aus_dfc = aus_dfc.drop([
        'Lat', 'Long_', 'People_Tested', 'People_Hospitalized',
        'Mortality_Rate', 'UID', 'ISO3', 'Deaths', 'Recovered', 'Active',
        'Incident_Rate'
    ],
                           axis=1)
    aus_dfc.rename(columns={'Country_Region': 'Country/Region'}, inplace=True)

    #Convert to date, assign to variable, drop column
    aus_dfc['Last_Update'] = pd.to_datetime(aus_dfc['Last_Update'])
    updatetime = aus_dfc['Last_Update'].dt.tz_localize('utc').dt.tz_convert(
        'Australia/Melbourne')
    updatetime = updatetime.dt.tz_localize(None)
    updatetime = updatetime.values[0]
    updatetime = pd.to_datetime(updatetime)
    aus_dfc = aus_dfc.drop(['Last_Update'], axis=1)

    #Sum all states
    aus_df = aus_df.groupby(['Country/Region']).sum()

    #Remove Lat and Long columns
    aus_df.drop(['Lat', 'Long'], axis=1, inplace=True)

    aus_df = pd.merge(aus_df, aus_dfc, on='Country/Region')
    aus_df = aus_df.groupby(['Country/Region']).sum()

    #Show the difference between days (new cases)
    #aus_df = aus_df.diff(axis=1)

    #Transpose data (days as rows instead of columns)
    aus_df = aus_df.T

    #Show the difference between days (new cases)
    aus_df['New Cases'] = aus_df['Australia'] - aus_df['Australia'].shift(1)

    test = datetime.strftime(datetime.today() - timedelta(1), '%-m/%-d/%y')
    test2 = aus_df.index[-2]

    if aus_df.index[-2] < datetime.strftime(datetime.today() - timedelta(1),
                                            '%-m/%-d/%y'):
        aus_df.rename(
            {
                aus_df.index[-1]:
                datetime.strftime(datetime.today() - timedelta(1),
                                  '%-m/%-d/%y')
            },
            inplace=True)
        aus_df.index = pd.to_datetime(aus_df.index)
    else:
        aus_df.rename({aus_df.index[-1]: datetime.today()}, inplace=True)
        aus_df.index = pd.to_datetime(aus_df.index)

    #Reset the index
    aus_df = aus_df.reset_index()

    #Rename the Date column
    aus_df.rename(columns={'index': 'Date'}, inplace=True)

    #Convert from string to datetime
    aus_df['Date'] = pd.to_datetime(aus_df['Date'])

    #Set beginning of data
    aus_df = aus_df[(aus_df['Date'] > '02-29-2020')]

    #Format date
    aus_df['Date'] = aus_df['Date'].dt.strftime('%d/%m')

    #Prepare for secondary Y axis
    fig = make_subplots(specs=[[{"secondary_y": True}]])

    # Add traces
    fig.add_trace(
        go.Bar(x=aus_df['Date'], y=aus_df['New Cases'], name="New Cases"),
        secondary_y=False,
    )

    fig.add_trace(
        go.Scatter(x=aus_df['Date'], y=aus_df['Australia'],
                   name="Total Cases"),
        secondary_y=True,
    )

    # Add figure title
    fig.update_layout(
        title_text=
        "New and Overall Cases in Australia (data from Johns Hopkins, last updated {})"
        .format(updatetime))

    # Set x-axis title
    fig.update_xaxes(title_text="Date")

    # Set y-axes titles
    fig.update_yaxes(title_text="New Cases", secondary_y=False)
    fig.update_yaxes(title_text="Total Cases",
                     secondary_y=True,
                     rangemode='tozero')
    # fig.update_xaxes(
    # rangeslider_visible=True,
    #     rangeselector=dict(
    #         buttons=list([
    #             dict(count=1, label="1m", step="month", stepmode="backward"),
    #             dict(count=6, label="6m", step="month", stepmode="backward"),
    #             dict(count=1, label="YTD", step="year", stepmode="todate"),
    #             dict(count=1, label="1y", step="year", stepmode="backward"),
    #             dict(step="all")
    #         ])
    #     )
    # )

    fig.layout.template = 'plotly_white'

    fig.write_html("./templates/file.html")
    # fig.write_html("./index.html")

    return render_template('file.html')  #, url ='/static/images/aus_new.png')
Example #36
import os
import sys
import scipy.io as spio
import numpy as np
from datetime import datetime, timedelta
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from matplotlib.colors import ListedColormap
from matplotlib.dates import DateFormatter, DayLocator, HourLocator, \
    MinuteLocator, date2num
plt.switch_backend('Agg')


def celltolist(xtickstr):
    """
    convert list of list to list of string.

    Examples
    --------

    [['2010-10-11'], [], ['2011-10-12]] =>
    ['2010-10-11], '', '2011-10-12']
    """

    tmp = []
    for iElement in range(0, len(xtickstr)):
        if not len(xtickstr[iElement][0]):
            tmp.append('')
        else:
            tmp.append(xtickstr[iElement][0][0])
    return tmp
Example #37
    def __enter__(self):
        plt.ioff()
        self._old_backend = plt.get_backend()
        plt.switch_backend(self._backend)
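Examples #31 and #37 are the __exit__ and __enter__ halves of a backend-switching context manager. A self-contained sketch of the full pattern they imply (the class name is illustrative):

import matplotlib.pyplot as plt

class switched_backend:
    """Temporarily switch the matplotlib backend, restoring it on exit."""

    def __init__(self, backend):
        self._backend = backend

    def __enter__(self):
        plt.ioff()                       # suspend interactive mode
        self._old_backend = plt.get_backend()
        plt.switch_backend(self._backend)
        return self

    def __exit__(self, *args):
        plt.switch_backend(self._old_backend)
        plt.ion()                        # restore interactive mode

with switched_backend('Agg'):
    plt.plot([1, 2, 3])
    plt.savefig('tmp.png')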
Example #38
"""
Code for preparing presentation stuff.
"""
import os
import re
import imageio
import numpy as np
import shutil
import matplotlib.pyplot as plt
from matplotlib import ticker
from scipy.stats import norm, uniform
import seaborn as sns
import matplotlib2tikz

from utility import MixtureModel

plt.switch_backend('Agg')
sns.set()
dpi = 300


def generate_polynomial_concept_images():
    """Generates examples of polynomials and how they look for use in presentations about the method."""
    np.random.seed(0)
    sns.set_style('darkgrid')
    figure, axes = plt.subplots(dpi=dpi)
    x_axis = np.arange(-1, 1, 0.001)
    axes.plot(x_axis, (-1 * (x_axis**4)) + (-1 * (x_axis**3)) +
              (2 * (x_axis**2)) + x_axis,
              color=sns.color_palette()[4])
    observation_color = sns.xkcd_rgb['medium grey']
    for observation in np.linspace(-1, 1, num=10):
Example #39
def iterate():  # def reconstructed from the iterate() call below; the snippet starts mid-file
    global bs
    dbs = bs * 0
    for x in numpy.arange(20) / 20 * math.pi / 2:
        for i in range(n):
            dbs[i] = lam * (exact(x) - approx(x)) * dapprox_dbi(x, i)
        bs += dbs


def plot():
    xs = numpy.arange(500) / 500 * math.pi / 2
    plt.plot(xs, [approx(x) for x in xs])
    plt.plot(xs, [exact(x) for x in xs])


if __name__ == '__main__':
    plt.switch_backend('agg')  # to run headless

    plot()
    plt.savefig("../12.approx_sin_initial.svg")
    plt.clf()

    for _ in range(3):
        plot()
        for __ in range(10):
            iterate()

    plt.savefig("../12.approx_sin.svg")

    plt.clf()
    plot()
Example #40
# Author: [email protected]
# Date: 2020.8.2
#

import os
import sys
import getopt
import re
import matplotlib.pyplot as plt
plt.switch_backend('agg')  # for GUI-less servers
import json


def grep(tstr, file):
    with open(file) as frp:
        lines = frp.readlines()
    targets = []
    for line in lines:
        if tstr in line:
            line = line.replace('\n', '')
            targets.append(line)
    return targets


def main(argv):
    # +----------------------+
    # | Command Line Options |
    # +----------------------+
    min_plot_energy = -6
    max_plot_energy = 6
    plot_format = 'png'
Example #41
def main(argv):
    inps = cmdLineParse()
    if not inps.disp_fig:
        plt.switch_backend('Agg')
    print('\n******************** Plot Network **********************')

    # Output figure name
    figName1 = 'BperpHist' + inps.fig_ext
    figName2 = 'Network' + inps.fig_ext
    if 'Modified' in inps.file:
        figName1 = 'BperpHist_Modified' + inps.fig_ext
        figName2 = 'Network_Modified' + inps.fig_ext

    ##### 1. Read Info
    # Read dateList and bperpList
    ext = os.path.splitext(inps.file)[1]
    if ext in ['.h5']:
        k = readfile.read_attribute(inps.file)['FILE_TYPE']
        print('reading date and perpendicular baseline from ' + k + ' file: ' +
              os.path.basename(inps.file))
        if k not in multi_group_hdf5_file:
            print('ERROR: only the following file types are supported:\n' +
                  str(multi_group_hdf5_file))
            sys.exit(1)
        Bp = ut.Baseline_timeseries(inps.file)
        date8List = ptime.igram_date_list(inps.file)
        date6List = ptime.yymmdd(date8List)
    else:
        print('reading date and perpendicular baseline from baseline list file: ' + inps.bl_list_file)
        date8List, Bp = pnet.read_baseline_file(inps.bl_list_file)[0:2]
        date6List = ptime.yymmdd(date8List)
    print('number of acquisitions: ' + str(len(date8List)))

    # Read Pairs Info
    print('reading pairs info from file: ' + inps.file)
    date12_list = pnet.get_date12_list(inps.file)
    pairs_idx = pnet.date12_list2index(date12_list, date6List)
    print('number of pairs       : ' + str(len(pairs_idx)))

    # Read Coherence List
    inps.coherence_list = None
    if inps.coherence_file and os.path.isfile(inps.coherence_file):
        ext = os.path.splitext(inps.coherence_file)[1]
        if ext in ['.h5']:
            listFile = os.path.splitext(
                inps.coherence_file)[0] + '_spatialAverage.list'
            if os.path.isfile(listFile):
                print('reading coherence value from existing ' + listFile)
                fcoh = np.loadtxt(listFile, dtype=str)
                inps.coherence_list = [float(i) for i in fcoh[:, 1]]
                coh_date12_list = [i for i in fcoh[:, 0]]
            else:
                print('calculating average coherence value from ' + inps.coherence_file)
                inps.coherence_list = ut.spatial_average(inps.coherence_file,
                                                         saveList=True)
                coh_date12_list = pnet.get_date12_list(inps.coherence_file)
        else:
            print('reading coherence value from ' + inps.coherence_file)
            fcoh = np.loadtxt(inps.coherence_file, dtype=str)
            inps.coherence_list = [float(i) for i in fcoh[:, 1]]
            coh_date12_list = [i for i in fcoh[:, 0]]
        # Check length of coherence file and input file
        if not set(coh_date12_list) == set(date12_list):
            print('WARNING: input coherence list has different pairs/date12 from input file')
            print('turn off the color plotting of interferograms based on coherence')
            inps.coherence_list = None

    ##### 2. Plot
    # Fig 1 - Baseline History
    fig1 = plt.figure(1)
    ax1 = fig1.add_subplot(111)
    ax1 = pnet.plot_perp_baseline_hist(ax1, date8List, Bp, vars(inps))

    if inps.save_fig:
        fig1.savefig(figName1, bbox_inches='tight')
        print('save figure to ' + figName1)

    # Fig 2 - Interferogram Network
    fig2 = plt.figure(2)
    ax2 = fig2.add_subplot(111)
    ax2 = pnet.plot_network(ax2, pairs_idx, date8List, Bp, vars(inps))

    if inps.save_fig:
        fig2.savefig(figName2, bbox_inches='tight')
        print('save figure to ' + figName2)

    if inps.save_list:
        txtFile = os.path.splitext(inps.file)[0] + '_date12.list'
        np.savetxt(txtFile, date12_list, fmt='%s')
        print('save pairs/date12 info to file: ' + txtFile)

    if inps.disp_fig:
        plt.show()
Example #42
def create_NNF_vectors_image(source,
                             target,
                             f,
                             patch_size,
                             server=True,
                             subsampling=100,
                             line_width=0.5,
                             line_color='k',
                             tmpdir='./'):
    """
    Display the nearest-neighbour field as a sparse vector field between source and target images
    """
    import matplotlib.pyplot as plt

    # get the shape of the source image
    im_shape = source.shape

    # if you are using matplotlib on a server
    if server:
        plt.switch_backend('agg')
    import matplotlib.patches as patches

    fig = plt.figure(frameon=False)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)

    source = cv.cvtColor(source, cv.COLOR_BGR2RGB)
    target = cv.cvtColor(target, cv.COLOR_BGR2RGB)

    # create an image that contains the source and target side by side
    plot_im = np.concatenate((source, target), axis=1)
    ax.imshow(plot_im)

    vector_coords = make_coordinates_matrix(im_shape, step=subsampling)
    vshape = vector_coords.shape
    vector_coords = np.reshape(vector_coords, (vshape[0] * vshape[1], 2))

    for coord in vector_coords:
        rect = patches.Rectangle(
            (coord[1] - patch_size / 2.0, coord[0] - patch_size / 2.0),
            patch_size,
            patch_size,
            linewidth=line_width,
            edgecolor=line_color,
            facecolor='none')
        ax.add_patch(rect)

        arrow = patches.Arrow(coord[1],
                              coord[0],
                              f[coord[0], coord[1], 1] + im_shape[1],
                              f[coord[0], coord[1], 0],
                              lw=line_width,
                              edgecolor=line_color)
        ax.add_patch(arrow)

    dpi = fig.dpi
    fig.set_size_inches(im_shape[1] * 2 / dpi, im_shape[0] / dpi)
    tmp_image = tmpdir + '/tmpvecs.png'
    fig.savefig(tmp_image)
    plt.close(fig)
    return tmp_image
Example #43
from collections import Counter
from numpy import mean
from scipy.spatial.distance import cosine
from scipy.stats import pearsonr
import random
import matplotlib.pyplot as plt

plt.switch_backend('TkAgg')  # change the backend in case a different one is configured, so that plt.show() works

fig, ax = plt.subplots()

# We need to draw the canvas, otherwise the labels won't be positioned and won't have values yet.
fig.canvas.draw()


def create_user_version(tA, tB):
    user_A_ratings = random.randrange(
        0, 101)  # The number of movies this version of user A rated
    user_B_ratings = random.randrange(
        0, 101)  # The number of movies this version of user B rated

    A_indexes = []  # The indexes of the movies for A
    B_indexes = []  # The indexes of the movies for B
    A_version = []  # This version of A
    B_version = []  # This version of B

    for m in range(user_A_ratings):  # Get a random rating from A
        temp_index = random.randrange(0, len(tA))
        while (temp_index in A_indexes):
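The example above is cut off mid-loop, but the "while (temp_index in A_indexes)" pattern is rejection sampling for movie indexes that have not been drawn yet. A minimal hedged sketch of the same idea, with all names hypothetical: random.sample draws distinct indexes in a single call, so no retry loop is needed.

import random

def sample_rated_indexes(num_movies, num_ratings):
    """Pick num_ratings distinct movie indexes out of num_movies."""
    # random.sample draws without replacement, so no membership test is needed
    return random.sample(range(num_movies), num_ratings)

A_indexes = sample_rated_indexes(num_movies=200, num_ratings=random.randrange(0, 101))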
Example #44
0
def logp_hist(f, args, device):
    import matplotlib.pyplot as plt
    import seaborn as sns
    import torch as t  # aliases below are assumed by the function body
    import torch.nn as nn
    import torchvision as tv
    import torchvision.transforms as tr
    from torch.utils.data import DataLoader
    sns.set()
    plt.switch_backend('agg')

    def sample(x, n_steps=args.n_steps):
        x_k = t.autograd.Variable(x.clone(), requires_grad=True)
        # sgld
        for k in range(n_steps):
            f_prime = t.autograd.grad(f(x_k).sum(), [x_k],
                                      retain_graph=True)[0]
            x_k.data += f_prime + 1e-2 * t.randn_like(x_k)
        final_samples = x_k.detach()
        return final_samples

    def grad_norm(x):
        x_k = t.autograd.Variable(x, requires_grad=True)
        f_prime = t.autograd.grad(f(x_k).sum(), [x_k], retain_graph=True)[0]
        grad = f_prime.view(x.size(0), -1)
        return grad.norm(p=2, dim=1)

    def score_fn(x):
        if args.score_fn == "px":
            return f(x).detach().cpu()
        elif args.score_fn == "py":
            return nn.Softmax(dim=1)(f.classify(x)).max(1)[0].detach().cpu()
        elif args.score_fn == "pxgrad":
            return -t.log(grad_norm(x).detach().cpu())
        elif args.score_fn == "refine":
            init_score = f(x)
            x_r = sample(x)
            final_score = f(x_r)
            delta = init_score - final_score
            return delta.detach().cpu()
        elif args.score_fn == "refinegrad":
            init_score = -grad_norm(x).detach()
            x_r = sample(x)
            final_score = -grad_norm(x_r).detach()
            delta = init_score - final_score
            return delta.detach().cpu()
        elif args.score_fn == "refinel2":
            x_r = sample(x)
            norm = (x - x_r).view(x.size(0), -1).norm(p=2, dim=1)
            return -norm.detach().cpu()
        else:
            return f.classify(x).max(1)[0].detach().cpu()

    transform_test = tr.Compose([
        tr.ToTensor(),
        tr.Normalize((.5, .5, .5), (.5, .5, .5)),
        lambda x: x + args.sigma * t.randn_like(x)
    ])
    datasets = {
        "cifar10":
        tv.datasets.CIFAR10(root="../data",
                            transform=transform_test,
                            download=True,
                            train=False),
        "svhn":
        tv.datasets.SVHN(root="../data",
                         transform=transform_test,
                         download=True,
                         split="test"),
        "cifar100":
        tv.datasets.CIFAR100(root="../data",
                             transform=transform_test,
                             download=True,
                             train=False),
        "celeba":
        tv.datasets.ImageFolder(root="/scratch/gobi1/gwohl/CelebA/splits",
                                transform=tr.Compose([
                                    tr.Resize(32),
                                    tr.ToTensor(),
                                    tr.Normalize((.5, .5, .5), (.5, .5, .5)),
                                    lambda x: x + args.sigma * t.randn_like(x)
                                ]))
    }

    score_dict = {}
    for dataset_name in args.datasets:
        print(dataset_name)
        dataset = datasets[dataset_name]
        dataloader = DataLoader(dataset,
                                batch_size=100,
                                shuffle=True,
                                num_workers=4,
                                drop_last=False)
        this_scores = []
        for x, _ in dataloader:
            x = x.to(device)
            scores = score_fn(x)
            print(scores.mean())
            this_scores.extend(scores.numpy())
        score_dict[dataset_name] = this_scores

    for name, scores in score_dict.items():
        plt.hist(scores, label=name, bins=100, density=True, alpha=.5)
    plt.legend()
    plt.savefig(args.save_dir + "/fig.pdf")
Example #45
0
    def test_brachistochrone_upstream_state(self):
        import openmdao.api as om
        from openmdao.utils.assert_utils import assert_near_equal
        import dymos as dm

        import matplotlib.pyplot as plt
        plt.switch_backend('Agg')

        from dymos.examples.brachistochrone.brachistochrone_ode import BrachistochroneODE

        #
        # Define the OpenMDAO problem
        #
        p = om.Problem(model=om.Group())

        # Instantiate the transcription so we can get the number of nodes from it while
        # building the problem.
        tx = dm.GaussLobatto(num_segments=10, order=3)

        # Add an indep var comp to provide the external value of x0
        ivc = p.model.add_subsystem('states_ivc',
                                    om.IndepVarComp(),
                                    promotes_outputs=['*'])

        # Add an output to provide the value of x0.
        ivc.add_output('x0', shape=(1, ), units='m')

        # Connect x0 to the state error component so we can constrain the given value of x0
        # to be equal to the value chosen in the phase.
        p.model.connect('x0', 'state_error_comp.x0_target')
        p.model.connect('traj.phase0.timeseries.states:x',
                        'state_error_comp.x0_actual',
                        src_indices=[0],
                        flat_src_indices=True)

        #
        # Define a Trajectory object
        #
        traj = dm.Trajectory()

        p.model.add_subsystem('traj', subsys=traj)

        p.model.add_subsystem(
            'state_error_comp',
            om.ExecComp('x0_error = x0_target - x0_actual',
                        x0_error={'units': 'm'},
                        x0_target={'units': 'm'},
                        x0_actual={'units': 'm'}))

        p.model.add_constraint('state_error_comp.x0_error', equals=0.0)

        #
        # Define a Dymos Phase object with GaussLobatto Transcription
        #
        phase = dm.Phase(ode_class=BrachistochroneODE, transcription=tx)

        traj.add_phase(name='phase0', phase=phase)

        #
        # Set the time options
        # Time has no targets in our ODE.
        # We fix the initial time so that it is not a design variable in the optimization.
        # The duration of the phase is allowed to be optimized, but is bounded on [0.5, 10].
        #
        phase.set_time_options(fix_initial=True,
                               duration_bounds=(0.5, 10.0),
                               units='s')

        #
        # Set the state options
        # The initial value of x is left free here; it is instead constrained to x0
        # through the state error component defined above.
        # Initial values of y and v are fixed. The final values of x and y are fixed,
        # but the final velocity is a free variable.
        # The equations of motion are not functions of position, so 'x' and 'y' have no targets.
        # The rate source points to the output in the ODE which provides the time derivative of the
        # given state.
        phase.add_state('x',
                        fix_initial=False,
                        fix_final=True,
                        units='m',
                        rate_source='xdot')
        phase.add_state('y',
                        fix_initial=True,
                        fix_final=True,
                        units='m',
                        rate_source='ydot')
        phase.add_state('v',
                        fix_initial=True,
                        fix_final=False,
                        units='m/s',
                        rate_source='vdot',
                        targets=['v'])

        # Define theta as a control.
        # Use opt=False to allow it to be connected to an external source.
        # Arguments lower and upper are no longer valid for an input control.
        phase.add_control(name='theta', units='rad', targets=['theta'])

        # Minimize final time.
        phase.add_objective('time', loc='final')

        # Set the driver.
        p.driver = om.ScipyOptimizeDriver()

        # Allow OpenMDAO to automatically determine our sparsity pattern.
        # Doing so can significantly speed up the execution of Dymos.
        p.driver.declare_coloring()

        # Setup the problem
        p.setup(check=True)

        # Now that the OpenMDAO problem is setup, we can set the values of the states.
        p.set_val('x0', 0.0, units='m')

        # Here we're intentionally setting the initial x value to something other than zero, just
        # to demonstrate that the optimizer brings it back in line with the value of x0 set above.
        p.set_val('traj.phase0.states:x',
                  phase.interp('x', [1, 10]),
                  units='m')

        p.set_val('traj.phase0.states:y',
                  phase.interp('y', [10, 5]),
                  units='m')

        p.set_val('traj.phase0.states:v',
                  phase.interp('v', [0, 5]),
                  units='m/s')

        p.set_val('traj.phase0.controls:theta',
                  phase.interp('theta', [90, 90]),
                  units='deg')

        # Run the driver to solve the problem
        dm.run_problem(p, make_plots=True)

        # Test the results
        assert_near_equal(p.get_val('traj.phase0.timeseries.time')[-1],
                          1.8016,
                          tolerance=1.0E-3)

        # Check the validity of our results by using scipy.integrate.solve_ivp to
        # integrate the solution.
        sim_out = traj.simulate()

        # Plot the results
        fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 4.5))

        axes[0].plot(p.get_val('traj.phase0.timeseries.states:x'),
                     p.get_val('traj.phase0.timeseries.states:y'),
                     'ro',
                     label='solution')

        axes[0].plot(sim_out.get_val('traj.phase0.timeseries.states:x'),
                     sim_out.get_val('traj.phase0.timeseries.states:y'),
                     'b-',
                     label='simulation')

        axes[0].set_xlabel('x (m)')
        axes[0].set_ylabel('y (m)')
        axes[0].legend()
        axes[0].grid()

        axes[1].plot(p.get_val('traj.phase0.timeseries.time'),
                     p.get_val('traj.phase0.timeseries.controls:theta',
                               units='deg'),
                     'ro',
                     label='solution')

        axes[1].plot(sim_out.get_val('traj.phase0.timeseries.time'),
                     sim_out.get_val('traj.phase0.timeseries.controls:theta',
                                     units='deg'),
                     'b-',
                     label='simulation')

        axes[1].set_xlabel('time (s)')
        axes[1].set_ylabel(r'$\theta$ (deg)')
        axes[1].legend()
        axes[1].grid()

        plt.show()
Example #46
0
def visualize_dataset(data_dir, modality='mri', threeD=True, save_as_nifti=False):
    tensors = load_data(fname=os.path.join(data_dir, 'standardized_data_set.npz'))

    if save_as_nifti:
        reference_img = nib.load(os.path.join(data_dir, 'reference.nii'))
        coordinate_space = reference_img.affine

    n_subj = tensors[0].shape[0]
    print('Processing ', n_subj, 'subjects')

    settings = ['train', 'test']

    if threeD:
        batch_size = 1
    else:
        batch_size = tensors[0].shape[2]


    ct_sets, mri_sets = generate_loaders(tensors, batch_size=batch_size, threeD=threeD)

    print('Using modality', modality)
    if modality == 'mri':
        sets = mri_sets
    elif modality == 'ct':
        sets = ct_sets
    else:
        print('Modality has to be one of: ct, mri.', modality, 'is not valid.')
        return

    for setting in settings:
        loader = sets[setting]
        batches = [(inputs, outputs) for (inputs, outputs) in tqdm(loader)]  # avoid shadowing builtin list

        plt.switch_backend('agg')
        ncol = 6
        nrow = n_subj + 2
        figure = plt.figure(figsize=(ncol + 1, nrow + 1))
        gs = gridspec.GridSpec(nrow, ncol,
                               wspace=1, hspace=0.25,
                               top=1. - 0.5 / (nrow + 1), bottom=0.5 / (nrow + 1),
                               left=0.5 / (ncol + 1), right=1 - 0.5 / (ncol + 1))
        print('Batches:', len(batches))

        for subj in range(len(batches)):
            subj_data = batches[subj]
            print('i/o', len(subj_data))
            # input data (shape n_batch, n_c, x ,y , z)
            print('input', subj_data[0].shape)
            if not threeD:
                subj_data = (subj_data[0].permute(1, 2, 3, 0).unsqueeze(0), subj_data[1].permute(1, 2, 3, 0).unsqueeze(0))
            print('input', subj_data[0].shape)
            for channel in range(subj_data[0].shape[1]):
                visual_add_center(subj_data[0][0, channel], subj, channel, gs)
                if save_as_nifti:
                    binary_img = nib.Nifti1Image(subj_data[0].squeeze().permute(1,2,3,0).numpy(), affine=coordinate_space)
                    nib.save(binary_img, os.path.join(data_dir, str(subj) + '_' + setting + '_mri.nii'))
            # add output
            visual_add_center(subj_data[1][0, 0], subj, channel + 1, gs)
            if save_as_nifti:
                binary_img = nib.Nifti1Image(subj_data[1][0, 0].numpy(), affine=coordinate_space)
                nib.save(binary_img, os.path.join(data_dir, str(subj) + '_' + setting + '_GT.nii'))


        plt.ioff()
        plt.switch_backend('agg')
        figure_path = os.path.join(data_dir, modality + '_' + setting + '_dataloader_visualisation.svg')
        figure.savefig(figure_path, dpi='figure', format='svg')
        plt.close(figure)
Example #47
0
def train(dataset,
          model,
          args,
          same_feat=True,
          val_dataset=None,
          test_dataset=None,
          writer=None,
          mask_nodes=True):
    writer_batch_idx = [0, 3, 6, 9]

    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                        model.parameters()),
                                 lr=0.001)
    iter = 0
    best_val_result = {'epoch': 0, 'loss': 0, 'acc': 0}
    test_result = {'epoch': 0, 'loss': 0, 'acc': 0}
    train_accs = []
    train_epochs = []
    best_val_accs = []
    best_val_epochs = []
    test_accs = []
    test_epochs = []
    for epoch in range(args.num_epochs):
        begin_time = time.time()
        avg_loss = 0.0
        model.train()
        print('Epoch: ', epoch)
        for batch_idx, data in enumerate(dataset):
            model.zero_grad()
            adj = Variable(data['adj'].float(), requires_grad=False).cuda()
            h0 = Variable(data['feats'].float(), requires_grad=False).cuda()
            label = Variable(data['label'].long()).cuda()
            batch_num_nodes = data['num_nodes'].int().numpy(
            ) if mask_nodes else None
            assign_input = Variable(data['assign_feats'].float(),
                                    requires_grad=False).cuda()

            ypred = model(h0, adj, batch_num_nodes, assign_x=assign_input)
            if not args.method == 'soft-assign' or not args.linkpred:
                loss = model.loss(ypred, label)
            else:
                loss = model.loss(ypred, label, adj, batch_num_nodes)
            loss.backward()
            nn.utils.clip_grad_norm_(model.parameters(), args.clip)
            optimizer.step()
            iter += 1
            avg_loss += loss
            #if iter % 20 == 0:
            #    print('Iter: ', iter, ', loss: ', loss.data[0])

            # log once per XX epochs
            if epoch % 10 == 0 and batch_idx == len(
                    dataset
            ) // 2 and args.method == 'soft-assign' and writer is not None:
                log_assignment(model.assign_tensor, writer, epoch,
                               writer_batch_idx)
                log_graph(adj, batch_num_nodes, writer, epoch,
                          writer_batch_idx, model.assign_tensor)
        avg_loss /= batch_idx + 1
        elapsed = time.time() - begin_time
        if writer is not None:
            writer.add_scalar('loss/avg_loss', avg_loss, epoch)
            if args.linkpred:
                writer.add_scalar('loss/linkpred_loss', model.link_loss, epoch)
        print('Avg loss: ', avg_loss, '; epoch time: ', elapsed)
        result = evaluate(dataset,
                          model,
                          args,
                          name='Train',
                          max_num_examples=100)
        train_accs.append(result['acc'])
        train_epochs.append(epoch)
        if val_dataset is not None:
            val_result = evaluate(val_dataset, model, args, name='Validation')
            # guard: val_result is only defined when a validation set is given
            if val_result['acc'] > best_val_result[
                    'acc'] - 1e-7 or epoch > args.num_epochs * 0.2:
                best_val_result['acc'] = val_result['acc']
                best_val_result['epoch'] = epoch
                best_val_result['loss'] = avg_loss
                test_result = evaluate(test_dataset, model, args, name='Test')
                test_result['epoch'] = epoch
        if writer is not None:
            writer.add_scalar('acc/train_acc', result['acc'], epoch)
            if val_dataset is not None:
                writer.add_scalar('acc/val_acc', val_result['acc'], epoch)
            writer.add_scalar('loss/best_val_loss', best_val_result['loss'],
                              epoch)
            writer.add_scalar('acc/test_acc', test_result['acc'], epoch)

        print('Best val result: ', best_val_result)
        print('Test result: ', test_result)
        best_val_epochs.append(best_val_result['epoch'])
        best_val_accs.append(best_val_result['acc'])
        test_epochs.append(test_result['epoch'])
        test_accs.append(test_result['acc'])

    matplotlib.style.use('seaborn')
    plt.switch_backend('agg')
    plt.figure()
    plt.plot(train_epochs, util.exp_moving_avg(train_accs, 0.85), '-', lw=1)
    plt.plot(best_val_epochs, best_val_accs, 'bo', test_epochs, test_accs,
             'go')
    plt.legend(['train', 'val', 'test'])
    plt.savefig(gen_train_plt_name(args), dpi=600)
    plt.close()
    matplotlib.style.use('default')

    return model
Example #48
0
def show_the_model(args):
    """Show the problem"""
    matplotlib.use('TkAgg')
    plt.switch_backend('TkAgg')
    env, env_name = model_build(fix=args.fix,
                                small=args.small,
                                medium=args.medium,
                                medhard=args.medhard)
    config = pu.get_train_gym_config(env_name=env_name,
                                     seed=np.random.randint(10000))
    sim_n = 200
    if args.small:
        sim_n = 200
    if args.medium:
        sim_n = 500
    valid_x0 = None  # use this to load validation set
    if args.valid:
        valid_x0 = np.load(VALIDATION_SET)['x0']
        sim_n = valid_x0.shape[0]
    v_x0, v_xf, v_traj = pu.policy_rollout(env,
                                           config,
                                           sim_n,
                                           show=args.render,
                                           return_traj=True,
                                           valid_x0=valid_x0)
    v_xf_norm = np.linalg.norm(v_xf, axis=1)
    level = [0.1, 0.5, 1, 2]
    for level_ in level:
        print('level %f count %d' % (level_, np.sum(v_xf_norm < level_)))
    # find closest state
    v_min_state_norm = np.zeros(sim_n)
    for i in range(sim_n):
        v_min_state_norm[i] = np.amin(
            np.linalg.norm(v_traj[i]['state'], axis=1))
    datafnm = 'data/%s_all_rst.npz' % env_name
    figfnm = 'gallery/%s_rollout_xf.pdf' % env_name
    figfnm_min = 'gallery/%s_rollout_min_x.pdf' % env_name
    if args.small:
        datafnm = datafnm.replace('.npz', '_small.npz')
        figfnm = figfnm.replace('.pdf', '_small.pdf')
        figfnm_min = figfnm_min.replace('.pdf', '_small.pdf')
    if args.medium:
        datafnm = datafnm.replace('.npz', '_medium.npz')
        figfnm = figfnm.replace('.pdf', '_medium.pdf')
        figfnm_min = figfnm_min.replace('.pdf', '_medium.pdf')
    if args.medhard:
        datafnm = datafnm.replace('.npz', '_medhard.npz')
        figfnm = figfnm.replace('.pdf', '_medhard.pdf')
        figfnm_min = figfnm_min.replace('.pdf', '_medhard.pdf')
    if args.valid:
        datafnm = datafnm.replace('.npz', '_valid.npz')
        figfnm = figfnm.replace('.pdf', '_valid.pdf')
        figfnm_min = figfnm_min.replace('.pdf', '_valid.pdf')
    for level_ in level:
        print('min level %f count %d' %
              (level_, np.sum(v_min_state_norm < level_)))
    np.savez(datafnm, x0=v_x0, xf=v_xf, traj=v_traj)
    fig, ax = plt.subplots()
    ax.hist(v_xf_norm, bins='auto')
    ax.set_xlabel(r'$\|x_f\|$')
    ax.set_ylabel('Count')
    fig.savefig(figfnm)
    fig, ax = plt.subplots()
    ax.hist(v_min_state_norm, bins='auto')
    ax.set_xlabel(r'$\|x\|_{\min}$')
    ax.set_ylabel('Count')
    fig.savefig(figfnm_min)
    plt.show()
Example #49
0
    def _run_interface(self, runtime):
        from nilearn.input_data import NiftiLabelsMasker
        from nilearn.connectome import ConnectivityMeasure
        from sklearn.covariance import EmpiricalCovariance
        import numpy as np
        import pandas as pd
        import os
        import matplotlib.pyplot as plt
        from mne.viz import plot_connectivity_circle
        import re

        plt.switch_backend('Agg')

        # extract timeseries from every label
        masker = NiftiLabelsMasker(labels_img=self.inputs.atlas_file,
                                   standardize=True, verbose=1)
        timeseries = masker.fit_transform(self.inputs.timeseries_file)
        # create correlation matrix
        correlation_measure = ConnectivityMeasure(cov_estimator=EmpiricalCovariance(),
                                                  kind="correlation")
        correlation_matrix = correlation_measure.fit_transform([timeseries])[0]
        np.fill_diagonal(correlation_matrix, np.nan)

        # add the atlas labels to the matrix
        atlas_lut_df = pd.read_csv(self.inputs.atlas_lut, sep='\t')
        regions = atlas_lut_df['regions'].values
        correlation_matrix_df = pd.DataFrame(correlation_matrix, index=regions, columns=regions)

        # do a fisher's r -> z transform
        fisher_z_matrix_df = correlation_matrix_df.apply(_fisher_r_to_z)

        # write out the file.
        trial_regex = re.compile(r'.*desc-(?P<trial>[A-Za-z0-9]+)')
        title = re.search(trial_regex, self.inputs.timeseries_file).groupdict()['trial']
        template_name = 'desc-{trial}_correlation.{ext}'
        corr_mat_fname = template_name.format(trial=title, ext="tsv")
        corr_mat_path = os.path.join(runtime.cwd, corr_mat_fname)
        fisher_z_matrix_df.to_csv(corr_mat_path, sep='\t', na_rep='n/a')

        # save the filename in the outputs
        self._results['correlation_matrix'] = corr_mat_path

        # visualizations with mne
        connmat = fisher_z_matrix_df.values
        labels = list(fisher_z_matrix_df.index)

        # plot a circle visualization of the correlation matrix
        viz_mat_fname = template_name.format(trial=title, ext="svg")
        viz_mat_path = os.path.join(runtime.cwd, viz_mat_fname)

        n_lines = int(np.sum(connmat > 0) / 2)
        fig = plt.figure(figsize=(5, 5))

        plot_connectivity_circle(connmat, labels, n_lines=n_lines, fig=fig, title=title,
                                 fontsize_title=10, facecolor='white', textcolor='black',
                                 colormap='jet', colorbar=1, node_colors=['black'],
                                 node_edgecolor=['white'], show=False, interactive=False)

        fig.savefig(viz_mat_path, dpi=300)
        self._results['correlation_fig'] = viz_mat_path

        return runtime
Example #50
0
import json
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd

matplotlib.use("Agg")

plt.switch_backend("agg")
from numpy.polynomial import Polynomial
from panaxea.toolkit.Toolkit import depickle_from_lite

matplotlib.use("Qt4Agg")
"""
This script contains functions to calculate and visualize expected vs actual
growth curves and calculate errors.
"""


def convert_num_agents_to_volume(num_cancer_cell_agents,
                                 cancer_cells_per_agent, cancer_cell_volume):
    """
    Given an amount of cancer cell agents, converts this to the
    corresponding volume.

    Parameters
    ----------
    num_cancer_cell_agents : int
        The number of cancer cell agents present at a given time
    cancer_cells_per_agent : int
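The docstring above is truncated mid-parameter list, but it describes a direct multiplicative conversion from agent counts to tissue volume. A minimal sketch of what the body plausibly computes, offered as an assumption rather than the original implementation (the units in the usage line are hypothetical):

def convert_num_agents_to_volume(num_cancer_cell_agents,
                                 cancer_cells_per_agent, cancer_cell_volume):
    # assumed conversion: agents -> cells -> total volume
    return num_cancer_cell_agents * cancer_cells_per_agent * cancer_cell_volume

# e.g. 100 agents, 1000 cells per agent, 2e-9 mm^3 per cell
print(convert_num_agents_to_volume(100, 1000, 2e-9))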
Example #51
0
    print("saving list of files used by tests to %s" % fname)
    _EXPORT_DATAPATHS_FILE = open(fname, "w")
else:
    _EXPORT_DATAPATHS_FILE = None

if "--create-missing" in sys.argv:
    sys.argv.remove("--create-missing")
    print("Allowing creation of missing test results.")
    os.environ["IRIS_TEST_CREATE_MISSING"] = "true"

# Whether to display matplotlib output to the screen.
_DISPLAY_FIGURES = False

if MPL_AVAILABLE and "-d" in sys.argv:
    sys.argv.remove("-d")
    plt.switch_backend("tkagg")
    _DISPLAY_FIGURES = True

# Threading non re-entrant blocking lock to ensure thread-safe plotting.
_lock = threading.Lock()


def main():
    """A wrapper for unittest.main() which adds iris.test specific options to the help (-h) output."""
    if "-h" in sys.argv or "--help" in sys.argv:
        stdout = sys.stdout
        buff = io.StringIO()
        # NB. unittest.main() raises an exception after it's shown the help text
        try:
            sys.stdout = buff
            unittest.main()
Example #52
0
def log_graph(adj,
              batch_num_nodes,
              writer,
              epoch,
              batch_idx,
              assign_tensor=None):
    plt.switch_backend('agg')
    fig = plt.figure(figsize=(8, 6), dpi=300)

    for i in range(len(batch_idx)):
        ax = plt.subplot(2, 2, i + 1)
        num_nodes = batch_num_nodes[batch_idx[i]]
        adj_matrix = adj[
            batch_idx[i], :num_nodes, :num_nodes].cpu().data.numpy()
        G = nx.from_numpy_array(adj_matrix)
        nx.draw(G,
                pos=nx.spring_layout(G),
                with_labels=True,
                node_color='#336699',
                edge_color='grey',
                width=0.5,
                node_size=300,
                alpha=0.7)
        ax.xaxis.set_visible(False)

    plt.tight_layout()
    fig.canvas.draw()

    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3, ))
    writer.add_image('graphs', data, epoch)

    # log a label-less version
    #fig = plt.figure(figsize=(8,6), dpi=300)
    #for i in range(len(batch_idx)):
    #    ax = plt.subplot(2, 2, i+1)
    #    num_nodes = batch_num_nodes[batch_idx[i]]
    #    adj_matrix = adj[batch_idx[i], :num_nodes, :num_nodes].cpu().data.numpy()
    #    G = nx.from_numpy_matrix(adj_matrix)
    #    nx.draw(G, pos=nx.spring_layout(G), with_labels=False, node_color='#336699',
    #            edge_color='grey', width=0.5, node_size=25,
    #            alpha=0.8)

    #plt.tight_layout()
    #fig.canvas.draw()

    #data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
    #data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    #writer.add_image('graphs_no_label', data, epoch)

    # colored according to assignment
    assignment = assign_tensor.cpu().data.numpy()
    fig = plt.figure(figsize=(8, 6), dpi=300)

    num_clusters = assignment.shape[2]
    all_colors = np.array(range(num_clusters))

    for i in range(len(batch_idx)):
        ax = plt.subplot(2, 2, i + 1)
        num_nodes = batch_num_nodes[batch_idx[i]]
        adj_matrix = adj[
            batch_idx[i], :num_nodes, :num_nodes].cpu().data.numpy()

        label = np.argmax(assignment[batch_idx[i]], axis=1).astype(int)
        label = label[:batch_num_nodes[batch_idx[i]]]
        node_colors = all_colors[label]

        G = nx.from_numpy_array(adj_matrix)
        nx.draw(G,
                pos=nx.spring_layout(G),
                with_labels=False,
                node_color=node_colors,
                edge_color='grey',
                width=0.4,
                node_size=50,
                cmap=plt.get_cmap('Set1'),
                vmin=0,
                vmax=num_clusters - 1,
                alpha=0.8)

    plt.tight_layout()
    fig.canvas.draw()

    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3, ))
    writer.add_image('graphs_colored', data, epoch)
Example #53
0
    def run(self):
        self.lg.info("simulator: platform.system() = {}".format(platform.system()))
        if __debug__:
            if platform.system() == 'Linux':
                plt.switch_backend("TkAgg")
            elif platform.system() == 'Darwin':
                plt.switch_backend("macosx")
                plt.style.use("seaborn-white")

        # Removing temporal socket files
        for pattern in ['*_udp', '*_tcp']:
            for tmp_file in glob(pattern):
                os.remove(tmp_file)
        
        # Listen to the team for drawing
        sim.FEEDBACK["DRAW"] = Queue()
        Process(target=self.store).start()

        if __debug__:
            if self.gui is True:
                Process(target=self.draw).start()

        # Listen to the team for simulation life
        sim.FEEDBACK["STATUS"] = Queue()

        # create shared list for CIS set of rules (only when cis is chosen?)
        manager = Manager()
        sim.SHARED_LIST["malicious"] = manager.list()
        sim.SHARED_LIST["regular"] = manager.list()
        sim.SHARED_LIST["attacked"] = manager.list()

        # Automatic bitrate control only for CIS-SSS
        sim.RECV_LIST = manager.dict()
        #sim.LOCK = Semaphore()

        # run splitter
        p = Process(target=self.run_a_splitter)
        p.start()
        self.processes["S"] = p.pid
        self.attended_monitors = 0
        self.attended_peers = 0
        self.attended_mps = 0

        # run a monitor
        p = Process(target=self.run_a_peer, args=["S", "monitor", "M"+str(self.attended_monitors+0), True])
        p.start()
        self.processes["M"+str(self.attended_monitors+1)] = p.pid
        self.attended_monitors += 1

        queue = sim.FEEDBACK["STATUS"]
        m = queue.get()
        while m[0] != "Bye" and self.current_round < self.number_of_rounds:
            if (m[0] == "R"):
                self.current_round = m[1]
                r = np.random.uniform(0, 1)
                if r <= Simulator.P_IN:
                    self.addPeer()
            m = queue.get()

        sim.FEEDBACK["DRAW"].put(("Bye", "Bye"))
        sim.FEEDBACK["STATUS"].put(("Bye", "Bye"))
        for name, pid in self.processes.items():
            self.lg.info("Killing {}, ...".format(name))
            os.system("kill -9 "+str(pid))
            self.lg.info("{} killed".format(name))
            
        if self.set_of_rules == "cis" or self.set_of_rules == "cis-sss":
            self.lg.info("List of Malicious")
            self.lg.info(sim.SHARED_LIST["malicious"])
            self.lg.info("List of Regular detected")
            self.lg.info(sim.SHARED_LIST["regular"])
            self.lg.info("List of peer Attacked")
            self.lg.info(sim.SHARED_LIST["attacked"])
Example #54
0
def main():
    plt.switch_backend('agg')

    N = 2
    xmean = np.random.randn(N)
    sigma = 0.3
    stopeval = 1e3 * N**2
    stopfitness = 1e-10

    λ = 64  # 4+int(3*np.log(N))
    mu = λ // 4
    weights = np.log(mu + 1 / 2) - np.log(np.asarray(range(1, mu + 1))).astype(
        np.float32)
    weights = weights / np.sum(weights)
    mueff = (np.sum(weights)**2) / np.sum(weights**2)

    cc = (4 + mueff / N) / (N + 4 + 2 * mueff / N)
    cs = (mueff + 2) / (N + mueff + 5)
    c1 = 2 / ((N + 1.3)**2 + mueff)
    cmu = min(1 - c1, 2 * (mueff - 2 + 1 / mueff) / ((N + 2)**2 + mueff))
    damps = 1 + 2 * max(0, ((mueff - 1) / (N + 1))**0.5 - 1) + cs

    pc = np.zeros(N).astype(np.float32)
    ps = np.zeros(N).astype(np.float32)
    B = np.eye(N, N).astype(np.float32)
    D = np.ones(N).astype(np.float32)

    C = B.dot(np.diag(D**2)).dot(B.T)  # matrix products, not elementwise '*'
    invsqrtC = B.dot(np.diag(D**-1)).dot(B.T)
    eigeneval = 0
    chiN = N**0.5 * (1 - 1 / (4 * N) + 1 / (21 * N**2))

    counteval = 0
    generation = 0
    solution_found = False
    graphs = []
    while counteval < stopeval:
        arx = np.zeros((λ, N))
        arfitness = np.zeros(λ)
        for k in range(λ):
            arx[k] = xmean + sigma * B.dot(D * np.random.randn(N))
            arfitness[k] = f(arx[k])
            counteval += 1

        plt.ylim(-1, 20)
        plt.xlim(-1, 20)
        plt.plot(solution[0], solution[1], "b.")
        plt.plot(arx[:, 0], arx[:, 1], "r.")
        plt.plot(np.mean(arx[:, 0]), np.mean(arx[:, 1]), "g.")
        buf = io.BytesIO()
        plt.savefig(buf, format='png')
        plt.clf()
        buf.seek(0)
        img = imread(buf)
        buf.close()
        graphs.append(img)

        arindex = np.argsort(-arfitness)
        arfitness = arfitness[arindex]

        xold = xmean
        xmean = weights.dot(arx[arindex[0:mu]])

        ps = (1 - cs) * ps + np.sqrt(cs * (2 - cs) * mueff) * invsqrtC.dot(
            (xmean - xold) / sigma)
        hsig = np.linalg.norm(ps) / np.sqrt(
            1 - (1 - cs)**(2 * counteval / λ)) / chiN < 1.4 + 2 / (N + 1)
        pc = (1 - cc) * pc + hsig * np.sqrt(cc * (2 - cc) * mueff) * (
            (xmean - xold) / sigma)
        artmp = (1 / sigma) * (arx[arindex[0:mu]] - xold)
        # the rank-one update needs the outer product pc pc^T, not the scalar pc.dot(pc)
        C = (1 - c1 - cmu) * C + c1 * (np.outer(pc, pc) + (1 - hsig) * cc *
                                       (2 - cc) * C) + cmu * artmp.T.dot(
                                           np.diag(weights)).dot(artmp)
        sigma = sigma * np.exp((cs / damps) * (np.linalg.norm(ps) / chiN - 1))

        if counteval - eigeneval > λ / (c1 + cmu) / N / 10:
            eigeneval = counteval
            C = np.triu(C) + np.triu(C, 1).T
            D, B = np.linalg.eig(C)
            D = np.sqrt(D)
            invsqrtC = B.dot(np.diag(D**-1).dot(B.T))

        generation += 1

        if arfitness[0] >= -stopfitness:
            solution_found = True
            break

    if solution_found:
        print("Solution found at generation #" + str(generation))
    else:
        print("Solution not found")

    if not os.path.exists("result"):
        os.makedirs("result")
    imageio.mimsave('result/cma-es.gif', graphs)
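The snippet assumes a module-level objective f and target point solution, plus imports (io, os, imageio, an imread function, numpy as np, matplotlib.pyplot as plt) that are not shown. A minimal hedged stub under which the loop above runs and terminates: a negated squared distance, so arfitness is maximized and its optimum of 0 is reached exactly at solution.

import numpy as np

solution = np.array([10.0, 15.0])  # hypothetical optimum inside the plotted 20x20 window

def f(x):
    # negated squared distance; arfitness[0] >= -stopfitness once x is near solution
    return -np.sum((x - solution) ** 2)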
Example #55
0
def safe_subprocess_main_with_flags(flags, func, *args, **kwargs):
    if flags.gui:
        import matplotlib.pyplot as plt
        plt.switch_backend('TkAgg')
    init_worker_process_flags(flags)
    return func(*args, **kwargs)
Example #56
0
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import os
from tqdm import tqdm
import random
import torch
from torch import nn
from torch.autograd import Variable
from autoencoder import AutoEncoder
from argument import args
from utilities import *
plt.switch_backend('TkAgg')

num_epochs = args.num_epochs
batch_size = args.batch_size
learning_rate = args.learning_rate

HOME = os.path.expanduser("~")
image_filepath = HOME + '/data/VOCdevkit/VOC2007/JPEGImages/'  #train images are here


def save_model():
    print('Saving the weights')
    torch.save(model.state_dict(), 'conv_autoencoder_weight.pt')
    torch.save(optimizer.state_dict(), 'conv_autoencoder_optimizer.pt')


data = []
print('Reading Images')
for i in tqdm(range(len(os.listdir(image_filepath)[0:3]))):
Example #57
0
def run(
    training_data,
    max_runs,
    batch_size,
    max_p,
    epochs,
    metric,
    gpy_model,
    gpy_acquisition,
    initial_design,
    seed,
):
    bounds = [
        {
            "name": "lr",
            "type": "continuous",
            "domain": (1e-5, 1e-1)
        },
        {
            "name": "momentum",
            "type": "continuous",
            "domain": (0.0, 1.0)
        },
    ]
    # create random file to store run ids of the training tasks
    tracking_client = mlflow.tracking.MlflowClient()

    def new_eval(nepochs,
                 experiment_id,
                 null_train_loss,
                 null_valid_loss,
                 null_test_loss,
                 return_all=False):
        """
        Create a new eval function

        :param nepochs: Number of epochs to train the model.
        :experiment_id: Experiment id for the training run
        :valid_null_loss: Loss of a null model on the validation dataset
        :test_null_loss: Loss of a null model on the test dataset.
        :return_test_loss: Return both validation and test loss if set.

        :return: new eval function.
        """
        def eval(params):
            """
            Train Keras model with given parameters by invoking MLflow run.

            Notice we store runUuid and resulting metric in a file. We will later use these to pick
            the best run and to log the runUuids of the child runs as an artifact. This is a
            temporary workaround until MLflow offers better mechanism of linking runs together.

            :param params: Parameters to the train_keras script we optimize over:
                          learning_rate, drop_out_1
            :return: The metric value evaluated on the validation data.
            """
            lr, momentum = params[0]
            with mlflow.start_run(nested=True) as child_run:
                p = mlflow.projects.run(
                    run_id=child_run.info.run_id,
                    uri=".",
                    entry_point="train",
                    parameters={
                        "training_data": training_data,
                        "epochs": str(nepochs),
                        "learning_rate": str(lr),
                        "momentum": str(momentum),
                        "seed": str(seed),
                    },
                    experiment_id=experiment_id,
                    synchronous=False,
                )
                succeeded = p.wait()
            if succeeded:
                training_run = tracking_client.get_run(p.run_id)
                metrics = training_run.data.metrics

                # cap the loss at the loss of the null model
                train_loss = min(null_train_loss,
                                 metrics["train_{}".format(metric)])
                valid_loss = min(null_valid_loss,
                                 metrics["val_{}".format(metric)])
                test_loss = min(null_test_loss,
                                metrics["test_{}".format(metric)])
            else:
                # run failed => return null loss
                tracking_client.set_terminated(p.run_id, "FAILED")
                train_loss = null_train_loss
                valid_loss = null_valid_loss
                test_loss = null_test_loss

            mlflow.log_metrics({
                "train_{}".format(metric): train_loss,
                "val_{}".format(metric): valid_loss,
                "test_{}".format(metric): test_loss,
            })

            if return_all:
                return train_loss, valid_loss, test_loss
            else:
                return valid_loss

        return eval

    with mlflow.start_run() as run:
        experiment_id = run.info.experiment_id
        # Evaluate null model first.
        # We use null model (predict everything to the mean) as a reasonable upper bound on loss.
        # We need an upper bound to handle the failed runs (e.g. return NaNs) because GPyOpt can not
        # handle Infs.
        # Always including a null model in our results is also a good ML practice.
        train_null_loss, valid_null_loss, test_null_loss = new_eval(
            0, experiment_id, _inf, _inf, _inf, True)(params=[[0, 0]])
        myProblem = GPyOpt.methods.BayesianOptimization(
            new_eval(epochs, experiment_id, train_null_loss, valid_null_loss,
                     test_null_loss),
            bounds,
            evaluator_type="local_penalization"
            if min(batch_size, max_p) > 1 else "sequential",
            batch_size=batch_size,
            num_cores=max_p,
            model_type=gpy_model,
            acquisition_type=gpy_acquisition,
            initial_design_type=initial_design,
            initial_design_numdata=max_runs >> 2,
            exact_feval=False,
        )
        myProblem.run_optimization(max_runs)
        matplotlib.use("agg")
        plt.switch_backend("agg")
        with TempDir() as tmp:
            acquisition_plot = tmp.path("acquisition_plot.png")
            convergence_plot = tmp.path("convergence_plot.png")
            myProblem.plot_acquisition(filename=acquisition_plot)
            myProblem.plot_convergence(filename=convergence_plot)
            if os.path.exists(convergence_plot):
                mlflow.log_artifact(convergence_plot, "convergence_plot")
            if os.path.exists(acquisition_plot):
                mlflow.log_artifact(acquisition_plot, "acquisition_plot")

        # find the best run, log its metrics as the final metrics of this run.
        client = MlflowClient()
        runs = client.search_runs(
            [experiment_id], "tags.mlflow.parentRunId = '{run_id}' ".format(
                run_id=run.info.run_id))
        best_val_train = _inf
        best_val_valid = _inf
        best_val_test = _inf
        best_run = None
        for r in runs:
            if r.data.metrics["val_rmse"] < best_val_valid:
                best_run = r
                best_val_train = r.data.metrics["train_rmse"]
                best_val_valid = r.data.metrics["val_rmse"]
                best_val_test = r.data.metrics["test_rmse"]
        mlflow.set_tag("best_run", best_run.info.run_id)
        mlflow.log_metrics({
            "train_{}".format(metric): best_val_train,
            "val_{}".format(metric): best_val_valid,
            "test_{}".format(metric): best_val_test,
        })
Example #58
0
from __future__ import absolute_import

from unittest import SkipTest

import numpy as np
from holoviews.core.data import Dataset
from holoviews.core.options import Store
from holoviews.element import Graph, circular_layout
from holoviews.element.comparison import ComparisonTestCase
from holoviews.plotting import comms

# Standardize backend due to random inconsistencies
try:
    from matplotlib import pyplot
    pyplot.switch_backend('agg')
    from holoviews.plotting.mpl import OverlayPlot
    mpl_renderer = Store.renderers['matplotlib']
except Exception:
    mpl_renderer = None


class MplGraphPlotTests(ComparisonTestCase):
    def setUp(self):
        if not mpl_renderer:
            raise SkipTest(
                'Matplotlib tests require matplotlib to be available')
        self.previous_backend = Store.current_backend
        Store.current_backend = 'matplotlib'
        self.default_comm = mpl_renderer.comms['default']
        mpl_renderer.comms['default'] = (comms.Comm, '')
Example #59
0
def main(main_args):
    # Initialize argparse
    parser = argparse.ArgumentParser(
        description='Outputs a list of patched functions and the corresponding source code lines.')

    parser.add_argument('gitrepo', metavar='repo', help='git repo url or local path file:///')
    parser.add_argument('--revision', help='repository revision')
    parser.add_argument('--print-mode', dest='print', choices=['full', 'simple', 'only-fn', 'functions'], default='full',
                        help='print format')
    parser.add_argument('--with-hash', action='store_true', help='print git hashes in --print-mode=functions')
    parser.add_argument('--only-added', action='store_true', help='print only added lines in --print-mode=functions')
    parser.add_argument('--verbose', action='store_true', help='display helpful progress messages')
    parser.add_argument('-s', '--summary', action='store_true', help='prints a summary of the data')
    parser.add_argument('-p', '--plot', action='store_true', help='save graphs of the generated data')
    parser.add_argument('-i', '--skip-initial', dest='skip', action='store_true',
                        help='skip initial commit - can be very large')
    parser.add_argument('-l', '--limit', type=int, help='plot commits up to this one')
    parser.add_argument('-ri', '--rangeInt', type=int, metavar='N',
                        help='look at patches for the previous N commits (preceding HASH)')
    parser.add_argument('-rh', '--range', metavar='INIT_HASH', help='look at patches between INIT_HASH and HASH')
    parser.add_argument('--save-json', dest='json', action='store_true',
                        help='output function update information in JSON format')
    parser.add_argument('--track', dest='track', choices=['loc', 'diff'], default='diff', help='what data to save')
    parser.add_argument('--path-filter', dest='path_filter', help='restrict output to paths matched by filter')

    # Dictionary of arguments
    args_orig = parser.parse_args(main_args)
    args = vars(args_orig)

    # Handle printing
    OutputManager.should_print = bool(args['verbose'])
    OutputManager.with_hash = bool(args['with_hash'])
    OutputManager.only_added = bool(args['only_added'])

    repo_manager = RepoManager(args['gitrepo'], args['print'], bool(args['json']), args['track'], args['path_filter'])

    if args['revision']:
        repo_manager.compare_patches_in_range(args['revision'], args['range'])
    elif args['plot'] or args['summary']:
        if args['range']:
            repo_manager.get_updated_fn_per_commit(args['skip'], end_hash=args['range'])
        elif args['rangeInt']:
            repo_manager.get_updated_fn_per_commit(args['skip'], times=int(args['rangeInt']))
        else:
            repo_manager.get_updated_fn_per_commit(args['skip'])

    if args['summary']:
        repo_manager.summary()

    if args['plot']:
        assert hasMatplotlib
        plt.switch_backend('MacOSX')
        # manager = plt.get_current_fig_manager()
        # manager.window.showMaximized()

        repo_manager.plot_fn_per_commit(args['skip'])
        repo_manager.plot_fn_per_commit_restricted(args['skip'], args['limit'])
        repo_manager.plot_other_changed(args['skip'])

    OutputManager.print_all(args['print'] == 'only-fn')
    repo_manager.cleanup()
Example #60
0
import argparse
import contextlib
import itertools
import os
import shutil
import sys
import tempfile
import matplotlib
from matplotlib import cm
from ..plot import cmap


# Set no-op Matplotlib backend to defer importing anything that requires a GUI
# until we have determined that it is necessary based on the command line
# arguments.
if 'matplotlib.pyplot' in sys.modules:
    from matplotlib import pyplot as plt
    plt.switch_backend('Template')
else:
    matplotlib.use('Template', force=True)


@contextlib.contextmanager
def TemporaryDirectory(suffix='', prefix='tmp', dir=None, delete=True):
    try:
        dir = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
        yield dir
    finally:
        if delete:
            shutil.rmtree(dir)


class GlobAction(argparse._StoreAction):