Example 1
import toga
from eddington import EddingtonException, to_relevant_precision_string
from matplotlib.ticker import FuncFormatter, NullLocator
from toga.style import Pack
from toga.style.pack import COLUMN, HIDDEN, VISIBLE
from toga.validators import Number

from eddington_gui.boxes.eddington_box import EddingtonBox
from eddington_gui.boxes.line_box import LineBox
from eddington_gui.consts import LABEL_WIDTH, LONG_INPUT_WIDTH, SMALL_PADDING

# TODO: remove once https://github.com/beeware/toga-chart/issues/11 is fixed  # pylint: disable=fixme # noqa
EDDINGTON_FORMATTER = FuncFormatter(
    lambda y, _: to_relevant_precision_string(y))
NULL_LOCATOR = NullLocator()


class PlotConfigurationBox(EddingtonBox):  # pylint: disable=R0902,R0904
    """Visual box to create plot configuration."""

    __title_input: toga.TextInput
    __residuals_title_input: toga.TextInput
    __xlabel_input: toga.TextInput
    __ylabel_input: toga.TextInput
    __grid_switch: toga.Switch
    __legend_switch: toga.Switch
    __x_domain_switch: toga.Switch
    __x_min_title: toga.Label
    __x_min_input: toga.TextInput
    __x_max_title: toga.Label
Example 2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import FormatStrFormatter, NullFormatter, NullLocator

# confinterval, makesubplot1d, makesubplot2d and getMax are assumed to be
# defined elsewhere in the surrounding module.


def triplot(chain, color='k', weights=None, interpolate=False, smooth=True,
            labels=None, figsize=(11, 8.5), title=None, inj=None, tex=True,
            incMaxPost=True, cmap='YlOrBr', lw=1.5, ranges=False, axarr=None):
    """

    Make Triangle plot

    """

    # rcParams settings
    if chain.shape[1] < 10:
        ticksize = 10
        #plt.rcParams['ytick.labelsize'] = 10.0
        #plt.rcParams['xtick.labelsize'] = 10.0
    else:
        ticksize = 8
        #plt.rcParams['ytick.labelsize'] = 8.0
        #plt.rcParams['xtick.labelsize'] = 8.0
    if tex:
        plt.rcParams['text.usetex'] = True

    # get number of parameters
    ndim = chain.shape[1]
    parameters = np.arange(ndim)  # integer indices for column selection

    if axarr is not None:
        f = plt.gcf()
        #fig, axarr = plt.subplots(nrows=len(parameters), ncols=len(parameters),figsize=figsize)
    else:
        f, axarr = plt.subplots(nrows=len(parameters),
                                ncols=len(parameters),
                                figsize=figsize)

    for i in range(len(parameters)):
        # for j in len(parameters[np.where(i <= parameters)]:
        for j in range(len(parameters)):
            ii = i
            jj = len(parameters) - j - 1

            # get ranges
            if ranges:
                xmin, xmax = confinterval(chain[:, parameters[ii]],
                                          sigma=0.95,
                                          type='equalProb')
                x_range = [xmin, xmax]
                xmin, xmax = confinterval(chain[:, parameters[jj]],
                                          sigma=0.95,
                                          type='equalProb')
                y_range = [xmin, xmax]

            else:
                x_range = [
                    chain[:, parameters[ii]].min(),
                    chain[:, parameters[ii]].max()
                ]
                y_range = [
                    chain[:, parameters[jj]].min(),
                    chain[:, parameters[jj]].max()
                ]

            axarr[ii, jj].tick_params(axis='both', which='major',
                                      labelsize=ticksize)

            xmajorLocator = matplotlib.ticker.MaxNLocator(nbins=4,
                                                          prune='both')
            ymajorLocator = matplotlib.ticker.MaxNLocator(nbins=4,
                                                          prune='both')

            if j <= len(parameters) - i - 1:
                axarr[jj][ii].xaxis.set_minor_locator(NullLocator())
                axarr[jj][ii].yaxis.set_minor_locator(NullLocator())
                axarr[jj][ii].xaxis.set_major_locator(NullLocator())
                axarr[jj][ii].yaxis.set_major_locator(NullLocator())

                axarr[jj][ii].xaxis.set_minor_formatter(NullFormatter())
                axarr[jj][ii].yaxis.set_minor_formatter(NullFormatter())
                axarr[jj][ii].xaxis.set_major_formatter(NullFormatter())
                axarr[jj][ii].yaxis.set_major_formatter(NullFormatter())
                xmajorFormatter = FormatStrFormatter('%g')
                ymajorFormatter = FormatStrFormatter('%g')

                if ii == jj:
                    # Make a 1D plot
                    makesubplot1d(axarr[ii][ii], chain[:,parameters[ii]], \
                                  weights=weights, interpolate=interpolate, \
                                  smooth=smooth, color=color, lw=lw, range=x_range)
                    axarr[ii][jj].set_ylim(bottom=0)
                    if incMaxPost:
                        mx = getMax(chain[:, parameters[ii]], weights=weights)
                        axarr[ii][jj].set_title('%5.4g' % (mx), fontsize=10)

                    if inj is not None:
                        axarr[ii][ii].axvline(inj[ii], lw=2, color='k')
                else:
                    # Make a 2D plot
                    makesubplot2d(axarr[jj][ii],
                                  chain[:, parameters[ii]],
                                  chain[:, parameters[jj]],
                                  cmap=cmap,
                                  color=color,
                                  weights=weights,
                                  smooth=smooth,
                                  lw=lw,
                                  x_range=x_range,
                                  y_range=y_range)

                    if inj is not None:
                        axarr[jj][ii].plot(inj[ii], inj[jj], 'x', color='k', markersize=12, \
                                           mew=2, mec='k')

                axarr[jj][ii].xaxis.set_major_locator(xmajorLocator)
                axarr[jj][ii].yaxis.set_major_locator(ymajorLocator)
            else:
                axarr[jj][ii].set_visible(False)
                #axarr[jj][ii].axis('off')

            if jj == len(parameters) - 1:
                axarr[jj][ii].xaxis.set_major_formatter(xmajorFormatter)
                if labels:
                    axarr[jj][ii].set_xlabel(labels[ii])

            if ii == 0:
                if jj == 0:
                    axarr[jj][ii].yaxis.set_major_locator(NullLocator())
                    #axarr[jj][ii].set_ylabel('Post.')
                else:
                    axarr[jj][ii].yaxis.set_major_formatter(ymajorFormatter)
                    if labels:
                        axarr[jj][ii].set_ylabel(labels[jj])

    # overall plot title
    if title:
        f.suptitle(title, fontsize=14, y=0.90)

    # make plots closer together
    f.subplots_adjust(hspace=0.1)
    f.subplots_adjust(wspace=0.1)

    return axarr
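
A minimal usage sketch, assuming the helper routines noted above are importable from the same module; the chain below is synthetic and the labels are arbitrary:

import numpy as np

rng = np.random.default_rng(0)
chain = rng.normal(size=(5000, 3))  # hypothetical 5000-sample, 3-parameter chain

# tex=False so the sketch runs without a LaTeX installation.
axarr = triplot(chain, labels=['$a$', '$b$', '$c$'], tex=False,
                title='Posterior triangle plot')
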
Example 3
def detect():
    parser = argparse.ArgumentParser()
    parser.add_argument("--image_folder",
                        type=str,
                        default="PyTorch_YOLOv3/data/custom/images/valid/",
                        help="path to dataset")
    parser.add_argument("--model_def",
                        type=str,
                        default="PyTorch_YOLOv3/config/yolov3-custom.cfg",
                        help="path to model definition file")
    parser.add_argument("--class_path",
                        type=str,
                        default="PyTorch_YOLOv3/data/custom/classes.names",
                        help="path to class label file")
    parser.add_argument("--conf_thres",
                        type=float,
                        default=0.8,
                        help="object confidence threshold")  # 0.8
    parser.add_argument(
        "--nms_thres",
        type=float,
        default=0.3,
        help="iou thresshold for non-maximum suppression")  # 0.25
    parser.add_argument("--batch_size",
                        type=int,
                        default=1,
                        help="size of the batches")
    parser.add_argument(
        "--n_cpu",
        type=int,
        default=0,
        help="number of cpu threads to use during batch generation")
    parser.add_argument("--img_size",
                        type=int,
                        default=416,
                        help="size of each image dimension")
    parser.add_argument(
        "--checkpoint_model",
        type=str,
        default="PyTorch_YOLOv3/checkpoints/yolov3_ckpt_best_f01.pth",
        help="path to checkpoint model")
    opt = parser.parse_args()

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    os.makedirs("PyTorch_YOLOv3/output", exist_ok=True)
    os.makedirs("PyTorch_YOLOv3/pre_img", exist_ok=True)
    os.makedirs("PyTorch_YOLOv3/coordinate", exist_ok=True)

    fname_list = []
    for file in os.listdir(opt.image_folder):

        file_name = splitext(file)[0]
        fname_list.append(f"{file_name}.txt")

    fname_list = sorted(fname_list)

    # Set up model
    model = Darknet(opt.model_def, img_size=opt.img_size).to(device)
    # Load checkpoint weights
    model.load_state_dict(torch.load(opt.checkpoint_model))

    model.eval()  # Set in evaluation mode

    dataloader = DataLoader(
        ImageFolder(opt.image_folder, img_size=opt.img_size),
        batch_size=opt.batch_size,
        shuffle=False,
        num_workers=opt.n_cpu,
    )

    classes = load_classes(opt.class_path)  # Extracts class labels from file

    Tensor = (torch.cuda.FloatTensor
              if torch.cuda.is_available() else torch.FloatTensor)

    imgs = []  # Stores image paths
    img_detections = []  # Stores detections for each image index

    print("\nPerforming object detection:")
    prev_time = time.time()

    for batch_i, (img_paths, input_imgs) in tqdm(enumerate(dataloader),
                                                 total=len(dataloader),
                                                 desc="Batch Inference Time"):
        # Configure input
        input_imgs = Variable(input_imgs.type(Tensor))

        # Get detections
        with torch.no_grad():
            detections = model(input_imgs)
            detections = non_max_suppression(detections, opt.conf_thres,
                                             opt.nms_thres)

        # Log progress
        current_time = time.time()
        inference_time = datetime.timedelta(seconds=current_time - prev_time)
        prev_time = current_time
        # print("\t+ Batch %d, Inference Time: %s" % (batch_i, inference_time))

        # Save image and detections
        imgs.extend(img_paths)
        img_detections.extend(detections)

    plt.set_cmap('gray')

    rewrite = True
    print("\nSaving images:")

    for img_i, (path, detections) in enumerate(zip(imgs, img_detections)):

        print("(%d) Image: '%s'" % (img_i, path))

        # Create plot

        img = np.array(Image.open(path).convert('L'))
        fig, ax = plt.subplots(1)
        ax.imshow(img)

        # Draw bounding boxes and labels of detections
        if detections is not None:
            # Rescale boxes to original image

            detections = rescale_boxes(detections, opt.img_size, img.shape[:2])

            rewrite = True
            for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:

                # print("\t+ Label: %s, Conf: %.5f" % (classes[int(cls_pred)], cls_conf.item()))
                # x1 = x1 - 10
                y1 = y1 - 5
                y2 = y2 + 5
                x1 = x1 - 50
                x2 = x2 + 50
                box_w = x2 - x1
                box_h = y2 - y1
                x1, y1, x2, y2 = math.floor(x1), math.floor(y1), math.ceil(
                    x2), math.ceil(y2)
                box_w, box_h = x2 - x1, y2 - y1

                mode = 'w' if rewrite else 'a'
                rewrite = False
                line = "{:d} {:d} {:d} {:d} {:d} {:d}\n".format(
                    x1, y1, x2, y2, box_w, box_h)
                with open(
                        f"VertebraSegmentation/coordinate/{fname_list[img_i]}",
                        mode) as f1:
                    f1.write(line)
                with open(f"PyTorch_YOLOv3/coordinate/{fname_list[img_i]}",
                          mode) as f2:
                    f2.write(line)
                # color = bbox_colors[int(np.where(unique_labels == int(cls_pred))[0])]
                # Create a Rectangle patch
                bbox = patches.Rectangle((x1, y1),
                                         box_w,
                                         box_h,
                                         linewidth=0.5,
                                         edgecolor='red',
                                         facecolor="none")
                # Add the bbox to the plot
                ax.add_patch(bbox)
        # Save generated image with detections
        plt.axis("off")
        plt.gca().xaxis.set_major_locator(NullLocator())
        plt.gca().yaxis.set_major_locator(NullLocator())
        # plt.set_cmap('gray')
        filename = path.split("/")[-1].split(".")[0]
        plt.savefig(f"PyTorch_YOLOv3/output/{filename}.png",
                    bbox_inches="tight",
                    pad_inches=0.0,
                    facecolor="none")
        plt.close()
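
The coordinate files written above contain one box per line as six space-separated integers: x1 y1 x2 y2 box_w box_h. A small illustrative reader (the file name below is hypothetical) could look like this:

def read_boxes(txt_path):
    """Parse a coordinate file written by detect() into a list of int tuples."""
    boxes = []
    with open(txt_path) as fh:
        for raw_line in fh:
            x1, y1, x2, y2, box_w, box_h = (int(v) for v in raw_line.split())
            boxes.append((x1, y1, x2, y2, box_w, box_h))
    return boxes

# boxes = read_boxes("PyTorch_YOLOv3/coordinate/some_image.txt")  # hypothetical path
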
import logging

import matplotlib.pyplot as pl
import numpy as np
from matplotlib.ticker import MaxNLocator, NullLocator, ScalarFormatter

try:
    from scipy.ndimage import gaussian_filter
except ImportError:
    gaussian_filter = None

# quantile() and hist2d() are assumed to come from the surrounding
# corner-plot module.


def corner(xs,
           bins=20,
           range=None,
           weights=None,
           color="k",
           smooth=None,
           smooth1d=None,
           no_1d=True,
           labels=None,
           label_kwargs=None,
           show_titles=False,
           title_fmt=".2f",
           title_kwargs=None,
           truths=None,
           truth_color="#4682b4",
           scale_hist=False,
           quantiles=None,
           verbose=False,
           fig=None,
           max_n_ticks=5,
           top_ticks=False,
           use_math_text=False,
           reverse=False,
           hist_kwargs=None,
           **hist2d_kwargs):
    """
    Make a *sick* corner plot showing the projections of a data set in a
    multi-dimensional space. kwargs are passed to hist2d() or used for
    `matplotlib` styling.
    Parameters
    ----------
    xs : array_like[nsamples, ndim]
        The samples. This should be a 1- or 2-dimensional array. For a 1-D
        array this results in a simple histogram. For a 2-D array, the zeroth
        axis is the list of samples and the next axis are the dimensions of
        the space.
    bins : int or array_like[ndim,]
        The number of bins to use in histograms, either as a fixed value for
        all dimensions, or as a list of integers for each dimension.
    weights : array_like[nsamples,]
        The weight of each sample. If `None` (default), samples are given
        equal weight.
    color : str
        A ``matplotlib`` style color for all histograms.
    smooth, smooth1d : float
       The standard deviation for Gaussian kernel passed to
       `scipy.ndimage.gaussian_filter` to smooth the 2-D and 1-D histograms
       respectively. If `None` (default), no smoothing is applied.
    labels : iterable (ndim,)
        A list of names for the dimensions. If a ``xs`` is a
        ``pandas.DataFrame``, labels will default to column names.
    label_kwargs : dict
        Any extra keyword arguments to send to the `set_xlabel` and
        `set_ylabel` methods.
    show_titles : bool
        Displays a title above each 1-D histogram showing the 0.5 quantile
        with the upper and lower errors supplied by the quantiles argument.
    title_fmt : string
        The format string for the quantiles given in titles. If you explicitly
        set ``show_titles=True`` and ``title_fmt=None``, the labels will be
        shown as the titles. (default: ``.2f``)
    title_kwargs : dict
        Any extra keyword arguments to send to the `set_title` command.
    range : iterable (ndim,)
        A list where each element is either a length 2 tuple containing
        lower and upper bounds or a float in range (0., 1.)
        giving the fraction of samples to include in bounds, e.g.,
        [(0.,10.), (1.,5), 0.999, etc.].
        If a fraction, the bounds are chosen to be equal-tailed.
    truths : iterable (ndim,)
        A list of reference values to indicate on the plots.  Individual
        values can be omitted by using ``None``.
    truth_color : str
        A ``matplotlib`` style color for the ``truths`` makers.
    scale_hist : bool
        Should the 1-D histograms be scaled in such a way that the zero line
        is visible?
    quantiles : iterable
        A list of fractional quantiles to show on the 1-D histograms as
        vertical dashed lines.
    verbose : bool
        If true, print the values of the computed quantiles.
    plot_contours : bool
        Draw contours for dense regions of the plot.
    use_math_text : bool
        If true, then axis tick labels for very large or small exponents will
        be displayed as powers of 10 rather than using `e`.
    reverse : bool
        If true, plot the corner plot starting in the upper-right corner
        instead of the usual bottom-left corner.
    max_n_ticks : int
        Maximum number of ticks to try to use
    top_ticks : bool
        If true, label the top ticks of each axis
    fig : matplotlib.Figure
        Overplot onto the provided figure object.
    hist_kwargs : dict
        Any extra keyword arguments to send to the 1-D histogram plots.
    **hist2d_kwargs
        Any remaining keyword arguments are sent to `corner.hist2d` to generate
        the 2-D histogram plots.
    """
    if quantiles is None:
        quantiles = []
    if title_kwargs is None:
        title_kwargs = dict()
    if label_kwargs is None:
        label_kwargs = dict()

    # Try filling in labels from pandas.DataFrame columns.
    if labels is None:
        try:
            labels = xs.columns
        except AttributeError:
            pass

    # Deal with 1D sample lists.
    xs = np.atleast_1d(xs)
    if len(xs.shape) == 1:
        xs = np.atleast_2d(xs)
    else:
        assert len(xs.shape) == 2, "The input sample array must be 1- or 2-D."
        xs = xs.T
    assert xs.shape[0] <= xs.shape[1], "I don't believe that you want more " \
                                       "dimensions than samples!"

    # Parse the weight array.
    if weights is not None:
        weights = np.asarray(weights)
        if weights.ndim != 1:
            raise ValueError("Weights must be 1-D")
        if xs.shape[1] != weights.shape[0]:
            raise ValueError("Lengths of weights must match number of samples")

    # Parse the parameter ranges.
    if range is None:
        if "extents" in hist2d_kwargs:
            logging.warning("Deprecated keyword argument 'extents'. "
                            "Use 'range' instead.")
            range = hist2d_kwargs.pop("extents")
        else:
            range = [[x.min(), x.max()] for x in xs]
            # Check for parameters that never change.
            m = np.array([e[0] == e[1] for e in range], dtype=bool)
            if np.any(m):
                raise ValueError(
                    ("It looks like the parameter(s) in "
                     "column(s) {0} have no dynamic range. "
                     "Please provide a `range` argument.").format(", ".join(
                         map("{0}".format,
                             np.arange(len(m))[m]))))

    else:
        # If any of the extents are percentiles, convert them to ranges.
        # Also make sure it's a normal list.
        range = list(range)
        for i, _ in enumerate(range):
            try:
                emin, emax = range[i]
            except TypeError:
                q = [0.5 - 0.5 * range[i], 0.5 + 0.5 * range[i]]
                range[i] = quantile(xs[i], q, weights=weights)

    if len(range) != xs.shape[0]:
        raise ValueError("Dimension mismatch between samples and range")

    # Parse the bin specifications.
    try:
        bins = [int(bins) for _ in range]
    except TypeError:
        if len(bins) != len(range):
            raise ValueError("Dimension mismatch between bins and range")

    # Some magic numbers for pretty axis layout.
    K = len(xs)
    factor = 2.0  # size of one side of one panel
    if reverse:
        lbdim = 0.2 * factor  # size of left/bottom margin
        trdim = 0.5 * factor  # size of top/right margin
    else:
        lbdim = 0.5 * factor  # size of left/bottom margin
        trdim = 0.2 * factor  # size of top/right margin
    whspace = 0.05  # w/hspace size
    plotdim = factor * K + factor * (K - 1.) * whspace
    dim = lbdim + plotdim + trdim

    # Create a new figure if one wasn't provided.
    if fig is None:
        fig, axes = pl.subplots(K, K, figsize=(dim, dim))
    else:
        try:
            axes = np.array(fig.axes).reshape((K, K))
        except:
            raise ValueError("Provided figure has {0} axes, but data has "
                             "dimensions K={1}".format(len(fig.axes), K))

    # Format the figure.
    lb = lbdim / dim
    tr = (lbdim + plotdim) / dim
    fig.subplots_adjust(left=lb,
                        bottom=lb,
                        right=tr,
                        top=tr,
                        wspace=whspace,
                        hspace=whspace)

    # Set up the default histogram keywords.
    if hist_kwargs is None:
        hist_kwargs = dict()
    hist_kwargs["color"] = hist_kwargs.get("color", color)
    if smooth1d is None:
        hist_kwargs["histtype"] = hist_kwargs.get("histtype", "step")

    for i, x in enumerate(xs):
        # Deal with masked arrays.
        if hasattr(x, "compressed"):
            x = x.compressed()

        if np.shape(xs)[0] == 1:
            ax = axes
        else:
            if reverse:
                ax = axes[K - i - 1, K - i - 1]
            else:
                ax = axes[i, i]
        # Plot the histograms.
        if not no_1d:
            if smooth1d is None:
                n, _, _ = ax.hist(x,
                                  bins=bins[i],
                                  weights=weights,
                                  range=np.sort(range[i]),
                                  **hist_kwargs)
            else:
                if gaussian_filter is None:
                    raise ImportError("Please install scipy for smoothing")
                n, b = np.histogram(x,
                                    bins=bins[i],
                                    weights=weights,
                                    range=np.sort(range[i]))
                n = gaussian_filter(n, smooth1d)
                x0 = np.array(list(zip(b[:-1], b[1:]))).flatten()
                y0 = np.array(list(zip(n, n))).flatten()
                ax.plot(x0, y0, **hist_kwargs)

        if truths is not None and truths[i] is not None:
            ax.axvline(truths[i], color=truth_color)

        # Plot quantiles if wanted.
        if len(quantiles) > 0:
            qvalues = quantile(x, quantiles, weights=weights)
            for q in qvalues:
                ax.axvline(q, ls="dashed", color=color)

            if verbose:
                print("Quantiles:")
                print([item for item in zip(quantiles, qvalues)])

        if show_titles:
            title = None
            if title_fmt is not None:
                # Compute the quantiles for the title. This might redo
                # unneeded computation but who cares.
                q_16, q_50, q_84 = quantile(x, [0.16, 0.5, 0.84],
                                            weights=weights)
                q_m, q_p = q_50 - q_16, q_84 - q_50

                # Format the quantile display.
                fmt = "{{0:{0}}}".format(title_fmt).format
                title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
                title = title.format(fmt(q_50), fmt(q_m), fmt(q_p))

                # Add in the column name if it's given.
                if labels is not None:
                    title = "{0} = {1}".format(labels[i], title)

            elif labels is not None:
                title = "{0}".format(labels[i])

            if title is not None:
                if reverse:
                    ax.set_xlabel(title, **title_kwargs)
                else:
                    ax.set_title(title, **title_kwargs)

        # Set up the axes.
        ax.set_xlim(range[i])
        if not no_1d:
            if scale_hist:
                maxn = np.max(n)
                ax.set_ylim(-0.1 * maxn, 1.1 * maxn)
            else:
                ax.set_ylim(0, 1.1 * np.max(n))
        ax.set_yticklabels([])
        if max_n_ticks == 0:
            ax.xaxis.set_major_locator(NullLocator())
            ax.yaxis.set_major_locator(NullLocator())
        else:
            ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
            ax.yaxis.set_major_locator(NullLocator())

        if i < K - 1:
            if top_ticks:
                ax.xaxis.set_ticks_position("top")
                [l.set_rotation(45) for l in ax.get_xticklabels()]
            else:
                ax.set_xticklabels([])
        else:
            if reverse:
                ax.xaxis.tick_top()
            [l.set_rotation(45) for l in ax.get_xticklabels()]
            if labels is not None:
                if reverse:
                    ax.set_title(labels[i], y=1.25, **label_kwargs)
                else:
                    ax.set_xlabel(labels[i], **label_kwargs)

            # use MathText for axes ticks
            ax.xaxis.set_major_formatter(
                ScalarFormatter(useMathText=use_math_text))

        for j, y in enumerate(xs):
            if np.shape(xs)[0] == 1:
                ax = axes
            else:
                if reverse:
                    ax = axes[K - i - 1, K - j - 1]
                else:
                    ax = axes[i, j]
            if j > i:
                ax.set_frame_on(False)
                ax.set_xticks([])
                ax.set_yticks([])
                continue
            elif j == i:
                continue

            # Deal with masked arrays.
            if hasattr(y, "compressed"):
                y = y.compressed()

            hist2d(y,
                   x,
                   ax=ax,
                   range=[range[j], range[i]],
                   weights=weights,
                   color=color,
                   smooth=smooth,
                   bins=[bins[j], bins[i]],
                   **hist2d_kwargs)

            if truths is not None:
                if truths[i] is not None and truths[j] is not None:
                    ax.plot(truths[j], truths[i], "s", color=truth_color)
                if truths[j] is not None:
                    ax.axvline(truths[j], color=truth_color)
                if truths[i] is not None:
                    ax.axhline(truths[i], color=truth_color)

            if max_n_ticks == 0:
                ax.xaxis.set_major_locator(NullLocator())
                ax.yaxis.set_major_locator(NullLocator())
            else:
                ax.xaxis.set_major_locator(
                    MaxNLocator(max_n_ticks, prune="lower"))
                ax.yaxis.set_major_locator(
                    MaxNLocator(max_n_ticks, prune="lower"))

            if i < K - 1:
                ax.set_xticklabels([])
            else:
                if reverse:
                    ax.xaxis.tick_top()
                [l.set_rotation(45) for l in ax.get_xticklabels()]
                if labels is not None:
                    ax.set_xlabel(labels[j], **label_kwargs)
                    if reverse:
                        ax.xaxis.set_label_coords(0.5, 1.4)
                    else:
                        ax.xaxis.set_label_coords(0.5, -0.3)

                # use MathText for axes ticks
                ax.xaxis.set_major_formatter(
                    ScalarFormatter(useMathText=use_math_text))

            if j > 0:
                ax.set_yticklabels([])
            else:
                if reverse:
                    ax.yaxis.tick_right()
                [l.set_rotation(45) for l in ax.get_yticklabels()]
                if labels is not None:
                    if reverse:
                        ax.set_ylabel(labels[i], rotation=-90, **label_kwargs)
                        ax.yaxis.set_label_coords(1.3, 0.5)
                    else:
                        ax.set_ylabel(labels[i], **label_kwargs)
                        ax.yaxis.set_label_coords(-0.3, 0.5)

                # use MathText for axes ticks
                ax.yaxis.set_major_formatter(
                    ScalarFormatter(useMathText=use_math_text))

    return fig
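
A minimal usage sketch for corner(), assuming the companion quantile() and hist2d() helpers are available from the surrounding module; the samples below are synthetic and the output path is a placeholder:

import numpy as np

rng = np.random.default_rng(42)
samples = rng.normal(size=(10000, 3))  # 10k samples in 3 dimensions

fig = corner(samples,
             labels=["$x$", "$y$", "$z$"],
             no_1d=False,          # also draw the diagonal 1-D histograms
             show_titles=True,
             quantiles=[0.16, 0.5, 0.84])
fig.savefig("corner_example.png")
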
Example 5
def src():
    parser = argparse.ArgumentParser()
    parser.add_argument("--image_folder",
                        type=str,
                        default="data/samples",
                        help="path to dataset")
    parser.add_argument("--model_def",
                        type=str,
                        default="config/yolov3-cola.cfg",
                        help="path to model definition file")
    parser.add_argument("--weights_path",
                        type=str,
                        default="checkpoints/yolov3_ckpt_23.pth",
                        help="path to weights file")
    parser.add_argument("--class_path",
                        type=str,
                        default="data/cola/cola.names",
                        help="path to class label file")
    parser.add_argument("--conf_thres",
                        type=float,
                        default=0.5,
                        help="object confidence threshold")
    # parser.add_argument("--conf_thres", type=float, default=0.1, help="object confidence threshold")
    parser.add_argument("--nms_thres",
                        type=float,
                        default=0.4,
                        help="iou thresshold for non-maximum suppression")
    # parser.add_argument("--nms_thres", type=float, default=0.1, help="iou thresshold for non-maximum suppression")
    parser.add_argument("--batch_size",
                        type=int,
                        default=1,
                        help="size of the batches")
    parser.add_argument(
        "--n_cpu",
        type=int,
        default=0,
        help="number of cpu threads to use during batch generation")
    parser.add_argument("--img_size",
                        type=int,
                        default=416,
                        help="size of each image dimension")
    parser.add_argument("--checkpoint_model",
                        type=str,
                        help="path to checkpoint model")
    opt = parser.parse_args()
    print(opt)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    os.makedirs("output", exist_ok=True)

    # Set up model
    model = Darknet(opt.model_def, img_size=opt.img_size).to(device)

    if opt.weights_path.endswith(".weights"):
        # Load darknet weights
        model.load_darknet_weights(opt.weights_path)
    else:
        # Load checkpoint weights
        model.load_state_dict(torch.load(opt.weights_path,
                                         map_location=device))

    model.eval()  # Set in evaluation mode

    dataloader = DataLoader(
        ImageFolder(opt.image_folder, img_size=opt.img_size),
        batch_size=opt.batch_size,
        shuffle=False,
        num_workers=opt.n_cpu,
    )

    classes = load_classes(opt.class_path)  # Extracts class labels from file
    print(classes)
    Tensor = (torch.cuda.FloatTensor
              if torch.cuda.is_available() else torch.FloatTensor)

    imgs = []  # Stores image paths
    img_detections = []  # Stores detections for each image index

    print("\nPerforming object detection:")
    prev_time = time.time()
    for batch_i, (img_paths, input_imgs) in enumerate(dataloader):
        print(input_imgs.shape)
        # Configure input
        input_imgs = Variable(input_imgs.type(Tensor))
        # Get detections
        with torch.no_grad():
            detections = model(input_imgs)
            detections = non_max_suppression(detections, opt.conf_thres,
                                             opt.nms_thres)

        # Log progress
        current_time = time.time()
        inference_time = datetime.timedelta(seconds=current_time - prev_time)
        prev_time = current_time
        # print("\t+ Batch %d, Inference Time: %s" % (batch_i, inference_time))
        # Save image and detections
        imgs.extend(img_paths)
        img_detections.extend(detections)

    # Bounding-box colors
    cmap = plt.get_cmap("tab20b")
    colors = [cmap(i) for i in np.linspace(0, 1, 20)]

    print("images length :{}".format(len(imgs)))
    print(imgs)
    print("detections length :{}".format(len(img_detections)))
    print("\nSaving images:")
    # Iterate through images and save plot of detections
    for img_i, (path, detections) in enumerate(zip(imgs, img_detections)):

        print("(%d) Image: '%s'" % (img_i, path))

        # Create plot
        img = np.array(Image.open(path))
        fig, ax = plt.subplots(1)
        ax.imshow(img)

        # Draw bounding boxes and labels of detections
        if detections is not None:
            # Rescale boxes to original image
            detections = rescale_boxes(detections, opt.img_size, img.shape[:2])
            unique_labels = detections[:, -1].cpu().unique()
            n_cls_preds = len(unique_labels)
            bbox_colors = random.sample(colors, n_cls_preds)
            print("detection : {}".format(detections))
            for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
                # print("\t+ Label: %s, Conf: %.5f" % (classes[int(cls_pred)], cls_conf.item()))

                box_w = x2 - x1
                box_h = y2 - y1
                print(x1, y1, box_w, box_h)
                color = bbox_colors[int(
                    np.where(unique_labels == int(cls_pred))[0])]
                # Create a Rectangle patch
                bbox = patches.Rectangle((x1, y1),
                                         box_w,
                                         box_h,
                                         linewidth=2,
                                         edgecolor=color,
                                         facecolor="none")
                # Add the bbox to the plot
                ax.add_patch(bbox)
                # Add label
                plt.text(
                    x1,
                    y1,
                    s=classes[int(cls_pred)],
                    color="white",
                    verticalalignment="top",
                    bbox={
                        "color": color,
                        "pad": 0
                    },
                )

        # Save generated image with detections
        plt.axis("off")
        plt.gca().xaxis.set_major_locator(NullLocator())
        plt.gca().yaxis.set_major_locator(NullLocator())
        filename = path.split(os.sep)[-1].split(".")[0]
        print(filename)
        plt.savefig(f"outputs/{filename}.png",
                    bbox_inches="tight",
                    pad_inches=0.0)
        plt.close()
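
For reference, the argparse defaults above are normally overridden from the command line; a hypothetical way to drive src() programmatically (paths are placeholders) is to set sys.argv before calling it, since parse_args() reads sys.argv[1:]:

import sys

sys.argv = ["detect.py",
            "--image_folder", "data/samples",
            "--weights_path", "checkpoints/yolov3_ckpt_23.pth",
            "--conf_thres", "0.25",
            "--nms_thres", "0.4"]
# src()  # assumes the Darknet model, dataset utilities and weights are available
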
Example 6
    def boundary_scan_2d(self,
                         fig=None,
                         title_fmt=".2f",
                         max_n_ticks=5,
                         steps=0.01,
                         label='parameter_scan'):
        """Fit result displayed on matrix."""
        if self.nbins <= 1:
            return None
        tx = np.arange(self.range[0], self.range[1], steps)
        ty = np.arange(self.range[0], self.range[1], steps)
        xx, yy = np.meshgrid(tx, ty)
        K = self.nbins
        factor = 2.0
        lbdim = 0.5 * factor  # size of left/bottom margin
        trdim = 0.2 * factor  # size of top/right margin
        whspace = 0.05  # w/hspace size
        plotdim = factor * K + factor * (K - 1.) * whspace
        dim = lbdim + plotdim + trdim

        if fig is None:
            fig, axes = plt.subplots(K, K, figsize=(dim, dim))
        else:
            try:
                axes = np.array(fig.axes).reshape((K, K))
            except:
                raise ValueError("Provided figure has {0} axes, but data has "
                                 "dimensions K={1}".format(len(fig.axes), K))
        lb = lbdim / dim
        tr = (lbdim + plotdim) / dim
        fig.subplots_adjust(left=lb,
                            bottom=lb,
                            right=tr,
                            top=tr,
                            wspace=whspace,
                            hspace=whspace)

        for i in range(0, self.nbins):
            ax = axes[i, i]

            def _fun_1d(x):
                """Fn 1d."""
                _param_ = [ix for ix in self.result.x]
                _param_[i] = x
                return self.cost_fun(np.array(_param_))

            vec_fun_1d = np.vectorize(_fun_1d)
            z1d = vec_fun_1d(tx)

            ax.plot(tx, z1d, 'red')
            ax.axvline(x=self.result.x[i], color='blue', ls="--")
            ax.set_xlim(self.range)
            ax.yaxis.tick_right()
            # if i > 0:
            # ax.set_yticklabels([])
            if max_n_ticks == 0:
                ax.xaxis.set_major_locator(NullLocator())
            else:
                ax.xaxis.set_major_locator(
                    MaxNLocator(max_n_ticks, prune="lower"))
            if i < (self.nbins - 1):
                ax.set_xticklabels([])
            else:
                [l.set_rotation(90) for l in ax.get_xticklabels()]
                ax.xaxis.set_major_formatter(ScalarFormatter(useMathText=True))
                ax.xaxis.set_label_text("$x_%i$" % i)
            if i == 0:
                [l.set_rotation(0) for l in ax.get_yticklabels()]
                ax.yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
                ax.yaxis.set_label_text("cost function")

            for j in range(0, self.nbins):
                ax = axes[i, j]
                if j > i:
                    ax.set_frame_on(False)
                    ax.set_xticks([])
                    ax.set_yticks([])
                    continue
                elif j == i:
                    continue

                def _fun_(x, y):
                    """Fn 2D."""
                    _param_ = [ix for ix in self.result.x]
                    _param_[i] = x
                    _param_[j] = y
                    if x != y:
                        return self.cost_fun(np.array(_param_))
                    else:
                        return 0.0

                vec_fun_ = np.vectorize(_fun_)
                zz = vec_fun_(xx, yy)
                levels = np.linspace(zz.min(), 0.7 * zz.min(), 5)
                ax.contourf(xx,
                            yy,
                            zz,
                            np.linspace(zz.min(), 0.8 * zz.min(), 20),
                            cmap=plt.cm.Spectral_r)
                # C = ax.contour(xx, yy, zz, levels,
                #                 linewidth=0.1, colors='black')
                # ax.clabel(C, inline=1, fontsize=5)
                ax.plot(self.result.x[j],
                        self.result.x[i],
                        'ro',
                        label='best fit')
                ax.set_xlim(self.range)
                ax.set_ylim(self.range)
                if max_n_ticks == 0:
                    ax.xaxis.set_major_locator(NullLocator())
                    ax.yaxis.set_major_locator(NullLocator())
                else:
                    ax.xaxis.set_major_locator(
                        MaxNLocator(max_n_ticks, prune="lower"))
                    ax.yaxis.set_major_locator(
                        MaxNLocator(max_n_ticks, prune="lower"))
                if i < self.nbins - 1:
                    ax.set_xticklabels([])
                else:
                    [l.set_rotation(90) for l in ax.get_xticklabels()]
                    ax.xaxis.set_major_formatter(
                        ScalarFormatter(useMathText=True))
                    ax.xaxis.set_label_text("$x_%i$" % j)
                if j > 0:
                    ax.set_yticklabels([])
                else:
                    [l.set_rotation(0) for l in ax.get_yticklabels()]
                    ax.yaxis.set_major_formatter(
                        ScalarFormatter(useMathText=True))
                    ax.yaxis.set_label_text("$x_%i$" % i)
        return fig
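
boundary_scan_2d() only relies on a few attributes of self. A minimal stand-in illustrating them (all names hypothetical); the call itself is left commented because the method belongs to the surrounding fitter class:

import numpy as np
from types import SimpleNamespace

def _toy_cost(params):
    # Hypothetical cost function: a simple paraboloid around the origin.
    return float(np.sum(np.asarray(params) ** 2))

toy_fit = SimpleNamespace(
    nbins=2,                                # number of fitted parameters
    range=(-1.0, 1.0),                      # scan range shared by every axis
    result=SimpleNamespace(x=np.zeros(2)),  # pretend best-fit point
    cost_fun=_toy_cost,
)
# fig = toy_fit.boundary_scan_2d(steps=0.05)
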
Example 7
def test(config):
    is_training = False
    anchors = [int(x) for x in config["yolo"]["anchors"].split(",")]
    anchors = [[[anchors[i], anchors[i + 1]], [anchors[i + 2], anchors[i + 3]],
                [anchors[i + 4], anchors[i + 5]]]
               for i in range(0, len(anchors), 6)]
    anchors.reverse()
    config["yolo"]["anchors"] = []
    for i in range(3):
        config["yolo"]["anchors"].append(anchors[i])
    # Load and initialize network
    net = ModelMain(config, is_training=is_training)
    net.train(is_training)

    # Set data parallel
    net = nn.DataParallel(net)
    net = net.cuda()

    # Restore pretrain model
    if config["pretrain_snapshot"]:
        logging.info("load checkpoint from {}".format(
            config["pretrain_snapshot"]))
        state_dict = torch.load(config["pretrain_snapshot"])
        net.load_state_dict(state_dict)
    else:
        raise Exception("missing pretrain_snapshot!!!")

    # YOLO loss with 3 scales
    yolo_losses = []
    for i in range(3):
        yolo_losses.append(
            YOLOLayer(config["batch_size"], i, config["yolo"]["anchors"][i],
                      config["yolo"]["classes"],
                      (config["img_w"], config["img_h"])))

    # prepare images path
    images_name = os.listdir(config["images_path"])
    images_path = [
        os.path.join(config["images_path"], name) for name in images_name
    ]
    if len(images_path) == 0:
        raise Exception("no image found in {}".format(config["images_path"]))

    # Start inference
    batch_size = config["batch_size"]
    for step in range(0, len(images_path), batch_size):
        # preprocess
        images = []
        images_origin = []
        for path in images_path[step:step + batch_size]:
            logging.info("processing: {}".format(path))
            image = cv2.imread(path, cv2.IMREAD_COLOR)
            if image is None:
                logging.error("read path error: {}. skip it.".format(path))
                continue
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            images_origin.append(image)  # keep for save result
            image = cv2.resize(image, (config["img_w"], config["img_h"]),
                               interpolation=cv2.INTER_LINEAR)
            image = image.astype(np.float32)
            image /= 255.0
            image = np.transpose(image, (2, 0, 1))
            image = image.astype(np.float32)
            images.append(image)
        images = np.asarray(images)
        images = torch.from_numpy(images).cuda()
        # inference
        with torch.no_grad():
            outputs = net(images)
            output_list = []
            for i in range(3):
                output_list.append(yolo_losses[i](outputs[i]))
            output = torch.cat(output_list, 1)
            batch_detections = non_max_suppression(
                output,
                config["yolo"]["classes"],
                conf_thres=config["confidence_threshold"])

        # write result images. Draw bounding boxes and labels of detections
        with open(config["classes_names_path"], "r") as fh:
            classes = fh.read().split("\n")[:-1]
        if not os.path.isdir("./output/"):
            os.makedirs("./output/")
        for idx, detections in enumerate(batch_detections):
            fig, ax = plt.subplots(1)
            ax.imshow(images_origin[idx])
            if detections is not None:
                unique_labels = detections[:, -1].cpu().unique()
                n_cls_preds = len(unique_labels)
                bbox_colors = random.sample(colors, n_cls_preds)
                for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
                    color = bbox_colors[int(
                        np.where(unique_labels == int(cls_pred))[0])]
                    # Rescale coordinates to original dimensions
                    ori_h, ori_w = images_origin[idx].shape[:2]
                    pre_h, pre_w = config["img_h"], config["img_w"]
                    box_h = ((y2 - y1) / pre_h) * ori_h
                    box_w = ((x2 - x1) / pre_w) * ori_w
                    y1 = (y1 / pre_h) * ori_h
                    x1 = (x1 / pre_w) * ori_w
                    # Create a Rectangle patch
                    bbox = patches.Rectangle((x1, y1),
                                             box_w,
                                             box_h,
                                             linewidth=1,
                                             edgecolor=color,
                                             facecolor='none')
                    # Add the bbox to the plot
                    ax.add_patch(bbox)
                    # Add label
                    # plt.text(x1, y1, s=classes[int(cls_pred)], color='white',
                    #          verticalalignment='top',
                    #          bbox={'color': color, 'pad': 0})
            # Save generated image with detections
            plt.axis('off')
            plt.gca().xaxis.set_major_locator(NullLocator())
            plt.gca().yaxis.set_major_locator(NullLocator())
            # plt.show()
            plt.savefig('output/{}_{}.jpg'.format(step, idx),
                        bbox_inches='tight',
                        pad_inches=0.0)
            plt.close()
    logging.info("Save all results to ./output/")
Example 8
def plot_usa_daybyday_case_diffs(
    states_df: pd.DataFrame,
    *,
    geo_df: geopandas.GeoDataFrame = None,
    stage: Union[DiseaseStage, Literal[Select.ALL]],
    count: Union[Counting, Literal[Select.ALL]],
    dates: List[pd.Timestamp] = None,
) -> pd.DataFrame:

    Counting.verify(count, allow_select=True)
    DiseaseStage.verify(stage, allow_select=True)

    if geo_df is None:
        geo_df = get_geo_df()

    DIFF_COL = "Diff_"
    ASPECT_RATIO = 1 / 20
    PAD_FRAC = 0.5
    N_CBAR_BUCKETS = 6  # only used when bucketing colormap into discrete regions
    N_BUCKETS_BTWN_MAJOR_TICKS = 1
    N_MINOR_TICKS_BTWN_MAJOR_TICKS = 8  # major_1, minor_1, ..., minor_n, major_2
    N_CBAR_MAJOR_TICKS = N_CBAR_BUCKETS // N_BUCKETS_BTWN_MAJOR_TICKS + 1
    CMAP = cmocean.cm.matter
    # CMAP = ListedColormap(cmocean.cm.matter(np.linspace(0, 1, N_CBAR_BUCKETS)))
    DPI = 300
    NOW_STR = datetime.now(timezone.utc).strftime(r"%b %-d, %Y at %H:%M UTC")

    ID_COLS = [
        Columns.TWO_LETTER_STATE_CODE,
        Columns.DATE,
        Columns.STAGE,
        Columns.COUNT_TYPE,
    ]

    save_fig_kwargs = {
        "dpi": "figure",
        "bbox_inches": "tight",
        "facecolor": "w"
    }

    if count is Select.ALL:
        count_list = list(Counting)
    else:
        count_list = [count]

    if stage is Select.ALL:
        stage_list = list(DiseaseStage)
    else:
        stage_list = [stage]

    count_list: List[Counting]
    stage_list: List[DiseaseStage]

    if dates is None:
        dates: List[pd.Timestamp] = states_df[Columns.DATE].unique()

    dates = sorted(pd.Timestamp(date) for date in dates)

    # Get day-by-day case diffs per location, date, stage, count-type
    case_diffs_df = states_df[
        (states_df[Columns.TWO_LETTER_STATE_CODE].isin(USA_STATE_CODES))
        &
        (~states_df[Columns.TWO_LETTER_STATE_CODE].isin(["AK", "HI"]))].copy()

    # Make sure data exists for every date for every state so that the entire country is
    # plotted each day; fill missing data with 0 (missing really *is* as good as 0)
    state_date_stage_combos = pd.MultiIndex.from_product(
        [
            case_diffs_df[Columns.TWO_LETTER_STATE_CODE].unique(),
            dates,
            [s.name for s in DiseaseStage],
            [c.name for c in Counting],
        ],
        names=ID_COLS,
    )

    case_diffs_df = (state_date_stage_combos.to_frame(index=False).merge(
        case_diffs_df,
        how="left",
        on=ID_COLS,
    ).sort_values(ID_COLS))

    case_diffs_df[Columns.CASE_COUNT] = case_diffs_df[
        Columns.CASE_COUNT].fillna(0)

    case_diffs_df[DIFF_COL] = case_diffs_df.groupby(
        [Columns.TWO_LETTER_STATE_CODE, Columns.STAGE,
         Columns.COUNT_TYPE])[Columns.CASE_COUNT].diff()

    case_diffs_df = case_diffs_df[case_diffs_df[DIFF_COL].notna()]

    dates = case_diffs_df[Columns.DATE].unique()

    vmins = {
        Counting.TOTAL_CASES:
        1,
        Counting.PER_CAPITA:
        case_diffs_df.loc[case_diffs_df[DIFF_COL] > 0, DIFF_COL].min(),
    }
    vmaxs = case_diffs_df.groupby([Columns.STAGE,
                                   Columns.COUNT_TYPE])[DIFF_COL].max()

    fig: plt.Figure = plt.figure(facecolor="white", dpi=DPI)

    # Don't put too much stock in these, we tweak them later to make sure they're even
    fig_width_px = len(count_list) * 1800
    fig_height_px = len(stage_list) * 1000 + 200

    max_date = max(dates)

    # The order doesn't matter, but doing later dates first lets us see interesting
    # output in Finder earlier, which is good for debugging
    for date in reversed(dates):
        date: pd.Timestamp = pd.Timestamp(date)
        # Data is associated with the right endpoint of the data collection period,
        # e.g., data collected *on* March 20 is labeled March 21 -- this is done so that
        # data collected today (on the day the code is run) has a meaningful date
        # associated with it (today's current time)
        # Anyway, here we undo that and display data on the date it was collected
        # in order to show a meaningful title on the graph
        if date == date.normalize():
            collection_date = date - pd.Timedelta(days=1)
        else:
            collection_date = date.normalize()

        fig.suptitle(collection_date.strftime(r"%b %-d, %Y"))

        for subplot_index, (stage, count) in enumerate(
                itertools.product(stage_list, count_list), start=1):
            ax: plt.Axes = fig.add_subplot(len(stage_list), len(count_list),
                                           subplot_index)

            # Add timestamp to top right axis
            if subplot_index == 2:
                ax.text(
                    1.25,  # Coords are arbitrary magic numbers
                    1.23,
                    f"Last updated {NOW_STR}",
                    horizontalalignment="right",
                    fontsize="small",
                    transform=ax.transAxes,
                )

            # Filter to just this axes: this stage, this count-type, this date
            stage_date_df = case_diffs_df[
                (case_diffs_df[Columns.STAGE] == stage.name)
                & (case_diffs_df[Columns.COUNT_TYPE] == count.name)
                & (case_diffs_df[Columns.DATE] == date)]

            # Should have length 49 (50 + DC - AK - HI)
            stage_geo_df: geopandas.GeoDataFrame = geo_df.merge(
                stage_date_df,
                how="inner",
                left_on="STUSPS",
                right_on=Columns.TWO_LETTER_STATE_CODE,
            )
            assert len(stage_geo_df) == 49

            vmin = vmins[count]
            vmax = vmaxs.loc[(stage.name, count.name)]

            # Create log-scaled color mapping
            # https://stackoverflow.com/a/43807666
            norm = LogNorm(vmin, vmax)
            scm = plt.cm.ScalarMappable(norm=norm, cmap=CMAP)

            # Actually plot the data. Omit legend, since we'll want to customize it and
            # it's easier to create a new one than customize the existing one.
            stage_geo_df.plot(
                column=DIFF_COL,
                ax=ax,
                legend=False,
                vmin=vmin,
                vmax=vmax,
                cmap=CMAP,
                norm=norm,
            )

            # Plot state boundaries
            stage_geo_df.boundary.plot(ax=ax, linewidth=0.06, edgecolor="k")

            # Add colorbar axes to right side of graph
            # https://stackoverflow.com/a/33505522
            divider = make_axes_locatable(ax)
            width = axes_size.AxesY(ax, aspect=ASPECT_RATIO)
            pad = axes_size.Fraction(PAD_FRAC, width)
            cax = divider.append_axes("right", size=width, pad=pad)

            # Add colorbar itself
            cbar = fig.colorbar(scm, cax=cax)

            # Add evenly spaced ticks and their labels
            # First major, then minor
            # Adapted from https://stackoverflow.com/a/50314773
            bucket_size = (vmax / vmin)**(1 / N_CBAR_BUCKETS)
            tick_dist = bucket_size**N_BUCKETS_BTWN_MAJOR_TICKS

            # Simple log scale math
            major_tick_locs = (
                vmin * (tick_dist**np.arange(0, N_CBAR_MAJOR_TICKS))
                # * (bucket_size ** 0.5) # Use this if centering ticks on buckets
            )

            cbar.set_ticks(major_tick_locs)

            # Get minor locs by linearly interpolating between major ticks
            minor_tick_locs = []
            for major_tick_index, this_major_tick in enumerate(
                    major_tick_locs[:-1]):
                next_major_tick = major_tick_locs[major_tick_index + 1]

                # Get minor ticks as numbers in range [this_major_tick, next_major_tick]
                # and exclude the major ticks themselves (once we've used them to
                # compute the minor tick locs)
                minor_tick_locs.extend(
                    np.linspace(
                        this_major_tick,
                        next_major_tick,
                        N_MINOR_TICKS_BTWN_MAJOR_TICKS + 2,
                    )[1:-1])

            cbar.ax.yaxis.set_ticks(minor_tick_locs, minor=True)
            cbar.ax.yaxis.set_minor_formatter(NullFormatter())

            # Add major tick labels
            if count is Counting.PER_CAPITA:
                fmt_func = "{:.2e}".format
            else:
                fmt_func = functools.partial(format_float,
                                             max_digits=5,
                                             decimal_penalty=2)

            cbar.set_ticklabels(
                [fmt_func(x) if x != 0 else "0" for x in major_tick_locs])

            # Set axes titles
            ax_stage_name: str = {
                DiseaseStage.CONFIRMED: "Cases",
                DiseaseStage.DEATH: "Deaths",
            }[stage]
            ax_title_components: List[str] = ["New Daily", ax_stage_name]
            if count is Counting.PER_CAPITA:
                ax_title_components.append("Per Capita")

            ax.set_title(" ".join(ax_title_components))

            # Remove axis ticks (I think they're lat/long but we don't need them)
            for axis in (ax.xaxis, ax.yaxis):
                axis.set_major_locator(NullLocator())
                axis.set_minor_locator(NullLocator())

        # Save figure, and then deal with matplotlib weirdness that doesn't exactly
        # respect the dimensions we set due to bbox_inches='tight'
        save_path: Path = DOD_DIFF_DIR / f"dod_diff_{date.strftime(r'%Y%m%d')}.png"
        fig.set_size_inches(fig_width_px / DPI, fig_height_px / DPI)
        fig.savefig(save_path, **save_fig_kwargs)

        # x264 video encoder requires frames have even width and height
        resize_to_even_dims(save_path)

        # Save poster (preview frame for video on web)
        if date == max_date:
            (GEO_FIG_DIR / "dod_diff_poster.png").write_bytes(
                save_path.read_bytes())

        fig.clf()

        print(f"Saved '{save_path}'")

        # if date < pd.Timestamp("2020-4-16"):
        #     break

    return case_diffs_df
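
The colorbar tick placement above is straightforward log-scale arithmetic: with N_CBAR_BUCKETS equal-ratio buckets between vmin and vmax, each bucket spans a factor of (vmax / vmin) ** (1 / N_CBAR_BUCKETS). A small numeric check with made-up limits:

import numpy as np

vmin, vmax = 1.0, 1e6
N_CBAR_BUCKETS = 6
N_BUCKETS_BTWN_MAJOR_TICKS = 1
N_CBAR_MAJOR_TICKS = N_CBAR_BUCKETS // N_BUCKETS_BTWN_MAJOR_TICKS + 1

bucket_size = (vmax / vmin) ** (1 / N_CBAR_BUCKETS)   # 10.0 for these limits
tick_dist = bucket_size ** N_BUCKETS_BTWN_MAJOR_TICKS
major_tick_locs = vmin * tick_dist ** np.arange(N_CBAR_MAJOR_TICKS)
print(major_tick_locs)  # 1, 10, 100, ..., 1e6
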
                     ha="center",
                     size=6,
                     c=line.get_color())

plt.semilogy()

plt.xlabel("Time")
plt.ylabel("Death count")

from matplotlib.dates import AutoDateLocator, DateFormatter
from matplotlib.ticker import LogLocator, NullLocator, LogFormatter
from util import LogFormatterSI
import numpy

plt.gca().xaxis.set_major_locator(AutoDateLocator())
plt.gca().xaxis.set_major_formatter(DateFormatter("%m/%d"))
#plt.gca().xaxis.set_minor_locator(AutoDateLocator())
plt.gca().yaxis.set_major_locator(LogLocator(subs=(1, 2, 5)))
plt.gca().yaxis.set_major_formatter(
    LogFormatterSI(labelOnlyBase=False,
                   minor_thresholds=(numpy.inf, numpy.inf)))
plt.gca().yaxis.set_minor_locator(NullLocator())

plt.title("New York City COVID-19 death count")

#plt.xlim(left=arrow.get("2020-03-01"))
#plt.ylim(bottom=10)

plt.legend()
plt.savefig("plots/NYState5.png", dpi=300)
Example 10
def status_plot(request,
                params,
                width=1000.0,
                height=500.0,
                hours=24.0,
                title=None,
                xlabel="Time, UT",
                ylabel=None,
                ylog=False,
                grid=True):
    hours = float(hours) if hours else 24.0

    time0 = None  # Mid-time for 'zooming' plot

    time1, time2 = None, None

    if request.GET:
        width = float(request.GET.get('width', width))
        height = float(request.GET.get('height', height))
        hours = float(request.GET.get('hours', hours))
        if 'ylog' in request.GET:
            # FIXME: make it possible to pass False somehow
            ylog = True

        title = request.GET.get('title', title)
        xlabel = request.GET.get('xlabel', xlabel)
        ylabel = request.GET.get('ylabel', ylabel)

        # Time range
        if 'time0' in request.GET:
            time0 = parse_time(request.GET.get('time0'))

            time1 = time0 - datetime.timedelta(hours=hours / 2)
            time2 = time0 + datetime.timedelta(hours=hours / 2)

    if time1 is None or time2 is None:
        # Default time range is given number of hours back from now
        time1 = datetime.datetime.utcnow() - datetime.timedelta(hours=hours)
        time2 = datetime.datetime.utcnow()

    if not title:
        title = params

        if time0:
            title += ' : ' + str(hours) + ' hours around ' + time0.strftime(
                '%Y.%m.%d %H:%M:%S') + ' UT'
        else:
            title += ' : ' + time1.strftime(
                '%Y.%m.%d %H:%M:%S') + ' UT - ' + time2.strftime(
                    '%Y.%m.%d %H:%M:%S') + ' UT'

    # Parse comma-separated list of client.param strings
    # TODO: add support for root level parameters, with no dots
    params = params.split(',')
    select = {}

    if not ylabel and len(params) == 1:
        ylabel = params[0]

    labels = []
    for param in params:
        s = param.split('.')  # Split
        select[s[0] + '.' +
               s[1]] = "(status #> '{%s}' #>> '{%s}')::float" % (s[0], s[1])
        labels.append(s[0] + '.' + s[1])

    ms = MonitorStatus.objects.extra(
        select=select).defer('status').order_by('time')
    ms = ms.filter(time__gt=time1)
    ms = ms.filter(time__lte=time2)

    values = [[getattr(_, __) for _ in ms] for __ in labels]
    time = [_.time for _ in ms]

    fig = Figure(facecolor='white',
                 dpi=72,
                 figsize=(width * 1.0 / 72, height * 1.0 / 72),
                 tight_layout=True)
    ax = fig.add_subplot(111)
    ax.autoscale()
    # ax.plot()

    has_data = False

    for _, value in enumerate(values):
        if np.any(np.array(value) != None):
            has_data = True
            ax.plot(time, value, '-', label=labels[_].split('.')[-1])

    # if time and has_data: # It is failing if no data are plotted
    if (time2 - time1).total_seconds() < 2 * 24 * 3600:
        ax.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))
    elif (time2 - time1).total_seconds() > 3 * 24 * 3600:
        ax.xaxis.set_major_formatter(DateFormatter('%Y.%m.%d'))
    else:
        ax.xaxis.set_major_formatter(DateFormatter('%Y.%m.%d %H:%M:%S'))

    fig.autofmt_xdate()

    if request.GET and 'mark' in request.GET:
        time_mark = parse_time(request.GET.get('mark'))
        ax.axvline(time_mark, color='red', ls='--', alpha=1.0)

    ax.set_xlim(time1, time2)

    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_title(title)
    ax.grid(grid)

    if len(labels) > 1:
        ax.legend(frameon=True, loc=2, framealpha=0.99)

    if ylog:
        ax.set_yscale('log', nonposy='clip')

        # Try to fix the ticks if the data span is too small
        axis = ax.get_yaxis()
        print(np.ptp(np.log10(axis.get_data_interval())))
        if np.ptp(np.log10(axis.get_data_interval())) < 1:
            axis.set_major_locator(MaxNLocator())
            axis.set_minor_locator(NullLocator())

    # 3% margins on both axes
    ax.margins(0.03, 0.03)

    # handles, labels = ax.get_legend_handles_labels()
    # ax.legend(handles, labels, loc=2)

    canvas = FigureCanvas(fig)
    s = StringIO()
    canvas.print_png(s)
    response = HttpResponse(s.getvalue(), content_type='image/png')

    return response
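
The view takes its parameter list from the URL. A minimal sketch of a matching URL pattern follows; the project's actual urls.py is not shown, so the route and module names below are assumptions:

# urls.py
from django.urls import path

from . import views

urlpatterns = [
    # e.g. /status_plot/weather.temperature,weather.humidity/?hours=12&ylog=1
    path("status_plot/<params>/", views.status_plot, name="status_plot"),
]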
Example 11
    def callback(self, req):
        excute_time = rospy.Time.now()
        print("\nyolov3_detection_server callback {}".format(excute_time))
        cv_image = np.zeros((req.image.height, req.image.width, 3), dtype=np.uint8)
        try:
            cv_image = self._bridge.imgmsg_to_cv2(req.image, "bgr8")
        except CvBridgeError as e:
            print(e)

        single_input_img = transforms.ToTensor()(PILImage.fromarray(cv2.cvtColor(cv_image.copy(), cv2.COLOR_BGR2RGB)))
        single_input_img, _ = pad_to_square(single_input_img, 0)
        single_input_img = resize(single_input_img, self._opt.img_size)

        print("Performing object detection:")
        prev_time = time.time()
        Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
        single_input_img = Variable(single_input_img.type(Tensor))
        single_input_img = torch.unsqueeze(single_input_img, dim=0)

        # Get detections
        with torch.no_grad():
            detections = self.model(single_input_img)
            # print(type(detections))
            # print(detections)
            detections = non_max_suppression(detections, self._opt.conf_thres, self._opt.nms_thres)
            # print(type(detections))
            # print(detections)

        # Log progress
        current_time = time.time()
        inference_time = datetime.timedelta(seconds=current_time - prev_time)
        prev_time = current_time
        print("Inference Time: %s" % (inference_time))

        # Bounding-box colors
        cmap = plt.get_cmap("tab20b")
        colors = [cmap(i) for i in np.linspace(0, 1, 20)]

        # Create plot
        img = cv2.cvtColor(cv_image.copy(), cv2.COLOR_BGR2RGB)
        figure = plt.figure()
        fig, ax = plt.subplots(1)
        ax.imshow(img)

        # Draw bounding boxes and labels of detections

        # print(type(detections))
        # print(detections)

        # when the source has only one input image, we should use index 0 to convert data type of detections
        # Convert the following:
        # [tensor([   [ 17.0720, 143.8836, 103.6933, 200.3457,   1.0000,   1.0000,   0.0000],
        #             [297.4258, 142.2059, 389.5064, 205.5459,   1.0000,   1.0000,   0.0000],
        #             [184.6890, 181.4856, 258.8690, 232.0957,   1.0000,   1.0000,   0.0000]
        #             ])
        # ]
        # To:
        # tensor([    [ 17.0720, 143.8836, 103.6933, 200.3457,   1.0000,   1.0000,   0.0000],
        #             [297.4258, 142.2059, 389.5064, 205.5459,   1.0000,   1.0000,   0.0000],
        #             [184.6890, 181.4856, 258.8690, 232.0957,   1.0000,   1.0000,   0.0000]
        #             ])
        # This is needed because the img_detections list (and the extend() call that
        # filled it) was removed; without indexing into the returned list, the type
        # no longer matches what the drawing code below expects.

        detections = detections[0]
        detection_list=list()
        if detections is not None:
            # Rescale boxes to original image
            detections = rescale_boxes(detections, self._opt.img_size, img.shape[:2])
            unique_labels = detections[:, -1].cpu().unique()
            n_cls_preds = len(unique_labels)
            bbox_colors = random.sample(colors, n_cls_preds)
            for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
                print("\t + Label: %s, Conf: %.5f" % ((self.classes[int(cls_pred)]).replace('\n', '').replace('\r', ''), cls_conf.item()))
                box_w = x2 - x1
                box_h = y2 - y1
                color = bbox_colors[int(np.where(unique_labels == int(cls_pred))[0])]
                # Create a Rectangle patch
                bbox = patches.Rectangle((x1, y1), box_w, box_h, linewidth=2, edgecolor=color, facecolor="none")
                # Add the bbox to the plot
                ax.add_patch(bbox)
                # Add label
                plt.text(
                    x1,
                    y1,
                    s=(self.classes[int(cls_pred)]).replace('\n', '').replace('\r', ''),
                    color="white",
                    verticalalignment="top",
                    bbox={"color": color, "pad": 0},
                )
                detection_list.extend([int(cls_pred), x1, y1, box_w, box_h])

        # Save generated image with detections
        plt.axis("off")
        plt.gca().xaxis.set_major_locator(NullLocator())
        plt.gca().yaxis.set_major_locator(NullLocator())

        # figure_name_to_save = self._opt.outputs_folder + "/" + str(excute_time) +".png"
        figure_name_to_save = self._opt.outputs_folder + "/results.png"
        plt.savefig(figure_name_to_save, bbox_inches="tight", pad_inches=0.0)
        plt.close()

        image = fig2data(figure)
        try:
            self._image_pub.publish(self._bridge.cv2_to_imgmsg(image, "rgba8"))
        except CvBridgeError as e:
            print(e)



        res = DetectionTaskResponse()
        res.results.data = detection_list
        # _image_pub.publish()
        # rospy.sleep(0.050)
        return res
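
The callback publishes the rendered figure through fig2data, which is not defined in the example. A minimal sketch of such a helper, assuming it rasterizes a matplotlib Figure into an RGBA array compatible with the 'rgba8' image message (the real implementation may differ):

import numpy as np
from matplotlib.backends.backend_agg import FigureCanvasAgg


def fig2data(figure):
    """Rasterize a matplotlib Figure into an (H, W, 4) RGBA uint8 array."""
    canvas = FigureCanvasAgg(figure)
    canvas.draw()
    width, height = canvas.get_width_height()
    rgba = np.frombuffer(canvas.buffer_rgba(), dtype=np.uint8)
    return rgba.reshape(height, width, 4)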
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(1, 2, 1)
plt.title('RBM Weight Matrix')
# Reverse the yaxis limits
ax.set_ylim(*ax.get_ylim()[::-1])
enAx = fig.add_subplot(1, 2, 2)
timeSeries = []
energyDiff = []

for i in range(numEpochs // UpdateTime):
    #setup the weight matrix plot
    ax.clear()
    ax.patch.set_facecolor('gray')
    ax.set_aspect('equal', 'box')
    ax.xaxis.set_major_locator(NullLocator())
    ax.yaxis.set_major_locator(NullLocator())
    # setup the energy time series plot
    # do the calcs
    print(myNet.theWeights())
    W = matRound(myNet.weights, 3)[[0] + myNet.Iin, :][:, [0] + myNet.Hin].T
    maxWeight = 2**np.ceil(np.log(np.abs(W).max()) / np.log(2))
    maxWeight = np.abs(W).max() + 1
    for (x, y), w in np.ndenumerate(W):
        if w > 0: color = 'white'
        else: color = 'black'
        size = np.log10(np.abs(w) + 1) / np.log10(maxWeight) + 0.05
        rect = Rectangle([x - size / 2, y - size / 2],
                         size,
                         size,
                         facecolor=color,
Example 13
def detect(img_raw):
    matplotlib.use('agg')
    matplotlib.pyplot.switch_backend('Agg')

    parser = argparse.ArgumentParser()
    parser.add_argument("--model_def",
                        type=str,
                        default="config/yolov3.cfg",
                        help="path to model definition file")
    parser.add_argument("--weights_path",
                        type=str,
                        default="weights/yolov3.weights",
                        help="path to weights file")
    parser.add_argument("--class_path",
                        type=str,
                        default="data/coco.names",
                        help="path to class label file")
    parser.add_argument("--conf_thres",
                        type=float,
                        default=0.8,
                        help="object confidence threshold")
    parser.add_argument("--nms_thres",
                        type=float,
                        default=0.4,
                        help="iou thresshold for non-maximum suppression")
    parser.add_argument("--batch_size",
                        type=int,
                        default=1,
                        help="size of the batches")
    parser.add_argument(
        "--n_cpu",
        type=int,
        default=0,
        help="number of cpu threads to use during batch generation")
    parser.add_argument("--img_size",
                        type=int,
                        default=416,
                        help="size of each image dimension")
    parser.add_argument("--checkpoint_model",
                        type=str,
                        help="path to checkpoint model")
    parser.add_argument("run", type=str, help="default flask instruction")
    opt = parser.parse_args()
    print(opt)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Set up model
    model = Darknet(opt.model_def, img_size=opt.img_size).to(device)

    if opt.weights_path.endswith(".weights"):
        # Load darknet weights
        model.load_darknet_weights(opt.weights_path)
    else:
        # Load checkpoint weights
        model.load_state_dict(torch.load(opt.weights_path))

    model.eval()  # Set in evaluation mode

    classes = load_classes(opt.class_path)  # Extracts class labels from file

    Tensor = torch.cuda.FloatTensor if torch.cuda.is_available(
    ) else torch.FloatTensor

    input_img = img_raw
    # Extract image as PyTorch tensor
    input_img = transforms.ToTensor()(input_img)
    # Pad to square resolution
    input_img, _ = pad_to_square(input_img, 0)
    # Resize
    input_img = resize(input_img, opt.img_size)
    # Unsqueeze
    input_img = input_img.unsqueeze(0)

    # Configure input
    input_img = Variable(input_img.type(Tensor))

    print("\nPerforming object detections:")
    prev_time = time.time()

    # Get detections
    with torch.no_grad():
        detections = model(input_img)
        detections = non_max_suppression(detections, opt.conf_thres,
                                         opt.nms_thres)[0]

    # Log progress
    current_time = time.time()
    inference_time = datetime.timedelta(seconds=current_time - prev_time)
    prev_time = current_time
    print("\t+ Inference Time: %s" % (inference_time))

    # Bounding-box colors
    cmap = plt.get_cmap("tab20b")
    colors = [cmap(i) for i in np.linspace(0, 1, 20)]

    # Create plot
    img = np.array(img_raw)
    plt.figure()
    fig, ax = plt.subplots(1)
    ax.imshow(img)
    cropped = {}

    # Draw bounding boxes and labels of detections
    if detections is not None:
        # Rescale boxes to original image
        detections = rescale_boxes(detections, opt.img_size, img.shape[:2])
        unique_labels = detections[:, -1].cpu().unique()
        n_cls_preds = len(unique_labels)
        bbox_colors = random.sample(colors, n_cls_preds)
        for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:

            print("\t+ Label: %s, Conf: %.5f" %
                  (classes[int(cls_pred)], cls_conf.item()))

            box_w = x2 - x1
            box_h = y2 - y1

            color = bbox_colors[int(
                np.where(unique_labels == int(cls_pred))[0])]
            # Create a Rectangle patch
            bbox = patches.Rectangle((x1, y1),
                                     box_w,
                                     box_h,
                                     linewidth=2,
                                     edgecolor=color,
                                     facecolor="none")
            # Add the bbox to the plot
            ax.add_patch(bbox)
            # Add label
            plt.text(
                x1,
                y1,
                s=classes[int(cls_pred)],
                color="white",
                verticalalignment="top",
                bbox={
                    "color": color,
                    "pad": 0
                },
            )

            # Crop for new images (left, upper, right, lower)
            if classes[int(cls_pred)] not in cropped:
                cropped[classes[int(cls_pred)]] = []
            cropped[classes[int(cls_pred)]].append(
                (x1.item(), y1.item(), x2.item(), y2.item()))

    # Save generated image with detections
    plt.axis("off")
    plt.gca().xaxis.set_major_locator(NullLocator())
    plt.gca().yaxis.set_major_locator(NullLocator())

    # Explicitly closing
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    plt_bytes = buf.getvalue()
    plt.close()
    buf.close()

    return plt_bytes, cropped
Example 14
def train_demo(model, logger, epoch_n, path="data/samples", img_size=416,
         class_path="data/classes.names", imag_path="misc/images"):

    conf_thres = 0.95
    nms_thres = 0.5

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
    classes = load_classes(class_path)  # Extracts class labels from file
    model.eval()  # Set in evaluation mode

    dataloader = DataLoader(
        ImageFolder(path, img_size=img_size, dataset=""),
        batch_size=1,
        shuffle=False,
        num_workers=1,
    )

    imgs = []  # Stores image paths
    img_detections = []  # Stores detections for each image index

    print("\nPerforming object detection:")
    prev_time = time.time()
    for batch_i, (img_paths, input_imgs) in enumerate(dataloader):
        # Configure input
        input_imgs = Variable(input_imgs.type(Tensor))

        # Get detections
        with torch.no_grad():
            detections = model(input_imgs)
            detections = non_max_suppression(detections, conf_thres, nms_thres)

        # Log progress
        current_time = time.time()
        inference_time = datetime.timedelta(seconds=current_time - prev_time)
        prev_time = current_time
        print("\t+ Batch %d, Inference Time: %s" % (batch_i, inference_time))

        # Save image and detections
        imgs.extend(img_paths)
        img_detections.extend(detections)

    # Bounding-box colors
    cmap = plt.get_cmap("tab20b")
    colors = [cmap(i) for i in np.linspace(0, 1, 20)]

    print("\nSaving images:")
    save_path = imag_path + str(epoch_n) + "/"
    # Iterate through images and save plot of detections
    for img_i, (path, detections) in enumerate(zip(imgs, img_detections)):

        print("(%d) Image: '%s'" % (img_i, path))

        # Create plot
        img = np.array(Image.open(path))
        plt.figure()
        fig, ax = plt.subplots(1)
        ax.imshow(img)

        # Draw bounding boxes and labels of detections
        if detections is not None:
            # Rescale boxes to original image
            detections = rescale_boxes(detections, img_size, 1920)
            unique_labels = detections[:, -1].cpu().unique()
            n_cls_preds = len(unique_labels)
            bbox_colors = random.sample(colors, n_cls_preds)
            for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:

                box_w = x2 - x1
                box_h = y2 - y1

                color = bbox_colors[int(np.where(unique_labels == int(cls_pred))[0])]
                # Create a Rectangle patch
                bbox = patches.Rectangle((x1, y1), box_w, box_h, linewidth=0.75, edgecolor=color, facecolor="none")
                # Add the bbox to the plot
                ax.add_patch(bbox)

        # Save generated image with detections
        plt.axis("off")
        plt.gca().xaxis.set_major_locator(NullLocator())
        plt.gca().yaxis.set_major_locator(NullLocator())
        filename = path.split("/")[-1].split(".")[0]
        os.makedirs(imag_path, exist_ok=True)
        plt.savefig(f"{imag_path}/{str(epoch_n)}_{filename}.png", bbox_inches="tight", pad_inches=0.0)
        plt.close('all')

        image = Image.open(f"{imag_path}/{str(epoch_n)}_{filename}.png")
        img = TF.to_tensor(image)
        logger.image_summary(filename, img, epoch_n)
Example 15
def draw_detections(img, detections_lst, output_path):
    assert isinstance(detections_lst, list)
    # Bounding-box colors
    cmap = plt.get_cmap("tab20b")
    colors = [cmap(i) for i in np.linspace(0, 1, 20)]
    plt.figure()
    fig, ax = plt.subplots(1)
    ax.imshow(img)

    class_lst = []

    # Draw bounding boxes and labels of detections
    for detections in detections_lst:
        if detections is None:
            continue
        # Rescale boxes to original image
        detections = rescale_boxes(detections, opt.img_size, img.shape[:2])
        unique_labels = detections[:, -1].cpu().unique()
        n_cls_preds = len(unique_labels)
        bbox_colors = random.sample(colors, n_cls_preds)
        for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
            if cls_conf.item() < opt.conf_thres:
                continue

            x1 = max(x1, 0)
            y1 = max(y1, 0)
            x2 = min(x2, img.shape[1])  # clip x to the image width
            y2 = min(y2, img.shape[0])  # clip y to the image height

            box_w = x2 - x1
            box_h = y2 - y1

            if box_w < 20 or box_h < 20:
                continue

            class_lst.append(classes[int(cls_pred)])
            print("\t+ Label: %s, (%d, %d) Conf: %.5f" % (classes[int(cls_pred)], box_w, box_h, cls_conf.item()))

            color = bbox_colors[int(np.where(unique_labels == int(cls_pred))[0])]
            # Create a Rectangle patch
            bbox = patches.Rectangle((x1, y1), box_w, box_h, linewidth=2, edgecolor=color, facecolor="none")
            # Add the bbox to the plot
            ax.add_patch(bbox)
            # Add label
            plt.text(
                x1,
                y1,
                s=classes[int(cls_pred)],
                color="white",
                verticalalignment="top",
                bbox={"color": color, "pad": 0},
            )

    # Save generated image with detections
    plt.axis("off")
    plt.gca().xaxis.set_major_locator(NullLocator())
    plt.gca().yaxis.set_major_locator(NullLocator())
    plt.savefig(output_path, bbox_inches="tight", pad_inches=0.0)
    plt.close()
    plt.clf()
    return class_lst
Example 16
def detect(inputDir, inputFile, framesDir, outputTxt, outputFolder, conf, nms, cuda, thread=None):

    device = torch.device("cuda" if cuda and torch.cuda.is_available() else "cpu")
    print("loading model...")
    model = Darknet("./resources/yolo-coco/yolov3.cfg").to(device)
    model.load_darknet_weights("./resources/yolo-coco/yolov3.weights")
    model.eval()
    print("model loaded: ", IMAGE_SIZE)

    imageOutPath = os.path.join(inputDir, outputFolder)
    os.makedirs(imageOutPath, exist_ok=True)

    classes = load_classes("./resources/yolo-coco/coco.names")  # Extracts class labels from file


    framesPath = os.path.join( inputDir, framesDir)
    os.makedirs(framesPath, exist_ok=True)
    # checks if there are enough frames in framesPath
    n_frames = len(os.listdir(framesPath))
    print(framesPath, n_frames)
    if n_frames < 500:
        if(thread):
            thread.signalCanvas("Extracting frames from {}".format(inputFile))
        create_frames(inputDir, inputFile, framesDir)
    else:
        if(thread):
            thread.signalCanvas("Found {} frames in  {}. Delete this folder to re-extract frames".format(n_frames, framesPath))


    dataloader = DataLoader(
        ImageFolder(framesPath, img_size=IMAGE_SIZE),
        batch_size=BS,
        shuffle=False,
        num_workers=0)
    
    model.eval()

    Tensor = torch.cuda.FloatTensor if cuda and torch.cuda.is_available() else torch.FloatTensor

    imgs = []  # Stores image paths
    img_detections = []  # Stores detections for each image index

    print("\nPerforming object detection:")
    if thread:
        total = len(dataloader)
        thread.signalCanvas("\nPerforming object detection:")
        thread.signalTopLabel("{}: 0/{} batches".format(inputFile, total))

    prev_time = time.time()
    for batch_i, (img_paths, input_imgs) in enumerate(dataloader):
        if thread:
            tlbl = "{}: {}/{} batches".format(inputFile, batch_i + 1, total)
            thread.signalTopLabel(tlbl)
            thread.signalTopBar(max( (((batch_i + 1) / total) * 100), 1))
        # Configure input
        input_imgs = Variable(input_imgs.type(Tensor))
        print(np.shape(input_imgs))

        # Get detections
        with torch.no_grad():
            detections = model(input_imgs)
            detections = non_max_suppression(detections, conf, nms)


        # Log progress
        current_time = time.time()
        inference_time = datetime.timedelta(seconds=current_time - prev_time)
        prev_time = current_time
        print("\t+ Batch %d, Inference Time: %s" % (batch_i, inference_time))


        # Save image and detections
        imgs.extend(img_paths)
        img_detections.extend(detections)

    print("imgs: ", imgs)
    print("dets: ", img_detections)

    # Bounding-box colors
    cmap = plt.get_cmap("tab20b")
    colors = [cmap(i) for i in np.linspace(0, 1, 20)]

    # file to write to
    # f = open(os.path.join(inputDir,  outputTxt))

    print("\nSaving images:")
    # Iterate through images and save plot of detections
    detfile = os.path.join(inputDir, outputTxt)

    if thread:
        thread.signalCanvas("\n[INFO]: Detection done.")
        thread.signalCanvas("\n[INFO]: Saving results...")

    # print("items: ", imgs, img_detections)
    items_zip = zip(imgs, img_detections)
    items_list = list(items_zip)
    print("item list: ", items_list)
    n_items = len(list(items_list))

    with open(detfile, "w+") as f:

        print("items: ", list(items_list))

        for img_i, (path, detections) in enumerate(items_list):

            imgFname = path.split("/")[-1]


            fid = int(re.findall(r"\d+", imgFname)[0])

            if thread:
                thread.signalTopLabel("frame  {}/{}".format(fid, n_items))
                thread.signalTopBar(max((fid / n_items) * 100, 1))

            # print("(%d) Image: '%s'" % (img_i, path))
            # print(path.split("/"))

            # print("fid: ", fid)
            # Create plot
            # img = np.array(Image.open(path))
            img = Image.open(path)
            print('img')
            # fig, ax = plt.subplots(1)
            # print('fig')

            # Draw bounding boxes and labels of detections
            # print(detections)
            if detections is not None:




                # Rescale boxes to original image
                # ax.imshow(img)
                plt.imshow(img)
                ax = plt.gca()
                print('ax')
                detections = rescale_boxes(detections, IMAGE_SIZE, np.array(img).shape[:2])
                print('dir')
                unique_labels = detections[:, -1].cpu().unique()
                print('unique')
                n_cls_preds = len(unique_labels)
                print('n_cls_preds')
                bbox_colors = random.sample(colors, n_cls_preds)
                print('bbox_color')
                for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:

                    print("\t+ Label: %s, Conf: %.5f" % (classes[int(cls_pred)], cls_conf.item()))


                    box_w = x2 - x1
                    box_h = y2 - y1

                    f.write("{},-1,{},{},{},{},{},-1,-1,-1,{}\n".format(fid, x1, y1, box_w, box_h, cls_conf.item(), cls_pred))

                    color = bbox_colors[int(np.where(unique_labels == int(cls_pred))[0])]
                    # Create a Rectangle patch
                    bbox = patches.Rectangle((x1, y1), box_w, box_h, linewidth=2, edgecolor=color, facecolor="none")
                    # Add the bbox to the plot
                    ax.add_patch(bbox)


            print('plt_2')
            # Save generated image with detections
            plt.axis("off")
            plt.gca().xaxis.set_major_locator(NullLocator())
            plt.gca().yaxis.set_major_locator(NullLocator())
            filename = path.split("/")[-1].split(".")[0]
            plt.savefig("{}/{}.png".format(imageOutPath, filename), bbox_inches="tight", pad_inches=0.0)
            plt.close()
            # print("saving image: {}".format(f"{imageOutPath}/{filename}.png"))
            print("saving image: {}/{}.png".format(imageOutPath, filename))

            if thread and thread.args["display"]:
                thread.signalImg("{}/{}.png".format(imageOutPath, filename))
    def __init__(self,
                 fig,
                 cmap=None,
                 norm=None,
                 limit_func=None,
                 auto_redraw=True,
                 interpolation=None):

        self._cursor_position_cbs = []
        self._interpolation = interpolation
        # used to determine if setting properties should force a re-draw
        self._auto_redraw = auto_redraw
        # clean defaults
        if limit_func is None:
            limit_func = lambda image: (image.min(), image.max())
        if cmap is None:
            cmap = 'gray'
        # stash the color map
        self._cmap = cmap
        # let norm pass through as None, mpl defaults to linear which is fine
        if norm is None:
            norm = Normalize()
        self._norm = norm
        # save a copy of the limit function, we will need it later
        self._limit_func = limit_func

        # this is used by the widget logic
        self._active = True
        self._dirty = True
        self._cb_dirty = True

        # work on setting up the mpl axes

        self._fig = fig
        # blow away what ever is currently on the figure
        fig.clf()
        # Configure the figure in our own image
        #
        #         +----------------------+
        #         |   H cross section    |
        #         +----------------------+
        #   +---+ +----------------------+
        #   | V | |                      |
        #   |   | |                      |
        #   | x | |                      |
        #   | s | |      Main Axes       |
        #   | e | |                      |
        #   | c | |                      |
        #   | t | |                      |
        #   | i | |                      |
        #   | o | |                      |
        #   | n | |                      |
        #   +---+ +----------------------+

        # make the main axes
        self._im_ax = fig.add_subplot(1, 1, 1)
        self._im_ax.set_aspect('equal')
        self._im_ax.xaxis.set_major_locator(NullLocator())
        self._im_ax.yaxis.set_major_locator(NullLocator())
        self._imdata = None
        self._im = self._im_ax.imshow([[]],
                                      cmap=self._cmap,
                                      norm=self._norm,
                                      interpolation=self._interpolation,
                                      aspect='equal',
                                      vmin=0,
                                      vmax=1)

        # make it dividable
        divider = make_axes_locatable(self._im_ax)

        # set up all the other axes
        # (set up the horizontal and vertical cuts)
        self._ax_h = divider.append_axes('top',
                                         .5,
                                         pad=0.1,
                                         sharex=self._im_ax)
        self._ax_h.yaxis.set_major_locator(LinearLocator(numticks=2))
        self._ax_v = divider.append_axes('left',
                                         .5,
                                         pad=0.1,
                                         sharey=self._im_ax)
        self._ax_v.xaxis.set_major_locator(LinearLocator(numticks=2))
        self._ax_cb = divider.append_axes('right', .2, pad=.5)
        # add the color bar
        self._cb = fig.colorbar(self._im, cax=self._ax_cb)

        # add the cursor place holder
        self._cur = None

        # turn off auto-scale for the horizontal cut
        self._ax_h.autoscale(enable=False)

        # turn off auto-scale scale for the vertical cut
        self._ax_v.autoscale(enable=False)

        # create line artists
        self._ln_v, = self._ax_v.plot([], [],
                                      'k-',
                                      animated=True,
                                      visible=False)

        self._ln_h, = self._ax_h.plot([], [],
                                      'k-',
                                      animated=True,
                                      visible=False)

        # backgrounds for blitting
        self._ax_v_bk = None
        self._ax_h_bk = None

        # stash last-drawn row/col to skip if possible
        self._row = None
        self._col = None

        # make attributes for callback ids
        self._move_cid = None
        self._click_cid = None
        self._clear_cid = None
Example 18
    def __exit__(self, exc_type, exc_val, exc_tb):
        plt.axis('off')
        plt.gca().xaxis.set_major_locator(NullLocator())
        plt.gca().yaxis.set_major_locator(NullLocator())
        plt.savefig(self.savePath, bbox_inches="tight", pad_inches=0.0)
        plt.close()
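
Only __exit__ appears in this example. A minimal sketch of the surrounding context manager, in which the class name, __init__, and __enter__ are assumptions, with the example's __exit__ repeated so the class is complete:

import matplotlib.pyplot as plt
from matplotlib.ticker import NullLocator


class FigureSaver:
    """Context manager that saves the current pyplot figure on exit."""

    def __init__(self, savePath):
        self.savePath = savePath

    def __enter__(self):
        plt.figure()
        return plt.gca()

    def __exit__(self, exc_type, exc_val, exc_tb):
        plt.axis('off')
        plt.gca().xaxis.set_major_locator(NullLocator())
        plt.gca().yaxis.set_major_locator(NullLocator())
        plt.savefig(self.savePath, bbox_inches="tight", pad_inches=0.0)
        plt.close()

# Usage: everything drawn inside the block is written to the given path on exit.
# with FigureSaver("output/detections.png") as ax:
#     ax.imshow(img)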
Example 19
def render_state(locations, figsize=(32, 32), ball_radius=0.08,
                 box_size=1., n_children=None, fixed_ball_radius=False,
                 render_only_last_level=False, edges=None, fixed_sides=0, curtain=False):
    """Renders the state of the environment from its locations (T, 2, K)."""

    images = []

    BG_COLOR = 'black'
    BALL_COLORS = ['black', 'blue', '#00ff00', 'red',
                   'white', 'cyan', 'magenta', 'yellow']
    BALL_COLORS.remove(BG_COLOR)

    n_total = locations.shape[-1]
    # handle the hierarchy
    nl = 0
    nn = [locations.shape[-1]]
    if n_children is not None:
        if '-' in n_children:
            nc = list(map(int, n_children.split('-')))
            nl = len(nc)
            # number of nodes in each level of the graph that have children
            nn = [1]  # root node
            for l in range(1, nl + 1):
                nn.append(nn[-1] * nc[l - 1])

    sides = []
    for l in range(nl + 1):
        for n in range(nn[l]):
            sample = np.random.sample()
            if fixed_sides < 9:  # 9 means sample a random shape
                sides.append(fixed_sides)
            elif sample < 1/3:
                sides.append(0)
            elif sample < 2/3:
                sides.append(3)
            else:
                sides.append(4)

    ch, cw = 12, 12
    xc = np.random.randint(5, 32-cw-5)
    yc = np.random.randint(5, 32-ch-5)

    for i in range(locations.shape[0]):
        loc = locations[i]
        fig = Figure(figsize=(figsize[0] / 100, figsize[1] / 100))
        fig.patch.set_facecolor(BG_COLOR)

        canvas = FigureCanvas(fig)
        ax = fig.gca()
        ax.xaxis.set_major_locator(NullLocator())
        ax.yaxis.set_major_locator(NullLocator())

        # for p in range(locations.shape[-1]):
        p = 0
        br = ball_radius
        for l in range(nl + 1):
            for n in range(nn[l]):

                x_pos = (loc[0, p] + box_size) / (2 * box_size)
                y_pos = (loc[1, p] + box_size) / (2 * box_size)

                if nl == 0:
                    cc = BALL_COLORS[p % len(BALL_COLORS)]
                else:
                    cc = BALL_COLORS[p % len(BALL_COLORS)]
                    if not render_only_last_level and nl > 0:
                        if l == 0:
                            cc = 'gold'
                        elif l == 1:
                            cc = 'silver'

                def get_polygon_coords(xc, yc, edge_len=0.10, sides=3):
                    L = edge_len
                    if sides == 3:
                        L *= 2
                    N = sides
                    R = L / (2 * np.sin(np.pi / N))

                    xy = np.ndarray((N, 2), dtype=float)
                    for i in range(N):
                        xy[i, 0] = xc + R * np.cos(np.pi / N * (1 + 2 * i))
                        xy[i, 1] = yc + R * np.sin(np.pi / N * (1 + 2 * i))
                    return xy

                if not (l < nl and render_only_last_level):
                    if sides[p] == 0:
                        particle = plt.Circle((x_pos, y_pos), br, color=cc, clip_on=False)
                    else:
                        xy = get_polygon_coords(x_pos, y_pos, edge_len=br*2, sides=sides[p])
                        particle = plt.Polygon(xy, color=cc, fill=True, clip_on=False)
                    ax.add_artist(particle)

                if edges is not None and not render_only_last_level and p < n_total-1:
                    edg = edges[i]
                    for pp in range(p, n_total-1):
                        if edg[p * (n_total - 1) + pp]:
                            x1 = (loc[0, p] + box_size) / (2 * box_size)
                            x2 = (loc[0, pp+1] + box_size) / (2 * box_size)
                            y1 = (loc[1, p] + box_size) / (2 * box_size)
                            y2 = (loc[1, pp+1] + box_size) / (2 * box_size)
                            llll = mlines.Line2D([x1, x2], [y1, y2],
                                                 color='white',
                                                 linewidth=1.4,
                                                 linestyle=':'
                                                 # '-', '--', '-.', ':', ''
                                                 )
                            ax.add_artist(llll)

                p += 1

            if not fixed_ball_radius:
                br /= 2

        ax.axis('off')

        # Draw image
        canvas.draw()

        # Convert to numpy array
        flat_image = np.frombuffer(canvas.tostring_rgb(), dtype='uint8')
        image = flat_image.reshape(fig.canvas.get_width_height()[::-1] + (3,))

        if curtain:
            im = image.copy()
            im[yc:yc+ch, xc:xc+cw, :] = 0
            image = im

        images.append(image)
        plt.close(fig)

    return np.array(images)
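
A minimal usage sketch of render_state, assuming the module-level imports it relies on (Figure, FigureCanvas, plt, mlines, np) are in place; the shapes below are illustrative only:

import numpy as np

# 10 time steps, 2D positions, 3 particles, in box coordinates [-1, 1]
locations = np.random.uniform(-1.0, 1.0, size=(10, 2, 3))
frames = render_state(locations, figsize=(64, 64), ball_radius=0.08)
# frames is a (T, H, W, 3) uint8 array of rendered RGB frames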
Example 20
def main(label_to_data_path: dict, var_pairs: list,
         periods_info: CcPeriodsInfo,
         vname_display_names=None,
         season_to_months: dict = None,
         cur_label=common_params.crcm_nemo_cur_label,
         fut_label=common_params.crcm_nemo_fut_label,
         hles_region_mask=None, lakes_mask=None):
    # get a flat list of all the required variable names (unique)
    varnames = []
    for vpair in var_pairs:
        for v in vpair:
            if v not in varnames:
                varnames.append(v)

    print(f"Considering {varnames}, based on {var_pairs}")

    if vname_display_names is None:
        vname_display_names = {}

    varname_mapping = {v: v for v in varnames}
    level_mapping = {v: VerticalLevel(0) for v in
                     varnames}  # Does not really make a difference, since all variables are 2d

    comon_store_config = {
        DataManager.SP_DATASOURCE_TYPE: data_source_types.ALL_VARS_IN_A_FOLDER_IN_NETCDF_FILES,
        DataManager.SP_INTERNAL_TO_INPUT_VNAME_MAPPING: varname_mapping,
        DataManager.SP_LEVEL_MAPPING: level_mapping
    }

    cur_dm = DataManager(
        store_config=dict({DataManager.SP_BASE_FOLDER: label_to_data_path[cur_label]}, **comon_store_config)
    )

    fut_dm = DataManager(
        store_config=dict({DataManager.SP_BASE_FOLDER: label_to_data_path[fut_label]}, **comon_store_config)
    )

    # get the data and do calculations
    label_to_vname_to_season_to_data = {}

    cur_start_yr, cur_end_year = periods_info.get_cur_year_limits()
    fut_start_yr, fut_end_year = periods_info.get_fut_year_limits()


    #load coordinates in memory
    cur_dm.read_data_for_period(Period(datetime(cur_start_yr, 1, 1), datetime(cur_start_yr, 1, 2)), varname_internal=varnames[0])

    label_to_vname_to_season_to_data = {
        cur_label: {}, fut_label: {}
    }

    for vname in varnames:
        cur_means = cur_dm.get_seasonal_means(start_year=cur_start_yr, end_year=cur_end_year,
                                              season_to_months=season_to_months, varname_internal=vname)

        fut_means = fut_dm.get_seasonal_means(start_year=fut_start_yr, end_year=fut_end_year,
                                              season_to_months=season_to_months, varname_internal=vname)

        label_to_vname_to_season_to_data[cur_label][vname] = cur_means
        label_to_vname_to_season_to_data[fut_label][vname] = fut_means


    if hles_region_mask is None:
        data_field = label_to_vname_to_season_to_data[common_params.crcm_nemo_cur_label][list(season_to_months.keys())[0]]
        hles_region_mask = np.ones_like(data_field)



    correlation_data = calculate_correlations_and_pvalues(var_pairs, label_to_vname_to_season_to_data,
                                                          season_to_months=season_to_months,
                                                          region_of_interest_mask=hles_region_mask,
                                                          lats=cur_dm.lats, lakes_mask=lakes_mask)


    # Calculate mean seasonal temperature
    label_to_season_to_tt_mean = {}
    for label, vname_to_season_to_data in label_to_vname_to_season_to_data.items():
        label_to_season_to_tt_mean[label] = {}
        for season, yearly_data in vname_to_season_to_data["TT"].items():
            label_to_season_to_tt_mean[label][season] = np.mean([f for f in yearly_data.values()], axis=0)



    # do the plotting
    fig = plt.figure()

    ncols = len(season_to_months)
    nrows = len(var_pairs) * len(label_to_vname_to_season_to_data)

    gs = GridSpec(nrows, ncols, wspace=0, hspace=0)

    for col, season in enumerate(season_to_months):
        row = 0

        for vpair in var_pairs:
            for label in sorted(label_to_vname_to_season_to_data):
                ax = fig.add_subplot(gs[row, col], projection=cartopy.crs.PlateCarree())

                r, pv = correlation_data[vpair][label][season]

                r[np.isnan(r)] = 0
                r = np.ma.masked_where(~hles_region_mask, r)
                ax.set_facecolor("0.75")

                # hide the ticks
                ax.xaxis.set_major_locator(NullLocator())
                ax.yaxis.set_major_locator(NullLocator())

                im = ax.pcolormesh(cur_dm.lons, cur_dm.lats, r, cmap=cm.get_cmap("bwr", 11), vmin=-1, vmax=1)

                # add 0 deg line
                cs = ax.contour(cur_dm.lons, cur_dm.lats, label_to_season_to_tt_mean[label][season], levels=[0,],
                                linewidths=1, colors="k")
                ax.set_extent([cur_dm.lons[0, 0], cur_dm.lons[-1, -1], cur_dm.lats[0, 0], cur_dm.lats[-1, -1]])

                ax.background_patch.set_facecolor("0.75")

                if row == 0:
                    # ax.set_title(season + f", {vname_display_names[vpair[0]]}")
                    ax.text(0.5, 1.05, season, transform=ax.transAxes,
                            va="bottom", ha="center", multialignment="center")

                if col == 0:
                    # ax.set_ylabel(f"HLES\nvs {vname_display_names[vpair[1]]}\n{label}")
                    ax.text(-0.05, 0.5, f"HLES\nvs {vname_display_names[vpair[1]]}\n{label}",
                            va="center", ha="right",
                            multialignment="center",
                            rotation=90,
                            transform=ax.transAxes)


                divider = make_axes_locatable(ax)
                ax_cb = divider.new_horizontal(size="5%", pad=0.1, axes_class=plt.Axes)
                fig.add_axes(ax_cb)
                cb = plt.colorbar(im, extend="both", cax=ax_cb)

                if row < nrows - 1 or col < ncols - 1:
                    cb.ax.set_visible(False)

                row += 1

    img_dir = common_params.img_folder
    img_dir.mkdir(exist_ok=True)

    img_file = img_dir / "hles_tt_pr_correlation_fields_cur_and_fut.png"
    fig.savefig(str(img_file), **common_params.image_file_options)
Example 21
            color = bbox_colors[int(
                np.where(unique_labels == int(cls_pred))[0])]
            # Create a Rectangle patch
            bbox = patches.Rectangle((x1, y1),
                                     box_w,
                                     box_h,
                                     linewidth=2,
                                     edgecolor=color,
                                     facecolor='none')
            # Add the bbox to the plot
            ax.add_patch(bbox)
            # Add label
            plt.text(x1,
                     y1 - 30,
                     s=classes[int(cls_pred)] + ' ' +
                     str('%.4f' % cls_conf.item()),
                     color='white',
                     verticalalignment='top',
                     bbox={
                         'color': color,
                         'pad': 0
                     })

    # Save generated image with detections
    plt.axis('off')
    plt.gca().xaxis.set_major_locator(NullLocator())
    plt.gca().yaxis.set_major_locator(NullLocator())
    plt.savefig('output/%d.png' % (img_i), bbox_inches='tight', pad_inches=0.0)
    plt.close()
Example 22
def plot_triangular(list_points,
                    weights,
                    Nbin=200,
                    param_label='',
                    rangeBin=None,
                    units='',
                    scales='',
                    DoInterp=True,
                    addPoints=True,
                    factor=1.5,
                    DoHisto=False,
                    justContour=False,
                    addHDP=True,
                    title1D=True,
                    numberSigmaContour=3,
                    figAndAxes=None,
                    color=None,
                    addLengend=None):
    """
    This is an hack of coner.py
    https://github.com/dfm/corner.py/blob/master/docs/index.rst
    I change/adapt to my personal preference and some need. 
    
    It perform a triangular plot for dimentions > 1
    - It can project an MCMC on a grid
    - or interpolate weighted point on a grid
    - lots of option added by hand from the needs
    
    Parameters
    ----------
    list_points : array_like[nPoints,NDIM,]
        Contain the sample of points.
    weights : array_like[nPoints,]
        The weights of each points.
    Nbin : Optional[int]
        Dimention of the final grid.
        
    DoInterp : Optional[bool]
        Interpolate the weights on a grid of dimention power(Nbin,NDIM)
    DoHisto : Optional[bool]
        Project on the weights on a grid of dimention power(Nbin,NDIM) 
        
    param_label : Optional[[NDIM,str]]
        The axis labels. It use the function my_utils.latex
    rangeBin : Optional[array_like[NDIM,2,]]
        The axis range, by default takes the min/max
    units : Optional[[NDIM,str]]
        The str of unit of the axis
    scales : Optional[[NDIM,str]] 'lin' or 'log'
        The scale of each axis
        
    justContour : Optional[bool],
        Just do contour plot, remove the backgroud histogram.
    addHDP : Optional[bool] (typo here)
        Add on the 1D plots the HPD bars and area.
    title1D : Optional[bool] 
        Add the max and =- HPD on the title of each 1D plot
        
    numberSigmaContour : Optional[int] 0, 1, 2, 3
        The number of sigmas contour wanted,
    color : Optional[[NDIM,str]]
        List of colors for the sigmas contours.
    addLengend : Optional[str]
        To name the curent triangular plot. 
    
    figAndAxes : [fig,ax] object
        A given figure and ax objects from a previous plot_triangular to over-plot it.
    
    Returns
    -------
    fig, axes :  [fig,ax] object
        The figure and ax objects from the current plot_triangular to over-plot it later. 
    
    Raises
    ------
    None
    """

    if color is None:
        color = 'k'

    ### number of dimension
    DIM = len(list_points)

    if scales == '':
        scales = ['' for i in range(DIM)]
    if units == '':
        units = ['' for i in range(DIM)]
    if param_label == '':
        param_label = ['' for i in range(DIM)]

    ### compute the stat
    list_bins, ND_pdf, TwoD_proba, OneD_proba, axe_marg_2D, axe_marg_1D = compute_stat(
        list_points,
        weights,
        Nbin=Nbin,
        rangeBin=rangeBin,
        DoInterp=DoInterp,
        DoHisto=DoHisto)

    ### PLOT NICE DIMENSIONS
    K = DIM
    #factor = 1.5
    lbdim = 0.5 * factor
    trdim = 0.2 * factor
    whspace = 0.05
    plotdim = factor * K + factor * (K - 1) * whspace
    dim = lbdim + plotdim + trdim

    plt.rcParams['font.size'] = 6 * factor
    #plt.rcParams['image.cmap'] =   'Greys' #'plasma' #viridis' #'Greys' #'nipy_spectral'

    if figAndAxes is None:
        fig, axes = plt.subplots(K, K, figsize=(dim, dim))
    else:
        fig, axes = figAndAxes

    lb = lbdim / dim
    tr = (lbdim + plotdim) / dim
    fig.subplots_adjust(left=lb,
                        bottom=lb,
                        right=tr,
                        top=tr,
                        wspace=whspace,
                        hspace=whspace)

    COUNT_2D_PANEL = 0

    ### DO EACH PANEL
    for i in range(DIM):
        for j in range(DIM):

            ### the panels above the diagonal are empty
            if i < j:
                ax = axes[i, j]
                ax.set_frame_on(False)
                ax.set_xticks([])
                ax.set_yticks([])

            ### the diagonal panel: 1D
            if i == j:
                ax = axes[i, j]

                ID_axe = np.where(i == axe_marg_1D)[0][0]
                binX = np.squeeze(list_bins[axe_marg_1D[ID_axe]])
                proba = np.squeeze(OneD_proba[ID_axe, :])
                label = param_label[axe_marg_1D[ID_axe]]
                unit = units[axe_marg_1D[ID_axe]]
                scale = scales[axe_marg_1D[ID_axe]]

                ### the 1D plot
                ax.plot(binX,
                        proba / proba.sum(),
                        lw=1,
                        color=color,
                        label=addLengend)

                ### x limit
                ax.set_xlim(binX.min(), binX.max())
                ### x labels and ticks
                if i < DIM - 1:
                    ax.set_xticklabels([])
                    ax.xaxis.set_major_locator(MaxNLocator(6, prune='lower'))
                else:
                    [l.set_rotation(45) for l in ax.get_xticklabels()]
                    ax.xaxis.set_major_locator(MaxNLocator(6, prune='lower'))
                    ax.set_xlabel(label + ' ' + unit)
                ### y labels and ticks
                ax.set_yticklabels([])
                ax.yaxis.set_major_locator(NullLocator())

                #q_50,q_84,_,_,q_16,_,_ = sigma123_1D( proba, binX )
                #q_m, q_p = q_50-q_16, q_84-q_50
                #ax.axvline( q_50, color='k', ls="dashed" )
                #ax.axvline( q_84, color='k', ls="dashed" )
                #ax.axvline( q_16, color='k', ls="dashed" )

                #avg = np.average( binX, weights=proba )
                #max_proba = binX[ np.where( proba==proba.max() ) ]
                #s0 ,   _ = find_123sigma( binX, proba, 0.   )
                #s1m, s1p = find_123sigma( binX, proba, 0.68 )
                #s2m, s2p = find_123sigma( binX, proba, 0.95 )
                #s3m, s3p = find_123sigma( binX, proba, 0.99 )
                #ax.axvline( avg, color='k', ls="dashed", lw=1 )
                #ax.axvline( max_proba, color='y', ls="dashed", lw=1 )
                #ax.axvline( s0 , color='k', ls="dashed", lw=1 )
                #ax.axvline( s1m, color='r', ls="dashed" )
                #ax.axvline( s2m, color='g', ls="dashed" )
                #ax.axvline( s3m, color='b', ls="dashed" )
                #ax.axvline( s1p, color='r', ls="dashed" )
                #ax.axvline( s2p, color='g', ls="dashed" )
                #ax.axvline( s3p, color='b', ls="dashed" )

                ### max
                smax = binX[proba == proba.max()][0]
                p1 = find_proba_limit(proba / proba.sum(),
                                      confidence_level=0.68)
                zeros = find_zeros(binX, proba / proba.sum() - p1)
                new_binX = np.linspace(binX.min(), binX.max(), 1000)
                new_pdf = np.interp(new_binX, binX, proba / proba.sum())
                if addHDP:

                    ax.axvline(smax, color='k', ls="dashed", lw=1)
                    ### mean
                    #s0 = (binX*proba).sum()/proba.sum()
                    #ax.axvline( s0, color='b', ls="dashed", lw=1 )

                    #s1, p1 = get_HDP( binX, proba/proba.sum() )[0]

                    #print(p1)
                    ax.axhline(p1, color='r', ls="dashed", lw=1)

                    #for ze in zeros:
                    #    ax.axvline( ze, color='r', ls="dashed" )
                    ax.axvline(zeros.min(), color='r', ls="dashed", lw=1)
                    ax.axvline(zeros.max(), color='r', ls="dashed", lw=1)

                    ax.fill_between(new_binX, new_pdf * (new_pdf >= p1))

                #s1m, s1p = s1
                ### temporary sampling the proba to compute HDT with pymc3
                #tempo = np.interp( np.random.rand(100000), proba.cumsum()/proba.sum(), binX )
                #s1m, _ = pm.stats.hpd( tempo, alpha=0.32 )
                #tempo = np.interp( np.random.rand(100000),
                #                   np.concatenate( ([0], proba.cumsum()/proba.sum() ) ),
                #                   np.concatenate( (binX, [binX[-1]] ) ) )
                #_, s1p = pm.stats.hpd( tempo, alpha=0.32 )
                #ax.axvline( s1m, color='r', ls="dashed" )
                #ax.axvline( s1p, color='r', ls="dashed" )
                #print( s1m, s1p )

                if title1D:
                    # Format the quantile display.
                    fmt = "{{0:{0}}}".format(".2f").format
                    title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
                    #print( smax, s1m, s1p )
                    #print( zeros.min(), smax, zeros.max() )
                    #title = title.format( fmt(q_50), fmt(q_m), fmt(q_p) )
                    #title = title.format( fmt(smax), fmt(np.abs(smax-s1m)), fmt(np.abs(s1p-smax)) )
                    title = title.format(fmt(smax),
                                         fmt(np.abs(smax - zeros.min())),
                                         fmt(np.abs(smax - zeros.max())))

                    # Add in the column name if it's given.
                    if scale == 'lin':
                        title = "{0} = {1}".format(label, title)
                    else:
                        title = "{0} = {1}".format(
                            r'$\rm{log_{10}(}$' + label + ')', title)
                    ax.set_title(title)

                if i == 0 and (addLengend is not None):
                    ax.legend(bbox_to_anchor=(1.05, 1))

            ### 2D: the panels below the diagonal
            if i > j:
                ax = axes[i, j]

                ID_axe = np.where(np.prod(axe_marg_2D == [j, i], axis=1))[0][0]
                binX = np.squeeze(list_bins[axe_marg_2D[ID_axe][0]])
                binY = np.squeeze(list_bins[axe_marg_2D[ID_axe][1]])
                proba = np.squeeze(TwoD_proba[ID_axe]).T
                labelX = param_label[axe_marg_2D[ID_axe][0]]
                labelY = param_label[axe_marg_2D[ID_axe][1]]
                unitX = units[axe_marg_2D[ID_axe][0]]
                unitY = units[axe_marg_2D[ID_axe][1]]

                if addPoints:
                    ax.plot(list_points[axe_marg_2D[ID_axe][0]],
                            list_points[axe_marg_2D[ID_axe][1]],
                            'k.',
                            markerfacecolor='none',
                            markeredgecolor='k',
                            markersize=5,
                            markeredgewidth=0.5)

                if not (justContour):
                    ax.imshow(
                        (proba / proba.max()),
                        origin='lower',
                        extent=[
                            binX.min(),
                            binX.max(),
                            binY.min(),
                            binY.max()
                        ],  #)
                        cmap=cm.viridis,
                        interpolation='gaussian',
                    )  ### nearest

                ONE_sigma = so.brentq(find_confidence_interval,
                                      0.,
                                      1.,
                                      args=(proba, 0.68))
                TWO_sigma = so.brentq(find_confidence_interval,
                                      0.,
                                      1.,
                                      args=(proba, 0.95))
                THREE_sigma = so.brentq(find_confidence_interval,
                                        0.,
                                        1.,
                                        args=(proba, 0.99))
                #print('ONE_sigma : ', ONE_sigma)
                X, Y = np.meshgrid(binX, binY)
                #ax.contour( X, Y, proba, 3, levels=[THREE_sigma, TWO_sigma, ONE_sigma], colors=('k', 'grey', 'w') )
                if numberSigmaContour == 1:
                    ax.contour(X,
                               Y,
                               proba,
                               1,
                               levels=[ONE_sigma],
                               colors=(color),
                               zorder=200)
                elif numberSigmaContour == 2:
                    ax.contour(X,
                               Y,
                               proba,
                               2,
                               levels=[TWO_sigma, ONE_sigma],
                               colors=('g', 'r'),
                               zorder=200)
                elif numberSigmaContour == 3:
                    ax.contour(X,
                               Y,
                               proba,
                               3,
                               levels=[THREE_sigma, TWO_sigma, ONE_sigma],
                               colors=('b', 'g', 'r'),
                               zorder=200)

                ###
                x0, x1 = ax.get_xlim()
                y0, y1 = ax.get_ylim()
                ax.set_aspect((x1 - x0) / (y1 - y0))

                if (i < (DIM - 1)):
                    ax.set_xticklabels([])
                    ax.xaxis.set_major_locator(MaxNLocator(6, prune='lower'))
                else:
                    ax.xaxis.set_major_locator(MaxNLocator(6, prune='lower'))
                    ax.set_xlabel(labelX + ' ' + unitX)
                    [l.set_rotation(45) for l in ax.get_xticklabels()]

                if (j == 0):
                    ax.yaxis.set_major_locator(MaxNLocator(6, prune='lower'))
                    ax.yaxis.set_major_formatter(
                        ScalarFormatter(useMathText=True))
                    ax.set_ylabel(labelY + ' ' + unitY)
                    ax.yaxis.set_label_coords(-0.3, 0.5)
                    ax.yaxis.tick_left()
                    [l.set_rotation(45) for l in ax.get_yticklabels()]
                else:
                    ax.set_yticklabels([])
                    ax.yaxis.set_major_locator(MaxNLocator(6, prune='lower'))
    return fig, axes
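
The contour levels above are obtained by root-finding with `so.brentq` over a helper `find_confidence_interval` that this example does not show; a minimal sketch of such a helper, assuming the 2-D histogram passed as `proba` is normalised to unit sum with all cell values below one:

def find_confidence_interval(x, pdf, confidence_level):
    # The zero of this function is the density threshold whose super-level set
    # encloses `confidence_level` of the total probability mass.
    return pdf[pdf > x].sum() - confidence_level
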
Esempio n. 23
0
def add_scalebar(fname, outname, img_width, sbar_length, text, lower_left=[0.05, 0.075], lw=5, text_below=True, color='white', font='STIXGeneral', fontsize=50, dpi=600, offset=0.01, use_PIL=False, sbar_height=0.025):
    """
    Add a scale bar to an existing image (one that fills the entire frame).

    :param fname: string
        filename of the image to add the scale bar to
    :param outname: string
        output filename
    :param img_width: float
        width of the image, in physical units
    :param sbar_length: float
        length of the scale bar to add, in the same units
    :param lower_left: list-like of float
        x, y position of the lower-left corner of the scale bar, in 0-1 axes units
    :param sbar_height: float
        height of the scale bar, in 0-1 axes units
    """

    if use_PIL:
        from PIL import Image, ImageDraw, ImageFont
        im = Image.open(fname)

        draw = ImageDraw.Draw(im)
        font = ImageFont.truetype(font, fontsize)

        w = im.width
        h = im.height

        lower_left = [lower_left[0]*w, lower_left[1]*h]

        x1 = lower_left[0] + (sbar_length/img_width)*w
        y1 = lower_left[1] + sbar_height*h

        draw.rectangle([tuple(lower_left), (x1, y1)],
                       outline=color, fill=color)
        text_x = (x1 + lower_left[0])/2.0
        text_y = (y1 + lower_left[1])/2.0
        # if text_below:
        #     text_y = lower_left[0] - sbar_height*h/2.
        # else:
        #     text_y = y1 + fontsize*2
        if text_below:
            text = '\n'+text
        else:
            text = text + '\n'

        draw.text([text_x, text_y], text, font=font, fill=color)
        im.save(outname, 'PNG')

    else:
        import matplotlib.pyplot as plt
        import matplotlib.image as mpimg
        from matplotlib.ticker import NullLocator
        import numpy as np

        fontdict = {'family': font, 'size': fontsize, 'color': color}
        img = mpimg.imread(fname)

        fig = plt.figure()
        ax = fig.add_axes([0, 0, 1, 1])
        ax.set_axis_off()
        plt.subplots_adjust(top=1, bottom=0, right=1,
                            left=0, hspace=0, wspace=0)
        plt.margins(0, 0)
        ax.xaxis.set_major_locator(NullLocator())
        ax.yaxis.set_major_locator(NullLocator())
        for spine in ['top', 'left', 'bottom', 'right']:
            plt.setp(ax.spines[spine], visible=False)
        plt.setp(ax.get_xaxis(), visible=False)
        plt.setp(ax.get_yaxis(), visible=False)

        h, w, dim = img.shape
        long_length = max([h, w])
        if w == long_length:
            h = h/w
            w = 1
        else:
            w = w/h
            h = 1

        im = ax.imshow(img, extent=[0, w, 0, h])

        x0, y0 = lower_left
        x1 = x0 + (sbar_length/img_width)

        ax.plot([x0, x1], [y0, y0], ls='-', lw=lw, color=color)
        text_x = (x1 + x0)/2.0
        if text_below:
            va = 'top'
            text_y = y0 - offset
        else:
            va = 'bottom'
            text_y = y0 + offset
        ax.text(text_x, text_y, text, color=color,
                fontdict=fontdict, ha='center', va=va)
        plt.savefig(outname, dpi=dpi, bbox_inches='tight', pad_inches=0)
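
A minimal usage sketch for the matplotlib branch above (file names and physical units are hypothetical): add a 10-unit bar to an image whose full frame spans 50 units.

# Hypothetical call: "render.png" fills the frame and is 50 um wide,
# so a 10-unit bar covers one fifth of the width.
add_scalebar("render.png", "render_scalebar.png",
             img_width=50.0, sbar_length=10.0, text=r"10 $\mu$m")
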
Esempio n. 24
0
    def detect(self,img_folder):
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        os.makedirs("output", exist_ok=True)
        model=self.model_plate


        dataloader = DataLoader(
            ImageFolder(img_folder, img_size=self.img_size),
            batch_size=self.batch_size,
            shuffle=False,
            num_workers=self.n_cpu,
        )

        classes = load_classes(self.plate_classes)  # Extracts class labels from file

        Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor

        imgs = []  # Stores image paths
        img_detections = []  # Stores detections for each image index

        print("\nPerforming object detection:")
        prev_time = time.time()
        for batch_i, (img_paths, input_imgs) in enumerate(dataloader):
            # Configure input
            input_imgs = Variable(input_imgs.type(Tensor))

            # Get detections
            with torch.no_grad():
                detections = self.model_plate(input_imgs)
                detections = non_max_suppression(detections, self.conf_thres, self.nms_thres)
            print(detections)

            # Log progress
            current_time = time.time()
            inference_time = datetime.timedelta(seconds=current_time - prev_time)
            prev_time = current_time
            print("\t+ Batch %d, Inference Time: %s" % (batch_i, inference_time))

            # Save image and detections
            imgs.extend(img_paths)
            img_detections.extend(detections)

        # Bounding-box colors
        cmap = plt.get_cmap("tab20b")
        colors = [cmap(i) for i in np.linspace(0, 1, 20)]

        print("\nSaving images:")
        # Iterate through images and save plot of detections
        for img_i, (path, detections) in enumerate(zip(imgs, img_detections)):

            print("(%d) Image: '%s'" % (img_i, path))

            # Create plot
            img = np.array(Image.open(path))
            plt.rcParams['font.sans-serif'] = ['SimHei']
            plt.rcParams['axes.unicode_minus'] = False
            fig, ax = plt.subplots(1)
            ax.imshow(img)

            # Draw bounding boxes and labels of detections
            if detections is not None:
                # Rescale boxes to original image
                detections = rescale_boxes(detections, self.img_size, img.shape[:2])
                unique_labels = detections[:, -1].cpu().unique()
                n_cls_preds = len(unique_labels)
                bbox_colors = random.sample(colors, n_cls_preds)
                for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
                    # print("\t+ Label: %s, Conf: %.5f" % (classes[int(cls_pred)], cls_conf.item()))
                    print(x1, y1)
                    box_w = x2 - x1
                    box_h = y2 - y1

                    color = bbox_colors[int(np.where(unique_labels == int(cls_pred))[0])]
                    # Create a Rectangle patch
                    bbox = patches.Rectangle((x1, y1), box_w, box_h, linewidth=2, edgecolor=color, facecolor="none")
                    # Add the bbox to the plot
                    ax.add_patch(bbox)
                    # Add label
                    plt.text(
                        x1,
                        y1,
                        s=classes[int(cls_pred)],
                        color="white",
                        verticalalignment="top",
                        bbox={"color": color, "pad": 0},
                    )

            # Save generated image with detections
            plt.axis("off")
            plt.gca().xaxis.set_major_locator(NullLocator())
            plt.gca().yaxis.set_major_locator(NullLocator())
            filename = path.split("/")[-1].split(".")[0]
            plt.savefig(f"output/{filename}.png", bbox_inches="tight", pad_inches=0.0)

            plt.close()
Esempio n. 25
0
def corner(xs,
           xfull,
           xbin,
           yfull,
           ybin,
           ydata,
           x_err,
           y_err,
           range2,
           color_list,
           bins=20,
           range=None,
           weights=None,
           color="k",
           smooth=None,
           smooth1d=None,
           labels=None,
           label_kwargs=None,
           show_titles=False,
           title_fmt=".2f",
           title_kwargs=None,
           truths=None,
           truth_color="k",
           ini_guess=[None] * (len(parameters) - 2) +
           [(rstar, rstar_uncertainty), (g, g_uncertainty)],
           density=True,
           scale_hist=False,
           quantiles=None,
           verbose=False,
           fig=None,
           max_n_ticks=5,
           top_ticks=False,
           use_math_text=False,
           reverse=False,
           hist_kwargs=None,
           **hist2d_kwargs):
    """
    Make a *sick* corner plot showing the projections of a data set in a
    multi-dimensional space. kwargs are passed to hist2d() or used for
    `matplotlib` styling.

    Parameters
    ----------
    xs : array_like[nsamples, ndim]
        The samples. This should be a 1- or 2-dimensional array. For a 1-D
        array this results in a simple histogram. For a 2-D array, the zeroth
        axis is the list of samples and the next axis are the dimensions of
        the space.

    bins : int or array_like[ndim,]
        The number of bins to use in histograms, either as a fixed value for
        all dimensions, or as a list of integers for each dimension.

    weights : array_like[nsamples,]
        The weight of each sample. If `None` (default), samples are given
        equal weight.

    color : str
        A ``matplotlib`` style color for all histograms.

    smooth, smooth1d : float
       The standard deviation for Gaussian kernel passed to
       `scipy.ndimage.gaussian_filter` to smooth the 2-D and 1-D histograms
       respectively. If `None` (default), no smoothing is applied.

    labels : iterable (ndim,)
        A list of names for the dimensions. If a ``xs`` is a
        ``pandas.DataFrame``, labels will default to column names.

    label_kwargs : dict
        Any extra keyword arguments to send to the `set_xlabel` and
        `set_ylabel` methods.

    show_titles : bool
        Displays a title above each 1-D histogram showing the 0.5 quantile
        with the upper and lower errors supplied by the quantiles argument.

    title_fmt : string
        The format string for the quantiles given in titles. If you explicitly
        set ``show_titles=True`` and ``title_fmt=None``, the labels will be
        shown as the titles. (default: ``.2f``)

    title_kwargs : dict
        Any extra keyword arguments to send to the `set_title` command.

    range : iterable (ndim,)
        A list where each element is either a length 2 tuple containing
        lower and upper bounds or a float in range (0., 1.)
        giving the fraction of samples to include in bounds, e.g.,
        [(0.,10.), (1.,5), 0.999, etc.].
        If a fraction, the bounds are chosen to be equal-tailed.

    truths : iterable (ndim,)
        A list of reference values to indicate on the plots.  Individual
        values can be omitted by using ``None``.

    truth_color : str
        A ``matplotlib`` style color for the ``truths`` markers.

    scale_hist : bool
        Should the 1-D histograms be scaled in such a way that the zero line
        is visible?

    quantiles : iterable
        A list of fractional quantiles to show on the 1-D histograms as
        vertical dashed lines.

    verbose : bool
        If true, print the values of the computed quantiles.

    plot_contours : bool
        Draw contours for dense regions of the plot.

    use_math_text : bool
        If true, then axis tick labels for very large or small exponents will
        be displayed as powers of 10 rather than using `e`.
        
    reverse : bool
        If true, plot the corner plot starting in the upper-right corner instead 
        of the usual bottom-left corner
        
    max_n_ticks: int
        Maximum number of ticks to try to use

    top_ticks : bool
        If true, label the top ticks of each axis

    fig : matplotlib.Figure
        Overplot onto the provided figure object.

    hist_kwargs : dict
        Any extra keyword arguments to send to the 1-D histogram plots.

    **hist2d_kwargs
        Any remaining keyword arguments are sent to `corner.hist2d` to generate
        the 2-D histogram plots.

    """

    pl.rc('text', usetex=True)
    # pl.rcParams['text.latex.preamble'] = [r'\boldmath']

    if quantiles is None:
        quantiles = []
    if title_kwargs is None:
        title_kwargs = dict()
    if label_kwargs is None:
        label_kwargs = dict()

    # Try filling in labels from pandas.DataFrame columns.
    if labels is None:
        try:
            labels = xs.columns
        except AttributeError:
            pass

    # Deal with 1D sample lists.
    xs = np.atleast_1d(xs)
    if len(xs.shape) == 1:
        xs = np.atleast_2d(xs)
    else:
        assert len(xs.shape) == 2, "The input sample array must be 1- or 2-D."
        xs = xs.T
    assert xs.shape[0] <= xs.shape[1], "I don't believe that you want more " \
                                       "dimensions than samples!"

    # Parse the weight array.
    # if weights is not None:
    #     weights = np.asarray(weights)
    #     if weights.ndim != 1:
    #         raise ValueError("Weights must be 1-D")
    #     if xs.shape[1] != weights.shape[0]:
    #         raise ValueError("Lengths of weights must match number of samples")

    # Parse the parameter ranges.
    if range is None:
        if "extents" in hist2d_kwargs:
            logging.warning("Deprecated keyword argument 'extents'. "
                            "Use 'range' instead.")
            range = hist2d_kwargs.pop("extents")
        else:
            range = [[x.min(), x.max()] for x in xs]
            # Check for parameters that never change.
            m = np.array([e[0] == e[1] for e in range], dtype=bool)
            if np.any(m):
                raise ValueError(
                    ("It looks like the parameter(s) in "
                     "column(s) {0} have no dynamic range. "
                     "Please provide a `range` argument.").format(", ".join(
                         map("{0}".format,
                             np.arange(len(m))[m]))))

    else:
        # If any of the extents are percentiles, convert them to ranges.
        # Also make sure it's a normal list.
        range = list(range)
        for i, _ in enumerate(range):
            try:
                emin, emax = range[i]
            except TypeError:
                q = [0.5 - 0.5 * range[i], 0.5 + 0.5 * range[i]]
                range[i] = quantile(xs[i], q, weights=weights)

    if len(range) != xs.shape[0]:
        raise ValueError("Dimension mismatch between samples and range")

    # Parse the bin specifications.
    try:
        bins = [int(bins) for _ in range]
    except TypeError:
        if len(bins) != len(range):
            raise ValueError("Dimension mismatch between bins and range")

    # Some magic numbers for pretty axis layout.
    K = len(xs)
    factor = 5.0  # size of one side of one panel
    if reverse:
        lbdim = 0.2 * factor  # size of left/bottom margin
        trdim = 0.5 * factor  # size of top/right margin
    else:
        lbdim = 0.5 * factor  # size of left/bottom margin
        trdim = 0.2 * factor  # size of top/right margin
    whspace = 0.05  # w/hspace size
    plotdim = factor * K + factor * (K - 1.) * whspace
    dim = lbdim + plotdim + trdim

    # Create a new figure if one wasn't provided.
    if fig is None:
        fig, axes = pl.subplots(K, K, figsize=(dim, dim))
    else:
        try:
            axes = np.array(fig.axes).reshape((K, K))
        except ValueError:
            raise ValueError("Provided figure has {0} axes, but data has "
                             "dimensions K={1}".format(len(fig.axes), K))

    # Format the figure.
    lb = lbdim / dim
    tr = (lbdim + plotdim) / dim
    fig.subplots_adjust(left=lb,
                        bottom=lb,
                        right=tr,
                        top=tr,
                        wspace=whspace,
                        hspace=whspace)

    # Set up the default histogram keywords.
    if hist_kwargs is None:
        hist_kwargs = dict()
    hist_kwargs["color"] = hist_kwargs.get("color", color)
    if smooth1d is None:
        hist_kwargs["histtype"] = hist_kwargs.get("histtype", "stepfilled")

    for i, x in enumerate(xs):

        cmap = matplotlib.cm.get_cmap(color_list[i][0])
        fill_color = cmap(color_list[i][1])
        sigma_color = cmap(color_list[i][1] + 0.4)

        # Deal with masked arrays.
        if hasattr(x, "compressed"):
            x = x.compressed()

        if np.shape(xs)[0] == 1:
            ax = axes
        else:
            if reverse:
                ax = axes[K - i - 1, K - i - 1]
            else:
                ax = axes[i, i]
        # Plot the histograms.
        if smooth1d is None:
            n, _, _ = ax.hist(x,
                              bins=bins[i],
                              weights=weights,
                              range=np.sort(range[i]),
                              facecolor=fill_color,
                              density=True,
                              **hist_kwargs)
        else:
            if gaussian_filter is None:
                raise ImportError("Please install scipy for smoothing")
            n, b = np.histogram(x,
                                bins=bins[i],
                                weights=weights,
                                range=np.sort(range[i]))
            n = gaussian_filter(n, smooth1d)
            x0 = np.array(list(zip(b[:-1], b[1:]))).flatten()
            y0 = np.array(list(zip(n, n))).flatten()
            ax.plot(x0, y0, **hist_kwargs)

        if truths is not None and truths[i] is not None:
            # ax.axvline(truths[i][0], color=truth_color)
            ax.axvline(truths[i][0], color=sigma_color, linestyle='-')
            ax.axvline(truths[i][0] + truths[i][1],
                       color=sigma_color,
                       linestyle=':')
            ax.axvline(truths[i][0] - truths[i][2],
                       color=sigma_color,
                       linestyle=':')

        # Plot Gaussian with input values.
        if ini_guess is not None and ini_guess[i] is not None:
            mu = ini_guess[i][0]
            sigma = ini_guess[i][1]
            x_gauss = np.linspace(mu - 3 * sigma, mu + 3 * sigma, 100)
            ax2 = ax.twinx()
            ax2.plot(x_gauss,
                     norm.pdf(x_gauss, mu, sigma),
                     color='k',
                     linestyle='-',
                     linewidth=4)
            ax2.tick_params(right=False, labelright=False)

        # Plot quantiles if wanted.
        if len(quantiles) > 0:
            qvalues = quantile(x, quantiles, weights=weights)
            for q in qvalues:
                ax.axvline(q, ls="dashed", color=color)

            if verbose:
                print("Quantiles:")
                print([item for item in zip(quantiles, qvalues)])

        if show_titles:
            title = None
            if title_fmt is not None:
                # Compute the quantiles for the title. This might redo
                # unneeded computation but who cares.
                q_16, q_50, q_84 = quantile(x, [0.16, 0.5, 0.84],
                                            weights=weights)
                q_m, q_p = q_50 - q_16, q_84 - q_50

                # Format the quantile display.
                fmt = "{{0:{0}}}".format(title_fmt).format
                title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
                title = title.format(fmt(q_50), fmt(q_m), fmt(q_p))

                # Add in the column name if it's given.
                if labels is not None:
                    title = "{0} = {1}".format(labels[i], title)

            elif labels is not None:
                title = "{0}".format(labels[i])

            if title is not None:
                if reverse:
                    ax.set_xlabel(title, **title_kwargs)
                else:
                    ax.set_title(title, **title_kwargs)

        # Fix ranges #
        if ini_guess is not None and ini_guess[i] is not None:
            mu = ini_guess[i][0]
            sigma = ini_guess[i][1]
            ax.set_xlim((mu - 3 * sigma, mu + 3 * sigma))
        else:
            ax.set_xlim(range2[i])
        if scale_hist:
            maxn = np.max(n)
            ax.set_ylim(-0.1 * maxn, 1.1 * maxn)
        else:
            ax.set_ylim(0, 1.1 * np.max(n))
        ax.set_yticklabels([])
        if max_n_ticks == 0:
            ax.xaxis.set_major_locator(NullLocator())
            ax.yaxis.set_major_locator(NullLocator())
        else:
            ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
            ax.yaxis.set_major_locator(NullLocator())

        if i < K - 1:
            if top_ticks:
                ax.xaxis.set_ticks_position("top")
                [l.set_rotation(45) for l in ax.get_xticklabels()]
            else:
                ax.set_xticklabels([])
        else:
            if reverse:
                ax.xaxis.tick_top()
            [l.set_rotation(45) for l in ax.get_xticklabels()]
            if labels is not None:
                if reverse:
                    ax.set_title(labels[i], y=1.25, **label_kwargs)
                else:
                    ax.set_xlabel(labels[i], **label_kwargs)

            # use MathText for axes ticks
            ax.xaxis.set_major_formatter(
                ScalarFormatter(useMathText=use_math_text))

        for j, y in enumerate(xs):
            if np.shape(xs)[0] == 1:
                ax = axes
            else:
                if reverse:
                    ax = axes[K - i - 1, K - j - 1]
                else:
                    ax = axes[i, j]
            if j > i:
                ax.set_frame_on(False)
                ax.set_xticks([])
                ax.set_yticks([])
                continue
            elif j == i:
                continue

            # Deal with masked arrays.
            if hasattr(y, "compressed"):
                y = y.compressed()

            cmap = matplotlib.cm.get_cmap(color_list[j][0])
            fill_color = cmap(color_list[j][1] + 0.2)

            hist2d(y,
                   x,
                   ax=ax,
                   range=[range2[j], range2[i]],
                   weights=weights,
                   color=fill_color,
                   smooth=smooth,
                   bins=[bins[j], bins[i]],
                   **hist2d_kwargs)

            #if ini_guess is not None and ini_guess[i] is not None:
            #    mu = ini_guess[i][0]
            #    sigma = ini_guess[i][1]
            #    ax.set_xlim((mu - 3 * sigma, mu + 3 * sigma))
            #else:
            #    ax.set_xlim(range2[i])

            #
            # if truths is not None:
            #     if truths[i] is not None and truths[j] is not None:
            #         ax.plot(truths[j], truths[i], "s", color=truth_color)
            #     if truths[j] is not None:
            #         ax.axvline(truths[j], color=truth_color)
            #     if truths[i] is not None:
            #         ax.axhline(truths[i], color=truth_color)

            if max_n_ticks == 0:
                ax.xaxis.set_major_locator(NullLocator())
                ax.yaxis.set_major_locator(NullLocator())
            else:
                ax.xaxis.set_major_locator(
                    MaxNLocator(max_n_ticks, prune="lower"))
                ax.yaxis.set_major_locator(
                    MaxNLocator(max_n_ticks, prune="lower"))

            if i < K - 1:
                ax.set_xticklabels([])
            else:
                if reverse:
                    ax.xaxis.tick_top()
                [l.set_rotation(45) for l in ax.get_xticklabels()]
                if labels is not None:
                    ax.set_xlabel(labels[j], **label_kwargs)
                    if reverse:
                        ax.xaxis.set_label_coords(0.5, 1.4)
                    else:
                        ax.xaxis.set_label_coords(0.5, -0.3)

                # use MathText for axes ticks
                ax.xaxis.set_major_formatter(
                    ScalarFormatter(useMathText=use_math_text))

            if j > 0:
                ax.set_yticklabels([])
            else:
                if reverse:
                    ax.yaxis.tick_right()
                [l.set_rotation(45) for l in ax.get_yticklabels()]
                if labels is not None:
                    if reverse:
                        ax.set_ylabel(labels[i], rotation=-90, **label_kwargs)
                        ax.yaxis.set_label_coords(1.3, 0.5)
                    else:
                        ax.set_ylabel(labels[i], **label_kwargs)
                        ax.yaxis.set_label_coords(-0.3, 0.5)

                # use MathText for axes ticks
                ax.yaxis.set_major_formatter(
                    ScalarFormatter(useMathText=use_math_text))

            size1 = 30 + 2 * K / 3
            ax.tick_params(axis='both', which='major', labelsize=size1)

        size1 = 30 + 2 * K / 3
        ax.xaxis.set_label_coords(0.5, -0.3)
        ax.tick_params(axis='both', which='major', labelsize=size1)

    if K == 2:
        ax = pl.subplot2grid((4 * K, 4 * K), (0, 5), colspan=3, rowspan=3)
    else:
        ax = pl.subplot2grid((2 * K, 2 * K), (0, K + 1),
                             colspan=K - 1,
                             rowspan=K - 1)
    ax.set_frame_on(True)
    linethick = 0.5
    line1, = ax.plot(xfull,
                     yfull,
                     linewidth=linethick,
                     color='b',
                     linestyle='-')
    symsize2 = 1
    mew1 = 5 * K / 3
    msize = 2.5 * K / 3
    elw = 1.5 * K / 3
    ax.plot(xbin, ybin, 'ks', mew=mew1, markersize=msize)
    ax.errorbar(xbin, ybin, xerr=x_err, yerr=y_err, fmt='ks', elinewidth=elw)
    symsize = 4
    ax.plot(xbin, ydata, 'ro', mew=mew1, markersize=msize)
    ax.errorbar(xbin,
                ydata,
                xerr=x_err,
                yerr=y_err,
                fmt='ro',
                capthick=2,
                elinewidth=elw)
    text_size = 4 * K + 4
    wavelength_min = np.amin(wavelength_bins)
    wavelength_max = np.amax(wavelength_bins)
    transit_min = np.amin(transit_depth)
    transit_max = np.amax(transit_depth)
    ax.text(0.85 * wavelength_min + 0.1 * wavelength_max,
            2.5 * transit_max - 1.5 * transit_min,
            'Circles: ' + planet_name + ' data',
            color='r',
            fontsize=text_size)
    # ax.text(0.9, 1.487, 'from Kreidberg et al. (2015)', color='r', fontsize=text_size)
    ax.text(0.85 * wavelength_min + 0.1 * wavelength_max,
            2.75 * transit_max - 1.75 * transit_min,
            'Squares: Model (binned)',
            color='k',
            fontsize=text_size)
    ax.set_xlim([wavelength_min - 0.03, wavelength_max + 0.03])
    ax.set_ylim([
        1.5 * transit_min - 0.5 * transit_max,
        3 * transit_max - 2 * transit_min
    ])
    ax.xaxis.set_major_locator(MaxNLocator(5, prune="lower"))
    ax.yaxis.set_major_locator(MaxNLocator(5, prune="lower"))
    ax.xaxis.set_major_formatter(ScalarFormatter(useMathText=use_math_text))
    ax.yaxis.set_major_formatter(ScalarFormatter(useMathText=use_math_text))
    tick_size = 4 * K + 10
    ax.tick_params(axis='both', which='major', labelsize=tick_size)
    label_size = 10 * K / 3 + 16
    ax.set_xlabel(r'\textbf{wavelength (}\boldmath $\mu$\textbf{m)}',
                  fontsize=label_size,
                  fontweight='bold')
    ax.set_ylabel(r'\boldmath $(R/R_\star)^2$ \textbf{(\%)}',
                  fontsize=label_size,
                  fontweight='bold')
    ax.xaxis.set_label_coords(0.5, -0.08)
    y_label_x = -0.25 + 0.06 * K / 3
    ax.yaxis.set_label_coords(y_label_x, 0.5)

    return fig
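
The dashed quantile lines and panel titles above rely on a `quantile(x, q, weights=...)` helper that is not defined in this example; a sketch of the usual weighted-quantile implementation assumed here:

import numpy as np

def quantile(x, q, weights=None):
    # Weighted analogue of np.percentile; q is given as fractions in [0, 1].
    x = np.atleast_1d(x)
    q = np.atleast_1d(q)
    if weights is None:
        return np.percentile(x, 100.0 * q)
    idx = np.argsort(x)
    sw = np.atleast_1d(weights)[idx]
    cdf = np.cumsum(sw)[:-1]
    cdf /= cdf[-1]
    cdf = np.append(0, cdf)
    return np.interp(q, cdf, x[idx]).tolist()
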
Esempio n. 26
0
def detect(kitti_weights = '../checkpoints/best_weights_kitti.pth', config_path = '../config/yolov3-kitti.cfg', class_path = '../data/names.txt'):
    """
        Script to run inference on sample images. It will store all the inference results in /output directory (relative to repo root)
        
        Args
            kitti_weights: Path of weights
            config_path: Yolo configuration file path
            class_path: Path of class names txt file
            
    """
    cuda = torch.cuda.is_available()
    os.makedirs('../output', exist_ok=True)

    # Set up model
    model = Darknet(config_path, img_size=416)
    model.load_weights(kitti_weights)

    if cuda:
        model.cuda()
        print("Cuda available for inference")

    model.eval() # Set in evaluation mode

    dataloader = DataLoader(ImageFolder("../data/samples/", img_size=416),
                            batch_size=2, shuffle=False, num_workers=0)

    classes = load_classes(class_path) # Extracts class labels from file

    Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor

    imgs = []           # Stores image paths
    img_detections = [] # Stores detections for each image index

    print('data size : %d' % len(dataloader) )
    print ('\nPerforming object detection:')
    prev_time = time.time()
    for batch_i, (img_paths, input_imgs) in enumerate(dataloader):
        # Configure input
        input_imgs = Variable(input_imgs.type(Tensor))

        # Get detections
        with torch.no_grad():
            detections = model(input_imgs)
            detections = non_max_suppression(detections, 80, 0.8, 0.4)
            #print(detections)

        # Log progress
        current_time = time.time()
        inference_time = datetime.timedelta(seconds=current_time - prev_time)
        prev_time = current_time
        print ('\t+ Batch %d, Inference Time: %s' % (batch_i, inference_time))

        # Save image and detections
        imgs.extend(img_paths)
        img_detections.extend(detections)

    # Bounding-box colors
    #cmap = plt.get_cmap('tab20b')
    cmap = plt.get_cmap('tab10')
    colors = [cmap(i) for i in np.linspace(0, 1, 20)]

    print ('\nSaving images:')
    # Iterate through images and save plot of detections
    for img_i, (path, detections) in enumerate(zip(imgs, img_detections)):

        print ("(%d) Image: '%s'" % (img_i, path))

        # Create plot
        img = np.array(Image.open(path))
        fig, ax = plt.subplots(1)
        ax.imshow(img)

        kitti_img_size = 416
        
        # The amount of padding that was added
        pad_x = max(img.shape[0] - img.shape[1], 0) * (kitti_img_size / max(img.shape))
        pad_y = max(img.shape[1] - img.shape[0], 0) * (kitti_img_size / max(img.shape))
        # Image height and width after padding is removed
        unpad_h = kitti_img_size - pad_y
        unpad_w = kitti_img_size - pad_x

        # Draw bounding boxes and labels of detections
        if detections is not None:
            print(type(detections))
            print(detections.size())
            unique_labels = detections[:, -1].cpu().unique()
            n_cls_preds = len(unique_labels)
            bbox_colors = random.sample(colors, n_cls_preds)
            for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:

                print ('\t+ Label: %s, Conf: %.5f' % (classes[int(cls_pred)], cls_conf.item()))
                # Rescale coordinates to original dimensions
                box_h = int(((y2 - y1) / unpad_h) * (img.shape[0]))
                box_w = int(((x2 - x1) / unpad_w) * (img.shape[1]) )
                y1 = int(((y1 - pad_y // 2) / unpad_h) * (img.shape[0]))
                x1 = int(((x1 - pad_x // 2) / unpad_w) * (img.shape[1]))

                color = bbox_colors[int(np.where(unique_labels == int(cls_pred))[0])]
                # Create a Rectangle patch
                bbox = patches.Rectangle((x1, y1), box_w, box_h, linewidth=2,
                                        edgecolor=color,
                                        facecolor='none')
                # Add the bbox to the plot
                ax.add_patch(bbox)
                # Add label
                plt.text(x1, y1-30, s=classes[int(cls_pred)]+' '+ str('%.4f'%cls_conf.item()), color='white', verticalalignment='top',
                        bbox={'color': color, 'pad': 0})

        # Save generated image with detections
        plt.axis('off')
        plt.gca().xaxis.set_major_locator(NullLocator())
        plt.gca().yaxis.set_major_locator(NullLocator())
        plt.savefig('../output/%d.png' % (img_i), bbox_inches='tight', pad_inches=0.0)
        plt.close()
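
The pad/unpad arithmetic above reverses the letterbox padding applied when each image was resized to the square 416-pixel network input; the same mapping, factored into a standalone helper for clarity (a sketch, not part of the original script):

def unletterbox(x1, y1, x2, y2, net_size, img_shape):
    # Map box corners from the padded `net_size` square back to the
    # original image of shape (height, width, channels).
    h, w = img_shape[0], img_shape[1]
    pad_x = max(h - w, 0) * (net_size / max(h, w))
    pad_y = max(w - h, 0) * (net_size / max(h, w))
    unpad_h = net_size - pad_y
    unpad_w = net_size - pad_x
    box_h = ((y2 - y1) / unpad_h) * h
    box_w = ((x2 - x1) / unpad_w) * w
    y0 = ((y1 - pad_y // 2) / unpad_h) * h
    x0 = ((x1 - pad_x // 2) / unpad_w) * w
    return x0, y0, box_w, box_h
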
Esempio n. 27
0
def detect():
    # net
    net = mobile_yolo.Mobile_YOLO(config)
    net = torch.nn.DataParallel(net.cuda())
    net.eval()

    # checkpoint
    net.load_state_dict(torch.load(config.checkpoint))

    yolo_losses = []
    for i in range(3):
        yolo_losses.append(
            yolo_loss.YOLOLoss(config.anchors[i], config.classes_num,
                               (config.img_w, config.img_h)))

    # prepare images path
    images_name = os.listdir(config.image_path)
    images_path = [
        os.path.join(config.image_path, name) for name in images_name
    ]
    if len(images_path) == 0:
        raise Exception("no image found in {}".format(config.image_path))

    # Start inference
    batch_size = config.batch_size
    for step in range(0, len(images_path), batch_size):
        # preprocess
        images = []
        images_origin = []
        for path in images_path[step * batch_size:(step + 1) * batch_size]:
            print("processing: {}".format(path))
            image = cv2.imread(path, cv2.IMREAD_COLOR)
            if image is None:
                print("read path error: {}. skip it.".format(path))
                continue
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            images_origin.append(image)  # keep for save result
            image = cv2.resize(image, (config.img_w, config.img_h),
                               interpolation=cv2.INTER_LINEAR)
            image = image.astype(np.float32)
            image /= 255.0
            image = np.transpose(image, (2, 0, 1))
            image = image.astype(np.float32)
            images.append(image)
        images = np.asarray(images)
        images = torch.from_numpy(images).cuda()

        # inference
        with torch.no_grad():
            outputs = net(images)
            output_list = []
            for i in range(3):
                output_list.append(yolo_losses[i](outputs[i]))
            output = torch.cat(output_list, 1)
            batch_detections = utils.non_max_suppression(
                output, config.classes_num, config.conf_thres)

        # write result images. Draw bounding boxes and labels of detections
        classes = open(config.classes_names_path, "r").read().split("\n")[:-1]
        if not os.path.isdir(config.save_path):
            os.makedirs(config.save_path)
        for idx, detections in enumerate(batch_detections):
            fig, ax = plt.subplots(1)
            ax.imshow(images_origin[idx])
            if detections is not None:
                unique_labels = detections[:, -1].cpu().unique()
                n_cls_preds = len(unique_labels)
                bbox_colors = random.sample(colors, n_cls_preds)
                for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
                    color = bbox_colors[int(
                        np.where(unique_labels == int(cls_pred))[0])]
                    # Rescale coordinates to original dimensions
                    ori_h, ori_w = images_origin[idx].shape[:2]
                    pre_h, pre_w = config.img_h, config.img_w
                    box_h = ((y2 - y1) / pre_h) * ori_h
                    box_w = ((x2 - x1) / pre_w) * ori_w
                    y1 = (y1 / pre_h) * ori_h
                    x1 = (x1 / pre_w) * ori_w
                    # Create a Rectangle patch
                    bbox = patches.Rectangle((x1, y1),
                                             box_w,
                                             box_h,
                                             linewidth=2,
                                             edgecolor=color,
                                             facecolor='none')
                    # Add the bbox to the plot
                    ax.add_patch(bbox)
                    # Add label
                    plt.text(x1,
                             y1,
                             s=classes[int(cls_pred)],
                             color='white',
                             verticalalignment='top',
                             bbox={
                                 'color': color,
                                 'pad': 0
                             })
            # Save generated image with detections
            plt.axis('off')
            plt.gca().xaxis.set_major_locator(NullLocator())
            plt.gca().yaxis.set_major_locator(NullLocator())
            plt.savefig(config.save_path + '/{}_{}.jpg'.format(step, idx),
                        bbox_inches='tight',
                        pad_inches=0.0)
            plt.close()
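
The call to `random.sample(colors, n_cls_preds)` above assumes a module-level `colors` palette that this example does not define; it is presumably built from a colormap, as in the earlier detection examples, for instance:

import numpy as np
import matplotlib.pyplot as plt

# Assumed module-level palette used by detect() above.
cmap = plt.get_cmap("tab20b")
colors = [cmap(i) for i in np.linspace(0, 1, 20)]
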
Esempio n. 28
0
def plot_pdfq1d(histfile, nosave, vb):
    """
	Read in data for a single file and plot a quasi-1d 2D PDF projections.
	"""
    me = me0 + ".plot_pdfq1d: "
    t0 = time.time()

    ##-------------------------------------------------------------------------

    ## Get pars from filename

    assert "_CAR_" in histfile, me + "Functional only for Cartesian geometry."
    Casimir = "_CL_" in histfile

    a = filename_par(histfile, "_a")
    R = filename_par(histfile, "_R")
    S = filename_par(histfile, "_S")
    T = filename_par(histfile, "_T") if Casimir else -S

    ##-------------------------------------------------------------------------

    ## Space
    bins = np.load(
        os.path.dirname(histfile) + "/BHISBIN" +
        os.path.basename(histfile)[4:-4] + ".npz")
    xbins = bins["xbins"]
    exbins = bins["exbins"]
    x = 0.5 * (xbins[1:] + xbins[:-1])
    etax = 0.5 * (exbins[1:] + exbins[:-1])

    ## Wall indices
    Rind, Sind = np.abs(x - R).argmin(), np.abs(x - S).argmin()

    ##-------------------------------------------------------------------------

    ## Histogram / density
    H = np.load(histfile).sum(axis=2)
    rhoxex = H / (H.sum() * (x[1] - x[0]) * (etax[1] - etax[0]))

    ## ------------------------------------------------------------------------

    ## Plotting

    fig, ax = plt.subplots(1, 1, figsize=fs["figsize"])
    fig.canvas.set_window_title("Quasi-1D PDF")

    ## Set number of ticks
    ax.xaxis.set_major_locator(NullLocator())
    ax.yaxis.set_major_locator(NullLocator())

    plt.rcParams["image.cmap"] = "Greys"  #"coolwarm"

    ## ------------------------------------------------------------------------

    ## x-etax

    ax.contourf(x, etax, rhoxex.T)

    ## Indicate bulk
    ax.axvline(S, c="k", lw=1)
    ax.axvline(R, c="k", lw=1)
    if T >= 0.0: ax.axvline(T, c="k", lw=1)
    elif T < 0.0 and ("_DL_" not in histfile and "_DC_" not in histfile):
        ax.axvline(-R, c="k", lw=1)
    # ax.axvspan(S,R,color="y",alpha=0.1)

    ax.set_xlabel(r"$x$", fontsize=fs["fsa"])
    ax.set_ylabel(r"$\eta_x$", fontsize=fs["fsa"])

    ## ------------------------------------------------------------------------

    if not nosave:
        plotfile = os.path.dirname(histfile) + "/PDFxyq1d" + os.path.basename(
            histfile)[4:-4]
        plotfile += "." + fs["saveext"]
        fig.savefig(plotfile)
        if vb: print(me + "Figure saved to", plotfile)

    if vb: print(me + "Execution time %.1f seconds." % (time.time() - t0))

    return
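
`filename_par` is assumed to extract a numeric parameter from the histogram filename (the value following tokens such as "_a" or "_R"); a hypothetical sketch of such a helper:

def filename_par(filename, searchstr):
    # Hypothetical helper: return the float that follows `searchstr`,
    # e.g. filename_par("BHIS_CAR_a2.0_R5.0.npy", "_a") -> 2.0
    start = filename.find(searchstr) + len(searchstr)
    end = start
    while end < len(filename) and (filename[end].isdigit() or filename[end] in ".-"):
        end += 1
    return float(filename[start:end])
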
Esempio n. 29
0
def detect_one(
        img_path,
        modelSelcet="PyTorch_YOLOv3/checkpoints/yolov3_ckpt_best_f01.pth"):
    parser = argparse.ArgumentParser()
    parser.add_argument("--image_path",
                        type=str,
                        default=f"{img_path}",
                        help="path to dataset")
    parser.add_argument("--model_def",
                        type=str,
                        default="PyTorch_YOLOv3/config/yolov3-custom.cfg",
                        help="path to model definition file")
    parser.add_argument("--class_path",
                        type=str,
                        default="PyTorch_YOLOv3/data/custom/classes.names",
                        help="path to class label file")
    parser.add_argument("--conf_thres",
                        type=float,
                        default=0.8,
                        help="object confidence threshold")  # 0.8
    parser.add_argument(
        "--nms_thres",
        type=float,
        default=0.3,
        help="iou thresshold for non-maximum suppression")  # 0.25
    parser.add_argument(
        "--n_cpu",
        type=int,
        default=0,
        help="number of cpu threads to use during batch generation")
    parser.add_argument("--img_size",
                        type=int,
                        default=416,
                        help="size of each image dimension")
    parser.add_argument("--checkpoint_model",
                        type=str,
                        default=f"{modelSelcet}",
                        help="path to checkpoint model")
    opt = parser.parse_args()

    print(opt.checkpoint_model)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    os.makedirs("./output", exist_ok=True)
    os.makedirs("./coordinate", exist_ok=True)

    # Set up model
    model = Darknet(opt.model_def, img_size=opt.img_size).to(device)
    # Load checkpoint weights
    model.load_state_dict(torch.load(opt.checkpoint_model))
    classes = load_classes(opt.class_path)  # Extracts class labels from file

    Tensor = torch.cuda.FloatTensor if torch.cuda.is_available(
    ) else torch.FloatTensor

    print(opt.image_path + ".png")

    img = cv2.imread(opt.image_path + ".png", 0)

    img = clahe_hist(img)
    img = preprocess(img / 255)
    img = transforms.ToTensor()(img).float()
    img, _ = pad_to_square(img, 0)
    img = resize(img, opt.img_size)

    print("\nPerforming object detection:")

    input_imgs = Variable(img.type(Tensor)).unsqueeze(0)

    detections = None
    with torch.no_grad():
        detections = model(input_imgs)
        detections = non_max_suppression(detections, opt.conf_thres,
                                         opt.nms_thres)

    plt.set_cmap('gray')
    rewrite = True

    img = np.array(Image.open(img_path + ".png").convert('L'))
    fig, ax = plt.subplots(1)
    ax.imshow(img)
    # print(img.shape)
    filename = img_path[-4:]

    if detections is not None:
        # Rescale boxes to original image
        detections = rescale_boxes(detections[0], opt.img_size, img.shape[:2])

        for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
            box_w = x2 - x1
            box_h = y2 - y1
            x1, y1, x2, y2 = math.floor(x1), math.floor(y1), math.ceil(
                x2), math.ceil(y2)
            box_w, box_h = x2 - x1, y2 - y1

            if rewrite:
                f1 = open(f"./coordinate/{filename}.txt", 'w')
                f1.write("{:d} {:d} {:d} {:d} {:d} {:d}\n".format(
                    x1, y1, x2, y2, box_w, box_h))
                rewrite = False
            else:
                f1 = open(f"./coordinate/{filename}.txt", 'a')
                f1.write("{:d} {:d} {:d} {:d} {:d} {:d}\n".format(
                    x1, y1, x2, y2, box_w, box_h))

            bbox = patches.Rectangle((x1, y1),
                                     box_w,
                                     box_h,
                                     linewidth=0.5,
                                     edgecolor='red',
                                     facecolor="none")
            ax.add_patch(bbox)

    plt.axis("off")
    plt.gca().xaxis.set_major_locator(NullLocator())
    plt.gca().yaxis.set_major_locator(NullLocator())
    plt.savefig(f"./output/{filename}.png",
                bbox_inches="tight",
                pad_inches=0.0,
                facecolor="none")
    plt.close()
    print("\nImage has saved")

    f1.close()
    path1 = join("./coordinate", filename)
    path2 = join("./GT_coordinate", filename)

    Sort_coordinate(f"{path1}.txt", flag=True)
    Sort_coordinate(f"{path2}.txt", flag=False)
Esempio n. 30
0
        #         gval = 0.1 + cval
        #         bval = 0.45 - ((0.45 - cval)/2) + cval
        rgb = (rval, gval, bval)
        RGB.append(rgb)
    print(RGB, '\n')

    p.figure(1)
    ax = p.axes([.15, .15, .65, .75], axisbg='w')
    p.ioff()
    p.ylabel('Latitude')
    p.xlabel('Time [Month-Day]')
    p.title('Ensemble Results')
    days = DayLocator()
    daysFmt = DateFormatter("%m-%d")
    noLabels = NullLocator()
    Labels = NullFormatter()
    ax.xaxis.set_major_locator(days)
    ax.xaxis.set_major_formatter(daysFmt)
    latFmt = FormatStrFormatter('%5.2f')
    ax.yaxis.set_major_formatter(latFmt)

    width_all = 1.

    xmin = None

    if 'obs' in os.listdir(os.getcwd()):
        oTime, oLat = buildTimeSeries()

    #     print Time,Lat
    #     print Lat[:,0]