Code Example #1
    def __init__(self,
                 width=5,
                 height=5,
                 render_type='cubes',
                 num_objects=5,
                 seed=None,
                 immovable=False,
                 immovable_fixed=False,
                 opposite_direction=False,
                 background=BACKGROUND_WHITE,
                 num_colors=5,
                 same_shape_and_color=False):
        self.width = width
        self.height = height
        self.render_type = render_type
        self.immovable = immovable
        self.immovable_fixed = immovable_fixed
        self.opposite_direction = opposite_direction
        self.same_shape_and_color = same_shape_and_color

        self.num_objects = num_objects
        self.num_actions = 4 * self.num_objects  # Move NESW
        self.num_colors = num_colors

        self.colors = utils.get_colors(num_colors=max(9, self.num_objects))
        self.background = background
        # used only if background != BACKGROUND_WHITE
        self.background_colors = utils.get_colors(cmap="Set2",
                                                  num_colors=num_colors)
        # used only if background in [BACKGROUND_RANDOM_SAME_EP, BACKGROUND_DETERMINISTIC]
        self.background_index = 0

        self.np_random = None
        self.game = None
        self.target = None

        # Initialize to pos outside of env for easier collision resolution.
        self.objects = [[-1, -1] for _ in range(self.num_objects)]

        # If True, then check for collisions and don't allow two
        #   objects to occupy the same position.
        self.collisions = True

        self.action_space = spaces.Discrete(self.num_actions)
        self.observation_space = spaces.Box(low=0,
                                            high=1,
                                            shape=(3, self.width, self.height),
                                            dtype=np.float32)

        self.seed(seed)
        self.reset()
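A minimal usage sketch for the constructor above, assuming the class is named BlockPushing (inferred from block_pushing.py and the BlockPushingCursor subclass shown in Code Example #3); the checks only touch attributes assigned in this __init__.

# Hypothetical usage; BlockPushing is an assumed class name.
env = BlockPushing(width=5, height=5, num_objects=3, seed=42)

assert env.num_actions == 4 * env.num_objects   # one N/E/S/W move per object
print(env.action_space)                         # Discrete(12)
print(env.observation_space.shape)              # (3, 5, 5)
print(env.objects)                              # object positions, placed by reset()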
Code Example #2
File: block_pushing.py Project: zymale/c-swm
    def __init__(self,
                 width=5,
                 height=5,
                 render_type='cubes',
                 num_objects=5,
                 seed=None):
        self.width = width
        self.height = height
        self.render_type = render_type

        self.num_objects = num_objects
        self.num_actions = 4 * self.num_objects  # Move NESW

        self.colors = utils.get_colors(num_colors=max(9, self.num_objects))

        self.np_random = None
        self.game = None
        self.target = None

        # Initialize to pos outside of env for easier collision resolution.
        self.objects = [[-1, -1] for _ in range(self.num_objects)]

        # If True, then check for collisions and don't allow two
        #   objects to occupy the same position.
        self.collisions = True

        self.action_space = spaces.Discrete(self.num_actions)
        self.observation_space = spaces.Box(low=0,
                                            high=1,
                                            shape=(3, self.width, self.height),
                                            dtype=np.float32)

        self.seed(seed)
        self.reset()
Code Example #3
    def __init__(self,
                 width=5,
                 height=5,
                 render_type='cubes',
                 num_objects=5,
                 seed=None,
                 immovable=False,
                 same_shape_and_color=False):

        super(BlockPushingCursor,
              self).__init__(width=width,
                             height=height,
                             render_type=render_type,
                             num_objects=num_objects,
                             seed=seed,
                             immovable=immovable,
                             same_shape_and_color=same_shape_and_color)

        # overwrite action space
        self.num_actions = 8
        self.action_space = spaces.Discrete(self.num_actions)

        # get color for cursor
        self.colors = utils.get_colors(num_colors=max(9, self.num_objects + 1))

        # initialize cursor outside of the env
        self.cursor = [-1, -1]
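A hedged instantiation sketch for the cursor variant; BlockPushingCursor is the name used in the super() call above, and get_colors is assumed to return a sequence of colors.

# Hypothetical usage of the cursor-controlled variant.
env = BlockPushingCursor(width=5, height=5, num_objects=3, seed=0)

assert env.num_actions == 8                      # action space overwritten to 8 moves
assert len(env.colors) >= env.num_objects + 1    # one extra color reserved for the cursor
print(env.cursor)                                # initialised off-grid at [-1, -1]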
Code Example #4
def get_image():
    tile_size = 40

    cars = [
        Boxy(get_colors(4)),
        Midsize(get_colors(4)),
        Race(get_colors(4)),
        OldSchool(get_colors(2, False))
    ]

    car = Car(random.choice(cars).picture, tile_size)

    output = BytesIO()
    car.result.save(output, 'PNG')
    output.seek(0)

    return send_file(io.BytesIO(output.read()), mimetype='image/png')
Code Example #5
    def __init__(self, n_clusters=8, beta=16, distance_func=euclidean_distance):
        self.n_clusters = n_clusters
        self.beta = beta
        self.d = distance_func

        self.labels = {}            # maps a point to the id of the cluster it belongs to
        self.centers = list()       # the list of centers such that the pairwise distances > 2β
        self.collection = list()    # the collection of disjoint clusters
        self.unclustered_points = set()         # the set of unclustered points
        self.colors = get_colors(n_clusters)    # the colors used to visualize the clusters
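Every snippet in this listing calls some project-specific get_colors helper, each with a different signature (num_colors, cmap, basecolor, df, a file path, ...). As a rough, purely illustrative sketch of the simplest variant, a list of evenly spaced colormap colors could be produced like this; none of the projects above necessarily implement it this way.

# Illustrative sketch of a generic get_colors helper (assumption, not any project's actual code).
import numpy as np
import matplotlib.pyplot as plt

def get_colors(num_colors, cmap='Set1'):
    """Return num_colors RGBA tuples sampled evenly from a matplotlib colormap."""
    colormap = plt.get_cmap(cmap)
    return [colormap(x) for x in np.linspace(0.0, 1.0, num_colors)]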
Code Example #6
File: vis.py Project: NREL/MetMastVis
def monthlyhourlyplot(metdat, catinfo, category=None, basecolor='span'):
    """**Get Monthly Hourly Averaged Profile**.

    Plot the monthly hourly averaged profile of a given variable (or category of variables) grouped by a given condition (or set of conditions).
    
    Parameters:
        1. metdat (Pandas DataFrame): The desired input data (Met Mast).
        2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
        3. category (string) [default: None]: Specifies the category of information that is desired for plotting.
        4. basecolor (string) [default: 'span']: Provides the color code information to get from "utils.py".

    Returns:
        1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
        2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
    """

    if category is None:
        print('not sure what to plot...')
        return

    months = utils.monthnames()
    colors = utils.get_colors(len(catinfo['columns'][category]),
                              basecolor=basecolor,
                              reverse=True)
    colnames, vertlocs, ind = utils.get_vertical_locations(
        catinfo['columns'][category], reverse=True)

    plotdat = metdat[colnames].groupby(
        [metdat.index.month.rename('month'),
         metdat.index.hour.rename('hour')]).mean()

    fig, ax = plt.subplots(4, 3, figsize=(9, 11), sharex=True, sharey=True)
    for iax in range(len(months)):
        for catitem in range(len(colnames)):
            ax.flatten()[iax].plot(plotdat[colnames[catitem]].xs(iax + 1),
                                   color=colors[catitem])
        ax.flatten()[iax].set_title(months[iax], fontsize=12)

    fig.text(0.5, 0.2, 'Time of Day [hour]', ha='center', va='center')
    leg = fig.legend([str(v) + ' m' for v in vertlocs],
                     loc='upper center',
                     bbox_to_anchor=(0, -0.825, 1, 1),
                     bbox_transform=plt.gcf().transFigure,
                     frameon=False,
                     ncol=2)
    fig.tight_layout()
    fig.subplots_adjust(bottom=0.25)
    fig.text(0,
             0.6125,
             catinfo['labels'][category],
             ha='center',
             va='center',
             rotation='vertical')

    return fig, ax
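A hedged call sketch for monthlyhourlyplot. The code above only requires that metdat carry a DatetimeIndex (it groups by metdat.index.month and metdat.index.hour) and that catinfo map the chosen category to column names and an axis label; the category name 'speed' and the output filename below are illustrative, and whether particular column names are parsed correctly is up to utils.get_vertical_locations.

# Hypothetical call; metdat and catinfo are assumed to exist as described above.
fig, ax = monthlyhourlyplot(metdat, catinfo, category='speed', basecolor='span')
fig.savefig('monthly_hourly_speed.png', bbox_inches='tight')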
Code Example #7
File: vis.py Project: NREL/MetMastVis
def normalized_hist_by_stability(metdat, catinfo, vertloc=80):
    """**Get Normalized Stability Grouped Histogram Figure**.

    Plot the normalized stability grouped histogram of a given variable (or category of variables) grouped by a given condition (or set of conditions).
    
    Parameters:
        1. metdat (Pandas DataFrame): The desired input data (Met Mast).
        2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
        3. vertloc (integer, float) [default: 80]: Describes the desired vertical location along the tower for analysis.

    Returns:
        1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
        2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
    """

    stabconds = utils.get_stabconds()
    stabcol, _, _ = utils.get_vertical_locations(
        catinfo['columns']['stability flag'], location=vertloc)
    colors = utils.get_colors(len(stabconds), basecolor='span')

    temp = metdat[stabcol].dropna()
    garb = temp.groupby(temp.index.hour).value_counts(normalize=True)
    garb.index.names = ['hour', 'stabclass']
    garb = garb.reorder_levels(['stabclass', 'hour'])

    hours = np.arange(24)
    newbottom = np.zeros(24)

    fig, ax = plt.subplots()
    for jj, cond in enumerate(stabconds):
        # Use this for missing data, also works for full data
        a = garb.loc[cond]
        b = a.index.tolist()
        c = a.values.tolist()
        for i in range(len(hours)):
            if (hours[i]) in b:
                pass
            else:
                b.insert(i, hours[i])
                c.insert(i, 0)

        d = pd.Series(data=c, index=b)
        ax.bar(hours, d, color=colors[jj], bottom=newbottom)
        newbottom += c  #<-- for if missing data, also works for full data

        #ax.bar(hours, garb.loc[cond], color=colors[jj], bottom=newbottom)
        #newbottom += garb.loc[cond]

    ax.set_ylabel('Probability [%]')
    ax.set_xlabel('Time of Day [Hour]')
    fig.legend(stabconds)
    #fig.legend(stabconds, loc=6, bbox_to_anchor=(1,0.5),framealpha=0)
    fig.tight_layout()

    return fig, ax
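The inner loop above pads hours missing from garb.loc[cond] with zeros before stacking the bars. Assuming garb.loc[cond] is a Series indexed by hour of day (as the groupby/value_counts above produces), an equivalent and more compact fill uses reindex:

# Sketch of the same missing-hour fill via reindex.
d = garb.loc[cond].reindex(hours, fill_value=0)
ax.bar(hours, d, color=colors[jj], bottom=newbottom)
newbottom += d.values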
Code Example #8
def hist_by_stability(metdat, catinfo, category, vertloc=80, basecolor='span'):
    ###########################################
    """
    make histograms separating the variable (colname) by stability class.
    stability is the list of column names containing stability flags
    Parameters:
        metdat:
            Pandas dataframe containing met mast data
        catinfo:
            dict containing categorization info for the metmast data. For each category,
            catinfo holds column names, labels, units, and save names
        category:
            string specifying category of information to plot (e.g. 'speed', 'stability', etc.)
        vertloc:
            int or float describing the exact or approximate height of interest along the tower
        basecolor:
            string with the color code info to get from utils.
    """
    stabconds = utils.get_stabconds()
    stabcol, _, _ = utils.get_vertical_locations(
        catinfo['columns']['stability flag'], location=vertloc)
    varcol, vertloc, _ = utils.get_vertical_locations(
        catinfo['columns'][category], location=vertloc)
    colors = utils.get_colors(len(stabconds), basecolor=basecolor)

    metdat = metdat.groupby(stabcol)

    fig, ax = plt.subplots(len(stabconds),
                           1,
                           figsize=(4, 6),
                           sharex=True,
                           sharey=True)
    for ii, stab in enumerate(stabconds):
        data = metdat[varcol].get_group(stab).dropna()
        ax.flatten()[ii].hist(data,
                              facecolor=colors[ii],
                              edgecolor='k',
                              bins=50,
                              weights=np.ones(len(data)) / len(data),
                              density=False)
        ax.flatten()[ii].legend([stab], fontsize=10, frameon=False)

    ax.flatten()[0].set_title(r'$z={}$m'.format(vertloc))

    fig.text(-0.03,
             0.5,
             'Frequency [%]',
             rotation='vertical',
             ha='center',
             va='center')
    fig.text(0.5, 0, catinfo['labels'][category], ha='center', va='center')

    fig.tight_layout()

    return fig, ax
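The weights argument in the hist call above turns raw counts into per-bin frequencies: each sample contributes 1/len(data) to its bin, so the bar heights sum to one. A minimal standalone illustration with synthetic data:

# Frequencies instead of counts via the weights argument of Axes.hist.
import numpy as np
import matplotlib.pyplot as plt

data = np.random.randn(1000)
fig, ax = plt.subplots()
ax.hist(data, bins=50, weights=np.ones(len(data)) / len(data))
ax.set_ylabel('Frequency')  # bar heights now sum to 1 across all bins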
Code Example #9
File: get_images.py Project: jwansek/yaoi-communism
def main(draw_faces=False):
    try:
        simg = get_image(get_random_searchtag())
    except ConnectionError:
        logging.warning("Retried since couldn't get source...")
        return main()

    if id_is_blacklisted(simg.id):
        logging.info("Retried, already posted image...")
        return main()

    if check_pixiv_404(fix_source_url(simg.source)):
        logging.warning("Skipping since pixiv linked 404'd")
        return main()

    append_blacklisted(simg.id)

    with DownloadedImage(simg.imurl) as impath:
        img = cv2.imread(impath)

        cascade = cv2.CascadeClassifier(
            os.path.join("lbpcascade_animeface", "lbpcascade_animeface.xml"))
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)

        faces = cascade.detectMultiScale(
            gray,
            # detector options
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(24, 24))
        if draw_faces:
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)

        logging.info("Found image %i faces, id: %i" % (len(faces), simg.id))

        pilimg = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        text = utils.get_quote(CONFIG["texts"])
        logging.info(text)
        font = utils.set_font(pilimg, text)
        draw = ImageDraw.Draw(pilimg)
        lines = utils.messages_multiline(text, font, pilimg)
        colours = utils.get_colors(impath)

        (x, y, faces) = utils.randomize_location(pilimg, lines, font, faces)
        for line in lines:
            height = font.getsize(line[1])[1]
            utils.draw_with_border(x, y, line, colours[0], colours[1], font,
                                   draw)
            y = y + height

        pilimg.save("img.png")
        return "img.png", fix_source_url(simg.source), text
Code Example #10
File: vis.py Project: NREL/MetMastVis
def hist_by_stability(metdat, catinfo, category, vertloc=80, basecolor='span'):
    """**Get Stability Grouped Histogram Figure**.

    Plot the stability grouped histogram of a given variable (or category of variables) grouped by a given condition (or set of conditions).
    
    Parameters:
        1. metdat (Pandas DataFrame): The desired input data (Met Mast).
        2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
        3. category (string): Specifies the category of information that is desired for plotting.
        4. vertloc (integer, float) [default: 80]: Describes the desired vertical location along the tower for analysis.
        5. basecolor (string) [default: 'span']: Provides the color code information to get from "utils.py".

    Returns:
        1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
        2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
    """

    stabconds = utils.get_stabconds()
    stabcol, _, _ = utils.get_vertical_locations(
        catinfo['columns']['stability flag'], location=vertloc)
    varcol, vertloc, _ = utils.get_vertical_locations(
        catinfo['columns'][category], location=vertloc)
    colors = utils.get_colors(len(stabconds), basecolor=basecolor)

    metdat = metdat.groupby(stabcol)

    fig, ax = plt.subplots(len(stabconds),
                           1,
                           figsize=(4, 6),
                           sharex=True,
                           sharey=True)
    for ii, stab in enumerate(stabconds):
        data = metdat[varcol].get_group(stab).dropna()
        ax.flatten()[ii].hist(data,
                              facecolor=colors[ii],
                              edgecolor='k',
                              bins=50,
                              weights=np.ones(len(data)) / len(data),
                              density=False)
        ax.flatten()[ii].legend([stab], fontsize=10, frameon=False)

    ax.flatten()[0].set_title(r'$z={}$m'.format(vertloc))

    fig.text(-0.03,
             0.5,
             'Frequency [%]',
             rotation='vertical',
             ha='center',
             va='center')
    fig.text(0.5, 0, catinfo['labels'][category], ha='center', va='center')

    fig.tight_layout()

    return fig, ax
Code Example #11
def figs_hyperparameters_rej_abc():
    df = get_df(
        path=f"{basepath_dfs}/supplement_hyperparameters_rej_abc.csv", )
    for tasks in tqdm(all_tasks):
        for metric in ["C2ST"]:
            plot_task_metric(
                df,
                tasks,
                metric,
                subfolder="hyperparameters_rej_abc",
                default_color=get_colors(df=df, include_defaults=True)["REJ"],
            )
            plot_task_metric(
                df,
                tasks,
                metric,
                subfolder="hyperparameters_rej_abc",
                labels=False,
                default_color=get_colors(df=df, include_defaults=True)["REJ"],
            )
Code Example #12
def figs_abc_lra_sass():
    df = get_df(path=f"{basepath_dfs}/supplement_abc_lra_sass.csv", )

    for tasks in tqdm(all_tasks):
        for metric in ["C2ST"]:
            plot_task_metric(
                df,
                tasks,
                metric,
                subfolder="abc_lra_sass",
                default_color=get_colors(df=df),
            )
            plot_task_metric(
                df,
                tasks,
                metric,
                subfolder="abc_lra_sass",
                labels=False,
                default_color=get_colors(df=df),
            )
Code Example #13
def monthlyhourlyplot(metdat, catinfo, category=None, basecolor='span'):
    ###########################################
    """
    Plot monthly hourly averaged profiles against one another.
    Parameters:
        metdat:
            Pandas dataframe containing met mast data
        catinfo:
            dict containing categorization info for the metmast data. For each category,
            catinfo holds column names, labels, units, and save names
        category:
            string specifying category of information to plot (e.g. 'speed', 'stability', etc.)
        basecolor:
            string with the color code info to get from utils.
    """
    if category is None:
        print('not sure what to plot...')
        return

    months = utils.monthnames()
    colors = utils.get_colors(len(catinfo['columns'][category]),
                              basecolor=basecolor,
                              reverse=True)
    colnames, vertlocs, ind = utils.get_vertical_locations(
        catinfo['columns'][category], reverse=True)

    plotdat = metdat[colnames].groupby([metdat.index.month,
                                        metdat.index.hour]).mean()

    fig, ax = plt.subplots(4, 3, figsize=(9, 11), sharex=True, sharey=True)
    for iax in range(len(months)):
        for catitem in range(len(colnames)):
            ax.flatten()[iax].plot(plotdat[colnames[catitem]].xs(iax + 1),
                                   color=colors[catitem])
        ax.flatten()[iax].set_title(months[iax], fontsize=12)

    fig.text(0.5, 0.2, 'Time of Day [hour]', ha='center', va='center')
    leg = fig.legend([str(v) + ' m' for v in vertlocs],
                     loc='upper center',
                     bbox_to_anchor=(0, -0.825, 1, 1),
                     bbox_transform=plt.gcf().transFigure,
                     frameon=False,
                     ncol=2)
    fig.tight_layout()
    fig.subplots_adjust(bottom=0.25)
    fig.text(0,
             0.6125,
             catinfo['labels'][category],
             ha='center',
             va='center',
             rotation='vertical')

    return fig, ax
Code Example #14
def figs_mmd():
    subfolder = "mmd"
    if not os.path.exists(basepath_figs / subfolder):
        os.makedirs(basepath_figs / subfolder)

    chart = fig_posterior(
        task_name="two_moons",
        width=200,
        height=200,
        samples_path=
        "runs/8ad671f1-1e7b-4af8-b0e9-ec35a99d354f/posterior_samples.csv.bz2",
        samples_name="REJ",
        title="REJ-ABC on Two Moons",
        legend=False,
        colors_dict=get_colors(include_defaults=True),
        config="manuscript",
    )

    path = str(basepath_figs / subfolder / f"rej_abc.svg")
    chart.save(path)
    for ff in ["pdf", "png"]:
        den.convert_file(path, to=ff, debug=0)

    chart = fig_posterior(
        task_name="two_moons",
        width=200,
        height=200,
        samples_path=
        "runs/e37c16cf-3dda-4e26-ac3e-781fd27971a8/posterior_samples.csv.bz2",
        samples_name="SNLE",
        title="SNLE on Two Moons",
        legend=False,
        colors_dict=get_colors(include_defaults=True),
        config="manuscript",
    )

    path = str(basepath_figs / subfolder / f"snle.svg")
    chart.save(path)
    for ff in ["pdf", "png"]:
        den.convert_file(path, to=ff, debug=0)
Code Example #15
def stability_profile(metdat,
                      catinfo,
                      category=None,
                      vertloc=80,
                      basecolor='span'):
    ###########################################
    """
    Plot cumulative average profiles sorted by stability.
    Parameters:
        metdat:
            Pandas dataframe containing met mast data
        catinfo:
            dict containing categorization info for the metmast data. For each category,
            catinfo holds column names, labels, units, and save names
        category:
            string specifying category of information to plot (e.g. 'speed', 'stability', etc.)
        vertloc:
            int or float describing the exact or approximate height of interest along the tower
        basecolor:
            string with the color code info to get from utils.
    """
    if category is None:
        print('not sure what to plot...')
        return

    stab, stabloc, ind = utils.get_vertical_locations(
        catinfo['columns']['stability flag'], location=vertloc)
    colors = utils.get_colors(5, basecolor=basecolor)
    stabconds = utils.get_stabconds()

    # extract vertical locations of data from variable names
    _, vertlocs, ind = utils.get_vertical_locations(
        catinfo['columns'][category])

    plotdat = metdat.groupby(stab).mean()
    pdat = plotdat[catinfo['columns'][category]].T.iloc[ind]

    fig, ax = plt.subplots(figsize=(3.5, 5))

    for ii, cond in enumerate(stabconds):

        ax.plot(pdat[cond], vertlocs, color=colors[ii], label=cond)

    ax.set_ylabel('Probe Height [m]')
    ax.set_xlabel(catinfo['labels'][category])
    fig.legend(stabconds, loc=6, bbox_to_anchor=(1, 0.5), frameon=False)

    fig.tight_layout()

    return fig, ax
Code Example #16
File: vis.py Project: NREL/MetMastVis
def stability_profile(metdat,
                      catinfo,
                      category=None,
                      vertloc=80,
                      basecolor='cycle'):
    """**Get Stability Profile**.

    Plot the stability profile of a given variable (or category of variables) grouped by a given condition (or set of conditions).
    
    Parameters:
        1. metdat (Pandas DataFrame): The desired input data (Met Mast).
        2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
        3. category (string) [default: None]: Specifies the category of information that is desired for plotting.
        4. vertloc (integer, float) [default: 80]: Describes the desired vertical location along the tower for analysis.
        5. basecolor (string) [default: 'cycle']: Provides the color code information to get from "utils.py".

    Returns:
        1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
        2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
    """

    if category is None:
        print('not sure what to plot...')
        return

    stab, stabloc, ind = utils.get_vertical_locations(
        catinfo['columns']['stability flag'], location=vertloc)
    colors = utils.get_colors(5, basecolor=basecolor)
    stabconds = utils.get_stabconds()

    plotdat = metdat.groupby(stab).mean()
    pdat = plotdat[catinfo['columns'][category]].to_numpy()  # (condition, column) array

    # Extract vertical locations of data from variable names
    _, vertlocs, ind = utils.get_vertical_locations(
        catinfo['columns'][category])

    fig, ax = plt.subplots(figsize=(3.5, 5))
    for ii, cond in enumerate(stabconds):

        ax.plot(pdat[ii, ind], vertlocs, color=colors[ii])

    ax.set_ylabel('Probe Height [m]')
    ax.set_xlabel(catinfo['labels'][category])
    fig.legend(stabconds, loc=6, bbox_to_anchor=(1, 0.5), frameon=False)

    fig.tight_layout()

    return fig, ax
Code Example #17
File: vis.py Project: NREL/MetMastVis
def stacked_hist_by_stability(metdat, catinfo, category, vertloc=80):
    """**Get Stacked Stability Grouped Histogram Figure**.

    Plot the stacked stability grouped histogram of a given variable (or category of variables) grouped by a given condition (or set of conditions).
    
    Parameters:
        1. metdat (Pandas DataFrame): The desired input data (Met Mast).
        2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
        3. category (string): Specifies the category of information that is desired for plotting.
        4. vertloc (integer, float): Describes the desired vertical location along the tower for analysis.

    Returns:
        1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
        2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
    """

    stabconds = utils.get_stabconds()
    stabcol, _, _ = utils.get_vertical_locations(
        catinfo['columns']['stability flag'], location=vertloc)
    varcol, vertloc, _ = utils.get_vertical_locations(
        catinfo['columns'][category], location=vertloc)
    colors = utils.get_colors(len(stabconds), basecolor='span')

    plotdat = metdat.groupby(stabcol)

    fig, ax = plt.subplots()
    temp = pd.DataFrame(
        {cond: plotdat[varcol].get_group(cond)
         for cond in stabconds})
    temp.plot.hist(
        ax=ax,
        stacked=True,
        color=colors,
        bins=35,
        edgecolor='k',
        legend=False,
        #    weights = np.ones(temp.shape) / len(temp.index),
        density=True)

    ax.set_xlabel(catinfo['labels'][category])
    ax.set_title(r'$z={}$m'.format(vertloc))
    fig.legend(stabconds, loc=6, bbox_to_anchor=(1, 0.5), frameon=False)

    fig.tight_layout()

    return fig, ax
Code Example #18
def stacked_hist_by_stability(metdat, catinfo, category, vertloc=80):
    ###########################################
    """
    make a stacked histogram of data separated by stability class.
    Parameters:
        metdat:
            Pandas dataframe containing met mast data
        catinfo:
            dict containing categorization info for the metmast data. For each category,
            catinfo holds column names, labels, units, and save names
        category:
            string specifying category of information to plot (e.g. 'speed', 'stability', etc.)
        vertloc:
            int or float describing the exact or approximate height of interest along the tower
    """
    stabconds = utils.get_stabconds()
    stabcol, _, _ = utils.get_vertical_locations(
        catinfo['columns']['stability flag'], location=vertloc)
    varcol, vertloc, _ = utils.get_vertical_locations(
        catinfo['columns'][category], location=vertloc)
    colors = utils.get_colors(len(stabconds), basecolor='span')

    plotdat = metdat.groupby(stabcol)

    fig, ax = plt.subplots(figsize=(5, 3))
    temp = pd.DataFrame(
        {cond: plotdat[varcol].get_group(cond)
         for cond in stabconds})
    temp.plot.hist(
        ax=ax,
        stacked=True,
        color=colors,
        bins=35,
        edgecolor='k',
        legend=False,
        #    weights = np.ones(temp.shape) / len(temp.index),
        density=True)

    ax.set_xlabel(catinfo['labels'][category])
    # ax.set_title(r'$z={}$m'.format(vertloc))
    fig.legend(stabconds, loc=6, bbox_to_anchor=(1, 0.5), frameon=False)

    fig.tight_layout()

    return fig, ax
Code Example #19
File: vis.py Project: NREL/MetMastVis
def hourlyplot(metdat, catinfo, category=None, basecolor='span'):
    """**Get Hourly Averaged Profile**.

    Plot the hourly averaged profile of a given variable (or category of variables) grouped by a given condition (or set of conditions).
    
    Parameters:
        1. metdat (Pandas DataFrame): The desired input data (Met Mast).
        2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
        3. category (string): Specifies the category of information that is desired for plotting.
        4. basecolor (string): Provides the color code information to get from "utils.py".

    Returns:
        1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
        2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
    """

    if category is None:
        print('not sure what to plot...')
        return

    colors = utils.get_colors(len(catinfo['columns'][category]),
                              basecolor=basecolor,
                              reverse=True)
    colnames, vertlocs, ind = utils.get_vertical_locations(
        catinfo['columns'][category], reverse=True)

    plotdat = metdat[colnames].groupby(metdat.index.hour).mean()

    fig, ax = plt.subplots(figsize=(5, 3.5), sharex=True, sharey=True)
    for iax in range(len(colnames)):
        ax.plot(plotdat[colnames[iax]], color=colors[iax])

    leg = ax.legend([str(v) + ' m' for v in vertlocs],
                    loc=6,
                    bbox_to_anchor=(1, 0.5),
                    frameon=False)
    ax.set_xlabel('Time [hour]')
    ax.set_ylabel(catinfo['labels'][category])

    fig.tight_layout()

    return fig, ax
Code Example #20
def hourlyplot(metdat, catinfo, category=None, basecolor='span'):
    ###########################################
    """
    Plot hourly average profiles against one another.
    Parameters:
        metdat:
            Pandas dataframe containing met mast data
        catinfo:
            dict containing categorization info for the metmast data. For each category,
            catinfo holds column names, labels, units, and save names
        category:
            string specifying category of information to plot (e.g. 'speed', 'stability', etc.)
        basecolor:
            string with the color code info to get from utils.
    """
    if category is None:
        print('not sure what to plot...')
        return

    colors = utils.get_colors(len(catinfo['columns'][category]),
                              basecolor=basecolor,
                              reverse=True)
    colnames, vertlocs, ind = utils.get_vertical_locations(
        catinfo['columns'][category], reverse=True)

    plotdat = metdat[colnames].groupby(metdat.index.hour).mean()

    fig, ax = plt.subplots(figsize=(5, 3), sharex=True, sharey=True)
    for iax in range(len(colnames)):
        ax.plot(plotdat[colnames[iax]], color=colors[iax])

    leg = ax.legend([str(v) + ' m' for v in vertlocs],
                    loc=6,
                    bbox_to_anchor=(1, 0.5),
                    frameon=False)
    ax.set_xlabel('Time [hour]')
    ax.set_ylabel(catinfo['labels'][category])

    fig.tight_layout()

    return fig, ax
Code Example #21
def normalized_hist_by_stability(metdat, catinfo, vertloc=80):
    ###########################################
    """
    make a normalized histogram of data separated by stability class.
    Parameters:
        metdat:
            Pandas dataframe containing met mast data
        catinfo:
            dict containing categorization info for the metmast data. For each category,
            catinfo holds column names, labels, units, and save names
        vertloc:
            int or float describing the exact or approximate height of interest along the tower
    """
    stabconds = utils.get_stabconds()
    stabcol, _, _ = utils.get_vertical_locations(
        catinfo['columns']['stability flag'], location=vertloc)
    colors = utils.get_colors(len(stabconds), basecolor='span')

    temp = metdat[stabcol].dropna()
    garb = temp.groupby(temp.index.hour).value_counts(normalize=True)
    garb.index.names = ['hour', 'stabclass']
    garb = garb.reorder_levels(['stabclass', 'hour'])

    hours = np.arange(24)
    newbottom = np.zeros(24)

    fig, ax = plt.subplots(figsize=(5, 3))
    for jj, cond in enumerate(stabconds):

        ax.bar(hours, garb.loc[cond], color=colors[jj], bottom=newbottom)
        newbottom += garb.loc[cond]

    ax.set_ylabel('Probability [%]')
    ax.set_xlabel('Time of Day [Hour]')
    fig.legend(stabconds, loc=6, bbox_to_anchor=(1, 0.5), framealpha=0)
    fig.tight_layout()

    return fig, ax
Code Example #22
def monthly_profile(metdat, catinfo, category=None, basecolor='cycle'):
    ###########################################
    """
    Plot monthly average profiles against one another.
    Parameters:
        metdat:
            Pandas dataframe containing met mast data
        catinfo:
            dict containing categorization info for the metmast data. For each category,
            catinfo holds column names, labels, units, and save names
        category:
            string specifying category of information to plot (e.g. 'speed', 'stability', etc.)
        basecolor:
            string with the color code info to get from utils.
    """

    if category is None:
        print('not sure what to plot...')
        return

    months = utils.monthnames()
    colors = utils.get_colors(len(months), basecolor=basecolor)
    colnames, vertlocs, ind = utils.get_vertical_locations(
        catinfo['columns'][category])

    plotdat = metdat[colnames].groupby(metdat.index.month).mean()

    fig, ax = plt.subplots(figsize=(3.5, 5), sharex=True, sharey=True)
    for iax in range(len(months)):
        ax.plot(plotdat.xs(iax + 1), vertlocs, color=colors[iax])

    leg = ax.legend(months, loc=7, bbox_to_anchor=(1.75, 0.5), edgecolor='w')
    ax.set_ylabel('Probe Height [m]')
    ax.set_xlabel(catinfo['labels'][category])

    fig.tight_layout()

    return fig, ax
Code Example #23
File: vis.py Project: NREL/MetMastVis
def monthly_profile(metdat, catinfo, category=None, basecolor='cycle'):
    """**Get Monthly Profile**.

    Plot the monthly profile of a given variable (or category of variables) grouped by a given condition (or set of conditions).
    
    Parameters:
        1. metdat (Pandas DataFrame): The desired input data (Met Mast).
        2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
        3. category (string) [default: None]: Specifies the category of information that is desired for plotting.
        4. basecolor (string) [default: 'cycle']: Provides the color code information to get from "utils.py".

    Returns:
        1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
        2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
    """

    if category is None:
        print('not sure what to plot...')
        return

    months = utils.monthnames()
    colors = utils.get_colors(len(months), basecolor=basecolor)
    colnames, vertlocs, ind = utils.get_vertical_locations(
        catinfo['columns'][category])

    plotdat = metdat[colnames].groupby(metdat.index.month).mean()

    fig, ax = plt.subplots(figsize=(3.5, 5), sharex=True, sharey=True)
    for iax in range(len(months)):
        ax.plot(plotdat.xs(iax + 1), vertlocs, color=colors[iax])

    leg = ax.legend(months, loc=7, bbox_to_anchor=(1.75, 0.5), edgecolor='w')
    ax.set_ylabel('Probe Height [m]')
    ax.set_xlabel(catinfo['labels'][category])

    fig.tight_layout()

    return fig, ax
Code Example #24
File: bot.py Project: carp/yuri-communism
def post():
    with open("files.csv") as f:
        reader = csv.reader(f)
        chosen_row = random.choice(list(reader))
        source = chosen_row[1]
        file_extension = chosen_row[2].split(".")[1]
        filename = "pics/" + chosen_row[2]

    text = get_quote("texts/quotes.txt")
    image = Image.open(filename)
    font = set_font(image, text)  # get font size based on image size
    draw = ImageDraw.Draw(image)
    lines = messages_multiline(text, font, image)  # split up lines for text wrapping
    colors = get_colors(image.filename)  # get colors

    (x, y, faces) = randomize_location(
        image, lines, font
    )  # where to start drawing text

    for line in lines:
        height = font.getsize(line[1])[1]
        draw_with_border(x, y, line, colors[0], colors[1], font, draw)
        y = y + height

    image.save(f"to_tweet.{file_extension}")
    photo = open(f"to_tweet.{file_extension}", "rb")
    response = twitter.upload_media(media=photo)
    message = f"{text} ({source})"
    print(filename, message)
    twitter.update_status(status=message, media_ids=[response["media_id"]])
    photo.close()
    os.remove(photo.name)
    with open("log", "a") as f:
        f.write(
            f"{datetime.now().strftime('%d-%m-%Y %H:%M:%S')}\t{filename}\t({image.size[0]} {image.size[1]})\t{font.size} ({max((font.size // 25), 2)})\t{text}\n"
        )
Code Example #25
                                fill_value=0.0)
        df_err = df.pivot_table(index=['site', 'date'],
                                columns='trt',
                                values=dat,
                                aggfunc=np.std,
                                fill_value=0.0)
        df_pvt.reset_index(inplace=True)
        df_err.reset_index(inplace=True)

        df_pvt.to_csv(constants.base_dir + constants.soil_dir + 'df_pvt.csv')
        # Determine maximum value on y axis
        y_max = utils.roundup(df[dat].max(), 10.0)

        # Start plotting
        utils.set_matplotlib_params()
        colors = utils.get_colors()

        # Set up the axes and figure
        fig, axis = plt.subplots(nrows=nrow,
                                 ncols=ncol,
                                 figsize=(5 * ncol, 5 * nrow))

        ctr = [(x, y) for x in np.arange(nrow) for y in np.arange(ncol)]
        site_ctr = 0

        for i in ctr:
            if (nrow > 1):
                ax = axis[i[0], i[1]]
            else:
                ax = axis[site_ctr]
Code Example #26
def main():

    ### Main definitions
    ROOT.gROOT.SetBatch()
    ###
    canvas, c_top, c_bottom = utils.create_double_pad('plot_3lcr_2l2v')
    ###
    root_file = ROOT.TFile('inputs/MTW_all_met_tst.root')
    stack = utils.re_style_top(
        root_file.Get('c1').GetPrimitive('pad1').GetPrimitive('hs1'))
    stack.GetXaxis().SetTitle('#it{E}_{T}^{miss} [GeV]')
    stack.GetYaxis().SetTitle('Events / 30 GeV')
    hists = {}
    merging_scheme = utils.complex_merging
    full_stack = stack.GetStack().Last()
    for hist in stack.GetHists():
        prev_color = hist.GetFillColor()
        sample_name = None
        for key, vals in utils.original_colors_2l2v.iteritems():
            if prev_color in vals:
                sample_name = key
        if not sample_name:
            print('Sample Name Not Found')
        else:
            '''
      for merging_key, merging_list in merging_scheme.iteritems():
        if sample_name in merging_list:
          print sample_name, ' is supposed to be merged into ', merging_key
      '''
        hists[sample_name] = utils.re_shape_tail(hist)
        '''
    a = hist.GetXaxis()
    print
    print a.GetNbins()
    print a.GetXmin()
    print a.GetXmax()
    '''
    ### Getting complicated here...
    loop_over_this = hists.keys()
    to_be_removed = utils.wjets_removal
    for key in loop_over_this:
        for merging_key, merging_list in merging_scheme.iteritems():
            if key in merging_list:
                to_be_removed.add(key)
                if merging_key in hists:
                    hists[merging_key].Add(hists[key])
                else:
                    hists[merging_key] = hists[key].Clone('another_clone_' +
                                                          hists[key].GetName())
    to_be_used = []
    for k in hists:
        hist = hists[k]
        hist.SetFillColor(utils.get_colors(k))
        hist.SetLineColor(ROOT.kBlack)
        hist.SetLineWidth(1)
        hist.SetTitle(utils.titles[k])
        if not k in to_be_removed:
            to_be_used.append((k, hist.Integral()))
    sample_list = sorted(to_be_used, key=lambda x: x[1])
    sample_list_r = sorted(to_be_used, key=lambda x: x[1], reverse=True)
    new_stack = ROOT.THStack(stack.GetName() + '_clone', '')
    for name, integral in sample_list:
        new_stack.Add(hists[name])
    ###
    data = utils.re_style_top(
        utils.th1_to_tgraph(
            root_file.Get('c1').GetPrimitive('pad1').GetPrimitive(
                'met_tst_MTW_all_Nominal'), True))
    prev_error, last_bin_ratio = utils.re_shape_tail(
        root_file.Get('c1').GetPrimitive('pad1').GetPrimitive('h0'), True)
    error = utils.re_style_top(utils.th1_to_tgraph(prev_error))
    error.SetMarkerStyle(1)
    error.SetFillColor(ROOT.kBlack)
    error.SetFillStyle(3345)
    ###
    ratio_axis = utils.re_style_bot(
        root_file.Get('c1').GetPrimitive('pad2').GetPrimitive('h3'))
    ratio = utils.re_style_bot(
        utils.th1_to_tgraph(
            root_file.Get('c1').GetPrimitive('pad2').GetPrimitive('h3'), True))
    ratio_axis.GetXaxis().SetTitle('#it{E}_{T}^{miss} [GeV]')
    ratio_axis.GetYaxis().SetTitle('#frac{Data}{Prediction}')
    syst_band = utils.re_style_bot(
        utils.th1_to_tgraph(
            utils.change_last_bin(
                root_file.Get('c1').GetPrimitive('pad2').GetPrimitive('h0'),
                last_bin_ratio)))
    #ratio = utils.re_style_bot( root_file.Get( 'c1' ).GetPrimitive( 'pad2' ).GetPrimitive( 'h3' ) )
    #ratio.GetXaxis().SetTitle( '#it{E}_{T}^{miss} [GeV]' )
    #ratio.GetYaxis().SetTitle( '#frac{Data}{Prediction}' )
    ##syst_band = utils.re_style_bot( utils.re_shape_tail( root_file.Get( 'c1' ).GetPrimitive( 'pad2' ).GetPrimitive( 'h0' ), full_stack ) )
    #syst_band = utils.re_style_bot( utils.change_last_bin( root_file.Get( 'c1' ).GetPrimitive( 'pad2' ).GetPrimitive( 'h0' ), last_bin_ratio ) )
    syst_band.SetMarkerStyle(1)
    syst_band.SetFillColor(ROOT.kBlack)
    syst_band.SetFillStyle(3345)
    ###
    c_top.cd()
    new_stack.SetMaximum(1000000)
    new_stack.SetMinimum(0.001)
    new_stack.Draw('hist')
    new_stack = utils.re_style_top(new_stack)
    new_stack.Draw('hist')
    new_stack.GetXaxis().SetTitle('#it{E}_{T}^{miss} [GeV]')
    new_stack.GetYaxis().SetTitle('Events / 30 GeV')
    new_stack.GetXaxis().SetRangeUser(0, 600)
    error.Draw('e2 same')
    data.Draw('pe')
    x_l1, y_l1, latex1 = utils.draw_latex(utils.lumi, True)
    latex1.DrawLatex(
        x_l1, y_l1 - 0.12, utils.h_to_zz_to + utils.lepp + utils.lepm +
        utils.nu + utils.nubar + utils.void_char + utils.inv_prime)
    latex1.DrawLatex(x_l1, y_l1 - 0.18, '3#it{l} Control Region')
    leg = utils.create_legend(len(sample_list_r), 0.3, 1)
    leg.AddEntry(data, 'Data', 'pe')
    for name, integral in sample_list_r:
        leg.AddEntry(hists[name], hists[name].GetTitle(), 'f')
    leg.AddEntry(error, 'Uncertainty', 'f')
    leg.Draw()
    #utils.patch_bar( 229. / 566., 234. / 566., 322. / 407., 322. / 407., True )
    c_top.SetLogy()
    c_bottom.cd()
    ratio_axis.Draw('axis')
    ratio_axis.GetXaxis().SetRangeUser(0, 600)
    syst_band.Draw('e2 same')
    ratio.Draw('pe0 same')
    c_bottom.Update()
    for arrow in utils.get_arrows(ROOT.gPad.GetUymin(), ROOT.gPad.GetUymax(),
                                  ratio):
        arrow.Draw()
    Line = ROOT.TLine(ROOT.gPad.GetUxmin(), 1, ROOT.gPad.GetUxmax(), 1)
    Line.SetLineColor(ROOT.kBlack)
    Line.SetLineWidth(2)
    Line.SetLineStyle(2)
    Line.Draw()
    #canvas.SaveAs( canvas.GetName() + '.pdf' )
    utils.save(canvas)
Code Example #27
def main(argv):
    parser = argparse.ArgumentParser(
        description="Do the forward pass and estimate a set of primitives")
    parser.add_argument("dataset_directory",
                        help="Path to the directory containing the dataset")

    parser.add_argument(
        "primitives_directory",
        help=
        "Path to the directory containing the superquadrics of the instance")

    parser.add_argument("output_directory",
                        help="Save the output files in that directory")

    parser.add_argument(
        "--weight_file",
        default=None,
        help="The path to the previously trainined model to be used")

    parser.add_argument("--save_prediction_as_mesh",
                        action="store_true",
                        help="When true store prediction as a mesh")

    parser.add_argument("--run_on_gpu", action="store_true", help="Use GPU")

    parser.add_argument("--prob_threshold",
                        type=float,
                        default=0.5,
                        help="Probability threshold")

    # Parse args
    add_nn_parameters(parser)
    add_dataset_parameters(parser)
    add_datatype_parameters(parser)
    add_training_parameters(parser)
    args = parser.parse_args(argv)

    # Check if output directory exists and if it doesn't create it
    if not os.path.exists(args.output_directory):
        os.makedirs(args.output_directory)

    if args.run_on_gpu and torch.cuda.is_available():
        device = torch.device("cuda:0")
    else:
        device = torch.device("cpu")
    # device = torch.device("cuda:0")
    print("Running code on ", device)

    # TODO
    M = 11
    data_output_shape = (M, 7)

    # Create a factory that returns the appropriate data type based on the
    # input argument
    data_factory = DataFactory(
        args.data_type, tuple([data_input_shape(args), data_output_shape]))

    # Create a dataset instance to generate the samples for training
    dataset = get_dataset_type("matrix_loss")(
        (DatasetBuilder().with_dataset(args.dataset_type).build(
            args.dataset_directory)),
        data_factory,
        transform=compose_transformations(args.data_type))

    # TODO: Change batch_size in dataloader
    dataloader = DataLoader(dataset, batch_size=1, num_workers=4)

    network_params = NetworkParameters(args.architecture, M, False)
    model = network_params.network(network_params)
    # Move model to device to be used

    model.to(device)
    if args.weight_file is not None:
        # Load the model parameters of the previously trained model
        model.load_state_dict(torch.load(args.weight_file))
    model.eval()

    # Keep track of the files containing the parameters of each primitive
    primitives = load_all_primitive_parameters(args.primitives_directory,
                                               args.prob_threshold)
    gt_primitives = list(primitives)
    colors = get_colors(M)

    # Prepare matlab figs
    # mlab.view(azimuth=0.0, elevation=0.0, distance=2)

    # Iterate thru the data
    total_runs = 0
    total = 0
    r_loss_total = 0
    t_loss_total = 0
    # fp = open(os.path.join(args.output_directory, "stats.csv"), "w")
    # fp.write("loss_total\trot_loss\ttrans_loss\t\n")

    for sample in dataloader:
        primitive_list = []
        total_runs += 1
        X, y_target = sample

        # Show input image
        # img = X.numpy()[0]
        # img = np.transpose(img, (1,2,0))
        # img = img.reshape((224, 224, 3))
        # imgplot = plt.imshow(img)
        # plt.show()

        X, y_target = X.to(device), y_target.to(device)

        # Declare some variables
        B = y_target.shape[0]  # batch size
        M = y_target.shape[1]  # number of primitives
        poses_target = y_target.view(B, M, 7).detach().cpu().numpy()
        rotations_target = poses_target[:, :, :4].reshape(B, M, 4)[0]
        translations_target = poses_target[:, :, 4:].reshape(B, M, 3)[0]

        # # Do the forward pass
        y_hat = model(X)
        translations = y_hat[0].detach().cpu().numpy().reshape(B, M, 3)[0]
        rotations = y_hat[1].detach().cpu().numpy().reshape(B, M, 4)[0]

        # Loss computations
        # options = dict()
        # options["device"] = device
        # loss, extra = matrix_loss(y_hat, y_target, options)
        # total += (extra["r_loss"] + extra["t_loss"])
        # r_loss_total += extra["r_loss"]
        # t_loss_total += extra["t_loss"]

        # fp.write(str(total / total_runs))
        # fp.write("\t")
        # fp.write(str(r_loss_total / total_runs))
        # fp.write("\t")
        # fp.write(str(t_loss_total / total_runs))
        # fp.write("\t")
        # fp.write("\n")

        # if total_runs % 50 == 0:
        #     print(total / total_runs )

        i = 0
        # fig1 = mlab.figure(size=(400, 400), bgcolor=(1, 1, 1))
        # fig2 = mlab.figure(size=(400, 400), bgcolor=(1, 1, 1))

        for p in primitives:
            # primitives[i]["rotation"] = rotations[i]
            # primitives[i]["location"] = translations[i]

            # gt_primitives[i]["rotation"] = rotations_target[i]
            # gt_primitives[i]["location"] = translations_target[i]
            print("using GT...")
            x_tr, y_tr, z_tr, prim_pts =\
                points_on_sq_surface(
                    p["size"][0],
                    p["size"][1],
                    p["size"][2],
                    p["shape"][0],
                    p["shape"][1],
                    # Quaternion(rotations_target[i]).rotation_matrix.reshape(3, 3),
                    # np.array(translations_target[i]).reshape(3, 1),
                    Quaternion(rotations[i]).rotation_matrix.reshape(3, 3),
                    np.array(translations[i]).reshape(3, 1),
                    p["tapering"][0],
                    p["tapering"][1],
                    None,
                    None
                )

            primitive_list.append((prim_pts, p['color']))
            i += 1

        print("-------- GT ---------")
        print(rotations_target)
        print(translations_target)
        print("--------- Pred ---------")
        print(rotations)
        print(translations)
        display_primitives(primitive_list)
Code Example #28
    df     = df[df.site.isin(constants.SITE_NAMES.values())]
    
    for dat in constants.DATA_COLS:
        print(dat)
        df_pvt = df.pivot_table(index=['site','date'],columns='trt',values=dat,aggfunc='mean',fill_value=0.0)
        df_err = df.pivot_table(index=['site','date'],columns='trt',values=dat,aggfunc=np.std,fill_value=0.0)
        df_pvt.reset_index(inplace=True)
        df_err.reset_index(inplace=True)

        df_pvt.to_csv(constants.base_dir+constants.soil_dir+'df_pvt.csv')
        # Determine maximum value on y axis
        y_max = utils.roundup(df[dat].max(),10.0)
            
        # Start plotting                               
        utils.set_matplotlib_params()
        colors = utils.get_colors()
              
        # Set up the axes and figure
        fig, axis = plt.subplots(nrows=nrow, ncols=ncol, figsize=(5*ncol,5*nrow))

        ctr       = [(x, y) for x in np.arange(nrow) for y in np.arange(ncol)]
        site_ctr  = 0
        
        for i in ctr:
            if(nrow > 1):
                ax = axis[i[0],i[1]]
            else:
                ax = axis[site_ctr]
            
            if(site_ctr >= len(constants.SITE_NAMES)):
                ax.axis('off')
Code Example #29
def main(argv):
    parser = argparse.ArgumentParser(
        description="Do the forward pass and estimate a set of primitives"
    )
    parser.add_argument(
        "dataset_directory",
        help="Path to the directory containing the dataset"
    )
    parser.add_argument(
        "output_directory",
        help="Save the output files in that directory"
    )
    parser.add_argument(
        "--tsdf_directory",
        default="",
        help="Path to the directory containing the precomputed tsdf files"
    )
    parser.add_argument(
        "--weight_file",
        default=None,
        help="The path to the previously trainined model to be used"
    )

    parser.add_argument(
        "--n_primitives",
        type=int,
        default=32,
        help="Number of primitives"
    )
    parser.add_argument(
        "--prob_threshold",
        type=float,
        default=0.5,
        help="Probability threshold"
    )
    parser.add_argument(
        "--use_deformations",
        action="store_true",
        help="Use Superquadrics with deformations as the shape configuration"
    )
    parser.add_argument(
        "--save_prediction_as_mesh",
        action="store_true",
        help="When true store prediction as a mesh"
    )
    parser.add_argument(
        "--run_on_gpu",
        action="store_true",
        help="Use GPU"
    )
    parser.add_argument(
        "--with_animation",
        action="store_true",
        help="Add animation"
    )

    add_dataset_parameters(parser)
    add_nn_parameters(parser)
    add_voxelizer_parameters(parser)
    add_gaussian_noise_layer_parameters(parser)
    add_loss_parameters(parser)
    add_loss_options_parameters(parser)
    args = parser.parse_args(argv)

    # A sampler instance
    e = EqualDistanceSamplerSQ(200)

    # Check if output directory exists and if it doesn't create it
    if not os.path.exists(args.output_directory):
        os.makedirs(args.output_directory)

    if args.run_on_gpu and torch.cuda.is_available():
        device = torch.device("cuda:0")
    else:
        device = torch.device("cpu")
    print ("Running code on {}".format(device))

    # Create a factory that returns the appropriate voxelizer based on the
    # input argument
    voxelizer_factory = VoxelizerFactory(
        args.voxelizer_factory,
        np.array(voxelizer_shape(args)),
        args.save_voxels_to
    )

    # Create a dataset instance to generate the samples for training
    dataset = get_dataset_type("euclidean_dual_loss")(
        (DatasetBuilder()
            .with_dataset(args.dataset_type)
            .filter_tags(args.model_tags)
            .build(args.dataset_directory)),
        voxelizer_factory,
        args.n_points_from_mesh,
        transform=compose_transformations(voxelizer_factory)
    )

    model_tags = dataset._dataset_object._tags

    # TODO: Change batch_size in dataloader
    dataloader = DataLoader(dataset, batch_size=1, num_workers=4)

    network_params = NetworkParameters.from_options(args)
    # Build the model to be used for testing
    model = network_params.network(network_params)
    # Move model to device to be used
    model.to(device)
    if args.weight_file is not None:
        # Load the model parameters of the previously trained model
        model.load_state_dict(
            torch.load(args.weight_file, map_location=device)
        )
    model.eval()

    colors = get_colors(args.n_primitives)

    for sample_idx, sample in enumerate(dataloader):

        model_tag = model_tags[sample_idx]

        X, y_target = sample
        X, y_target = X.to(device), y_target.to(device)

        # Do the forward pass and estimate the primitive parameters
        y_hat = model(X)

        M = args.n_primitives  # number of primitives
        probs = y_hat[0].to("cpu").detach().numpy()
        # Transform the Euler angles to rotation matrices
        if y_hat[2].shape[1] == 3:
            R = euler_angles_to_rotation_matrices(
                y_hat[2].view(-1, 3)
            ).to("cpu").detach()
        else:
            R = quaternions_to_rotation_matrices(
                    y_hat[2].view(-1, 4)
                ).to("cpu").detach()
            # get also the raw quaternions
            quats = y_hat[2].view(-1, 4).to("cpu").detach().numpy()
        translations = y_hat[1].to("cpu").view(args.n_primitives, 3)
        translations = translations.detach().numpy()

        shapes = y_hat[3].to("cpu").view(args.n_primitives, 3).detach().numpy()
        epsilons = y_hat[4].to("cpu").view(
            args.n_primitives, 2
        ).detach().numpy()
        taperings = y_hat[5].to("cpu").view(
            args.n_primitives, 2
        ).detach().numpy()

        pts = y_target[:, :, :3].to("cpu")
        pts_labels = y_target[:, :, -1].to("cpu").squeeze().numpy()
        pts = pts.squeeze().detach().numpy().T

        on_prims = 0

        # XXX: UNTIL I FIX THE MLAB ISSUE
        # fig = mlab.figure(size=(400, 400), bgcolor=(1, 1, 1))
        # mlab.view(azimuth=0.0, elevation=0.0, distance=2)

        # Uncomment to visualize the points sampled from the target mesh
        # t = np.array([1.2, 0, 0]).reshape(3, -1)
        # pts_n = pts + t
        # mlab.points3d(
        #     # pts_n[0], pts_n[1], pts_n[2],
        #     pts[0], pts[1], pts[2],
        #     scale_factor=0.03, color=(0.8, 0.8, 0.8)
        # )

        save_dir = os.path.join(args.output_directory,
                                os.path.basename(os.path.dirname(args.dataset_directory)),
                                model_tag)
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        # args.output_directory/class/model_id/primitive_%d.p
        # args.output_directory/class/model_id/reconstruction

        # Keep track of the files containing the parameters of each primitive
        primitive_files = []
        for i in range(args.n_primitives):
            x_tr, y_tr, z_tr, prim_pts =\
                get_shape_configuration(args.use_cuboids)(
                    shapes[i, 0],
                    shapes[i, 1],
                    shapes[i, 2],
                    epsilons[i, 0],
                    epsilons[i, 1],
                    R[i].numpy(),
                    translations[i].reshape(-1, 1),
                    taperings[i, 0],
                    taperings[i, 1]
                )

            # Dump the parameters of each primitive as a dictionary
            # TODO: change filepath
            store_primitive_parameters(
                size=tuple(shapes[i]),
                shape=tuple(epsilons[i]),
                rotation=tuple(quats[i]),
                location=tuple(translations[i]),
                tapering=tuple(taperings[i]),
                probability=(probs[0, i],),
                color=(colors[i % len(colors)]) + (1.0,),
                filepath=os.path.join(
                    save_dir,
                    "primitive_%d.p" %(i,)
                )
            )
            if probs[0, i] >= args.prob_threshold:
                on_prims += 1
                # mlab.mesh(
                #     x_tr,
                #     y_tr,
                #     z_tr,
                #     color=tuple(colors[i % len(colors)]),
                #     opacity=1.0
                # )
                primitive_files.append(
                    os.path.join(save_dir, "primitive_%d.p" % (i,))
                )

        if args.with_animation:
            cnt = 0
            for az in range(0, 360, 1):
                cnt += 1

                # XXX UNTIL I FIX THE MLAB ISSUE
                # mlab.view(azimuth=az, elevation=0.0, distance=2)
                # mlab.savefig(
                #     os.path.join(
                #         args.output_directory,
                #         "img_%04d.png" % (cnt,)
                #     )
                # )
        for i in range(args.n_primitives):
            print("{} {}".format(i, probs[0, i]))

        print ("Using %d primitives out of %d" % (on_prims, args.n_primitives))

        # XXX UNTIL I FIX THE MLAB ISSUE
        # mlab.show()

        # TODO: from_primitive_parms_to_mesh()
        # TODO: get parts for this chair.
        # TODO: push the parts and superquadric meshes through the metric function
        # TODO: record metrics

        if args.save_prediction_as_mesh:
            # TODO: save with model information, class information ...etc
            print ("Saving prediction as mesh....")
            save_prediction_as_ply(
                primitive_files,
                os.path.join(save_dir, "reconstruction.ply")
            )
            print("Saved prediction as ply file in {}".format(
                os.path.join(save_dir, "reconstruction.ply")
            ))
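
store_primitive_parameters and save_prediction_as_ply are imported helpers and are not defined in this snippet. A minimal sketch of the dump, assuming the helper simply pickles its keyword arguments (size, shape, rotation, location, tapering, probability, color) into the given filepath, might look as follows; the project's actual implementation may do more:

import pickle

def store_primitive_parameters(filepath, **params):
    # Hypothetical sketch: pickle the primitive parameters passed as
    # keyword arguments into a plain dictionary at `filepath`.
    with open(filepath, "wb") as f:
        pickle.dump(params, f)
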
Code example #30
0
File: plot_xfe_feh.py Project: jsobeck/flexCE
path_flexce_top = os.path.abspath(join(path_plot, '../..'))
path_output = join(path_flexce_top, 'output')
path_fileio = join(path_flexce_top, 'flexCE', 'fileio')
path_plots_top = join(path_flexce_top, 'plots')
path_config = join(path_plots_top, 'config')
path_plots = join(path_plots_top, 'plots')
# ---------------------

sys.path.append(path_fileio)
import txt_io
import cfg_io

# Read config file
fin = utils.get_filename(sys.argv, path_config)
cfg = cfg_io.read_plot_config(join(path_config, fin))
colors = utils.get_colors(cfg)
abund = cfg['General']['abundance']
labels = cfg['General']['labels']

# Read in simulation results
sims = []
for sim_id in cfg['General']['sim_ids']:
    sims.append(txt_io.load_dataframe(path_output, sim_id))

# Make plot
for i, (sim, color) in enumerate(zip(sims, colors)):
    marg_kws = dict(norm_hist=True,
                    hist_kws=dict(weights=sim.Survivors.values))
    if i == 0:
        fig = sns.jointplot('[Fe/H]', abund, data=sim, stat_func=None,
                            color=color, marginal_kws=marg_kws)
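
The keys accessed above imply a plot configuration roughly shaped like the sketch below; the concrete values and the file format handled by cfg_io.read_plot_config are assumptions for illustration only:

cfg = {
    'General': {
        'abundance': '[O/Fe]',           # y-axis abundance ratio (example value)
        'labels': ['fiducial', 'alt'],   # one legend label per simulation
        'sim_ids': ['0', '1'],           # flexCE simulation IDs to load
        'colors': ['blue', 'red'],       # assumed input to utils.get_colors(cfg)
    },
}
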
Code example #31
0
network_params = NetworkParameters(architecture, M, False)
model = network_params.network(network_params)
# Move model to device to be used
model.to(device)
if weight_file is not None:
    # Load the model parameters of the previously trained model
    model.load_state_dict(torch.load(weight_file))
    print("Loading...", weight_file)
model.eval()

# Keep track of the files containing the parameters of each primitive
primitives = load_all_primitive_parameters(primitives_directory,
                                           prob_threshold)
gt_primitives = list(primitives)
colors = get_colors(M)

parser = argparse.ArgumentParser(
    description="Do the forward pass and estimate a set of primitives")
add_nn_parameters(parser)
add_dataset_parameters(parser)
add_datatype_parameters(parser)
add_training_parameters(parser)
args = parser.parse_args("")
print(args)

data_type = "image"
data_factory = DataFactory(data_type,
                           tuple([data_input_shape(args), data_output_shape]))
dataset = get_dataset_type("matrix_loss")(
    (DatasetBuilder().with_dataset(
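
load_all_primitive_parameters is not shown in the snippet above. A plausible sketch, assuming it reads back the primitive_%d.p pickles produced in the earlier example and keeps only primitives whose stored probability reaches the threshold (the project's actual code may differ):

import os
import pickle

def load_all_primitive_parameters(primitives_directory, prob_threshold):
    # Hypothetical sketch: collect every primitive_*.p pickle and filter
    # by the stored per-primitive probability.
    primitives = []
    for name in sorted(os.listdir(primitives_directory)):
        if not (name.startswith("primitive_") and name.endswith(".p")):
            continue
        with open(os.path.join(primitives_directory, name), "rb") as f:
            params = pickle.load(f)
        if params["probability"][0] >= prob_threshold:
            primitives.append(params)
    return primitives
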
Code example #32
0
File: main.py Project: bessaFan/Cell-t-SNE
def image_scatter(csv, color_by, x, y, dpi, channel):
    #./main.py image_scatter  --csv ResultTable\ -\ 2\ wells\,\ 1\ fields\,\ thresh\ 160_with_Traces_full_curated.csv -x tsne1 -y tsne2 --color-by CellLine --dpi 650
    df = utils.read_csv(csv)
    print('Found number of cells: %s' % df.shape[0])
    #image_dir = './images/cropped_images/Ron/'
    image_dir = './images/cropped_images/'
    cell_imgs = []
    colors = []
    xx = np.array([])
    yy = np.array([])

    cmap = 'gist_rainbow'
    color_list = utils.get_colors(len(np.unique(df[color_by])), cmap=cmap)

    # blue is better when there are only 3 colours
    if len(color_list) == 3 and cmap == 'gist_rainbow':
        color_list[2] = (0.05, 0.529, 1, 1.0)

    for row_id, row in df.iterrows():
        cell_id = row.CellID
        image_filename = image_dir + 'ch' + str(channel) + '-' + cell_id + '.jpg'
        if not os.path.exists(image_filename):
            print('[WARN] no image found %s' % image_filename)
            continue
        print('Loading image %s' % image_filename)
        image = skimage.io.imread(image_filename)
        image = utils.gray_to_color(image)
        cell_imgs.append(image)
        xx = np.append(xx, row[x])
        yy = np.append(yy, row[y])
        # Find the position of this value among the unique values of color_by
        color_id = np.where(np.unique(df[color_by]) == row[color_by])[0][0]
        c = color_list[color_id]
        # Convert value range from floats in [0, 1] to ints in [0, 255]
        c = (int(c[0] * 255), int(c[1] * 255), int(c[2] * 255))
        colors.append(c)

    if len(cell_imgs) == 0:
        print('[ERROR] 0 cropped single cell images found.')
        return

    canvas = plot.image_scatter(yy,
                                xx,
                                cell_imgs,
                                colors,
                                min_canvas_size=4000)
    plt.imshow(canvas, origin='lower')
    plt.title('%s vs %s' % (x, y))
    plt.xlabel('%s' % x)
    plt.ylabel('%s' % y)
    plt.xticks([])
    plt.yticks([])
    patches = []
    for i in range(len(np.unique(df[color_by]))):
        label = '%s %s' % (color_by, np.unique(df[color_by])[i])
        if color_by == 'Dend.cat':
            label = 'Detected Category %s' % (i + 1)
        # Plot additional data that can't be in the main csv
        # extra_datafile = 'PCaxes.csv'
        # if os.path.exists(extra_datafile):
        #   df_extra = utils.read_csv(extra_datafile)
        #   from IPython import embed
        #   embed() # drop into an IPython session
        #   plt.scatter(avg_x, avg_y,c=color,marker='*')
        patch = mpatches.Patch(color=color_list[i], label=label)
        patches.append(patch)

    plt.legend(handles=patches,
               bbox_to_anchor=(1.04, 0.5),
               loc="center left",
               borderaxespad=0,
               frameon=False)
    save_location = './images/%s_image_scatter_by_%s_dpi%s.jpg' % (
        csv, color_by, dpi)
    plt.savefig(save_location, dpi=dpi, pad_inches=1, bbox_inches='tight')
    # plt.show()
    print('Saved image scatter to %s' % save_location)
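
All of the snippets above use some form of a get_colors helper. The implementations differ per project, but a minimal sketch in the spirit of the calls seen here (sampling N evenly spaced RGBA tuples from a matplotlib colormap) could look like this; it is an illustration, not any project's actual code:

import numpy as np
from matplotlib import cm

def get_colors(num_colors, cmap="gist_rainbow"):
    # Illustrative sketch: sample `num_colors` evenly spaced RGBA tuples
    # from the given matplotlib colormap.
    colormap = cm.get_cmap(cmap)
    return [tuple(colormap(x)) for x in np.linspace(0.0, 1.0, num_colors)]

# Example: colors = get_colors(5) returns five RGBA tuples with values in [0, 1].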