Example #1
def softmax_experiment():
    """Run softmax experiment."""
    print('Running softmax experiment.')
    taus = [0.01, 0.1, 1]
    ars, pos = [], []
    for tau in taus:
        ar, po = run_experiment(2000, 1000, tau=tau, alpha=0.1)
        ars.append(np.mean(ar, 0))
        pos.append(np.mean(po, 0))
        
    # plot the results
    plt.close('all')
    f, (ax1, ax2) = plt.subplots(2)
    for i,tau in enumerate(taus):
        ax1.plot(ars[i].T, label='$\\tau$ = %.2f' % tau)
        ax2.plot(pos[i].T, label='$\\tau$ = %.2f' % tau)
    ax1.legend(loc='lower right')
    ax1.set_ylabel('Average reward')
    ax1.set_xlim(xmin=-10)
    ax2.legend(loc='lower right')
    ax2.set_xlabel('Plays')
    ax2.set_ylabel('% Optimal action')
    ax2.set_xlim(xmin=-20)
    plt.savefig('softmax_experiment.pdf')
    plt.show()
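The `run_experiment` helper used above is not shown. Below is a minimal sketch of what such a helper might look like for a softmax (Boltzmann) action-selection bandit; the 10-armed testbed, the update rule, and the function body are assumptions for illustration, not the original implementation.

import numpy as np

def run_experiment(n_runs, n_plays, tau=0.1, alpha=0.1, n_arms=10):
    """Hypothetical softmax bandit returning (rewards, optimal) arrays of shape
    (n_runs, n_plays), matching how the caller averages over axis 0."""
    rewards = np.zeros((n_runs, n_plays))
    optimal = np.zeros((n_runs, n_plays))
    for run in range(n_runs):
        q_true = np.random.randn(n_arms)        # true arm values
        q_est = np.zeros(n_arms)                # running estimates
        best = np.argmax(q_true)
        for play in range(n_plays):
            logits = (q_est - q_est.max()) / tau    # stabilised softmax
            p = np.exp(logits)
            p /= p.sum()
            a = np.random.choice(n_arms, p=p)
            r = q_true[a] + np.random.randn()
            q_est[a] += alpha * (r - q_est[a])      # constant step-size update
            rewards[run, play] = r
            optimal[run, play] = (a == best)
    return rewards, optimal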
Example #2
def test_plot_acf_kwargs():
    # Just test that it runs.
    fig = plt.figure()
    ax = fig.add_subplot(111)

    ar = np.r_[1., -0.9]
    ma = np.r_[1., 0.9]
    armaprocess = tsp.ArmaProcess(ar, ma)
    rs = np.random.RandomState(1234)
    acf = armaprocess.generate_sample(100, distrvs=rs.standard_normal)

    buff = BytesIO()
    plot_acf(acf, ax=ax)
    fig.savefig(buff, format='rgba')
    plt.close(fig)

    buff_with_vlines = BytesIO()
    fig_with_vlines = plt.figure()
    ax = fig_with_vlines.add_subplot(111)
    vlines_kwargs = {'linestyles': 'dashdot'}
    plot_acf(acf, ax=ax, vlines_kwargs=vlines_kwargs)
    fig_with_vlines.savefig(buff_with_vlines, format='rgba')
    plt.close(fig_with_vlines)

    buff.seek(0)
    buff_with_vlines.seek(0)
    plain = buff.read()
    with_vlines = buff_with_vlines.read()

    assert_(with_vlines != plain)
def draw_ranges_for_parameters(data, title='', save_path='./pictures/'):
  parameters = data.columns.values.tolist()

  # remove the flight name parameter (don't mutate the list while iterating over it)
  parameters = [p for p in parameters if p != 'flight_name']

  flight_names = np.unique(data['flight_name'])

  print len(flight_names)

  for parameter in parameters:
    plt.figure()

    axis = plt.gca()

    # ax.set_xticks(numpy.arange(0,1,0.1))
    axis.set_yticks(flight_names)
    axis.tick_params(labelright=True)
    axis.set_ylim([94., 130.])
    plt.grid()

    plt.title(title)
    plt.xlabel(parameter)
    plt.ylabel('flight name')

    colors = iter(cm.rainbow(np.linspace(0, 1,len(flight_names))))

    for flight in flight_names:
      temp = data[data.flight_name == flight][parameter]

      plt.plot([np.min(temp), np.max(temp)], [flight, flight], c=next(colors), linewidth=2.0)
    plt.savefig(save_path+title+'_'+parameter+'.jpg')
    plt.close()
Example #4
def test_radardisplay_init():
    # test that a display object can be created with and without an antenna transition
    radar = pyart.io.read_cfradial(pyart.testing.CFRADIAL_PPI_FILE)
    radar.antenna_transition = {'data': np.zeros((40, ))}
    display = pyart.graph.RadarDisplay(radar)
    assert display.antenna_transition is not None
    plt.close()
Example #5
def display_d3(fig=None, closefig=True, d3_url=None):
    """Display figure in IPython notebook via the HTML display hook

    Parameters
    ----------
    fig : matplotlib figure
        The figure to display (grabs current figure if missing)
    closefig : boolean (default: True)
        If true, close the figure so that the IPython matplotlib mode will not
        display the png version of the figure.
    d3_url : string (optional)
        The URL of the d3 library.  If not specified, a standard web path
        will be used.

    Returns
    -------
    fig_d3 : IPython.display.HTML object
        the IPython HTML rich display of the figure.

    See Also
    --------
    show_d3 : show a figure in a new browser window, notebook not required.
    enable_notebook : automatically embed figures in the IPython notebook
    """
    # import here, in case users don't have requirements installed
    from IPython.display import HTML
    import matplotlib.pyplot as plt
    if fig is None:
        fig = plt.gcf()
    if closefig:
        plt.close(fig)
    return HTML(fig_to_d3(fig, d3_url=d3_url))
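A minimal notebook usage sketch for `display_d3` as defined above; the toy figure is illustrative only.

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2, 3], [3, 1, 4, 1], marker='o')
ax.set_title('interactive d3 view')

# In a notebook cell, returning the HTML object renders the d3 version
# instead of the usual static PNG.
display_d3(fig)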
Example #6
File: ACGAN.py Project: CODEJIN/GAN
    def Test(self):
        test_Dir = "Result";        
        if not os.path.exists(test_Dir):
            os.makedirs(test_Dir);

        test_Label_List = [0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5];
        test_Label_Pattern = np.zeros((16, 10));
        test_Label_Pattern[np.arange(16), test_Label_List] = 1;            
        feed_Dict = {
            self.noise_Placeholder: np.random.uniform(-1., 1., size=[16, self.noise_Size]),
            self.label_for_Fake_Placeholder: test_Label_Pattern,
            self.is_Training_Placeholder: False
            };   #Batch is constant in the test.
        global_Step, mnist_List = self.tf_Session.run(self.test_Tensor_List, feed_dict = feed_Dict);

        fig = plt.figure(figsize=(4, 4))
        gs = gridspec.GridSpec(4, 4)
        gs.update(wspace=0.05, hspace=0.05)

        for index, mnist in enumerate(mnist_List):
            ax = plt.subplot(gs[index])
            plt.axis('off')
            ax.set_xticklabels([])
            ax.set_yticklabels([])
            ax.set_aspect('equal')
            plt.imshow(mnist.reshape(28, 28), cmap='Greys_r')

        plt.savefig('%s/S%d.png' % (test_Dir, global_Step), bbox_inches='tight');
        plt.close();
Example #7
def scree_plot(pca_obj, fname=None): 
    '''
    Scree plot for variance & cumulative variance by component from PCA. 

    Arguments: 
        - pca_obj: a fitted sklearn PCA instance
        - fname: path to write plot to file

    Output: 
        - scree plot 
    '''   
    components = pca_obj.n_components_ 
    variance = pca_obj.explained_variance_ratio_
    plt.figure()
    plt.plot(np.arange(1, components + 1), np.cumsum(variance), label='Cumulative Variance')
    plt.plot(np.arange(1, components + 1), variance, label='Variance')
    plt.xlim([0.8, components]); plt.ylim([0.0, 1.01])
    plt.xlabel('No. Components', labelpad=11); plt.ylabel('Variance Explained', labelpad=11)
    plt.legend(loc='best') 
    plt.tight_layout() 
    if fname is not None:
        plt.savefig(fname)
        plt.close() 
    else:
        plt.show() 
    return 
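A usage sketch for `scree_plot`, assuming scikit-learn is available; the random data is a placeholder for real features.

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.randn(200, 10)

pca_obj = PCA(n_components=10).fit(X)
scree_plot(pca_obj)                        # show interactively
scree_plot(pca_obj, fname='scree.png')     # or write straight to disk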
def vis_result(image, seg, gt, title1='Segmentation', title2='Ground truth', savefile=None):
    indices = np.where(seg >= 0.5)
    indices_gt = np.where(gt >= 0.5)

    im_norm = image / image.max()
    rgb_image = color.gray2rgb(im_norm)
    multiplier = [0., 1., 1.]
    multiplier_gt = [1., 1., 0.]

    im_seg = rgb_image.copy()
    im_gt = rgb_image.copy()
    im_seg[indices[0], indices[1], :] *= multiplier
    im_gt[indices_gt[0], indices_gt[1], :] *= multiplier_gt

    fig = plt.figure()
    a = fig.add_subplot(1, 2, 1)
    plt.imshow(im_seg)
    a.set_title(title1)
    a = fig.add_subplot(1, 2, 2)
    plt.imshow(im_gt)
    a.set_title(title2)

    if savefile is None:
        plt.show()
    else:
        plt.savefig(savefile)
    plt.close()
Example #9
def make_iso_visual_panel(fn, img_compo, img_map, contours1, contours3, z, pixsize, legend_suffix, name, title_compo, title_map, label_cbar):
    fig, (ax0, ax1, ax_cb) = get_figure_grids(n_panel=2)

    # plot composite
    ax_imshow(fig, ax0, img_compo, origin='upper', tocolorbar=False, tosetlim=True)

    nx, ny = img_compo.shape[:2]
    overplot_ruler(ax0, z, pixsize=pixsize, rlength_arcsec=10., nx=nx, ny=ny)

    ax0.text(5, 12, name, color='white', fontsize=12)
    ax0.text(nx-35, 12, '$z={}$'.format('%.2f'%z), color='white', fontsize=10)
    ax0.set_title(title_compo)
    ax0.title.set_position([.5, 1.03])

    # plot line map
    im = ax_imshow(fig, ax1, img_map, vmin=-1, vmax=8, origin='lower', tocolorbar=False, tosetlim=True)
    overplot_contours(ax1, contours3, lw=1.)
    overplot_contours(ax1, contours1, lw=0.2)

    make_legend_isophotes(ax1, lw=2, suffix=legend_suffix)

    ax1.set_title(title_map)
    ax1.title.set_position([.5, 1.03])

    # plot color bar
    cbar = fig.colorbar(im, cax=ax_cb, label=label_cbar, format='%i')
    ax_cb.set_aspect(20)

    # set ticks off
    for ax in [ax0, ax1]: 
        ax.axis('off')

    # saving
    fig.savefig(fn, format='pdf')
    plt.close()
Example #10
 def _figure_data(self, format):
     fig = self.plot()
     data = print_figure(fig, format)
     # We MUST close the figure, otherwise IPython's display machinery
     # will pick it up and send it as output, resulting in a double display
     plt.close(fig)
     return data
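This pattern is the usual backing for IPython's rich-display hooks; a sketch of how a class might wire it up is below. The class name and `plot` method are assumptions, only the `_figure_data`/`plt.close` idiom comes from the snippet above.

from IPython.core.pylabtools import print_figure
import matplotlib.pyplot as plt
import numpy as np

class Spectrum:
    def __init__(self, values):
        self.values = np.asarray(values)

    def plot(self):
        fig, ax = plt.subplots()
        ax.plot(self.values)
        return fig

    def _figure_data(self, format):
        fig = self.plot()
        data = print_figure(fig, format)
        plt.close(fig)  # keep IPython from also displaying the figure itself
        return data

    def _repr_png_(self):
        return self._figure_data('png')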
Example #11
def graph_view(request):
    try:
        fig = plot.figure()
        ax = fig.add_subplot(111)
        plot_data(request, ax, 0)
        plot_data(request, ax, 1)
        ax.set_xlabel("Time")
        ax.set_ylabel(u"Temperature (°C)")
        fig.autofmt_xdate()
        imgdata = StringIO.StringIO()
        fig.savefig(imgdata, format='svg')
        return Response(imgdata.getvalue(), content_type='image/svg+xml')
    except DBAPIError:
        conn_err_msg = """\
<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
  "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg width="10cm" height="10cm" viewBox="0 0 100 300"
     xmlns="http://www.w3.org/2000/svg" version="1.1">
  <desc>Database connection error message</desc>
  <text x="0" y="0" fill="red">Database error.</text>
  </svg>"""
        return Response(conn_err_msg, content_type='image/svg+xml', status_int=500)
    finally:
        plot.close('all')
Example #12
File: extract.py Project: gmace/plp
    def key_press(event): 
        
        ax = event.inaxes 
        if ax == None: return 
        ax_title = ax.get_title()
        
        if event.key == 'q': plt.close('all')

        if event.key == 'm':
            click_x, click_y = event.xdata, event.ydata
            rr = np.arange((x2-2*dpix),(x2+2*dpix+1), dtype=np.int)
            #p = ip.gauss_fit(rr, row[rr], p0=[1, x2, 1])
            #a2.plot(rr, ip.gauss(rr, *p), 'r--')
            #mx = p[1]
            mx, mval = ip.find_features(rr, row[rr], gpix=dpix)
            mx, mval = mx[0], mval[0]
            a2.plot(mx, mval, 'ro')
            fig.canvas.draw()
            fwv = cwv[np.argmin(np.abs(cwv-wav[mx]))]
            strtmp = raw_input('input wavelength at (%d, %d) = %.7f:' % (mx, my, fwv))
            if strtmp == '': strtmp = fwv
            try:
                mwv = np.float(strtmp)
                lxx.append(mx)
                lyy.append(my)
                lwv.append(mwv)
                a1.plot(mx, my, 'ro')
                fig.canvas.draw()
                print '%.7f at (%d, %d)' % (mwv, mx, my)
            except:
                print 'No input, again...'
def export(data, F, k):
    '''Write data to a png image
    
    Arguments
    ---------
    data : numpy.ndarray
        array containing the data to be written as png image
    F : float
        feed rate of the current configuration
    k : float
        rate constant of the current configuration
    '''
        
    figsize = tuple(s / 72.0 for s in data.shape)
    fig = plt.figure(figsize=figsize, dpi=72.0, facecolor='white')
    fig.add_axes([0, 0, 1, 1], frameon=False)
    plt.xticks([])
    plt.yticks([])

    plt.imshow(data, cmap=plt.cm.RdBu_r, interpolation='bicubic')
    plt.gci().set_clim(0, 1)

    filename = './study/F{:03d}-k{:03d}.png'.format(int(1000*F), int(1000*k))
    plt.savefig(filename, dpi=72.0)
    plt.close()
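A usage sketch for `export`; the array is random stand-in data and the F/k values are placeholders. The hard-coded `./study/` output directory must exist.

import os
import numpy as np

os.makedirs('./study', exist_ok=True)
field = np.random.rand(256, 256)   # stand-in for the simulated concentration field
export(field, F=0.037, k=0.060)    # writes a PNG under ./study/ named from F and k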
Example #14
    def test_savefig(self):
        # Not sure if this is the right way to test....
        cm = mat.ClusterGrid(self.df_norm, **self.default_kws)
        cm.plot(**self.default_plot_kws)
        cm.savefig(tempfile.NamedTemporaryFile(), format='png')

        plt.close('all')
Example #15
    def plot_cc(self):
        with PdfPages("CCplot.pdf") as pdf:
            for each_frame in self.frame_list:
                fig, axs = plt.subplots(5,5, sharex=True, sharey=True, squeeze=True, facecolor='w', edgecolor='k')
                fig.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.1, hspace = 0.5, wspace=0.1)

                fig.text(0.5, 0.04, 'CCweak', ha='center')
                fig.text(0.04, 0.5, 'CCall', va='center', rotation='vertical')
                fig.set_size_inches(8,11)

                axs = axs.flatten()

                for i in range(len(each_frame)):
                   ccall, ccweak = get_CCs(each_frame[i])
                   if len(ccweak) > len(ccall):
                     diff = len(ccweak) - len(ccall)
                     axs[i].plot(ccweak[0:(len(ccweak)-diff)], ccall, 'o', linewidth=1, rasterized=True)
                   elif len(ccweak) < len(ccall):
                      diff = len(ccall) - len(ccweak)
                      axs[i].plot(ccweak, ccall[0:(len(ccall)-diff)], 'o', linewidth=1, rasterized=True)
                   else:
                      axs[i].plot(ccweak, ccall, 'o', linewidth=1, rasterized=True)

                   titles = each_frame[i].replace('-shelxd.log', '')
                   axs[i].set_title(titles, fontsize=8)
                pdf.savefig(fig)
                plt.close()
	def draw_img_for_viewing_ice(self):
		#print "Press 'p' to save PNG."
		global colmax
		global colmin
		fig = P.figure(num=None, figsize=(13.5, 5), dpi=100, facecolor='w', edgecolor='k')
		cid1 = fig.canvas.mpl_connect('key_press_event', self.on_keypress_for_viewing)
		cid2 = fig.canvas.mpl_connect('button_press_event', self.on_click)
		canvas = fig.add_subplot(121)
		canvas.set_title(self.filename)
		self.axes = P.imshow(self.inarr, origin='lower', vmax = colmax, vmin = colmin)
		self.colbar = P.colorbar(self.axes, pad=0.01)
		self.orglims = self.axes.get_clim()
		canvas = fig.add_subplot(122)
		canvas.set_title("Angular Average")
		
		maxAngAvg = (self.inangavg).max()
		numQLabels = len(eDD.iceHInvAngQ.keys())+1
		labelPosition = maxAngAvg/numQLabels
		for i,j in eDD.iceHInvAngQ.iteritems():
			P.axvline(j,0,colmax,color='r')
			P.text(j,labelPosition,str(i), rotation="45")
			labelPosition += maxAngAvg/numQLabels
			
		P.plot(self.inangavgQ, self.inangavg)
		P.xlabel("Q (A-1)")
		P.ylabel("I(Q) (ADU/srad)")
		pngtag = original_dir + "peakfit-gdvn_%s.png" % (self.filename)
		P.savefig(pngtag)
		print "%s saved." % (pngtag)
		P.close()
Example #17
    def test_heatmap_ticklabel_rotation(self):

        f, ax = plt.subplots(figsize=(2, 2))
        mat.heatmap(self.df_norm, ax=ax)

        for t in ax.get_xticklabels():
            nt.assert_equal(t.get_rotation(), 0)

        for t in ax.get_yticklabels():
            nt.assert_equal(t.get_rotation(), 90)

        plt.close(f)

        df = self.df_norm.copy()
        df.columns = [str(c) * 10 for c in df.columns]
        df.index = [i * 10 for i in df.index]

        f, ax = plt.subplots(figsize=(2, 2))
        mat.heatmap(df, ax=ax)

        for t in ax.get_xticklabels():
            nt.assert_equal(t.get_rotation(), 90)

        for t in ax.get_yticklabels():
            nt.assert_equal(t.get_rotation(), 0)

        plt.close(f)
Example #18
    def make_bar(
        x,
        y,
        f_name,
        title=None,
        legend=None,
        x_label=None,
        y_label=None,
        x_ticks=None,
        y_ticks=None,
    ):
        fig = plt.figure()

        if title is not None:
            plt.title(title, fontsize=16)
        if x_label is not None:
            plt.xlabel(x_label)
        if y_label is not None:
            plt.ylabel(y_label)
        if x_ticks is not None:
            plt.xticks(x, x_ticks)
        if y_ticks is not None:
            plt.yticks(y_ticks)

        plt.bar(x, y, align="center")

        if legend is not None:
            plt.legend(legend)

        plt.savefig(f_name)
        plt.close(fig)
Example #19
    def make_line(
        x,
        y,
        f_name,
        title=None,
        legend=None,
        x_label=None,
        y_label=None,
        x_ticks=None,
        y_ticks=None,
    ):
        fig = plt.figure()

        if title is not None:
            plt.title(title, fontsize=16)
        if x_label is not None:
            plt.xlabel(x_label)
        if y_label is not None:
            plt.ylabel(y_label)
        if x_ticks is not None:
            plt.xticks(x, x_ticks)
        if y_ticks is not None:
            plt.yticks(y_ticks)

        if isinstance(y[0], list):
            for data in y:
                plt.plot(x, data)
        else:
            plt.plot(x, y)

        if legend is not None:
            plt.legend(legend)

        plt.savefig(f_name)
        plt.close(fig)
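A usage sketch for the two helpers above; the values and file names are placeholders, and both are treated as plain functions here (the indentation suggests they are static methods on some plotting class in the original).

xs = list(range(1, 6))

make_bar(xs, [3, 7, 2, 5, 4], 'bar.png',
         title='Counts per bucket', x_label='bucket', y_label='count')

# make_line accepts either one series or a list of series for y
make_line(xs, [[1, 2, 3, 4, 5], [5, 4, 3, 2, 1]], 'lines.png',
          title='Two series', legend=['up', 'down'])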
Example #20
def plot_jacobian(A, name, cmap= plt.cm.coolwarm, normalize=True, precision=1e-6):

    """
    Customized visualization of jacobian matrices for observing
    sparsity patterns
    """
    
    fig, ax = plt.subplots()
    
    if normalize is True:
        plt.imshow(A, interpolation='none', cmap=cmap,
                   norm = mpl.colors.Normalize(vmin=-1.,vmax=1.))
    else:
        plt.imshow(A, interpolation='none', cmap=cmap)        
    plt.colorbar(format=ticker.FuncFormatter(fmt))
    
    ax.spy(A, marker='.', markersize=0,  precision=precision)
    
    ax.spines['right'].set_visible(True)
    ax.spines['bottom'].set_visible(True)
    ax.xaxis.set_ticks_position('top')
    ax.yaxis.set_ticks_position('left')

    xlabels = np.linspace(0, A.shape[0], 5, True, dtype=int)
    ylabels = np.linspace(0, A.shape[1], 5, True, dtype=int)

    plt.xticks(xlabels)
    plt.yticks(ylabels)

    plt.savefig(name, bbox_inches='tight', pad_inches=0.05)
    
    plt.close()

    return
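A usage sketch for `plot_jacobian`. The body references a module-level `fmt` colorbar formatter that is not shown, so a simple stand-in is defined alongside it here; the banded test matrix is arbitrary.

import numpy as np

def fmt(x, pos):
    # stand-in for the module's colorbar tick formatter
    return '{:.1f}'.format(x)

A = np.diag(np.ones(50)) + np.diag(0.5 * np.ones(49), k=1)
plot_jacobian(A, 'jacobian_sparsity.pdf')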
Example #21
    def plot(self, path, num_bins=0):
        """
        draw a histogram to represent the data
        :param num_bins: number of bars, default is (Number different word in the file )/ 2,
                            if it is too large take 50 as default (see '#default of num_bins')
        """
        # plot data
        mu = self.Average  # mean of distribution
        sigma = self.StdE  # standard deviation of distribution
        if num_bins == 0:  # default of num_bins
            num_bins = min([round(self.NumWord / 2), 50])
            # print num_bins
        # the histogram of the data
        n, bins, patches = plt.hist(self.WordCount.values(), num_bins, normed=1, facecolor='green', alpha=0.5)
        # add a 'best fit' line
        y = mlab.normpdf(bins, mu, sigma)
        plt.plot(bins, y, 'r--')
        plt.xlabel('Word Count')
        plt.ylabel('Probability(how many words have this word count)')
        plt.title(r'Histogram of word count: $\mu=' + str(self.Average) + '$, $\sigma=' + str(self.StdE) + '$')

        # Tweak spacing to prevent clipping of ylabel
        plt.subplots_adjust(left=0.15)
        plt.savefig(path)
        plt.close()
Example #22
def make_fish(zoom=False):
    plt.close(1)
    plt.figure(1, figsize=(6, 4))
    plt.plot(plot_limits['pitch'], plot_limits['rolldev'], '-g', lw=3)
    plt.plot(plot_limits['pitch'], -plot_limits['rolldev'], '-g', lw=3)
    plt.plot(pitch.midvals, roll.midvals, '.b', ms=1, alpha=0.7)

    p, r = make_ellipse()  # pitch, off nominal roll
    plt.plot(p, r, '-c', lw=2)

    gf = -0.08  # Fudge on pitch value for illustrative purposes
    plt.plot(greta['pitch'] + gf, -greta['roll'], '.r', ms=1, alpha=0.7)
    plt.plot(greta['pitch'][-1] + gf, -greta['roll'][-1], 'xr', ms=10, mew=2)

    if zoom:
        plt.xlim(46.3, 56.1)
        plt.ylim(4.1, 7.3)
    else:
        plt.ylim(-22, 22)
        plt.xlim(40, 180)
    plt.xlabel('Sun pitch angle (deg)')
    plt.ylabel('Sun off-nominal roll angle (deg)')
    plt.title('Mission off-nominal roll vs. pitch (5 minute samples)')
    plt.grid()
    plt.tight_layout()
    plt.savefig('fish{}.png'.format('_zoom' if zoom else ''))
def test_singleton_ax_dim():
    for axis, direction in enumerate("xyz"):
        shape = [5, 6, 7]
        shape[axis] = 1
        img = nibabel.Nifti1Image(np.ones(shape), np.eye(4))
        plot_stat_map(img, None, display_mode=direction)
        plt.close()
Example #24
def test_radardisplay_get_colorbar_label():
    radar = pyart.io.read_cfradial(pyart.testing.CFRADIAL_PPI_FILE)
    display = pyart.graph.RadarDisplay(radar)

    # default is to base the label on the standard_name key
    assert (display._get_colorbar_label('reflectivity_horizontal') ==
            'equivalent reflectivity factor (dBZ)')

    # next is to look at the long_name
    del display.fields['reflectivity_horizontal']['standard_name']
    assert (display._get_colorbar_label('reflectivity_horizontal') ==
            'Reflectivity (dBZ)')

    # use the field if standard_name and long_name missing
    del display.fields['reflectivity_horizontal']['long_name']
    print(display._get_colorbar_label('reflectivity_horizontal'))
    assert (display._get_colorbar_label('reflectivity_horizontal') ==
            'reflectivity horizontal (dBZ)')

    # no units if key is missing
    del display.fields['reflectivity_horizontal']['units']
    print(display._get_colorbar_label('reflectivity_horizontal'))
    assert (display._get_colorbar_label('reflectivity_horizontal') ==
            'reflectivity horizontal (?)')
    plt.close()
def test_plot_stat_map():
    img = _generate_img()

    plot_stat_map(img, cut_coords=(80, -120, -60))

    # Smoke test coordinate finder, with and without mask
    masked_img = nibabel.Nifti1Image(
        np.ma.masked_equal(img.get_data(), 0),
        mni_affine)
    plot_stat_map(masked_img, display_mode='x')
    plot_stat_map(img, display_mode='y', cut_coords=2)

    # 'yx' display_mode
    plot_stat_map(img, display_mode='yx')

    # regression test #510
    data = np.zeros((91, 109, 91))
    aff = np.eye(4)
    new_img = nibabel.Nifti1Image(data, aff)
    plot_stat_map(new_img, threshold=1000, colorbar=True)

    rng = np.random.RandomState(42)
    data = rng.randn(91, 109, 91)
    new_img = nibabel.Nifti1Image(data, aff)
    plot_stat_map(new_img, threshold=1000, colorbar=True)

    # Save execution time and memory
    plt.close()
Example #26
def test_radardisplay_user_specified_labels():
    # test that labels are set when a user specifies them.
    radar = pyart.io.read_cfradial(pyart.testing.CFRADIAL_PPI_FILE)
    display = pyart.graph.RadarDisplay(radar)
    fig = plt.figure()
    ax = fig.add_subplot(111)

    display._set_ray_title('field', 0, 'foo', ax)
    assert ax.get_title() == 'foo'

    display._label_axes_ppi(('foo', 'bar'), ax)
    assert ax.get_xlabel() == 'foo'
    assert ax.get_ylabel() == 'bar'

    display._label_axes_rhi(('spam', 'eggs'), ax)
    assert ax.get_xlabel() == 'spam'
    assert ax.get_ylabel() == 'eggs'

    display._label_axes_ray(('baz', 'qux'), 'field', ax)
    assert ax.get_xlabel() == 'baz'
    assert ax.get_ylabel() == 'qux'

    display._label_axes_vpt(('nick', 'nock'), False, ax)
    assert ax.get_xlabel() == 'nick'
    assert ax.get_ylabel() == 'nock'
    plt.close()
Example #27
def test_radardisplay_misc():
    # misc methods which are not tested above
    radar = pyart.io.read_cfradial(pyart.testing.CFRADIAL_PPI_FILE)
    display = pyart.graph.RadarDisplay(radar)
    fig = plt.figure()
    ax = fig.add_subplot(111)

    # _set_vpt_title with a title
    display._set_vpt_title('foo_field', 'title_string', ax)
    assert ax.get_title() == 'title_string'

    # _generate_field_name method
    fn = pyart.graph.common.generate_field_name(
        radar, 'reflectivity_horizontal')
    assert fn == 'Equivalent reflectivity factor'

    display.fields['reflectivity_horizontal'].pop('standard_name')
    fn = pyart.graph.common.generate_field_name(
        radar, 'reflectivity_horizontal')
    assert fn == 'Reflectivity'

    display.fields['reflectivity_horizontal'].pop('long_name')
    fn = pyart.graph.common.generate_field_name(
        radar, 'reflectivity_horizontal')
    assert fn == 'Reflectivity horizontal'

    plt.close()
def test_plot_anat():
    img = _generate_img()

    # Test saving with empty plot
    z_slicer = plot_anat(anat_img=False, display_mode='z')
    filename = tempfile.mktemp(suffix='.png')
    try:
        z_slicer.savefig(filename)
    finally:
        os.remove(filename)

    z_slicer = plot_anat(display_mode='z')
    filename = tempfile.mktemp(suffix='.png')
    try:
        z_slicer.savefig(filename)
    finally:
        os.remove(filename)

    ortho_slicer = plot_anat(img, dim='auto')
    filename = tempfile.mktemp(suffix='.png')
    try:
        ortho_slicer.savefig(filename)
    finally:
        os.remove(filename)

    # Save execution time and memory
    plt.close()
Example #29
def test_radardisplay_plot_rhi_reverse():
    radar = pyart.io.read_cfradial(pyart.testing.CFRADIAL_RHI_FILE)
    display = pyart.graph.RadarDisplay(radar)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    display.plot('reflectivity_horizontal', 0, ax=ax, reverse_xaxis=True)
    plt.close()
def plot_dpi_dpr_distribution(args, dpis, dprs, diagnoses):
    print log.INFO, 'Plotting estimate distributions...'
    diagnoses = np.array(diagnoses)
    diagnoses[(0.25 <= diagnoses) & (diagnoses <= 0.75)] = 0.5

    # Setup plot
    fig, ax = plt.subplots()
    pt.setup_axes(plt, ax)

    biomarkers_str = args.method if args.biomarkers is None else ', '.join(args.biomarkers)
    ax.set_title('DP estimation using {0} at {1}'.format(biomarkers_str, ', '.join(args.visits)))
    ax.set_xlabel('DP')
    ax.set_ylabel('DPR')

    plt.scatter(dpis, dprs, c=diagnoses, edgecolor='none', s=25.0,
                vmin=0.0, vmax=1.0, cmap=pt.progression_cmap,
                alpha=0.5)

    # Plot legend
    # noinspection PyUnresolvedReferences
    rects = [mpl.patches.Rectangle((0, 0), 1, 1, fc=pt.color_cn + (0.5,), linewidth=0),
             mpl.patches.Rectangle((0, 0), 1, 1, fc=pt.color_mci + (0.5,), linewidth=0),
             mpl.patches.Rectangle((0, 0), 1, 1, fc=pt.color_ad + (0.5,), linewidth=0)]
    labels = ['CN', 'MCI', 'AD']
    legend = ax.legend(rects, labels, fontsize=10, ncol=len(rects), loc='upper center', framealpha=0.9)
    legend.get_frame().set_edgecolor((0.6, 0.6, 0.6))

    # Draw or save the plot
    plt.tight_layout()
    if args.plot_file is not None:
        plt.savefig(args.plot_file, transparent=True)
    else:
        plt.show()
    plt.close(fig)
Example #31
def run_write_one(ticid, sector, out_dir, lc_author = 'qlp',local_dir = None,
               run_tag = None, config_file = None, plot=False):
    """
    Run the full BLS search on a single TIC ID and write the results to disk.

    Parameters
    ----------
    ticid : int
       tess input catalog number
    sector : int
       tess sector to search
    out_dir : string
        directory to store all the results. One dir per ticid will be created.
    lc_author : string
        'qlp' or 'tess-spoc'
    local_dir : string
        default is None, in which case data is pulled from the MAST API.
        Otherwise contains the directory name for the location of the data files.
    run_tag : string, optional
        directory name and string to attach to output file names. 

    Returns
    -------
    None.

    """
    
    if run_tag is None:
        now = datetime.now()
        run_tag = now.strftime("crz%m%d%Y") + "_"+lc_author
    
    if config_file is None:
        config = load_def_config()
    else:
        print("Not implemented: read in config file")
        #config = pipeline.load_config_file()
    
    vetter_list = load_def_vetter()
    thresholds = load_def_thresholds()
    
    
    target_dir = "/tic%09is%02i/" % (int(ticid), sector)
    log_name = out_dir + target_dir + "tic%09i-%s.log" % (ticid, run_tag)
    output_file = out_dir + target_dir + "tic%09i-%s-tcesum.csv" % (ticid, run_tag)
    
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    try:
        os.mkdir(out_dir+target_dir)   
    except FileExistsError:
        pass
    except PermissionError as e:
        log_obj = open(log_name,'w+')
        log_obj.write("Permission Error on Target Directory ")
        log_obj.write(str(e))
        log_obj.close()
        
    try:
        
        lcdata = genlc.hlsp(ticid, sector, author=lc_author,local_dir = local_dir)
        if lc_author == 'qlp':
            lcdata['quality'] = lcdata['quality'].value & 2237
         
        tce_list, result_strings, metrics_list = pipeline.search_and_vet_one(ticid, 
                                sector, lcdata, config, 
                                vetter_list, thresholds, plot=plot)
        
        if plot:
            plotfilename = "tic%09i-%s-plot.png" % (ticid, 
                                                    run_tag)
            plt.savefig(out_dir + target_dir + plotfilename, bbox_inches='tight')
            plt.close()
        
        output_obj = open(output_file, 'w')
        for r in result_strings:
            output_obj.write(r)
    
        output_obj.close()
        
        #Write TCEs
        for tce in tce_list:
            tcefilename = "tic%09i-%02i-%s.json" % (ticid, 
                                                    int(tce['event']), 
                                                    run_tag)
    
            full_filename = out_dir + target_dir + tcefilename
            tce['lc_author'] = lc_author
            tce.to_json(full_filename)
            
        #Write metrics    
        #print(metrics_list)
        #for i, metric in enumerate(metrics_list):
        #    metricfilename = "tic%09i-%02i-%s-vetting.json" % (ticid, 
        #                                            i+1, run_tag)
        #    full_filename = out_dir + target_dir + metricfilename
        #    thejson = json.dumps(metric)
        #    mobj = open(full_filename,'w+')
        #    mobj.write(thejson)
        #    mobj.close()
 

        log_obj = open(log_name, 'w+')
        log_obj.write("Success.")
        log_obj.close()

    except Exception as e:
        log_obj = open(log_name,'w+')
        log_obj.write("Failed to create TCEs for TIC %i for Sector %i \n" % (ticid, sector))
        log_obj.write(str(e))
        log_obj.close() 
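A hypothetical invocation of `run_write_one`; the TIC ID, sector, and output directory are placeholders, and the surrounding module's imports (genlc, pipeline, the vetters, ...) are assumed to be available.

# Search a single target with QLP light curves and write TCEs/plots under ./bls_out
run_write_one(123456789, 14, './bls_out', lc_author='qlp', plot=True)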
            clients[client_socket] = user

            print('Accepted new connection from {}:{}, username: {}'.format(
                *client_address, user['data'].decode('utf-8')))

        # Else existing socket is sending a message
        else:

            # Receive message
            message = receive_message(notified_socket)

            # If False, client disconnected, cleanup
            if message is False:
                print('Closed connection from: {}'.format(
                    clients[notified_socket]['data'].decode('utf-8')))
                plt.close('all')
                # Remove from list for socket.socket()
                sockets_list.remove(notified_socket)

                # Remove from our list of users
                del clients[notified_socket]

                continue

            # Get user by notified socket, so we will know who sent the message
            user = clients[notified_socket]

            print(
                f'Received message from {user["data"].decode("utf-8")}: {message["data"].decode("utf-8")}')
            x = message["data"].decode("utf-8")
    def divide_region_n_train_val_test(self):

        region_dict = {'skyscraper':[0,[]],'suburban':[0,[]],'shopping':[0,[]]}
        test_ratio = 0.25
        val_ratio = 0.25

        dataset_div= {'train':{'skyscraper':[0,[]],'suburban':[0,[]],'shopping':[0,[]]},
                      'val'  :{'skyscraper':[0,[]],'suburban':[0,[]],'shopping':[0,[]]},
                      'test' :{'skyscraper':[0,[]],'suburban':[0,[]],'shopping':[0,[]]}}

        process_edges = []
        # label and compute distance
        for i, path in enumerate(self.all_edges):
            process_edges.append(label_region_n_compute_distance(i,path))

            region_dict[process_edges[i][4]][1].append(process_edges[i])
            region_dict[process_edges[i][4]][0] = region_dict[process_edges[i][4]][0] + process_edges[i][3]


        for region_type, distance_and_path_list in region_dict.items():
            total_distance = distance_and_path_list[0]
            test_distance = total_distance*test_ratio
            val_distance = total_distance*val_ratio

            path_list = distance_and_path_list[1] 
            tem_list = copy.deepcopy(path_list)

            random.seed(2019)
            shuffle(tem_list)

            sum_distance = 0 

            # Test Set
            while sum_distance < test_distance*0.8:
                path = tem_list.pop()
                sum_distance += path[3]
                dataset_div['test'][region_type][0] = dataset_div['test'][region_type][0] + path[3]
                dataset_div['test'][region_type][1].append(path)

            # Val Set
            while sum_distance < (test_distance + val_distance)*0.8:
                path = tem_list.pop()
                sum_distance += path[3]
                dataset_div['val'][region_type][0] = dataset_div['val'][region_type][0] + path[3]
                dataset_div['val'][region_type][1].append(path)

            # Train Set
            dataset_div['train'][region_type][0] = total_distance - sum_distance
            dataset_div['train'][region_type][1] = tem_list

        color=['red','green','blue']
        ## Visualization with respect to region
        fig, ax = plt.subplots(figsize=(30, 15))
        div_type = 'train'

        vis_txt_height = 800
        for div_type in ['train','val','test']:
            for region in ['skyscraper','suburban','shopping']:
                vis_path_list = dataset_div[div_type][region][1]
                for path in vis_path_list:
                    x = [path[1][0],path[2][0]]
                    y = [path[1][1],path[2][1]]

                    if region == 'skyscraper':
                        ax.plot(x, y, color='red', zorder=1, lw=3)
                    elif region == 'suburban':
                        ax.plot(x, y, color='blue', zorder=1, lw=3)
                    elif region == 'shopping':
                        ax.plot(x, y, color='green', zorder=1, lw=3)

                    ax.scatter(x, y,color='black', s=120, zorder=2)

                # Visualize distance text
                distance = dataset_div[div_type][region][0]
                if region == 'skyscraper':
                    ax.annotate(div_type+' - '+ region+': '+str(distance), (-800, vis_txt_height),fontsize=20,color='red')
                elif region == 'suburban':
                    ax.annotate(div_type+' - '+ region+': '+str(distance), (-800, vis_txt_height),fontsize=20,color='blue')
                elif region == 'shopping':
                    ax.annotate(div_type+' - '+ region+': '+str(distance), (-800, vis_txt_height),fontsize=20,color='green')
                vis_txt_height-=30

        plt.savefig('region.png', dpi=200)
        plt.close()

        ## Visualization with respect to train/val/test
        fig, ax = plt.subplots(figsize=(30, 15))
        div_type = 'train'
        vis_txt_height = 800
        for div_type in ['train','val','test']:
            for region in ['skyscraper','suburban','shopping']:
                vis_path_list = dataset_div[div_type][region][1]
                for path in vis_path_list:
                    x = [path[1][0],path[2][0]]
                    y = [path[1][1],path[2][1]]

                    if div_type == 'train':
                        ax.plot(x, y, color='red', zorder=1, lw=3)
                    elif div_type == 'val':
                        ax.plot(x, y, color='blue', zorder=1, lw=3)
                    elif div_type == 'test':
                        ax.plot(x, y, color='green', zorder=1, lw=3)

                    ax.scatter(x, y,color='black', s=120, zorder=2)

                # Visualize distance text
                distance = dataset_div[div_type][region][0]
                if div_type == 'train':
                    ax.annotate(div_type+' - '+ region+': '+str(distance), (-800, vis_txt_height),fontsize=20,color='red')
                elif div_type == 'val':
                    ax.annotate(div_type+' - '+ region+': '+str(distance), (-800, vis_txt_height),fontsize=20,color='blue')
                elif div_type == 'test':
                    ax.annotate(div_type+' - '+ region+': '+str(distance), (-800, vis_txt_height),fontsize=20,color='green')
                vis_txt_height-=30

                    #ax.annotate(txt, (x, y))
        plt.savefig('train_val_test.png', dpi=200)
        plt.close()

        return dataset_div
Example #34
def dependence_study(use_efficiencies=False):

    extra_vars = [
        gcm().ltime_var
    ]
    all_vars = gcm().phsp_vars + extra_vars

    # Current mode stuff
    data = gcm().get_data([f.var for f in extra_vars])
    add_variables.append_phsp(data)
    df_sel = final_selection.get_final_selection()
    df_sel &= selection.delta_mass_signal_region()

    gen = get_model()

    if use_efficiencies:
        outfile = gcm().get_output_path('effs') + 'Gen_DATA_Eff_dep_eff.pdf'
        gen['weight'] = get_efficiency_gen()
    else:
        outfile = gcm().get_output_path('effs') + 'Gen_DATA_Eff_dep.pdf'
        gen['weight'] = 1.

    lim_file = gcm().get_output_path('effs') + 'limits_for_eff.p'
    with PdfPages(outfile) as pdf:
        for selected, plotted in permutations(all_vars, 2):
            log.info('Plotting {} in intervals of {}'.format(
                plotted.var, selected.var))
            percentiles = np.arange(0, 1.1, 0.2)
            boundaries = helpers.weighted_quantile(
                data[selected.var][df_sel], percentiles)
            fig, ax = plt.subplots(figsize=(10, 10))
            for low, high in zip(boundaries[:-1], boundaries[1:]):
                num_sel = (data[selected.var] > low) & (data[selected.var] < high)  # NOQA
                den_sel = (gen[selected.var] > low) & (gen[selected.var] < high)

                denominator = gen[plotted.var][den_sel]
                numerator = data[plotted.var][df_sel & num_sel]

                weight_d = gen['weight'][den_sel]
                weight_d /= np.sum(weight_d)
                weight_n = np.ones(numerator.index.size)*1./numerator.index.size  # NOQA

                x, y, x_err, y_err = helpers.make_efficiency(
                    numerator, denominator, 50, weight_n, weight_d, independent=True)  # NOQA
                options = dict(
                    fmt='o', markersize=5, capthick=1, capsize=0, elinewidth=2,
                    alpha=1)

                rlow, prec = helpers.rounder(low, boundaries)
                rhigh, _ = helpers.rounder(high, boundaries)

                spec = '{{:.{}f}}'.format(prec)
                label = r'${} <$ {} $ < {}$'.format(
                    spec.format(rlow), selected.xlabel, spec.format(rhigh))

                ax.errorbar(x, y, y_err, x_err, label=label, **options)
            ax.set_xlabel(plotted.xlabel)
            ax.set_ylabel('Relative efficiency')
            try:
                limits = load(lim_file)
            except:
                log.info('Creating new limits file')
                limits = {}
            if limits is None:
                log.info('Creating new limits file')
                limits = {}

            if (plotted.var, selected.var) not in limits or use_efficiencies is False:  # NOQA
                plot_utils.y_margin_scaler(ax, hf=0.4)
                limits[(plotted.var, selected.var)] = ax.get_ylim()
            else:
                log.info('Applying limits')
                lim = limits[(plotted.var, selected.var)]
                ax.set_ylim(lim)
            dump(limits, lim_file)
            ax.legend()
            pdf.savefig(plt.gcf())
            plt.close()
from numpy import genfromtxt,zeros,c_,mean,arange,where
from obspy.core import UTCDateTime
from matplotlib import pyplot as plt
from scipy.interpolate import interp1d
from mudpy.forward import lowpass
from datetime import timedelta
from obspy import Stream,Trace

plt.close("all")

station='crot'
fin='/Users/dmelgar/Lefkada2015/tsunami/raw/'+station+'_ioc.txt'
fout='/Users/dmelgar/Lefkada2015/tsunami/sac/'+station+'.sac'
plot_out='/Users/dmelgar/Lefkada2015/tsunami/plots/'+station+'.pdf'
time_epi=UTCDateTime('2015-11-17T07:10:07')
tcut=timedelta(seconds=72*3600)
tprior=timedelta(hours=48)
spike_threshold=0.3

def highpass(data,fcorner,fsample,order):
    '''
    Make a highpass zero phase filter
    '''
    from scipy.signal import butter,filtfilt
    from numpy import size,array
    
    fnyquist=fsample/2
    b, a = butter(order, array(fcorner)/(fnyquist),'highpass')
    data_filt=filtfilt(b,a,data)
    return data_filt
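A quick check of the `highpass` helper on a synthetic one-hertz record: a slow drift plus a five-minute oscillation, with the corner placed between the two. The corner frequency and amplitudes are illustrative.

import numpy as np

t = np.arange(0, 7200.0)                        # two hours sampled at 1 Hz
drift = 0.5 * np.sin(2 * np.pi * t / 7200.0)    # slow, tide-like drift
wave = 0.05 * np.sin(2 * np.pi * t / 300.0)     # 5-minute oscillation
filtered = highpass(drift + wave, fcorner=1 / 1800.0, fsample=1.0, order=2)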
Example #36
def plot_variable(u, name, direc, 
                  coords              = None,
                  cells               = None,
                  figsize             = (8,7),
                  cmap                = 'gist_yarg',
                  scale               = 'lin',
                  numLvls             = 10,
                  levels              = None,
                  levels_2            = None,
                  umin                = None,
                  umax                = None,
                  normalize_vec       = False,
                  plot_tp             = False,
                  tp_kwargs           = {'linestyle'      : '-',
                                         'lw'             : 1.0,
                                         'color'          : 'k',
                                         'alpha'          : 0.5},
                  show                = True,
                  hide_ax_tick_labels = False,
                  xlabel              = r'$x$',
                  ylabel              = r'$y$',
                  equal_axes          = True,
                  title               = '',
                  hide_axis           = False,
                  colorbar_loc        = 'right',
                  contour_type        = 'filled',
                  extend              = 'neither',
                  ext                 = '.pdf',
                  plot_quiver         = True,
                  quiver_kwargs       = {'pivot'          : 'middle',
                                         'color'          : 'k',
                                         'alpha'          : 0.8,
                                         'width'          : 0.004,
                                         'headwidth'      : 4.0, 
                                         'headlength'     : 4.0, 
                                         'headaxislength' : 4.0},
                  res                 = 150,
                  cb                  = True,
                  cb_format           = '%.1e'):
  """
  Plot the scalar or vector field ``u`` and save it to ``direc + name + ext``.

  ``u`` may be a NumPy array / list / tuple (with ``coords`` and ``cells``
  supplied) or a FEniCS/dolfin Function; vector fields are reduced to their
  L^2 norm before contouring.
  """
  vec = False  # assume initially that 'u' is not a vector

  # if 'u' is a NumPy array and the cell arrays and coordinates are supplied :
  if (type(u) == np.ndarray or type(u) == list or type(u) == tuple) \
    and type(cells) == np.ndarray and len(coords) == 2:
    t = cells;
    x = coords[0]
    y = coords[1]
    v = u
   
    # if there are multiple components to 'u', it is a vector :
    if len(np.shape(u)) > 1:
      vec = True  # used for plotting below
      v0  = u[0]
      v1  = u[1]
      # compute norm :
      v     = 0
      for k in u:
        v  += k**2
      v     = np.sqrt(v + 1e-16)

  # if 'u' is a FEniCS Function :
  elif    type(u) == indexed.Indexed \
       or type(u) == fe.dolfin.function.Function \
       or type(u) == fe.dolfin.functions.function.Function \
       or type(u) == da.function.Function:
    
    # if this is a scalar :
    if len(u.ufl_shape) == 0:
      mesh     = u.function_space().mesh()
      v        = u.compute_vertex_values(mesh)
    
    # otherwise it is a vector, so calculate the L^2 norm :
    else:
      vec = True  # used for plotting below
      # if the function is defined on a mixed space, deepcopy :
      # TODO: there is a way to do this without deepcopy
      if type(u[0]) == indexed.Indexed:
        out    = u.split(True)
      else:
        out    = u
      
      # extract the mesh :
      mesh = out[0].function_space().mesh()
    
      # compute norm :
      v     = 0
      for k in out:
        kv  = k.compute_vertex_values(mesh)
        v  += kv**2
      v     = np.sqrt(v + 1e-16)
      v0    = out[0].compute_vertex_values(mesh)
      v1    = out[1].compute_vertex_values(mesh)

    t    = mesh.cells()
    x    = mesh.coordinates()[:,0]
    y    = mesh.coordinates()[:,1]

  # if normalized vectors are desired :
  if vec and normalize_vec:
    v0 = v0 / v
    v1 = v1 / v
  
  if vec:  print_text("::: plotting vector variable :::", 'red')
  else:    print_text("::: plotting scalar variable :::", 'red')

  #=============================================================================
  # plotting :
  if umin != None and levels is None:
    vmin = umin
  elif levels is not None:
    vmin = levels.min()
  else:
    vmin = v.min()

  if umax != None and levels is None:
    vmax = umax
  elif levels is not None:
    vmax = levels.max()
  else:
    vmax = v.max()
  
  # set the extended colormap :  
  cmap = plt.get_cmap(cmap)
  
  # contour levels :
  if scale == 'log':
    if levels is None:
      levels    = np.logspace(np.log10(vmin), np.log10(vmax), numLvls)
    v[v < vmin] = vmin + 2e-16
    v[v > vmax] = vmax - 2e-16
    formatter   = LogFormatter(10, labelOnlyBase=False)
    norm        = colors.LogNorm()
  
  # contour levels :
  elif scale == 'sym_log':
    if levels is None:
      levels  = np.linspace(vmin, vmax, numLvls)
    v[v < vmin] = vmin + 2e-16
    v[v > vmax] = vmax - 2e-16
    formatter   = LogFormatter(e, labelOnlyBase=False)
    norm        = colors.SymLogNorm(vmin=vmin, vmax=vmax,
                                    linscale=0.001, linthresh=0.001)
  
  elif scale == 'lin':
    if levels is None:
      levels  = np.linspace(vmin, vmax, numLvls)
    norm = colors.BoundaryNorm(levels, cmap.N)
  
  elif scale == 'bool':
    v[v < 0.0] = 0.0
    levels  = [0, 1, 2]
    norm    = colors.BoundaryNorm(levels, cmap.N)

  fig = plt.figure(figsize=figsize)
  ax  = fig.add_subplot(111)
  ax.set_xlabel(xlabel)
  ax.set_ylabel(ylabel)
  if hide_ax_tick_labels:
    ax.set_xticklabels([])
    ax.set_yticklabels([])
  if hide_axis:
    ax.axis('off')
  if equal_axes:
    ax.axis('equal')

  if contour_type == 'filled':
    # if the number of degrees of freedom equals the number of cells, a DG space is used :
    if len(v) == len(t):
      cs = ax.tripcolor(mesh2triang(mesh), v, shading='flat',
                        cmap=cmap, norm=norm)
    # otherwise, a CG space is used :
    elif len(v) != len(t) and scale != 'log':
      cs = ax.tricontourf(x, y, t, v, levels=levels, 
                          cmap=cmap, norm=norm, extend=extend)
    elif len(v) != len(t)  and scale == 'log':
      cs = ax.tricontourf(x, y, t, v, levels=levels, 
                          cmap=cmap, norm=norm)
  elif contour_type == 'lines':
    cs = ax.tricontour(x, y, t, v, linewidths=2.0,
                       levels=levels, colors='k') 
    for line in cs.collections:
      if line.get_linestyle() != [(None, None)]:
        #line.set_linestyle([(None, None)])
        line.set_color('red')
        line.set_linewidth(1.5)
    if levels_2 is not None:
      cs2 = ax.tricontour(x, y, t, v, levels=levels_2, colors='0.30') 
      for line in cs2.collections:
        if line.get_linestyle() != [(None, None)]:
          line.set_linestyle([(None, None)])
          line.set_color('#c1000e')
          line.set_linewidth(0.5)
    ax.clabel(cs, inline=1)

  # plot vectors, if desired :
  if vec and plot_quiver:
    q  = ax.quiver(x, y, v0, v1, **quiver_kwargs)
  
  # plot triangles, if desired :
  if plot_tp == True:
    tp = ax.triplot(x, y, t, **tp_kwargs)
  
  # this enforces equal axes no matter what (yeah, a hack) : 
  divider = make_axes_locatable(ax)

  # include colorbar :
  if cb and scale != 'bool' and contour_type != 'lines':
    cax  = divider.append_axes("right", "5%", pad="3%")
    cbar = fig.colorbar(cs, cax=cax, 
                        ticks=levels, format=cb_format) 
  
  ax.set_xlim([x.min(), x.max()])
  ax.set_ylim([y.min(), y.max()])
  plt.tight_layout(rect=[0,0,1,0.95])
  
  #mpl.rcParams['axes.titlesize'] = 'small'
  #tit = plt.title(title)

  # title :
  tit = plt.title(title)
  #tit.set_fontsize(40)
  
  # create the output directory : 
  d     = os.path.dirname(direc)
  if not os.path.exists(d):
    os.makedirs(d)

  # always save the figure to a file :
  plt.savefig(direc + name + ext, dpi=res)

  # show the figure too, if desired : 
  if show: plt.show()
  else:    plt.close(fig)
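A usage sketch for the NumPy branch of `plot_variable`: a scalar field on a Delaunay triangulation of random points. It assumes the module's own imports (print_text, colors, make_axes_locatable, and friends) are in place, as in the original file.

import numpy as np
from scipy.spatial import Delaunay

pts = np.random.rand(400, 2)
tri = Delaunay(pts)
u = np.sin(3 * pts[:, 0]) * np.cos(3 * pts[:, 1])   # scalar value at each node

plot_variable(u, 'scalar_demo', './plots/',
              coords=(pts[:, 0], pts[:, 1]), cells=tri.simplices,
              scale='lin', numLvls=12, show=False)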
Example #37
    ax1 = plt.subplot(121)
    plt.plot(V,'ok')
    plt.ylabel('#')
    plt.xlabel('Overpass')
    plt.grid(color='blue')

    ax2 = plt.subplot(122)
    plt.hist(V, bins=len(V), orientation='horizontal')
    plt.grid(color='blue')
    plt.xlabel('freq')
    plt.show()
    #plt.savefig('/automount/ftp/velibor/GPM/Stat/Overpassstat_dpr-rado_'+str(tit)+'.png')
    #plt.close()


'''
for i in range(17):
    print i
    df.columns[i]
    try:
        plot_ostat(df[df.columns[i]].values, df.columns[i])
    except:
        print str(df.columns[i]) + 'NICHT BERECHNET!'

plt.close()
'''
#df.values[:,1][np.where(df['N']<300)]= np.nan
#plot_ostat(df['HSS'].values, 'HSST')
#plot_ostat(df['BID'].values, 'BID')
#plot_ostat(df['FAR'].values, 'FAR')
#plot_ostat(df['r_value'].values, 'Corr')
Example #38
def check_doctests_testfile(fname, verbose, ns=None,
                   dots=True, doctest_warnings=False):
    """Check code in a text file.

    Mimic `check_doctests` above, differing mostly in test discovery.
    (which is borrowed from stdlib's doctest.testfile here,
     https://github.com/python-git/python/blob/master/Lib/doctest.py)

    Returns: list of [(item_name, success_flag, output), ...]

    Notes
    -----

    refguide can be signalled to skip testing code by adding
    ``#doctest: +SKIP`` to the end of the line. If the output varies or is
    random, add ``# may vary`` or ``# random`` to the comment. For example

    >>> plt.plot(...)  # doctest: +SKIP
    >>> random.randint(0,10)
    5 # random

    We also try to weed out pseudocode:
    * We maintain a list of exceptions which signal pseudocode,
    * We split the text file into "blocks" of code separated by empty lines
      and/or intervening text.
    * If a block contains a marker, the whole block is assumed to be
      pseudocode and is not doctested.

    The rationale is that typically, the text looks like this:

    blah
    <BLANKLINE>
    >>> from numpy import some_module   # pseudocode!
    >>> func = some_module.some_function
    >>> func(42)                  # still pseudocode
    146
    <BLANKLINE>
    blah
    <BLANKLINE>
    >>> 2 + 3        # real code, doctest it
    5

    """
    results = []

    if ns is None:
        ns = dict(DEFAULT_NAMESPACE)

    _, short_name = os.path.split(fname)
    if short_name in DOCTEST_SKIPLIST:
        return results

    full_name = fname
    with open(fname, encoding='utf-8') as f:
        text = f.read()

    PSEUDOCODE = set(['some_function', 'some_module', 'import example',
                      'ctypes.CDLL',     # likely need compiling, skip it
                      'integrate.nquad(func,'  # ctypes integrate tutorial
    ])

    # split the text into "blocks" and try to detect and omit pseudocode blocks.
    parser = doctest.DocTestParser()
    good_parts = []
    for part in text.split('\n\n'):
        tests = parser.get_doctest(part, ns, fname, fname, 0)
        if any(word in ex.source for word in PSEUDOCODE
                                 for ex in tests.examples):
            # omit it
            pass
        else:
            # `part` looks like good code, let's doctest it
            good_parts += [part]

    # Reassemble the good bits and doctest them:
    good_text = '\n\n'.join(good_parts)
    tests = parser.get_doctest(good_text, ns, fname, fname, 0)
    success, output = _run_doctests([tests], full_name, verbose,
                                    doctest_warnings)

    if dots:
        output_dot('.' if success else 'F')

    results.append((full_name, success, output))

    if HAVE_MATPLOTLIB:
        import matplotlib.pyplot as plt
        plt.close('all')

    return results
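A sketch of how the checker is typically driven; the file path is a placeholder, and the module globals it relies on (DEFAULT_NAMESPACE, DOCTEST_SKIPLIST, _run_doctests) are assumed to be defined as in the original refguide-check style script.

results = check_doctests_testfile('doc/source/tutorial.rst',
                                  verbose=False, dots=False)
for name, success, output in results:
    if not success:
        print('FAILED:', name)
        print(output)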
Example #39
            X = Variable(X.view(mb_size, -1))
            elbo, t_loss = model.forward(X)

            # Print and plot every now and then
            if it % 1000 == 0:
                print('Iter-{}; ELBO: {:.4}; T_loss: {:.4}'.format(
                    it, -elbo.data[0], -t_loss.data[0]))

                z = Variable(torch.randn(mb_size, z_dim))
                samples = model.P(z).data.numpy()[:16]

                fig = plt.figure(figsize=(4, 4))
                gs = gridspec.GridSpec(4, 4)
                gs.update(wspace=0.05, hspace=0.05)

                for i, sample in enumerate(samples):
                    ax = plt.subplot(gs[i])
                    plt.axis('off')
                    ax.set_xticklabels([])
                    ax.set_yticklabels([])
                    ax.set_aspect('equal')
                    plt.imshow(sample.reshape(28, 28), cmap='Greys_r')

                if not os.path.exists('out/'):
                    os.makedirs('out/')

                plt.savefig('out/{}.png'.format(str(cnt).zfill(3)),
                            bbox_inches='tight')
                cnt += 1
                plt.close(fig)
Example #40
    def create_plot(self):
        print('\n' + self.__class__.__name__ + ": updating plot.")

        timestamp = datetime.utcnow()

        with self.lock:
            for ts_type, n_events in self.timeslice_counts.items():
                timeslice_rate = n_events / self.interval
                self.timeslice_rates[ts_type].append((timestamp, timeslice_rate))
            self.timeslice_counts = defaultdict(int)

        fig, ax = plt.subplots(figsize=(16, 4))

        for ts_type, rates in self.timeslice_rates.items():
            if not rates:
                self.log.warning("Empty rates, skipping...")
                continue
            timestamps, timeslice_rates = zip(*rates)
            ax.plot(
                timestamps,
                timeslice_rates,
                **self.styles[ts_type],
                **self.styles['general'],
                label=ts_type)

        run_changes_to_plot = self._get_run_changes_to_plot()
        if run_changes_to_plot:
            self.log.critical("No run changes!")
            self.print("Recorded run changes: {}".format(run_changes_to_plot))
            all_rates = [r for d, r in chain(*self.timeslice_rates.values())]
            if not all_rates:
                self.log.warning("Empty rates, skipping...")
                return
            min_timeslice_rate = min(all_rates)
            max_timeslice_rate = max(all_rates)
            for run_start, run in run_changes_to_plot:
                plt.text(
                    run_start, (min_timeslice_rate + max_timeslice_rate) / 2,
                    "\nRUN %s  " % run,
                    rotation=60,
                    verticalalignment='top',
                    fontsize=8,
                    color='gray')
                ax.axvline(
                    run_start, color='#ff0f5b', linestyle='--', alpha=0.8)  # added

        ax.set_title("Timeslice Rates for DetID-{0}\n{1} UTC".format(
            self.det_id,
            datetime.utcnow().strftime("%c")))
        ax.set_xlabel("time")
        ax.set_ylabel("timeslice rate [Hz]")
        ax.xaxis.set_major_formatter(self.styles["xfmt"])
        ax.grid(True, which='minor')
        if self.with_minor_ticks:
            ax.minorticks_on()
        plt.legend()

        fig.tight_layout()

        filename = join(self.plots_path, self.filename + '_lin.png')
        filename_tmp = join(self.plots_path, self.filename + '_lin_tmp.png')
        plt.savefig(filename_tmp, dpi=120, bbox_inches="tight")
        shutil.move(filename_tmp, filename)

        try:
            ax.set_yscale('log')
        except ValueError:
            pass

        filename = join(self.plots_path, self.filename + '.png')
        filename_tmp = join(self.plots_path, self.filename + '_tmp.png')
        plt.savefig(filename_tmp, dpi=120, bbox_inches="tight")
        shutil.move(filename_tmp, filename)

        plt.close('all')
        print("Plot updated at '{}'.".format(filename))
Example #41
def generate_plot(df_path, marker, verbose=True):
    df = pd.read_excel(df_path)
    df = df[df[marker] != 0]
    if marker == 'upstream':
        df[marker] = df[marker] * -1
    df = df.set_index([marker])
    columns = df.columns
    colors = ['red', 'blue', 'green', 'purple', 'yellow']
    shapes = ['o', 'D', '^', 'x']
    labels = []
    handles = []

    fig, ax = plt.subplots()
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    if marker != 'height':
        ax.set_xscale('symlog')
        ax.xaxis.set_major_formatter(ticker.FormatStrFormatter("%d"))

    for i in range(0, len(columns)):
        shape = shapes[i]
        # ax = df.plot.scatter(columns[0], columns[i], color=colors[i], xlim=(df[columns[0]].min(), df[columns[0]].max()),
        #                      ylim=(0, df[columns[i]].max()),
        #                       edgecolor='', ax=ax)
        if marker == 'downstream':
            ax = plt.scatter(df.index / 1000,
                             df[columns[i]],
                             color=colors[i],
                             edgecolor='',
                             marker=shape)
            mask = np.isfinite(df[columns[i]])
            plt.plot(df.index[mask] / 1000,
                     df[columns[i]][mask],
                     color=colors[i],
                     linestyle='-')

        elif marker == 'upstream':
            ax = plt.scatter(df.index / 1000,
                             df[columns[i]],
                             color=colors[i],
                             edgecolor='',
                             marker=shape)
            mask = np.isfinite(df[columns[i]])
            plt.plot(df.index[mask] / 1000,
                     df[columns[i]][mask],
                     color=colors[i],
                     linestyle='-')
        else:
            ax = plt.scatter(df.index,
                             df[columns[i]],
                             color=colors[i],
                             edgecolor='',
                             marker=shape)
            mask = np.isfinite(df[columns[i]])
            plt.plot(df.index[mask],
                     df[columns[i]][mask],
                     color=colors[i],
                     linestyle='-')

        handles.append(ax)
        labels.append(columns[i])

    # print handles, labels
    # ax.legend(handles, labels, loc='best')
    font = {'fontname': 'Helvetica', 'fontsize': 10}

    if marker == 'downstream':
        plt.xlabel('Downstream distance (kb)', **font)
        plt.xlim((df.index.min() / 1000, df.index.max() / 1000))
    elif marker == 'upstream':
        plt.xlabel('Upstream distance (kb)', **font)
        plt.xlim(df.index.max() / 1000, df.index.min() / 1000)
    else:
        plt.xlabel('Height cutoff', **font)
        # plt.xlim((np.log10(df.index.min()), np.log10(df.index.max())))
        # plt.xlim((0, df.index.max()))
        plt.xlim((0, 100))
    #
    plt.ylabel('-log10 enrich P', **font)
    # print labels
    # print ((0, df.max().max()))
    plt.ylim(((0, (int(df.max().max()) / 10 + 1) * 10)))
    plt.xticks(**font)
    plt.yticks(**font)
    print(labels)
    if verbose:
        plt.legend(handles,
                   labels,
                   scatterpoints=1,
                   loc=9,
                   fontsize=10,
                   ncol=2)

    plt.savefig(df_path.replace('.xlsx', '.pdf'))
    plt.close('all')
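A hedged usage sketch for generate_plot above; the spreadsheet path is hypothetical and is assumed to contain the chosen marker column ('downstream', 'upstream' or 'height') plus one numeric column per series:

generate_plot('enrichment_results.xlsx', 'downstream', verbose=True)
# writes enrichment_results.pdf next to the input spreadsheet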
Example #42
0
def create_team_records_fig(team_records, team1, team2):
    df = pd.DataFrame(data=list(team_records), columns=['match_date', 'home_team', 'away_team', 'winning_team', 'home_closing', 'away_closing']) # need list of tuples instead of tuple of tuples
    print(df)
    #print('1 got past')
    n_games = df['home_team'].size
    # x is integers from 1 to n_games for team1 then 1 to n_games for team2
    x = np.concatenate([np.linspace(1, n_games, n_games)] * 2)
    # y is home_teams first at y=1 then away teams at y=y_height
    y_height = 3
    y = np.concatenate([np.ones(n_games), y_height * np.ones(n_games)])
    # sizes of bubbles are odds of each team
    sizes = np.concatenate([df['home_closing'], df['away_closing']])
    # colors: each bubble is grey by default and turns green for the team that won that game
    colors = ['grey']*n_games*2
    bubble_text = [''] * n_games * 2
    i = 0
    while i < n_games: # i is the row of df
        if df['home_team'].values[i] == team1:
            bubble_text[i] = str(df['home_closing'].values[i]) + '\n home'
            sizes[i] = df['home_closing'].values[i]
            bubble_text[i+n_games] = str(df['away_closing'].values[i]) + '\n away'
            sizes[i+n_games] = df['away_closing'].values[i]
        else:
            bubble_text[i] = str(df['away_closing'].values[i]) + '\n away'
            sizes[i] = df['away_closing'].values[i]
            bubble_text[i+n_games] = str(df['home_closing'].values[i]) + '\n home'
            sizes[i+n_games] = df['home_closing'].values[i]
        winner = df['winning_team'].values[i]
        if winner == team1:
            colors[i] = 'green'
        elif winner == team2:
            colors[i+n_games] = 'green'
        i = i+1
        print(i)

    fig = plt.figure(figsize=(8, 12), tight_layout = True)
    ax1 = fig.add_subplot(2,1,1)
    ax1.scatter(x, y, s=sizes * 1000 * (6 - n_games), c=colors, alpha=0.4)
    ax1.set_xlim([0, n_games + 1])
    ax1.set_ylim([0, y_height + 1])
    ax1.set_xticks(x[0:n_games])
    ax1.set_xticklabels(df['match_date'])
    ax1.set_yticks([1, y_height])
    ax1.set_yticklabels([team1, team2])
    ax1.set_title(team1 + ' VS. ' + team2 + ' Final Odds')

    i = 0
    while i < n_games * 2:
        ax1.text(x[i], y[i], bubble_text[i],
                     horizontalalignment='center',
                     verticalalignment='center')
        i = i + 1

    # Next plot
    df = pd.DataFrame(data=list(teams.team1_full_record), columns = ['winning_team', 'closing_odds_outcome'])
    n_games = len(df['winning_team'])
    size_of_each_bet = 100
    return_on_bet = (df['winning_team'] == team1)*df['closing_odds_outcome']*size_of_each_bet
    winnings = np.zeros(n_games)
    winnings[0] = -size_of_each_bet + return_on_bet.values[0]
    i = 1
    while i < n_games:
        winnings[i] = winnings[i-1] - size_of_each_bet + return_on_bet.values[i]
        i = i+1
    x = np.linspace(1, n_games, n_games)
    fig.add_subplot(2,1,2)
    plt.plot(x,winnings, c = 'grey')
    plt.hlines(y=0, xmin=0, xmax = n_games)
    green = winnings > 0
    colors = ['']*len(green)
    for i in range(len(green)):
        if green[i] == True:
            colors[i] = 'g'
        else:
            colors[i] = 'r'
    plt.scatter(x, winnings, c = colors)

    # last scatter plot on same axes as first scatter
    df = pd.DataFrame(data=list(teams.team2_full_record), columns = ['winning_team', 'closing_odds_outcome'])
    n_games = len(df['winning_team'])
    size_of_each_bet = 100
    return_on_bet = (df['winning_team'] == team2)*df['closing_odds_outcome']*size_of_each_bet
    winnings = np.zeros(n_games)
    winnings[0] = -size_of_each_bet + return_on_bet.values[0]
    i = 1
    while i < n_games:
        winnings[i] = winnings[i-1] - size_of_each_bet + return_on_bet.values[i]
        i = i+1
    x = np.linspace(1, n_games, n_games)

    green = winnings > 0
    colors = ['']*len(green)
    for i in range(len(green)):
        if green[i] == True:
            colors[i] = 'g'
        else:
            colors[i] = 'r'

    fig.add_subplot(2,1,2)
    plt.plot(x,winnings, c = 'grey', linestyle = 'dashed')
    plt.hlines(y=0, xmin=0, xmax = n_games)
    plt.scatter(x, winnings, c = colors, marker = 'v')
    plt.legend(labels = [teams.team1, teams.team2])
    plt.title('Return when betting $100 each game', fontsize=16)
    plt.xlabel('Number of Games')
    plt.ylabel('Total Return($)')
    plt.close() # plt.close() is needed or my mac gets a weird error
    return fig
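create_team_records_fig returns the figure rather than saving it, so persistence is left to the caller. A sketch of such a call (the records and team names are made up; note the function also reads the module-level teams object for the betting-return subplots):

records = [
    ('2021-01-01', 'Lions', 'Tigers', 'Lions', 1.85, 2.10),
    ('2021-01-08', 'Tigers', 'Lions', 'Tigers', 1.95, 2.05),
]
fig = create_team_records_fig(records, 'Lions', 'Tigers')
fig.savefig('lions_vs_tigers.png', bbox_inches='tight')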
Example #43
0
def visualize_fronts(pareto_dict: Dict[str, Union[Pareto, SimplePareto]] = None,
                     filepath='./fronts.png',
                     title='pareto fronts',
                     objective_name: List[str] = ['obj1', 'obj2'],
                     save=False,
                     show=True,
                     referenced_points=None,
                     plot_line=True,
                     marker=None,
                     linestyle=None,
                     markersize=None,
                     s=None,
                     linewidth=None,
                     do_axis=lambda ax : None,
                     do_plt=lambda ax : None,
                     fillstyle=None,
                     dpi=None,
                     frameon=None,
                     **kwargs):
    # Work on a copy so the caller's dict (or a shared default) is not mutated
    pareto_dict = dict(pareto_dict or {})
    pareto_dict.update(kwargs)
    for name, pareto in pareto_dict.items():
        if not isinstance(pareto, Pareto) and (not isinstance(pareto, (list, tuple)) or
                                               not all(isinstance(solution, (list, tuple)) for solution in pareto) or
                                               not all(isinstance(value, (int, float)) for solution in pareto for value in solution)):
            raise ValueError(
                f'pareto value must be an instance of List[List[Union[int, float]]], \
                pareto {name} is not')

        if isinstance(pareto, Pareto):
            pareto_dict[name] = pareto.all_objectives()

        for solution in pareto_dict[name]:
            if not len(solution) == 2:
                raise ValueError(
                    f"This method only supports two objectives,\
                    solution {solution} of pareto {name} has {len(solution)}")
        pareto_dict[name] = sorted(
            pareto_dict[name], key=lambda solution: tuple(solution))
    do_plt(plt)
    fig, ax = plt.subplots()
    do_axis(ax)
    legends = []
    marker = marker or ('o', '+', '*', '^', '.', ',')
    iter_marker = itertools.cycle(marker)
    
    if fillstyle == 'flicker':
        fillstyle = ['full', 'none']
    elif fillstyle == 'all':
        fillstyle = ['none', 'top', 'bottom', 'right', 'left', 'full']
    else:
        fillstyle = [fillstyle]

    iter_fillstyle = itertools.cycle(fillstyle)

    for name, pareto in pareto_dict.items():
        legends.append(name)
        obj1 = [solution[0] for solution in pareto]
        obj2 = [solution[1] for solution in pareto]
        m = next(iter_marker)
        fs = next(iter_fillstyle)
        if plot_line:
            ax.plot(obj1, obj2, 
                     linewidth=linewidth,
                     linestyle=linestyle,
                     fillstyle=fs,
                     markersize=markersize,
                     marker=m)
        else:
            ax.scatter(obj1, obj2, marker=m, s=markersize)

    if referenced_points is not None:
        if isinstance(referenced_points, np.ndarray):
            if referenced_points.shape[1] != 2:
                raise ValueError(
                    'referenced_points must have shape '
                    '(number_of_points, number_of_objectives) and this '
                    'method only supports two-objective problems')
            ax.plot(referenced_points[:, 0],
                     referenced_points[:, 1], color='black', linewidth=linewidth)
        elif isinstance(referenced_points, (list, tuple)):
            if not all(isinstance(solution, (list, tuple)) for solution in referenced_points) or \
                    not all(isinstance(value, (int, float)) for solution in referenced_points for value in solution):
                raise ValueError(
                    f'referenced_points value must be an instance of List[List[Union[int, float]]]')
            f1_rp = [solution[0] for solution in referenced_points]
            f2_rp = [solution[1] for solution in referenced_points]
            plt.plot(f1_rp, f2_rp, color='black', linewidth=linewidth)

    plt.xlabel(objective_name[0])
    plt.ylabel(objective_name[1])
    plt.title(title)
    plt.legend(legends, frameon=frameon)
    plt.tight_layout()

    if save:
        plt.savefig(filepath, dpi=dpi)

    if show:
        plt.show()

    plt.close('all')
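A minimal usage sketch for visualize_fronts; the two fronts are made-up lists of [f1, f2] points (plain lists are accepted as well as Pareto objects, assuming the Pareto type and the plotting imports from the original module are available):

front_a = [[1.0, 4.0], [2.0, 2.5], [3.0, 1.5]]
front_b = [[1.2, 3.5], [2.2, 2.0], [3.5, 1.0]]
visualize_fronts({'NSGA-II': front_a, 'MOEA/D': front_b},
                 objective_name=['f1', 'f2'],
                 plot_line=True, save=False, show=True)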
Example #44
0
def plot_psychrometrics(self, nbins=50, cm="Greys", close=False, save=False):
    # Construct the save_path and create directory if it doesn't exist
    save_path = self.file_path.parent / "{}_Plot".format(self.file_path.stem) / "psychrometrics.png"

    hr = self.humidity_ratio
    dbt = self.dry_bulb_temperature

    dry_bulb_temperature_plot_range = range(-20, 51, 1)
    humidity_ratio_plot_range = [i / 10000 for i in range(0, 301, 1)]
    enthalpy_plot_range = range(-10, 120, 10)
    relative_humidity_plot_range = [i / 100 for i in range(0, 101, 10)]

    # figure instantiation
    fig, ax = plt.subplots(1, 1, figsize=(15, 8))

    # plot values from weather file
    counts, xedges, yedges, im = ax.hist2d(dbt, hr, bins=nbins, cmin=1, alpha=0.9, normed=False, cmap=cm, lw=0,
                                           zorder=0)
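    # NOTE: the `normed` keyword was removed from hist2d in Matplotlib 3.x; on
    # recent versions replace it with `density=False` in the call above.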

    # y-axis formatting
    ax.yaxis.tick_right()
    ax.yaxis.set_label_position("right")
    ax.set_ylim(0, 0.03)
    ax.set_yticks([i / 1000 for i in range(0, 35, 5)])
    ax.set_ylabel("Humidity ratio ($kg_{water}/kg_{air}$)", color="k", fontsize="x-large")

    # x-axis formatting
    ax.set_xlim(-20, 50)
    ax.set_xticks(range(-20, 55, 5))
    ax.set_xlabel("Dry-bulb temperature ($°C$)", color="k", fontsize="x-large")
    ax.tick_params(axis='both', colors='k')

    # canvas formatting
    ax.tick_params(axis="both", color="k", grid_color="k", grid_alpha=1, grid_lw=0.5)
    for edge in ["right", "bottom"]:
        ax.spines[edge].set_alpha(1)
        ax.spines[edge].set_color("k")
        ax.spines[edge].set_lw(1)
    for edge in ["left", "top"]:
        ax.spines[edge].set_visible(False)

    # relative humidity grid/curves
    n = 0
    for rh in relative_humidity_plot_range:
        h_r = [GetHumRatioFromRelHum(i, rh, 101350) for i in dry_bulb_temperature_plot_range]
        ax.plot(dry_bulb_temperature_plot_range, h_r, color="k", alpha=1, lw=0.2)
        # Fill the top part of the plot
        if rh == 1:
            ax.fill_between(dry_bulb_temperature_plot_range, h_r, 0.031, interpolate=True, color='w', lw=0,
                            edgecolor=None,
                            zorder=4)
        # add annotation describing line
        ax.text(30, GetHumRatioFromRelHum(30, rh, 101350) + 0.0, "{0:0.0f}% RH".format(rh * 100),
                ha="right", va="bottom", rotation=0, zorder=9, fontsize="small", color="k")  # n*55
        n += 1 / len(relative_humidity_plot_range)

    # TODO: Fix enthalpy grid curves
    # # enthalpy grid/curves
    # for enthalpy in enthalpy_plot_range:
    #     ys = [0, 0.030]
    #     xs = np.array([GetTDryBulbFromEnthalpyAndHumRatio(enthalpy, i) for i in ys]) /1000
    #     if (enthalpy <= 50) & (enthalpy != 30):
    #         ax.text(xs[0], 0.0002, "{}kJ/kg".format(enthalpy), ha="right", va="bottom", color="k", zorder=9,
    #                 fontsize="small")
    #     else:
    #         pass
    #     # ax.text(50, ys[0], "{}kJ/kg".format(enthalpy), ha="right", va="bottom", color="#555555", zorder=9, fontsize="small")
    #     ax.plot(xs, ys, color="k", alpha=1, lw=0.2)

    # grid formatting
    ax.grid(True, lw=0.2, zorder=5)

    # Generating summary metrics
    min_dbt = self.df[self.index == self.dry_bulb_temperature.idxmin()].squeeze()
    max_dbt = self.df[self.index == self.dry_bulb_temperature.idxmax()].squeeze()
    max_hr = self.df[self.index == self.humidity_ratio.idxmax()].squeeze()
    max_enthalpy = self.df[self.index == self.enthalpy.idxmax()].squeeze()

    text_fontsize = "medium"

    # Generate peak cooling summary
    max_dbt_table = "Peak cooling {0:}\n" \
                    "WS:  {1:>6.1f} m/s\n" \
                    "WD:  {2:>6.1f} deg\n" \
                    "DBT: {3:>6.1f} °C\n" \
                    "WBT: {4:>6.1f} °C\n" \
                    "RH:  {5:>6.1f} %\n" \
                    "DPT: {6:>6.1f} °C\n" \
                    "h:   {7:>6.1f} kJ/kg\n" \
                    "HR:  {8:<5.4f} kg/kg".format(
        max_dbt.name.strftime("%b %d %H:%M"),
        max_dbt.wind_speed,
        max_dbt.wind_direction,
        max_dbt.dry_bulb_temperature,
        max_dbt.wet_bulb_temperature,
        max_dbt.relative_humidity,
        max_dbt.dew_point_temperature,
        max_dbt.enthalpy / 1000,
        max_dbt.humidity_ratio)

    ax.text(0, 0.98, max_dbt_table, transform=ax.transAxes, ha="left", va="top", zorder=8, fontsize=text_fontsize,
            color="k", **{'fontname': 'monospace'})

    ## Generate peak heating summary
    min_dbt_table = "Peak heating {0:}\n" \
                    "WS:  {1:>6.1f} m/s\n" \
                    "WD:  {2:>6.1f} deg\n" \
                    "DBT: {3:>6.1f} °C\n" \
                    "WBT: {4:>6.1f} °C\n" \
                    "RH:  {5:>6.1f} %\n" \
                    "DPT: {6:>6.1f} °C\n" \
                    "h:   {7:>6.1f} kJ/kg\n" \
                    "HR:  {8:<5.4f} kg/kg".format(
        min_dbt.name.strftime("%b %d %H:%M"),
        min_dbt.wind_speed,
        min_dbt.wind_direction,
        min_dbt.dry_bulb_temperature,
        min_dbt.wet_bulb_temperature,
        min_dbt.relative_humidity,
        min_dbt.dew_point_temperature,
        min_dbt.enthalpy / 1000,
        min_dbt.humidity_ratio
    )
    ax.text(0, 0.72, min_dbt_table, transform=ax.transAxes, ha="left", va="top", zorder=8, fontsize=text_fontsize,
            color="k", **{'fontname': 'monospace'})

    ## Generate max HumidityRatio summary
    max_hr_table = "Peak humidity ratio {0:}\n" \
                   "WS:  {1:>6.1f} m/s\n" \
                   "WD:  {2:>6.1f} deg\n" \
                   "DBT: {3:>6.1f} °C\n" \
                   "WBT: {4:>6.1f} °C\n" \
                   "RH:  {5:>6.1f} %\n" \
                   "DPT: {6:>6.1f} °C\n" \
                   "h:   {7:>6.1f} kJ/kg\n" \
                   "HR:  {8:<5.4f} kg/kg".format(
        max_hr.name.strftime("%b %d %H:%M"),
        max_hr.wind_speed,
        max_hr.wind_direction,
        max_hr.dry_bulb_temperature,
        max_hr.wet_bulb_temperature,
        max_hr.relative_humidity,
        max_hr.dew_point_temperature,
        max_hr.enthalpy / 1000,
        max_hr.humidity_ratio
    )
    ax.text(0.17, 0.98, max_hr_table, transform=ax.transAxes, ha="left", va="top", zorder=8, fontsize=text_fontsize,
            color="k", **{'fontname': 'monospace'})

    ## Generate max enthalpy summary
    max_enthalpy_table = "Peak enthalpy ratio {0:}\n" \
                         "WS:  {1:>6.1f} m/s\n" \
                         "WD:  {2:>6.1f} deg\n" \
                         "DBT: {3:>6.1f} °C\n" \
                         "WBT: {4:>6.1f} °C\n" \
                         "RH:  {5:>6.1f} %\n" \
                         "DPT: {6:>6.1f} °C\n" \
                         "h:   {7:>6.1f} kJ/kg\n" \
                         "HR:  {8:<5.4f} kg/kg".format(
        max_enthalpy.name.strftime("%b %d %H:%M"),
        max_enthalpy.wind_speed,
        max_enthalpy.wind_direction,
        max_enthalpy.dry_bulb_temperature,
        max_enthalpy.wet_bulb_temperature,
        max_enthalpy.relative_humidity,
        max_enthalpy.dew_point_temperature,
        max_enthalpy.enthalpy / 1000,
        max_enthalpy.humidity_ratio
    )
    ax.text(0.17, 0.72, max_enthalpy_table, transform=ax.transAxes, ha="left", va="top", zorder=8,
            fontsize=text_fontsize, color="k", **{'fontname': 'monospace'})

    # Title formatting
    ti = ax.set_title("{} - {} - {}".format(self.city, self.country, self.station_id),
                      color="k", loc="left", fontsize="xx-large")

    # text
    keys = "WS: Wind speed | WD: Wind direction | DBT: Dry-bulb temperature | WBT: Wet-bulb temperature\nRH: Relative humidity | DPT: Dew-point temperature | h: Enthalpy | HR: Humidity ratio"
    te = ax.text(0.5, -0.1, keys, transform=ax.transAxes, ha="center", va="top", zorder=8, fontsize="medium",
                 color="k", **{'fontname': 'monospace'})

    # Colorbar
    cb = plt.colorbar(im, ax=ax, shrink=1, pad=0.071)
    cb.ax.set_title('Hours', color="k")
    cb.outline.set_visible(False)
    plt.setp(plt.getp(cb.ax.axes, 'yticklabels'), color='k')

    plt.tight_layout()

    # Save figure
    if save:
        save_path.parent.mkdir(parents=True, exist_ok=True)
        fig.savefig(save_path, bbox_inches="tight", dpi=300, transparent=False, bbox_extra_artists=[ti, te, ])
        print("Psychrometric plot saved to {}".format(save_path))
    if close:
        plt.close()
Example #45
0
def render(states, actions, instantaneous_reward_log, cumulative_reward_log,
           critic_distributions, target_critic_distributions,
           projected_target_distribution, bins, loss_log,
           guidance_position_log, episode_number, filename, save_directory):

    faulthandler.enable(
    )  # to get more information about segmentation fault (core dumped) error

    # Load in a temporary environment, used to grab the physical parameters
    temp_env = Environment()

    # Checking if we want the additional reward and value distribution information
    extra_information = temp_env.ADDITIONAL_VALUE_INFO

    # Unpacking state
    chaser_x, chaser_y, chaser_z = states[:, 0], states[:, 1], states[:, 2]

    # Assigning the chaser a fixed attitude
    chaser_theta = np.zeros(len(chaser_x))

    target_x, target_y, target_z, target_theta = (
        states[:, 3], states[:, 4], states[:, 5], states[:, 6])

    # Extracting physical properties
    length = temp_env.LENGTH

    ### Calculating spacecraft corner locations through time ###

    # Corner locations in body frame
    chaser_body_body_frame = length / 2. * np.array(
        [[[1], [-1], [1]], [[-1], [-1], [1]], [[-1], [-1], [-1]],
         [[1], [-1], [-1]], [[-1], [-1], [-1]], [[-1], [1], [-1]],
         [[1], [1], [-1]], [[-1], [1], [-1]], [[-1], [1], [1]],
         [[1], [1], [1]], [[-1], [1], [1]], [[-1], [-1], [1]]]).squeeze().T

    chaser_front_face_body_frame = length / 2. * np.array(
        [[[1], [-1], [1]], [[1], [1], [1]], [[1], [1], [-1]],
         [[1], [-1], [-1]], [[1], [-1], [1]]]).squeeze().T

    # Rotation matrix (body -> inertial)
    C_Ib = np.moveaxis(
        np.array([
            [np.cos(chaser_theta), -np.sin(chaser_theta), np.zeros(len(chaser_theta))],
            [np.sin(chaser_theta), np.cos(chaser_theta), np.zeros(len(chaser_theta))],
            [np.zeros(len(chaser_theta)), np.zeros(len(chaser_theta)), np.ones(len(chaser_theta))]]),
        source=2,
        destination=0)  # [NUM_TIMESTEPS, 3, 3]

    # Rotating body frame coordinates to inertial frame
    chaser_body_inertial = np.matmul(C_Ib, chaser_body_body_frame) + np.array(
        [chaser_x, chaser_y, chaser_z]).T.reshape([-1, 3, 1])
    chaser_front_face_inertial = np.matmul(
        C_Ib, chaser_front_face_body_frame) + np.array(
            [chaser_x, chaser_y, chaser_z]).T.reshape([-1, 3, 1])

    ### Calculating target spacecraft corner locations through time ###

    # Corner locations in body frame
    target_body_frame = length / 2. * np.array(
        [[[1], [-1], [1]], [[-1], [-1], [1]], [[-1], [-1], [-1]],
         [[1], [-1], [-1]], [[-1], [-1], [-1]], [[-1], [1], [-1]],
         [[1], [1], [-1]], [[-1], [1], [-1]], [[-1], [1], [1]],
         [[1], [1], [1]], [[-1], [1], [1]], [[-1], [-1], [1]]]).squeeze().T

    target_front_face_body_frame = length / 2. * np.array(
        [[[1], [-1], [1]], [[1], [1], [1]], [[1], [1], [-1]],
         [[1], [-1], [-1]], [[1], [-1], [1]]]).squeeze().T

    # Rotation matrix (body -> inertial)
    C_Ib = np.moveaxis(
        np.array([
            [np.cos(target_theta), -np.sin(target_theta), np.zeros(len(target_theta))],
            [np.sin(target_theta), np.cos(target_theta), np.zeros(len(target_theta))],
            [np.zeros(len(target_theta)), np.zeros(len(target_theta)), np.ones(len(target_theta))]]),
        source=2,
        destination=0)  # [NUM_TIMESTEPS, 3, 3]
    target_body_inertial = np.matmul(C_Ib, target_body_frame) + np.array(
        [target_x, target_y, target_z]).T.reshape([-1, 3, 1])
    target_front_face_inertial = np.matmul(
        C_Ib, target_front_face_body_frame) + np.array(
            [target_x, target_y, target_z]).T.reshape([-1, 3, 1])

    # Generating figure window
    figure = plt.figure(constrained_layout=True)
    figure.set_size_inches(5, 4, True)

    if extra_information:
        grid_spec = gridspec.GridSpec(nrows=2, ncols=3, figure=figure)
        subfig1 = figure.add_subplot(grid_spec[0, 0],
                                     projection='3d',
                                     aspect='equal',
                                     autoscale_on=False,
                                     xlim3d=(-5, 5),
                                     ylim3d=(-5, 5),
                                     zlim3d=(0, 10),
                                     xlabel='X (m)',
                                     ylabel='Y (m)',
                                     zlabel='Z (m)')
        subfig2 = figure.add_subplot(
            grid_spec[0, 1],
            xlim=(np.min([np.min(instantaneous_reward_log), 0]) -
                  (np.max(instantaneous_reward_log) -
                   np.min(instantaneous_reward_log)) * 0.02,
                  np.max([np.max(instantaneous_reward_log), 0]) +
                  (np.max(instantaneous_reward_log) -
                   np.min(instantaneous_reward_log)) * 0.02),
            ylim=(-0.5, 0.5))
        subfig3 = figure.add_subplot(grid_spec[0, 2],
                                     xlim=(np.min(loss_log) - 0.01,
                                           np.max(loss_log) + 0.01),
                                     ylim=(-0.5, 0.5))
        subfig4 = figure.add_subplot(grid_spec[1, 0], ylim=(0, 1.02))
        subfig5 = figure.add_subplot(grid_spec[1, 1], ylim=(0, 1.02))
        subfig6 = figure.add_subplot(grid_spec[1, 2], ylim=(0, 1.02))

        # Setting titles
        subfig1.set_xlabel("X (m)", fontdict={'fontsize': 8})
        subfig1.set_ylabel("Y (m)", fontdict={'fontsize': 8})
        subfig2.set_title("Timestep Reward", fontdict={'fontsize': 8})
        subfig3.set_title("Current loss", fontdict={'fontsize': 8})
        subfig4.set_title("Q-dist", fontdict={'fontsize': 8})
        subfig5.set_title("Target Q-dist", fontdict={'fontsize': 8})
        subfig6.set_title("Bellman projection", fontdict={'fontsize': 8})

        # Changing around the axes
        subfig1.tick_params(labelsize=8)
        subfig2.tick_params(which='both',
                            left=False,
                            labelleft=False,
                            labelsize=8)
        subfig3.tick_params(which='both',
                            left=False,
                            labelleft=False,
                            labelsize=8)
        subfig4.tick_params(which='both',
                            left=False,
                            labelleft=False,
                            right=True,
                            labelright=False,
                            labelsize=8)
        subfig5.tick_params(which='both',
                            left=False,
                            labelleft=False,
                            right=True,
                            labelright=False,
                            labelsize=8)
        subfig6.tick_params(which='both',
                            left=False,
                            labelleft=False,
                            right=True,
                            labelright=True,
                            labelsize=8)

        # Adding the grid
        subfig4.grid(True)
        subfig5.grid(True)
        subfig6.grid(True)

        # Setting appropriate axes ticks
        subfig2.set_xticks(
            [np.min(instantaneous_reward_log), 0, np.max(instantaneous_reward_log)]
            if np.sign(np.min(instantaneous_reward_log)) != np.sign(np.max(instantaneous_reward_log))
            else [np.min(instantaneous_reward_log), np.max(instantaneous_reward_log)])
        subfig3.set_xticks([np.min(loss_log), np.max(loss_log)])
        subfig4.set_xticks(
            [bins[i * 5] for i in range(round(len(bins) / 5) + 1)])
        subfig4.tick_params(axis='x', labelrotation=-90)
        subfig4.set_yticks([0, 0.2, 0.4, 0.6, 0.8, 1.])
        subfig5.set_xticks(
            [bins[i * 5] for i in range(round(len(bins) / 5) + 1)])
        subfig5.tick_params(axis='x', labelrotation=-90)
        subfig5.set_yticks([0, 0.2, 0.4, 0.6, 0.8, 1.])
        subfig6.set_xticks(
            [bins[i * 5] for i in range(round(len(bins) / 5) + 1)])
        subfig6.tick_params(axis='x', labelrotation=-90)
        subfig6.set_yticks([0, 0.2, 0.4, 0.6, 0.8, 1.])

    else:
        subfig1 = figure.add_subplot(1,
                                     1,
                                     1,
                                     projection='3d',
                                     aspect='equal',
                                     autoscale_on=False,
                                     xlim3d=(-5, 5),
                                     ylim3d=(-5, 5),
                                     zlim3d=(0, 10),
                                     xlabel='X (m)',
                                     ylabel='Y (m)',
                                     zlabel='Z (m)')

    # Setting the proper view
    if temp_env.TOP_DOWN_VIEW:
        subfig1.view_init(-90, 0)
    else:
        subfig1.view_init(25, 190)

    # Defining plotting objects that change each frame
    chaser_body, = subfig1.plot([], [], [],
                                color='r',
                                linestyle='-',
                                linewidth=2)  # Note, the comma is needed
    chaser_front_face, = subfig1.plot([], [], [],
                                      color='r',
                                      linestyle='-',
                                      linewidth=2)  # Note, the comma is needed
    target_body, = subfig1.plot([], [], [],
                                color='g',
                                linestyle='-',
                                linewidth=2)
    target_front_face, = subfig1.plot([], [], [],
                                      color='k',
                                      linestyle='-',
                                      linewidth=2)
    chaser_body_dot = subfig1.scatter(0., 0., 0., color='r', s=0.1)

    if extra_information:
        reward_bar = subfig2.barh(y=0, height=0.2, width=0)
        loss_bar = subfig3.barh(y=0, height=0.2, width=0)
        q_dist_bar = subfig4.bar(x=bins,
                                 height=np.zeros(shape=len(bins)),
                                 width=bins[1] - bins[0])
        target_q_dist_bar = subfig5.bar(x=bins,
                                        height=np.zeros(shape=len(bins)),
                                        width=bins[1] - bins[0])
        projected_q_dist_bar = subfig6.bar(x=bins,
                                           height=np.zeros(shape=len(bins)),
                                           width=bins[1] - bins[0])
        time_text = subfig1.text2D(x=0.2,
                                   y=0.91,
                                   s='',
                                   fontsize=8,
                                   transform=subfig1.transAxes)
        reward_text = subfig1.text2D(x=0.0,
                                     y=1.02,
                                     s='',
                                     fontsize=8,
                                     transform=subfig1.transAxes)
    else:
        time_text = subfig1.text2D(x=0.1,
                                   y=0.9,
                                   s='',
                                   fontsize=8,
                                   transform=subfig1.transAxes)
        reward_text = subfig1.text2D(x=0.62,
                                     y=0.9,
                                     s='',
                                     fontsize=8,
                                     transform=subfig1.transAxes)
        episode_text = subfig1.text2D(x=0.4,
                                      y=0.96,
                                      s='',
                                      fontsize=8,
                                      transform=subfig1.transAxes)
        episode_text.set_text('Episode ' + str(episode_number))

    # Function called repeatedly to draw each frame
    def render_one_frame(frame, *fargs):
        temp_env = fargs[0]  # Extract environment from passed args

        # Draw the chaser body
        chaser_body.set_data(chaser_body_inertial[frame, 0, :],
                             chaser_body_inertial[frame, 1, :])
        chaser_body.set_3d_properties(chaser_body_inertial[frame, 2, :])

        # Draw the front face of the chaser body in a different colour
        chaser_front_face.set_data(chaser_front_face_inertial[frame, 0, :],
                                   chaser_front_face_inertial[frame, 1, :])
        chaser_front_face.set_3d_properties(chaser_front_face_inertial[frame,
                                                                       2, :])

        # Draw the target body
        target_body.set_data(target_body_inertial[frame, 0, :],
                             target_body_inertial[frame, 1, :])
        target_body.set_3d_properties(target_body_inertial[frame, 2, :])

        # Draw the front face of the target body in a different colour
        target_front_face.set_data(target_front_face_inertial[frame, 0, :],
                                   target_front_face_inertial[frame, 1, :])
        target_front_face.set_3d_properties(target_front_face_inertial[frame,
                                                                       2, :])

        # Drawing a dot in the centre of the chaser
        chaser_body_dot._offsets3d = ([chaser_x[frame]], [chaser_y[frame]],
                                      [chaser_z[frame]])

        # Update the time text
        time_text.set_text('Time = %.1f s' % (frame * temp_env.TIMESTEP))

        # Update the reward text
        reward_text.set_text('Total reward = %.1f' %
                             cumulative_reward_log[frame])

        if extra_information:
            # Updating the instantaneous reward bar graph
            reward_bar[0].set_width(instantaneous_reward_log[frame])
            # And colouring it appropriately
            if instantaneous_reward_log[frame] < 0:
                reward_bar[0].set_color('r')
            else:
                reward_bar[0].set_color('g')

            # Updating the loss bar graph
            loss_bar[0].set_width(loss_log[frame])

            # Updating the q-distribution plot
            for this_bar, new_value in zip(q_dist_bar,
                                           critic_distributions[frame, :]):
                this_bar.set_height(new_value)

            # Updating the target q-distribution plot
            for this_bar, new_value in zip(
                    target_q_dist_bar, target_critic_distributions[frame, :]):
                this_bar.set_height(new_value)

            # Updating the projected target q-distribution plot
            for this_bar, new_value in zip(
                    projected_q_dist_bar,
                    projected_target_distribution[frame, :]):
                this_bar.set_height(new_value)
        # Return every artist that changed this frame (needed when blit=True;
        # blit is set to False below, but returning them is harmless either way)
        return chaser_body_dot, time_text, chaser_body, chaser_front_face, target_body, target_front_face

    # Generate the animation!
    fargs = [temp_env]  # bundling additional arguments
    animator = animation.FuncAnimation(figure,
                                       render_one_frame,
                                       frames=np.linspace(
                                           0,
                                           len(states) - 1,
                                           len(states)).astype(int),
                                       blit=False,
                                       fargs=fargs)
    """
    frames = the int that is passed to render_one_frame. I use it to selectively plot certain data
    fargs = additional arguments for render_one_frame
    interval = delay between frames in ms
    """

    # Save the animation!
    if temp_env.SKIP_FAILED_ANIMATIONS:
        try:
            # Save it to the working directory [have to], then move it to the proper folder
            animator.save(filename=filename + '_episode_' +
                          str(episode_number) + '.mp4',
                          fps=30,
                          dpi=100)
            # Make directory if it doesn't already exist
            os.makedirs(os.path.dirname(save_directory + filename +
                                        '/videos/'),
                        exist_ok=True)
            # Move animation to the proper directory
            os.rename(
                filename + '_episode_' + str(episode_number) + '.mp4',
                save_directory + filename + '/videos/episode_' +
                str(episode_number) + '.mp4')
        except:
            print("Skipping animation for episode %i due to an error" %
                  episode_number)
            # Try to delete the partially completed video file
            try:
                os.remove(filename + '_episode_' + str(episode_number) +
                          '.mp4')
            except:
                pass
    else:
        # Save it to the working directory [have to], then move it to the proper folder
        animator.save(filename=filename + '_episode_' + str(episode_number) +
                      '.mp4',
                      fps=30,
                      dpi=100)
        # Make directory if it doesn't already exist
        os.makedirs(os.path.dirname(save_directory + filename + '/videos/'),
                    exist_ok=True)
        # Move animation to the proper directory
        os.rename(
            filename + '_episode_' + str(episode_number) + '.mp4',
            save_directory + filename + '/videos/episode_' +
            str(episode_number) + '.mp4')

    del temp_env
    plt.close(figure)
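Saving an animation.FuncAnimation like the one above requires a movie writer such as ffmpeg to be installed. A stripped-down, self-contained sketch of the same save path, with purely illustrative sine-wave data:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation

fig, ax = plt.subplots()
ax.set_xlim(0, 2 * np.pi)
ax.set_ylim(-1.1, 1.1)
line, = ax.plot([], [])  # note the comma: plot() returns a list of Line2D objects
x = np.linspace(0, 2 * np.pi, 200)

def update(frame):
    line.set_data(x[:frame], np.sin(x[:frame]))
    return line,  # with blit=True, return every artist that changed

anim = animation.FuncAnimation(fig, update, frames=len(x), blit=True)
anim.save('sine.mp4', fps=30, dpi=100)  # requires ffmpeg on the PATH
plt.close(fig)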
Example #46
0
    img1 = np.expand_dims(img, 0)
    tic = time.time()
    result = centernet.test_one_image(img1)
    toc = time.time()
    print('time:', toc - tic)
    id_to_clasname = {k: v for (v, k) in classname_to_ids.items()}
    scores = result[0]
    bbox = result[1]
    class_id = result[2]
    classes = [id_to_clasname[key] for key in class_id]

    plt.figure(1)
    plt.imshow(np.squeeze(img))
    axis = plt.gca()
    for i in range(len(scores)):
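        # The boxes appear to be stored as [ymin, xmin, ymax, xmax] (inferred
        # from the index order below), so x and y are swapped for the patch.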
        rect = patches.Rectangle((bbox[i][1], bbox[i][0]),
                                 bbox[i][3] - bbox[i][1],
                                 bbox[i][2] - bbox[i][0],
                                 linewidth=2,
                                 edgecolor='b',
                                 facecolor='none')
        axis.add_patch(rect)
        plt.text(bbox[i][1],
                 bbox[i][0],
                 id_to_clasname[class_id[i]] + str(' ') + str(scores[i]),
                 color='red',
                 fontsize=12)
    plt.savefig(save_addesss + image)
    # plt.show()
    plt.close()
Example #47
0
def vis_one_image(
        im, im_name, output_dir, boxes, segms=None, keypoints=None, body_uv=None, thresh=0.9,
        kp_thresh=2, dpi=200, box_alpha=0.0, dataset=None, show_class=False,
        ext='pdf'):
    """Visual debugging of detections."""
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return

    dataset_keypoints, _ = keypoint_utils.get_keypoints()

    if segms is not None and len(segms) > 0:
        masks = mask_util.decode(segms)

    color_list = colormap(rgb=True) / 255

    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    # Display in largest to smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)

    mask_color_id = 0
    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue

        # show box (off by default)
        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1],
                          fill=False, edgecolor='g',
                          linewidth=0.5, alpha=box_alpha))

        if show_class:
            ax.text(
                bbox[0], bbox[1] - 2,
                get_class_string(classes[i], score, dataset),
                fontsize=3,
                family='serif',
                bbox=dict(
                    facecolor='g', alpha=0.4, pad=0, edgecolor='none'),
                color='white')

        # show mask
        if segms is not None and len(segms) > i:
            img = np.ones(im.shape)
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1

            w_ratio = .4
            for c in range(3):
                color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
            for c in range(3):
                img[:, :, c] = color_mask[c]
            e = masks[:, :, i]

            _, contour, hier = cv2.findContours(
                e.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)

            for c in contour:
                polygon = Polygon(
                    c.reshape((-1, 2)),
                    fill=True, facecolor=color_mask,
                    edgecolor='w', linewidth=1.2,
                    alpha=0.5)
                ax.add_patch(polygon)

        # show keypoints
        if keypoints is not None and len(keypoints) > i:
            kps = keypoints[i]
            plt.autoscale(False)
            for l in range(len(kp_lines)):
                i1 = kp_lines[l][0]
                i2 = kp_lines[l][1]
                if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
                    x = [kps[0, i1], kps[0, i2]]
                    y = [kps[1, i1], kps[1, i2]]
                    line = plt.plot(x, y)
                    plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7)
                if kps[2, i1] > kp_thresh:
                    plt.plot(
                        kps[0, i1], kps[1, i1], '.', color=colors[l],
                        markersize=3.0, alpha=0.7)

                if kps[2, i2] > kp_thresh:
                    plt.plot(
                        kps[0, i2], kps[1, i2], '.', color=colors[l],
                        markersize=3.0, alpha=0.7)

            # add mid shoulder / mid hip for better visualization
            mid_shoulder = (
                kps[:2, dataset_keypoints.index('right_shoulder')] +
                kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
            sc_mid_shoulder = np.minimum(
                kps[2, dataset_keypoints.index('right_shoulder')],
                kps[2, dataset_keypoints.index('left_shoulder')])
            mid_hip = (
                kps[:2, dataset_keypoints.index('right_hip')] +
                kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
            sc_mid_hip = np.minimum(
                kps[2, dataset_keypoints.index('right_hip')],
                kps[2, dataset_keypoints.index('left_hip')])
            if (sc_mid_shoulder > kp_thresh and
                    kps[2, dataset_keypoints.index('nose')] > kp_thresh):
                x = [mid_shoulder[0], kps[0, dataset_keypoints.index('nose')]]
                y = [mid_shoulder[1], kps[1, dataset_keypoints.index('nose')]]
                line = plt.plot(x, y)
                plt.setp(
                    line, color=colors[len(kp_lines)], linewidth=1.0, alpha=0.7)
            if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
                x = [mid_shoulder[0], mid_hip[0]]
                y = [mid_shoulder[1], mid_hip[1]]
                line = plt.plot(x, y)
                plt.setp(
                    line, color=colors[len(kp_lines) + 1], linewidth=1.0,
                    alpha=0.7)
                
    #   DensePose Visualization Starts!!
    ##  Get full IUV image out 
    IUV_fields = body_uv[1]
    #
    All_Coords = np.zeros(im.shape)
    All_inds = np.zeros([im.shape[0],im.shape[1]])
    K = 26
    ##
    inds = np.argsort(boxes[:,4])
    ##
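    # Composite each sufficiently confident detection's IUV output into the
    # full-image canvases, only writing pixels that are still empty.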
    for i, ind in enumerate(inds):
        entry = boxes[ind,:]
        if entry[4] > 0.65:
            entry=entry[0:4].astype(int)
            ####
            output = IUV_fields[ind]
            ####
            All_Coords_Old = All_Coords[ entry[1] : entry[1]+output.shape[1],entry[0]:entry[0]+output.shape[2],:]
            All_Coords_Old[All_Coords_Old==0]=output.transpose([1,2,0])[All_Coords_Old==0]
            All_Coords[ entry[1] : entry[1]+output.shape[1],entry[0]:entry[0]+output.shape[2],:]= All_Coords_Old
            ###
            CurrentMask = (output[0,:,:]>0).astype(np.float32)
            All_inds_old = All_inds[ entry[1] : entry[1]+output.shape[1],entry[0]:entry[0]+output.shape[2]]
            All_inds_old[All_inds_old==0] = CurrentMask[All_inds_old==0]*i
            All_inds[ entry[1] : entry[1]+output.shape[1],entry[0]:entry[0]+output.shape[2]] = All_inds_old
    #
    All_Coords[:,:,1:3] = 255. * All_Coords[:,:,1:3]
    All_Coords[All_Coords>255] = 255.
    All_Coords = All_Coords.astype(np.uint8)
    All_inds = All_inds.astype(np.uint8)
    #
    IUV_SaveName = os.path.basename(im_name).split('.')[0]+'_IUV.png'
    INDS_SaveName = os.path.basename(im_name).split('.')[0]+'_INDS.png'
    cv2.imwrite(os.path.join(output_dir, '{}'.format(IUV_SaveName)), All_Coords )
    cv2.imwrite(os.path.join(output_dir, '{}'.format(INDS_SaveName)), All_inds )
    print('IUV written to: ' , os.path.join(output_dir, '{}'.format(IUV_SaveName)) )
    ###
    ### DensePose Visualization Done!!
    #
    output_name = os.path.basename(im_name) + '.' + ext
    fig.savefig(os.path.join(output_dir, '{}'.format(output_name)), dpi=dpi)
    plt.close('all')
Example #48
0
def looper(p):
	vP.save_data(p)

	robots = parallel_wop(partial(Object,o_type='R',p=p),range(p.num_robots))
	after_replication_ratio = None
	for i in range(10):
		for j in range(20):
			options = parallel_wop(partial(Object,o_type='O',p=p),range(p.num_opts))
			r = np.random.choice(np.arange(5,(p.boundary_max - p.boundary_min)/(p.num_opts),0.001),int(sqrt(p.num_robots+20)),replace = False)
			theta = np.random.choice(np.arange(0,2*np.pi,0.0001),int(sqrt(p.num_robots+10)*2),replace=False)
			p.r_poses = []
			for i in r:
				for j in theta:
					p.r_poses.append([i,j])
			for r in robots:
				r.assigned_opt = np.random.randint(low = 1,high = p.num_opts+1)
				options[r.assigned_opt-1].assigned_count += 1
				while options[r.assigned_opt-1].assigned_count>options[r.assigned_opt-1].to_be_assigned_count:
					options[r.assigned_opt-1].assigned_count -= 1
					r.assigned_opt = np.random.randint(low = 1,high = p.num_opts+1)
					options[r.assigned_opt-1].assigned_count += 1
				[radius,theta] = p.r_poses[r.id]
				r.pose = options[r.assigned_opt-1].pose + np.array([radius*np.cos(theta),radius*np.sin(theta)])
				r.response = 0	# 0 = "No", 1 = "Yes"
				r.opt = None
				r.best_opt = None
				r.yes_count = 0
				r.no_count = 0
			anim = Animator()
			anim.plotter(options=options,robots=robots,p=p)
			model = VoterModel(anim,robots,options,p)
			plt.show()
			model.dissemination(robots,options,p)
			matching = model.compare_with_best(options)
			if int(model.consensus) and matching:
				result = "achieved on best option!"
			else:
				result = "not achieved on best option!\n best option is "+ str(options[model.ref_best].id)
			anim.ax.text(10,110,s = "Consensus " + result)
			
			no = 0
			yes = 0
			for r in robots:
				if r.response == 0:
					no+=1
				elif r.response == 1:
					yes+=1

			if isinstance(model.best_option,type(None)) == False and isinstance(model.ref_best,type(None)) == False:
				out = pd.DataFrame(data=[{'n':p.num_opts,'$\mu_{m_1}$':p.mu_m_1,'$\sigma_{m_1}$':p.sigma_m_1,'$\mu_{h_{pred}}$':p.mu_h_pred,'$\sigma_{h_{pred}}$':p.sigma_h_pred,'$\mu_{h_1}$':p.mu_h_1,'$\sigma_{h_1}$':p.sigma_h_1,'$x_{max}$ opt No.':model.ref_best,'$x_{max}$':options[model.ref_best].quality,'$CDM$ opt No.':model.best_option,'$CDM$':options[model.best_option].quality,'Iterations':model.iterations,'no:yes':no/(yes+0.001),'after_replication_no:yes':after_replication_ratio}],columns=vP.data_columns_name)

			else:
				out = pd.DataFrame(data=[{'n':p.num_opts,'$\mu_{m_1}$':p.mu_m_1,'$\sigma_{m_1}$':p.sigma_m_1,'$\mu_{h_{pred}}$':p.mu_h_pred,'$\sigma_{h_{pred}}$':p.sigma_h_pred,'$\mu_{h_1}$':p.mu_h_1,'$\sigma_{h_1}$':p.sigma_h_1,'$x_{max}$ opt No.':model.ref_best,'$x_{max}$':None,'$CDM$ opt No.':model.best_option,'$CDM$':None,'Iterations':model.iterations,'no:yes':no/(yes+0.001),'after_replication_no:yes':after_replication_ratio}],columns=vP.data_columns_name)

			out.to_csv(p.data_f_path,mode = 'a',header = False, index=False)
			plt.close()

		for r in robots:
			r.prev_num_opts = p.num_opts

		p.num_opts += np.random.randint(low=-1,high=2)
		if p.num_opts <2:
			p.num_opts = np.random.randint(low=2,high=4)
		model.replication_phase(robots=robots,p=p)
		no = 0
		yes = 0
		for r in robots:
			if r.response == 0:
				no+=1
			elif r.response == 1:
				yes+=1
		after_replication_ratio = no/yes
		sum = 0
		for r in robots:
			sum += r.threshold
		muh = sum/len(robots)
		variance = 0
		for r in robots:
			variance += (r.threshold-muh)**2
		variance = (variance/(len(robots)-1))**0.5
		p.mu_h_1 = muh
		p.mu_h_2 = muh
		p.sigma_h_1 = variance
		p.sigma_h_2 = variance
		p.packaging()
Example #49
0
   #=============================
   ax1 = fig.add_subplot(133)
   ax1.get_xaxis().set_visible(False)
   ax1.get_yaxis().set_visible(False)   

   _ = ax1.imshow(rgb_img)
   #plt.title('c) CRF prediction', loc='left', fontsize=6)
   im2 = ax1.imshow(resr, cmap=cmap, alpha=0.5, vmin=0, vmax=len(labels))
   divider = make_axes_locatable(ax1)
   cax = divider.append_axes("right", size="5%")
   cb=plt.colorbar(im2, cax=cax)
   cb.set_ticks(0.5+np.arange(len(labels)+1))
   cb.ax.set_yticklabels(labels)
   cb.ax.tick_params(labelsize=4)
   plt.savefig(name+'_mres.png', dpi=600, bbox_inches='tight')
   del fig; plt.close()
   
   #=============================================   
   #=============================================
   if os.name=='posix': # true if linux/mac
      elapsed = (time.time() - start)
   else: # windows
      elapsed = (time.clock() - start)
   print("Processing took "+ str(elapsed/60) + "minutes")
   
   # write report
   file = open(name+'_report_'+hostname+'.txt','w') 
   file.write('Image: '+image_path+'\n')
   counter = 0
   for label in labels:
      file.write(label+': '+str(np.sum(resr==counter)/(nxo*nyo))+'\n')
Example #50
0
    def make_plot(self, classifier, show, save, target_bitrate, hvs_metric, ymin, ymax, figsize, baselines=None,
                  topk=1, font_name='times_roman'):

        # hvs_color = 'darkturquoise'
        # hvs_color = 'palevioletred'
        hvs_color = 'grey'
        acc_color = 'midnightblue'

        if baselines is None:
            baselines = []

        # ========= get data
        # get rnn compression data
        rnn_accuracy_data = self.get_accuracy_data_rnn(target_bitrate, classifier, topk, True)
        rnn_hvs_data = self.get_hvs_data_rnn(target_bitrate, hvs_metric)

        baselines_accuracy_data = self.get_accuracy_data_baselines(target_bitrate, classifier, topk, baselines, True,
                                                                   True)
        baselines_hvs_data = self.get_hvs_data_baselines(target_bitrate, hvs_metric, baselines, True)

        # ========= make plot
        fig, ax1 = plt.subplots(figsize=figsize)
        ax2 = ax1.twinx()

        # baseline data
        for baseline_hvs, key in zip(baselines_hvs_data, baselines):
            ax1.axhline(float(baseline_hvs[_BASELINE_IDX_VAL]), color=hvs_color,
                        linestyle=convert_linestyle(self._configs[key]['linestyle']), label=COMPRESSION_NAMES[key],
                        lw=self._configs[key]['lw'])

        for baseline_acc, key in zip(baselines_accuracy_data, baselines):
            ax1.axhline(float(baseline_acc[_BASELINE_IDX_VAL]), color=acc_color,
                        linestyle=convert_linestyle(self._configs[key]['linestyle']),
                        label=COMPRESSION_NAMES[key], lw=self._configs[key]['lw'])

        # == fill in data RNN compression
        # left y-axis (MS-SSIM)
        ax1.set_xlabel(r'$\alpha$', fontproperties=get_font(font_name, FONTSIZES.Large))
        ax1.set_ylabel('MS-SSIM', fontproperties=get_font(font_name, FONTSIZES.Large), labelpad=10, color=hvs_color)
        ax1.plot(rnn_hvs_data[:, _RNN_IDX_ALPHA], rnn_hvs_data[:, _RNN_IDX_VAL], color=hvs_color,
                 lw=self._configs['rnn']['lw'], linestyle=self._configs['rnn']['linestyle'],
                 markersize=3 * self._configs['rnn']['lw'], marker=self._configs['rnn']['marker'])
        ax1.tick_params(axis='y', labelcolor=hvs_color)
        ax1.tick_params(axis='y', which='minor', width=0.7, colors=hvs_color)
        ax1.tick_params(axis='y', which='major', colors=hvs_color)

        # right y-axis (Accuracy)
        ax2.set_ylabel('Preserved Val. Accuracy', fontproperties=get_font(font_name, FONTSIZES.Large), labelpad=10,
                       color=acc_color)
        ax2.plot(rnn_accuracy_data[:, _RNN_IDX_ALPHA], rnn_accuracy_data[:, _RNN_IDX_VAL], color=acc_color,
                 lw=self._configs['rnn']['lw'], linestyle=self._configs['rnn']['linestyle'],
                 markersize=3 * self._configs['rnn']['lw'], marker=self._configs['rnn']['marker'])
        ax2.tick_params(axis='y', labelcolor=acc_color)
        ax2.tick_params(axis='y', which='minor', width=0.7, colors=acc_color)
        ax2.tick_params(axis='y', which='major', colors=acc_color)

        # == format
        # axis limits
        ax1.set_xlim((-0.05, 1.05))
        ax1.set_ylim((ymin, ymax))
        ax2.set_ylim((ymin, ymax))

        # ticks
        ax1.yaxis.set_major_locator(MultipleLocator(0.05))
        ax1.yaxis.set_minor_locator(MultipleLocator(0.025))
        ax1.set_xticks(np.arange(0, 1.25, 0.25))
        ax2.yaxis.set_major_locator(MultipleLocator(0.05))
        ax2.yaxis.set_minor_locator(MultipleLocator(0.025))
        ax2.set_xticks(np.arange(0, 1.25, 0.25))

        # fontprops
        for labelx in ax1.get_xticklabels():
            labelx.set_fontproperties(get_font(font_name, FONTSIZES.large))

        for labely1 in ax1.get_yticklabels():
            labely1.set_fontproperties(get_font(font_name, FONTSIZES.large))
            labely1.set_color(hvs_color)

        for labely2 in ax2.get_yticklabels():
            labely2.set_fontproperties(get_font(font_name, FONTSIZES.large))
            labely2.set_color(acc_color)

        # grid, facecolor
        ax1.grid(True, color=_GRID_COLOR, linewidth=0.5)
        ax1.set_facecolor(_FACECOLOR)
        ax2.spines['right'].set_visible(True)
        ax2.spines['right'].set_color(acc_color)
        ax2.spines['left'].set_visible(True)
        ax2.spines['left'].set_color(hvs_color)

        # legend for compression method
        legend1 = ax1.legend(**self._get_compression_legend_kwargs(configs=self._configs,
                                                                   legend_loc='lower center',
                                                                   font_name=font_name,
                                                                   compression_keys=['rnn', *baselines]))
        ax1.add_artist(legend1)
        fig.tight_layout()

        if show:
            plt.show()

        if save:
            if not os.path.exists(self._plots_save_dir):
                os.makedirs(self._plots_save_dir)

            save_as = '{}_tradeoff_{}_{}_{}bpp.png'.format(self._dataset, hvs_metric, classifier, target_bitrate)
            fig.savefig(os.path.join(self._plots_save_dir, save_as), dpi=200)
            print('plot saved as {}'.format(os.path.join(self._plots_save_dir, save_as)))

        plt.close(fig)
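The snippet above assumes ax2 was created as a twin of ax1; a minimal standalone sketch of that dual-axis pattern (data, colors and labels here are illustrative, not taken from the original):
# Minimal twin-axis sketch; names and data are illustrative.
import numpy as np
import matplotlib.pyplot as plt

fig, ax1 = plt.subplots()
ax2 = ax1.twinx()  # second y-axis sharing the same x-axis

alpha = np.linspace(0, 1, 11)
ax1.plot(alpha, alpha ** 2, color='tab:blue')
ax2.plot(alpha, 1 - 0.5 * alpha, color='tab:red')

ax1.set_xlabel(r'$\alpha$')
ax1.set_ylabel('MS-SSIM', color='tab:blue')
ax2.set_ylabel('Preserved Val. Accuracy', color='tab:red')
ax1.tick_params(axis='y', labelcolor='tab:blue')
ax2.tick_params(axis='y', labelcolor='tab:red')
fig.tight_layout()
plt.show()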
Example #51
0
def close_figures():
    plt.close("all")
    yield
    plt.close("all")
Example #52
0
def main():
    parser = argparse.ArgumentParser(prog='LES_CP')
    parser.add_argument("casename")
    parser.add_argument("path_root")
    parser.add_argument("dTh", type=int)
    parser.add_argument("--zparams", nargs='+', type=int)
    parser.add_argument('--rparams', nargs='+', type=int)
    parser.add_argument("--tmin")
    parser.add_argument("--tmax")
    args = parser.parse_args()

    global cm_bwr, cm_grey, cm_vir, cm_hsv
    cm_bwr = plt.cm.get_cmap('bwr')
    cm_grey = plt.cm.get_cmap('gist_gray_r')
    cm_hsv = plt.cm.get_cmap('hsv')

    nml, dTh, z_params, r_params, tmin, tmax = set_input_parameters(args)
    i0_center, j0_center = define_geometry(case_name, nml)

    ng = len(z_params)
    kmax = np.amax(z_params) / dx[2]

    print(' ')
    print('--- plotting timeseries ---')
    fig, axes = plt.subplots(1, 4, figsize=(20, 4))
    for istar in range(ng):
        zstar = z_params[istar]
        rstar = r_params[istar]
        irstar = int(np.round(rstar / dx[0]))
        id = 'dTh' + str(dTh) + '_z' + str(zstar) + '_r' + str(rstar)

        path = os.path.join(path_root, id)
        path_in = os.path.join(path_root, id, 'figs_vorticity')
        path_fields = os.path.join(path_root, id, 'fields')
        path_out = os.path.join(path_root, 'figs_vorticity')
        if not os.path.exists(path_out):
            os.mkdir(path_out)

        filename = 'Stats_rotational.nc'
        rootgrp = nc.Dataset(os.path.join(path_in, filename),
                             'r+',
                             format='NETCDF4')
        ts_grp = rootgrp.groups['timeseries']
        times = ts_grp.variables['time'][:]
        max = ts_grp.variables['vort_yz_max'][:]
        min = ts_grp.variables['vort_yz_min'][:]
        sum = ts_grp.variables['vort_yz_sum'][:]
        env = ts_grp.variables['vort_yz_env'][:]
        rootgrp.close()

        ax = axes[0]
        ax.plot(times, max, '-o', markeredgecolor='w', label=id)
        ax.set_title('max(vort_yz)')
        ax.set_xlabel('time  [s]')
        ax = axes[1]
        # ax.plot(times, min, '-o', markeredgecolor='w', label=id)
        ax.set_title('min(vort_yz)')
        ax.set_xlabel('time  [s]')
        ax = axes[2]
        ax.plot(times, sum, '-o', markeredgecolor='w', label=id)
        ax.set_title('sum_ijk(vort_yz)')
        ax.set_xlabel('time  [s]')
        ax = axes[3]
        ax.plot(times, env, '-o', markeredgecolor='w', label=id)
        ax.set_title('environmental vort_yz')
        ax.set_xlabel('time  [s]')

    for ax in axes:
        ax.legend(loc='best')
    # plt.lines.set_markeredgewidth(0.0)
    plt.tight_layout()
    plt.savefig(
        os.path.join(path_out, 'dTh_' + str(dTh) + '_vort_yz_domain.png'))
    plt.close(fig)

    # --------------------------------------
    print(' ')
    print('--- plotting r=1km ---')
    dTh_params = [2, 3, 4]
    z_params = [1225, 1000, 870]
    r_params = z_params
    n_params = len(dTh_params)
    fig, axes = plt.subplots(1, 4, figsize=(20, 4))
    for istar in range(n_params):
        dTh = dTh_params[istar]
        zstar = z_params[istar]
        rstar = r_params[istar]
        id = 'dTh' + str(dTh) + '_z' + str(zstar) + '_r' + str(rstar)
        print('id', id)
        path = os.path.join(path_root, id)
        path_in = os.path.join(path_root, id, 'figs_vorticity')
        path_out = os.path.join(path_root, 'figs_vorticity')
        if not os.path.exists(path_out):
            os.mkdir(path_out)
        print('')
        rootgrp = nc.Dataset(os.path.join(path_in, filename),
                             'r+',
                             format='NETCDF4')
        ts_grp = rootgrp.groups['timeseries']
        times = ts_grp.variables['time'][:]
        max = ts_grp.variables['vort_yz_max'][:]
        min = ts_grp.variables['vort_yz_min'][:]
        sum = ts_grp.variables['vort_yz_sum'][:]
        env = ts_grp.variables['vort_yz_env'][:]
        rootgrp.close()

        ax = axes[0]
        ax.plot(times, max, '-o', markeredgecolor='w', label=id)
        ax.set_title('max(vort_yz)')
        ax.set_xlabel('time  [s]')
        ax = axes[1]
        ax.plot(times, min, '-o', markeredgecolor='w', label=id)
        ax.set_title('min(vort_yz)')
        ax.set_xlabel('time  [s]')
        ax = axes[2]
        ax.plot(times, sum, '-o', markeredgecolor='w', label=id)
        ax.set_title('sum_ijk(vort_yz)')
        ax.set_xlabel('time  [s]')
        ax = axes[3]
        ax.plot(times, env, '-o', markeredgecolor='w', label=id)
        ax.set_title('environmental vort_yz')
        ax.set_xlabel('time  [s]')

    for ax in axes:
        ax.legend(loc='best')
        # plt.lines.set_markeredgewidth(0.0)
    plt.tight_layout()
    figname = 'vort_yz_domain_r1km.png'
    plt.savefig(os.path.join(path_out, figname))

    plt.close(fig)

    return
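The two plotting loops above repeat the same NetCDF read; a sketch of a helper that could absorb that duplication, assuming netCDF4 is imported as nc as in the snippet (the helper name is illustrative):
import os
import netCDF4 as nc

def read_vort_yz_timeseries(path_in, filename='Stats_rotational.nc'):
    """Read the vort_yz timeseries variables written by one simulation."""
    rootgrp = nc.Dataset(os.path.join(path_in, filename), 'r', format='NETCDF4')
    ts_grp = rootgrp.groups['timeseries']
    times = ts_grp.variables['time'][:]
    vort_max = ts_grp.variables['vort_yz_max'][:]
    vort_min = ts_grp.variables['vort_yz_min'][:]
    vort_sum = ts_grp.variables['vort_yz_sum'][:]
    vort_env = ts_grp.variables['vort_yz_env'][:]
    rootgrp.close()
    return times, vort_max, vort_min, vort_sum, vort_env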
Example #53
0
def main(grav):
    
    import astropy.io.ascii as asc
    import numpy as np
    import matplotlib.pyplot as plt
    import pdb

    # Initialize variables --------------------------------------------------------
    with open("def_constants.py") as f:
        code = compile(f.read(), "def_constants.py", "exec")
        exec(code)
    
    SKIPL1 = True
    DELL_CHAR = '\t' # Delimiter character
    COMM_CHAR = '#'  # Comment character
    
    # Break down template sequence for plotting
    if grav == 'f':
        DIVISIONS = [0,5,9] # EDIT THIS ARRAY TO BREAK DOWN FIELD SEQUENCE
        TITLE = 'Field gravity'
        plotColors = colorSet[9]
        suffix = ''
    elif grav == 'lg':
        DIVISIONS = [0,5] # EDIT THIS ARRAY TO BREAK DOWN LOW-G SEQUENCE [0,3,6]
        TITLE = r'low gravity'
        if SKIPL1:
            plotColors = colorSet[5]
        else:
            plotColors = colorSet[6]
        suffix = 'low-g'
    elif grav == 'g':
        DIVISIONS = [0,5] # EDIT THIS ARRAY TO BREAK DOWN LOW-G SEQUENCE
        TITLE = r'$\gamma$ gravity'
        plotColors = colorSet[5]
        suffix = r'$\gamma$'
    elif grav == 'b':
        DIVISIONS = [0,2] # EDIT THIS ARRAY TO BREAK DOWN LOW-G SEQUENCE
        TITLE = r'$\beta$ gravity'
        plotColors = colorSet[2]
        suffix = r'$\beta$'

    # Arrays to hold template data
    labels = []
    data = {}.fromkeys(BANDS)
    for band in BANDS:
        data[band] = []
    
    # Read template files ---------------------------------------------------------
    for isp, spType in enumerate(SPTYPES):
        if grav == 'lg' and (SKIPL1 and spType == 'L1'):
            continue
        for band in BANDS:
            filename = spType + band + '_' + grav + '.txt'
            try:
                tmpdata = asc.read(FOLDER_OUT_TMPL + filename, format='no_header', \
                                   delimiter=DELL_CHAR, comment=COMM_CHAR)
            except Exception:
                tmpdata = []
            if len(tmpdata) != 0:
                data[band].append(tmpdata)
                if band == 'J':
                    labels.append(spType + suffix)
    
    # Plot templates --------------------------------------------------------------
    plotColorsPop = list(plotColors)
    for idiv in range(len(DIVISIONS) - 1):
        plt.close()
        plt.rc('font', size=8)
        fig = plt.figure(idiv, figsize=(6.5,2.8))
        plt.subplots_adjust(wspace=0.1, hspace=0.001, top=0.98, \
                            bottom=0.1, right=0.98, left=0.03)
        plt.clf()
        
        # Choose colors (colorSet defined in def_constants.py)
        numtempls = DIVISIONS[idiv+1] - DIVISIONS[idiv]
        if grav == 'lg' and (SKIPL1 and idiv == 0):
            numtempls = numtempls - 1
        tmpplotColors = []
        for ipop in range(0,numtempls):
            tmpplotColors.append(plotColorsPop.pop())
        
        for iband, band in enumerate(BANDS):
            ax = fig.add_subplot(131 + iband)
            
            # Plot templates within range specified in DIVISIONS
            icolor = 0
            for itempl,templ in enumerate(data[band]):
                # Manually skip low-g L1 (template not that solid)
                #if grav == 'lg' and labels[itempl] == 'L1':
                #    continue
            
                if labels[itempl] >= ('L' + str(DIVISIONS[idiv])) \
                            and labels[itempl] < ('L' + str(DIVISIONS[idiv+1])):
                    xs = templ['col1']
                    ys = templ['col2']
                    ax.plot(xs, ys, color=tmpplotColors[icolor], label=labels[itempl], \
                            linewidth=1.3)
                    icolor = icolor + 1
            
            # Add labels and legend
            if iband == 1:
                ax.set_xlabel(r'Wavelength ($\mu$m)', labelpad=0)
            if iband == 0:
                ax.text(0.01, 0.82, TITLE, fontsize=11, transform=ax.transAxes)
                ax.text(0.01, 0.76, 'templates', fontsize=11, transform=ax.transAxes)
                ax.set_ylabel(r'Normalized Flux (F$_{\lambda}$)', labelpad=-1)
                lgd = ax.legend(handlelength=0, handletextpad=0.1, loc='upper left', \
                                bbox_to_anchor=(0,0.71), labelspacing=0.3, \
                                frameon=False, numpoints=1)
                for ilgdtxt,lgdtxt in enumerate(lgd.get_texts()):
                    plt.setp(lgdtxt, color=tmpplotColors[ilgdtxt])
            
            # Clean up axes
            ax.spines['left'].set_color('none')
            ax.spines['right'].set_color('none')
            ax.yaxis.set_ticks([])
            ax.set_ylim(ymax=ax.get_ylim()[1]*1.05)
            
            # Add annotations
            addannot(data[band], ax, band, 'L0')
        
        fig.savefig(FOLDER_OUT_PLT + 'sequence' + str(idiv) + '_' + grav + '.pdf', \
                    dpi=300)
    optimizer = tf.keras.optimizers.Adam(learning_rate=learn_rate)
    NN_SC.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['acc'])
    NN_SC.summary()

    MODEL_SAVE_FOLDER_PATH = './model/ %s = %d %d/' % (var_str , var1, var2)
    if not os.path.exists(MODEL_SAVE_FOLDER_PATH):
      os.mkdir(MODEL_SAVE_FOLDER_PATH)
    model_path = MODEL_SAVE_FOLDER_PATH + '{epoch:04d} {val_loss:.5f}.hdf5'

    checkpoint = ModelCheckpoint(filepath=model_path, monitor='val_loss', verbose=1,
                                 save_best_only=True, mode='auto', period=1)
    earlystopping = EarlyStopping(monitor='val_loss', patience=140 )
    # Single-Task Train
    SC_history = NN_SC.fit(err_train, status_train_cat,
                                          epochs=train_epoch, batch_size=batch_size, shuffle=True, verbose=1,
                                          validation_data=(err_test, status_test_cat), callbacks = [checkpoint, earlystopping])

    #■■■■■■■ Save learning curves ■■■■■■■■

    fig1 = plt.figure(1)
    plt.plot(SC_history.history['loss'], label='Train NN')
    plt.plot(SC_history.history['val_loss'], label='Test NN')
    plt.title('Model loss %s = %d %d'% (var_str , var1, var2))
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend()
    figname1 = 'Model loss %s = %d %d.png'% (var_str , var1, var2)
    model_path1 = MODEL_SAVE_FOLDER_PATH + figname1
    plt.savefig(model_path1)
    plt.close(fig1)
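Since the model is compiled with metrics=['acc'], an accuracy plot can mirror the loss plot above; a sketch that reuses the snippet's own names (SC_history, MODEL_SAVE_FOLDER_PATH, var_str, var1, var2), so it is not standalone:
    # Companion accuracy plot; assumes the names defined in the snippet above.
    fig2 = plt.figure(2)
    plt.plot(SC_history.history['acc'], label='Train NN')
    plt.plot(SC_history.history['val_acc'], label='Test NN')
    plt.title('Model accuracy %s = %d %d' % (var_str, var1, var2))
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend()
    plt.savefig(MODEL_SAVE_FOLDER_PATH + 'Model accuracy %s = %d %d.png' % (var_str, var1, var2))
    plt.close(fig2)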
trial_times = trials.stimOn_times[incl_trials]
probability_left = trials.probabilityLeft[incl_trials]
trial_blocks = (trials.probabilityLeft[incl_trials] == 0.2).astype(int)

# Get clusters in this brain region
region_clusters = combine_layers_cortex(clusters[probe]['acronym'])
clusters_in_region = clusters[probe].metrics.cluster_id[region_clusters == region]

# Select spikes and clusters
spks_region = spikes[probe].times[np.isin(spikes[probe].clusters, clusters_in_region)]
clus_region = spikes[probe].clusters[np.isin(spikes[probe].clusters,
                                             clusters_in_region)]

# Get matrix of all neuronal responses
times = np.column_stack(((trial_times - PRE_TIME), (trial_times + POST_TIME)))
pop_vector, cluster_ids = _get_spike_counts_in_bins(spks_region, clus_region, times)
pop_vector = pop_vector.T

# Plot histograms
for k in range(pop_vector.shape[1]):
    f, ax1 = plt.subplots(1, 1)
    ax1.hist(pop_vector[trial_blocks == 0, k], label='L', histtype='step', lw=3)
    ax1.hist(pop_vector[trial_blocks == 1, k], label='R', histtype='step', lw=3)
    ax1.set(xlabel='Spike count per trial', ylabel='Count', title='Region: %s, neuron %d' %
            (region, cluster_ids[k]))
    plt.legend()
    plt.savefig(join(FIG_PATH, '%s_neuron%d_%s' % (region, cluster_ids[k], eid)))
    plt.close(f)
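The population matrix above comes from the private helper _get_spike_counts_in_bins; a generic sketch of the same computation in plain NumPy, assuming the spike times, spike cluster ids and (start, stop) windows built above (the function name is illustrative):
import numpy as np

def spike_counts_in_bins(spike_times, spike_clusters, intervals):
    """Count spikes per cluster inside each (start, stop) window."""
    cluster_ids = np.unique(spike_clusters)
    counts = np.zeros((len(cluster_ids), len(intervals)), dtype=int)
    for j, (t0, t1) in enumerate(intervals):
        in_win = (spike_times >= t0) & (spike_times < t1)
        ids, n = np.unique(spike_clusters[in_win], return_counts=True)
        counts[np.searchsorted(cluster_ids, ids), j] = n
    return counts, cluster_ids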


    def learning(self, batch_size):
        '''
            Learning process
        '''
        explore_rate = self.get_explore_rate(0)
        step_index =0
        loss_list = list()
        reward_list = list()
        for epi in range(self.episode):
            state_old = self.environment.reset()
            temp_loss_list = list()
            total_reward = 0
            for t in range(self.max_t):
                #self.environment.render()
                if random.random() < explore_rate:
                    action = self.environment.action_space.sample()
                else:
                    # greedy action: evaluate the Q-network on the most recent observation
                    temp = self.mlp.predict(state_old[np.newaxis, :])
                    action = np.argmax(temp[0])

                state, reward, done, _ = self.environment.step(action)
                total_reward += reward

                if self.reward_func is not None:
                    reward = self.reward_func(self.environment, state, state_old, done)

                self.memory.append((state_old, action, reward, state, done))
                state_old = state
                if len(self.memory) > batch_size:
                    if step_index % self.C_replace == 0 or epi == self.episode - 1:
                        self.mlp.update_paramters()
                    
                    step_index = step_index + 1

                    sample_data = random.sample(self.memory, batch_size)
                    totrain = list()
                    next_states = [data[3] for data in sample_data]
                    pred_reward = self.mlp.get_target(next_states)

                    for b_index in range(batch_size):
                        temp_state, temp_action, temp_reward, temp_next_state, temp_done = sample_data[b_index]
                        # max predicted Q-value for the next state (used as the Bellman target)
                        predict_action = max(pred_reward[b_index])

                        if temp_done:
                            yj = temp_reward
                        else:
                            yj = temp_reward + self.discount_factor * predict_action

                        totrain.append([temp_state, temp_action, yj])
                    
                    ##update
                    states = [k[0] for k in totrain]
                    actions = [k[1] for k in totrain]
                    rewards = [k[2] for k in totrain]

                    loss = self.mlp.update(states, actions, rewards)
                    temp_loss_list.append(loss)

                    if len(self.memory) > self.max_memory:
                        self.memory = self.memory[1:]
                
                if done or t >= self.max_t - 1:
                    reward_list.append(total_reward)

                    if len(temp_loss_list) > 0:
                        loss_list.append(sum(temp_loss_list)/len(temp_loss_list))

                    print("Episode %d finished %d" % (epi, t))
                    break

            explore_rate = self.get_explore_rate(epi)
        
        if self.model_name is not None:
            self.mlp.saveModel(self.model_name)
        
        #plot the figure
        fig = plt.figure()
        plt.plot(range(len(loss_list)), loss_list)
        plt.xlabel("Episode")
        plt.ylabel("Average Loss")
        plt.savefig("loss_Idqn.png")
        plt.close('all')

        fig = plt.figure()
        plt.plot(range(self.episode), reward_list)
        plt.xlabel("Episode")
        plt.ylabel("Total reward")
        plt.savefig("reward_Idqn.png")
        plt.close('all')
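The method trims self.memory by hand once it exceeds max_memory; a sketch of the same bounded replay buffer built on collections.deque, which evicts old transitions automatically (class and method names are illustrative):
import random
from collections import deque

class ReplayBuffer:
    """Bounded replay memory; stores (state_old, action, reward, state, done) tuples as above."""

    def __init__(self, max_memory):
        self.memory = deque(maxlen=max_memory)  # oldest entries are dropped automatically

    def append(self, transition):
        self.memory.append(transition)

    def sample(self, batch_size):
        return random.sample(self.memory, batch_size)

    def __len__(self):
        return len(self.memory)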
def checkWorkerAssignments( workerID, savePath = '/home/ubuntu/mTurkFeedback/' ):

    print "NOT DONE REFACTORING CHECKWORKERASSIGNMENTS(). EXITING..."
    sys.exit()
    
    mtc_assignments = getReviewableAssignments()
    assignments_data = extractAssignmentData( mtc_assignments, verbose=False )

    # store assignments info here, for persistence
    # list containing all the assignments
    _all_assignments = []
    # list containing the assignments that were flagged by the turkers
    _flagged_assignments = []
    # list with the assignments that were rejected
    _rejected_assignments = []
    # list with the assignments that are not rejected nor flagged
    _good_assignments = []
    # list with the assignments were something inexpected on my side happened
    _error_assignments = []
    
    count = 0
    for ass_data in assignments_data:
        # skip if the worker ID is not of interest
        if ass_data['_worker_id'] != workerID:
            continue

        count += 1    
        cleaned_data     = cleanAssignmentData( ass_data )
        _polished_data   = cleaned_data[0]
        _error           = cleaned_data[1]
        _hit_reject_flag = cleaned_data[2]
        _hit_flag        = cleaned_data[3]
        
        _all_assignments.append( _polished_data )
        if _error:
            _error_assignments.append( _polished_data )
        else:
            if _hit_reject_flag:
                _rejected_assignments.append( _polished_data )
            else:
                if _hit_flag:
                    _flagged_assignments.append( _polished_data )
                else:
                    _good_assignments.append( _polished_data )

    # print out some stats
    print "===================================================="
    print "Assignment Stats for Worker: [%s]" % workerID
    print "===================================================="

    print "Total number of assignments:    [%d]" % (len(_all_assignments),)
    print "Rejected assignments:           [%d]" % (len(_rejected_assignments),)
    print "Flagged assignments:            [%d]" % (len(_flagged_assignments),)
    print "Good assignments:               [%d]" % (len(_good_assignments),)
    print "Error assignments:              [%d]" % (len(_error_assignments),)

    #l = [_flagged_assignments, _rejected_assignments, _good_assignments]
    #analyze_ass = [item for sublist in l for item in sublist]
    
    if not os.path.exists( savePath + workerID ):
        os.makedirs( savePath + workerID )
        
    # coco = COCO( COCO_ANNOTATION_FILE )

    # Plot and save Good Assignments
    if len(_good_assignments) > 0:
        filename = 'GoodAssignments'
        pdf = PdfPages( savePath + workerID + '/' + filename + '.pdf')
        fig = plt.figure()
            
        for ass in _good_assignments:       
            for trial_key in ass['trials'].keys():
            
                trial_data = ass['trials'][trial_key]
                human_subj_id = trial_data['human_subj_id']
                human_img_id  = trial_data['human_img_id']
                trial_flag    = trial_data['flag']
                depth         = trial_data['depth']

                plt.clf()
                tmp_img_info = coco.loadImgs( human_img_id )[0]
                tmp_img = io.imread( COCO_IMAGES_FOLDER + '/' + '%s' %(tmp_img_info['file_name']) )
                plt.imshow( tmp_img )

                _subj_anno = coco.loadAnns( human_subj_id )[0]
                # _objs_anno = coco.loadAnns( depth )
                # coco.showDepth( _subj_anno, _objs_anno, plt.gca() )

                if trial_flag:
                    ax = plt.gca()
                    xpos = int(max(ax.get_xlim())/2)
                    ypos = int(max(ax.get_ylim())/2)
                    plt.annotate('FLAGGED',xy=(xpos,ypos),horizontalalignment='center', verticalalignment='center',size=12,color='r')

                pdf.savefig()
        pdf.close()
        plt.close()

    # Plot and save Flagged Assignments
    if len(_flagged_assignments) > 0:
        filename = 'FlaggedAssignments'
        pdf = PdfPages( savePath + workerID + '/' + filename + '.pdf')
        fig = plt.figure()
               
        for ass in _flagged_assignments:        
            for trial_key in ass['trials'].keys():
            
                trial_data = ass['trials'][trial_key]
                coco_subj_id = trial_data['coco_subj_id']
                coco_img_id  = trial_data['coco_img_id']
                trial_flag   = trial_data['flag']
                depth        = trial_data['depth']

                plt.clf()
                tmp_img_info = coco.loadImgs( coco_img_id )[0]
                tmp_img = io.imread( COCO_IMAGES_FOLDER + '/' + '%s' %(tmp_img_info['file_name']) )
                plt.imshow( tmp_img )

                _subj_anno = coco.loadAnns( coco_subj_id )[0]
                # _objs_anno = coco.loadAnns( interactions )
                # coco.showInteractions( _subj_anno, _objs_anno, plt.gca() )

                if trial_flag:
                    ax = plt.gca()
                    xpos = int(max(ax.get_xlim())/2)
                    ypos = int(max(ax.get_ylim())/2)
                    plt.annotate('FLAGGED',xy=(xpos,ypos),horizontalalignment='center', verticalalignment='center',size=12,color='r')

                pdf.savefig()
        pdf.close()
        plt.close()  
    
    # Plot and save Rejected Assignments
    if len(_rejected_assignments) > 0:
        filename = 'RejectedAssignments'
        pdf = PdfPages( savePath + workerID + '/' + filename + '.pdf')
        fig = plt.figure()
        
        for ass in _rejected_assignments:       
            for trial_key in ass['trials'].keys():
            
                trial_data = ass['trials'][trial_key]
                coco_subj_id = trial_data['coco_subj_id']
                coco_img_id  = trial_data['coco_img_id']
                trial_flag   = trial_data['flag']
                depth        = trial_data['depth']

                plt.clf()
                tmp_img_info = coco.loadImgs( coco_img_id )[0]
                tmp_img = io.imread( COCO_IMAGES_FOLDER + '/' + '%s' %(tmp_img_info['file_name']) )
                plt.imshow( tmp_img )

                _subj_anno = coco.loadAnns( coco_subj_id )[0]
                # _objs_anno = coco.loadAnns( interactions )
                # coco.showInteractions( _subj_anno, _objs_anno, plt.gca() )

                if trial_flag:
                    ax = plt.gca()
                    xpos = int(max(ax.get_xlim())/2)
                    ypos = int(max(ax.get_ylim())/2)
                    plt.annotate('FLAGGED',xy=(xpos,ypos),horizontalalignment='center', verticalalignment='center',size=12,color='r')

                pdf.savefig()
        pdf.close()
        plt.close()

    return_dict = {
        "_all_assignments":_all_assignments,
        "_rejected_assignments":_rejected_assignments,
        "_flagged_assignments":_flagged_assignments,
        "_good_assignments":_good_assignments,
        "_error_assignments":_error_assignments}
    
    return return_dict
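The three PDF-writing loops above differ only in which assignment list they walk and the output filename; a hypothetical consolidation sketch, assuming the same coco, io, plt, PdfPages and COCO_IMAGES_FOLDER objects from the snippet and a 'coco_img_id' key in the trial data (the key names vary between the loops above, so treat them as placeholders):
# Hypothetical helper consolidating the repeated PDF loops; relies on names from the snippet above.
def save_assignments_pdf(assignments, out_dir, filename):
    pdf = PdfPages(os.path.join(out_dir, filename + '.pdf'))
    fig = plt.figure()
    for ass in assignments:
        for trial_key in ass['trials'].keys():
            trial_data = ass['trials'][trial_key]
            plt.clf()
            img_info = coco.loadImgs(trial_data['coco_img_id'])[0]
            img = io.imread(os.path.join(COCO_IMAGES_FOLDER, img_info['file_name']))
            plt.imshow(img)
            if trial_data['flag']:
                ax = plt.gca()
                plt.annotate('FLAGGED',
                             xy=(int(max(ax.get_xlim()) / 2), int(max(ax.get_ylim()) / 2)),
                             horizontalalignment='center', verticalalignment='center',
                             size=12, color='r')
            pdf.savefig()
    pdf.close()
    plt.close(fig)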
Example #58
0
def run(n_neurons=60, t=10, t_test=10, n_trains=10, n_encodes=20, n_tests=10, 
        f=DoubleExp(1e-3, 3e-2), f_out=DoubleExp(1e-3, 1e-1), 
        dt=0.001, neuron_type=LIF(), reg=1e-2, penalty=0.5, load_w=None, load_df=None):

    d_ens = np.zeros((n_neurons, 1))
    f_ens = f
    w_ens = None
    e_ens = None
    w_ens2 = None
    e_ens2 = None
    f_smooth = DoubleExp(1e-2, 2e-1)
    print('\nNeuron Type: %s'%neuron_type)

    if isinstance(neuron_type, DurstewitzNeuron):
        if load_w:
            w_ens = np.load(load_w)['w_ens']
        else:
            print('Optimizing ens1 encoders')
            for nenc in range(n_encodes):
                print("encoding trial %s"%nenc)
                stim_func1, stim_func2 = make_normed_flipped(value=1.4, t=t, N=1, f=f, seed=nenc)
                data = go(d_ens, f_ens, n_neurons=n_neurons, t=t, f=f, stim_func1=stim_func1, stim_func2=stim_func2, neuron_type=neuron_type, w_ens=w_ens, e_ens=e_ens, L=True)
                w_ens = data['w_ens']            
                e_ens = data['e_ens']   
                np.savez('data/multiply_w.npz', w_ens=w_ens, e_ens=e_ens)
                
                fig, ax = plt.subplots()
                sns.distplot(np.ravel(w_ens), ax=ax)
                ax.set(xlabel='weights', ylabel='frequency')
                plt.savefig("plots/tuning/multiply_%s_w_ens.pdf"%neuron_type)
                
                a_ens = f_smooth.filt(data['ens'], dt=dt)
                a_supv = f_smooth.filt(data['supv'], dt=dt)
                for n in range(n_neurons):
                    fig, ax = plt.subplots(1, 1)
                    ax.plot(data['times'], a_supv[:,n], alpha=0.5, label='supv')
                    ax.plot(data['times'], a_ens[:,n], alpha=0.5, label='ens')
                    ax.set(ylim=((0, 40)))
                    plt.legend()
                    plt.savefig('plots/tuning/multiply_ens_nenc_%s_activity_%s.pdf'%(nenc, n))
                    plt.close('all')

    if load_df:
        load = np.load(load_df)
        d_ens = load['d_ens']
        d_out1 = load['d_out1']
        taus_ens = load['taus_ens']
        taus_out1 = load['taus_out1']
        f_ens = DoubleExp(taus_ens[0], taus_ens[1])
        f_out1 =  DoubleExp(taus_out1[0], taus_out1[1])
    else:
        print('Optimizing ens1 filters and decoders')
        stim_func1, stim_func2 = make_normed_flipped(value=1.2, t=t, N=n_trains, f=f, seed=0)
        data = go(d_ens, f_ens, n_neurons=n_neurons, t=t*n_trains, f=f, dt=dt, neuron_type=neuron_type,
            stim_func1=stim_func1, stim_func2=stim_func2, w_ens=w_ens)
        d_ens, f_ens, taus_ens = df_opt(data['x'][:,0]*data['x'][:,1], data['ens'], f, dt=dt, penalty=penalty, reg=reg, name='multiply_%s'%neuron_type)
        d_ens = d_ens.reshape((n_neurons, 1))
        d_out1, f_out1, taus_out1 = df_opt(data['x'], data['ens'], f_out, dt=dt, name='multiply_%s'%neuron_type)
        np.savez('data/multiply_%s_df.npz'%neuron_type, d_ens=d_ens, taus_ens=taus_ens, d_out1=d_out1, taus_out1=taus_out1)

        times = np.arange(0, 1, 0.0001)
        fig, ax = plt.subplots()
        ax.plot(times, f.impulse(len(times), dt=0.0001), label=r"$f^x, \tau_1=%.3f, \tau_2=%.3f$" %(-1./f.poles[0], -1./f.poles[1]))
        ax.plot(times, f_ens.impulse(len(times), dt=0.0001), label=r"$f^{ens}, \tau_1=%.3f, \tau_2=%.3f, d: %s/%s$"
           %(-1./f_ens.poles[0], -1./f_ens.poles[1], np.count_nonzero(d_ens), n_neurons))
        ax.set(xlabel='time (seconds)', ylabel='impulse response', ylim=((0, 10)))
        ax.legend(loc='upper right')
        plt.tight_layout()
        plt.savefig("plots/multiply_%s_filters_ens.pdf"%neuron_type)

        times = np.arange(0, 1, 0.0001)
        fig, ax = plt.subplots()
        ax.plot(times, f_out.impulse(len(times), dt=0.0001), label=r"$f^{out}, \tau=%.3f, \tau_2=%.3f$" %(-1./f_out.poles[0], -1./f_out.poles[1]))
        ax.plot(times, f_out1.impulse(len(times), dt=0.0001), label=r"$f^{out1}, \tau_1=%.3f, \tau_2=%.3f, d: %s/%s$"
           %(-1./f_out1.poles[0], -1./f_out1.poles[1], np.count_nonzero(d_out1), n_neurons))
        ax.set(xlabel='time (seconds)', ylabel='impulse response', ylim=((0, 10)))
        ax.legend(loc='upper right')
        plt.tight_layout()
        plt.savefig("plots/multiply_%s_filters_out1.pdf"%neuron_type)

        a_ens = f_ens.filt(data['ens'], dt=dt)
        x = f.filt(data['x'][:,0]*data['x'][:,1], dt=dt).ravel()
        xhat_ens = np.dot(a_ens, d_ens).ravel()
        rmse_ens = rmse(xhat_ens, x)
        fig, ax = plt.subplots()
        ax.plot(data['times'], x, linestyle="--", label='x')
        ax.plot(data['times'], xhat_ens, label='ens, rmse=%.3f' %rmse_ens)
        ax.set(xlabel='time (s)', ylabel=r'$\mathbf{x}$', title="train ens1")
        plt.legend(loc='upper right')
        plt.savefig("plots/multiply_%s_ens1_train.pdf"%neuron_type)

        a_ens = f_out1.filt(data['ens'], dt=dt)
        x_out = f_out.filt(data['x'], dt=dt)
        xhat_ens_out = np.dot(a_ens, d_out1)
        rmse_ens_out1 = rmse(xhat_ens_out[:,0], x_out[:,0])
        rmse_ens_out2 = rmse(xhat_ens_out[:,1], x_out[:,1])
        fig, ax = plt.subplots()
        ax.plot(data['times'], x_out[:,0], linestyle="--", label='x_0')
        ax.plot(data['times'], x_out[:,1], linestyle="--", label='x_1')
        ax.plot(data['times'], xhat_ens_out[:,0], label='ens_0, rmse=%.3f' %rmse_ens_out1)
        ax.plot(data['times'], xhat_ens_out[:,1], label='ens_1, rmse=%.3f' %rmse_ens_out2)
        ax.set(xlabel='time (s)', ylabel=r'$\mathbf{x}$', title="train ens1")
        plt.legend(loc='upper right')
        plt.savefig("plots/multiply_%s_ens1_out_train.pdf"%neuron_type)

    if isinstance(neuron_type, DurstewitzNeuron):
        if load_w:
            w_ens2 = np.load(load_w)['w_ens2']
        else:
            print('Optimizing ens2 encoders')
            for nenc in range(n_encodes):
                print("encoding trial %s"%nenc)
                stim_func1, stim_func2 = make_normed_flipped(value=1.4, t=t, N=1, f=f, seed=nenc)
                data = go(d_ens, f_ens, n_neurons=n_neurons, t=t, f=f, stim_func1=stim_func1, stim_func2=stim_func2, neuron_type=neuron_type, w_ens=w_ens, w_ens2=w_ens2, e_ens2=e_ens2, L2=True)
                w_ens2 = data['w_ens2']            
                e_ens2 = data['e_ens2']   
                np.savez('data/multiply_w.npz', w_ens=w_ens, e_ens=e_ens, w_ens2=w_ens2, e_ens2=e_ens2)

                fig, ax = plt.subplots()
                sns.distplot(np.ravel(w_ens2), ax=ax)
                ax.set(xlabel='weights', ylabel='frequency')
                plt.savefig("plots/tuning/multiply_%s_w_ens2.pdf"%neuron_type)

                a_ens = f_smooth.filt(data['ens2'], dt=dt)
                a_supv = f_smooth.filt(data['supv2'], dt=dt)
                for n in range(30):
                    fig, ax = plt.subplots(1, 1)
                    ax.plot(data['times'], a_supv[:,n], alpha=0.5, label='supv2')
                    ax.plot(data['times'], a_ens[:,n], alpha=0.5, label='ens2')
                    ax.set(ylim=((0, 40)))
                    plt.legend()
                    plt.savefig('plots/tuning/multiply_ens2_nenc_%s_activity_%s.pdf'%(nenc, n))
                    plt.close('all')

    if load_df:
        load = np.load(load_df)
        d_out2 = load['d_out2']
        taus_out2 = load['taus_out2']
        f_out2 = DoubleExp(taus_out2[0], taus_out2[1])
    else:
        print('Optimizing ens2 filters and decoders')
        stim_func1, stim_func2 = make_normed_flipped(value=1.2, t=t, N=n_trains, f=f, seed=0)
        data = go(d_ens, f_ens, n_neurons=n_neurons, t=t*n_trains, f=f, dt=dt, neuron_type=neuron_type,
            stim_func1=stim_func1, stim_func2=stim_func2, w_ens=w_ens, w_ens2=w_ens2)
        d_out2, f_out2, taus_out2  = df_opt(data['x2'], data['ens2'], f_out, dt=dt, name='multiply_%s'%neuron_type)
        np.savez('data/multiply_%s_df.npz'%neuron_type, d_ens=d_ens, taus_ens=taus_ens, d_out1=d_out1, taus_out1=taus_out1, d_out2=d_out2, taus_out2=taus_out2)

        times = np.arange(0, 1, 0.0001)
        fig, ax = plt.subplots()
        ax.plot(times, f_out.impulse(len(times), dt=0.0001), label=r"$f^{out}, \tau=%.3f, \tau_2=%.3f$" %(-1./f_out.poles[0], -1./f_out.poles[1]))
        ax.plot(times, f_out2.impulse(len(times), dt=0.0001), label=r"$f^{out2}, \tau_1=%.3f, \tau_2=%.3f, d: %s/%s$"
           %(-1./f_out2.poles[0], -1./f_out2.poles[1], np.count_nonzero(d_out2), 30))
        ax.set(xlabel='time (seconds)', ylabel='impulse response', ylim=((0, 10)))
        ax.legend(loc='upper right')
        plt.tight_layout()
        plt.savefig("plots/multiply_%s_filters_out2.pdf"%neuron_type)

        a_ens2 = f_out2.filt(data['ens2'], dt=dt)
        x2 = f_out.filt(data['x2'], dt=dt)
        xhat_ens2 = np.dot(a_ens2, d_out2)
        rmse_ens2 = rmse(xhat_ens2, x2)
        fig, ax = plt.subplots()
        ax.plot(data['times'], x2, linestyle="--", label='x')
        ax.plot(data['times'], xhat_ens2, label='ens2, rmse=%.3f' %rmse_ens2)
        ax.set(xlabel='time (s)', ylabel=r'$\mathbf{x}$', title="train ens2")
        plt.legend(loc='upper right')
        plt.savefig("plots/multiply_%s_ens2_train.pdf"%neuron_type)


    rmses_ens = np.zeros((n_tests))
    rmses_ens_out = np.zeros((n_tests))
    rmses_ens2 = np.zeros((n_tests))
    for test in range(n_tests):
        print('test %s' %test)
        stim_func1, stim_func2 = make_normed_flipped(value=1.0, t=t_test, N=1, f=f, seed=100+test)
        data = go(d_ens, f_ens, n_neurons=n_neurons, t=t_test, f=f, dt=dt, neuron_type=neuron_type,
            stim_func1=stim_func1, stim_func2=stim_func2, w_ens=w_ens, w_ens2=w_ens2)

        a_ens = f_ens.filt(data['ens'], dt=dt)
        x = f.filt(data['x'][:,0]*data['x'][:,1], dt=dt).ravel()
        xhat_ens = np.dot(a_ens, d_ens).ravel()
        rmse_ens = rmse(xhat_ens, x)

        a_ens_out = f_out1.filt(data['ens'], dt=dt)
        x_out = f_out.filt(data['x'], dt=dt)
        xhat_ens_out = np.dot(a_ens_out, d_out1)
        rmse_ens_out = rmse(xhat_ens_out, x_out)
        rmse_ens_out1 = rmse(xhat_ens_out[:,0], x_out[:,0])
        rmse_ens_out2 = rmse(xhat_ens_out[:,1], x_out[:,1])

        a_ens2 = f_out2.filt(data['ens2'], dt=dt)
        x2 = f_out.filt(data['x2'], dt=dt)
        xhat_ens2 = np.dot(a_ens2, d_out2)
        rmse_ens2 = rmse(xhat_ens2, x2)
        rmses_ens[test] = rmse_ens
        rmses_ens_out[test] = rmse_ens_out
        rmses_ens2[test] = rmse_ens2        
    
        fig, ax = plt.subplots()
        ax.plot(data['times'], x_out[:,0], linestyle="--", label='x_0')
        ax.plot(data['times'], x_out[:,1], linestyle="--", label='x_1')
        ax.plot(data['times'], xhat_ens_out[:,0], label='ens_0, rmse=%.3f' %rmse_ens_out1)
        ax.plot(data['times'], xhat_ens_out[:,1], label='ens_1, rmse=%.3f' %rmse_ens_out2)
        ax.set(xlabel='time (s)', ylabel=r'$\mathbf{x}$', title="test ens1 out")
        plt.legend(loc='upper right')
        plt.savefig("plots/multiply_%s_ens1_out_test_%s.pdf"%(neuron_type, test))
        
        fig, ax = plt.subplots()
        ax.plot(data['times'], x, linestyle="--", label='x')
        ax.plot(data['times'], xhat_ens, label='ens, rmse=%.3f' %rmse_ens)
        ax.set(xlabel='time (s)', ylabel=r'$\mathbf{x}$', title="test ens1")
        plt.legend(loc='upper right')
        plt.savefig("plots/multiply_%s_ens1_test_%s.pdf"%(neuron_type, test))
        
        fig, ax = plt.subplots()
        ax.plot(data['times'], x2, linestyle="--", label='x')
        ax.plot(data['times'], xhat_ens2, label='ens2, rmse=%.3f' %rmse_ens2)
        ax.set(xlabel='time (s)', ylabel=r'$\mathbf{x}$', title="test ens2")
        plt.legend(loc='upper right')
        plt.savefig("plots/multiply_%s_ens2_test_%s.pdf"%(neuron_type, test))
        plt.close('all')

    mean_ens = np.mean(rmses_ens)
    mean_ens_out = np.mean(rmses_ens_out)
    mean_ens2 = np.mean(rmses_ens2)
    CI_ens = sns.utils.ci(rmses_ens)
    CI_ens_out = sns.utils.ci(rmses_ens_out)
    CI_ens2 = sns.utils.ci(rmses_ens2)
    
    fig, ax = plt.subplots()
    sns.barplot(data=rmses_ens2)
    ax.set(ylabel='RMSE', title="mean=%.3f, CI=%.3f-%.3f"%(mean_ens2, CI_ens2[0], CI_ens2[1]))
    plt.xticks()
    plt.savefig("plots/multiply_%s_rmse.pdf"%neuron_type)

    print('rmses: ', rmses_ens, rmses_ens_out, rmses_ens2)
    print('means: ', mean_ens, mean_ens_out, mean_ens2)
    print('confidence intervals: ', CI_ens, CI_ens_out, CI_ens2)
    np.savez('data/multiply_%s_results.npz'%neuron_type, rmses_ens=rmses_ens, rmses_ens_out=rmses_ens_out, rmses_ens2=rmses_ens2)
    return rmses_ens2
def plotHITStatus( savePath = '/home/ubuntu/amt_guis/cocoa_depth/plots/', filename = 'time_info' ):

    pdf = PdfPages( savePath + filename + '.pdf')
    fig = plt.figure()
    plt.clf()

    page_size = 100
    ass_time_info_list = []
    
    mtc = MTurkConnection( host = _host )
    assignments = getReviewableAssignments()
    #hits = getReviewableHITs()
    
    #for hit in hits:
    #    assignments = mtc.get_assignments( hit.HITId, page_size=page_size )
    for ass in assignments:
        time_info = \
        {'AcceptTime':ass.AcceptTime,
        'SubmitTime':ass.SubmitTime,
        'ExecutionTime': [ question_form_answer.fields[0] for question_form_answer in ass.answers[0] if question_form_answer.qid == '_hit_rt' ][0] }
            
        ass_time_info_list.append( time_info )
            
    ass_time_info_list.sort(key=lambda x: datetime.datetime.strptime(x['AcceptTime'],'%Y-%m-%dT%H:%M:%SZ'))
    first_assignment = ass_time_info_list[0]
    ass_time_info_list.sort(key=lambda x: datetime.datetime.strptime(x['SubmitTime'],'%Y-%m-%dT%H:%M:%SZ'))
    last_assignment = ass_time_info_list[-1]

    time_since_beginning = int(( datetime.datetime.strptime(last_assignment['SubmitTime'],'%Y-%m-%dT%H:%M:%SZ') - datetime.datetime.strptime(first_assignment['AcceptTime'],'%Y-%m-%dT%H:%M:%SZ')).total_seconds())
    completed_percentage = []
    # time since beginning in one hour intervals
    time_range = range( 0, time_since_beginning + 3600, 3600 )

    for s in time_range:
        currently_completed = \
            [x for x in ass_time_info_list if datetime.datetime.strptime(x['SubmitTime'],'%Y-%m-%dT%H:%M:%SZ') < datetime.timedelta(seconds=s) + datetime.datetime.strptime(first_assignment['SubmitTime'],'%Y-%m-%dT%H:%M:%SZ')] 
        perc = len( currently_completed ) / float( NUMBER_HITS * NUMBER_HIT_ASSIGNMENTS )
        completed_percentage.append( perc )

    per_hour_completion_rate = len(ass_time_info_list) / float(time_since_beginning / 3600)
    #print per_hour_completion_rate
    
    hours_to_completion = ((NUMBER_HITS * NUMBER_HIT_ASSIGNMENTS) - len(ass_time_info_list)) / per_hour_completion_rate
    #print hours_to_completion

    plt.plot( time_range, completed_percentage )
   
    rows = ['Completed Assignments','Total Assignments','Hour Completion Rate','Hours to Completion']
    data = [["%d"%(len(ass_time_info_list))],["%d"%(NUMBER_HITS * NUMBER_HIT_ASSIGNMENTS)],["%.2f" % per_hour_completion_rate],["%.2f" % hours_to_completion]]
    
    plt.table(cellText=data,rowLabels=rows,loc='center',colWidths = [0.1]*3)
    
    plt.title('Per hour completion percentage')
    
    plt.xticks( time_range[0::10], [str(x/3600) for x in time_range[0::10]] )
    plt.yticks([0,0.2,0.4,0.6,0.8,1],['0%', '20%','40%','60%','80%','100%'])
    
    plt.ylabel('Completion Percentage')
    plt.xlabel('Hours since beginning of task')

    plt.grid()
    pdf.savefig()
    pdf.close()
    plt.close()
    def _plot_and_save_attention(self, att_w, filename):
        plt = self.draw_attention_plot(att_w)
        plt.savefig(filename)
        plt.close()