Example #1
def test_psv_dataset_tfm_segmentation_cropped():
    import torch
    from psv.ptdataset import PsvDataset, TFM_SEGMENTATION_CROPPED

    ds = PsvDataset(transform=TFM_SEGMENTATION_CROPPED)

    assert len(ds) == 956, "The dataset should have this many entries"

    mb = ds[0]
    image = mb['image']
    mask = mb['mask']
    assert isinstance(image, torch.Tensor)
    assert isinstance(mask, torch.Tensor)

    assert mask.shape[-2:] == image.shape[-2:]

    # Hard to test due to randomness....

    PLOT=True
    if PLOT:
        import matplotlib.pyplot as plt
        import torchvision.transforms.functional as F
        a = ds.get_annotation(0)
        plt.figure()
        plt.suptitle('Visualizing test_psv_dataset_tfm_segmentation_cropped, close if ok')
        plt.subplot(121)
        plt.imshow(F.to_pil_image(image))
        plt.title('image')

        plt.subplot(122)
        plt.imshow(a.colors[mask.numpy()])
        plt.title('mask')
        plt.show()
def plot():
    plt.figure(figsize=(20, 10))
    width = 0.5
    index = np.arange(26)

    print('SUM PLOT 1', sum(row[0] for row in data))
    print('SUM PLOT 2', sum(row[1] for row in data))
    print('SUM PLOT 3', sum(row[2] for row in data))

    print(data[0])
    
    p0 = plt.bar(index, data[0], width, color='y')  # people
    p1 = plt.bar(index, data[1], width, color='g')  # nature
    p2 = plt.bar(index, data[2], width, color='r')  # activity
    p3 = plt.bar(index, data[3], width, color='b')  # food
    p4 = plt.bar(index, data[4], width, color='c')  # symbols
    p5 = plt.bar(index, data[5], width, color='m')  # objects
    p6 = plt.bar(index, data[6], width, color='k')  # flags
    p7 = plt.bar(index, data[7], width, color='w')  # uncategorized


    plt.ylabel('Usage')
    plt.title('Emoji category usage per city')
    plt.xticks(index + width/2.0, cities)
    plt.yticks(np.arange(0, 1, 0.1))
    plt.legend((p0[0], p1[0], p2[0], p3[0], p4[0], p5[0], p6[0], p7[0]), categories_names)

    plt.show()
Example #3
def compare_MC_exact():
	max_error = []
	sum_error = []
	walkers = []
	for i in range(500,20000,500):
		d1 = Diffusion(t=0.2)
		t_exact,u_exact = d1.exact()
		t_uniform , mc_uniform = d1.MC_uniform(i)
		print(len(u_exact))
		print(len(mc_uniform))
		diff = u_exact[:] - mc_uniform[:]
		temp = max(abs(diff))
		max_error.append(temp)
		temp = sum(diff)
		sum_error.append(temp)
		temp = i
		print(temp)
		walkers.append(temp)

	import matplotlib.pyplot as plt
	plt.figure(2)	
	plt.plot(walkers,max_error,'o-')
	plt.xlabel('Number of walkers')
	plt.ylabel('Maximum error')
	plt.savefig('mcuniform_error1.eps')

	plt.figure(3)
	plt.plot(walkers,sum_error,'o-')
	plt.xlabel('Number of walkers')
	plt.ylabel('Accumulated error')
	plt.savefig('mcuniform_error2.eps')
Example #4
    def __init__(self,
                 dir_name='/home/steve/Data/FusingLocationData/0010/'):
        if dir_name[-1] != '/':
            dir_name = dir_name + '/'

        imu_left = np.loadtxt(dir_name + 'LEFT_FOOT.data', delimiter=',')
        imu_right = np.loadtxt(dir_name + 'RIGHT_FOOT.data', delimiter=',')
        imu_head = np.loadtxt(dir_name + 'HEAD.data', delimiter=',')

        uwb_head = np.loadtxt(dir_name + 'uwb_result.csv', delimiter=',')
        beacon_set = np.loadtxt(dir_name + 'beaconSet.csv', delimiter=',')

        print('average time interval of left:',
              float(imu_left[-1, 1] - imu_left[0, 1]) / float(imu_left.shape[0]))
        print('average time interval of right:',
              float(imu_right[-1, 1] - imu_right[0, 1]) / float(imu_right.shape[0]))

        print('average time interval of head:',
              float(imu_head[-1, 1] - imu_head[0, 1]) / float(imu_head.shape[0]))

        plt.figure()
        plt.plot(imu_left[1:, 1] - imu_left[:-1, 1], label='time left')
        plt.plot(imu_right[1:, 1] - imu_right[:-1, 1], label='time right')
        # time_diff = imu_left[1:,1] - imu_left[:-1,1]
        # plt.plot(time_diff-time_diff.mean(),label='time diff')
        plt.plot(imu_head[1:, 1] - imu_head[:-1, 1], label=' time head')

        plt.grid()
        plt.legend()
        plt.show()
Example #5
def AddRegressor():
    rng = np.random.RandomState(1)  # works like random_state: fixes the generated random numbers
    x = np.sort(5 * rng.rand(80, 1),
                axis=0)  # rng.rand(80, 1) draws an 80x1 array of random numbers; scaling by 5 gives values in [0, 5)
    y = np.sin(x).ravel()  # ravel() flattens to 1-D
    y[::5] += 0.3 * (0.5 - rng.rand(16))

    # plt.show()
    reg1 = tree.DecisionTreeRegressor(max_depth=2)
    reg2 = tree.DecisionTreeRegressor(max_depth=5)
    reg1.fit(x, y)
    reg2.fit(x, y)

    test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
    y1 = reg1.predict(test)
    y2 = reg2.predict(test)
    #plt.figure()
    plt.figure()
    plt.scatter(x, y, label='dian')
    plt.plot(test, y1, color='red', label='max_depth=2')
    plt.plot(test, y2, color='yellow', label="max_depth=5")
    plt.xlabel('data')
    plt.ylabel('target')
    plt.legend(loc='upper right')
    plt.show()
Example #6
def view(dataset, i):
    if not (hasattr(dataset, 'sensor_events')):
        return
    tmp_act_evants = dataset.activity_events.loc[
        dataset.activity_events['Activity'] == i]
    print(dataset.activities_map[i])
    print(tmp_act_evants['Duration'].describe())

    fig = plt.figure()

    tmp_act_evants['StartTime'].iloc[0]
    all = pd.DataFrame()
    for index, row in tmp_act_evants.iterrows():
        myse = dataset.sensor_events.loc[
            (dataset.sensor_events['time'] >= row['StartTime'])
            & (dataset.sensor_events['time'] <= row['EndTime'])].copy()
        myse['relative'] = dataset.sensor_events['time'] - row['StartTime']
        myse['tpercent'] = myse['relative'] / row['Duration']
        all = pd.concat([all, myse[['tpercent', 'SID']]])
        # plt.scatter(myse['tpercent'],myse['SID'])

    tmp = all.copy()

    tmp['tpercent'] = (tmp['tpercent'] * 2).round(0) / 2
    fig = plt.figure(figsize=(10, 5))
    a = pd.pivot_table(tmp,
                       columns='tpercent',
                       index='SID',
                       aggfunc=np.count_nonzero,
                       fill_value=0)
    a = a / a.max()
    # plt.imshow(a, cmap='hot', interpolation='nearest')

    sns.heatmap(a / a.max(), cmap=sns.cm.rocket_r)
Example #7
File: utils.py  Project: leotrs/erdos
def example_image(graph, filename, layout='spring', edge_labels=False,
                  node_labels=False, show=False):
    """Generates example image with graph.

    Uses nx.draw_networkx_* methods, and matplotlib to draw and save the
    image.

    """
    # positions for all nodes
    pos = LAYOUT_DICT[layout](graph)

    # configure the image
    plt.figure(figsize=(2, 2))
    plt.axis('off')

    # draw all of the things!
    nx.draw_networkx_nodes(graph, pos, nodelist=graph.nodes(), node_color='r')
    nx.draw_networkx_edges(graph, pos, width=1.0, alpha=0.5, arrows=True)

    if node_labels:
        nlabels = {node: str(node) for node in graph.nodes()}
        nx.draw_networkx_labels(graph, pos, nlabels, font_size=16)

    if edge_labels:
        elabels = {edge: str(idx) for idx, edge in enumerate(graph.edges())}
        nx.draw_networkx_edge_labels(graph, pos, elabels)

    # place the file where it belongs
    path = os.path.join(os.environ['ERDOS_PATH'], "content/images", filename)
    plt.savefig(path)

    if show:
        plt.show()
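A hypothetical call to example_image; LAYOUT_DICT, the ERDOS_PATH environment variable, and the Petersen graph below are stand-ins for the module's own configuration and data, so this is only a usage sketch under those assumptions.

# Usage sketch (assumed setup, not from the source project)
import os
import networkx as nx

LAYOUT_DICT = {'spring': nx.spring_layout, 'circular': nx.circular_layout}  # assumed mapping
os.environ.setdefault('ERDOS_PATH', '/tmp/erdos')  # illustrative path only
os.makedirs(os.path.join(os.environ['ERDOS_PATH'], 'content/images'), exist_ok=True)

example_image(nx.petersen_graph(), 'petersen.png', layout='circular',
              node_labels=True, show=False)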
def plot_first_factors(X_data, y_data, analysis_type="lda"):
    """Generate scatterplot of two principal components (pca or lda) of data set."""
    le = preprocessing.LabelEncoder()
    le.fit(y_data)
    colours = [
        'blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'darkgreen',
        'darkorange', 'yellow', 'black'
    ]
    if analysis_type == "pca":
        transform = PCA(n_components=2)
        XX_data = transform.fit(X_data).transform(X_data)
    elif analysis_type == "lda":
        transform = LinearDiscriminantAnalysis(n_components=2)
        XX_data = transform.fit(X_data, y_data).transform(X_data)
    else:
        print("Type", analysis_type,
              "not recognised, use either 'pca' or 'lda'.")
        return
    plt.figure()
    for i, j in enumerate(le.classes_):
        plt.scatter(XX_data[y_data == j, 0],
                    XX_data[y_data == j, 1],
                    alpha=0.8,
                    color=colours[i],
                    label=str(j))
    plt.legend(loc='best', shadow=False, scatterpoints=1)
    plt.title(analysis_type)
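A possible usage sketch for plot_first_factors with scikit-learn's iris data; it assumes the sklearn names used above (preprocessing, PCA, LinearDiscriminantAnalysis) and matplotlib are already imported at module level.

# Usage sketch (assumes the module-level sklearn/matplotlib imports above)
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt

iris = load_iris()
X, y = iris.data, iris.target_names[iris.target]  # features and string class labels
plot_first_factors(X, y, analysis_type="lda")
plt.show()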
def bar_graph(category, age_grp, sex, x, y, year=None, country=None):

    plt.figure()
    plt.ylabel('ATE = Y1 - Y0')
    plt.xlabel('Years')
    plt.bar(range(len(x)), x, align='center')
    plt.xticks(range(len(x)), y, rotation='vertical')

    if country:
        plt.title("%s Suicide Rates for WC; %s ages %s" %
                  (country, sex, age_grp))
        name = country + sex + age_grp + '.png'
        # plt.show()
        plt.tight_layout()
        plt.savefig('./graphs/Countries' + '/' + sex + '/' +
                    name.replace(' ', '_'))

    elif year:
        plt.title("Change in Suicide Rates per Country in %s; %s ages %s" %
                  (year, sex, age_grp))
        name = category + sex + str(year) + age_grp + '.png'
        # plt.show()
        plt.tight_layout()
        plt.savefig('./graphs/' + category + '/' + sex + '/' + str(year) +
                    '/' + name.replace(' ', ''))
    else:
        plt.title("Change in Suicide Rates in %s Countries; %s ages %s" %
                  (category, sex, age_grp))
        name = category + sex + age_grp + '.png'
        # plt.show()
        plt.tight_layout()
        plt.savefig('./graphs/' + category + '/' + sex + '/' +
                    name.replace(' ', ''))
def graph(train_df, test_df, p_forecast, f_forecast, metric, key):
	fig = plt.figure(figsize=(40,10))
	forecast_ds = np.array(f_forecast["ds"])
	print(len(forecast_ds))
	print(len(train_df))
	forecast_ds = forecast_ds[int(train_df["values"].count()):]


	plt.plot(np.array(train_df["ds"]), np.array(train_df["y"]),'b', label="train", linewidth=3)
	plt.plot(np.array(test_df["ds"]), np.array(test_df["y"]), 'k', label="test", linewidth=3)

	plt.savefig( "../testing/compare_fourier_prophet/" + str(key) + "_raw_" + metric + ".png", transparent=True)
	prophet = np.array(p_forecast["yhat"])
	prophet_upper = np.array(p_forecast["yhat_upper"])
	prophet_lower = np.array(p_forecast["yhat_lower"])

	fourier = f_forecast["yhat"]
	fourier = fourier[len(train_df["values"]):]
	print(len(forecast_ds))
	print(len(fourier))
	plt.plot(forecast_ds, fourier, 'g', label="fourier_yhat", linewidth=3)
	plt.savefig( "../testing/compare_fourier_prophet/" + str(key) + "_fourier_" + metric + ".png", transparent=True)

	prophet = prophet[len(train_df["values"]):]
	prophet_upper = prophet_upper[len(train_df["values"]):]
	prophet_lower = prophet_lower[len(train_df["values"]):]
	plt.plot(forecast_ds, prophet, '*y', label="prophet_yhat", linewidth=3)
	plt.plot(forecast_ds, prophet_upper, 'y', label="yhat_upper", linewidth=3)
	plt.plot(forecast_ds, prophet_lower, 'y', label="yhat_lower", linewidth=3)
	
	
	plt.plot()
	plt.xlabel("Timestamp")
	plt.ylabel("Value")
	plt.legend(loc=1)
	plt.title("Prophet Model Forecast")
	plt.savefig( "../testing/compare_fourier_prophet/" + str(key) + "_compare_" + metric + ".png", transparent=True)
	plt.close()


	fig = plt.figure(figsize=(40,10))
	forecast_ds = np.array(f_forecast["ds"])
	forecast_ds = forecast_ds[len(train_df["values"]):]


	plt.plot(np.array(train_df["ds"]), np.array(train_df["y"]),'b', label="train", linewidth=3)
	plt.plot(np.array(test_df["ds"]), np.array(test_df["y"]), 'k', label="test", linewidth=3)

	prophet = np.array(p_forecast["yhat"])
	prophet_upper = np.array(p_forecast["yhat_upper"])
	prophet_lower = np.array(p_forecast["yhat_lower"])
	prophet = prophet[len(train_df["values"]):]
	prophet_upper = prophet_upper[len(train_df["values"]):]
	prophet_lower = prophet_lower[len(train_df["values"]):]
	plt.plot(forecast_ds, prophet, '*y', label="prophet_yhat", linewidth=3)
	plt.plot(forecast_ds, prophet_upper, 'y', label="yhat_upper", linewidth=3)
	plt.plot(forecast_ds, prophet_lower, 'y', label="yhat_lower", linewidth=3)
	plt.savefig( "../testing/compare_fourier_prophet/" + str(key) + "_prophet_" + metric + ".png", transparent=True)
	plt.close()
Example #11
def test_psv_dataset_crop_and_pad():
    import torch
    import psv.transforms
    import psv.ptdataset as P


    TFM_SEGMENTATION_CROPPED = psv.transforms.Compose(
        psv.transforms.ToSegmentation(),

        # Crop in on the facades
        psv.transforms.SetCropToFacades(pad=20, pad_units='percent', skip_unlabeled=True, minsize=(512, 512)),
        psv.transforms.ApplyCrop('image'),
        psv.transforms.ApplyCrop('mask'),
        
        # Resize the height to fit in the net (with some wiggle room)
        # THIS is the test case -- the crops will not usually fit anymore
        psv.transforms.Resize('image', height=400),
        psv.transforms.Resize('mask', height=400, interpolation=P.Image.NEAREST),

        # Randomly choose a subimage
        psv.transforms.SetRandomCrop(512, 512),
        psv.transforms.ApplyCrop('image'),
        psv.transforms.ApplyCrop('mask', fill=24), # 24 should be unlabeled
        
        psv.transforms.DropKey('annotation'),
        psv.transforms.ToTensor('image'),
        psv.transforms.ToTensor('mask', preserve_range=True),
        ) 

    ds = P.PsvDataset(transform=TFM_SEGMENTATION_CROPPED)

    assert len(ds) == 956, "The dataset should have this many entries"

    mb = ds[0]
    image = mb['image']
    mask = mb['mask']
    assert isinstance(image, torch.Tensor)
    assert isinstance(mask, torch.Tensor)

    assert mask.shape[-2:] == image.shape[-2:]

    # Hard to test due to randomness....

    PLOT=True
    if PLOT:
        import matplotlib.pyplot as plt
        import torchvision.transforms.functional as F
        a = ds.get_annotation(0)
        plt.figure()
        plt.suptitle('Visualizing test_psv_dataset_tfm_segmentation_cropped,\n'
                     'close if ok \n '
                     'confirm boundary is  marked unlabeled')
        plt.subplot(121)
        plt.imshow(F.to_pil_image(image))
        plt.title('image')

        plt.subplot(122)
        plt.imshow(a.colors[mask.numpy()])
        plt.title('mask')
        plt.show()
def plot_gallery(images, titles, h, w, n_row=3,n_col=4):
    plt.figure(figsize=(1.8*n_col, 2.4*n_row))
    plt.subplots_adjust(bottom=0,left=.01,right=.99,top=.90,hspace=.35)
    for i in range(n_row * n_col):
        plt.subplot(n_row,n_col,i+1)
        plt.imshow(images[i].reshape(h,w),cmap=plt.cm.gray)
        plt.title(titles[i],size=12)
        plt.xticks(())
        plt.yticks(())
Example #13
 def f(self, **kwargs):
     kwargs['always_apply'] = True
     print(kwargs)
     aug = self.tfms(**kwargs)
     # Just copy all images; the next step will be for continuous albus
     image = aug(image=self.image.copy())['image']
     plt.figure(figsize=(10, 10))
     plt.imshow(image)
     plt.axis('off')
     plt.show()
Example #14
def plot(ranks):
    plt.figure(figsize=(20, 10))
    plt.title("A2.3 PageRank (s-Sensibility)")
    plt.xlabel("Node ID")
    plt.ylabel("Pagerank")
    for row in ranks:
        plt.plot(row)

    plt.legend(['s = %.2f' % s for s in numpy.arange(0.0, 1.0, 0.05)],
               loc='upper right',
               prop={'size': 7})
    plt.savefig("submission/pagerank.png")
Example #15
def save_plot(filter_bank, name):

	rows , cols = filter_bank.shape[0:2]

	plt.figure()
	sub = 1
	for row in range(rows):
		for col in range(cols):
			plt.subplot(rows, cols, sub)
			plt.imshow(filter_bank[row][col], cmap='gray')
			plt.axis('off')
			sub += 1

	plt.savefig(name)
Example #16
def update_data(args, show_seconds=20, subsampling=10):

    # load previous
    metadata1 = np.load(os.path.join(args.session1, 'metadata.npy'), allow_pickle=True).item()
    data1 = np.load(os.path.join(args.session1, 'NIdaq.npy'), allow_pickle=True).item()
    t1 = np.arange(len(data1['analog'][0,:]))/metadata1['NIdaq-acquisition-frequency']
    
    metadata2 = np.load(os.path.join(args.session2, 'metadata.npy'), allow_pickle=True).item()
    data2 = np.load(os.path.join(args.session2, 'NIdaq.npy'), allow_pickle=True).item()
    t2 = np.arange(len(data2['analog'][0,:]))/metadata1['NIdaq-acquisition-frequency']


    metadata = np.load(os.path.join(args.session1, 'metadata.npy'), allow_pickle=True).item()
    data = np.load(os.path.join(args.session, 'NIdaq.npy'), allow_pickle=True).item()
    t = np.arange(len(data['analog'][0,:]))/metadata1['NIdaq-acquisition-frequency']

    tstart = np.min([t.max(), t1.max(), t2.max()]) - show_seconds  # show only the last show_seconds seconds


    # checking that the two realisations are the same:
    plt.figure(figsize=(7,5))

    plt.plot(t1[t1>tstart][::subsampling],
             data1[args.type][args.channel,t1>tstart][::subsampling], label='session #1')
    
    plt.plot(t2[t2>tstart][::subsampling],
             data2[args.type][args.channel,t2>tstart][::subsampling], label='session #2')

    plt.plot(t[t>tstart][::subsampling],
             data[args.type][args.channel,t>tstart][::subsampling], label='session')
    
    plt.legend()
    plt.show()


    y = input('  /!\ ----------------------- /!\ \n  Confirm that you want to replace\n the "%s" data of channel "%s" in session:\n "%s"\n by the data of session #1 ? [yes/No]' % (args.type, args.channel, args.session))
    if y in ['y', 'Y', 'yes', 'Yes']:
        
        temp = str(tempfile.NamedTemporaryFile().name)+'.npy'
        print("""
        ---> moving the old data to the temporary file directory as: "%s" [...]
        """ % temp)
        shutil.move(os.path.join(args.session, 'NIdaq.npy'), temp)
        print('applying changes [...]')
        data[args.type][args.channel,:] = data1[args.type][args.channel,:len(data[args.type][args.channel,:])]
        np.save(os.path.join(args.session, 'NIdaq.npy'), data)
        print('done !')

    else:
        print('--> data update aborted !! ')
Example #17
def plot_four(plot_data):
    
    fig = plt.figure(figsize=(20, 10))
    
#    gs = gridspec.GridSpec(1, 2, height_ratios=[1, 2]) 
    
    ax = fig.add_subplot(223, projection='3d')
    ax.scatter(plot_data['sx'],  plot_data['sy'], plot_data['sz'])
    ax.plot(plot_data['sx'],  plot_data['sy'], plot_data['sz'], color='b')
    ax.view_init(azim=0, elev=90) #xy plane
    plt.xticks(fontsize=10)
    ax.set_title('Displacement Projection in xy Plane',size=20)

    ax2 = fig.add_subplot(224, projection='3d')
    ax2.scatter(plot_data['sx'],  plot_data['sy'], plot_data['sz'])
    ax2.plot(plot_data['sx'],  plot_data['sy'], plot_data['sz'], color='b')
    ax2.view_init(azim=0, elev=45) 
    ax2.set_title('Displacement',size=20)

    ax3 = fig.add_subplot(221)
    # 50 represents number of points to make between T.min and T.max
    xnew = np.linspace(0,8,50) 
    spl = make_interp_spline(pd.Series(range(9)), plot_data['tilt1'], k=3)  # type: BSpline
    x = spl(xnew)
    spl = make_interp_spline(pd.Series(range(9)), plot_data['tilt2'], k=3)  # type: BSpline
    y = spl(xnew)
    spl = make_interp_spline(pd.Series(range(9)), plot_data['compass'], k=3)  # type: BSpline
    z = spl(xnew)
    ax3.plot(x,"b-",label='tilt1')
    ax3.plot(y,"r-",label='tilt2')
    ax3.plot(z,"g-",label='compass')
    ax3.legend(loc="lower left",prop={'size': 20})
    ax3.set_title('Orientation Plot (degree)',size=20)
    ax3.tick_params(labelsize=20)
    
    ax4 = fig.add_subplot(222)
#    x = gaussian_filter1d(plot_data['ax'], sigma=1)    
#    y = gaussian_filter1d(plot_data['ay'], sigma=1)   
#    z = gaussian_filter1d(plot_data['az'], sigma=1)   
#    mag = gaussian_filter1d(plot_data['accelerometer'], sigma=1)   
    spl = make_interp_spline(pd.Series(range(9)), plot_data['ax'], k=3)  # type: BSpline
    x = spl(xnew)
    spl = make_interp_spline(pd.Series(range(9)), plot_data['ay'], k=3)  # type: BSpline
    y = spl(xnew)
    spl = make_interp_spline(pd.Series(range(9)), plot_data['az'], k=3)  # type: BSpline
    z = spl(xnew)
    spl = make_interp_spline(pd.Series(range(9)), plot_data['accelerometer'], k=3)  # type: BSpline
    mag = spl(xnew)
    ax4.plot(x/1000,"c--",label='ax')
    ax4.plot(y/1000,"g--",label='ay')
    ax4.plot(z/1000,"b--",label='az')
    ax4.plot(mag,"r-",label='Acc')

    ax4.legend(loc="lower left",prop={'size': 20})
    ax4.set_title('Acceleration Plot (g)',size=20)
    ax4.tick_params(labelsize=20)
    
    plt.tight_layout()
    plt.show()
    fig.savefig('FourInOne.png')
Example #18
    def __init__(self, parent=None):
        super(Plot, self).__init__(parent)
        self.mainWindow = parent

        self.fig = plt.figure()
        self.canvas = FigureCanvas(self.fig)
        self.graphNavigationBar = NavigationToolbar(self.canvas, parent)
        self.legendList = PlotLegendList()
        self.defaultPlot()

        #  Current selected scans, detectors and counters
        self.detectors = []
        self.scans = []
        self.counters = []
        self.currentTab = 0

        # Adds the plotting widgets to the layout
        splitter = qtWidgets.QSplitter()
        splitter.setOrientation(qtCore.Qt.Vertical)
        splitter.addWidget(self.graphNavigationBar)
        splitter.addWidget(self.canvas)
        splitter.addWidget(self.legendList)

        # Adds the splitter as the layout of the widget
        layout = qtWidgets.QVBoxLayout()
        layout.addWidget(splitter)
        self.setLayout(layout)
Example #19
def plot_score_dist(spacing, std_along, prob_miss, max_distance):
    import matplotlib.pyplot as plt
    plt.close("Score Dist")
    plt.figure("Score Dist")
    d = np.linspace(0, max_distance, 500)
    plt.plot(d, [score_dist(di, spacing, std_along, prob_miss) for di in d])
    plt.vlines(spacing, 0, 1)
    plt.vlines(spacing * 2, 0, 1, ls='--')
    plt.annotate("Miss-detect the next mine", (spacing * 2, 0.5), (12, 0),
                 textcoords='offset points')
    plt.ylabel('$p(d)$')
    plt.xlabel('$d$')
    plt.grid()
    plt.xticks(np.arange(max_distance))
    plt.xlim(0, max_distance)
    plt.savefig('score_dist.pdf')
Example #20
def getfigure(figure=None, clear=False, show=False, save=False):
    save = save
    if not isinstance(figure, plt.Figure):
        figure = plt.figure(figure)
    if clear:
        figure.clear()
    if show or save:            
        def finalyse(name, save=save, show=show):
            if show:
                figure.show()
                figure.canvas.draw()
            fout = ""
            if save: 
                if isinstance(save, str):
                    fout = save
                    f = save
                else:
                    fout = getattr(save, "name", "")
                    #fout = name + "." + figure_format   
                    f = save
                figure.savefig(f)
                log.notice("Figure %s saved to %s"%(name, fout))
            return fout    

    else:
        def finalyse(name):
            return name                
    return figure, finalyse        
Example #21
def getfigure(figure=None, clear=False, show=False, save=False):
    save = save
    if not isinstance(figure, plt.Figure):
        figure = plt.figure(figure)
    if clear:
        figure.clear()
    if show or save:

        def finalyse(name, save=save, show=show):
            if show:
                figure.show()
                figure.canvas.draw()
            fout = ""
            if save:
                if isinstance(save, str):
                    fout = save
                    f = save
                else:
                    fout = getattr(save, "name", "")
                    #fout = name + "." + figure_format
                    f = save
                figure.savefig(f)
                log.notice("Figure %s saved to %s" % (name, fout))
            return fout

    else:

        def finalyse(name):
            return name

    return figure, finalyse
Example #22
    def initPlots(self):
        ##
        # if the plot is detached just return

        if not self.attached:
            return
        dataCom = self.dataCom

        ##
        # if the figure has been closed we need to build a new one
        if self.fig is None:
            self.fig = plt.figure(self.figId, **self._initkwargs)

        fig = self.fig
        ## clear the figure
        fig.clear()

        ###
        # make the subplots
        self.rtdSubPlots = self.makeSubPlots()

        ###
        # init all subplots
        for p in self.rtdSubPlots:
            p.initPlot()

        try:
            fig.canvas.show()
        except AttributeError:
            pass

        self.lastConfigCounter = dataCom.permanentData["COUNTER.CONFIG"]
        self.initCanvas(fig)
        self.figSize = list(fig.get_size_inches())
Example #23
    def show(self):
        if self.fig is None:
            self.fig = plt.figure(self.figId, **self._initkwargs)

        if not self.showed:
            self.fig.show()
            self.showed = True
Example #24
def fxcor_subplot(result, GCs, stars):
        '''
        Makes subplot of TDR vs VERR and VREL.
        Returns a figure object
        '''
        plt.close('all')
        fig = plt.figure(figsize=(6,6))
        gs = gridspec.GridSpec(70,40,bottom=0.10,left=0.15,right=0.98, top = 0.95)

        plt.rc('text', usetex=True)
        plt.rc('font', family='serif')

        R_vel = plt.subplot(gs[10:40,0:40])
        R_err = plt.subplot(gs[40:70,0:40])
        R_hist = plt.subplot(gs[0:10,0:40])
        
        R_hist.axis('off')
        plt.setp(R_vel.get_xticklabels(), visible=False)
        
        x = result.TDR
        y = result.VREL_helio
        R_vel.scatter(x, y, s=10, c='gray', edgecolor='none', alpha = 0.6, label = 'All')

        x = GCs.TDR
        y = GCs.VREL_helio
        R_vel.scatter(x, y, s=11, c='orange', edgecolor='none', alpha = 0.8, label = 'GCs')

        x = stars.TDR
        y = stars.VREL_helio
        R_vel.scatter(x, y, s=11, c='green', edgecolor='none', alpha = 0.8, label = 'Stars')

        R_vel.set_xlim(1,20)
        R_vel.set_ylim(-2000,5000)
        R_vel.set_ylabel(r'$v$ $[km \, s^{-1}]$')
        plt.setp(R_vel.get_yticklabels()[0], visible=False)  
        
        x = result.TDR
        y = result.VERR
        R_err.scatter(x, y,s=10, c='gray', edgecolor='none', alpha = 0.6)

        x = GCs.TDR
        y = GCs.VERR
        R_err.scatter(x, y,s=11, c='orange', edgecolor='none', alpha = 0.8)

        x = stars.TDR
        y = stars.VERR
        R_err.scatter(x, y,s=11, c='green', edgecolor='none', alpha = 0.8)

        R_err.set_ylim(2,80)
        R_err.set_xlim(1,20)
        R_err.set_ylabel(r'$\delta v$ $[km \, s^{-1}]$')
        R_err.set_xlabel(r'TDR')
        plt.setp(R_err.get_yticklabels()[-1], visible=False)
        R_vel.legend()
        
        R_hist.hist([GCs.TDR, stars.TDR], range=(1, 20), bins=50, density=True,
                    color=['orange', 'green'])
        return fig
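A hedged usage sketch for fxcor_subplot with synthetic placeholder DataFrames; the real inputs would carry TDR, VREL_helio and VERR columns from fxcor output, and the function turns on LaTeX text rendering, so a TeX installation is assumed.

# Usage sketch with clearly synthetic placeholder data
import numpy as np
import pandas as pd

rng = np.random.RandomState(0)
dummy = pd.DataFrame({'TDR': rng.uniform(1, 20, 200),
                      'VREL_helio': rng.normal(1400, 400, 200),
                      'VERR': rng.uniform(2, 80, 200)})
fig = fxcor_subplot(dummy, dummy.sample(50), dummy.sample(50))
fig.savefig('fxcor_subplot.png', dpi=200)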
Example #25
def plt_score(history):
    plt.figure()
    plt.plot()
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('model accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.savefig('acc.png')
    # loss
    plt.figure()
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.savefig('loss.png')
Example #26
    def __init__(self):
        super(PlotWidget, self).__init__()
        self.plotWidget = QWidget()
        self.fig = plt.figure()
        #self.fig.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
        self.canvas = FigureCanvas(self.fig)
        self.defaultPlot()

        layout = QVBoxLayout()
        layout.addWidget(self.canvas)
        self.plotWidget.setLayout(layout)
Example #27
 def __init__(self, dataCom, figId, **kwargs):
     fig = plt.figure(figId, **kwargs)
     self._initkwargs = kwargs
     self.figId = figId
     self.dataCom = dataCom
     self.lastDataCounter = -99
     self.lastConfigCounter = -99
     self.fig = fig
     self.rtdSubPlots = []
     self.checkCanvas()
     self.showed = False
Example #28
def make_radar(skill: int, strength: int, defence: int, willpower: int,
               attack: int, stamina: int):
    '''
    :return: PNG type image binary content
    '''
    value = [skill, strength, defence, willpower, attack, stamina]
    if not all(map(lambda x: isinstance(x, int) and 0 < x <= 100, value)):
        return
    font = FontProperties(fname=settings.PINGFANG_FONT, size=23)
    plt.figure(figsize=(4.8, 4.8))  # figure size
    name = [
        '技术\n ', '力量       ', '防守       ', '\n意志力', '       进攻 ', '       耐力 '
    ]  # axis labels
    theta = np.linspace(0, 2 * np.pi, len(name),
                        endpoint=False)  # divide the circle evenly by the number of labels
    theta = np.concatenate((theta, [theta[0]]))  # close the polygon
    value = np.concatenate((value, [value[0]]))  # close the polygon
    ax = plt.subplot(111, projection='polar')  # create the polar axes
    ax.set_theta_zero_location('N')  # put zero at the top (north)
    ax.fill(theta, value, color="#EF2D55", alpha=0.35)  # fill colour and transparency
    for i in [20, 40, 60, 80, 100]:  # draw evenly spaced grid rings
        ax.plot(theta, [i] * (6 + 1), 'k-', lw=1,
                color='#8989A3')  # n + 1 points so each ring closes
    ax.plot(theta, value, 'ro-', 'k-', lw=1, alpha=0.75,
            color='#FF465C')  # plot the data
    ax.set_thetagrids(theta * 180 / np.pi,
                      name,
                      fontproperties=font,
                      color='#8989A3')  # replace the angular tick labels
    ax.set_ylim(0, 100)  # set the radial range
    ax.spines['polar'].set_visible(False)  # hide the outermost black circle
    ax.grid(True, color='#8989A3', linestyle='-', linewidth=1)
    ax.set_yticks([])
    buf = io.BytesIO()
    plt.savefig(buf, transparent=True)  # transparent background
    plt.close('all')  # close all figures
    buf.seek(0)
    return buf
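A possible way to consume the buffer returned by make_radar; it assumes the module-level imports used above (numpy, matplotlib, io, FontProperties) and that settings.PINGFANG_FONT points at a valid font file.

# Usage sketch (assumes the module's own imports and font settings)
buf = make_radar(skill=80, strength=70, defence=65, willpower=90, attack=75, stamina=85)
if buf is not None:  # make_radar returns None for out-of-range values
    with open('radar.png', 'wb') as fh:
        fh.write(buf.read())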
Example #29
def plot_scatter_annotate(data, labels, title):
    plt.figure(figsize=(10, 10))
    assert data.shape[0] == len(labels), 'size mismatch'
    plt.subplots_adjust(bottom=0.1)
    plt.scatter(data[:, 0],
                data[:, 1],
                marker='o',
                s=100,
                cmap=plt.get_cmap('Spectral'))
    plt.title(title)
    for label, x, y in zip(labels, data[:, 0], data[:, 1]):
        plt.annotate(label,
                     xy=(x, y),
                     xytext=(-20, 20),
                     textcoords='offset points',
                     ha='right',
                     va='bottom',
                     bbox=dict(boxstyle='round,pad=0.5',
                               fc='yellow',
                               alpha=0.5),
                     arrowprops=dict(arrowstyle='->',
                                     connectionstyle='arc3,rad=0'))
    plt.show()
Example #30
def example_pic():
    """Generate the example graph picture."""
    # create graph from edge list
    graph = nx.Graph([(0, 1), (0, 2), (1, 3), (3, 0)])

    # positions for all nodes
    pos = nx.spring_layout(graph)

    # each node is labeled by its own name
    labels = {node: str(node) for node in graph.nodes()}

    # configure the image
    plt.figure(figsize=(2, 2))
    plt.axis('off')

    # draw all of the things!
    nx.draw_networkx_nodes(graph, pos, nodelist=[0, 1, 2, 3], node_color='r')
    nx.draw_networkx_edges(graph, pos, width=1.0, alpha=0.5)
    nx.draw_networkx_labels(graph, pos, labels, font_size=16)

    # place the file where it belongs
    path = os.path.join(os.environ['ERDOS_PATH'], "content/images", "nodes_edges_example.png")
    plt.savefig(path)
Example #31
def example_image(graph,
                  filename,
                  layout='spring',
                  edge_labels=False,
                  node_labels=False,
                  show=False):
    """Generates example image with graph.

    Uses nx.draw_networkx_* methods, and matplotlib to draw and save the
    image.

    """
    # positions for all nodes
    pos = LAYOUT_DICT[layout](graph)

    # configure the image
    plt.figure(figsize=(2, 2))
    plt.axis('off')

    # draw all of the things!
    nx.draw_networkx_nodes(graph, pos, nodelist=graph.nodes(), node_color='r')
    nx.draw_networkx_edges(graph, pos, width=1.0, alpha=0.5, arrows=True)

    if node_labels:
        nlabels = {node: str(node) for node in graph.nodes()}
        nx.draw_networkx_labels(graph, pos, nlabels, font_size=16)

    if edge_labels:
        elabels = {edge: str(idx) for idx, edge in enumerate(graph.edges())}
        nx.draw_networkx_edge_labels(graph, pos, elabels)

    # place the file where it belongs
    path = os.path.join(os.environ['ERDOS_PATH'], "content/images", filename)
    plt.savefig(path)

    if show:
        plt.show()
Example #32
def filter_show(filters, nx=8, margin=3, scale=10):
    FN, C, FH, FW = filters.shape
    ny = int(np.ceil(FN / nx))
    fig = plt.figure()
    fig.subplots_adjust(left=0,
                        right=1,
                        bottom=0,
                        top=1,
                        hspace=0.05,
                        wspace=0.05)

    for i in range(FN):
        ax = fig.add_subplot(ny, nx, i + 1, xticks=[], yticks=[])
        ax.imshow(filters[i, 0], cmap=plt.cm.gray_r, interpolation='nearest')
    plt.show()
Example #33
def plot_result(filename='glances.csv'):
    import pandas as pd
    import matplotlib.pyplot as plt
    data = pd.read_csv(filename)
    mem_used = data['mem_used'].apply(lambda x: float(x) / (10**9))

    plt.figure(1)

    plt.subplot(121)
    mem_used.plot(color="r", linestyle="-", linewidth=1)
    plt.xlabel('time step')
    plt.ylabel('GB')
    plt.title('mem_used')

    plt.subplot(122)
    gpu_0_proc = data['gpu_0_proc']
    gpu_0_proc.plot(color="b", linestyle="-", linewidth=1)
    plt.xlabel('time step')
    plt.ylabel('proc')
    plt.title('gpu_0_proc')
    plt.show()

    print("mean mem_used:{},mean_gpu_0_proc:{}".format(mem_used.mean(),
                                                       gpu_0_proc.mean()))
Example #34
File: vsm.py  Project: johndpope/Thesis
    def drawSurfacePlot(self, f1):
        """
            Method to draw a surface plot for test spec
            using Word Rank | Tf-Idf Score | PI

        """
        test_doc = {}
        next(f1)
        for row in f1:
            test_doc[row[0]] = [row[1], row[2]]
        sorted_test_doc = sorted(test_doc.items(),
                                 key=lambda e: e[1][0],
                                 reverse=True)

        word_rank = []
        PI_list = []
        tf_idf_list = []

        for i, word in enumerate(sorted_test_doc):
            word_rank.append(i + 1)
            PI_list.append(word[1][0])
            tf_idf_list.append(word[1][1])

        PI_list = [float(pi) for pi in PI_list]
        tf_idf_list = [float(tf_idf) for tf_idf in tf_idf_list]
        fig = plt.figure()
        ax = Axes3D(fig)
        X = word_rank
        X_clone = word_rank
        Y = tf_idf_list
        X, Y = np.meshgrid(X, Y)
        print(X)
        # Z should be a function of X and Y
        Z = PI_list
        X_clone, Z = np.meshgrid(X_clone, Z)
        ax.plot_surface(X,
                        Y,
                        Z,
                        rstride=10,
                        cstride=10,
                        cmap=plt.cm.RdBu,
                        alpha=None)
        #ax.contour(X, Y, Z, zdir='x', offset=-4, cmap=cm.hsv)
        #ax.contour(Y, Y, Z, zdir='y', offset=4, cmap=cm.hsv)
        #ax.contour(Y, Y, Z, zdir='z', offset=-2, cmap=cm.hsv)
        #ax.set_zlim(0, 1)
        plt.show()
Example #35
def plot_piledspectra():
    fig = plt.figure(figsize = (6,8)) 
    plt.xlim(5000,9000)
    
    specindex = range(0,100,10)
    offset = np.arange(0,len(specindex)) * 0.5
    ylim = [0.5, offset[-1] + 1.3]
    plt.ylim(ylim[0], ylim[1])
    
    plt.rc('text', usetex=True)
    plt.rc('font', family='serif')
    
    plt.xlabel(r'Restframe Wavelength [ \AA\ ]')
    plt.ylabel(r'Flux')
    
    line_wave = [5175., 5892., 6562.8, 8498., 8542., 8662.] 
        #       ['Mgb', 'NaD', 'Halpha', 'CaT', 'CaT', 'CaT']
    for line in line_wave:
            x = [line, line]
            y = [ylim[0], ylim[1]]
            plt.plot(x, y, c= 'gray', linewidth=1.0)
    
    plt.annotate(r'CaT', xy=(8540.0, ylim[1] + 0.05), xycoords='data', annotation_clip=False)
    plt.annotate(r'H$\alpha$', xy=(6562.8, ylim[1] + 0.05), xycoords='data', annotation_clip=False)
    plt.annotate(r'NaD', xy=(5892., ylim[1] + 0.05), xycoords='data', annotation_clip=False)
    plt.annotate(r'Mg$\beta$', xy=(5175., ylim[1] + 0.05), xycoords='data', annotation_clip=False)
    
    for i,j in zip(specindex,offset):
        iraf.noao.onedspec.continuum(input = GCssorted.ORIGINALFILE.iloc[i] + '[1]', output = '/Volumes/VINCE/OAC/continuum.fits',
            type = 'ratio', naverage = '3', function = 'spline3',
            order = '5', low_reject = '2.0', high_reject = '2.0', niterate = '10')
    
        data = fits.getdata('/Volumes/VINCE/OAC/continuum.fits', 0)
        
        hdu = fits.open(GCssorted.ORIGINALFILE.iloc[i])
        header1 = hdu[1].header
        lamRange = header1['CRVAL1']  + np.array([0., header1['CD1_1'] * (header1['NAXIS1'] - 1)]) 
        wavelength = np.linspace(lamRange[0],lamRange[1], header1['NAXIS1'])
        hdu.close()
    
        zp = 1. + (GCssorted.VREL.iloc[i] / 299792.458)
      
        plt.plot(wavelength/zp, gaussian_filter(data,2) + j, c = 'black', lw=1)
        os.remove('/Volumes/VINCE/OAC/continuum.fits')
Example #36
    def drawSurfacePlot(self, f1):
        """
            Method to draw a surface plot for test spec
            using Word Rank | Tf-Idf Score | PI

        """
        test_doc = {}
        next(f1)
        for row in f1:
            test_doc[row[0]] = [row[1], row[2]]
        sorted_test_doc = sorted(test_doc.items(), key=lambda e: e[1][0], reverse=True)

        word_rank = []
        PI_list = []
        tf_idf_list = []

        for i, word in enumerate(sorted_test_doc):
            word_rank.append(i + 1)
            PI_list.append(word[1][0])
            tf_idf_list.append(word[1][1])

        PI_list = [float(pi) for pi in PI_list]
        tf_idf_list = [float(tf_idf) for tf_idf in tf_idf_list]
        fig = plt.figure()
        ax = Axes3D(fig)
        X = word_rank
        X_clone = word_rank
        Y = tf_idf_list
        X, Y = np.meshgrid(X, Y)
        print(X)
        # Z should be a function of X and Y
        Z = PI_list
        X_clone, Z = np.meshgrid(X_clone, Z)
        ax.plot_surface(X, Y, Z, rstride=10, cstride=10, cmap=plt.cm.RdBu, alpha=None)
        #ax.contour(X, Y, Z, zdir='x', offset=-4, cmap=cm.hsv)
        #ax.contour(Y, Y, Z, zdir='y', offset=4, cmap=cm.hsv)
        #ax.contour(Y, Y, Z, zdir='z', offset=-2, cmap=cm.hsv)
        #ax.set_zlim(0, 1)
        plt.show()
Example #37
    def plot(self,x, mk="b-", axis=None, figure=None, **kwargs):
        """ plot the fitted model according to a x array 

        Parameters:
        -----------
        x : array like 
        mk : string, optional 
            marker short string description, default is 'b-'
        axis : matplotlib axis, optional
            if not given take the subplot 1,1 of figure
        figure : matplotlib figure, optional
            if not given take plt.figure()
        **kwargs : optional
            other plot parameter         
        """
        import matplotlib.pyplot as plt
        if axis is None:
            if figure is None:
                figure = plt.figure()
            
            axis = figure.add_subplot(111)
        return axis.plot(x, self.model(x), mk, **kwargs)
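A hypothetical usage sketch for this plot helper; `fitted` stands in for an instance of the surrounding class, i.e. any object that also provides the model(x) used above.

# Usage sketch (`fitted` is a hypothetical fitted-model instance)
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0.0, 10.0, 200)
line, = fitted.plot(x, mk='r--', label='model fit')  # unpack the Line2D returned by axis.plot
line.axes.set_xlabel('x')
line.axes.legend()
plt.show()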
Example #38
    def surfMagnitudePhaseImage(self, image):
        import numpy as np
        from matplotlib import cm
        
        X = np.linspace(-1, 1, 64)
        Y = np.linspace(-1, 1, 64)
        X, Y = np.meshgrid(X, Y)
        mag = np.absolute(image).astype('float')
        phase = np.angle(image).astype('float')

        
        fig = plt.figure()
        ax = fig.add_subplot(211, projection='3d')
        Z = mag
        surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.jet,
                               linewidth=0, antialiased=True)
        ax.set_zlim3d(np.min(Z), np.max(Z))
        fig.colorbar(surf)
        ax = fig.add_subplot(212, projection='3d')
        Z = phase
        surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.jet,
                               linewidth=0, antialiased=True)
        ax.set_zlim3d(np.min(Z), np.max(Z))
        fig.colorbar(surf)
        plt.show()
Example #39
        'Kd': 3,
        'sat_lim_max': 10,
        'sat_lim_min': -10
    }

    new_controller('example_configuration.cnf', 'example_controller.py', 'PIDAW', param)

    # Create a motor object for simulation
    motor = DC_Motor(configfile='model.cnf', controlmodule='example_controller')

    # Simulate the system with an alternating step reference 
    ref, u, x, y = motor.simulate(30, 'steps')

    # Visualize simulation
    t = np.array([ii * motor.h for ii in range(len(u[0,:]))])
    plt.figure(1);
    plt.step(t, u[0])
    limits = np.array(plt.axis())*np.array([1.,1.,1.1,1.1])
    plt.axis(limits)
    plt.title('Control signal(s)')

    plt.figure(2);
    plt.step(t, np.transpose(x))
    limits = np.array(plt.axis())*np.array([1.,1.,1.1,1.1])
    plt.axis(limits)
    plt.title('System states')

    plt.figure(3);
    plt.step(t, ref[0])
    plt.step(t, x[0])
    limits = np.array(plt.axis())*np.array([1.,1.,1.1,1.1])
Example #40
plt.legend()

# - - - - - - - - - - - - - - - - - - - - - - - 
# - - - - - - - - - - - - - - - - - - - - - - - 


mask = (((result['g_auto'] - result['r_auto']) < (0.2 + 0.6 * (result['g_auto'] - result['i_auto']))) &
        ((result['g_auto'] - result['r_auto']) > (-0.2 + 0.6 * (result['g_auto'] - result['i_auto']))) &
        ((result['g_auto'] - result['i_auto']) > 0.5) & 
        ((result['g_auto'] - result['i_auto']) < 1.3) &
        ((result['i_auto']) < 24))

subset = result[mask]
subset = subset.sample(n=1000)

plt.figure()
plt.scatter(result['g_auto'] - result['i_auto'], result['g_auto'] - result['r_auto'], s=10, c='gray', edgecolor='none', alpha = 0.5)
plt.scatter(subset['g_auto'] - subset['i_auto'], subset['g_auto'] - subset['r_auto'], s=20, c='blue', edgecolor='none')
plt.scatter(GCs['g_auto'] - GCs['i_auto'], GCs['g_auto'] - GCs['r_auto'], s=10, c='red', edgecolor='none')
plt.xlabel('(g - i)')
plt.ylabel('(g - r)')
plt.xlim(-1,4)
plt.ylim(-1,4)

plt.figure()
plt.scatter(subset['g_auto'] - subset['r_auto'], subset['r_auto'], s=30, c='blue', edgecolor='none')
plt.scatter(GCs['g_auto'] - GCs['r_auto'], GCs['i_auto'], s=8, c='red', edgecolor='none')
plt.ylim(13,24)
plt.gca().invert_yaxis()
plt.xlabel('(g - i)')
plt.ylabel('i')
Example #41
File: shallow.py  Project: tmmsartor/casadi
    [u, v, h] = f_step.call([p, u, v, h])

# Create an integrator function
f = MXFunction([p, uk, vk, hk], [u, v, h])
f.init()

print "generated discrete dynamics"

# Allocate memory
u = copy.deepcopy(u0)
v = copy.deepcopy(v0)
h = copy.deepcopy(h0)

# Prepare plotting
if plot_progress:
    fig = plt.figure(1)
    ax = fig.add_subplot(111, projection="3d")
    # plt.clf()
    # plt.grid(True)
    plt.ion()
    plt.hold(False)
    plt.draw()
    plt.show()

# Measurement
h_meas = []

# Simulate once to generate "measurements"
for k in range(num_measurements):
    # Visualize the pool
    if plot_progress:
Example #42
def plot_spectrum(result, correct = True, interactive = False):
    
    plt.close('all')
    plt.ioff()

    if interactive:
      plt.ion()

    hdu = fits.open(result['ORIGINALFILE'])
    galaxy = gaussian_filter(hdu[1].data, 1)
    thumbnail = hdu['THUMBNAIL'].data
    twoD = hdu['2D'].data
    header = hdu[0].header
    header1 = hdu[1].header
    hdu.close()

    lamRange = header1['CRVAL1']  + np.array([0., header1['CD1_1'] * (header1['NAXIS1'] - 1)]) 
    
    if correct:
      zp = 1. + (result['VREL'] / 299792.458)
    else:
      zp = 1.

    wavelength = np.linspace(lamRange[0],lamRange[1], header1['NAXIS1']) / zp
    ymin, ymax = np.min(galaxy), np.max(galaxy)
    ylim = [ymin, ymax] + np.array([-0.02, 0.1])*(ymax-ymin)
    ylim[0] = 0.

    xmin, xmax = np.min(wavelength), np.max(wavelength)

    ### Define multipanel size and properties
    fig = plt.figure(figsize=(8,6))
    gs = gridspec.GridSpec(200,130,bottom=0.10,left=0.10,right=0.95)

    ### Plot the object in the sky
    ax_obj = fig.add_subplot(gs[0:70,105:130])
    ax_obj.imshow(thumbnail, cmap = 'gray', interpolation = 'nearest')
    ax_obj.set_xticks([]) 
    ax_obj.set_yticks([]) 

    ### Plot the 2D spectrum
    ax_2d = fig.add_subplot(gs[0:11,0:100])
    ix_start = header['START_{}'.format(int(result['DETECT']))]
    ix_end = header['END_{}'.format(int(result['DETECT']))]
    ax_2d.imshow(twoD, cmap='spectral',
                aspect = "auto", origin = 'lower', extent=[xmin, xmax, 0, 1], 
                vmin = -0.2, vmax=0.2) 
    ax_2d.set_xticks([]) 
    ax_2d.set_yticks([]) 
    
    ### Add spectra subpanels
    ax_spectrum = fig.add_subplot(gs[11:85,0:100])
    ax_blue = fig.add_subplot(gs[110:200,0:50])
    ax_red = fig.add_subplot(gs[110:200,51:100])
    
    ### Plot some atomic lines  
    line_wave = [4861., 5175., 5892., 6562.8, 8498., 8542., 8662.] 
    #           ['Hbeta', 'Mgb', 'NaD', 'Halpha', 'CaT', 'CaT', 'CaT']
    for i in range(len(line_wave)):
        x = [line_wave[i], line_wave[i]]
        y = [ylim[0], ylim[1]]
        ax_spectrum.plot(x, y, c= 'gray', linewidth=1.0)
        ax_blue.plot(x, y, c= 'gray', linewidth=1.0)
        ax_red.plot(x, y, c= 'gray', linewidth=1.0)

    ### Plot the spectrum 
    ax_spectrum.plot(wavelength, galaxy, 'k', linewidth=1.3)
    ax_spectrum.set_ylim(ylim)
    ax_spectrum.set_xlim([xmin,xmax])
    ax_spectrum.set_ylabel(r'Arbitrary Flux')
    ax_spectrum.set_xlabel(r'Restframe Wavelength [ $\AA$ ]')
    
    ### Plot blue part of the spectrum
    x1, x2 = 300, 750 
    ax_blue.plot(wavelength[x1:x2], galaxy[x1:x2], 'k', linewidth=1.3)
    ax_blue.set_xlim(wavelength[x1],wavelength[x2])
    ax_blue.set_ylim(galaxy[x1:x2].min(), galaxy[x1:x2].max())
    ax_blue.set_yticks([]) 
    
    ### Plot red part of the spectrum
    x1, x2 = 1400, 1500
    ax_red.plot(wavelength[x1:x2], galaxy[x1:x2], 'k', linewidth=1.3)
    ax_red.set_xlim(wavelength[x1],wavelength[x2])
    ax_red.set_ylim(galaxy[x1:x2].min(), galaxy[x1:x2].max())
    ax_red.set_yticks([]) 

    ### Plot text
    #if interactive:
    textplot = fig.add_subplot(gs[80:200,105:130])
    kwarg = {'va' : 'center', 'ha' : 'left', 'size' : 'medium'}
    textplot.text(0.1, 1.0,r'ID = {} \, {}'.format(result.ID, int(result.DETECT)),**kwarg)
    textplot.text(0.1, 0.9,r'$v =$ {}'.format(int(result.VREL)), **kwarg)
    textplot.text(0.1, 0.8,r'$\delta \, v = $ {}'.format(int(result.VERR)), **kwarg)
    textplot.text(0.1, 0.7,r'SN1 = {0:.2f}'.format(result.SN1), **kwarg)
    textplot.text(0.1, 0.6,r'SN2 = {0:.2f}'.format(result.SN2), **kwarg)
    textplot.text(0.1, 0.5,r'TDR = {0:.2f}'.format(result.TDR), **kwarg)
    textplot.text(0.1, 0.4,r'SG = {}'.format(result.SG), **kwarg)
    textplot.axis('off')

    return fig
plt.plot(train_sizes, train_scores_mean, '-', color="r",
         label="Training score")
plt.plot(train_sizes, test_scores_mean, '-', color="g",
         label="Cross-validation score")
plt.ylim(0,1.2)
plt.xlim(0,200)
plt.legend(loc="best")
plt.xlabel("Train test size")
plt.savefig("learning_curves.png")
plt.close("all")


# plot the model vs the predictions

for what_plot in [0,1,2,3]:
    fig=plt.figure(figsize=(16,8))
    #
    ax1 = fig.add_subplot(1,2,1); ax2 = fig.add_subplot(1,2,2)
    ax1.tick_params(labelsize=20); ax2.tick_params(labelsize=20)
    #
    if(what_plot==3):
        # actual data
        ax1.scatter(X_test[models_features[i]]["Budget"],X_test[models_response[i]],c=X_test[models_features[i]]["AveStudioShare"],s=2.**((X_test[models_features[i]]["TheatersOpening"])+4),cmap="Reds",alpha=0.8)
        #
        # predictions
        predictions = the_best_models[i].predict(X_test[models_features[i]])
        predictions_with_error = np.random.normal(predictions,np.sqrt(best_mse[i]),size=len(predictions))
        ax2.scatter(X_test[models_features[i]]["Budget"],predictions_with_error,c=X_test[models_features[i]]["AveStudioShare"],s=2.**((X_test[models_features[i]]["TheatersOpening"])+4),cmap="Greens",alpha=0.8)
    #
    if(what_plot==2):
        # actual data
Example #44
GCs = pd.read_csv('/Volumes/VINCE/OAC/GCs_903.csv', dtype = {'ID': object}, comment = '#')

# ----------------------------------
rep1 = GCs[GCs.Alt1.isin(GCs.ID)]
df1 = pd.DataFrame()
df2 = pd.DataFrame()
for j in range(0,len(rep1)):
	df1.iloc[j] = rep1.iloc[j]



x = VIMOS['VREL_helio'] 
xerr = VIMOS['VERR'] 
y = SchuberthMatch['HRV'] 
yerr = SchuberthMatch['e.1'] 

print('rms (VIMOS - Schuberth) GCs = ', np.std(x - y))

plt.close('all')
plt.figure(figsize=(6,6))
plt.errorbar(x, y, yerr= yerr, xerr = xerr, fmt = 'o', c ='black', label = 'Schuberth et al.')
plt.plot([-200, 2200], [-200, 2200], '--k')
plt.xlim(-200,2200)
plt.ylim(-200,2200)

x = VIMOS['r_auto'] 
y = SchuberthMatch['Rmag'] 

plt.scatter(x, y, c ='black')

Example #45
	def plotting(self,pressurelattice,invasionlattice,pressure,number_of_clusters):

		import matplotlib.pyplot as plt

		#Plotting the invasionlattice
		plt.figure(2)
		plt.imshow(invasionlattice, cmap='Greys',interpolation='nearest')
		plt.title("Invasion lattice")
		plt.colorbar()
		plt.savefig(self.pathsimulation + "invasionlattice.png", bbox_inches="tight")
		plt.close()

		#plotting the pressure
		plt.figure(5)
		plt.plot(pressure)
		plt.xlabel('Time')
		plt.ylabel('Pressure')
		plt.title('P(t)')
		plt.savefig(self.pathsimulation +"pressure.png", bbox_inches="tight")
		plt.close()

		#plotting the clusterlattice
		plt.figure(6)
		plt.imshow(self.lw, interpolation='nearest')
		plt.title("Clusterlattice")
		plt.colorbar()
		plt.savefig(self.pathsimulation +"clusterlattice.png", bbox_inches="tight")
		plt.close()

		#Temporal diagram
		plt.figure(7)
		plt.imshow(self.temporalplot,interpolation='nearest')
		plt.title('Temporal diagram')
		plt.colorbar()
		plt.savefig(self.pathsimulation +"temporal.png", bbox_inches="tight")
		plt.close()

		#Plotting pressure distribution in the cell.
		plt.figure(3)
		plt.hist(pressurelattice.ravel(), bins=30, fc='k', ec='k')
		plt.savefig(self.pathsimulation +"pressuredistribution.png", bbox_inches="tight")
		plt.close()

		#Plotting the number of clusters as a function of interation.
		plt.figure(1)
		plt.plot(number_of_clusters)
		plt.xlabel('Iteration number/time')
		plt.ylabel('Number of clusters')
		plt.title('Number_of_cluster(t)')
		plt.savefig(self.pathsimulation +"clusterN.png", bbox_inches="tight")
		plt.close()
Example #46
import csv
import datetime
from pylab import *
import matplotlib.pyplot as plt
from matplotlib import dates as mtd
from matplotlib.dates import HourLocator, DayLocator,DateFormatter
f1=open("./Turnstile/Turnstile-Data.txt","rb")
#f2=open("./Turnstile/Sp170St.txt","wb")
rv=csv.reader(f1)
#ro=csv.writer(f2)
fig=plt.figure(figsize=(10,8))
ax1=fig.add_subplot(3,1,1)
ax2=fig.add_subplot(3,1,3)
fig_wnd=plt.figure()
ax1_wnd=fig_wnd.add_subplot(3,1,1)
print "The station we are analyzing is 170th Street on line 4"
REMOTE="R243"; BOOTH="R284" # This is the code for the subway stop
print "We are considering turnstile numbers over the weekday from Jan 7th to Jan 11, 2013"
T_ent=-1     # Entry count on turnstile
T_exit=-1   # Exit count on turnstile
trun=[]
SCP=""
flag=-1
for rw in rv:
 if (rw[0]==BOOTH and rw[1]==REMOTE) :
   flag=1
   print(len(rw))
   if SCP!=rw[2]:
     T_ent=-1     # Entry count on turnstile
     T_exit=-1   # Exit count on turnstile
     SCP=rw[2]
Example #47
truth = np.array(y_test)

from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score

accuracy_score(predictions,truth)
f1_score(predictions,truth,average="weighted")
recall_score(predictions,truth,average="weighted")
precision_score(predictions,truth,average="weighted")

from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
import seaborn as sns

all_categories = np.unique(articles["category"])

fig=plt.figure(figsize=(8,8))
ax=fig.add_subplot(1,1,1)
plt.tick_params(labelsize=20)
for i in range(len(all_categories)):
    mask = y_test == all_categories[i]
    prob_category = clf.predict_proba(X_test)[:,i]
    dummy_y = np.ones(len(y_test))
    dummy_y[~mask] = 0
    fpr, tpr, thresholds = roc_curve(dummy_y, prob_category)
    ax.plot(fpr,tpr,label=all_categories[i]+", AUC = "+str(round(auc(fpr,tpr),2)),lw=2)

ax.legend(loc=4,fontsize="large")
ax.plot([0, 1], [0, 1], "k--")
ax.set_xlim([0.0, 1.0]);ax.set_ylim([0.0, 1.05])
ax.set_xlabel("False Positive Rate",fontsize=20); ax.set_ylabel("True Positive Rate",fontsize=20)
plt.savefig("roc_curve_body.png",bbox_inches="tight")
Example #48
def telluric_psf(obs,plot=False,plot_med=False):
    """
    Telluric PSF
    
    Measure the instrumental profile of HIRES by treating telluric
    lines as delta functions. The width of the telluric lines is a
    measure of the PSF width.

    Method
    ------ 
    Fit a comb of gaussians to the O2 bandhead from 6270-6305A. The
    comb of gaussians that describes the telluric lines has 4 free
    parameters:

       - sig  : width of the line
       - wls0 : shift of telluric line centers: -0.1 to +0.1 angstroms
       - wls1 : wavelength stretch allows for 1% fluctuation in dlam/dpix
              0.99 to 1.01.
       - d    : scale factor to apply to the telluric lines

    If wls0 is off by more than a line width, we might not find a
    solution. We perform a scan in wls0.
    
    Parameters
    ----------
    obs : CPS observation id.
    """
    
    # Load spectral region and prep.
    spec = smio.getspec_fits(obs=obs)[14]
    spec = spec[(spec['w'] > 6270) & (spec['w'] < 6305)]
    spec['s'] /= continuum.cfit(spec)

    # figure out where the 95th percentile is for data that's this
    # noisy, then adjust the continuum
    serr = np.median(spec['serr'])
    contlevel = np.percentile(np.random.randn(spec.size)*serr,95)
    spec['s'] *= (1+contlevel)
    darr = np.array(tell.d)

    # Initialize parameters
    p0 = lmfit.Parameters()
    p0.add('wls0',value=0.0)
    p0.add('sig',value=0.02,min=0,max=0.1)
    p0.add('wls1',value=1.0,min=0.99,max=1.01)
    p0.add('d',value=1,min=0.25,max=2)

    def model(p):
        return telluric_comb(p['sig'].value,p['wls0'].value,p['wls1'].value,
                             spec['w'],tell.wcen,darr=p['d'].value*darr)

    def res(p):
        """
        Residuals

        Returns residuals for use in the LM fitter. I compute the
        median absolute deviation to flag outliers. I remove these
        values from the residual array. However, I divide the
        residuals by the total number of residuals (post sigma
        clipping) so as not to penalize solutions with more points.

        Parameters
        ----------
        p : lmfit parameters object

        Returns
        -------
        res : Residual array (used in lmfit)
        
        """
        mod = model(p)
        res = (spec['s'] - mod) / spec['serr'] # normalized residuals

        mad = np.median(np.abs(res))
        b =  (np.abs(res) < 5*mad ) 
        res /= b.sum()

        if b.sum() < 10:
            return res * 10
        else:
            return res[b]

    # Do a scan over different wavelength shifts
    wstep = 0.01
    wrange = 0.1
    wls0L = np.arange(-wrange,wrange+wstep,wstep)

    chiL = np.zeros(len(wls0L))
    outL = []
    for i in range(len(wls0L)):
        p = copy.deepcopy(p0)
        p['wls0'].value = wls0L[i]
        out = lmfit.minimize(res,p)
        chiL[i] = np.sum(res(p)**2)
        outL+=[out] 

    # If the initial shift is off by a lot, the line depths will go to
    # zero and median residual will be 0. Reject these values
    out = outL[np.nanargmin(chiL)]
    p = out.params
    lmfit.report_fit(out.params)

    # Bin up the regions surrounding the telluric lines
    wtellmid = np.mean(tell.wcen)
    def getseg(wcen):
        wcen = wtellmid + (wcen-wtellmid)*p['wls1'].value + p['wls0'].value
        dw = spec['w'] - wcen
        b = np.abs(dw) < 0.3

        seg = pd.DataFrame({'dw':dw,'s':spec['s'],'model':model(p)})
        seg = pdplus.LittleEndian(seg.to_records(index=False))
        seg = pd.DataFrame(seg)
        return seg[b]
    
    seg = map(getseg,list(tell.wcen))
    seg = pd.concat(seg,ignore_index=True)
    wstep = 0.025
    bins = np.arange(-0.3,0.3+wstep,wstep)
    seg['dw0'] = 0.
    for dw0 in np.arange(-0.3,0.3,wstep):
        seg['dw0'][seg.dw.between(dw0,dw0+wstep)] = dw0
    bseg = seg.groupby('dw0',as_index=False).median()

    mod = model(p) # best fit model
    def plot_spectrum():
        # Plot telluric lines and fits
        plt.plot(spec['w'],spec['s'],label='Stellar Spectrum')
        plt.plot(spec['w'],mod,label='Telluric Model')
        plt.plot(spec['w'],spec['s']-mod+0.5,label='Residuals')
        plt.xlim(6275,6305)
        plt.ylim(0.0,1.05)
        plt.xlabel('Wavelength (A)')
        plt.ylabel('Intensity')
        plt.title('Telluric Lines')
        
    def plot_median_profile():
        # Plot median line profile
        plt.plot(bseg.dw,bseg.s,'.',label='Median Stellar Spectrum')
        plt.plot(bseg.dw,bseg.model,'-',label='Median Telluric Model')
        plt.title('Median Line Profile')
        plt.xlabel('Distance From Line Center (A)')
        yl = list(plt.ylim())
        yl[1] = 1.05
        plt.ylim(*yl)

    if plot:
        # Used for stand-alone telluric diagnostic plots
        gs = GridSpec(1,4)
        fig = plt.figure(figsize=(12,3))
        plt.gcf().set_tight_layout(True)
        plt.sca(fig.add_subplot(gs[0,:3]))
        plot_spectrum()
        plt.sca(fig.add_subplot(gs[0,3]))
        plot_median_profile()

    if plot_med:
        # Used in quicklook plot
        plot_median_profile()


    # sig = width of the telluric line in Angstroms (converted to km/s and pixels below)
    sig = p['sig'].value # [Angstroms]
    sig = sig/6290*3e5 # [km/s]
    sig = sig/1.3 # [pixels]
    return sig
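The docstring above frames the telluric model as a comb of Gaussian lines sharing one width, with a shift wls0 and stretch wls1 applied to the line centers (mirroring the transformation in getseg). The project's telluric_comb is not included in this example, so the following is only a minimal sketch of what such a comb might look like under those assumptions, not the actual implementation.

# Minimal sketch of a "comb of Gaussians" telluric model (assumption, not the
# project's telluric_comb)
import numpy as np

def gaussian_comb(sig, wls0, wls1, w, wcen, darr):
    """Sum of Gaussian absorption lines with a common width `sig`.

    w    : observed wavelength grid [Angstroms]
    wcen : rest-frame telluric line centers
    darr : line depths (scaled upstream by the free parameter d)
    wls0, wls1 : shift and stretch applied to the line centers
    """
    wmid = np.mean(wcen)
    centers = wmid + (np.asarray(wcen) - wmid) * wls1 + wls0  # same transform as getseg
    model = np.ones_like(w, dtype=float)
    for c, d in zip(centers, np.asarray(darr)):
        model -= d * np.exp(-0.5 * ((w - c) / sig) ** 2)  # subtract each absorption line
    return model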