Code example #1
def fitPolyGrad(M, alpha, lrate):
    w = rnd.randn(M + 1)
    Ztrain = dataMatrix(Xtrain, M)
    Ztest = dataMatrix(Xtest, M)  # test design matrix (assumes Xtest is available alongside Xtrain)
    errTrainList = []
    errTestList = []
    iList = 10**np.array([0, 1, 2, 3, 4, 5, 6, 7])
    j = 0
    plt.figure()
    plt.suptitle(
        'Question 5(d): fitted polynomial as number of weight-updates increases'
    )

    for i in range(10000000 + 1):
        grad = regGrad(Ztrain, Ytrain, w, alpha)
        w -= lrate * grad

        if np.mod(i, 1000) == 0:
            yhat = np.matmul(Ztrain, w)
            errTrain = np.sum((Ytrain - yhat)**2) / Ntrain
            errTrainList.append(errTrain)
            yhat = np.matmul(Ztest, w)  # use the test design matrix here (the original reused Ztrain)
            errTest = np.sum((Ytest - yhat)**2) / Ntest
            errTestList.append(errTest)

        if i == iList[j]:
            j += 1
            plt.subplot(3, 3, j)
            plotPoly(w)

        if np.mod(i, 100000) == 0:
            print('iteration {},   Training error = {}'.format(i, errTrain))

    plt.figure()
    plotPoly(w)
    plt.title('Question 5: fitted polynomial')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.figure()
    plt.plot(errTrainList, 'b')
    plt.plot(errTestList, 'r')
    plt.title('Question 5: training and test error vs. time')
    plt.xlabel('Number of iterations (in thousands)')
    plt.ylabel('Error')
    print('')
    print('Final training and test errors: ')
    print('   {},   {}'.format(errTrain, errTest))
    print('')
    print('Final weight vector: ')
    print(w)
Code example #2
def changeCandle():
    plt.clf()

    if candleType == "1H":
        ax1 = plt.subplot2grid((6, 1), (0, 0), rowspan=5, colspan=1)
        ax2 = plt.subplot2grid((6, 1), (5, 0),
                               rowspan=1,
                               colspan=1,
                               sharex=ax1)
        plt.setp(ax1.get_xticklabels(), visible=False)

        df_ohlc = df_segment["Close"]
        df_volume = df_segment["Volume To"].resample(candleType).sum()
        df_ohlc.reset_index(inplace=True)
        df_ohlc["Date"] = df_ohlc["Date"].map(mdates.date2num)

        ax1.xaxis_date()  # show mdates as readable normal date
        ax1.plot(df_ohlc["Date"], df_ohlc["Close"])  # hourly close-price line (the original plt.plt is not a pyplot function)
        ax2.fill_between(df_volume.index.map(mdates.date2num),
                         df_volume.values,
                         0,
                         facecolors=volumeColor)
        fig.canvas.draw()

    else:
        ax1 = plt.subplot2grid((6, 1), (0, 0), rowspan=5, colspan=1)
        ax2 = plt.subplot2grid((6, 1), (5, 0),
                               rowspan=1,
                               colspan=1,
                               sharex=ax1)
        plt.setp(ax1.get_xticklabels(), visible=False)

        df_ohlc = df_segment["Close"].resample(candleType).ohlc()
        df_volume = df_segment["Volume To"].resample(candleType).sum()
        df_ohlc.reset_index(inplace=True)
        df_ohlc["Date"] = df_ohlc["Date"].map(mdates.date2num)

        ax1.xaxis_date()  # show mdates as readable normal date
        candlestick_ohlc(ax1,
                         df_ohlc.values,
                         width=candleWidth,
                         colorup=lightColor,
                         colordown=darkColor)
        ax2.fill_between(df_volume.index.map(mdates.date2num),
                         df_volume.values,
                         0,
                         facecolors=volumeColor)
        fig.canvas.draw()
Code example #3
    def show(self, first_ascan=0, last_ascan=None):
        """" 
        if absolute == True: Returns depth profile; 
        otherwise just Fourrier transform 
        """
        if last_ascan == None:
            last_ascan = len(self.fourrier_data)

        if isinstance(self.fourrier_data[0], int) or isinstance(
                self.fourrier_data[0], float):
            plt.figure()
            plt.plot(self.fourrier_data)
            plt.title(self.name)
        else:
            plt.figure()
            plt.imshow(np.rot90(self.fourrier_data), cmap="gray")
            plt.clim([0, 10000])
            plt.axis("tight")
            plt.title(self.name)
Code example #4
def L_Layer_model(X, Y, layers_dims, learning_rate=0.0075, num_iterations=3000, print_cost=False):
    # num_iterations and print_cost need default values once learning_rate has one;
    # the defaults used here are assumptions
    costs = []
    parameters = initialize_parameters_deep(layers_dims)

    for i in range(0, num_iterations):
        AL, caches = L_model_forward(X, parameters)
        cost = compute_cost(AL, Y)
        grads = L_model_backward(AL, Y, caches)
        parameters = update_parameters(parameters, grads, learning_rate)

        if print_cost and i % 100 == 0:
            print("Cost after iteration %i: %f" % (i, cost))
        if i % 100 == 0:
            costs.append(cost)

    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('iteration')
    plt.title("learning_rate = " + str(learning_rate))
    plt.show()

    return parameters
Code example #5
File: Explore_Data.py Project: SpongeGourd/python
 def plot(self,X,y,kind='scatter',subPlots=True):
     X =  _check_dataFrame(X)
     if(subPlots!=True):
         fig = plt.plot(X,y)
         self.fig_list.append(fig)
     else:
         #plot in different figure
         fig = plt.figure()
         for i in range(X.shape[1]):
             df = X[[X.columns[i]]]
             plt.plot(df,y,"*")
             self.fig_list.append(fig)
Code example #6
File: maincode.py Project: gayu-tanks/2d-mapper
def plott(f,l,r):
    global x, y, n  # module-level state in the original script (assumed); they are reassigned below
    if(dir==0):
        if(n==1):
            n=0
        else:
            z=y+15
            if(l<30):
                plt.plot([x-l,x-l],[y,z])
            if(r<30):
                plt.plot([x+r,x+r],[y,z])
            if(f<30):
                plt.plot([x-l,x+r],[z,z])
            y=z
    if(dir==1):
        if(n==1):
            n=0
        else:
            z=x-15
            if(l<30):
                plt.plot([z,x],[y-l,y-l])
            if(r<30):
                plt.plot([z,x],[y+r,y+r])
            if(f<30):
                plt.plot([x-f,x-f],[y-l,y+r])
            x=z
    if(dir==2):
        if(n==1):
            n=0
        else:
            z=y-15
            if(l<30):
                plt.plot([x+l,x+l],[y,z])
            if(r<30):
                plt.plot([x-r,x-r],[y,z])
            if(f<30):
                plt.plot([x+l,x-r],[z,z])
            y=z
    if(dir==3):
        if(n==1):
            n=0
        else:
            z=x+15
            if(l<30):
                plt.plot([z,x],[y+l,y+l])
            if(r<30):
                plt.plot([z,x],[y-r,y-r])
            if(f<30):
                plt.plot([x+f,x+f],[y+l,y-r])
            x=z
Code example #7
File: Main.py Project: Joker1379/CourceWork
def animate(frame):
    data = open(str(pathlib.Path(__file__).parent.absolute())+"\Data.txt","r").read().split('\n')
    if frame <= len(data): data = data[0:frame]
    for i in data:
        if i != '':
            i = i.split()
            if i[0] == 'o':
                x = [float(j) for j in re.findall(r'[-]?\d+\.\d+|[-]?\d+', i[1]+i[2])]
                y = [float(j) for j in re.findall(r'[-]?\d+\.\d+|[-]?\d+', i[3]+i[4])]
                plt.plot(x, y, marker='o', color='b')
            elif i[0] == 'l':
                x = [float(j) for j in re.findall(r'[-]?\d+\.\d+|[-]?\d+', i[1]+i[2])]
                y = [float(j) for j in re.findall(r'[-]?\d+\.\d+|[-]?\d+', i[3]+i[4])]
                plt.plot(*dl(x[0], x[1], y[0], y[1]), marker='o', color='g')
            elif i[0] == 'p':
                x, y = re.findall(r'[-]?\d+\.\d+|[-]?\d+', i[1]), re.findall(r'[-]?\d+\.\d+|[-]?\d+', i[2])
                plt.plot(float(x[0]), float(y[0]), marker='o', color='r')
            elif i[0] == 'c':
                x, y = re.findall(r'[-]?\d+\.\d+|[-]?\d+', i[1]), re.findall(r'[-]?\d+\.\d+|[-]?\d+', i[2])
                r = re.findall(r'[-]?\d+\.\d+|[-]?\d+', i[3])
                c = plt.Circle((float(x[0]), float(y[0])), float(r[0]), color='b', fill=False)  # assuming `opt` was meant to be the pyplot alias
                plt.gca().add_artist(c)
Code example #8
File: quiz.py Project: theolove/tlove_ga_projects

"""
5. Explain what the following code block is doing, line by line.
"""
import matplotlib.pyplot as plt
from __future__ import division

ads['ctr'] = ads['clicks'] / ads['impressions']

fig = plt.figure()
plt.subplot(1, 2, 1)
plt.hist(ads.spend)

plt.subplot(1, 2, 2)
plt.plot(ads.spend, ads.ctr, 'g.')
plt.show()


"""
6-8. Imagine we're viewing the following coefficient table for the following
regression:

(ad_id1772 is either 1 or 0, meaning it was ad 1772, or it was not)
'spend ~ impressions + clicks + ad_id1772'

column          coefficient         pvalue
y_intercept     0.02                0.000
impressions     0.00057             0.038
clicks          0.976               0.78
ad_id1772      -0.5                 0.02
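
A coefficient table like the one quoted above can be produced with an ordinary least-squares fit. A minimal sketch using the statsmodels formula API; the `ads` DataFrame, its column names, and the CSV path are assumptions:

import pandas as pd
import statsmodels.formula.api as smf

ads = pd.read_csv('ads.csv')  # hypothetical file with spend, impressions, clicks, ad_id columns
ads['ad_id1772'] = (ads['ad_id'] == 1772).astype(int)  # dummy: 1 if the row is ad 1772, else 0

# Fit spend ~ impressions + clicks + ad_id1772 and print the coefficient / p-value table
model = smf.ols('spend ~ impressions + clicks + ad_id1772', data=ads).fit()
print(model.summary())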
Code example #9
plt.legend()
plt.show()

# Plots the density of each temporal Graph
plt.plot(range(len(strong_componet_sizes)), densities)
plt.xlabel('Days')
plt.ylabel('Density')
plt.show()

#####################################################################################
# Repeat the step above for the average clustering coefficient of each G_t.         #
# TODO:                                                                             #
#   What do you observe? What does an increasing (similarly, decreasing) trend of   #
#   clustering coefficients mean for changes to the structure of the network over   #
#   time?                                                                           #
#####################################################################################

# Plots the average clustering coefficients for each temporal graph.
plt.plot(range(len(strong_componet_sizes)), clusterings)
plt.xlabel('Days')
plt.ylabel('Clustering')
plt.show()

#####################################################################################
# TODO Extra Credit:                                                                #
#   Given G, identify time points 2 <= phi <= T such that G_p differs significantly #
#   from G_p-1. Your approach should rank the time points in descending order of    #
#   significance in the identified change between graph snapshots. You may use any  #
#   algorithm in the literature (in which case proper citation is required to       #
#   recognize the author(s) of the algorithm), or develop an algorithm of your own. #
#####################################################################################
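
For the extra-credit TODO above, one possible (not prescribed) approach is to score each transition by the Jaccard distance between consecutive snapshots' edge sets and rank time points by that score. A sketch, assuming `graphs` is a list [G_1, ..., G_T] of networkx graphs:

import networkx as nx

def rank_change_points(graphs):
    # score each transition t-1 -> t by the Jaccard distance of the edge sets
    scores = []
    for t in range(1, len(graphs)):
        e_prev = set(graphs[t - 1].edges())
        e_curr = set(graphs[t].edges())
        union = e_prev | e_curr
        if union:
            dist = 1.0 - len(e_prev & e_curr) / len(union)
        else:
            dist = 0.0
        scores.append((t, dist))
    # most significant changes first
    return sorted(scores, key=lambda pair: pair[1], reverse=True)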
Code example #10
# imports as in the full script shown in Code example #31
import datetime as dt
from matplotlib import pyplot as plt
from getdata_yw import getobs_tempsalt
from getdata import getemolt_latlon
from models_yw import getFVCOM_bottom_tempsalt_netcdf
from conversions import dm2dd

site = 'AG01'
# get data
[lati, loni, on,
 bd] = getemolt_latlon(site)  # extracts lat/lon based on site code
[lati, loni] = dm2dd(lati, loni)  #converts decimal-minutes to decimal degrees
dept = [0, 5]
(obs_dt, obs_temps, obs_salt) = getobs_tempsalt(
    site,
    input_time=[dt.datetime(2006, 9, 10),
                dt.datetime(2006, 9, 11)],
    dep=dept)
dept = [bd[0] - 0.25 * bd[0], bd[0] + 0.25 * bd[0]]
(obs_dt, obs_tempb, obs_salt) = getobs_tempsalt(
    site,
    input_time=[dt.datetime(2006, 9, 10),
                dt.datetime(2006, 9, 11)],
    dep=dept)

# get model
for k in range(44):
    modtso = getFVCOM_bottom_tempsalt_netcdf(lati,
                                             loni,
                                             dt.datetime(2006, 9, 10),
                                             dt.datetime(2006, 9, 11),
                                             layer=k,
                                             vname='temp')
    plt.plot(modtso[0], -k, 'g*')
plt.plot([obs_temps[0], obs_tempb[0]], [0, -bd[0]], 'r*')
Code example #11
dl_2.train(x=x, y=y, training_frame=data_training)

dl_3 = dl_250 = H2ODeepLearningEstimator(hidden=[11, 13, 17, 19],
                                         checkpoint=dl_1,
                                         epochs=250,
                                         activation="rectifier",
                                         loss="crossentropy")
dl_3.train(x=x,
           y=y,
           training_frame=data_training,
           validation_frame=data_testing)
target_names = ['class 0', 'class 1']
pred = dl_2.predict(data_testing[0:-1]).as_data_frame(use_pandas=True)

#rf_perf1 = dl_1.model_performance(data_testing)
#rf_perf2 = dl_2.model_performance(data_testing)
#rf_perf3 = dl_3.model_performance(data_testing)
#print("Predictions:",pred)
#print("Perfromance on test",rf_perf1)
#print("Perfromance on test",rf_perf2)
#print("Perfromance on test",rf_perf3)
#pred=dl_2.predict(data_testing).as_data_frame(use_pandas=True)
print("Pred", pred)
np.savetxt("/Users/bonythomas/test.csv", pred, delimiter=",")

# The pyplot module itself is not callable; an ROC plot from an H2O performance
# object was probably intended, e.g. dl_2.model_performance(data_testing).plot(type='roc')

#print ("AUC on Test:",rf_perf1.auc())
#print ("AUC on Test:",rf_perf2.auc())
#print ("AUC on Test:",rf_perf3.auc())
def display(x, y):
    plt.plot(x, y)
    plt.show()
Code example #13
import matplotlib.pyplot as plt

fig, ax1 = plt.subplots()

times = range(7)
co2 = 250, 256, 272, 260, 300, 320, 389

ax1.plot(times, co2, "b--")
ax1.set_ylabel('[$CO_2$]')
ax2 = ax1.twinx()

temp = [14.1, 15.5, 16.3, 18.1, 17.3, 19.1, 20.2]

ax2.plot(times, temp, "r-")  # presumably intended: draw the temperature series on the twin axis
ax2.set_ylabel("Temp (degC)")

plt.show()

#ex2

plt.subplot(1, 3, 1)
x = range(0, 10, 1)
plt.plot(x)

plt.subplot(1, 3, 2)
y = range(10, 0, -1)
plt.plot(y)
Code example #14
import matplotlib.pyplot as plt
year = [2000, 2001, 2002, 2003, 2004, 2005]
size = [5, 8, 10, 11, 12, 17]
plt.plot(year, size)
plt.show()
Code example #15
### With a lambda, elif has to be written as nested if/else expressions
df['col1'] = df['col1'].apply(lambda x:1 if x=='HS' else (2 if x=='C' else 3))
### How to update one column based on another
### Example: when col1 contains 'abc', set col2 to 0; rows where col1 is None are not updated.
### The first argument can be swapped for any other condition, e.g. df['col1']==2
df.loc[df['col1'].str.contains('abc',na=False), 'col2'] = 0

# groupby syntax: specify the grouping columns and whether to drop rows whose group keys are NaN
groupby = df.groupby(by=['colA','colB'],dropna=True) # returns a GroupBy object
df1 = groupby.sum() # follow with an aggregation function to get a DataFrame back

# reset the index; note the result has to be assigned back to df
df = df.reset_index()

# plot the result; the plot kind and figure size can be chosen
df.plot(kind='bar',figsize=(15,10))
# another way to draw a bar chart
df.plot.bar(x='col1',y='col2',figsize=(15,10))

# simple plot of two columns
x = df['a']
y = df['b']
plt.plot(x,y)

# how to convert a DataFrame to a NumPy array
numpy_array = df.iloc[:,:6].to_numpy().reshape(4,5)
# convert a NumPy array to a Series; note the array must be flattened to 1-D
series = pd.Series(numpy_array.ravel())
# convert Series objects to a DataFrame
frame = { 'name1': series1, 'name2': series }
df = pd.DataFrame(frame)
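
A small, self-contained sketch tying the conversions above together (the column names here are made up):

import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
numpy_array = df.to_numpy()               # DataFrame -> 2-D NumPy array
series = pd.Series(numpy_array.ravel())   # flatten to 1-D before building a Series
df2 = pd.DataFrame({'name1': series})     # Series -> DataFrame
print(df2)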
Code example #16
File: pyplot.py Project: thuydang/gt_fpbc
#!/usr/bin/env python

from matplotlib.pyplot import plot, show

plot([1,2,3])
show()
Code example #17
def train():
    if args.dataset == 'COCO':
        if args.dataset_root == VOC_ROOT:
            if not os.path.exists(COCO_ROOT):
                parser.error('Must specify dataset_root if specifying dataset')
            print("WARNING: Using default COCO dataset_root because " +
                  "--dataset_root was not specified.")
            args.dataset_root = COCO_ROOT
        cfg = coco
        dataset = COCODetection(root=args.dataset_root,
                                transform=SSDAugmentation(
                                    cfg['min_dim'], MEANS))
    elif args.dataset == 'VOC':
        if args.dataset_root == COCO_ROOT:
            parser.error('Must specify dataset if specifying dataset_root')
        cfg = voc
        dataset = VOCDetection(root=args.dataset_root,
                               transform=SSDAugmentation(
                                   cfg['min_dim'], MEANS))

    if args.visdom:
        import visdom
        global viz
        viz = visdom.Visdom()
        print(viz)

    build_net = build_ssd
    if args.model == 'ssd300':
        build_net = build_ssd
    elif args.model == 'ssd300_fpn38':
        build_net = build_ssd300_fpn38
    elif args.model == 'ssd300_fpn75':
        build_net = build_ssd300_fpn75
    elif args.model == 'ssd300_fpn150':
        args.batch_size = 16
        build_net = build_ssd300_fpn150
        cfg['max_iter'] = 240000
        cfg['lr_steps'] = (160000, 200000, 240000)
        print(cfg['max_iter'])
        print(cfg['lr_steps'])

    ssd_net = build_net('train', cfg['min_dim'], cfg['num_classes'])
    net = ssd_net
    print(net)

    if args.cuda:
        net = torch.nn.DataParallel(ssd_net)
        cudnn.benchmark = True

    if args.resume:
        print('Resuming training, loading {}...'.format(args.resume))
        ssd_net.load_weights(args.resume)
    else:
        vgg_weights = torch.load(args.save_folder + args.basenet)
        print('Loading base network...')
        ssd_net.vgg.load_state_dict(vgg_weights)

    if args.cuda:
        net = net.cuda()

    if not args.resume:
        print('Initializing weights...')
        # initialize newly added layers' weights with xavier method
        print('~~~~~~~', weights_init)
        #ssd_net.SElayers.apply(weights_init)
        if 'fpn' in args.model:
            ssd_net.Fusion_Ups.apply(weights_init)
            ssd_net.Fusion_Lefts.apply(weights_init)
        ssd_net.extras.apply(weights_init)
        ssd_net.loc.apply(weights_init)
        ssd_net.conf.apply(weights_init)

    optimizer = optim.SGD(net.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    criterion = MultiBoxLoss(cfg['num_classes'], 0.5, True, 0, True, 3, 0.5,
                             False, args.cuda)

    net.train()
    # loss counters
    loc_loss = 0
    conf_loss = 0
    epoch = 0
    print('Loading the dataset...')

    epoch_size = len(dataset) // args.batch_size
    print('Training ' + args.model + ' on:', dataset.name)
    print('Using the specified args:')
    print(args)

    step_index = 0
    # add more boxes

    if args.visdom:
        vis_title = 'SSD.PyTorch on ' + dataset.name
        vis_legend = ['Loc Loss', 'Conf Loss', 'Total Loss']
        iter_plot = create_vis_plot('Iteration', 'Loss', vis_title, vis_legend)
        epoch_plot = create_vis_plot('Epoch', 'Loss', vis_title, vis_legend)

    data_loader = data.DataLoader(dataset,
                                  args.batch_size,
                                  num_workers=args.num_workers,
                                  shuffle=True,
                                  collate_fn=detection_collate,
                                  pin_memory=True)
    # create batch iterator
    batch_iterator = iter(data_loader)

    pbar = tqdm(total=cfg['max_iter'], unit='iters', ncols=100)
    pbar.update(args.start_iter)

    loss_history = []

    for iteration in range(args.start_iter, cfg['max_iter']):
        if args.visdom and (iteration % epoch_size == 0):
            epoch += 1
            update_vis_plot(epoch, loc_loss, conf_loss, iter_plot, epoch_plot,
                            'append', epoch_size)
            # reset epoch loss counters
            loc_loss = 0
            conf_loss = 0

        if iteration in cfg['lr_steps']:
            step_index += 1
            adjust_learning_rate(optimizer, args.gamma, step_index)

        # load train data
        try:
            images, targets = next(batch_iterator)
        except StopIteration:
            batch_iterator = iter(data_loader)
            images, targets = next(batch_iterator)

        if args.cuda:
            images = Variable(images.cuda())
            targets = [Variable(ann.cuda()) for ann in targets]
        else:
            images = Variable(images)
            targets = [Variable(ann) for ann in targets]
        # forward
        t0 = time.time()
        out = net(images)
        # backprop
        optimizer.zero_grad()
        loss_l, loss_c = criterion(out, targets)
        loss = loss_l + loss_c
        loss.backward()
        optimizer.step()
        t1 = time.time()
        loc_loss += loss_l.data.item()
        conf_loss += loss_c.data.item()

        pbar.set_postfix({'loss': '%.2f' % (loss.item())})
        pbar.update()

        loss_history.append(loss.item())
        # if iteration % 1 == 0:
        #     print('timer: %.4f sec.' % (t1 - t0))
        #     print('iter ' + repr(iteration) + ' || Loss: %.4f ||' % (loss.item()), end=' ')

        if args.visdom:
            update_vis_plot(iteration, loss_l.item(), loss_c.item(),
                            iter_plot, epoch_plot, 'append')

        if iteration != 0 and iteration % 5000 == 0:
            print('Saving state, iter:', iteration)
            torch.save(
                ssd_net.state_dict(),
                'weights/' + args.model + '_' + repr(iteration) + '.pth')

    pbar.close()
    torch.save(
        ssd_net.state_dict(),
        args.save_folder + '' + args.dataset + '_' + args.model + '.pth')

    history = {'loss': loss_history}
    file_name = args.model + '_history.pkl'
    with open(file_name, 'wb') as f:
        pickle.dump(history, f)
        f.close()
    plt.plot(loss_history)
    plt.show()
Code example #18
import pandas as pd
from sklearn import linear_model
import matplotlib.pyplot as plt

# read data
dataframe = pd.read_fwf('brain_body.txt')
x_values = dataframe[['Brain']]
y_values = dataframe[['Body']]

# train model on data
body_reg = linear_model.LinearRegression()
body_reg.fit(x_values, y_values)

# visualize results
plt.scatter(x_values, y_values)
plt.plot(x_values, body_reg.predict(x_values))
plt.show()
Code example #19
#build the look up table
I = np.zeros([256, 2])
for i in range(256):
    # index in Cu_hist_2 whose value is closest to Cu_hist[i]
    I[i] = np.argmin(np.abs(Cu_hist_2 - Cu_hist[i]))
#plot plot plot!

plt.figure(1, figsize=(9, 9))
plt.subplot(321)
plt.plot(hist)
plt.title('Histogram of first image')

plt.subplot(322)
plt.plot(Cu_hist)
plt.title('Cumulative Histogram of first image')

plt.subplot(323)
plt.plot(hist_2)
plt.title('Histogram of second image')

plt.subplot(324)
plt.plot(Cu_hist_2)
plt.title('Cumulative Histogram of second image')

plt.subplot(325)
plt.plot(I)
plt.title('The look up table I')

plt.show()
Code example #20
File: 1cha_graph.py Project: justin7yun/project
from matplotlib import pyplot as plt

# y = input("Enter y: ")
# a = input("Enter a: ")
# x = input("Enter x: ")
# b = input("Enter b: ")

hello_world = plt.plot([0, 10], [0, 2])
hello_world2 = plt.plot([0, 10], [0, 2])
plt.show()
Code example #21
# In[34]:
"""
5. Explain what the following code block is doing, line by line.
"""
import matplotlib.pyplot as plt  #import matplotlib stuff
from __future__ import division  # enable Python 3 style true division

ads['ctr'] = ads['clicks'] / ads[
    'impressions']  #add a column whose values are clicks/impressions

fig = plt.figure()
plt.subplot(1, 2, 1)  # sets up 2 spots to put plots in one row
plt.hist(ads.spend)  # plots a histogram of ads.spend

plt.subplot(1, 2, 2)  # moving onto the second plot
plt.plot(ads.spend, ads.ctr, 'g.')  # plot spend vs ctr as green dots (the original plt.plt is not a pyplot function, hence the error)
plt.show()  #shows both plots on the grid

# In[ ]:
"""
6-8. Imagine we're viewing the following coefficient table for the following
regression:

(ad_id1772 is either 1 or 0, meaning it was ad 1772, or it was not)
'spend ~ impressions + clicks + ad_id1772'

column          coefficient         pvalue
y_intercept     0.02                0.000
impressions     0.00057             0.038
clicks          0.976               0.78
ad_id1772      -0.5                 0.02
Code example #22
plt.plot(linear_data, '-o')

#multiplots
linear_data=np.array([1,2,3,4,5,6,7,8])
exponential_data = linear_data**2
plt.subplot(1,2,2)
plt.plot(linear_data, '-o')

plt.subplot(1,2,1)
plt.plot(linear_data, '-x')

plt.figure()
ax1 = plt.subplot(1,2,1)
plt.plot(linear_data, '-o')
ax2 = plt.subplot(1,2,2, sharey=ax1)
plt.plot(exponential_data, '-x')

#subplots parameters
plt.figure()
plt.subplot(1,2,1) == plt.subplot(121)

fig, ((ax1,ax2,ax3), (ax4, ax5, ax6), (ax7,ax8,ax9)) = plt.subplots(3,3,sharex=True,sharey=True)
ax5.plot(linear_data, '-')

for ax in plt.gcf().get_axes():
    for label in ax.get_xticklabels() + ax.get_yticklabels():
        label.set_visible(True)

plt.gcf().canvas.draw()

#plot a histogram
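The snippet stops at the histogram comment; a minimal sketch of what presumably follows (the sample data are an assumption):

import numpy as np
import matplotlib.pyplot as plt

sample = np.random.normal(size=10000)  # assumed sample data
plt.figure()
plt.hist(sample, bins=100)
plt.show()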
Code example #23
import pandas as pd
import matplotlib.pyplot as plt

plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)

data = pd.read_excel(
    r'C:\Users\Administrator\Desktop\macroeconomics\GDP.xlsx', index_col=0)

ticks = ax.set_xticks([
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
    21, 22, 23, 24, 25, 26, 27
])

labels = ax.set_xticklabels([
    '1992', '1993', '1994', '1995', '1996', '1997', '1998', '1999', '2000',
    '2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009',
    '2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018',
    '2019'
],
                            rotation=40,
                            fontsize='small')

ax.set_title('中国国内生产总值')  # "China Gross Domestic Product"
ax.set_xlabel('年份')  # "Year"
ax.set_ylabel('市值(亿元人民币)')  # "Value (100 million RMB)"

plt.plot(data)
plt.show()
Code example #24
from sympy.abc import x, y  # the symbols are assumed to be defined earlier in the original script

expr = x * x + x * y + x * y + y * y
res = expr.subs({x: 1, y: 2})
r = expr.subs({x: 1 - y})
#print(r)

import sympy as sym
from sympy import Symbol
from sympy import pprint
import sympy.plotting as syp
sigma = Symbol('sigma')
mu = Symbol('mu')
#pprint(2*sym.pi*sigma)
gauss_function = 1 / sym.sqrt(2 * sym.pi * sigma)
gauss_function.subs({mu: 0, sigma: 1})
#print(gauss_function)
part_1 = 1 / (sym.sqrt(2 * sym.pi * sigma**2))
part_2 = sym.exp(-1 * ((x - mu)**2) / (2 * sigma**2))
#pprint(1/(sym.sqrt(2*sym.pi*sigma**2)))
my_gauss_f = part_1 * part_2
syp.plot(my_gauss_f.subs({mu: 1, sigma: 3}), (x, -10, 10), title='gauss')

x_values = []
y_values = []
for value in range(-5, 5):
    y = my_gauss_f.subs({mu: 10, sigma: 30, x: value})
    y_values.append(y)
    x_values.append(value)
    print(value, y)
import matplotlib.pyplot as plt
plt.plot(x_values, y_values)
plt.show()
Code example #25
 def scatter_plot(self, clf, X, Y):
     '''Draw the fitted regression line over a scatter plot.'''
     # scatter plot of the data
     plt.scatter(X, Y)
     # regression line
     plt.plot(X, clf.predict(X))
Code example #26
# coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
hdus = fits.open('UGC11680NED01.p_e.rad_SFH_lum_Mass.fits.gz')
img = hdus[0].data
plt.imshow(img)
img = hdus[0].data
plt.imshow(img)
plt.clf()
plt.imshow(img, origin = 'lower')
img.shape
img.min()
mass= img[0,:].cumsum()
plt.figure()
plt.plot(mass)
mass= img[1,:].cumsum()
plt.plot(mass)
mass= img[2,:].cumsum()
plt.plot(mass)
mass= img[4,:].cumsum()
plt.plot(mass)
mass= img[5,:].cumsum()
plt.plot(mass)
mass= img[6,:].cumsum()
plt.plot(mass)
mass= img[7,:].cumsum()
plt.plot(mass)
mass= img[8,:].cumsum()
plt.plot(mass)
Code example #27
# coding: utf-8
import numpy as np

x = np.linspace(0, 100, 10000)
y = np.sin(x) + np.sin(3 * x) + np.sin(5 * x)
import matplotlib.pyplot as plt

plt.plot(y)
plt.show()
# fft
Y = np.fft.fft(y)
plt.plot(np.abs(Y))
plt.show()
2 * np.pi * 16 / 100
2 * np.pi * 48 / 100
2 * np.pi * 80 / 100
Code example #28
File: project2.py Project: wdahl/ICSI532Project
weak_componet_sizes.append(
    len(max(nx.weakly_connected_components(G_t), key=len)))
densities.append(nx.density(G_t))
clusterings.append(nx.average_clustering(G_t))

#####################################################################################
# Plot the evolution of the size of the largest connected component (both in the   #
# weak and the strong sense) as a function of time (measured in days). In a        #
# separate figure, plot the density of each G_t, over t.                           #
# TODO:                                                                             #
#   Do you observe a "densification law" or an "undensification" trend?            #
#####################################################################################

# Plots the stong and weak compnent sizes for each temporal graph
plt.plt(range(len(strong_componet_sizes)),
        weak_componet_sizes,
        label='Weakly Connected Componet')
plt.plt(range(len(strong_componet_sizes)),
        strong_componet_sizes,
        label='Strongly Connected Componet')
plt.xlabel('Days')
plt.ylabel('Largest Componet Size')
plt.legend()
plt.show()

# Plots the density of each temporal Graph
plt.plot(range(len(strong_componet_sizes)), densities)
plt.xlabel('Days')
plt.ylabel('Density')
plt.show()
Code example #29
#Getting Silhouette with squared euclidean distance for k value ranging from 2 to 8
TotalSED = []
for player in player_name:
    features = transform_data.where(transform_data["player_name"] == player[0]).select("features")
    for k in range(2,8):
        kmeans = KMeans(featuresCol = 'features', k=k)
        model = kmeans.fit(features)
        predictions = model.transform(features)
        silhouette = evaluator.evaluate(predictions)
        print("With K={}".format(k))
        print("Silhouette with squared euclidean distance = " + str(silhouette))
        TotalSED.append(silhouette)
    break

# plotting k values vs. Total_SED (the loop above fills TotalSED for k = 2..7)
plt.plot(range(2, 8), TotalSED); plt.xlabel("No_of_Clusters"); plt.ylabel("Total_SED"); plt.xticks(range(2, 8))


#ESTABLISH MODEL WITH KMEANS
kmeans = KMeans().setK(4).setSeed(1)
model = kmeans.fit(training_set)

# Make predictions
predictions = model.transform(training_set)

# Evaluate clustering by computing Silhouette score
evaluator = ClusteringEvaluator()
silhouette = evaluator.evaluate(predictions)
print("Silhouette with squared euclidean distance = " + str(silhouette))

# Shows the result.
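The listing cuts off at "Shows the result."; in the usual Spark ML pattern the learned cluster centers are printed, roughly as follows (a sketch that assumes the `model` fitted above):

centers = model.clusterCenters()
print("Cluster Centers: ")
for center in centers:
    print(center)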
Code example #30
        G_losses.append(errG.item())
        D_losses.append(errD.item())

        if (iters % 500 == 0) or ((epoch == num_epochs - 1) and
                                  (i == len(dataloader) - 1)):
            with torch.no_grad():
                fake = netG(z).detach().cpu()
            img_list.append(vutils.make_grid(fake, padding=2, normalize=True))

        iters += 1

## plot the generator and discriminator training losses
plt.figure(figsize=(10, 5))
plt.title("Generator and Discriminator Loss During Training")
plt.plot(G_losses, label='G')
plt.plot(D_losses, label='D')
plt.xlabel("iterations")
plt.ylabel("loss")
plt.legend()
plt.show()

## animation result

#import matplotlib.animation as animation
#from IPython.display import HTML

fig = plt.figure(figsize=(8, 8))
plt.axis("off")
ims = [[plt.imshow(np.transpose(i, (1, 2, 0)), animated=True)]
       for i in img_list]
Code example #31
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 25 13:20:47 2013
Routine to look at model temperature profiles 
at a particular eMOLT location where we had both surf & bot sensors
@author: jmanning

"""
import datetime as dt
from matplotlib import pyplot as plt
from getdata_yw import getobs_tempsalt
from getdata import getemolt_latlon
from models_yw import getFVCOM_bottom_tempsalt_netcdf
from conversions import dm2dd

site='AG01'
# get data
[lati,loni,on,bd]=getemolt_latlon(site)# extracts lat/lon based on site code
[lati,loni]=dm2dd(lati,loni)#converts decimal-minutes to decimal degrees
dept=[0,5]
(obs_dt,obs_temps,obs_salt)=getobs_tempsalt(site, input_time=[dt.datetime(2006,9,10),dt.datetime(2006,9,11)], dep=dept)
dept=[bd[0]-0.25*bd[0],bd[0]+0.25*bd[0]]
(obs_dt,obs_tempb,obs_salt)=getobs_tempsalt(site, input_time=[dt.datetime(2006,9,10),dt.datetime(2006,9,11)], dep=dept)

# get model
for k in range(44):
    modtso=getFVCOM_bottom_tempsalt_netcdf(lati,loni,dt.datetime(2006,9,10),dt.datetime(2006,9,11),layer=k,vname='temp') 
    plt.plot(modtso[0],-k,'g*')
plt.plot([obs_temps[0],obs_tempb[0]],[0,-bd[0]],'r*')

Code example #32

"""
5. Explain what the following code block is doing, line by line.
"""
import matplotlib.pyplot as plt ## import matplotlib into the namespace
from __future__ import division ## enable Python 3 style true division

ads['ctr'] = ads['clicks'] / ads['impressions']  ## create a new column that shows click per impression

fig = plt.figure()  ## create a plot object
plt.subplot(1, 2, 1) ## define the first subplot of a 1x2 plot
plt.hist(ads.spend) ## create a histogram using the data in the spend column

plt.subplot(1, 2, 2) ## define the second subplot
plt.plot(ads.spend, ads.ctr, 'g.') ## create a scatter plot of spend vs ctr, colored green
plt.show() ## display the figure


"""
6-8. Imagine we're viewing the following coefficient table for the following
regression:

(ad_id1772 is either 1 or 0, meaning it was ad 1772, or it was not)
'spend ~ impressions + clicks + ad_id1772'

column          coefficient         pvalue
y_intercept     0.02                0.000
impressions     0.00057             0.038
clicks          0.976               0.78
ad_id1772      -0.5                 0.02
Code example #33
        # set_voltage = 0.12*np.sin(2*current_power/np.pi)
        set_voltage = 2 * current_power
        if set_voltage > MAX_VOLTAGE:
            set_voltage = MAX_VOLTAGE
        supply.set_voltage(set_voltage)
        n -= 60 / 0.2
    if n == (2 * 60 * 60 / 0.2 - 60 / 0.2):
        siggen.dcoffset(SET_POINT)
        print('PID loop started')
    sleep(0.2)
    current_power = meter.read_power()
    error = current_power - TARGET_POINT

    p_value = KP * error
    integrator = integrator + error
    i_value = -integrator * KI
    d_value = KD * (error - difference)

    pid = SET_POINT + p_value + i_value  # + d_value
    print(current_power, pid, p_value, i_value)
    siggen.dcoffset(pid)
    x.append(time())
    y.append(current_power)
    p.append(pid)
    n -= 1

plt.plot(x, y, label='Power')
plt.plot(x, p, label='PID')
plt.legend()
plt.show()
Code example #34
    plt.gca().invert_xaxis()
    plt.title('SFR Surface Density Image')
    
    plt.subplot(222)
    plt.imshow(Zvel, interpolation = 'nearest')
    plt.gca().invert_xaxis()
    plt.title('Galaxy Surface Density Image')

    plt.subplot(223)
    plt.semilogy( psd1D )
    plt.title('SFR Surface Density Image')
    
    plt.subplot(224)
    plt.semilogy( psd1Dden )
    plt.title('Galaxy Surface Density Image')
    #plt.tight_layout()
    
    plt.figure(2)
    plt.clf()
    plt.semilogy(xps)
    plt.title('cross power spectrum')
    plt.show()

delta = str(delta)
savetext(folder,("SFR_Surface_Dencity_"+delta+"_binsize.csv"),Z)
savetext(folder,"Galaxy_Surface_Dencity"+delta+"_binsize.csv",Zden)    

plt.plot(Z.flatten(), Zvel.flatten(), 'kx')
plt.plot(x,y)
plt.show()
Code example #35
    def train(self, data, all_y_trues):
        """
        - data is a (n x 2) numpy array, n = # samples in the dataset.
        - all_y_trues is a numpy array with n elements.
        Elements in all_y_trues correspond to those in data.
        """
        learn_rate = 0.1
        epochs = 1000
        loss_f = np.zeros((epochs, 1))
        for epoch in range(epochs):
            for x, y_true in zip(data, all_y_trues):
                #---Do a feedforward (we'll need values later)
                sum_h1 = self.w1 * x[0] + self.w2 * x[1] + self.b1
                h1 = sigmoid(sum_h1)

                sum_h2 = self.w3 * x[0] + self.w4 * x[1] + self.b2
                h2 = sigmoid(sum_h2)

                sum_o1 = self.w5 * h1 + self.w6 * h2 + self.b3
                o1 = sigmoid(sum_o1)
                y_pred = o1

                #--- Calculate partial derivatives.
                #--- Naming: d_L_d_w1 represents "partial L/partial w1"
                d_L_d_ypred = -(y_true - y_pred)

                #Neuron o1
                d_ypred_d_w5 = h1 * deriv_sigmoid(sum_o1)
                d_ypred_d_w6 = h2 * deriv_sigmoid(sum_o1)
                d_ypred_d_b3 = deriv_sigmoid(sum_o1)

                d_ypred_d_h1 = self.w5 * deriv_sigmoid(sum_o1)
                d_ypred_d_h2 = self.w6 * deriv_sigmoid(sum_o1)

                #Neuron h1
                d_h1_d_w1 = x[0] * deriv_sigmoid(sum_h1)
                d_h1_d_w2 = x[1] * deriv_sigmoid(sum_h1)
                d_h1_d_b1 = deriv_sigmoid(sum_h1)

                #Neuron h2
                d_h2_d_w3 = x[0] * deriv_sigmoid(sum_h2)
                d_h2_d_w4 = x[1] * deriv_sigmoid(sum_h2)
                d_h2_d_b2 = deriv_sigmoid(sum_h2)

                #--- update weights and biases
                #Neuron o1
                self.w5 -= learn_rate * d_L_d_ypred * d_ypred_d_w5
                self.w6 -= learn_rate * d_L_d_ypred * d_ypred_d_w6
                self.b3 -= learn_rate * d_L_d_ypred * d_ypred_d_b3

                #Neuron h1
                self.w1 -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w1
                self.w2 -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w2
                self.b1 -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_b1

                #Neuron h2
                self.w3 -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w3
                self.w4 -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w4
                self.b2 -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_b2

            #--- Calculate total loss at the end of each epoch
            y_preds = np.apply_along_axis(self.feedforward, 1, data)
            loss = mse_loss(all_y_trues, y_preds)
            loss_f[epoch] = loss
            if epoch % 10 == 0:
                print("Epoch %d loss: %.3f", (epoch, loss))

    #fig,ax = plt.subplots()
        plt.plot(loss_f)
        plt.title('loss_function')
        plt.show()