Example #1
def gera(nome_teste, nome_pred):
    pred = pd.read_csv('dados/'+ nome_teste, delimiter=' ', usecols=[0, 1], header=None, names=['alvo', 'preco'])
    out = pd.read_csv('dados/'+ nome_pred, delimiter=' ', usecols=[0], header=None, names=['resultado'])
    
    print(len(pred))
    print(len(out))
    
    errosx = []
    errosy = []
    acertosx = []
    acertosy = []
    precosx = []
    precosy = []
    for i in range(0, len(pred)):
        precosx.append(i)
        precosy.append(float(pred['preco'][i][2:]))
        if pred['alvo'][i] == out['resultado'][i]:
            acertosx.append(i)
            acertosy.append(float(pred['preco'][i][2:]))
        else:
            errosx.append(i)
            errosy.append(float(pred['preco'][i][2:]))
            

    plt.plot(precosx, precosy)
    plt.plot(errosx, errosy, 'rx')
    plt.plot(acertosx, acertosy, 'x')
    plt.show()
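A minimal usage sketch for gera(); the imports are the ones its body relies on, and the two file names are hypothetical placeholders for space-delimited files under dados/:

import pandas as pd
import matplotlib.pyplot as plt

gera('teste_precos.txt', 'saida_pred.txt')  # hypothetical input files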
def show_cmaps(names):
    # relies on a pylab star import: figure, subplot, imshow, title and show come from pylab
    matplotlib.rc('text', usetex=False)
    a=np.outer(np.arange(0,1,0.01),np.ones(10))   # pseudo image data
    f=figure(figsize=(10,5))
    f.subplots_adjust(top=0.8,bottom=0.05,left=0.01,right=0.99)
    # get list of all colormap names;
    # cm.datad only holds the built-in colormaps, so use the undocumented
    # cm.cmap_d dictionary, which also includes registered ones
    maps = [m for m in cm.cmap_d if not m.endswith("_r")]
    maps.sort()
    # determine number of subplots to make
    l=len(maps)+1
    if names is not None: l=len(names)  # assume all names are correct!
    # loop over maps and plot the selected ones
    i=0
    for m in maps:
        if names is None or m in names:
            i+=1
            ax = subplot(1,l,i)
            ax.axis("off")
            imshow(a,aspect='auto',cmap=cm.get_cmap(m),origin="lower")
            title(m,rotation=90,fontsize=10,verticalalignment='bottom')
#    savefig("colormaps.png",dpi=100,facecolor='gray')
    show()
def show_plot(X, y, n_neighbors=10, h=0.2):
    # Create color maps
    cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF','#FFAAAA', '#AAFFAA', '#AAAAFF','#FFAAAA', '#AAFFAA', '#AAAAFF','#AAAAFF'])
    cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF','#FF0000','#FF0000','#FF0000','#FF0000','#FF0000','#FF0000','#FF0000',])

    for weights in ['uniform', 'distance']:
        # we create an instance of Neighbours Classifier and fit the data.
        clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
        clf.fit(X, y)

        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
        x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                             np.arange(y_min, y_max, h))
        Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

        # Put the result into a color plot
        Z = Z.reshape(xx.shape)
        plt.figure()
        plt.pcolormesh(xx, yy, Z, cmap=cmap_light)

        # Plot also the training points
        plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
        plt.xlim(xx.min(), xx.max())
        plt.ylim(yy.min(), yy.max())
        plt.title("3-Class classification (k = %i, weights = '%s')"
                  % (n_neighbors, weights))

    plt.show()
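A usage sketch for show_plot() on the iris data, assuming the imports its body uses; restricting to the first two features keeps the decision-boundary mesh two-dimensional:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets, neighbors

iris = datasets.load_iris()
show_plot(iris.data[:, :2], iris.target, n_neighbors=15, h=0.05)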
Example #4
    def get_gabors(self, rf):
        lams =  float(rf[0])/self.sfs # lambda = 1./sf  #1./np.array([.1,.25,.4])
        sigma = rf[0]/2./np.pi
        # rf = [100,100]
        gabors = np.zeros(( len(self.oris),len(self.phases),len(lams), rf[0], rf[1] ))

        i = np.arange(-rf[0]/2+1,rf[0]/2+1)
        #print i
        j = np.arange(-rf[1]/2+1,rf[1]/2+1)
        ii,jj = np.meshgrid(i,j)
        for o, theta in enumerate(self.oris):
            x = ii*np.cos(theta) + jj*np.sin(theta)
            y = -ii*np.sin(theta) + jj*np.cos(theta)

            for p, phase in enumerate(self.phases):
                for s, lam in enumerate(lams):
                    fxx = np.cos(2*np.pi*x/lam + phase) * np.exp(-(x**2+y**2)/(2*sigma**2))
                    fxx -= np.mean(fxx)
                    fxx /= np.linalg.norm(fxx)

                    #if p==0:
                        #plt.subplot(len(oris),len(lams),count+1)
                        #plt.imshow(fxx,cmap=mpl.cm.gray,interpolation='bicubic')
                        #count+=1

                    gabors[o,p,s,:,:] = fxx
        plt.show()
        return gabors
Example #5
    def plot_predict_is(self,h=5,**kwargs):
        """ Plots forecasts with the estimated model against data
            (Simulated prediction with data)

        Parameters
        ----------
        h : int (default : 5)
            How many steps to forecast

        Returns
        -------
        - Plot of the forecast against data
        """

        figsize = kwargs.get('figsize',(10,7))

        plt.figure(figsize=figsize)
        date_index = self.index[-h:]
        predictions = self.predict_is(h)
        data = self.data[-h:]

        t_params = self.transform_z()

        plt.plot(date_index,np.abs(data-t_params[-1]),label='Data')
        plt.plot(date_index,predictions,label='Predictions',c='black')
        plt.title(self.data_name)
        plt.legend(loc=2)   
        plt.show()          
def scree_plot(pca_obj, fname=None): 
    '''
    Scree plot for variance & cumulative variance by component from PCA. 

    Arguments: 
        - pca_obj: a fitted sklearn PCA instance
        - fname: path to write plot to file

    Output: 
        - scree plot 
    '''   
    components = pca_obj.n_components_ 
    variance = pca_obj.explained_variance_ratio_
    plt.figure()
    plt.plot(np.arange(1, components + 1), np.cumsum(variance), label='Cumulative Variance')
    plt.plot(np.arange(1, components + 1), variance, label='Variance')
    plt.xlim([0.8, components]); plt.ylim([0.0, 1.01])
    plt.xlabel('No. Components', labelpad=11); plt.ylabel('Variance Explained', labelpad=11)
    plt.legend(loc='best') 
    plt.tight_layout() 
    if fname is not None:
        plt.savefig(fname)
        plt.close() 
    else:
        plt.show() 
    return 
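A usage sketch for scree_plot(), assuming scikit-learn plus the numpy/matplotlib imports used above:

from sklearn.datasets import load_iris
from sklearn.decomposition import PCA

pca_fitted = PCA(n_components=4).fit(load_iris().data)
scree_plot(pca_fitted)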
def plot_figure(data, row_labels, col_labels, abs_val, plot_bin, labels, time, logInterval):
  colors = ['#0077FF', '#FF0000', '#00FF00', 'magenta']
  cmaps = []
  for i in colors:
    cmaps.append(mpl.colors.LinearSegmentedColormap.from_list('m1',['black',i]))
  dim, rows, cols = data.shape
  vmax = np.amax(data)
  vmin = np.amin(data)
  fig, ax = plt.subplots()
  c = np.zeros([rows, cols, 4])
  for i in range(dim):
    c = np.add(c, cmaps[i]((data[i]-vmin)/(vmax-vmin)))
  c = np.clip(c, 0, 1)
  pc = ax.imshow(c, aspect='auto', interpolation='none')
  #ax.set_title(labels[0])
  fig.text(0.5, 0.04, 'Bin # along rod length', ha='center',
      fontsize=fontsize)  # `fontsize` is assumed to be defined at module level
  fig.text(0.0, 0.5, 'Time (s)', va='center', rotation='vertical',
      fontsize=fontsize)
  #ax.add_patch(Rectangle((0.5, time/logInterval), cols-1, 10, edgecolor='w',
  #  facecolor='none'))
  #ax.add_patch(Rectangle((bin_id, 0.5/logInterval), 1, rows-0.5/logInterval,
  #  edgecolor='w', facecolor='none'))
  #plt.savefig('kymo.png', bbox_inches='tight')
  plt.savefig('3rods_1long_kymo.pdf')
  plt.show()
Example #8
def plotAlphas(datasetNames, sampleSizes, foldsSet, cvScalings, sampleMethods, fileNameSuffix): 
    """
    Plot the variation in the error with alpha for penalisation. 
    """
    for i, datasetName in enumerate(datasetNames): 
        #plt.figure(i)    
        
        
        for k in range(len(sampleMethods)):
            outfileName = outputDir + datasetName + sampleMethods[k] + fileNameSuffix + ".npz"
            data = numpy.load(outfileName)
    
            errors = data["arr_0"]
            meanMeasures = numpy.mean(errors, 0)
            
            foldInd = 4 
    
            for j in range(sampleSizes.shape[0]):  # renamed from i to avoid clobbering the dataset index
                plt.plot(cvScalings, meanMeasures[j, foldInd, 2:8], next(linecycler), label="m="+str(sampleSizes[j]))
                    
            plt.xlabel("Alpha")
            plt.ylabel('Error')
            xmin, xmax = cvScalings[0], cvScalings[-1]
            plt.xlim((xmin,xmax))

        
            plt.legend(loc="upper left")
    plt.show()
Example #9
def main():

    # http://scikit-learn.org/stable/tutorial/basic/tutorial.html#loading-an-example-dataset
    # "A dataset is a dictionary-like object that holds all the data and some
    # metadata about the data. This data is stored in the .data member, which
    # is a n_samples, n_features array. In the case of supervised problem, one
    # or more response variables are stored in the .target member."

    # Toy datasets

    iris = datasets.load_iris()         # The iris dataset (classification)
    digits = datasets.load_digits()     # The digits dataset (classification)

    #boston = datasets.load_boston()     # The boston house-prices dataset (regression)
    #diabetes = datasets.load_diabetes() # The diabetes dataset (regression)
    #linnerud = datasets.load_linnerud() # The linnerud dataset (multivariate regression)

    print(iris.feature_names)
    print(iris.data)
    print(iris.target_names)
    print(iris.target)

    print(digits.images[0])
    print(digits.target_names)
    print(digits.target)

    plt.imshow(digits.images[0], cmap='gray', interpolation='nearest')
    plt.show()
Example #10
def main():
    gw = gridworld()
    a = agent(gw)

    for epoch in range(20):
        a.initEpoch()
        while True:
            rwd, stat, act = a.takeAction()
            a.updateQ(rwd, stat, act)
            if gw.status() == 'Goal':
                break
            if mod(a.counter, 10)==0:
                print(gw.state())
                print(gw.field())
        print('Finished')
        print(a.counter)
        print(gw.state())
        print(gw.field())
        Q = transpose(a.Q(), (2,0,1))
        for i in range(4):
            plt.subplot(2,2,i+1)  # subplot indices are 1-based
            plt.imshow(Q[i], interpolation='nearest')
            plt.title(a.actions()[i])
            plt.colorbar()
        plt.show()
Example #11
def plotResults(datasetName, sampleSizes, foldsSet, cvScalings, sampleMethods, fileNameSuffix):
    """
    Plots the errors for a particular dataset on a bar graph. 
    """

    for k in range(len(sampleMethods)):
        outfileName = outputDir + datasetName + sampleMethods[k] + fileNameSuffix + ".npz"
        data = numpy.load(outfileName)

        errors = data["arr_0"]
        meanMeasures = numpy.mean(errors, 0)

        for i in range(sampleSizes.shape[0]):
            plt.figure(k*len(sampleMethods) + i)
            plt.title("n="+str(sampleSizes[i]) + " " + sampleMethods[k])

            for j in range(errors.shape[3]):
                plt.plot(foldsSet, meanMeasures[i, :, j])
                plt.xlabel("Folds")
                plt.ylabel('Error')

            labels = ["VFCV", "PenVF+"]
            labels.extend(["VFP s=" + str(x) for x in cvScalings])
            plt.legend(tuple(labels))
    plt.show()
Example #12
    def work(self, **kwargs):
        self.__dict__.update(kwargs)
        self.worked = True
        samples = LGMM1(rng=self.rng,
                size=(self.n_samples,),
                **self.LGMM1_kwargs)
        samples = np.sort(samples)
        edges = samples[::self.samples_per_bin]
        centers = .5 * edges[:-1] + .5 * edges[1:]
        print(edges)

        pdf = np.exp(LGMM1_lpdf(centers, **self.LGMM1_kwargs))
        dx = edges[1:] - edges[:-1]
        y = 1 / dx / len(dx)

        if self.show:
            plt.scatter(centers, y)
            plt.plot(centers, pdf)
            plt.show()
        err = (pdf - y) ** 2
        print(np.max(err))
        print(np.mean(err))
        print(np.median(err))
        if not self.show:
            assert np.max(err) < .1
            assert np.mean(err) < .01
            assert np.median(err) < .01
Example #13
def filterFunc():
    rects = []
    hsv_planes = [[[]]]
    if os.path.isfile(Image_File):
        BGR=cv2.imread(Image_File)
        gray = cv2.cvtColor(BGR, cv2.COLOR_BGR2GRAY)
        img = gray
        f = np.fft.fft2(img)
        fshift = np.fft.fftshift(f)
        magnitude_spectrum = 20*np.log(np.abs(fshift))
        
        plt.subplot(221),plt.imshow(img, cmap = 'gray')
        plt.title('Input Image'), plt.xticks([]), plt.yticks([])
        
        plt.subplot(222),plt.imshow(magnitude_spectrum, cmap = 'gray')
        plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
        
        FilteredFFT = HighPassFilter(fshift, 60)
        plt.subplot(223),plt.imshow(np.abs(FilteredFFT), cmap = 'gray')
        plt.title('Filtered'), plt.xticks([]), plt.yticks([])

        f_ishift = np.fft.ifftshift(FilteredFFT)
        img_back = np.fft.ifft2(f_ishift)
        img_back = np.abs(img_back)
        plt.subplot(224),plt.imshow(np.abs(img_back), cmap = 'gray')
        plt.title('Filtered Image'), plt.xticks([]), plt.yticks([])
        plt.show()
Example #14
def main():
  # use sys.argv if needed
  print('starting boids...')

  parser = argparse.ArgumentParser(description="Implementing Craig Reynold's Boids...")
  # add arguments
  parser.add_argument('--num-boids', dest='N', required=False)
  args = parser.parse_args()

  # number of boids
  N = 100
  if args.N:
      N = int(args.N)

  # create boids
  boids = Boids(N)

  # setup plot
  fig = plt.figure()
  ax = plt.axes(xlim=(0, width), ylim=(0, height))

  pts, = ax.plot([], [], markersize=10, 
                  c='k', marker='o', ls='None')
  beak, = ax.plot([], [], markersize=4, 
                  c='r', marker='o', ls='None')
  anim = animation.FuncAnimation(fig, tick, fargs=(pts, beak, boids), 
                                 interval=50)

  # add a "button press" event handler
  cid = fig.canvas.mpl_connect('button_press_event', boids.buttonPress)

  plt.show()
Example #15
    def work(self):
        self.worked = True
        kwargs = dict(
                weights=self.weights,
                mus=self.mus,
                sigmas=self.sigmas,
                low=self.low,
                high=self.high,
                q=self.q,
                )
        samples = GMM1(rng=self.rng,
                size=(self.n_samples,),
                **kwargs)
        samples = np.sort(samples)
        edges = samples[::self.samples_per_bin]
        #print samples

        pdf = np.exp(GMM1_lpdf(edges[:-1], **kwargs))
        dx = edges[1:] - edges[:-1]
        y = 1 / dx / len(dx)

        if self.show:
            plt.scatter(edges[:-1], y)
            plt.plot(edges[:-1], pdf)
            plt.show()
        err = (pdf - y) ** 2
        print(np.max(err))
        print(np.mean(err))
        print(np.median(err))
        if not self.show:
            assert np.max(err) < .1
            assert np.mean(err) < .01
            assert np.median(err) < .01
    def default_run(self):
        """
        Plots the results, saves the figure, and finally displays it from simulating codewords with Sum-prod and Max-prod
        algorithms across variance levels. This combines the results in one plot.
        :return:
        """
        if not os.path.exists("./graphs"):
            os.makedirs("./graphs")
        self.save_time = str(int(time.time()))
        self.simulate(Decoder.SUM_PROD)
        self.compute_error()
        plt.plot([math.log10(x) for x in self.variance_levels], [math.log10(y) for y in self.bit_error_probability],
                 "ro-", label="Sum-Prod")
        self.simulate(Decoder.MAX_PROD)
        self.compute_error()
        plt.plot([math.log10(x) for x in self.variance_levels], [math.log10(y) for y in self.bit_error_probability],
                 "g^--", label="Max-Prod")
        plt.legend(loc=2)
        plt.title("Hamming Decoder Factor Graph Simulation Results\n" +
                  r"$\log_{10}(\sigma^2)$ vs. $\log_{10}(P_e)$" + " for Max-Prod & Sum-Prod Algorithms\n" +
                  "Sample Size n = %(codewords)s Codewords \n Variance Levels = %(levels)s"
                  % {"codewords": str(self.iterations), "levels": str(self.variance_levels)})
        plt.xlabel(r"$\log_{10}(\sigma^2)$")
        plt.ylabel(r"$\log_{10}(P_e)$")
        plt.savefig("graphs/%(time)s-max-prod-sum-prod-%(num_codewords)s-codewords-variance-bit_error_probability.png" %
                    {"time": self.save_time,
                     "num_codewords": str(self.iterations)}, bbox_inches="tight")
        plt.show()
def vis_result(image, seg, gt, title1='Segmentation', title2='Ground truth', savefile=None):
    indices = np.where(seg >= 0.5)
    indices_gt = np.where(gt >= 0.5)

    im_norm = image / image.max()
    rgb_image = color.gray2rgb(im_norm)
    multiplier = [0., 1., 1.]
    multiplier_gt = [1., 1., 0.]

    im_seg = rgb_image.copy()
    im_gt = rgb_image.copy()
    im_seg[indices[0], indices[1], :] *= multiplier
    im_gt[indices_gt[0], indices_gt[1], :] *= multiplier_gt

    fig = plt.figure()
    a = fig.add_subplot(1, 2, 1)
    plt.imshow(im_seg)
    a.set_title(title1)
    a = fig.add_subplot(1, 2, 2)
    plt.imshow(im_gt)
    a.set_title(title2)

    if savefile is None:
        plt.show()
    else:
        plt.savefig(savefile)
    plt.close()
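A usage sketch for vis_result() on synthetic arrays, assuming numpy, matplotlib.pyplot as plt, and skimage's color module are imported as the function body expects:

import numpy as np

image = np.random.rand(64, 64)
seg = np.zeros((64, 64)); seg[20:40, 20:40] = 1.0  # toy segmentation mask
gt = np.zeros((64, 64)); gt[22:42, 18:38] = 1.0    # toy ground-truth mask
vis_result(image, seg, gt)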
Example #18
def compare_chebhist(dname, mylambda, c, Nbin = 25):


    if mylambda == 'Do not exist':
        print('--!!Warning: eig file does not exist, cannot display comparison histogram')
    else:
        mylambda = 1 - mylambda
        lmin = max(min(mylambda), -1)
        lmax = min(max(mylambda),  1)

        # print(c)
        cheb_file_content = '\n'.join([str(st) for st in c])
        x = np.linspace(lmin, lmax, Nbin + 1)
        y = plot_chebint(c, x)
        u = (x[1:] + x[:-1]) / 2
        v = y[1:] - y[:-1]

        plt.clf()
        plt.hist(mylambda, Nbin)  # plt.hold() calls removed: the API was dropped in Matplotlib 3.0
        plt.plot(u, v, "r.", markersize=10)
        filename = 'data/' + dname + '.png'
        plt.savefig(filename)  # save before show(), which can leave a stale canvas for savefig
        plt.show()

        cheb_filename = 'data/' + dname + '.cheb'
        f = open(cheb_filename, 'w+')
        f.write(cheb_file_content)
        f.close()
Example #19
def run_test(fld, seeds, plot2d=True, plot3d=True, add_title="",
             view_kwargs=None, show=False, scatter_mpl=False, mesh_mvi=True):
    interpolated_fld = viscid.interp_trilin(fld, seeds)
    seed_name = seeds.__class__.__name__
    if add_title:
        seed_name += " " + add_title

    try:
        if not plot2d:
            raise ImportError
        from viscid.plot import vpyplot as vlt
        from matplotlib import pyplot as plt
        plt.clf()
        # plt.plot(seeds.get_points()[2, :], fld)
        mpl_plot_kwargs = dict()
        if interpolated_fld.is_spherical():
            mpl_plot_kwargs['hemisphere'] = 'north'
        vlt.plot(interpolated_fld, **mpl_plot_kwargs)
        plt.title(seed_name)

        plt.savefig(next_plot_fname(__file__, series='2d'))
        if show:
            plt.show()

        if scatter_mpl:
            plt.clf()
            vlt.plot2d_line(seeds.get_points(), fld, symdir='z', marker='o')
            plt.savefig(next_plot_fname(__file__, series='2d'))
            if show:
                plt.show()
    except ImportError:
        pass

    try:
        if not plot3d:
            raise ImportError
        from viscid.plot import vlab

        _ = get_mvi_fig(offscreen=not show)

        try:
            if mesh_mvi:
                mesh = vlab.mesh_from_seeds(seeds, scalars=interpolated_fld)
                mesh.actor.property.backface_culling = True
        except RuntimeError:
            pass

        pts = seeds.get_points()
        p = vlab.points3d(pts[0], pts[1], pts[2], interpolated_fld.flat_data,
                          scale_mode='none', scale_factor=0.02)
        vlab.axes(p)
        vlab.title(seed_name)
        if view_kwargs:
            vlab.view(**view_kwargs)

        vlab.savefig(next_plot_fname(__file__, series='3d'))
        if show:
            vlab.show(stop=True)
    except ImportError:
        pass
Example #20
def plot_scenario(strategies, names, scenario_id=1):
    probabilities = get_scenario(scenario_id)

    plt.figure(figsize=(6, 4.5))

    ax = plt.subplot(111)
    ax.spines["top"].set_visible(False)
    ax.spines["bottom"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["left"].set_visible(False)

    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()

    plt.yticks(fontsize=14)
    plt.xticks(fontsize=14)
    plt.xlim((0, 1300))

    # Remove the tick marks; they are unnecessary with the tick lines we just plotted.
    plt.tick_params(axis="both", which="both", bottom=True, top=False,
                    labelbottom=True, left=False, right=False, labelleft=True)

    for rank, (strategy, name) in enumerate(zip(strategies, names)):
        plot_strategy(probabilities, strategy, name, rank)

    plt.title("Bandits: " + str(probabilities), fontweight='bold')
    plt.xlabel('Number of Trials', fontsize=14)
    plt.ylabel('Cumulative Regret', fontsize=14)
    plt.legend(names)
    plt.show()
def plot():
    elements_list = get_elements()
    x = range(0, len(elements_list))
    y = elements_list
    print(x)
    plt.plot(x, y)
    plt.show()
Example #22
def main():
    """The main function."""

    # Build data ################

    x = np.arange(0, 10000, 500)
    y = np.arange(0, 1, 0.05)

    xx, yy = np.meshgrid(x, y)
    z = np.power(xx,yy)

    print "xx ="
    print xx
    print "yy ="
    print yy
    print "z ="
    print z

    # Plot data #################

    fig = plt.figure()
    ax = axes3d.Axes3D(fig)

    surf = ax.plot_surface(xx, yy, z, cmap=cm.jet, rstride=1, cstride=1, color='b', shade=True)

    ax.set_xlabel("X")
    ax.set_ylabel("Y")
    ax.set_zlabel("Z")

    fig.colorbar(surf, shrink=0.5, aspect=5)

    plt.show()
def plotIterationResult(train_err_list):
    x = range(1,len(train_err_list) + 1)
    fig = plt.figure()
    plt.plot(x,train_err_list)
    plt.xlabel('iterations')
    plt.ylabel('training error')
    plt.show()
def plotTestData(tree):
	plt.figure()
	plt.axis([0,1,0,1])
	plt.xlabel("X axis")
	plt.ylabel("Y axis")
	plt.title("Green: Class1, Red: Class2, Blue: Class3, Yellow: Class4")
	# plt.hold() calls removed: the API was dropped in Matplotlib 3.0
	for value in class1:
		plt.plot(value[0],value[1],'go')
	for value in class2:
		plt.plot(value[0],value[1],'ro')
	for value in class3:
		plt.plot(value[0],value[1],'bo')
	for value in class4:
		plt.plot(value[0],value[1],'yo')
	plotRegion(tree)
	for value in classPlot1:
		plt.plot(value[0],value[1],'g.',ms=3.0)
	for value in classPlot2:
		plt.plot(value[0],value[1],'r.', ms=3.0)
	for value in classPlot3:
		plt.plot(value[0],value[1],'b.', ms=3.0)
	for value in classPlot4:
		plt.plot(value[0],value[1],'y.', ms=3.0)
	plt.grid(True)
	plt.show()
def plotJ(J_history,num_iters):
    x = np.arange(1,num_iters+1)
    plt.plot(x,J_history)
    plt.xlabel(u"迭代次数",fontproperties=font) # 注意指定字体,要不然出现乱码问题
    plt.ylabel(u"代价值",fontproperties=font)
    plt.title(u"代价随迭代次数的变化",fontproperties=font)
    plt.show()
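A usage sketch for plotJ(); `font` is the module-level FontProperties object the labels reference, and the font path below is a hypothetical placeholder for any CJK-capable font file:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties

font = FontProperties(fname='/path/to/a/cjk-font.ttf')  # hypothetical path
plotJ(np.logspace(2, 0, 50), 50)  # synthetic, decaying cost history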
    def one_file_features(self, im, demo=False):
        """
        Zde je kontruován vektor příznaků pro klasfikátor
        """
        # color processing
        fd = np.array([])

        img = skimage.color.rgb2gray(im)
        # graylevel
        if self.hogFeatures:
            pass

        if self.grayLevelFeatures:
            imr = skimage.transform.resize(img, [9, 9])
            glfd = imr.reshape(-1)
            fd = np.append(fd, glfd)

            if demo:
                plt.imshow(imr)
                plt.show()

        #fd.append(hsvft[:])
        if self.colorFeatures:
            #fd = np.append(fd, colorft)
            pass

        #print hog_image
        return fd
Example #27
    def zplane(self, title="", fontsize=18):
        """ Display filter in the complex plane

        Parameters
        ----------
        title : str, optional
            Figure title.
        fontsize : int, optional
            Font size of the title.
        """
        rb = self.z
        ra = self.p

        t = np.arange(0, 2 * np.pi + 0.1, 0.1)
        plt.plot(np.cos(t), np.sin(t), "k")

        plt.plot(np.real(ra), np.imag(ra), "x", color="r")
        plt.plot(np.real(rb), np.imag(rb), "o", color="b")
        M1 = -10000
        M2 = -10000
        if len(ra) > 0:
            M1 = np.max([np.abs(np.real(ra)), np.abs(np.imag(ra))])
        if len(rb) > 0:
            M2 = np.max([np.abs(np.real(rb)), np.abs(np.imag(rb))])
        M = 1.6 * max(1.2, M1, M2)
        plt.axis([-M, M, -0.7 * M, 0.7 * M])
        plt.title(title, fontsize=fontsize)
        plt.show()
Example #28
def draw(data, classes, model, resolution=100):
    mycm = mpl.cm.get_cmap('Paired')
    
    one_min, one_max = data[:, 0].min()-0.1, data[:, 0].max()+0.1
    two_min, two_max = data[:, 1].min()-0.1, data[:, 1].max()+0.1
    xx1, xx2 = np.meshgrid(np.arange(one_min, one_max, (one_max-one_min)/resolution),
                     np.arange(two_min, two_max, (two_max-two_min)/resolution))
    
    inputs = np.c_[xx1.ravel(), xx2.ravel()]
    z = []
    for i in range(len(inputs)):
        z.append(predict(model, inputs[i])[0])
    result = np.array(z).reshape(xx1.shape)
    
    plt.contourf(xx1, xx2, result, cmap=mycm)
    plt.scatter(data[:, 0], data[:, 1], s=50, c=classes, cmap=mycm)
    
    t = np.zeros(15)
    for i in range(15):
        if i < 5:
            t[i] = 0
        elif i < 10:
            t[i] = 1
        else:
            t[i] = 2
    plt.scatter(model[:, 0], model[:, 1], s=150, c=t, cmap=mycm)
    
    plt.xlim([0, 10])
    plt.ylim([0, 10])
    
    plt.show()
Example #29
def display(spectrum):
	template = np.ones(len(spectrum))

	#Get the plot ready and label the axes
	pyp.plot(spectrum)
	max_range = int(math.ceil(np.amax(spectrum) / standard_deviation))
	for i in range(0, max_range):
		pyp.plot(template * (mean + i * standard_deviation))
	pyp.xlabel('Units?')
	pyp.ylabel('Amps Squared')    
	pyp.title('Mean Normalized Power Spectrum')
	if 'V' in Options:
		pyp.show()
	if 'v' in Options:
		tokens = sys.argv[-1].split('.')
		filename = tokens[0] + ".png"
		if os.path.isfile(filename):
			# use a new name: the original rebound the builtin input() to a string,
			# which made the second prompt call fail
			answer = input("Error: Plot file already exists! Overwrite? (y/n)\n")
			while answer != 'y' and answer != 'n':
				answer = input("Please enter either 'y' or 'n'.\n")
			if answer == 'y':
				pyp.savefig(filename)
			else:
				print("Plot not written.")
		else:
			pyp.savefig(filename)
  def build_hist(self, coverage, show=False, save=False, save_fn="max_hist_plot"):
    """
    Build a histogram to determine what the maxes look like & visualize match_count.
    Might be used to determine a reasonable threshold.

    @param coverage: the average coverage for a single nt
    @param show: Show visualization with match maxes
    @param save: Save the figure to disk
    @param save_fn: Save to disk with this file name or else it will be the default

    @return: the histogram array
    """
    #import matplotlib
    #matplotlib.use("Agg")
    import matplotlib.pyplot as plt

    maxes = self.match_count.max(1) # get maxes along 1st dim

    h = plt.hist(maxes, bins=self.match_count.shape[0]) # figure out where the majority

    plt.ylabel("Frequency")
    plt.xlabel("Count per index")
    plt.title("Frequency count histogram")

    if show: plt.show()
    if save: plt.savefig(save_fn, dpi=160)  # the frameon kwarg was removed in Matplotlib 3.3

    return h[0]
# From all these boxplots we can see that the columns 'crim','zn','rm','dis','black','lstat','medv' have some outliers

######## for "rm" column HVO and LVO finding and replacing

# Detection of outliers for the rm column (find limits for rm based on IQR)

rm_IQR = boston['rm'].quantile(0.75) - boston['rm'].quantile(0.25)
rm_lower_limit = boston['rm'].quantile(0.25) - (rm_IQR * 1.5)
rm_upper_limit = boston['rm'].quantile(0.75) + (rm_IQR * 1.5)


####################### Replace ############################
# Now let's replace the outliers by the maximum and minimum limit
boston['rm_replaced']= pd.DataFrame(np.where(boston['rm'] > rm_upper_limit, rm_upper_limit, 
                                         np.where(boston['rm'] < rm_lower_limit, rm_lower_limit, boston['rm'])))
sns.boxplot(boston.rm_replaced);plt.title('Boxplot');plt.show()

# we see no outliers


######## for "crim" column HVO  finding and replacing ##############

# Detection of outliers (find limits for RM based on IQR)
crim_IQR = boston['crim'].quantile(0.75) - boston['crim'].quantile(0.25)
crim_upper_limit = boston['crim'].quantile(0.75) + (crim_IQR * 1.5)

####################### Replace ############################
# Now let's replace the outliers by the maximum and minimum limit
boston['crim_replaced']= pd.DataFrame(np.where(boston['crim'] > crim_upper_limit, crim_upper_limit ,  boston['crim']))
sns.boxplot(boston.crim_replaced);plt.title('Boxplot');plt.show()
Example #32
def drawOS(spike_times, optimal_bin_num):
    plt.hist(spike_times, optimal_bin_num)
    plt.yticks([])
    plt.show()
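A usage sketch for drawOS() on a synthetic spike train, assuming numpy and matplotlib.pyplot as plt are imported:

import numpy as np

spike_times = np.cumsum(np.random.exponential(scale=0.1, size=500))  # synthetic spike train
drawOS(spike_times, optimal_bin_num=40)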
cols=['PCT_OBESE10','RECFACPTH10','FSRPTH10','PCT_NHWHITE10','PCT_NHBLACK10','PCT_HISP10', \
'PCT_NHASIAN10','PCT_18YOUNGER10','MEDHHINC10','SNAPSPTH10']

df_append=pd.concat([df_main[cols],df_pred[cols]])

df_final=df_append[cols]
model = KMeans(n_clusters=4)
model.fit(df_final)

print(model.fit)

pca_2=PCA(2)
plot_columns=pca_2.fit_transform(df_final)
plt.scatter(x=plot_columns[:,0],y=plot_columns[:,1],c=model.labels_)
plt.show()


model.cluster_centers_

final = pd.DataFrame(np.nan, index=cols, columns=[1,2,3,4])

final[1] = model.cluster_centers_[0]
final[2] = model.cluster_centers_[1]
final[3] = model.cluster_centers_[2]
final[4] = model.cluster_centers_[3]
print(len(DataFrame(model.labels_, columns=['Label'])))
print(DataFrame(model.labels_, columns=['Label']).groupby('Label').size())
print(final)

df_append['cluster_num']=model.labels_
Example #34
def main():
	train, test = get_data()
	train_X = rearrange(train['X'])
	train_Y = train['y'].flatten()-1
	train_X, train_Y = shuffle(train_X, train_Y)
	test_X = rearrange(test['X'])
	test_Y = test['y'].flatten()-1

	max_iter = 6
	print_period = 10
	lr = np.float32(0.0001)
	mu = np.float32(0.99)
	decay = np.float32(0.9)
	eps = np.float32(1e-10)
	reg = np.float32(0.01)
	N = train_X.shape[0]
	batch_sz = 500
	num_batch = N // batch_sz
	M = 500
	K = 10
	poolsz = (2, 2)

	W1_shape = (20, 3, 5, 5) #(num_feature_maps, num_color_channels, filter_width, filter_height)
	W1_init = init_filter(W1_shape, poolsz)
	b1_init = np.zeros(W1_shape[0], dtype=np.float32)

	W2_shape = (50, 20, 5, 5) #(num_feature_maps, old_num_feature_maps, filter_width, filter_height)
	W2_init = init_filter(W2_shape, poolsz)
	b2_init = np.zeros(W2_shape[0], dtype=np.float32)

	#ANN
	W3_init = np.random.randn(W2_shape[0]*5*5, M) / np.sqrt(W2_shape[0]*5*5 + M)
	b3_init = np.zeros(M, dtype=np.float32)
	W4_init = np.random.randn(M, K) / np.sqrt(M+K)
	b4_init = np.zeros(K, dtype=np.float32)

	#init theano variables
	X = T.tensor4('X', dtype='float32')
	Y = T.ivector('T')
	W1 = theano.shared(W1_init, 'W1')
	b1 = theano.shared(b1_init, 'b1')
	W2 = theano.shared(W2_init, 'W2')
	b2 = theano.shared(b2_init, 'b2')
	W3 = theano.shared(W3_init.astype(np.float32), 'W3')
	b3 = theano.shared(b3_init, 'b3')
	W4 = theano.shared(W4_init.astype(np.float32), 'W4')
	b4 = theano.shared(b4_init, 'b4')

	#forward
	Z1 = convpool(X, W1, b1)
	Z2 = convpool(Z1, W2, b2)
	Z3 = relu(Z2.flatten(ndim=2).dot(W3) + b3)
	pY = T.nnet.softmax(Z3.dot(W4) + b4)
	
	#test & prediction functions
	params = [W1, b1, W2, b2, W3, b3, W4, b4]
	rcost = reg * sum((p*p).sum() for p in params)  # builtin sum: np.sum over a generator of symbolic variables is unreliable
	cost = -(T.log(pY[T.arange(Y.shape[0]), Y])).mean() + rcost
	prediction = T.argmax(pY, axis=1)
	momentum = [theano.shared(
		np.zeros_like(p.get_value(), dtype=np.float32)) for p in params]
	caches = [theano.shared(
		np.ones_like(p.get_value(), dtype=np.float32)) for p in params]
	
	#RMSProp
	updates = []
	grads = T.grad(cost, params)
	for p, g, m, c in zip(params, grads, momentum, caches):
		updates_c = decay*c + (np.float32(1.0)-decay)*g*g
		updates_m = mu*m - lr*g / T.sqrt(updates_c + eps)
		updates_p = p + updates_m

		updates.append([c, updates_c])
		updates.append([m, updates_m])
		updates.append([p, updates_p])

	#init functions
	train_op = theano.function(inputs=[X, Y], updates=updates)
	prediction_op = theano.function(inputs=[X, Y], outputs=[cost, prediction])

	costs= []
	for i in range(max_iter):
		shuffle_X, shuffle_Y = shuffle(train_X, train_Y)
		for j in range(num_batch):
			x = shuffle_X[j*batch_sz : (j*batch_sz+batch_sz), :]
			y = shuffle_Y[j*batch_sz : (j*batch_sz+batch_sz)]

			train_op(x, y)
			if j % print_period == 0:
				cost_val, p_val = prediction_op(test_X, test_Y)
				e = error_rate(p_val, test_Y)
				costs.append(cost_val)
				print("Cost / err at iteration i=%d, j=%d: %.3f / %.3f" % (i, j, cost_val, e))
	plt.plot(costs)
	plt.show()
    def show_array(self, arr):
        plt.figure()
        plt.imshow(arr, cmap="gray")
        plt.show()
Example #36
def plotStockData(stockAsPandas):
    (stockAsPandas.loc[:, stockAsPandas.columns != 'volume']).plot()
    plt.show()
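A usage sketch for plotStockData(); the frame below is synthetic and simply mimics price/volume columns, so everything except 'volume' gets plotted:

import pandas as pd
import matplotlib.pyplot as plt

stock = pd.DataFrame({'open': [10.0, 10.4, 10.2],
                      'close': [10.3, 10.1, 10.6],
                      'volume': [1200, 900, 1500]})
plotStockData(stock)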
print("Cross Validation score for Decision Tree:",cross_validation_decision_tree)
print("Cross Validation score for Adaptive Boosting:",cross_validation_adaptive_boosting)

# Figure and comparison show
names = ['Decision Tree', 'Adaptive Boosting']
values = [mean_absolute_error_decision_tree, mean_absolute_error_adaptive_boosting]
plot.figure(figsize= (15, 5))
plot.subplot(121).set_ylabel('Mean Absolute Error')

plot.bar(names, values)

values2 = [explained_variance_decision_tree, explained_variance_adaptive_boosting]
plot.subplot(122).set_ylabel('Explained Variance')
plot.bar(names, values2)

plot.show()

plot.figure(figsize= (15, 5))
values3 = [max_error_decision_tree, max_error_adaptive_boosting]
plot.subplot(121).set_ylabel('Max Error')
plot.bar(names, values3)

values4 = [r2_score_decision_tree, r2_score_adaptive_boosting]
plot.subplot(122).set_ylabel('R2 Score')
plot.bar(names, values4)

plot.show()

names = ['Split-1', 'Split-2', 'Split-3', 'Split-4', 'Split-5']
plot.figure(figsize = (15, 5))
plot.subplot(111)
Example #38
def main():
    #x1 = utils.imgLoad('Set12/04.png')
    #x2 = utils.imgLoad('Set12/05.png')
    #x  = torch.cat([x1,x2])
    #x = utils.imgLoad('CBSD68/0018.png')
    #x = utils.imgLoad('Set12/04.png')

    #N = 12
    #x = torch.arange(N*N).reshape(1,1,N,N).float()
    #print("x.shape =", x.shape)
    #print(x)

    C = x.shape[1]  # NOTE: one of the image loads above must be uncommented so that `x` is defined
    Cout = 3
    rank = 3
    K = 8
    ks = 3
    M = 32

    local = False
    if not local:
        pad = utils.calcPad2D(*x.shape[2:], M)
        xpad = F.pad(x, pad, mode='reflect')
    else:
        xpad = x

    #print("Starting faiss_knn ...")
    #start = time.time()
    #xbq = xpad.reshape(N,1).numpy()
    #_, I = exs.knn(xbq,xbq, K)
    #end = time.time()
    #print("done.")
    #print(f"fais_knn time = {end-start:.3f}")

    mask = localMask(M, M, ks)
    mask = slidingMask(M, ks)  # overrides the local mask above; keep whichever variant is wanted

    print("Starting topK ...")
    start = time.time()
    edge = slidingTopK(xpad, K, M, mask)
    #edge = windowedTopK(xpad, K, M, mask)
    end = time.time()
    print("done.")
    print(f"time = {end-start:.3f}")

    print(f"edge.shape = ")
    print(edge.shape)

    sys.exit()  # early exit for debugging; everything below is unreachable
    print(edge[:, 0])

    # (B, K, N, C)
    label, vertex = getLabelVertex(xpad, edge)

    #GConv = net.GraphConv(C,Cout, ks=ks)
    #ypad = GConv(x, edge)
    #y = utils.unpad(ypad, pad)

    fig, handler = visual.visplotNeighbors(xpad,
                                           edge,
                                           local_area=False,
                                           depth=0)
    plt.show()
Example #39
    def muscle_plot(self, a=1, axs=None):
        """Plot muscle-tendon relationships with length and velocity."""

        try:
            import matplotlib.pyplot as plt
        except ImportError:
            print('matplotlib is not available.')
            return
        
        if axs is None:
            _, axs = plt.subplots(nrows=1, ncols=3, figsize=(9, 4))
        
        lmopt   = self.P['lmopt']
        ltslack = self.P['ltslack']
        vmmax   = self.P['vmmax']
        alpha0  = self.P['alpha0']
        fm0     = self.P['fm0']
        lm0     = self.S['lm0']
        lmt0    = self.S['lmt0']
        lt0     = self.S['lt0']
        if np.isnan(lt0):
            lt0 = lmt0 - lm0*np.cos(alpha0)
        
        lm  = np.linspace(0, 2, 101)
        lt  = np.linspace(0, 1, 101)*0.05 + 1
        vm  = np.linspace(-1, 1, 101)*vmmax*lmopt
        fl  = np.zeros(lm.size)
        fpe = np.zeros(lm.size)
        fse = np.zeros(lt.size)
        fvm = np.zeros(vm.size)
        
        fl_lm0  = self.force_l(lm0/lmopt)
        fpe_lm0 = self.force_pe(lm0/lmopt)
        fm_lm0  = fl_lm0 + fpe_lm0
        ft_lt0  = self.force_se(lt0, ltslack)*fm0        
        
        for i in range(101):
            fl[i]  = self.force_l(lm[i])
            fpe[i] = self.force_pe(lm[i])
            fse[i] = self.force_se(lt[i], ltslack=1)
            fvm[i] = self.force_vm(vm[i], a=a, fl=fl_lm0)

        lm  = lm*lmopt
        lt  = lt*ltslack
        fl  = fl
        fpe = fpe
        fse = fse*fm0
        fvm = fvm*fm0
            
        xlim = self.margins(lm, margin=.05, minmargin=False)
        axs[0].set_xlim(xlim)
        ylim = self.margins([0, 2], margin=.05)
        axs[0].set_ylim(ylim)
        axs[0].plot(lm, fl, 'b', label='Active')
        axs[0].plot(lm, fpe, 'b--', label='Passive')
        axs[0].plot(lm, fl+fpe, 'b:', label='')
        axs[0].plot([lm0, lm0], [ylim[0], fm_lm0], 'k:', lw=2, label='')
        axs[0].plot([xlim[0], lm0], [fm_lm0, fm_lm0], 'k:', lw=2, label='')
        axs[0].plot(lm0, fm_lm0, 'o', ms=6, mfc='r', mec='r', mew=2, label='fl(LM0)')
        axs[0].legend(loc='best', frameon=True, framealpha=.5)
        axs[0].set_xlabel('Length [m]')
        axs[0].set_ylabel('Scale factor')
        axs[0].xaxis.set_major_locator(plt.MaxNLocator(4))
        axs[0].yaxis.set_major_locator(plt.MaxNLocator(4))
        axs[0].set_title('Muscle F-L (a=1)')
        
        xlim = self.margins([0, np.min(vm), np.max(vm)], margin=.05, minmargin=False)
        axs[1].set_xlim(xlim)
        ylim = self.margins([0, fm0*1.2, np.max(fvm)*1.5], margin=.025)
        axs[1].set_ylim(ylim)
        axs[1].plot(vm, fvm, label='')
        axs[1].set_xlabel(r'$\mathbf{^{CON}}\;$ Velocity [m/s] $\;\mathbf{^{EXC}}$')
        axs[1].plot([0, 0], [ylim[0], fvm[50]], 'k:', lw=2, label='')
        axs[1].plot([xlim[0], 0], [fvm[50], fvm[50]], 'k:', lw=2, label='')
        axs[1].plot(0, fvm[50], 'o', ms=6, mfc='r', mec='r', mew=2, label='FM0(LM0)')
        axs[1].plot(xlim[0], fm0, '+', ms=10, mfc='r', mec='r', mew=2, label='')
        axs[1].text(vm[0], fm0, 'FM0')
        axs[1].legend(loc='upper right', frameon=True, framealpha=.5)
        axs[1].set_ylabel('Force [N]')
        axs[1].xaxis.set_major_locator(plt.MaxNLocator(4))
        axs[1].yaxis.set_major_locator(plt.MaxNLocator(4))
        axs[1].set_title('Muscle F-V (a=1)')

        xlim = self.margins([lt0, ltslack, np.min(lt), np.max(lt)], margin=.05,
                             minmargin=False)
        axs[2].set_xlim(xlim)
        ylim = self.margins([ft_lt0, 0, np.max(fse)], margin=.05)
        axs[2].set_ylim(ylim)
        axs[2].plot(lt, fse, label='')
        axs[2].set_xlabel('Length [m]')
        axs[2].plot([lt0, lt0], [ylim[0], ft_lt0], 'k:', lw=2, label='')
        axs[2].plot([xlim[0], lt0], [ft_lt0, ft_lt0], 'k:', lw=2, label='')
        axs[2].plot(lt0, ft_lt0, 'o', ms=6, mfc='r', mec='r', mew=2, label='FT(LT0)')
        axs[2].legend(loc='upper left', frameon=True, framealpha=.5)
        axs[2].set_ylabel('Force [N]')
        axs[2].xaxis.set_major_locator(plt.MaxNLocator(4))
        axs[2].yaxis.set_major_locator(plt.MaxNLocator(4))
        axs[2].set_title('Tendon')  
        plt.suptitle('Muscle-tendon mechanics', fontsize=18, y=1.03)
        plt.tight_layout(w_pad=.1)
        plt.show()
        
        return axs
Example #40
					environment.setColor(color)
					environment.stroke(points)
					environment.getImage().save('./threebody/%d.png' % (file_cnt))
					environment.close()
					data = [{'color': color, 'radius': radius}] + points
					dataset.append(data)
					file_cnt += 1

		for i in range(0, 3):
			world.objects[i].setPos([random.random()*2.2-1.1, random.random()*2.2-1.1, random.random()])
			world.objects[i].setVelocity([0.0, 0.0, 0.0])
			world.objects[i].trajectory = []
	obj = world.objects
	world.update(1)

'''
	scat = []
	for i in range(0, 3):
		scat.append(ax.scatter(obj[i].pos[0], obj[i].pos[1], c = 1, alpha = 1))
	return scat

anim = animation.FuncAnimation(fig, animate, interval=20, blit=True)
plt.show()
'''
print(world)

while file_cnt < 65537:
	generate()
	print(file_cnt)
	if file_cnt % 4096 == 0:
		f = open('./threebody/strokes.txt', 'w')
def cavity_iteration(params, fd_lower=10.47, fd_upper=10.51, display=False):

    threshold = 0.0005

    eps = params.eps
    eps_array = np.array([eps])

    multi_results = multi_sweep(eps_array, fd_lower, fd_upper, params,
                                threshold)

    labels = params.labels

    collected_data_re = None
    collected_data_im = None
    collected_data_abs = None
    results_list = []
    for sweep in multi_results.values():
        for i, fd in enumerate(sweep.fd_points):
            transmission = sweep.transmissions[i]
            p = sweep.params[i]
            coordinates_re = [[fd], [p.eps], [p.Ej], [p.fc], [p.g], [p.kappa],
                              [p.kappa_phi], [p.gamma], [p.gamma_phi], [p.Ec],
                              [p.n_t], [p.n_c]]
            coordinates_im = [[fd], [p.eps], [p.Ej], [p.fc], [p.g], [p.kappa],
                              [p.kappa_phi], [p.gamma], [p.gamma_phi], [p.Ec],
                              [p.n_t], [p.n_c]]
            coordinates_abs = [[fd], [p.eps], [p.Ej], [p.fc], [p.g], [p.kappa],
                               [p.kappa_phi], [p.gamma], [p.gamma_phi], [p.Ec],
                               [p.n_t], [p.n_c]]
            point = np.array([transmission])
            abs_point = np.array([np.abs(transmission)])

            for j in range(len(coordinates_re) - 1):
                point = point[np.newaxis]
                abs_point = abs_point[np.newaxis]

            hilbert_dict = OrderedDict()
            hilbert_dict['t_levels'] = p.t_levels
            hilbert_dict['c_levels'] = p.c_levels
            packaged_point_re = xr.DataArray(point,
                                             coords=coordinates_re,
                                             dims=labels,
                                             attrs=hilbert_dict)
            packaged_point_im = xr.DataArray(point,
                                             coords=coordinates_im,
                                             dims=labels,
                                             attrs=hilbert_dict)
            packaged_point_abs = xr.DataArray(abs_point,
                                              coords=coordinates_abs,
                                              dims=labels,
                                              attrs=hilbert_dict)
            packaged_point_re = packaged_point_re.real
            packaged_point_im = packaged_point_im.imag

            if collected_data_re is not None:
                collected_data_re = collected_data_re.combine_first(
                    packaged_point_re)
            else:
                collected_data_re = packaged_point_re

            if collected_data_im is not None:
                collected_data_im = collected_data_im.combine_first(
                    packaged_point_im)
            else:
                collected_data_im = packaged_point_im

            if collected_data_abs is not None:
                collected_data_abs = collected_data_abs.combine_first(
                    packaged_point_abs)
            else:
                collected_data_abs = packaged_point_abs

    a_abs = collected_data_abs.squeeze()

    max_indices = local_maxima(a_abs.values[()])
    maxima = a_abs.values[max_indices]
    indices_order = np.argsort(maxima)

    two_peaks = False
    if len(max_indices) == 2:
        two_peaks = True

        max_indices = max_indices[indices_order[-2:]]

        f_r = a_abs.f_d[max_indices[1]].values[()]
        f_r_2 = a_abs.f_d[max_indices[0]].values[()]
        split = f_r - f_r_2

        ratio = a_abs[max_indices[1]] / a_abs[max_indices[0]]
        ratio = ratio.values[()]

    max_idx = np.argmax(a_abs).values[()]
    A_est = a_abs[max_idx]
    f_r_est = a_abs.f_d[max_idx]
    #popt, pcov = curve_fit(lorentzian_func, a_abs.f_d, a_abs.values, p0=[A_est, f_r_est, 0.001])
    popt, pcov = lorentzian_fit(a_abs.f_d.values[()], a_abs.values[()])
    Q_factor = popt[2]

    if display:
        fig, axes = plt.subplots(1, 1)
        a_abs.plot(ax=axes)
        plt.show()
    """
    print "Resonance frequency = " + str(popt[1]) + " GHz"
    print "Q factor = " + str(Q_factor)

    fig, axes = plt.subplots(1,1)
    collected_data_abs.plot(ax=axes)
    axes.plot(a_abs.f_d, lorentzian_func(a_abs.f_d, *popt), 'g--')

    plt.title(str(p.t_levels) + str(' ') + str(p.c_levels))

    props = dict(boxstyle='round', facecolor='wheat', alpha=1)
    if two_peaks == True:
        textstr = 'f_r = ' + str(popt[1]) + 'GHz\n$Q$ = ' + str(Q_factor) + '\n$\chi$ = ' + str(
            split * 1000) + 'MHz\n$\kappa$ = ' + str(1000 * params.kappa) + 'MHz\nRatio = ' + str(ratio)
    else:
        textstr = 'f_r = ' + str(popt[1]) + 'GHz\n$Q$ = ' + str(Q_factor) + '\n$\kappa$ = ' + str(1000 * params.kappa) + 'MHz'

    label = axes.text(a_abs.f_d[0], popt[0], textstr, fontsize=14, verticalalignment='top', bbox=props)

    #collected_dataset = xr.Dataset({'a_re': collected_data_re,
    #                                'a_im': collected_data_im,
    #                                'a_abs': collected_data_abs})

    #time = datetime.now()
    #cwd = os.getcwd()
    #time_string = time.strftime('%Y-%m-%d--%H-%M-%S')

    #directory = cwd + '/eps=' + str(eps) + 'GHz' + '/' + time_string
    #if not os.path.exists(directory):
    #    os.makedirs(directory)
    #    collected_dataset.to_netcdf(directory+'/spectrum.nc')

    plt.show()

    """

    #fc_new = params.fc + 10.49602 - popt[1]
    #g_new = params.g * np.sqrt(23.8 * 1000 / split) / 1000
    #kappa_new = Q_factor * params.kappa / 8700

    return popt[1], split, Q_factor
Example #42
    def lm_plot(self, x, axs=None):
        """Plot results of actdyn_ode45 function.
            data = [t, lmt, lm, lt, vm, fm*fm0, fse*fm0, fl*fm0, fpe*fm0, alpha]
        """

        try:
            import matplotlib.pyplot as plt
        except ImportError:
            print('matplotlib is not available.')
            return
        
        if axs is None:
            _, axs = plt.subplots(nrows=3, ncols=2, sharex=True, figsize=(10, 6))

        axs[0, 0].plot(x[:, 0], x[:, 1], 'b', label='LMT')
        lmt = x[:, 2]*np.cos(x[:, 9]) + x[:, 3]
        if np.sum(x[:, 9]) > 0:
            axs[0, 0].plot(x[:, 0], lmt, 'g--', label=r'$LM \cos \alpha + LT$')
        else:
            axs[0, 0].plot(x[:, 0], lmt, 'g--', label=r'LM+LT')
        ylim = self.margins(x[:, 1], margin=.1)
        axs[0, 0].set_ylim(ylim)
        axs[0, 0].legend(framealpha=.5, loc='best')
        
        axs[0, 1].plot(x[:, 0], x[:, 3], 'b')
        #axs[0, 1].plot(x[:, 0], lt0*np.ones(len(x)), 'r')
        ylim = self.margins(x[:, 3], margin=.1)
        axs[0, 1].set_ylim(ylim)
        
        axs[1, 0].plot(x[:, 0], x[:, 2], 'b')
        #axs[1, 0].plot(x[:, 0], lmopt*np.ones(len(x)), 'r')
        ylim = self.margins(x[:, 2], margin=.1)
        axs[1, 0].set_ylim(ylim)
        
        axs[1, 1].plot(x[:, 0], x[:, 4], 'b')
        ylim = self.margins(x[:, 4], margin=.1)
        axs[1, 1].set_ylim(ylim)
        
        axs[2, 0].plot(x[:, 0], x[:, 5], 'b', label='Muscle')
        axs[2, 0].plot(x[:, 0], x[:, 6], 'g--', label='Tendon')
        ylim = self.margins(x[:, [5, 6]], margin=.1)
        axs[2, 0].set_ylim(ylim)
        axs[2, 0].set_xlabel('Time (s)')
        axs[2, 0].legend(framealpha=.5, loc='best')
        
        axs[2, 1].plot(x[:, 0], x[:, 8], 'b', label='PE')
        ylim = self.margins(x[:, 8], margin=.1)
        axs[2, 1].set_ylim(ylim)
        axs[2, 1].set_xlabel('Time (s)')
        axs[2, 1].legend(framealpha=.5, loc='best')
        
        ylabel = [r'$L_{MT}\,(m)$', r'$L_{T}\,(m)$', r'$L_{M}\,(m)$',
                  r'$V_{CE}\,(m/s)$', r'$Force\,(N)$', r'$Force\,(N)$']
        for i, axi in enumerate(axs.flat):
            axi.set_ylabel(ylabel[i], fontsize=14)
            axi.yaxis.set_major_locator(plt.MaxNLocator(4))
            axi.yaxis.set_label_coords(-.2, 0.5)

        plt.suptitle('Simulation of muscle-tendon mechanics', fontsize=18,
                     y=1.03)
        plt.tight_layout()
        plt.show()
        
        return axs
def get_spidy(root: str):
    #From: https://python-graph-gallery.com/391-radar-chart-with-several-individuals/
    categories = list(lovelanguage)[1:]
    N = len(categories)
    # What will be the angle of each axis in the plot? (we divide the plot / number of variable)
    angles = [n / float(N) * 2 * pi for n in range(N)]
    angles += angles[:1]
    # Initialise the spider plot
    fig3 = plt.figure(figsize=(8, 8))
    ax = plt.subplot(111, polar=True)
    plt.title('Which programming language do people love the most?',
              fontsize=14,
              fontweight='bold')

    # If you want the first axis to be on top:
    ax.set_theta_offset(pi / 2)
    ax.set_theta_direction(-1)

    # Draw one axis per variable and add labels
    plt.xticks(angles[:-1], categories)
    # Draw ylabels
    ax.set_rlabel_position(0)
    plt.yticks([5, 10, 15], ["5%", "10%", "15%"], color="grey", size=12)
    plt.ylim(0, 15)

    # Plot each individual = each line of the data
    # Ind1
    values = lovelanguage.loc[0].drop('group').values.flatten().tolist()
    values += values[:1]
    ax.plot(angles,
            values,
            linewidth=1,
            linestyle='solid',
            label="Professional(Love)")
    ax.fill(angles, values, 'b', alpha=0.1)
    # Ind2
    values = lovelanguage.loc[1].drop('group').values.flatten().tolist()
    values += values[:1]
    ax.plot(angles,
            values,
            linewidth=1,
            linestyle='solid',
            label="Students(Love)")
    ax.fill(angles, values, 'r', alpha=0.1)
    # Add legend
    plt.legend(loc='upper right', bbox_to_anchor=(0.1, 0.1))
    #plt.savefig('static/lovewWeb.png')

    #### Hate section ####
    categories1 = list(hatelanguage)[1:]
    N = len(categories1)
    # What will be the angle of each axis in the plot? (we divide the plot / number of variable)
    angles = [n / float(N) * 2 * pi for n in range(N)]
    angles += angles[:1]
    # Initialise the spider plot
    fig3 = plt.figure(figsize=(8, 8))
    bx = plt.subplot(111, polar=True)
    plt.title('Which programming language do people hate the most?',
              fontsize=14,
              fontweight='bold')
    # If you want the first axis to be on top:
    bx.set_theta_offset(pi / 2)
    bx.set_theta_direction(-1)
    # Draw one axis per variable and add labels
    plt.xticks(angles[:-1], categories1)
    # Draw ylabels
    bx.set_rlabel_position(0)
    plt.yticks([3, 6, 9], ["3%", "6%", "9%"], color="red", size=12)
    plt.ylim(0, 9)
    # Plot each individual = each line of the data
    # Ind1
    values = hatelanguage.loc[0].drop('group').values.flatten().tolist()
    values += values[:1]
    bx.plot(angles,
            values,
            linewidth=1,
            linestyle='solid',
            label="Professional(Hate)")
    bx.fill(angles, values, 'b', alpha=0.1)
    # Ind2
    values = hatelanguage.loc[1].drop('group').values.flatten().tolist()
    values += values[:1]
    bx.plot(angles,
            values,
            linewidth=1,
            linestyle='solid',
            label="Students(Hate)")
    bx.fill(angles, values, 'r', alpha=0.1)

    # Add legend
    plt.legend(loc='upper right', bbox_to_anchor=(0.1, 0.1))
    plt.show()
Example #44
def draw_graph(grph,
               edge_labels=True,
               node_color='#AFAFAF',
               edge_color='#CFCFCF',
               plot=True,
               node_size=2000,
               with_labels=True,
               arrows=True,
               layout='neato'):
    """
    Parameters
    ----------
    grph : networkxGraph
        A graph to draw.
    edge_labels : boolean
        Use nominal values of flow as edge label
    node_color : dict or string
        Hex color code or matplotlib color for each node. If string, all
        colors are the same.

    edge_color : string
        Hex color code or matplotlib color for edge color.

    plot : boolean
        Show matplotlib plot.

    node_size : integer
        Size of nodes.

    with_labels : boolean
        Draw node labels.

    arrows : boolean
        Draw arrows on directed edges. Works only if an optimization_model has
        been passed.
    layout : string
        networkx graph layout, one of: neato, dot, twopi, circo, fdp, sfdp.
    """
    if type(node_color) is dict:
        node_color = [node_color.get(g, '#AFAFAF') for g in grph.nodes()]

    # set drawing options
    options = {
        'prog': 'dot',
        'with_labels': with_labels,
        'node_color': node_color,
        'edge_color': edge_color,
        'node_size': node_size,
        'arrows': arrows
    }

    # try to use pygraphviz for graph layout
    try:
        import pygraphviz
        pos = nx.drawing.nx_agraph.graphviz_layout(grph, prog=layout)
    except ImportError:
        logging.error('Module pygraphviz not found, I won\'t plot the graph.')
        return

    # draw graph
    nx.draw(grph, pos=pos, **options)

    # add edge labels for all edges
    if edge_labels is True and plt:
        labels = nx.get_edge_attributes(grph, 'weight')
        nx.draw_networkx_edge_labels(grph, pos=pos, edge_labels=labels)

    # show output
    if plot is True:
        plt.show()
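A usage sketch for draw_graph() on a tiny directed graph, assuming networkx as nx, matplotlib.pyplot as plt, and logging are imported as the function expects; pygraphviz must be installed for the layout step:

import networkx as nx

g = nx.DiGraph()
g.add_edge('source', 'bus', weight=42)
g.add_edge('bus', 'demand', weight=42)
draw_graph(g, node_color={'bus': '#FF9999'})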
def random_eval_experiment():
    '''
    Experiment illustrating how quickly global random evaluation fails as a method of optimization. The output is the minimum value attained by randomly sampling the cube [-1,1] x [-1,1] x ... x [-1,1] and evaluating a simple quadratic 100, 1000, or 10000 times. The dimension is increased from 1 to 100 and the minimum is plotted for each dimension.
    '''
    # define symmetric quadratic N-dimensional
    g = lambda w: np.dot(w.T, w)

    # loop over dimensions, sample directions, evaluate
    mean_evals = []
    big_dim = 100
    num_pts = 10000
    pt_stops = [100, 1000, 10000]
    for dim in range(big_dim):
        dim_eval = []
        m_eval = []
        for pt in range(num_pts):
            # generate random direction using normalized gaussian
            direction = np.random.randn(dim + 1, 1)
            norms = np.sqrt(np.sum(direction * direction, axis=1))[:,
                                                                   np.newaxis]
            direction = direction / norms

            e = g(direction)
            dim_eval.append(e)

            # record mean and std of so many pts
            if (pt + 1) in pt_stops:
                m_eval.append(np.min(dim_eval))
        mean_evals.append(m_eval)

    # convert to array for easy access
    mean_evals_global = np.asarray(mean_evals)

    fig = plt.figure(figsize=(6, 3))

    # create subplot with 3 panels, plot input function in center plot
    gs = gridspec.GridSpec(1, 1, width_ratios=[1])
    fig.subplots_adjust(wspace=0.5, hspace=0.01)

    # plot input function
    ax = plt.subplot(gs[0])

    for k in range(len(pt_stops)):
        mean_evals = mean_evals_global[:, k]

        # scatter plot mean value
        ax.plot(np.arange(big_dim) + 1, mean_evals)

        # clean up plot - label axes, etc.,
        ax.set_xlabel('dimension of input')
        ax.set_ylabel('function value')

    # draw legend
    t = [str(p) for p in pt_stops]
    ax.legend(t, bbox_to_anchor=(1, 0.5))

    # draw horizontal axis
    ax.plot(np.arange(big_dim) + 1,
            np.arange(big_dim) * 0,
            linewidth=1,
            linestyle='--',
            color='k')
    plt.show()
def qubit_iteration(params, fd_lower=8.9, fd_upper=9.25, display=False):

    threshold = 0.001
    eps = params.eps
    eps_array = np.array([eps])
    multi_results = multi_sweep(eps_array, fd_lower, fd_upper, params,
                                threshold)

    labels = params.labels

    collected_data_re = None
    collected_data_im = None
    collected_data_abs = None
    results_list = []
    for sweep in multi_results.values():
        for i, fd in enumerate(sweep.fd_points):
            transmission = sweep.transmissions[i]
            p = sweep.params[i]
            # the same coordinate list is used for the re, im and abs arrays
            coordinates = [[fd], [p.eps], [p.Ej], [p.fc], [p.g], [p.kappa],
                           [p.kappa_phi], [p.gamma], [p.gamma_phi], [p.Ec],
                           [p.n_t], [p.n_c]]
            point = np.array([transmission])
            abs_point = np.array([np.abs(transmission)])

            # add singleton axes so the data matches the coordinate dimensions
            for j in range(len(coordinates) - 1):
                point = point[np.newaxis]
                abs_point = abs_point[np.newaxis]

            hilbert_dict = OrderedDict()
            hilbert_dict['t_levels'] = p.t_levels
            hilbert_dict['c_levels'] = p.c_levels
            packaged_point_re = xr.DataArray(point,
                                             coords=coordinates,
                                             dims=labels,
                                             attrs=hilbert_dict)
            packaged_point_im = xr.DataArray(point,
                                             coords=coordinates,
                                             dims=labels,
                                             attrs=hilbert_dict)
            packaged_point_abs = xr.DataArray(abs_point,
                                              coords=coordinates,
                                              dims=labels,
                                              attrs=hilbert_dict)
            packaged_point_re = packaged_point_re.real
            packaged_point_im = packaged_point_im.imag

            if collected_data_re is not None:
                collected_data_re = collected_data_re.combine_first(
                    packaged_point_re)
            else:
                collected_data_re = packaged_point_re

            if collected_data_im is not None:
                collected_data_im = collected_data_im.combine_first(
                    packaged_point_im)
            else:
                collected_data_im = packaged_point_im

            if collected_data_abs is not None:
                collected_data_abs = collected_data_abs.combine_first(
                    packaged_point_abs)
            else:
                collected_data_abs = packaged_point_abs

    a_abs = collected_data_abs.squeeze()

    # locate resonance peaks and estimate the resonance frequency and splitting
    max_indices = local_maxima(a_abs.values[()])
    maxima = a_abs.values[max_indices]
    indices_order = np.argsort(maxima)

    max_idx = np.argmax(a_abs).values[()]
    A_est = a_abs[max_idx]
    f_r_est = a_abs.f_d[max_idx]
    popt, pcov = lorentzian_fit(a_abs.f_d.values[()], a_abs.values[()])
    f_r = popt[1]

    two_peaks = False
    split = None
    if len(max_indices) >= 2:
        two_peaks = True
        max_indices = max_indices[indices_order[-2:]]

        f_01 = a_abs.f_d[max_indices[1]].values[()]
        f_12 = a_abs.f_d[max_indices[0]].values[()]
        split = f_12 - f_r

    if display:
        fig, axes = plt.subplots(1, 1)
        a_abs.plot(ax=axes)
        plt.show()
        """ 
        fig, axes = plt.subplots(1, 1)
        xlim = axes.get_xlim()
        ylim = axes.get_ylim()
        xloc = xlim[0] + 0.1*(xlim[1]-xlim[0])
        yloc = ylim[1] - 0.1*(ylim[1]-ylim[0])

        collected_data_abs.plot(ax=axes)
        axes.plot(a_abs.f_d, lorentzian_func(a_abs.f_d, *popt), 'g--')
        print "Resonance frequency = " + str(popt[1]) + " GHz"
        print "Q factor = " + str(Q_factor)
        plt.title(str(p.t_levels) + str(' ') + str(p.c_levels))

        props = dict(boxstyle='round', facecolor='wheat', alpha=1)
        if two_peaks == True:
            textstr = '$f_{01}$ = ' + str(f_01) + 'GHz\n' + r'$\alpha$ = ' + str(1000*split) + 'MHz\n$Q$ = ' + str(Q_factor) + '\n$FWHM$ = ' + str(1000*params.kappa) + 'MHz'
        else:
            #textstr = 'fail'
            textstr = '$f_{01}$ = ' + str(f_r_est.values[()]) + 'GHz\n$Q$ = ' + str(
                Q_factor) + '\n$FWHM$ = ' + str(1000 * params.kappa) + 'MHz'

        #textstr = '$f_{01}$ = ' + str(f_01) + 'GHz\n' + r'$\alpha$ = ' + str(split) + 'GHz'
        label = axes.text(xloc, yloc, textstr, fontsize=14, verticalalignment='top', bbox=props)

    plt.show()

    collected_dataset = xr.Dataset({'a_re': collected_data_re,
                                    'a_im': collected_data_im,
                                    'a_abs': collected_data_abs})

    time = datetime.now()
    cwd = os.getcwd()
    time_string = time.strftime('%Y-%m-%d--%H-%M-%S')

    directory = cwd + '/eps=' + str(eps) + 'GHz' + '/' + time_string
    if not os.path.exists(directory):
        os.makedirs(directory)
        collected_dataset.to_netcdf(directory+'/spectrum.nc')

    """

    #new_fq = params.fq + 9.19324 - f_r_est.values[()]
    # new_chi = (2*params.chi - split - 0.20356)/2
    #new_chi = -0.20356 * params.chi / split

    return f_r, split
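# lorentzian_fit and local_maxima used above are defined elsewhere; this is only a
# sketch of such a fit with scipy.optimize.curve_fit, assuming the parameter order
# (amplitude, center, width) so that popt[1] is the resonance frequency, as above.
import numpy as np
from scipy.optimize import curve_fit

def lorentzian(f, A, f0, gamma):
    # Lorentzian lineshape with amplitude A, center f0 and half-width gamma
    return A * gamma**2 / ((f - f0)**2 + gamma**2)

f = np.linspace(8.9, 9.25, 200)  # drive frequencies (GHz)
y = lorentzian(f, 1.0, 9.1, 0.01) + 0.01 * np.random.randn(f.size)
popt, pcov = curve_fit(lorentzian, f, y, p0=(y.max(), f[np.argmax(y)], 0.01))
print("estimated resonance frequency:", popt[1], "GHz")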
def generictf(flow_df,
              min_flow,
              max_flow,
              radius,
              efficiency,
              flow_unit='feet/sec',
              fluid_density=1000,
              verbose=False,
              enable_plot=False):

    # Turbine's flow velocity to power transfer curve model
    def transferfunction(flow, flow_unit, min_flow, max_flow, radius,
                         efficiency, fluid_density):

        # Checks if flow velocity units are valid and perform conversion
        if flow_unit == 'feet/sec':
            # Converts the flow velocity from feet/s to meters/s
            v_ms = abs(flow) * 0.3048
            max_v_ms = abs(max_flow) * 0.3048

        elif flow_unit == 'meters/sec':
            # Stores the flow velocity in meters/s
            v_ms = abs(flow)
            max_v_ms = abs(max_flow)

        else:
            # If flow unit is not feet/sec nor meters/sec
            raise NameError("Error: flow velocity unit " + flow_unit +
                            " is not currently supported")

        # Checks if flow velocity is above minimum value to generate power
        if abs(flow) > (min_flow):

            # Checks if flow velocity is below maximum value (output is not yet saturated)
            if abs(flow) < (max_flow):
                # Returns power calculated from transfer function considering incompressible fluid
                return (efficiency * (fluid_density) * 3.14159 * (radius**2) *
                        (v_ms**3)) / 2

            else:
                # Returns saturated max power
                return (efficiency * (fluid_density) * 3.14159 * (radius**2) *
                        (max_v_ms**3)) / 2
        else:

            # Returns zero power
            return 0

    # Imports library dependencies
    import pandas as pd
    import matplotlib.pyplot as plt

    # If verbose is true, prints the input parameters for the calculation
    if verbose:
        print(" ")
        print(
            "The input turbine parameters to calculate instantaneous power generation are:"
        )
        print("  Min flow velocity: " + str(min_flow) + " in " + flow_unit)
        print("  Max flow velocity: " + str(max_flow) + " in " + flow_unit)
        print("  Radius:            " + str(radius) + " in meters")
        print("  Efficiency:        " + str(efficiency))
        print(" ")
        print("Fluid density is " + str(fluid_density) + " kg per cubic meter")

        # Display warning for high efficiency values
        if efficiency > 0.593:
            print(" ")
            print("Warning: Efficiency is above Betz Limit (0.593)")

        # If enable_plot is also true (the plot is only drawn in verbose mode),
        # plots the transfer curve of the selected turbine model
        if enable_plot:
            flow = [0.01 * x for x in range(0, int(110 * max_flow))]
            power = [
                transferfunction(x, flow_unit, min_flow, max_flow, radius,
                                 efficiency, fluid_density) for x in flow
            ]
            plt.plot(flow, power)
            plt.title("Flow Velocity to Power Transfer Curve")
            plt.xlabel("Flow velocity in " + flow_unit)
            plt.ylabel("Output power in Watts")
            plt.show()
            #plt.grid(b=True)
            #plt.draw()
            #plt.pause(3)
            #plt.figure()

    # Creates power time series as an empty dataframe
    power_df = pd.DataFrame()

    # Converts flow velocity time series in instantaneous power time series
    power_df['power'] = flow_df.flow.apply(lambda x: transferfunction(
        x, flow_unit, min_flow, max_flow, radius, efficiency, fluid_density))

    # Checks if verbose is true and prints average power generation
    if verbose:
        print(" ")
        print("Average power generation: " +
              "{0:.4f}".format(power_df['power'].mean()) + " Watts")

    # Returns generated power dataframe
    return power_df
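# A minimal sketch of calling generictf on synthetic data; the flow dataframe only
# needs a 'flow' column, and every numeric value below is made up for illustration.
import numpy as np
import pandas as pd

flow_df = pd.DataFrame({'flow': np.abs(np.random.randn(100)) * 5})  # feet/sec
power_df = generictf(flow_df, min_flow=1.0, max_flow=8.0, radius=0.15,
                     efficiency=0.35, verbose=True, enable_plot=True)
print(power_df.head())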
def random_local_experiment():
    '''
    Experiment illustrating the ultimate shortcoming of local random search. Output is
    the fraction of randomly sampled directions that are descent directions for a
    simple quadratic centered at the point [1, 0, ..., 0], as the dimension of the
    input is increased.
    '''
    # define symmetric quadratic N-dimensional
    g = lambda w: np.dot(w.T, w)

    # loop over dimensions, sample points, evaluate
    mean_evals = []
    big_dim = 25
    num_pts = 10000
    pt_stops = [100, 1000, 10000]
    for dim in range(big_dim):
        # containers for evaluation
        dim_eval = []
        m_eval = []

        # starting vector
        start = np.zeros((dim + 1, 1))
        start[0] = 1

        for pt in range(num_pts):
            # generate a random point on the unit sphere around the start point
            r = np.random.randn(dim + 1, 1)
            r = r / np.linalg.norm(r)
            r = start + r

            # check whether the step decreased the function value
            if g(r) < g(start):
                dim_eval.append(1)
            else:
                dim_eval.append(0)

            # record the fraction of descent directions after 100, 1000 and 10000 samples
            if (pt + 1) in pt_stops:
                m_eval.append(np.mean(dim_eval))

        # store average number of descent directions
        mean_evals.append(m_eval)

    # convert to array for easy access
    mean_evals_global = np.asarray(mean_evals)

    fig = plt.figure(figsize=(6, 3))

    # create a single-panel figure
    gs = gridspec.GridSpec(1, 1, width_ratios=[1])
    fig.subplots_adjust(wspace=0.5, hspace=0.01)

    # axes for the results
    ax = plt.subplot(gs[0])

    for k in range(len(pt_stops)):
        mean_evals = mean_evals_global[:, k]

        # plot the fraction of descent directions against dimension
        ax.plot(np.arange(big_dim) + 1, mean_evals)

        # clean up plot - label axes, etc.
        ax.set_xlabel('dimension of input')
        ax.set_ylabel('fraction of descent directions')

    # draw legend
    t = [str(p) for p in pt_stops]
    ax.legend(t, bbox_to_anchor=(1, 0.5))

    # draw horizontal axis
    ax.plot(np.arange(big_dim) + 1,
            np.arange(big_dim) * 0,
            linewidth=1,
            linestyle='--',
            color='k')

    plt.show()
def main(candidate_file):
    candidates = get_candidates(candidate_file)
    # get the graph of users for the whole year
    calorie_king_social_network = CKGraph()
    graphs = calorie_king_social_network.build_undirected_graph(uids=candidates.keys())
    G = graphs[0]
    # nx.connected_component_subgraphs was removed in networkx 2.4
    components = sorted(nx.connected_components(G), key=len, reverse=True)
    giant_component = G.subgraph(components[0])
    giant_nodes = giant_component.nodes()

    #get the active users for each day
    master_query ="""Select distinct(ck_id) from activity_time_log """
    db = DBConnection()
    #get the field names for each day of the year
    result = db.query("DESCRIBE activity_time_log")
    days = [str(x['Field']) for x in result if str(x['Field']) != 'ck_id']
    #make the matrix of user activity
    activity = numpy.zeros((len(giant_nodes),len(days)))
    rows = {}
    row_count = 0
    column_count = 0
    for day in days:
        active_users = db.query(master_query + "WHERE " + day + " IS NOT NULL")
        active_users = [str(x['ck_id']) for x in active_users]
        for user in active_users:
            if user in candidates and int(candidates[user]) in giant_nodes:
                # index rows from 0 so the activity matrix is never overrun
                if user not in rows:
                    rows[user] = row_count
                    row_count += 1
                activity[rows[user],column_count] = 1
        column_count += 1


    
    #for each row in the plot, make a list of the days of activity
    plot_rows = {}
    for r in rows.values():
        plot_rows[r] = []
        current_column_start = -1
        current_column_width = 0
        for i in range(len(activity[r,:])):
            #if there's activity today
            if activity[r,i] == 1:
                #then if we haven't started a bar, start one
                if current_column_start == -1:
                    current_column_start = i
                current_column_width += 1
            #if there's no activity today and a bar is open, close it
            elif activity[r,i] < 1 and current_column_start != -1:
                plot_rows[r].append((current_column_start, current_column_width))
                current_column_start = -1
                current_column_width = 0
        #close out a bar that runs through the last day of the year
        if current_column_start != -1:
            plot_rows[r].append((current_column_start, current_column_width))
    #make the broken bar plot
    fig = pyplot.figure()
    ax = fig.add_subplot(111)
    #colors
    colormap = pyplot.cm.autumn
    #add the bars
    #(y-offset, bar height) for each user's row, advanced by 3 per row
    column_width = [1,3]
    increment = lambda x: [x[0]+3, 3]
    random_rows = random.sample(list(plot_rows), len(plot_rows))
    for i in range(1,len(random_rows)+1):
        color = colormap(random.random())
        ax.broken_barh(plot_rows[random_rows[i-1]], tuple(column_width), facecolor=color)
        column_width = increment(column_width)
    ax.set_xlabel("Days of user activity")
    ax.set_xlim(0,365)
    ax.set_ylabel("User")
    ax.set_title('User Activity Map')
    pyplot.show()
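# The run-length-encoding plus broken_barh pattern above, reduced to a minimal
# self-contained sketch on a synthetic 0/1 activity matrix (5 users, 60 days).
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
activity = (rng.random((5, 60)) > 0.6).astype(int)

fig, ax = plt.subplots()
for r in range(activity.shape[0]):
    bars, start = [], -1
    for i, v in enumerate(activity[r]):
        if v == 1 and start == -1:
            start = i                                  # open a bar
        elif v == 0 and start != -1:
            bars.append((start, i - start))            # close the bar
            start = -1
    if start != -1:
        bars.append((start, activity.shape[1] - start))
    ax.broken_barh(bars, (r * 3 + 1, 2))               # (y-offset, height) per row
ax.set_xlabel("day")
ax.set_ylabel("user")
plt.show()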
Beispiel #50
0
 def graph(self, save=False):
     for i in range(len(self.names_stations)):
         if np.sum(np.isnan(self.Y_PM25[:, 0, i])) > 100:
             continue
         name_station = str(self.names_stations[i, (
             np.logical_not(self.names_stations[i].mask))])
         # strip brackets, quotes, spaces and the bytes-literal marker from the name
         for ch in ("]", "[", "b", "'", " "):
             name_station = name_station.replace(ch, '')
         plt.figure(figsize=(30, 15))
         plt.title(name_station, fontsize=30)
         plt.plot(self.date_Y[24 * self.ML:],
                  pd.Series(self.Y_PM25[24 * self.ML:, 0, i]).rolling(
                      window=self.window_moving_average,
                      min_periods=1,
                      center=False).mean().values,
                  'r*-',
                  linewidth=3,
                  markersize=10,
                  label='Real Data')
         plt.plot(self.date_DA_ML[24 * self.ML:],
                  pd.Series(
                      self.Xa_PM25_ML[24 * self.ML:len(self.date_DA_ML), 0,
                                      i]).rolling(
                                          window=self.window_moving_average,
                                          min_periods=1,
                                          center=False).mean().values,
                  'k',
                  linewidth=4,
                  markersize=10,
                  label='LE-DA')
         plt.plot(self.date_FC_ML[:-1],
                  pd.Series(self.Xa_PM25_FC[:, 0, i]).rolling(
                      window=self.window_moving_average,
                      min_periods=1,
                      center=False).mean().values,
                  'b',
                  linewidth=4,
                  markersize=10,
                  label='LE-FC')
         plt.plot(self.date_FC_ML,
                  pd.Series(
                      self.Xa_PM25_ML[len(self.date_DA_ML):, 0, i]).rolling(
                          window=self.window_moving_average,
                          min_periods=1,
                          center=False).mean().values,
                  'g',
                  linewidth=4,
                  markersize=10,
                  label='LE-ML')
         #plt.plot(self.date_Y[24*self.ML:],pd.Series(self.Xb_PM25_ML[24*self.ML:,0,i]).rolling(window=self.window_moving_average,min_periods=1,center=False).mean().values,'k--',linewidth=3,markersize=10,label='LE')
         plt.axvline(self.date_FC_ML[0],
                     linewidth=3,
                     linestyle='--',
                     color=[0.3, 0.3, 0.3])
         ax = plt.gca()
         plt.rcParams['text.usetex'] = True
         plt.yticks(fontsize=30)
         plt.ylabel(r'PM$_{2.5}$ Concentration [$\mu$g/m$^3$]', fontsize=45)
         plt.grid(axis='x')
         plt.legend(fontsize=35)
         plt.xticks(fontsize=30)
         ax.set_xlim(self.date_ML[24 * self.ML], self.date_ML[-1])
         ax.set_ylim(0, 150)
         ax.xaxis.set_major_locator(plt.MaxNLocator(20))
         ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d'))
         if save:
             plt.savefig('./figures/' + name_station + '_ML_' +
                         str(self.ML) + '.png',
                         format='png')
         plt.show()
graph.imshow(blurred)

result = inverse(blurred, PSF, 1e-3)  # inverse filtering
graph.subplot(232)
graph.xlabel("inverse deblurred")
graph.imshow(result)

result = wiener(blurred, PSF, 1e-3)  # Wiener filtering
graph.subplot(233)
graph.xlabel("wiener deblurred (k=0.001)")
graph.imshow(result)

blurred_noisy = blurred + 0.1 * blurred.std() * \
                np.random.standard_normal(blurred.shape)  # add Gaussian noise to the blurred image

graph.subplot(234)
graph.xlabel("motion & noisy blurred")
graph.imshow(blurred_noisy)  # show the motion-blurred image with added noise

result = inverse(blurred_noisy, PSF, 0.1 + 1e-3)  # inverse filtering of the noisy image
graph.subplot(235)
graph.xlabel("inverse deblurred")
graph.imshow(result)

result = wiener(blurred_noisy, PSF, 0.1 + 1e-3)  # Wiener filtering of the noisy image
graph.subplot(236)
graph.xlabel("wiener deblurred (k=0.101)")
graph.imshow(result)

graph.show()
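# wiener and inverse above are defined earlier in the script (not shown here);
# as a reference, a minimal sketch of frequency-domain Wiener deconvolution,
# where k plays the role of a constant noise-to-signal ratio.
import numpy as np

def wiener_deconv(blurred, psf, k=1e-3):
    H = np.fft.fft2(psf, s=blurred.shape)          # transfer function of the PSF
    G = np.fft.fft2(blurred)
    F_hat = G * np.conj(H) / (np.abs(H) ** 2 + k)  # Wiener filter estimate
    return np.abs(np.fft.ifft2(F_hat))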
def waterlilyv2(flow_df,
                flow_unit='feet/sec',
                fluid_density=1000,
                verbose=False,
                enable_plot=False):

    # Water Lily turbine's flow velocity to power transfer curve model based on manufacturer's plot
    def transferfunction2(flow, flow_unit, min_flow, max_flow, fluid_density):

        # Checks if flow velocity units are valid and converts to km/h
        if flow_unit == 'feet/sec':
            # Converts the flow velocity from feet/s to km/h
            v_kmh = abs(flow) * 1.09728
            max_v_kmh = abs(max_flow) * 1.09728

        elif flow_unit == 'meters/sec':
            # Converts the flow velocity from meters/s to km/h
            v_kmh = abs(flow) * 3.6
            max_v_kmh = abs(max_flow) * 3.6

        else:
            # If flow unit is not feet/sec nor meters/sec
            raise NameError("Error: flow velocity unit " + flow_unit +
                            " is not currently supported")

        # Checks if flow velocity is above minimum value to generate power
        if abs(flow) > (min_flow):

            # Checks if flow velocity is below maximum value (output is not yet saturated)
            if abs(flow) < (max_flow):
                # Returns power calculated from transfer function considering proportionality to fluid density
                return (fluid_density / 1000) * (0.1056 * (v_kmh**2) + 0.0669 *
                                                 (v_kmh) - 0.4709)

            else:
                # Returns saturated max power
                return (fluid_density / 1000) * (0.1056 *
                                                 (max_v_kmh**2) + 0.0669 *
                                                 (max_v_kmh) - 0.4709)
        else:

            # Returns zero power
            return 0

    # Imports library dependencies
    import pandas as pd
    import matplotlib.pyplot as plt

    if flow_unit == 'feet/sec':
        # Turbine parameters
        min_flow = 1.6586  # in feet per second (1.82 Km/h)
        max_flow = 10.4804  # in feet per second (11.5 Km/h)
    elif flow_unit == 'meters/sec':
        # Turbine parameters
        min_flow = 0.5056  # in meters per second (1.82 Km/h)
        max_flow = 3.1944  # in meters per second (11.5 Km/h)
    else:
        # If flow unit is not feet/sec nor meters/sec
        raise NameError("Error: flow velocity unit " + flow_unit +
                        " is not currently supported")

    # If verbose is true, prints the input parameters for the calculation
    if verbose:
        print(" ")
        print(
            "The input turbine parameters to calculate instantaneous power generation are:"
        )
        print("  Min flow velocity: " + str(min_flow) + " in " + flow_unit)
        print("  Max flow velocity: " + str(max_flow) + " in " + flow_unit)
        print(
            "  Transfer equation: P = 0.1056*(v_kmh^2) + 0.0669*(v_kmh) - 0.4709 in Watts"
        )
        print(" ")
        print("Fluid density is " + str(fluid_density) + " kg per cubic meter")

        # If enable_plot is also true (the plot is only drawn in verbose mode),
        # plots the transfer curve of the selected turbine model
        if enable_plot:
            flow = [0.01 * x for x in range(0, int(110 * max_flow))]
            power = [
                transferfunction2(x, flow_unit, min_flow, max_flow,
                                  fluid_density) for x in flow
            ]
            plt.plot(flow, power)
            plt.title("Flow Velocity to Power Transfer Curve")
            plt.xlabel("Flow velocity in " + flow_unit)
            plt.ylabel("Output power in Watts")
            #plt.grid(b=True)
            plt.show()

    # Creates power time series as an empty dataframe
    power_df = pd.DataFrame()

    # Converts flow velocity time series in instantaneous power time series
    power_df['power'] = flow_df.flow.apply(lambda x: transferfunction2(
        x, flow_unit, min_flow, max_flow, fluid_density))

    # Checks if verbose is true and prints average power generation
    if verbose:
        print(" ")
        print("Average power generation: " +
              "{0:.4f}".format(power_df['power'].mean()) + " Watts")

    # Returns generated power dataframe
    return power_df
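# waterlilyv2 takes the same kind of flow dataframe as generictf but needs no
# turbine geometry; a minimal usage sketch on synthetic data.
import numpy as np
import pandas as pd

flow_df = pd.DataFrame({'flow': np.abs(np.random.randn(100)) * 5})  # feet/sec
power_df = waterlilyv2(flow_df, flow_unit='feet/sec', verbose=True)
print(power_df['power'].describe())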
def reconstruction_spectrum_by_four_inputs_predicted():
    tr_ae_lsf_model = keras.models.load_model(
        "../../results/week_0705/image2lsf_model7_dr03-36-0.00491458.h5")
    tr_ae_f0_model = keras.models.load_model(
        "../../results/week_0622/transfer_autoencoder_f0_model1-10-0.03161320.h5"
    )
    tr_ae_uv_model = keras.models.load_model(
        "../../results/week_0622/transfer_autoencoder_uv_model1-09-0.865.h5")
    tr_ae_energy_model = keras.models.load_model(
        "../../results/week_0622/"
        "transfer_autoencoder_energy_model6_dr03-06-0.01141086.h5")

    # predict the four variables using the pretrained models
    lsf_predicted, f0_predicted, uv_predicted, energy_predicted = \
        do_the_prediction_of_image2grandeurs(tr_ae_lsf_model, tr_ae_f0_model, tr_ae_uv_model, tr_ae_energy_model)

    # sanity-check the predicted shapes
    print(lsf_predicted.shape)
    print(f0_predicted.shape)
    print(uv_predicted.shape)
    print(energy_predicted.shape)
    # use a threshold of 0.5 to reset the uv predicted to 0 or 1
    uv_predicted[uv_predicted >= 0.5] = 1
    uv_predicted[uv_predicted < 0.5] = 0
    print("aaa")
    X_f0 = np.load("../../LSF_data/f0_all_chapiter.npy")
    energy = np.load("../../LSF_data/energy_all_chapiters.npy")
    spectrum = np.load(
        "../../data_npy_one_image/spectrogrammes_all_chapitre_corresponding.npy"
    )
    max_spectrum = np.max(spectrum)
    spectrum = spectrum / max_spectrum
    spectrum = np.matrix.transpose(spectrum)
    y_test = spectrum[-15951:]

    # calculate the maximum value of the original data to be used during denormalisation
    max_f0 = np.max(X_f0)
    max_energy = np.max(energy)

    # denormalisation
    f0_predicted = f0_predicted * max_f0
    energy_predicted = energy_predicted * max_energy

    # reset the 13th (last) LSF coefficient to zero
    lsf_predicted[:, 12] = 0

    # load the energy_lsf_spectrum model used
    mymodel = keras.models.load_model(
        "C:/Users/chaoy/Desktop/StageSilentSpeech/results/week_0607/"
        "energy_lsf_spectrum_model2-667-0.00000845.h5")

    test_result = mymodel.predict(
        [lsf_predicted, f0_predicted, uv_predicted, energy_predicted])
    result = np.matrix.transpose(test_result)
    mse = tf.keras.losses.MeanSquaredError()
    error = mse(y_test, test_result).numpy()
    print(
        "mean squared error between the spectrum predicted and the original spectrum : %8f"
        % error)
    result = result * max_spectrum

    # reconstruct the wave file
    test_reconstruit = librosa.griffinlim(result,
                                          hop_length=735,
                                          win_length=735 * 2)
    sf.write("ch7_reconstructed_total_model_lsf_0715.wav", test_reconstruit,
             44100)

    # load the wave file produced by griffin-lim
    wav_produced, _ = librosa.load(
        "ch7_reconstructed_total_model_lsf_0715.wav", sr=44100)
    spectrogram_produced_griffin = np.abs(
        librosa.stft(wav_produced,
                     n_fft=735 * 2,
                     hop_length=735,
                     win_length=735 * 2))

    fig, ax = plt.subplots(nrows=3)
    img = librosa.display.specshow(librosa.amplitude_to_db(
        np.matrix.transpose(y_test), ref=np.max),
                                   sr=44100,
                                   hop_length=735,
                                   y_axis='linear',
                                   x_axis='time',
                                   ax=ax[0])
    ax[0].set_title('original spectrum')
    librosa.display.specshow(librosa.amplitude_to_db(result, ref=np.max),
                             sr=44100,
                             hop_length=735,
                             y_axis='linear',
                             x_axis='time',
                             ax=ax[1])
    ax[1].set_title('spectrum learned')
    librosa.display.specshow(librosa.amplitude_to_db(
        spectrogram_produced_griffin, ref=np.max),
                             sr=44100,
                             hop_length=735,
                             y_axis='linear',
                             x_axis='time',
                             ax=ax[2])
    ax[2].set_title('spectrum reproduced griffinlim')
    fig.colorbar(img, ax=ax, format="%+2.0f dB")
    plt.show()
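# For reference: the STFT settings used throughout (hop_length=735, win_length=1470
# at sr=44100) give 44100/735 = 60 analysis frames per second. A minimal sketch of
# the same Griffin-Lim round trip on synthetic audio, with the values taken from the
# code above.
import numpy as np
import librosa

sr, hop, win = 44100, 735, 735 * 2
wav = np.random.randn(sr)  # one second of synthetic audio

spec = np.abs(librosa.stft(wav, n_fft=win, hop_length=hop, win_length=win))
rebuilt = librosa.griffinlim(spec, hop_length=hop, win_length=win)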
def project(vertices, *args):
	vertices = np.array(vertices)
	centroid = getCentroid(vertices)
	if not args:
		print('Specify either ML or AP plane')
		return

	elif (args[0]=='ML'):
		#1. ML contour

		#get the lower bound of the shape in x direction
		#the Xray source will be placed on lower bound
	
		xLower = getMinimumCoordinate(vertices, 0)

		#define the ML plane, i.e., the YZ plane x=xmin -- 1x+0y+0z=xLower
		ML = [1,0,0,-xLower]


		#define position of the xray camera
		#the camera's axis will be on the centroid axis of the 3d shape
		cameraPos = [1000+xLower,centroid[1],centroid[2]]


		print('ML plane equation - x=' + str(xLower))
		print('ML camera position = ' + str(cameraPos))


		#take perspective projection onto the ML plane
		projectedVertices = perspective_project(vertices, ML, cameraPos)
		#after projection the dimensions of vertices are still the same

		#get the contour of projected vertices
		#arguments 1 and 2 mean the y-z plane
		mesh, coordinates, coordinateMap = getContour(projectedVertices, 1, 2)

		#get only the boundary vertices
		boundaryPoints, correspondenceMap = getContourVertices(mesh, coordinates, coordinateMap)

		#plot the points to verify
		plt.scatter(boundaryPoints[:,0], boundaryPoints[:,1])
		plt.show()
		return boundaryPoints, correspondenceMap
	elif (args[0]=='AP'):
		#1. AP contour

		#get the lower bound of the shape in y direction
		#the Xray source will be placed on lower bound
		yLower = getMinimumCoordinate(vertices, 1)

		#define APplane i.e., XZ plane y=ymin -- 0x+1y+0z=yLower
		AP = [0,1,0,-yLower]

		#define position of the xray camera
		#the camera's axis will be on the centroid axis of the 3d shape
		cameraPos = [centroid[0],1000+yLower,centroid[2]]


		print('AP plane equation - y=' + str(yLower))
		print('AP camera position = ' + str(cameraPos))


		#take perspective projection in AP plane
		projectedVertices = perspective_project(vertices, AP, cameraPos)
		#after projection the dimensions of vertices are still the same

		#get the contour of projected vertices
		#arguments 0 and 2 mean in x-z plane
		mesh, coordinates, coordinateMap = getContour(projectedVertices, 0, 2)

		#get only the boundary vertices
		boundaryPoints, correspondenceMap = getContourVertices(mesh, coordinates, coordinateMap)

		#plot the points to verify
		plt.scatter(boundaryPoints[:,0], boundaryPoints[:,1])
		plt.show()

		return boundaryPoints, correspondenceMap
	else:
		print(args[0] + ' plane not defined.')
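# perspective_project, getContour and the other helpers above are defined elsewhere;
# this is only a sketch of the perspective-projection math the function relies on
# (the helper name below is hypothetical, not the one used above).
import numpy as np

def project_points_onto_plane(points, plane, camera):
    # plane = [a, b, c, d] for ax + by + cz + d = 0; rays are cast from the camera
    n, d = np.asarray(plane[:3], float), float(plane[3])
    camera = np.asarray(camera, float)
    out = []
    for p in np.asarray(points, float):
        ray = p - camera                        # line: camera + t * ray
        t = -(n @ camera + d) / (n @ ray)       # solve n.(camera + t*ray) + d = 0
        out.append(camera + t * ray)
    return np.array(out)

pts = np.array([[1.0, 2.0, 3.0], [0.5, 1.0, 2.0]])
print(project_points_onto_plane(pts, [1, 0, 0, 0.0], [10.0, 0.0, 0.0]))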
def reconstruction_spectrum_four_inputs_five_images_lsf_models():
    tr_ae_lsf_model = keras.models.load_model(
        "../../results/week_0823/"
        "data_coupe_five_images_to_lsf_model1-08-0.00374416.h5")
    tr_ae_f0_model = keras.models.load_model(
        "../../results/week_0823/image2f0_data_coupe_model1-07-0.04049132.h5")
    tr_ae_uv_model = keras.models.load_model(
        "../../results/week_0823/image2uv_data_coupe_model1-16-0.86421.h5")
    tr_ae_energy_model = keras.models.load_model(
        "../../results/week_0823/"
        "image2energy_data_coupe_model1-06-0.00781385.h5")
    # predict the four variables using the pretrained models
    lsf_predicted, f0_predicted, uv_predicted, energy_predicted = \
        do_the_prediction_of_four_grandeurs_avec_fiveimages2lsf_models \
            (tr_ae_lsf_model, tr_ae_f0_model, tr_ae_uv_model, tr_ae_energy_model)
    # sanity-check the predicted shapes
    print(lsf_predicted.shape)
    print(f0_predicted.shape)
    print(uv_predicted.shape)
    print(energy_predicted.shape)

    # use a threshold of 0.5 to reset the uv predicted to 0 or 1
    uv_predicted[uv_predicted >= 0.5] = 1
    uv_predicted[uv_predicted < 0.5] = 0

    X_f0 = np.load("../../LSF_data_coupe/f0_cut_all.npy")
    energy = np.load("../../LSF_data_coupe/energy_cut_all.npy")
    # use the trimmed ("coupe") data, of shape (84679, 736)
    spectrum = np.load(
        "../../data_coupe/spectrogrammes_all_chapitres_coupe.npy")
    max_spectrum = np.max(spectrum)
    spectrum = spectrum / max_spectrum
    y_test = spectrum[-15949:-2]

    # calculate the maximum value of the original data to be used during denormalisation
    max_f0 = np.max(X_f0)
    max_energy = np.max(energy)

    # denormalisation
    f0_predicted = f0_predicted * max_f0
    energy_predicted = energy_predicted * max_energy

    # reset the 13th (last) LSF coefficient to zero
    lsf_predicted[:, 12] = 0

    # load the energy_lsf_spectrum model used
    mymodel = keras.models.load_model(
        "../../results/week_0823/energy_lsf_spectrum_data_coupe_model2-678-0.00000796.h5"
    )

    test_result = mymodel.predict(
        [lsf_predicted, f0_predicted, uv_predicted, energy_predicted])
    result = np.matrix.transpose(test_result)
    mse = tf.keras.losses.MeanSquaredError()
    error = mse(y_test, test_result).numpy()
    print(
        "mean squared error between the spectrum predicted and the original spectrum : %8f"
        % error)
    result = result * max_spectrum

    # reconstruct the wave file
    test_reconstruit = librosa.griffinlim(result,
                                          hop_length=735,
                                          win_length=735 * 2)
    sf.write("ch7_reconstructed_5imageslsf_0825_data_coupe.wav",
             test_reconstruit, 44100)

    # load the wave file produced by griffin-lim
    wav_produced, _ = librosa.load(
        "ch7_reconstructed_5imageslsf_0825_data_coupe.wav", sr=44100)
    spectrogram_produced_griffin = np.abs(
        librosa.stft(wav_produced,
                     n_fft=735 * 2,
                     hop_length=735,
                     win_length=735 * 2))

    fig, ax = plt.subplots(nrows=3)
    img = librosa.display.specshow(librosa.amplitude_to_db(
        np.matrix.transpose(y_test), ref=np.max),
                                   sr=44100,
                                   hop_length=735,
                                   y_axis='linear',
                                   x_axis='time',
                                   ax=ax[0])
    ax[0].set_title('original spectrum')
    librosa.display.specshow(librosa.amplitude_to_db(result, ref=np.max),
                             sr=44100,
                             hop_length=735,
                             y_axis='linear',
                             x_axis='time',
                             ax=ax[1])
    ax[1].set_title('spectrum learned')
    librosa.display.specshow(librosa.amplitude_to_db(
        spectrogram_produced_griffin, ref=np.max),
                             sr=44100,
                             hop_length=735,
                             y_axis='linear',
                             x_axis='time',
                             ax=ax[2])
    ax[2].set_title('spectrum reproduced griffinlim')
    fig.colorbar(img, ax=ax, format="%+2.0f dB")
    plt.show()
Beispiel #56
0
def pruned_plotting(interpretability_pruned,
                    new_scores_pruned,
                    names_pruned,
                    interpretability_not_pruned,
                    new_scores_not_pruned,
                    names_not_pruned, start_score, accuracy_lim=None):

    fig, ax = plt.subplots()

    sc_pruned = plt.scatter(interpretability_pruned, new_scores_pruned, c='red', label='pruned')
    sc_not_pruned = plt.scatter(interpretability_not_pruned, new_scores_not_pruned, c='blue', label='not pruned')


    ax.set_xlabel("Interpretability (low -> high)")
    ax.set_ylabel("Accuracy (Micro AUC)")
    ax.set_xlim((0, 1.0))
    if accuracy_lim is not None:
        ax.set_ylim(accuracy_lim)

    ax.axhline(y=start_score, color='red', linestyle='--')

    ax.legend(loc='upper left')

    annot = ax.annotate("", xy=(0,0), xytext=(-220,70),textcoords="offset points",
                        bbox=dict(boxstyle="round", fc="w"),
                        arrowprops=dict(arrowstyle="->"))
    annot.set_visible(False)


    def pruned_update_annot(ind, sc, names):
        pos = sc.get_offsets()[ind["ind"][0]]
        annot.xy = pos
        text = names[ind["ind"][0]]
        #print(text + ": " + str(y[ind["ind"][0]]))
        annot.set_text(text)
        annot.get_bbox_patch().set_alpha(0.4)


    def pruned_hover(event):
        vis = annot.get_visible()
        if event.inaxes == ax:
            cont, ind = sc_pruned.contains(event)
            if cont:
                pruned_update_annot(ind, sc_pruned,names_pruned)
                annot.set_visible(True)
                fig.canvas.draw_idle()
            else:
                cont, ind = sc_not_pruned.contains(event)
                if cont:
                    pruned_update_annot(ind, sc_not_pruned, names_not_pruned)
                    annot.set_visible(True)
                    fig.canvas.draw_idle()
                else:
                    if vis:
                        annot.set_visible(False)
                        fig.canvas.draw_idle()

    fig.canvas.mpl_connect("motion_notify_event", pruned_hover)


    plt.show()
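# A minimal sketch of calling pruned_plotting with synthetic points; every value
# below is made up purely to exercise the hover annotations.
import numpy as np

rng = np.random.default_rng(1)
n = 10
pruned_plotting(rng.random(n), 0.7 + 0.2 * rng.random(n),
                ["pruned-%d" % i for i in range(n)],
                rng.random(n), 0.7 + 0.2 * rng.random(n),
                ["full-%d" % i for i in range(n)],
                start_score=0.75, accuracy_lim=(0.6, 1.0))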
def DT(X, y, train_size, data_name):
    X = StandardScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        train_size=train_size)

    # https://scikit-learn.org/stable/auto_examples/tree/plot_cost_complexity_pruning.html#sphx-glr-auto-examples-tree-plot-cost-complexity-pruning-py
    # Fit classification model
    dt = DecisionTreeClassifier()
    path = dt.cost_complexity_pruning_path(X_train, y_train)
    ccp_alphas, impurities = path.ccp_alphas, path.impurities

    fig, ax = plt.subplots()
    ax.plot(ccp_alphas[:-1],
            impurities[:-1],
            marker='o',
            drawstyle="steps-post")
    ax.set_xlabel("effective alpha")
    ax.set_ylabel("total impurity of leaves")
    ax.set_title("Total Impurity vs effective alpha for training set")

    clfs = []
    for ccp_alpha in ccp_alphas:
        clf = DecisionTreeClassifier(random_state=0, ccp_alpha=ccp_alpha)
        clf.fit(X_train, y_train)
        clfs.append(clf)
    print("Number of nodes in the last tree is: {} with ccp_alpha: {}".format(
        clfs[-1].tree_.node_count, ccp_alphas[-1]))

    # %%
    # For the remainder of this example, we remove the last element in
    # ``clfs`` and ``ccp_alphas``, because it is the trivial tree with only one
    # node. Here we show that the number of nodes and tree depth decreases as alpha
    # increases.
    clfs = clfs[:-1]
    ccp_alphas = ccp_alphas[:-1]

    node_counts = [clf.tree_.node_count for clf in clfs]
    depth = [clf.tree_.max_depth for clf in clfs]
    fig, ax = plt.subplots(2, 1)
    ax[0].plot(ccp_alphas, node_counts, marker='o', drawstyle="steps-post")
    ax[0].set_xlabel("alpha")
    ax[0].set_ylabel("number of nodes")
    ax[0].set_title("Number of nodes vs alpha")
    ax[1].plot(ccp_alphas, depth, marker='o', drawstyle="steps-post")
    ax[1].set_xlabel("alpha")
    ax[1].set_ylabel("depth of tree")
    ax[1].set_title("Depth vs alpha")
    fig.tight_layout()

    # %%
    # Accuracy vs alpha for training and testing sets
    # ----------------------------------------------------
    # When ``ccp_alpha`` is set to zero and keeping the other default parameters
    # of :class:`DecisionTreeClassifier`, the tree overfits. As alpha increases,
    # more of the tree is pruned, creating a decision tree that generalizes
    # better; the best alpha depends on the dataset passed in.
    train_scores = [clf.score(X_train, y_train) for clf in clfs]
    test_scores = [clf.score(X_test, y_test) for clf in clfs]

    fig, ax = plt.subplots()
    ax.set_xlabel("alpha")
    ax.set_ylabel("accuracy")
    ax.set_title("Accuracy vs alpha for training and testing sets")
    ax.plot(ccp_alphas,
            train_scores,
            marker='o',
            label="train",
            drawstyle="steps-post")
    ax.plot(ccp_alphas,
            test_scores,
            marker='o',
            label="test",
            drawstyle="steps-post")
    ax.legend()
    plt.show()
    # %%
    best_alpha = 0.040790348647614105  # presumably chosen from the accuracy-vs-alpha plot above
    # %%
    # Create CV training and test scores for various training set sizes
    train_sizes, train_scores, test_scores = learning_curve(
        DecisionTreeClassifier(ccp_alpha=best_alpha),
        X,
        y,
        # Number of folds in cross-validation
        cv=5,
        # Evaluation metric
        scoring='accuracy',
        # Use all computer cores
        n_jobs=-1,
        # 50 different sizes of the training set
        train_sizes=np.linspace(0.01, 1.0, 50))

    print(train_scores)
    # Create means and standard deviations of training set scores
    train_mean = np.mean(train_scores, axis=1)
    train_std = np.std(train_scores, axis=1)

    # Create means and standard deviations of test set scores
    test_mean = np.mean(test_scores, axis=1)
    test_std = np.std(test_scores, axis=1)

    # Draw lines
    plt.plot(train_sizes,
             train_mean,
             '--',
             color="#111111",
             label="Training score")
    plt.plot(train_sizes,
             test_mean,
             color="#111111",
             label="Cross-validation score")

    # Draw bands
    plt.fill_between(train_sizes,
                     train_mean - train_std,
                     train_mean + train_std,
                     color="#DDDDDD")
    plt.fill_between(train_sizes,
                     test_mean - test_std,
                     test_mean + test_std,
                     color="#DDDDDD")

    # Create plot
    plt.title("DT Learning Curve - {}".format(data_name))
    plt.xlabel("Training Set Size"), plt.ylabel("Accuracy Score"), plt.legend(
        loc="best")
    plt.tight_layout()
    plt.show()
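# A minimal sketch of calling DT on a toy dataset; note that best_alpha is
# hard-coded above and was presumably tuned for a different dataset, so the
# learning-curve section may use a suboptimal alpha here. Assumes the sklearn
# names used by DT (StandardScaler, train_test_split, DecisionTreeClassifier,
# learning_curve) are imported at module level, as in the code above.
from sklearn.datasets import load_iris

X, y = load_iris(return_X_y=True)
DT(X, y, train_size=0.8, data_name="iris")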
def reconstruction_spectrum_original_lsf():
    """
    This function is used to test if we use the original lsf values and other variables predicted by models could
    improve the result of the prediction.
    If it's true, that means the problem is from the wrong prediction of lsf values.
    This test is used to localize the problem.
    :return:
    """
    tr_ae_lsf_model = keras.models.load_model(
        "../../results/week_0622/"
        "transfer_autoencoder_lsf_model1-12-0.00502079.h5")
    tr_ae_f0_model = keras.models.load_model(
        "../../results/week_0622/transfer_autoencoder_f0_model1-10-0.03161320.h5"
    )
    tr_ae_uv_model = keras.models.load_model(
        "../../results/week_0622/transfer_autoencoder_uv_model1-09-0.865.h5")
    tr_ae_energy_model = keras.models.load_model(
        "../../results/week_0622/"
        "transfer_autoencoder_energy_model6_dr03-06-0.01141086.h5")

    # predict the four variables using the pretrained models
    _, f0_predicted, uv_predicted, energy_predicted = \
        do_the_prediction_of_image2grandeurs(tr_ae_lsf_model, tr_ae_f0_model, tr_ae_uv_model, tr_ae_energy_model)
    # the lsf predicted in this case is replaced by the original lsf values
    lsf_original = np.load("../../LSF_data/lsp_all_chapiter.npy")
    lsf_test = lsf_original[-15951:, :]
    print("aaa")
    print(lsf_original.shape)
    print(f0_prediected.shape)
    print(uv_predicted.shape)
    print(energy_predicted.shape)

    X_f0 = np.load("../../LSF_data/f0_all_chapiter.npy")
    energy = np.load("../../LSF_data/energy_all_chapiters.npy")
    spectrum = np.load(
        "../../data_npy_one_image/spectrogrammes_all_chapitre_corresponding.npy"
    )
    max_spectrum = np.max(spectrum)
    spectrum = spectrum / max_spectrum
    spectrum = np.matrix.transpose(spectrum)
    y_test = spectrum[-15951:]

    # calculate the maximum value of the original data to be used during denormalisation
    max_f0 = np.max(X_f0)
    max_energy = np.max(energy)

    # denormalisation
    f0_predicted = f0_predicted * max_f0
    energy_predicted = energy_predicted * max_energy

    # load the energy_lsf_spectrum model used
    mymodel = keras.models.load_model(
        "C:/Users/chaoy/Desktop/StageSilentSpeech/results/week_0607/"
        "energy_lsf_spectrum_model2-667-0.00000845.h5")

    test_result = mymodel.predict(
        [lsf_test, f0_predicted, uv_predicted, energy_predicted])
    result = np.matrix.transpose(test_result)
    mse = tf.keras.losses.MeanSquaredError()
    error = mse(y_test, test_result).numpy()
    print(
        "mean squared error between the spectrum predicted and the original spectrum : %8f"
        % error)
    result = result * max_spectrum

    # reconstruct the wave file
    test_reconstruit = librosa.griffinlim(result,
                                          hop_length=735,
                                          win_length=735 * 2)
    sf.write("ch7_reconstructed_total_model_lsf.wav", test_reconstruit, 44100)

    # load the wave file produced by griffin-lim
    wav_produced, _ = librosa.load("ch7_reconstructed_total_model_lsf.wav",
                                   sr=44100)
    spectrogram_produced_griffin = np.abs(
        librosa.stft(wav_produced,
                     n_fft=735 * 2,
                     hop_length=735,
                     win_length=735 * 2))

    fig, ax = plt.subplots(nrows=3)
    img = librosa.display.specshow(librosa.amplitude_to_db(
        np.matrix.transpose(y_test), ref=np.max),
                                   sr=44100,
                                   hop_length=735,
                                   y_axis='linear',
                                   x_axis='time',
                                   ax=ax[0])
    ax[0].set_title('original spectrum')
    librosa.display.specshow(librosa.amplitude_to_db(result, ref=np.max),
                             sr=44100,
                             hop_length=735,
                             y_axis='linear',
                             x_axis='time',
                             ax=ax[1])
    ax[1].set_title('spectrum learned')
    librosa.display.specshow(librosa.amplitude_to_db(
        spectrogram_produced_griffin, ref=np.max),
                             sr=44100,
                             hop_length=735,
                             y_axis='linear',
                             x_axis='time',
                             ax=ax[2])
    ax[2].set_title('spectrum reproduced griffinlim')
    fig.colorbar(img, ax=ax, format="%+2.0f dB")
    plt.show()
Beispiel #59
0
opac2 = np.loadtxt(lines[7:])[:,1:].T
logT2 = np.loadtxt(lines[7:])[:,0]
logR2 = np.loadtxt(lines[5:6])

Rmin = min(np.min(logR1), np.min(logR2))
Rmax = max(np.max(logR1), np.max(logR2))
Tmin = min(np.min(logT1), np.min(logT2))
Tmax = max(np.max(logT1), np.max(logT2))

i1 = np.where(logT1>=4.)[0][0]
i2 = np.where(logT2<4.)[0][-1]

interpolator1 = RBS(logR1, logT1, opac1)
interpolator2 = RBS(logR2, logT2, opac2)

for rho in np.arange(-10.,0.1,2.):
    T = np.linspace(Tmin, Tmax, 200)
    R = rho - 3.*T + 18.  # logR = log10(rho) - 3*log10(T) + 18, i.e. R = rho/T6^3
    T = T[(R>Rmin)&(R<Rmax)]
    R = R[(R>Rmin)&(R<Rmax)]

    # c = np.hstack((interpolator2(R[:i2],T[:i2]), interpolator1(R[i1:],T[i1:])))
    c = [interpolator2(Ri,Ti) for (Ri,Ti) in zip(R,T) if Ti<4.] + \
        [interpolator1(Ri,Ti) for (Ri,Ti) in zip(R,T) if Ti>=4.]
    c = np.squeeze(c)
    pl.plot(T, c, 'k-')

pl.xlabel(r'$\log_{10}(T/\mathrm{K})$')
pl.ylabel(r'$\log_{10}(\kappa_\mathrm{R}/(\mathrm{cm}^2/\mathrm{g}))$')
pl.show()
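# RBS above is presumably scipy.interpolate.RectBivariateSpline; a minimal sketch
# of the same bivariate table interpolation on a stand-in opacity table.
import numpy as np
from scipy.interpolate import RectBivariateSpline as RBS

logR = np.linspace(-8.0, 1.0, 10)
logT = np.linspace(3.0, 8.0, 20)
table = np.random.rand(logR.size, logT.size)  # stand-in for a loaded opacity table

interpolator = RBS(logR, logT, table)         # z must have shape (len(x), len(y))
print(interpolator(-3.0, 4.5))                # evaluate at a single (logR, logT)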
Beispiel #60
0
def plot_price(cfg):

    param_list = cfg['plot_params'].split(" ")
    if len(param_list) < 3:
        logging.warn("Plot params malformed. Skipping plot.")
        return
    else:
        logging.info(f"Plotting symbol {param_list[0].strip()}")
        logging.info(f"  from {param_list[1]}")
        logging.info(f"  to {param_list[2]}")

    register_matplotlib_converters()
    
    prices_input_file = CLEANED_PRICES_FILE
    #prices_input_file = cfg['raw_data_dir'] + cfg['raw_prices_input_file']
    try:
        logging.info("Reading " + prices_input_file)
        prices_df = pd.read_table(prices_input_file, sep=',')
        prices_df['date'] = pd.to_datetime(prices_df['date'])
        logging.info("Prices df shape " + str(prices_df.shape))
        
    except Exception as e: 
        logging.critical("Not parsed: " + prices_input_file + "\n" + str(e))
        sys.exit()   

    # param string [symbol start-date end-date] 
    #   e.g. IBM 2009-01-01 2019-01-01
    symbol = param_list[0].strip()

    date_start = pd.Timestamp(param_list[1])
    date_end = pd.Timestamp(param_list[2])
   

    # filter on date range
    logging.info("Filtering on date range")
    df = prices_df[(prices_df['date'] >= date_start) & (prices_df['date'] <= date_end)]
    df = df.sort_values(['date'])

    # get group for this symbol
    logging.info("Filtering on symbol")
    df = df.groupby('symbol').get_group(symbol)

    # write df to file
    span_str = (date_start.strftime("%Y-%m-%d") + "_" +
        date_end.strftime("%Y-%m-%d"))
    csv_name = STOX_DATA_DIR + symbol + "_" + span_str + ".csv"
    df.to_csv(csv_name, index=False, sep="\t", float_format='%.3f')

    # plot open/close price
    fig = plt.figure()
    plt.suptitle(symbol, fontsize=10)
    plt.scatter(df['date'].tolist(), df['open'], color='green', s=2)
    plt.scatter(df['date'].tolist(), df['close'], color = 'blue', s=2)

    plt_filename = STOX_DATA_DIR + symbol + "_" + span_str + ".png"
    plt.savefig(plt_filename)
    plt.show()
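# plot_params is a single space-separated "symbol start-date end-date" string; a
# minimal invocation sketch (CLEANED_PRICES_FILE and STOX_DATA_DIR are module-level
# constants assumed to be configured elsewhere).
cfg = {'plot_params': 'IBM 2009-01-01 2019-01-01'}
plot_price(cfg)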