Example #1
def run(X, Y):
    N = len(Y)

    # Z holds the values obtained from the summation:
    # [ [real, imaginary], [real, imaginary], ... ]
    # It must start at zero because the loop below accumulates into it.
    Z = numpy.zeros(shape=[N, 2])

    # mag is the array of magnitudes
    mag = numpy.empty(N)

    # angle is the array of phase angles
    angle = numpy.empty(N)

    for k in range(0, N):
        for n in range(0, N):
            b = 2*math.pi*k*n / N
            Z[k][0] += Y[n]*math.cos(-b)
            Z[k][1] += Y[n]*math.sin(-b)

        mag[k] = math.sqrt(math.pow(Z[k][0], 2) + math.pow(Z[k][1], 2))
        angle[k] = math.atan2(Z[k][1], Z[k][0])  # phase of bin k

    # plot the line spectrum with mag on the Y axis
    plt.stem(X, mag)
    plt.title('Espectro de Linhas')
    plt.ylabel('Magnitude')

    print(Z)
    print(mag)
    print(angle)

    plt.show()
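The loop above is a hand-written O(N^2) DFT. A minimal cross-check of the same summation against numpy.fft.fft; the helper name and the toy signal below are illustrative, not part of the example:

import numpy as np

def dft_naive(y):
    """O(N^2) DFT implementing the same summation as run()."""
    N = len(y)
    Z = np.zeros(N, dtype=complex)
    for k in range(N):
        for n in range(N):
            b = 2 * np.pi * k * n / N
            Z[k] += y[n] * (np.cos(-b) + 1j * np.sin(-b))
    return Z

y = np.sin(2 * np.pi * 3 * np.arange(32) / 32)   # 3 cycles in 32 samples
assert np.allclose(dft_naive(y), np.fft.fft(y))  # magnitude peaks at bins 3 and 29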
Example #2
 def plot(self,
          typ='s3',
          title='',
          xl=False,
          yl=False,
          log=False,
          stem=True,
          color='b'):
     """
     """
     if typ == 's3':
         indices = self.ind3
         tl = indices[:, 0]
         C = []
         for l in np.unique(tl):
             k = np.where(tl == l)
             a = np.real(np.sum(self.s3[:, k] * np.conj(self.s3[:, k])))
             C.append(a)
         C = np.real(np.array(C))
         Cs = np.sqrt(C)
         if log:
             Cs = 20 * np.log10(Cs)
         if stem:
             plt.stem(np.unique(tl), Cs, markerfmt=color + 'o')
         else:
             plt.plot(np.unique(tl), Cs, color=color)
         #plt.axis([0,max(tl),0,5])
         plt.title(title)
         if xl:
             plt.xlabel('degree l')
         if yl:
             plt.ylabel('Integrated Module of coeff')
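Example #3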
def Histogram_Equalization(image):
    # image1=[[1,2,3],[2,1,3],[1,3,3]]
    min_value = np.min(image)
    max_value = np.max(image)
    j, k = np.shape(image)
    frequency = []
    pdf = []
    cdf = []
    count = 0
    total_pixel = 0
    sum1 = 0
    for i in range(min_value, max_value + 1):
        for a in range(j):
            for b in range(k):
                if image[a][b] == i:
                    print(i)
                    count = count + 1
                    print(count)
        frequency.append((i, count))
        total_pixel = total_pixel + count
        count = 0
    for intensity, no_of_repeat in frequency:
        pdf.append(no_of_repeat / total_pixel)
    for value in pdf:
        sum1 = sum1 + value
        cdf.append(sum1)
    plt.stem(cdf)
    plt.show()
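The function above computes and plots the CDF but never remaps the image intensities. A vectorized sketch of the full equalization step for an 8-bit image; the helper name and the toy image are assumptions for illustration:

import numpy as np
import matplotlib.pyplot as plt

def equalize(image, levels=256):
    """Map each intensity through the normalized CDF of the image histogram."""
    hist, bins = np.histogram(image.flatten(), bins=levels, range=(0, levels))
    cdf = hist.cumsum() / hist.sum()
    remapped = np.interp(image.flatten(), bins[:-1], cdf * (levels - 1))
    return remapped.reshape(image.shape).astype(np.uint8)

img = np.random.randint(50, 100, size=(64, 64))      # low-contrast toy image
out = equalize(img)
plt.stem(np.bincount(out.flatten(), minlength=256))  # equalized histogram spreads over 0..255
plt.show()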
Example #4
def histogram_plot():
    global image, extension, image_count, log_path, max_image_count, view_value, plot_number
    if (image.max() != 0):
        if (
                plot_number == 121
        ):  #because this function will be called two times in display_original()
            fig.clf()
        fig.add_subplot(plot_number)  #dynamically changing the grid plot
        im = plt.imshow(image, vmin=0, vmax=255)
        plt.set_cmap('gray')

        image_histogram = np.zeros(256)  #contains histogram of image
        #create the image histogram
        for i in range(image.shape[0]):
            for j in range(image.shape[1]):
                intensity_value = int(
                    abs(image[i][j])
                )  #abs() is taken in case of negative image intensities
                image_histogram[
                    intensity_value] = image_histogram[intensity_value] + 1
        fig.add_subplot(plot_number + 1)
        plt.stem(np.arange(256), image_histogram,
                 markerfmt=' ')  #plots a stem image of histogram

        canvas.draw()
        view_value = "100"  #set the callback so that the display image option is enabled
        var2.set(view_value)
    else:
        messagebox.showerror("Error", "Please load an image first.")
Example #5
 def plot(self, typ="s3", title="", xl=False, yl=False, log=False, stem=True, color="b"):
     """
     """
     if typ == "s3":
         indices = self.ind3
         tl = indices[:, 0]
         C = []
         for l in np.unique(tl):
             k = np.where(tl == l)
             a = np.real(np.sum(self.s3[:, k] * np.conj(self.s3[:, k])))
             C.append(a)
         C = np.real(np.array(C))
         Cs = np.sqrt(C)
         if log:
             Cs = 20 * np.log10(Cs)
         if stem:
             plt.stem(np.unique(tl), Cs, markerfmt=color + "o")
         else:
             plt.plot(np.unique(tl), Cs, color=color)
         # plt.axis([0,max(tl),0,5])
         plt.title(title)
         if xl:
             plt.xlabel("degree l")
         if yl:
             plt.ylabel("Integrated Module of coeff")
Example #6
def demo(text=None):
    from nltk.corpus import brown
    from matplotlib import pylab
    import jieba
    import re  # used for the paragraph-break regex below

    with open('flypaper_short.txt', 'r') as file:
        comments = file.read()
    # segment the word
    seg_list = jieba.cut(comments)
    text = (" ".join(seg_list))

    #print(text)
    pattern = re.compile("\n")
    matches = pattern.finditer(text)
    #for match in matches:
    #print(match.start())

    MIN_PARAGRAPH = 100  # minimum length of a paragraph
    last_break = 0
    pbreaks = [0]
    for pb in matches:

        if pb.start() - last_break < MIN_PARAGRAPH:
            continue
        else:
            pbreaks.append(pb.start())
            last_break = pb.start()
    print(pbreaks)

    pb_iter = pbreaks.__iter__()
    current_par_break = next(pb_iter)
    if current_par_break == 0:
        try:
            current_par_break = next(pb_iter)  # skip break at 0
        except StopIteration:
            raise ValueError(
                "No paragraph breaks were found(text too short perhaps?)")

    tt = TextTilingTokenizer(w=10, k=3, demo_mode=True)
    #if text is None: text = brown.raw()[:10000]
    s, ss, d, b = tt.tokenize(text)
    print(b)
    pylab.xlabel("Sentence Gap index")
    pylab.ylabel("Gap Scores")
    pylab.plot(range(len(s)), s, label="Gap Scores")
    pylab.plot(range(len(ss)), ss, label="Smoothed Gap scores")
    pylab.plot(range(len(d)), d, label="Depth scores")
    pylab.stem(range(len(b)), b)
    pylab.legend()
    pylab.show()
Example #7
def plot_feature_class_corr_matrix(df, labels, cols):

    corr = []
    for i in range(0, df.shape[1]):
        corr.append(stats.pointbiserialr(labels, df.iloc[:, i]))
    pos = np.arange(1, df.shape[1] + 1)
    c, p = zip(*corr)
    plt.figure(figsize=(10, 10))
    plt.grid()
    plt.stem(pos, c)
    plt.xticks(pos, cols, rotation=90, fontsize=8)
    plt.ylabel('Correlation')
    plt.title('Point biserial Correlation - Features v. Class')
    plt.savefig('Graphs/BiSerialCorr.png', bbox_inches='tight')
Example #8
def demo(text=None):
    from nltk.corpus import brown
    from matplotlib import pylab
    tt = TextTilingTokenizer(demo_mode=True)
    if text is None: text = brown.raw()[:10000]
    s, ss, d, b = tt.tokenize(text)
    pylab.xlabel("Sentence Gap index")
    pylab.ylabel("Gap Scores")
    pylab.plot(range(len(s)), s, label="Gap Scores")
    pylab.plot(range(len(ss)), ss, label="Smoothed Gap scores")
    pylab.plot(range(len(d)), d, label="Depth scores")
    pylab.stem(range(len(b)), b)
    pylab.legend()
    pylab.show()
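For reference, a minimal sketch of the non-demo path of the same tokenizer: without demo_mode, tokenize() returns the text split into topical segments instead of the (s, ss, d, b) score arrays. This assumes the NLTK brown and stopwords corpora are installed.

from nltk.corpus import brown
from nltk.tokenize import TextTilingTokenizer

tt = TextTilingTokenizer()
segments = tt.tokenize(brown.raw()[:10000])
print(len(segments))      # number of topical segments found
print(segments[0][:200])  # beginning of the first segment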
Example #10
def InitialFinalSilenceRemoved(sig):
    # Removes beginning and end silence periods of a wavfile
    # Input: sig, i.e. wavfile
    # Output: new_sig, i.e. wavfile without beginning and end silence periods
    #########################################################################
    window = 512
    hop = window // 2  # integer hop so it can be used for slicing
    energy = []
    i = 0
    energy_index = []

    while i < (len(sig) - window):
        chunk = sig[i:i + window][np.newaxis]
        energy.append(chunk.dot(chunk.T)[0][0])
        energy_index.append(i)
        i = i + hop

    energy = np.array(energy)
    energy_thresh = 0.1 * np.mean(energy)
    significant_indices = np.where(energy > energy_thresh)[0]

    if significant_indices[0] == 0:
        start_point_sample = 0
    else:
        start_point_sample = (significant_indices[0] - 1) * hop
    if significant_indices[-1] == len(energy) - 1:
        end_point_sample = len(energy) * hop
    else:
        end_point_sample = (significant_indices[-1] + 1) * hop
    new_sig = sig[start_point_sample:end_point_sample + 1]
    if plot:
        plt.figure('figure from InitialFinalSilenceRemoved')
        plt.subplot(3, 1, 1)
        plt.plot(range(len(sig)), sig)
        plt.ylabel('amplitude')
        plt.title('Remove initial and final silences')
        plt.subplot(3, 1, 2)
        plt.plot(energy_index, energy)
        plt.ylabel('energy')
        plt.stem([start_point_sample, end_point_sample], [5, 5], 'k')
        plt.subplot(3, 1, 3)
        plt.plot(new_sig)
        plt.ylabel('amplitude')
        plt.xlabel('sample number')
        plt.show()
    return new_sig
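A minimal usage sketch for the function above on a synthetic signal with silent padding; the module-level plot flag the function reads is assumed here, and the tone parameters are illustrative:

import numpy as np

plot = False  # flag consumed inside InitialFinalSilenceRemoved

fs = 8000
tone = 0.5 * np.sin(2 * np.pi * 440 * np.arange(fs) / fs)     # 1 s of 440 Hz
sig = np.concatenate([np.zeros(4000), tone, np.zeros(4000)])  # pad with silence
trimmed = InitialFinalSilenceRemoved(sig)
print(len(sig), '->', len(trimmed))  # most of the padding should be gone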
Example #11
def hist_mtx(mtx, tstr=''):
    """
    Given a piano-roll matrix (128 MIDI pitches x beats), plot the pitch-class histogram.
    """
    i_min, i_max = np.where(mtx.mean(1))[0][[0, -1]]
    P.figure(figsize=(14.5, 8))
    P.stem(np.arange(i_max + 1 - i_min), mtx[i_min:i_max + 1, :].sum(1))
    ttl = 'Note Frequency'
    if tstr: ttl += ': ' + tstr
    P.title(ttl, fontsize=16)
    t = P.xticks(np.arange(0, i_max + 1 - i_min, 3),
                 pc_labels[i_min:i_max + 1:3],
                 fontsize=14)
    P.xlabel('Pitch Class', fontsize=14)
    P.ylabel('Frequency', fontsize=14)
    ax = P.axis()
    P.axis(xmin=-0.5)
    P.grid()
Example #12
def PlotProcessFragmentation(title, data, output):
    """Plots the Fragmentation vs size for a single process.

  Args:
    title: Title of the graph
    data: Data to plot. Should contain 'size' and 'fragmentation' entries.
    output: Filename to save the result to.
  """
    plt.figure(figsize=(16, 8))
    plt.title(title)
    plt.stem(data['data']['size'], data['data']['fragmentation'])
    plt.xscale('log', base=2)
    plt.yscale('linear')
    plt.ylim(ymin=0, ymax=100)
    plt.xlabel('Size (log)')
    plt.ylabel('Fragmentation (%)')
    plt.savefig(output, bbox_inches='tight')
    plt.close()
Example #13
def plot_normalized_frequency_distribution(n_fqdist, n=20):

    text_type = str
    pylab.grid(True, color="silver")
    pylab.title(
        'Normalized Word Frequency Distribution. TOP {n} Samples'.format(n=n))

    freqs = list(n_fqdist.values())[0:n + 1]
    samples = list(n_fqdist.keys())[0:n + 1]

    pylab.stem(freqs)
    pylab.xticks(range(len(samples)), [text_type(s) for s in samples],
                 rotation=90)
    pylab.xlabel("Samples")
    pylab.ylabel("Normalized Word Frequency")
    pylab.show()

    return True
Example #14
def _PlotProcess(all_data: dict, pid: int, output_prefix: str):
  """Represents the allocation size distribution.

  Args:
    all_data: As returned by _ParseTrace().
    pid: PID to plot the data for.
    output_prefix: Prefix of the output file.
  """
  data = all_data[pid]
  logging.info('Plotting data for PID %d' % pid)

  # Allocations vs size.
  plt.figure(figsize=(16, 8))
  plt.title('Allocation count vs Size - %s - %s' %
            (data['name'], data['labels']))
  plt.xscale('log', base=2)
  plt.yscale('log', base=10)
  plt.stem(data['data']['size'], data['data']['count'])
  plt.xlabel('Size (log)')
  plt.ylabel('Allocations (log)')
  plt.savefig('%s_%d_count.png' % (output_prefix, pid), bbox_inches='tight')
  plt.close()

  # CDF.
  plt.figure(figsize=(16, 8))
  plt.title('CDF of allocation size - %s - %s' % (data['name'], data['labels']))
  cdf = np.cumsum(100. * data['data']['count']) / np.sum(data['data']['count'])

  for value in [512, 1024, 2048, 4096, 8192]:
    index = np.where(data['data']['size'] == value)[0]
    cdf_value = cdf[index]
    plt.axvline(x=value, ymin=0, ymax=cdf_value / 100., color='lightgrey')

  plt.step(data['data']['size'], cdf, color='black', where='post')
  plt.ylim(ymin=0, ymax=100)
  plt.xlim(xmin=10, xmax=1e6)
  plt.xscale('log', base=2)
  plt.xlabel('Size (log)')
  plt.ylabel('CDF (%)')
  plt.savefig('%s_%d_cdf.png' % (output_prefix, pid),
              bbox_inches='tight',
              dpi=300)
  plt.close()
Example #15
def PlotProcessWaste(title, data, output):
    """Plots the Unused memory vs size for a single process.

  Args:
    title: Title of the graph
    data: Data to plot. Should contain 'size' and 'unused' entries.
    output: Filename to save the result to.
  """
    plt.figure(figsize=(16, 8))
    plt.title(title)
    plt.xscale('log', base=2)
    plt.yscale('log', base=2)
    plt.stem(data['data']['size'][data['data']['unused'] != 0],
             data['data']['unused'][data['data']['unused'] != 0])
    plt.ylim(ymin=1, ymax=2**20)
    plt.xlabel('Size (log)')
    plt.ylabel('Unused Size (log)')
    plt.savefig(output, bbox_inches='tight')
    plt.close()
Example #16
def define_params(ts_diff):
    lag_acf = acf(ts_diff, nlags=20)
    lag_pacf = pacf(ts_diff, nlags=20, method='ols')
    # choosing q: the lag where the ACF first crosses the upper confidence bound; here q = 2
    plt.subplot(121)
    plt.stem(lag_acf)
    plt.axhline(y=0, linestyle='--', color='gray')
    plt.axhline(y=-1.96 / np.sqrt(len(ts_diff)), linestyle='--', color='gray')  # lower confidence bound
    plt.axhline(y=1.96 / np.sqrt(len(ts_diff)), linestyle='--', color='gray')  # upper confidence bound
    plt.title('Autocorrelation Function')
    # choosing p: the lag where the PACF first crosses the upper confidence bound; here p = 2
    plt.subplot(122)
    plt.stem(lag_pacf)
    plt.axhline(y=0, linestyle='--', color='gray')
    plt.axhline(y=-1.96 / np.sqrt(len(ts_diff)), linestyle='--', color='gray')
    plt.axhline(y=1.96 / np.sqrt(len(ts_diff)), linestyle='--', color='gray')
    plt.title('Partial Autocorrelation Function')
    plt.tight_layout()
    plt.show()
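A minimal usage sketch for define_params on a synthetic AR(1) series; it assumes the imports the function relies on (numpy as np, matplotlib.pyplot as plt, and acf/pacf from statsmodels.tsa.stattools) are already in scope:

import numpy as np

rng = np.random.default_rng(0)
ts = np.zeros(500)
for t in range(1, 500):
    ts[t] = 0.7 * ts[t - 1] + rng.standard_normal()  # AR(1) with phi = 0.7

define_params(ts)  # expect the ACF to tail off and the PACF to cut off after lag 1
Example #17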
def main():
    n1 = int(input('Enter the number of trials with a large p: '))
    p1 = float(input('Enter the probability of each trial: '))

    '''Bernoulli Random Variable'''
    xbe = [0, 1]
    ybe = [1-p1, p1]
    plt.title("Bernoulli Distribution Stem Plot")
    plt.stem(xbe, ybe, '-.')
    plt.xlabel("X : Bernoulli Random Variable")
    plt.ylabel("PMF")
    plt.show()

    '''Binomial Random Variable Plot'''
    xbi, ybi = plot_Binomial(trial = n1, probability = p1)
    plt.subplot(211)
    plt.title("Binomial Distribution Stem Plot")
    plt.stem(xbi, ybi, '-.')
    plt.xlabel("X : Binomial Random Variable")
    plt.ylabel("PMF")

    '''Geometric Random Variable Plot'''
    xge, yge = plot_Geometric(success = n1, probability = p1)
    plt.subplot(212)
    plt.title("Geometric Distribution Stem Plot")
    plt.stem(xge, yge, '-.')
    plt.xlabel("X : Geometric Random Variable")
    plt.ylabel("PMF")
    plt.show()

    n2 = int(input('Enter the number of trials with a small p: '))
    p2 = float(input('Enter the probability of each trial: '))
    t = int(input('Enter the time span over which the events occur: '))

    '''Poisson Random Variable Plot'''
    xpo, ypo = plot_Poisson(trial = n2, probability = p2)
    plt.subplot(211)
    plt.title("Poisson Distribution Stem Plot")
    plt.xlabel("X : Poisson Random Variable")
    plt.ylabel("PMF")
    plt.stem(xpo, ypo, '-.')

    '''Exponential Random Variable Plot'''
    l = n2 * p2
    xex, yex = plot_Exponential(lamb = l, time = t)
    plt.subplot(212)
    plt.title("Exponential Distribution Plot")
    plt.xlabel("X : Exponential Random Variable")
    plt.ylabel("PDF")
    plt.plot(xex, yex)

    plt.show()
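The plot_* helpers called above are not shown. A plausible stand-in sketch built on scipy.stats; the names match the calls above, but these bodies are assumptions:

import numpy as np
from scipy import stats

def plot_Binomial(trial, probability):
    """Support and PMF of Binomial(trial, probability)."""
    x = np.arange(0, trial + 1)
    return x, stats.binom.pmf(x, trial, probability)

def plot_Geometric(success, probability):
    """PMF of the geometric distribution over 1..success."""
    x = np.arange(1, success + 1)
    return x, stats.geom.pmf(x, probability)

def plot_Poisson(trial, probability):
    """Poisson PMF with lambda = trial * probability."""
    lam = trial * probability
    x = np.arange(0, int(3 * lam) + 2)
    return x, stats.poisson.pmf(x, lam)

def plot_Exponential(lamb, time):
    """Exponential PDF with rate lamb, sampled on [0, time]."""
    x = np.linspace(0, time, 200)
    return x, lamb * np.exp(-lamb * x)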
Example #18
    def _process(self, data):

        num = math.ceil(len(data) / 2)
        fftf = np.fft.fft(data) * 2 / len(data)

        if not self.parameter['mph']:
            self.parameter['mph'] = np.mean(data) / 3

        frequency = process.detect_peaks(abs(fftf)[0:num],
                                         mph=self.parameter['mph'],
                                         mpd=self.parameter['mpd'],
                                         threshold=self.parameter['thre'])

        if self.parameter['show']:
            plt.figure()
            for i in frequency:
                plt.scatter(i - 1, abs(fftf)[0:num][i], color='r')
            plt.stem(abs(fftf)[1:num], color='#87CEEB')
            plt.show()

        return [i for i in frequency / 12 if i < self.parameter['mc']]
Example #19
def stem_timeseries_multi(timeseries_array: List[Timeseries], title, xlabel, ylabel, separate):
    matplotlib.style.use('default')
    fig = plt.figure()

    bottom = None
    for ts in timeseries_array:
        for y in ts.y:
            if bottom is None or y < bottom:
                bottom = y

    for ts in timeseries_array:
        plt.stem(ts.x, ts.y, label=ts.label, bottom=bottom)

    set_disp(title, xlabel, ylabel)

    plt.legend()
    fig = plt.gcf()

    plt.show()

    return fig, mpld3.fig_to_html(fig)
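Timeseries and set_disp are project-specific names. A minimal stand-in sketch that makes the function above self-contained; both definitions are assumptions:

from dataclasses import dataclass
from typing import Sequence
import matplotlib.pyplot as plt

@dataclass
class Timeseries:
    x: Sequence[float]
    y: Sequence[float]
    label: str = ''

def set_disp(title, xlabel, ylabel):
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
Example #20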
def demo(text=None):
    '''
    Use the boundary together with the pseudo-sentences to evaluate the quality of the segmentation.
    :param text:
    :return:
    '''
    from nltk.corpus import brown
    from matplotlib import pylab
    tt = TextTilingTokenizer(w=40, k=20, demo_mode=True)
    with open('flypaper_short.txt', 'r') as file:
        text = file.read()
    if text is None: text = brown.raw()[:10000]
    s, ss, d, b = tt.tokenize(text)
    print(b)
    pylab.xlabel("Sentence Gap index")
    pylab.ylabel("Gap Scores")
    pylab.plot(range(len(s)), s, label="Gap Scores")
    pylab.plot(range(len(ss)), ss, label="Smoothed Gap scores")
    pylab.plot(range(len(d)), d, label="Depth scores")
    pylab.stem(range(len(b)), b)
    pylab.legend()
    pylab.show()
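Example #21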
def myImHist(image):
    #image1=[[1,2,3],[2,1,3],[1,3,3]]
    min_value = np.min(image)
    max_value = np.max(image)
    j, k = np.shape(image)
    frequency = []
    pdf = []
    count = 0
    total_pixel = 0
    for i in range(min_value, max_value + 1):
        for a in range(j):
            for b in range(k):
                if image[a][b] == i:
                    print(i)
                    count = count + 1
                    print(count)
        frequency.append((i, count))
        total_pixel = total_pixel + count
        count = 0
    for intensity, no_of_repeat in frequency:
        pdf.append(no_of_repeat / total_pixel)

    plt.stem(pdf)
    plt.show()
Example #22
def zpFFTsizeExpt():
    f = 110.0
    fs = 1000.0
    t = np.arange(0, 1, 1.0 / fs)
    x = np.cos(2 * np.pi * f * t)
    xseg = x[0:256]
    w1 = np.hamming(256)
    w2 = np.hamming(512)
    X1 = np.fft.fft(xseg * w1)        # 256-point FFT of the windowed segment
    X2 = np.fft.fft(x[0:512] * w2)    # 512-point FFT of a longer segment
    X3 = np.fft.fft(xseg * w1, 512)   # 256 samples zero-padded to 512 points
    mx1 = abs(X1)
    mx2 = abs(X2)
    mx3 = abs(X3)
    fx1 = fs * np.arange(256) / 256
    fx2 = fs * np.arange(512) / 512
    plt.xlim(0, 150)
    plt.stem(fx1[0:80], mx1[0:80], 'y')
    plt.stem(fx2[0:80], mx2[0:80], 'r')
    plt.stem(fx2[0:80], mx3[0:80], 'b')
    plt.show()
Example #23
N1 = int(N / 10)
N2 = int(N * 10)

Ceros1 = np.zeros(N1)

Ceros2 = np.zeros(N2)

resultado = np.concatenate((Ceros1, signal, Ceros2), axis=None)

plt.plot(resultado)
plt.show()

sp = np.fft.fft(resultado)

plt.stem(np.absolute(sp)[0:500])
plt.show()
#plt.plot(np.angle(sp)[0:500])

#plt.show()

cuadrado = (np.absolute(sp)[0:500])**2
energia = integrate.simps(cuadrado)

cuadrado0 = (np.absolute(sp)[f0])**2

plt.plot(20 * np.log10(np.absolute(sp)[0:500]))
plt.show()

asd = max(cuadrado)
Example #24
###########################################################
# nlags: number of autocorrelation lags to return
# autocorrelation
lag_acf = acf(training_log_diff, nlags=30)
# partial autocorrelation
lag_pacf = pacf(training_log_diff, nlags=30, method='ols')

plt.figure(figsize=(12, 5))
plt.xlabel("no. of lag")
plt.ylabel("lag")
plt.title("ACF PLOT")
plt.axhline(y=0, linestyle='-', color='black')
plt.axhline(y=1.96 / np.sqrt(len(training)), linestyle='--', color='gray')
plt.axhline(y=-1.96 / np.sqrt(len(training)), linestyle='--', color='gray')
#plt.plot(lag_acf)
plt.stem(lag_acf)
# suggests a seasonality period of 12, hence S=12
# at S=12 the lag is positive, so P=1 and Q=0
# significant lag at 1, so q=1

plt.figure(figsize=(12, 5))
plt.xlabel("no. of lag")
plt.ylabel("lag")
plt.title("PACF PLOT")
plt.axhline(y=0, linestyle='-', color='black')
plt.axhline(y=1.96 / np.sqrt(len(training)), linestyle='--', color='gray')
plt.axhline(y=-1.96 / np.sqrt(len(training)), linestyle='--', color='gray')
#plt.plot(lag_acf)
plt.stem(lag_pacf)
# significant lag at 1, so p=1
Example #25
    s = 10
    m = 40
    D = random_dict(m, n)
    k = 0
    while 1:

        x = get_sparse_x(n, s)
        y = np.dot(D, x)
        x_naive = omp_naive(D, y)
        x_scikit = orthogonal_mp(D, y, s)
        error_naive = norm(x - x_naive.reshape(n, 1))
        error_scikit = norm(x - x_scikit.reshape(n, 1))

        k += 1
        if error_naive < 1e-3 and error_scikit > 1:
            print(x)
            break

    print(k)

    import matplotlib.pylab as plt

    nr = np.arange(0, n)
    plt.subplot(311)
    plt.stem(nr, x)
    plt.subplot(312)
    plt.stem(nr, x_naive)
    plt.subplot(313)
    plt.stem(nr, x_scikit)
    plt.show()
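random_dict and get_sparse_x are not shown in the example above. A plausible stand-in under the usual OMP setup (Gaussian dictionary with unit-norm columns, s-sparse target vector); both helpers are assumptions:

import numpy as np

def random_dict(m, n):
    """m x n Gaussian dictionary with unit-norm columns."""
    D = np.random.randn(m, n)
    return D / np.linalg.norm(D, axis=0)

def get_sparse_x(n, s):
    """n x 1 vector with s nonzero Gaussian entries at random positions."""
    x = np.zeros((n, 1))
    x[np.random.choice(n, s, replace=False), 0] = np.random.randn(s)
    return x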
Example #26
N = 1024
t = np.arange(0,1,1/N)
f0 = 1
f1 = 100

sig = sig.chirp(t, f0, 1, f1, 'linear',90)
SIG = fftshift(fft(sig))
F = np.linspace(-N/2,N/2,N)

AVGR = sma(np.real(SIG),0.05*len(sig))
AVGI = sma(np.imag(SIG),0.05*len(sig))


plt.figure()
plt.subplot(3,1,1), plt.plot(t,sig)
plt.subplot(3,1,2), plt.stem(F,np.real(SIG/N))
plt.subplot(3,1,2), plt.stem(F,np.imag(SIG/N),'g')
plt.subplot(3,1,3), plt.stem(F,AVGR/N)
plt.subplot(3,1,3), plt.stem(F,AVGI/N,'g')


###################################################################
# From here: test values

#a = np.array([2.,3.,1.,2.,2.,3.,1.,3.,1.,1.])
#smaval = sma(a,3)
#smahand = np.array([5./3, 6./3, 6./3, 5./3, 7./3, 6./3, 7./3, 5./3, 5./3, 2./3])
#
#for idx in range(len(smaval)):
#    print(smaval[idx], smahand[idx])
Example #27
import numpy as np
import matplotlib as mpl
import matplotlib.pylab as plt
y = [2, 3, 1]
x = np.arange(len(y))
print(x)
xlabel = ['x label']
error = np.random.rand(len(y))
plt.title("Bar Chart")
plt.barh(x, y, alpha=0.5, xerr=error)  # alpha is the transparency, xerr is the allowed error range
plt.show()

# stem plot (a bar chart drawn with zero-width stems)
x = np.linspace(0.1, 2 * np.pi, 10)
plt.title("Stem Plot")
plt.stem(x, np.cos(x), '-.')
plt.show()

# pie chart
label = ['자바', '씨', '씨++', '파이썬']
sizes = [15, 30, 45, 10]
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
plt.title('Pie Chart')
plt.pie(sizes, labels=label, colors=colors,
        startangle=90)  # shadow=True can also be passed to add a drop shadow
plt.axis('equal')  # keep the pie circular
plt.show()

# histogram (a frequency table drawn as a bar graph)
x = np.random.randn(1000)
plt.title("histogram")
Example #28
plt.figure(figsize=(13, 4))
plt.subplot(1, 5, 1)
plt.plot(x, unif)
plt.ylim((0, 4))
plt.title('(A)')
plt.ylabel('rozdělení $p$')
plt.xlabel('p')
plt.subplot(1, 5, 2)
plt.plot(x, cent)
plt.title('(B1)')
plt.xlabel('p')
plt.ylim((0, 4))
plt.subplot(1, 5, 3)
plt.plot(x, cent2)
plt.title('(B2)')
plt.xlabel('p')
plt.ylim((0, 4))
plt.subplot(1, 5, 4)
plt.plot(x, skewed)
plt.ylim((0, 4))
plt.xlabel('p')
plt.title('(C)')
plt.subplot(1, 5, 5)
plt.stem([0.9], [4], markerfmt=None)
plt.ylim((0, 4))
plt.xlim(0, 1)
plt.xlabel('p')
plt.title('(D)')
plt.tight_layout()
plt.savefig('l1-prior-mince.jpg')
Example #29
    DATA_f = fftshift(fft(data_f))  # Fit
    DATA_nf = fftshift(fft(data_nf))  # No fit
    F = np.linspace(-N/2,N/2,N)
elif method == 'file':
    [fs,data] = wavfile.read("../09 Sample 15sec.wav")#,dtype=float)
    data_nf = data[2048:2048+N:]
    data_nf = np.reshape(np.delete(data_nf,0, 1),len(data_nf))
    DATA_nf = fftshift(fft(data_nf))  # No fit
    F = np.linspace(-N/2,N/2,N) #(0,N/2,N/2+1)


if method == 'signal':
    plt.figure()
    plt.subplot(3,1,1), plt.plot(t,data_f,t,data_nf)
    plt.title("time: 2 sines")
    plt.subplot(3,1,2), plt.stem(F,np.abs(DATA_f/N)), #plt.stem(np.linspace(-N/2,N/2,N),np.imag(SIN_f/N),'g')
    plt.title("Spectrum Fit - Absolut No Window")
    plt.subplot(3,1,3), plt.stem(F,np.abs(DATA_nf/N)), #plt.stem(np.linspace(-N/2,N/2,N),np.imag(SIN_nf/N), 'g')
    plt.title("Spectrum No Fit - Absolut No Window")
elif method == 'file':
    plt.figure()
    plt.subplot(2,1,1), plt.plot(t,data_nf)
    plt.title("time: wav file")
    plt.subplot(2,1,2), plt.stem(F,np.abs(DATA_nf/N)), #plt.stem(np.linspace(-N/2,N/2,N),np.imag(SIN_nf/N), 'g')
    plt.title("Spectrum No Fit - Absolut No Window")

#[wt,x] = Window.triwind(N)
[x,whan] = Window.hanwind(N) 
#[x,wcos] = Window.coswind(N)

Example #30
params[0, 20:50] = 0
diag = np.random.rand(n_features)
features = np.random.multivariate_normal(np.zeros(n_features), np.diag(diag), n_samples)

# Show the condition number of the gram matrix
print("cond = %.2f" % (diag.max() / diag.min()))

linear = True
if linear == True:
    residuals = np.random.randn(n_samples, 1)
    labels = features.dot(params.T) + residuals
else:
    labels = np.array([[float(np.random.rand() < p)] for p in logistic(features.dot(params.T))])

plt.figure(figsize=(8, 4))
plt.stem(params[0])
plt.title("True parameters", fontsize=16)
plt.show()

x_init = 1 - 2 * np.random.rand(1, n_features)
n_iter = 30
l_l1 = 0.0
l_l2 = 0.1

#f and gradient
if linear == True:
    f = lambda x: least_squares(x, features, labels)
    grad_f = lambda x: least_squares_grad(x, features, labels)
    hess_f = lambda x: least_squares_hess(x, features, labels)
    step = norm(features.T.dot(features)/ n_samples, 2)
else:
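Example #31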
import numpy as num
import matplotlib.pyplot as plt

a = int(input('Enter the amplitude = '))
f = int(input('Enter the frequency  = '))

t = num.arange(0, 2, 0.02)  # 100 samples over 2 seconds
# generation of an impulse (sampling) train
x1 = []
for i in range(len(t)):
    x1.append(1)
x2 = a * num.sin(2 * num.pi * f * t)  # generation of the sine wave
y = x1 * x2  # modulation step: the sine samples weighted by the impulse train

#for impulse signal plot
plt.stem(t, x1)
plt.title('Impulse Signal')
plt.xlabel('Time')
plt.ylabel('Amplitude ')
plt.grid(True)
plt.show()

#for sine wave plot
plt.plot(t, x2)
plt.title('Sine Wave')
plt.xlabel('Time ')
plt.ylabel('Amplitude ')
plt.grid(True)
plt.show()

#for PAM wave plot
Example #32
    def _draw_levels(self, f, P, l, savefig, time_idx, Xhatlow, Xhathigh,
                     fP_record, X, X_next_l, verbose):
        """ 
        Draw descriptive plots at each level of mrDMD showing
        the spatial modes and spectral components recorded
        and/or extracted.

        Parameters
        ----------
        f : array-like
            Frequencies
        P : array-like
            Corresponding power
        l : int
            The mrDMD level currently being processed
        savefig : boolean
            Saves figure
        time_idx : array_like
            The array of indices associated with current window
        Xhatlow : matrix
            The matrix reconstruction from the unselected modes
        Xhathigh : matrix
            The matrix reconstruction from the selected modes
        fP_record : list
            The list of (f, P) tuple pairs recorded
        X : matrix
            The raw data matrix
        X_next_l : matrix
            The data matrix to be passed down to lower levels
        verbose : boolean
            Explicit printing of f and P recorded

        """
        self.levelSet.add(l)  #only plot once per level
        if verbose:
            print('\n\nLevel: %d' % l)
        #Frequency vs. scalings of DMD
        plt.figure()
        plt.rc('text', usetex=True)
        plt.stem(f, P, 'k')
        plt.title(r'Level: %d' % l)
        title = r'SpectrumLevel:%d' % l
        plt.xlabel(r'Frequency')
        plt.ylabel(r'DMD scaling')
        if savefig:
            plt.savefig('%s.pdf' % title)  # save before show(), which can clear the figure
        plt.show()
        plt.close()
        if verbose:
            print('Power recorded [frequency, Power]:')
            print(fP_record)
            print('Added to time_idx %d - %d' % (time_idx[0], time_idx[-1]))

        #show the original time window of X in black, then
        #show the subtracted reconstruction or the high mode
        #reconstruction in red
        if self.original_channels == 1:
            plt.figure()
            plt.rc('text', usetex=True)
            plt.plot(time_idx, X[0, :], 'k', label=r'\left| X \right|')
            if self.subtraction:
                plt.plot(time_idx,
                         Xhatlow[0, :],
                         'r',
                         label=r'\left| \hat{X} \right|')
            else:
                plt.plot(time_idx,
                         Xhathigh[0, :],
                         'r',
                         label=r'\left| \hat{X} \right| left over modes')
            plt.legend()
            title = r'Reconstruction level: %d' % l
            plt.title(title)
            plt.show()
        else:
            self._day_plot(time_idx,
                           X,
                           title=r'\left| X \right|, level: %d' % l,
                           savefig=savefig)
            if self.subtraction:
                title_end = r'\left| \hat{X} \right|, level: %d' % l
                self._day_plot(time_idx,
                               Xhatlow,
                               color='r',
                               title=title_end,
                               savefig=savefig)
            elif self.excess_reconstruction:
                title_end = r'\left| \hat{X} \right| left over modes, level: %d' % l
                self._day_plot(time_idx,
                               Xhathigh,
                               color='r',
                               title=title_end,
                               savefig=savefig)

        #show the reconstruction for the next levels only for
        #subtraction constructed via: x - xhat
        if self.subtraction:
            if self.original_channels == 1:
                plt.figure()
                plt.rc('text', usetex=True)
                plt.plot(time_idx,
                         X_next_l[0, :],
                         'b',
                         label=r'\left| X \right|')
                title = r'\left| X - \hat{X} \right| level: %d' % l
                plt.title(title)
                if savefig:
                    plt.savefig('%s.pdf' % title)  # save before show()
                plt.show()
                plt.close()
            else:
                title = r'\left| X - \hat{X} \right| level: %d' % l
                self._day_plot(time_idx,
                               X_next_l,
                               color='b',
                               title=title,
                               savefig=savefig)

        #show heatmap so far
        self.heatmap(title='Level %d' % l)
Example #33
    return tam, norm


def F1(w, t):
    f1 = 3 * np.cos(w * t) + 2 * np.cos(3 * w * t) + np.cos(5 * w * t)
    return f1


n = 51
T = 2 * np.pi
h = T / n
t = np.linspace(0, 50 * h, n)
w = 2 * np.pi / T

tam, val = fourier(F1(w, t))
tf = np.linspace(0, tam, tam)

plt.figure(1, figsize=(14, 5))

plt.subplot(1, 2, 1)
plt.plot(t, F1(w, t))
plt.scatter(t, F1(w, t), s=9)
plt.xlabel('t')
plt.ylabel('y(t)')

plt.subplot(1, 2, 2)
plt.stem(tf, abs(val), use_line_collection=True)
plt.xlabel('k')
plt.ylabel('|X|/N')

plt.savefig('1.png')
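Example #34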
        plt.close()

        # variance of each feature
        plt.figure()
        plt.plot(featureList[i].Var)
        plt.title('variance ' + featureName[i])
        plt.xlabel('File number')
        plt.savefig('plot/variance ' + featureName[i] + '.png', dpi=1000)
        plt.close()

        # plot distribution of some features
        if i > 2:
            distributionData = dataAll[:, i]
            valueCounter = collections.Counter(distributionData)
            plt.figure()
            plt.stem(list(valueCounter.keys()), list(valueCounter.values()))
            plt.title(featureName[i] + ' distribution with zero')
            plt.xlabel('Value')
            plt.ylabel('Quantity')
            plt.savefig('plot/' + featureName[i] +
                        ' value distribution with zero.png',
                        dpi=1000)
            plt.close()
            plt.figure()
            plt.stem(list(valueCounter.keys())[1:], list(valueCounter.values())[1:])
            plt.xlabel('Value')
            plt.ylabel('Quantity')
            plt.title(featureName[i] + ' distribution without zero')
            plt.savefig('plot/' + featureName[i] +
                        ' distribution without zero.png',
                        dpi=1000)
Example #35
def SplitWavdataByEnergy(sig, fs, initialsegmentfolder, file, Xsec):
    # Splits wav data based on energy, i.e. pauses
    # Input: sig: the wavfile data in an array
    #        fs: sampling frequency
    #        initialsegmentfolder: the folder location where all the wav segments are dumped
    #        file: the wavfile to be split into segments
    #        Xsec: the minimum duration of the segments the wavfile will be split into
    # Output: wavfile segments from 0 to N in initialsegmentfolder
    #########################################################################

    window = 512
    hop = window // 2  # integer hop for slicing
    energy = []
    i = 0
    energy_index = []
    while i < (len(sig) - window):
        chunk = sig[i:i + window][np.newaxis]
        energy.append(chunk.dot(chunk.T)[0][0])
        energy_index.append(i)
        i = i + hop

    energy = np.array(energy)
    energy_thresh = 0.1 * np.mean(
        energy)  #mean because there might be some spurious peak in energy
    indiceswithlowenergy = np.where(energy <= energy_thresh)
    timeinstance_withlowenergy = indiceswithlowenergy[0] * hop * 1.0 / fs

    ### retain those silences which are greater than or equal to 0.2 seconds, and hence find valid silent segments
    sil_dur_cap = 0.2
    num_samp_sil_dur_cap = np.floor(sil_dur_cap * fs * 1.0 / hop)
    lowenergyindices = indiceswithlowenergy[0]

    validlowenergy_subarray = []
    validlowenergyarray = []
    print('~~~~num_samp_sil_dur_cap = ', num_samp_sil_dur_cap)
    print('\nlen(lowenergyindices): ', len(lowenergyindices))
    for ind in range(len(lowenergyindices) - 1):
        diff = lowenergyindices[ind + 1] - lowenergyindices[ind]
        if diff > 1:
            ##to account for breathy regions## BUT THIS PIECE OF CODE SPLITS FROM CONSONANTS ## NOT desirable
            # if diff>np.floor(0.2*fs*1.0/hop) and diff<np.floor(0.3*fs*1.0/hop): #0.2-0.3 seconds of breathy voice allowed
            #     for i in range(lowenergyindices[ind],lowenergyindices[ind+1],1):
            #         validlowenergy_subarray.append(i)
            #     continue
            #################################
            if validlowenergy_subarray:
                validlowenergy_subarray.append(lowenergyindices[ind])
                if len(validlowenergy_subarray) >= num_samp_sil_dur_cap:
                    validlowenergyarray = validlowenergyarray + validlowenergy_subarray
            validlowenergy_subarray = []
            continue
        validlowenergy_subarray.append(lowenergyindices[ind])
    if len(validlowenergy_subarray) >= num_samp_sil_dur_cap:
        validlowenergyarray = validlowenergyarray + validlowenergy_subarray
    validlowenergy_subarray = []

    #########################
    ##Finding center of valid silent regions. These will be boundaries of phrases/segments/song lines
    # print '\nlen(validlowenergyarray): ',len(validlowenergyarray)
    boundary = []
    for ind in range(len(validlowenergyarray) - 1):
        diff = validlowenergyarray[ind + 1] - validlowenergyarray[ind]
        if diff > 1:
            if validlowenergy_subarray:
                validlowenergy_subarray.append(validlowenergyarray[ind])
                boundary.append(validlowenergy_subarray[0] +
                                ((validlowenergy_subarray[-1] -
                                  validlowenergy_subarray[0]) / 2))
            validlowenergy_subarray = []
            # print '\nI\'m before Continue. Current iter is: ',ind,'\n'
            continue
        validlowenergy_subarray.append(validlowenergyarray[ind])
    if validlowenergy_subarray:
        boundary.append(validlowenergy_subarray[0] + (
            (validlowenergy_subarray[-1] - validlowenergy_subarray[0]) / 2))
    print('len(boundary): ', len(boundary))
    WavSplitMinXsec(fs, initialsegmentfolder, file, sig, boundary, hop, Xsec)

    ##########################
    if plot:
        plt.figure("figure from SplitWavdataByEnergy 1")
        plt.subplot(2, 1, 1)
        plt.plot(range(len(sig)), sig)
        plt.ylabel('amplitude')
        plt.subplot(2, 1, 2)
        plt.plot(energy_index, energy)
        plt.stem(indiceswithlowenergy[0] * hop,
                 5 * np.ones(len(indiceswithlowenergy[0])), 'k')
        plt.ylabel('energy')
        plt.show()

        plt.figure("figure from SplitWavdataByEnergy 2")
        plt.title('amplitude vs. time')
        plt.subplot(2, 1, 1)
        plt.plot(np.array(range(len(sig))) * 1.0 / fs, sig)
        plt.ylabel('amplitude')
        plt.subplot(2, 1, 2)
        plt.plot(np.array(energy_index) * 1.0 / fs, energy)
        plt.stem(timeinstance_withlowenergy,
                 5 * np.ones(len(indiceswithlowenergy[0])), 'k')
        plt.ylabel('energy')
        plt.show()

    if plot:
        plt.figure("figure from SplitWavdataByEnergy 3")
        plt.subplot(3, 1, 1)
        plt.plot(range(len(sig)), sig)
        plt.ylabel('amplitude')
        plt.subplot(3, 1, 2)
        plt.plot(energy_index, energy)
        plt.stem(indiceswithlowenergy[0] * hop,
                 5 * np.ones(len(indiceswithlowenergy[0])), 'k')
        plt.ylabel('energy')
        plt.subplot(3, 1, 3)
        plt.plot(energy_index, energy)
        plt.stem(
            np.array(validlowenergyarray) * hop,
            10 * np.ones(len(validlowenergyarray)), 'k')
        plt.ylabel('energy')
        plt.show()

        plt.figure("figure from SplitWavdataByEnergy 4")
        plt.title('amplitude vs. time')
        plt.subplot(2, 1, 1)
        plt.plot(np.array(range(len(sig))) * 1.0 / fs, sig)
        plt.ylabel('amplitude')
        plt.subplot(2, 1, 2)
        plt.plot(np.array(energy_index) * 1.0 / fs, energy)
        plt.stem(
            np.array(validlowenergyarray) * hop * 1.0 / fs,
            5 * np.ones(len(validlowenergyarray)), 'k')
        plt.ylabel('energy')
        plt.show()

    if plot:
        plt.figure()
        plt.title('amplitude vs. time')
        plt.subplot(2, 1, 1)
        plt.plot(np.array(range(len(sig))) * 1.0 / fs, sig)
        plt.ylabel('amplitude')
        plt.subplot(2, 1, 2)
        plt.plot(np.array(energy_index) * 1.0 / fs, energy)
        plt.stem(
            np.array(validlowenergyarray) * hop * 1.0 / fs,
            1 * np.ones(len(validlowenergyarray)), 'k')
        plt.stem(
            np.array(boundary) * hop * 1.0 / fs, 10 * np.ones(len(boundary)),
            'r')
        plt.ylabel('energy')
        plt.show()
    return