def draw_grid(self):
     """ Draw current grid
     :return: -
     """
     # plt.ion()
     plt.matshow(self.grid)
     plt.show()
def show_distance_matrix_mpl(characters_iterator, distance_function):
    """ Compute a distance matrix on the fly and show it through matplotlib.
    Only for small data (the matrix is O(n^2) in the number of characters)."""
    characters = list(characters_iterator)  # materialize: we need len() and two passes
    d = np.array([distance_function(x, y) for x in characters for y in characters])
    mpl.matshow(d.reshape(len(characters), len(characters)), cmap="Reds")
    mpl.colorbar()
    mpl.show()
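# A minimal usage sketch (not from the original source): it assumes `mpl` above is an
# alias for matplotlib.pyplot, and uses a toy character distance invented for illustration.
import numpy as np
import matplotlib.pyplot as mpl

def char_distance(x, y):
    # toy symmetric distance between two characters
    return abs(ord(x) - ord(y))

show_distance_matrix_mpl(list("abcdef"), char_distance)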
Example #3
 def func(n):
     plt.matshow(z[n])
     plt.title('%i%s'%(freq.f_scaled[n],freq.unit)) 
     plt.grid(False)
     plt.colorbar()
     if clims is not None:
         plt.clim(clims)
def classifykNN():
    print('Classify kNN')
    target_names = ['unacc', 'acc', 'good', 'v-good']
    df = pd.read_csv("data/cars-cleaned.txt", delimiter=",")
    print(df)
    print(df.dtypes)
    df_y = df['accept']
    df_x = df.iloc[:, :-1]  # .ix was removed from pandas; use positional indexing

    #print(df_y)
    #print(df_x)
    train_y, test_y, train_x, test_x = train_test_split(df_y, df_x, test_size=0.3, random_state=33)

    clf = KNeighborsClassifier(n_neighbors=3)
    tstart = time.time()
    model = clf.fit(train_x, train_y)
    print("training time:", round(time.time() - tstart, 3), "seconds")
    y_predictions = model.predict(test_x)
    print("Accuracy :", model.score(test_x, test_y))
    #print(y_predictions)
    c_matrix = confusion_matrix(test_y, y_predictions)
    print("confusion matrix:")
    print(c_matrix)

    print("Nearest Neighbors probabilities")
    print(model.predict_proba(test_x))
    
    plt.matshow(c_matrix)
    plt.colorbar();
    tick_marks = np.arange(len(target_names))
    plt.xticks(tick_marks, target_names, rotation=45)
    plt.yticks(tick_marks, target_names)    
    plt.ylabel('true label')
    plt.xlabel('predicted label')
    plt.show()
Example #5
def export_pdf(data, output, gradient=True):
    """Exports the data as a heatmap to the file specified by output (.pdf). The gradient
	marker specifies if the color in the heatmap should be a gradient that shows the
	fold change score, or as binary red/white colors with a cutoff for maximum fold-change
	score to be considered a hit.
	"""
    bacteria = sorted(data.values()[0].keys())
    matrix = np.zeros((len(data), len(bacteria)))
    rows = sorted(
        data.keys(),
        key=lambda w: np.sum([d ** 3 for d in data[w].values()]) / (len([d for d in data[w].values() if d < 0.3]) + 1),
    )
    for wind, well in enumerate(rows):
        for bind, bacterium in enumerate(bacteria):
            if gradient:
                matrix[(wind, bind)] = data[well][bacterium]
            else:
                matrix[(wind, bind)] = 0 if data[well][bacterium] < 0.3 else 0.5
    plt.matshow(matrix, cmap=plt.get_cmap("RdYlGn"), vmin=0, vmax=1)
    plt.xticks(range(len(bacteria)), bacteria, rotation=90)
    plt.yticks(range(len(rows)), rows)
    ax = plt.gca()
    for posi in ax.spines:
        ax.spines[posi].set_color("none")
    ax.tick_params(labelcolor="k", top="off", bottom="off", left="off", right="off")
    fig = plt.gcf()
    fig.set_size_inches(10, 100)
    plt.savefig(output + ".pdf", bbox_inches="tight", dpi=200)
    plt.close()
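# Hypothetical usage sketch (wells, bacteria, and scores invented for illustration):
# `data` is assumed to map well -> {bacterium: fold-change score}, as the indexing in
# export_pdf above implies.
data = {
    "A1": {"coli": 0.12, "subtilis": 0.85},
    "A2": {"coli": 0.95, "subtilis": 0.40},
}
export_pdf(data, "screen_heatmap", gradient=True)  # writes screen_heatmap.pdf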
Example #6
def displayBoard(locations, shape):
    
    r = c = shape
    cmap = mpl.colors.ListedColormap(['#f5ecce', '#f5ecce'])
    img = mpl.image.imread('three.jpg').astype(float)  # np.float was removed from NumPy
    boxprops = {"facecolor": "#614532", "edgecolor": "none"}

    x, y = np.meshgrid(range(c), range(r))
    plt.matshow(x % 2 ^ y % 2, cmap=cmap)
    #plt.axis("off")  # eliminate borders from plot

    fig = plt.gcf()
    fig.set_size_inches([r, c])
    scale = 0.6*fig.get_dpi() / max(img.shape)
    ax = plt.gca()
    word = ['one.jpg', 'two.jpg', 'three.jpg', 'four.png', 'five.jpg','six.jpg']
    i = 0
    for dim in locations:
        print(y)
        print(x)
        img = mpl.image.imread(word[i]).astype(float)
        scale = 0.6*fig.get_dpi() / max(img.shape)
        i += 1
        box = mpl.offsetbox.OffsetImage(img, zoom=scale)
        ab = mpl.offsetbox.AnnotationBbox(box, (dim[1],dim[0]), bboxprops=boxprops)
        ax.add_artist(ab)

    plt.show()
    return fig
def classify():
    print('Classify SVM')
    target_names = ['unacc', 'acc', 'good', 'v-good']
    df = pd.read_csv("data/cars-cleaned.txt", delimiter=",")
    print(df)
    print(df.dtypes)
    df_y = df['accept']
    df_x = df.iloc[:, :-1]  # .ix was removed from pandas; use positional indexing

    train_y, test_y, train_x, test_x = train_test_split(df_y, df_x, test_size=0.3, random_state=33)

    clf = svm.SVC(kernel="linear", C=0.01)
    tstart = time.time()
    model = clf.fit(train_x, train_y)
    print("training time:", round(time.time() - tstart, 3), "seconds")
    y_predictions = model.predict(test_x)
    print("Accuracy :", model.score(test_x, test_y))
    c_matrix = confusion_matrix(test_y, y_predictions)
    print("confusion matrix:")
    print(c_matrix)

    plt.matshow(c_matrix)
    plt.colorbar();
    tick_marks = np.arange(len(target_names))
    plt.xticks(tick_marks, target_names, rotation=45)
    plt.yticks(tick_marks, target_names)    
    plt.ylabel('true label')
    plt.xlabel('predicted label')
    plt.show()
def main():
    target, train = loadData()
    showFreq(target)

    rfc = RandomForestClassifier(n_estimators=100, n_jobs=4, verbose=3)
    print "Training..."
    t0 = time()
    rfc.fit(train, target)
    print "done in %0.3fs" % (time() - t0)

    # print "Scoring..."
    # print rfc.score(train, target)

    print "Predicting..."
    test = genfromtxt(TEST_FILE, dtype='int', delimiter=',', skip_header=True)
    results = rfc.predict(test)
    saveResults(results)

    # for index, image in enumerate(test[:4]):
    #     plt.subplot(2, 4, index + 5)
    #     plt.axis('off')
    #     plt.imshow(image.reshape(28, 28), cmap=plt.cm.gray_r, interpolation='nearest')
    # plt.show()

    importances = rfc.feature_importances_
    importances = importances.reshape((28, 28))

    # Plot pixel importances
    plt.matshow(importances, cmap=plt.cm.hot)
    plt.title("Pixel importances with forests of trees")
    plt.show()
Example #9
def generate_single_funnel_test_data( excitation_angles, emission_angles, \
                                          md_ex=0, md_fu=1, \
                                          phase_ex=0, phase_fu=0, \
                                          gr=1.0, et=1.0 ):

    ex, em = np.meshgrid( excitation_angles, emission_angles )

    alpha = 0.5 * np.arccos( .5*(((gr+2)*md_ex)-gr) )

    ph_ii_minus = phase_ex - alpha
    ph_ii_plus  = phase_ex + alpha
    
    print(ph_ii_minus)
    print(ph_ii_plus)

    Fnoet  =    np.cos( ex-ph_ii_minus )**2 * np.cos( em-ph_ii_minus )**2
    Fnoet += gr*np.cos( ex-phase_ex )**2    * np.cos( em-phase_ex )**2
    Fnoet +=    np.cos( ex-ph_ii_plus )**2  * np.cos( em-ph_ii_plus )**2
        
    Fnoet /= (2+gr)
    
    Fet   = .25 * (1+md_ex*np.cos(2*(ex-phase_ex))) \
        * (1+md_fu*np.cos(2*(em-phase_fu-phase_ex)))
    
    
    Fem = et*Fet + (1-et)*Fnoet


    import matplotlib.pyplot as plt
    plt.interactive(True)
    plt.matshow( Fem, origin='lower' )
    plt.colorbar()
Example #10
def save_cm(cm, filename, target_names):
    fig = plt.figure()
    plt.matshow(cm, cmap=plt.cm.RdPu)
    plt.title("Confusion Matrix - Langid.py")
    plt.colorbar()
    tick_marks = np.arange(len(target_names))
    plt.xticks(tick_marks, target_names)
    plt.tick_params(axis="both", which="major", labelsize=12)
    plt.tick_params(axis="both", which="minor", labelsize=10)

    [width, height] = cm.shape
    diag_sum = 0
    for x in range(width):
        diag_sum = diag_sum + cm[x][x]
        row_sum = np.sum(cm[x][:])
        for y in range(height):

            div_res = cm[x][y] * 100.0 / row_sum

            if np.isnan(div_res):
                res = "-"
            else:
                if div_res != 0:
                    res = str(div_res) + "%"
                else:
                    res = "0"

            plt.annotate(str(res), xy=(y, x), horizontalalignment="center", verticalalignment="center")

    print("diag_sum, accuracy", diag_sum, diag_sum * 1.0 / 13000)
    plt.yticks(tick_marks, target_names)
    plt.ylabel("True")
    plt.xlabel("Predicted")
    plt.savefig(filename)  # save before show(); show() can leave an empty canvas behind
    plt.show()
Example #11
def plot_confusion_matrix(cm):
	pl.matshow(cm)
	pl.title('Confusion matrix')
	pl.colorbar()
	pl.ylabel('True label')
	pl.xlabel('Predicted label')
	pl.show()
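# Minimal usage sketch (an assumption, not part of the original): `pl` is taken to be
# matplotlib.pyplot, and the labels are toy values.
import matplotlib.pyplot as pl
from sklearn.metrics import confusion_matrix

y_true = [0, 1, 1, 0, 1]
y_pred = [0, 1, 0, 0, 1]
plot_confusion_matrix(confusion_matrix(y_true, y_pred))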
def benchmark(clf_class, params, name):
    print("parameters:", params)
    t0 = time()
    clf = clf_class(**params).fit(X_train, y_train)
    print("done in %fs" % (time() - t0))

    if hasattr(clf, 'coef_'):
        print("Percentage of non zeros coef: %f"
              % (np.mean(clf.coef_ != 0) * 100))
    print("Predicting the outcomes of the testing set")
    t0 = time()
    pred = clf.predict(X_test)
    print("done in %fs" % (time() - t0))

    print("Classification report on test set for classifier:")
    print(clf)
    print()
    print(classification_report(y_test, pred,
                                target_names=news_test.target_names))

    cm = confusion_matrix(y_test, pred)
    print("Confusion matrix:")
    print(cm)

    # Show confusion matrix
    plt.matshow(cm)
    plt.title('Confusion matrix of the %s classifier' % name)
    plt.colorbar()
Example #13
def TestSVM(features, labels, silence=True):
    X_train = features[0:1600,:]
    Y_train = labels[0:1600]

    X_test = features[1600:,:]
    Y_test = labels[1600:]

    clf = SVM.SVC()
    clf.fit(X_train, Y_train)

    predictions = clf.predict(X_test)  

    error = np.mean(abs(predictions-Y_test))

    cm = confusion_matrix(Y_test, predictions)

    cm_sum = np.sum(cm, axis=1)

    cm_mean = cm.T / cm_sum
    cm_mean = cm_mean.T
    
    if not silence:
        plt.matshow(cm_mean)
        plt.title('Confusion matrix')
        plt.colorbar()
        plt.ylabel('True label')
        plt.xlabel('Predicted label')
        plt.show()
        
    return error, cm_mean
def main():
    np.set_printoptions(threshold=np.inf)  # np.nan is no longer accepted by set_printoptions
    testPixelRow = 24
    testPixelCol = 17
    #obs_20120919-131142.h5,obs_20120919-131346.h5
    #create a cal file from a twilight flat
    cal = FlatCal('../../params/flatCal.dict')
    #open another twilight flat as an observation and apply a wavelength cal and the new flat cal
#    run='LICK2012'
#    obsFileName = FileName(run=run,date='20120918',tstamp='20120919-131142').flat()
#    flatCalFileName = FileName(run=run,date='20120918',tstamp='20120919-131448').flatSoln()
#    wvlCalFileName = FileName(run=run,date='20120916',tstamp='20120917-072537').calSoln()

    run = 'PAL2012'
    obsFileName = FileName(run=run,date='20121211',tstamp='20121212-140003').obs()
    flatCalFileName = FileName(run=run,date='20121210',tstamp='').flatSoln()
    wvlCalFileName = FileName(run=run,date='20121210',tstamp='20121211-133056').calSoln()
    flatCalPath = os.path.dirname(flatCalFileName)

    ob = ObsFile(obsFileName)#('obs_20120919-131142.h5')
    ob.loadWvlCalFile(wvlCalFileName)#('calsol_20120917-072537.h5')
    ob.loadFlatCalFile(flatCalFileName)#('flatsol_20120919-131142.h5')

    #plot some uncalibrated and calibrated spectra for one pixel
    fig = plt.figure()
    ax = fig.add_subplot(211)
    ax2 = fig.add_subplot(212)
    print(ob.getPixelCount(testPixelRow,testPixelCol))

    #flatSpectrum,wvlBinEdges = ob.getPixelSpectrum(testPixelRow,testPixelCol,weighted=False)
    spectrum,wvlBinEdges = ob.getPixelSpectrum(testPixelRow,testPixelCol,wvlStart=cal.wvlStart,wvlStop=cal.wvlStop,wvlBinWidth=cal.wvlBinWidth,weighted=False,firstSec=0,integrationTime=-1)

    weightedSpectrum,wvlBinEdges = ob.getPixelSpectrum(testPixelRow,testPixelCol,weighted=True)
    #flatSpectrum,wvlBinEdges = cal.flatFile.getPixelSpectrum(testPixelRow,testPixelCol,wvlStart=cal.wvlStart,wvlStop=cal.wvlStop,wvlBinWidth=cal.wvlBinWidth,weighted=False,firstSec=0,integrationTime=-1)
    flatSpectrum = cal.spectra[testPixelRow,testPixelCol]
    x = wvlBinEdges[0:-1]
    ax.plot(x,cal.wvlMedians,label='median spectrum',alpha=.5)
    ax2.plot(x,cal.flatFactors[testPixelRow,testPixelCol,:],label='pixel weights',alpha=.5)
    ax2.set_title('flat weights for pixel %d,%d'%(testPixelRow,testPixelCol))
    ax.plot(x,spectrum+20,label='unweighted spectrum for pixel %d,%d'%(testPixelRow,testPixelCol),alpha=.5)
    ax.plot(x,weightedSpectrum+10,label='weighted %d,%d'%(testPixelRow,testPixelCol),alpha=.5)
    ax.plot(x,flatSpectrum+30,label='flatFile %d,%d'%(testPixelRow,testPixelCol),alpha=.5)

    ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.3),fancybox=True,ncol=3)
    plt.show()

    #display a time-flattened image of the twilight flat as it is and after using itself as its flat cal
    #cal.flatFile.loadFlatCalFile(flatCalFileName)#('flatsol_20120919-131142.h5')
    #cal.flatFile.displaySec(weighted=True,integrationTime=-1)
    #ob.displaySec(integrationTime=-1)
    #ob.displaySec(weighted=True,integrationTime=-1)


    for idx in range(0,100,20):
        factors10 = cal.flatFactors[:,:,idx]
        plt.matshow(factors10,vmax=np.mean(factors10)+1.5*np.std(factors10))
        plt.title('Flat weights at %d'%cal.wvlBinEdges[idx])
        plt.colorbar()
        plt.savefig('plots/factors%d.png'%idx)
        plt.show()
Example #15
def test_initialize_at_truth():
    global alpha, beta, num_topics, num_vocab, document_lengths, \
            doc_topic, topic_word, docs, model
    alpha = 5.
    beta = 20.
    num_topics = 20
    num_vocab = 1000
    document_lengths = [100]*1000

    doc_topic, topic_word, docs = generate_synthetic(alpha,beta,
            num_topics,num_vocab,document_lengths)

    model = lda.CollapsedSampler(alpha,beta,num_topics,num_vocab)
    model.add_documents_spmat(docs)

    # initialize at truth
    model.document_topic_counts = (model.document_topic_counts.sum(1)[:,None] * doc_topic).round()
    model.topic_word_counts = (model.topic_word_counts.sum(1)[:,None] * topic_word).round()

    model.resample(1000)

    plt.matshow(topic_word[:20,:20])
    plt.title('true topic_word on first 20 words')
    plt.matshow(model.topic_word_counts[:20,:20])
    plt.title('topic_word counts on first 20 words')
def calculate_temp(vars_file, data_file, method=rbf):
    ini_time = time()
    regression = calculate_regression(data_file)
    t_reg = time()
    temperature = create_regression_field(regression, vars_file)
    t_temp = time()
    residuals_field = method(regression, temperature.shape)
    t_res = time()
    final_field = calculate_final_field(temperature, residuals_field)
    t_final = time()

    plt.matshow(final_field)
    plt.savefig(method.__name__ + ".png")
    t_draw = time()

    print("""-------
Regression time: {:.0f} ms
Temperature field time: {:.0f} ms
Residuals field time: {:.0f} ms
Final field time: {:.0f} ms
Drawing time: {:.0f} ms
-------
Total time: {:.0f} ms"""
           .format(1000*(t_reg - ini_time),
            1000*(t_temp - t_reg),
            1000*(t_res - t_temp),
            1000*(t_final - t_res),
            1000*(t_draw - t_final),
            1000*(t_draw - ini_time)))
Example #17
def marg_mult(model, db, samples, burn=0, filename=None, n5=False):
    """
    generates histogram for marginal distribution of posterior multiplicities.

    :param model: TorsionFitModel
    :param db: pymc.database for model
    :param samples: length of trace
    :param burn: int. number of steps to skip
    :param filename: filename for plot to save
    """
    if n5:
        multiplicities = tuple(range(1, 7))
    else:
        multiplicities = (1, 2, 3, 4, 6)
    mult_bitstring = []
    for i in model.pymc_parameters.keys():
        if i.split('_')[-1] == 'bitstring':
            mult_bitstring.append(i)
    # one column per multiplicity term (6 when n5, else 5)
    histogram = np.zeros((len(mult_bitstring), samples, len(multiplicities)))

    for m, torsion in enumerate(mult_bitstring):
        for i, j in enumerate(db.trace('%s' % torsion)[burn:]):
            for k, l in enumerate(multiplicities):
                if 2**(l-1) & int(j):
                    histogram[m][i][k] = 1

    plt.matshow(histogram.sum(1), cmap='Blues', extent=[0, 5, 0, 20])
    plt.colorbar()
    plt.yticks([])
    plt.xlabel('multiplicity term')
    plt.ylabel('torsion')
    if filename:
        plt.savefig(filename)
Example #18
    def plot(self):
        """ Return matplotlib plt object of similarity matrix. """

        plt.matshow(self.data, cmap="Reds")
        plt.colorbar()

        return plt
Example #19
def test_noise(noise_coeff=0.00):
    file = 'test16k.wav'
    fs, x = wavfile.read(file)
    fs, nbit, x_length, x = readwav(file)
    period = 5.0
    opt = pyDioOption(40.0, 700, 2.0, period, 4)

    if noise_coeff < 1:
        noise_str = str(noise_coeff).split('.')[-1]
    else:
        noise_str = str(noise_coeff).split('.')[0]
    f0, time_axis = dio(x, fs, period, opt)

    f0_by_dio = copy.deepcopy(f0)
    f0 = stonemask(x, fs, period, time_axis, f0)
    spectrogram = star(x, fs, period, time_axis, f0)
    spectrogram = cheaptrick(x, fs, period, time_axis, f0)
    residual = platinum(x, fs, period, time_axis, f0, spectrogram)
    old_spectrogram = np.copy(spectrogram)
    plt.matshow(old_spectrogram, cmap="gray")
    plt.title("Before %s noise" % noise_str)
    plt.savefig("before_%s.png" % noise_str)
    random_state = np.random.RandomState(1999)
    spectrogram += noise_coeff * np.abs(random_state.randn(*spectrogram.shape))
    residual += noise_coeff * np.abs(random_state.randn(*residual.shape))
    y = synthesis(fs, period, f0, spectrogram, residual, len(x))
    ys = synthesis(fs, period, f0, old_spectrogram, residual, len(x))
    wavfile.write("y_%s.wav" % noise_str, fs, soundsc(y))
    wavfile.write("y_no_noise.wav", fs, soundsc(ys))
    plt.clf()
    plt.plot(soundsc(ys), label='orig')
    plt.plot(soundsc(y), label='noisy', color='red')
    plt.title("Comparison of time series with %s noise" % noise_str)
    plt.legend()
    plt.savefig("comparison_%s.png" % noise_str)
def makeConfMat(estClasses, gtClasses, outFilename, numClasses = None, plotLabels = False):
   #If not defined, find number of unique numbers in gtClasses
   if numClasses is None:
      numClasses = len(np.unique(gtClasses))

   #X axis is est, y axis is gt
   #First index is y, second is x
   confMat = np.zeros((numClasses, numClasses))
   numInstances = len(estClasses)
   for (gtIdx, estIdx) in zip(gtClasses.astype(int), estClasses.astype(int)):
      confMat[gtIdx, estIdx]  += 1

   plt.matshow(confMat)
   plt.colorbar()
   plt.xlabel("Est class")
   plt.ylabel("True class")
   plt.title("Confusion matrix")
   ax = plt.gca()
   ax.xaxis.set_ticks_position('bottom')

   #Plot labels for each field
   if plotLabels:
      for i in range(numClasses):
         for j in range(numClasses):
            labelStr = generateStatString(confMat, i, j)
            #text receives x, y coord of plot
            ax.text(j, i, labelStr, fontweight='bold',
                  horizontalalignment='center', verticalalignment='center',
                  bbox={'facecolor':'white'})

   #plt.show()
   plt.savefig(outFilename)
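# generateStatString is called above but not shown in this snippet; a plausible
# stand-in (an assumption, not the original implementation) that formats the raw
# count and its share of the true-class row:
def generateStatString(confMat, i, j):
   rowTotal = confMat[i, :].sum()
   pct = 100.0 * confMat[i, j] / rowTotal if rowTotal else 0.0
   return "%d\n%.1f%%" % (confMat[i, j], pct)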
Example #21
    def plot(self):
        '''
        geo.plot()

        Returns plot of raster data
        '''
        plt.matshow(self.raster)
def confusion(y,y_auto,l,set,method,plot=False,output=False):
  """
  Computes the confusion matrix
  """
  from sklearn.metrics import confusion_matrix
  cmat = confusion_matrix(y.values[:,0],y_auto)
  cmat = np.array(cmat,dtype=float)
  for i in range(cmat.shape[0]):
    cmat[i,:] = cmat[i,:]*100./len(np.where(y.values[:,0]==i)[0])

  if plot:
    plt.matshow(cmat,cmap=plt.cm.gray_r)
    for i in range(cmat.shape[0]):
      for j in range(cmat.shape[0]):
        if cmat[j,i] >= np.max(cmat)/2. or cmat[j,i] > 50:
          col = 'w'
        else:
          col = 'k'
        if cmat.shape[0] <= 4:
          plt.text(i,j,"%.2f"%cmat[j,i],color=col)
        else:
          plt.text(i,j,"%d"%np.around(cmat[j,i]),color=col)
    plt.title('%s set - %s'%(set,method.upper()))
    plt.xlabel('Prediction')
    plt.ylabel('Observation')
    if len(l) <= 4:
      plt.xticks(range(len(l)),l)
      plt.yticks(range(len(l)),l)
  if output:
    return cmat
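# Usage sketch with invented toy data; `y` must be a DataFrame because the function
# reads y.values[:,0]:
import numpy as np
import pandas as pd

y_true = pd.DataFrame({'label': [0, 0, 1, 1, 2]})
y_pred = np.array([0, 1, 1, 1, 2])
cmat = confusion(y_true, y_pred, ['a', 'b', 'c'], 'test', 'svm', plot=True, output=True)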
Example #23
def compare_autoencoder_outputs(imgs, model, indices=[0], img_dim=(28, 28)):
    pred = model.predict(imgs)
    for i in indices:
        tup = (imgs[i].reshape(img_dim), pred[i].reshape(img_dim))
        plt.matshow(tup[0])
        plt.matshow(tup[1])
    plt.show()
def verify_gradient(f, x, eps=1e-4, tol=1e-6, **kwargs):
    """
    Compares the numerical and analytical gradients.
    """
    fval, fgrad = f(x=x, **kwargs)
    ngrad = numerical_gradient(f=f, x=x, eps=eps, tol=tol, **kwargs)

    fgradnorm = numpy.sqrt(numpy.sum(fgrad**2))
    ngradnorm = numpy.sqrt(numpy.sum(ngrad**2))
    diffnorm = numpy.sqrt(numpy.sum((fgrad-ngrad)**2))
    plt.matshow(fgrad)
    plt.show()
    plt.matshow(ngrad)
    plt.show()
    if fgradnorm > 0 or ngradnorm > 0:
        norm = numpy.maximum(fgradnorm, ngradnorm)
        if not (diffnorm < tol or diffnorm/norm < tol):
            raise Exception("Numerical and analytical gradients "
                            "are different: %s != %s!" % (ngrad, fgrad))
    else:
        if not (diffnorm < tol):
            raise Exception("Numerical and analytical gradients "
                            "are different: %s != %s!" % (ngrad, fgrad))
    return True
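# numerical_gradient is referenced above but not defined in this snippet; a
# central-difference sketch of what it presumably computes (an assumption):
import numpy

def numerical_gradient(f, x, eps=1e-4, tol=1e-6, **kwargs):
    # tol is accepted only for signature compatibility with the call above
    grad = numpy.zeros(x.shape)
    for idx in numpy.ndindex(*x.shape):
        x[idx] += eps
        fplus, _ = f(x=x, **kwargs)
        x[idx] -= 2 * eps
        fminus, _ = f(x=x, **kwargs)
        x[idx] += eps  # restore the original entry
        grad[idx] = (fplus - fminus) / (2 * eps)
    return grad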
def coloc(dataR, dataG):
    #returns heatmap for colocalization based on the angle in the red- green value plot
    #regions with low intensity are filterd out

    if dataR.shape != dataG.shape:
        print('images must have same shape')
        return 0

    tol=0.02
    dataB = np.zeros(dataR.shape)
    dataB[...,0]=np.arctan(dataR[...,2]/dataG[...,1])  # arctan gives the angle in the red-green plane
    dataB[...,0]=np.where((dataB[...,0]-np.pi/2.)**2>tol,0,dataB[...,0])
    maskG=np.where(dataG[...,1]<np.mean(dataG[...,1]),0,dataG[...,1])
    maskR=np.where(dataR[...,2]<np.mean(dataR[...,2]),0,dataR[...,2])
    from matplotlib import pyplot
    #pyplot.matshow(maskR)
    #pyplot.show()
    #pyplot.matshow(maskG)
    #pyplot.show()

    dataB[...,0]=dataB[...,0]*maskG*maskR
    dataB=dataB*255/np.max(dataB)
    dataB = np.array(dataB, dtype=np.uint8)
    print(np.mean(dataB[...,0]))


    pyplot.matshow(dataB[...,0])
    pyplot.show()

    return dataB
Example #26
def show_confusion_matrix(X, y):
    """docstring for show_confusion_matrix"""

    print "show matrix..."
    # Split the data into a training set and a test set
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size=.1)

    print "running classifier...."
    # Run classifier
    classifier = svm.SVC()
    y_pred = classifier.fit(X_train, y_train).predict(X_test)

    print "compute confusion matrix..."
    # Compute confusion matrix
    cm = confusion_matrix(y_test, y_pred)

    cm_sum = np.sum(cm, axis=1).T

    cm_ratio = cm / cm_sum.astype(float)[:, np.newaxis]

    print(cm_ratio)
    print(cm)
    print(cm_sum)

    print("plot matrix...")
    # Show confusion matrix in a separate window
    plt.matshow(cm_ratio)
    plt.title('Confusion matrix')
    plt.colorbar()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()
Example #27
def test(training_file, testing_file):
    X,y = train.process_training_examples(training_file)
    X_test, y_test = train.process_training_examples(testing_file)

    lin_svm = train.train_linear_svm(X, y)
    linear_svm_accuracy = test_with(lin_svm, X_test, y_test)
    print("LinearSVM has classification accuracy of {}%".format(100 * linear_svm_accuracy))

    rbf_svm = train.train_rbf_svm(X, y)
    rbf_svm_accuracy = test_with(rbf_svm, X_test, y_test)
    print("RBF-SVM has classification accuracy of {}%".format(100 * rbf_svm_accuracy))

    nbc = train.train_naive_bayes(X, y)
    nb_accuracy = test_with(nbc, X_test, y_test)
    print("Multinomial Naive Bayes has classification accuracy of {}%".format(100 * nb_accuracy))

    lda = train.train_lda(X, y)
    lda_accuracy = test_with(lda, X_test, y_test)
    print("LDA has classification accuracy of {}%".format(100 * lda_accuracy))

    #Print SVM confusion matrix
    y_pred = lin_svm.predict(X_test)
    cm = confusion_matrix(y_test, y_pred)
    plt.matshow(cm)
    plt.title('Confusion matrix for SVM Classification of File Fragment Types')
    #plt.colorbar()
    plt.ylabel('True File Type')
    plt.xlabel('Predicted File Type')
    plt.show()
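# test_with is used above but not defined in this snippet; a plausible stand-in
# (an assumption) returning accuracy on held-out data:
def test_with(model, X_test, y_test):
    return model.score(X_test, y_test)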
Example #28
def makeConfusionMatrix(n=100):
	# import some data to play with
	trainingdata = sio.loadmat('train.mat')
	X = np.swapaxes(trainingdata['train_images'].reshape(784,60000), 0, 1)
	y = np.array(trainingdata['train_labels']).transpose()[0]
	# Split the data into a training set and a test set
	X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=n/60000.0)

	# Run classifier
	classifier = svm.SVC(kernel='linear')
	y_pred = classifier.fit(X_train, y_train).predict(X_test)
	# Compute confusion matrix
	cm = confusion_matrix(y_test, y_pred)

	print(cm)

	# Show confusion matrix in a separate window
	plt.matshow(cm)
	plt.title('Confusion matrix')
	plt.colorbar()
	plt.ylabel('True label')
	plt.xlabel('Predicted label')
	plt.savefig('matrix'+str(n))
	plt.show()
	return
Example #29
 def calc_field_OneDistance(self, distance, n_z_points, temp_field=None,
                            plot_matrix=False):
     if temp_field is not None:
         old_field = [self.do_static, self.do_induction, self.do_radiation]
         self.set_fields(temp_field)
         
     Z_array, dz = np.linspace(0, self.channel_height, n_z_points,
                               retstep=True)
     
     min_t = self.min_starttime + distance/C
     max_t = self.max_endtime + np.sqrt(distance*distance +
                              self.channel_height*self.channel_height)/C
     n_t_points = int((max_t-min_t)/self.dt)+1
     
     T_array = min_t + np.arange(n_t_points)*self.dt
     
     field_matrix=np.zeros((n_t_points,n_z_points))
     
     for z_i in range(n_z_points):
         self.integrand(Z_array[z_i], distance, min_t, field_matrix[:,z_i])
         
     Es=-simps(field_matrix, dx=dz)/two_pi_e0
     
     if temp_field is not None:
         self.do_static, self.do_induction, self.do_radiation=old_field
         
     if plot_matrix:
         plt.matshow(-field_matrix/two_pi_e0)
         plt.colorbar()
         plt.show()
     
     return T_array, Es
def on_slic_superpixels():
    data = load_data('train', independent=True)
    probs = get_kraehenbuehl_pot_sp(data)
    results = eval_on_pixels(data, [np.argmax(prob, axis=-1) for prob in
                                    probs])
    plt.matshow(results['confusion'])
    plt.show()
Example #31
plt.gcf().suptitle('sampled')

scores = []
for itr in progprint_xrange(10):
    scores.append(model.meanfield_coordinate_descent_step())

plt.figure()
model.plot()
plt.gcf().suptitle('fit')

plt.figure()
plt.plot(scores)

# plt.show()

plt.matshow(
    np.vstack((
        np.tile(truemodel.states_list[0].stateseq, (1000, 1)),
        np.tile(model.states_list[0].stateseq, (1000, 1)),
    )))

# # model.resample_model()
# # s = model.states_list[0]
# # s.E_step()
# # s.meanfieldupdate()

# # plt.figure()
# # plt.plot(sum(stats[0].sum(1) for stats in s.subhmm_stats))

plt.show()
SOLVER.rank_adaptation_options['early_stopping_factor'] = 10

T0 = time.time()
F, OUTPUT = SOLVER.solve()
T1 = time.time()
print(T1 - T0)

# %% Display of the results
F_X_TEST = np.argmax(F(X_TEST), 1)
Y_TEST_NP = np.argmax(Y_TEST.numpy(), 1)

print('\nAccuracy = %2.5e\n' %
      (1 - np.count_nonzero(F_X_TEST - Y_TEST_NP) / Y_TEST_NP.shape[0]))

IMAGES_AND_PREDICTIONS = list(zip(DIGITS.images[TEST], F_X_TEST))
for i in np.arange(1, 19):
    plt.subplot(3, 6, i)
    plt.imshow(IMAGES_AND_PREDICTIONS[i][0],
               cmap=plt.cm.gray_r,
               interpolation='nearest')
    plt.axis('off')
    plt.title('Pred.: %i' % IMAGES_AND_PREDICTIONS[i][1])

print('Classification report:\n%s\n' %
      (metrics.classification_report(Y_TEST_NP, F_X_TEST)))
MATRIX = metrics.confusion_matrix(Y_TEST_NP, F_X_TEST)
plt.matshow(MATRIX)
plt.title('Confusion Matrix')
plt.show()
print('Confusion matrix:\n%s' % MATRIX)
Example #33
    for i in range(n_sources_target):
        plt.subplot(2, n_sources_target, i + 1)
        plt.specgram(ref[i, :, 0], NFFT=1024, Fs=room.fs)
        plt.title('Source {} (clean)'.format(i))

        plt.subplot(2, n_sources_target, i + n_sources_target + 1)
        plt.specgram(y_hat[:, i], NFFT=1024, Fs=room.fs)
        plt.title('Source {} (separated)'.format(i))

    plt.tight_layout(pad=0.5)

    #room.plot(img_order=0)

    if args.algo.startswith('blink'):
        plt.matshow(U_blinky.T, aspect='auto')
        plt.title('Blinky Data')
        plt.tight_layout(pad=0.5)
        plt.matshow(np.dot(R[:, :n_sources_target], G).T, aspect='auto')
        plt.title('NMF approx')
        plt.tight_layout(pad=0.5)

    plt.figure()
    a = np.array(SDR)
    b = np.array(SIR)
    for i, (sdr, sir) in enumerate(zip(a.T, b.T)):
        plt.plot(np.arange(a.shape[0]) * 10,
                 sdr,
                 label='SDR Source ' + str(i),
                 marker='*')
        plt.plot(np.arange(a.shape[0]) * 10,
                 sir,
                 label='SIR Source ' + str(i),
                 marker='o')

print('''
    Correlation < 0  =>  Negative Correlation
    Correlation > 0  =>  Positive Correlation

    Correlation near 0  =>  Weak
    Correlation near -1 or 1  =>  Strong
''')
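# corr_coeff is called below but not defined in this excerpt; a Pearson-correlation
# stand-in (an assumption about its intended behavior):
def corr_coeff(df, var1, var2):
    return df[var1].corr(df[var2])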

data_ads = pd.read_csv('./datasets/ads/Advertising.csv')
# print(data_ads.head())

cols = data_ads.columns.values

for x in cols:
    for y in cols:
        print(f'{x} - {y} => {str(corr_coeff(data_ads.copy(), x, y))}')

plt.plot(data_ads['TV'], data_ads['Sales'], 'ro')
plt.title('TV spend vs product sales')
plt.show()
plt.plot(data_ads['Radio'], data_ads['Sales'], 'go')
plt.title('Radio spend vs product sales')
plt.show()
plt.plot(data_ads['Newspaper'], data_ads['Sales'], 'bo')
plt.title('Newspaper spend vs product sales')
plt.show()

# corr() => pandas method that returns the pairwise correlation matrix of the columns
print(data_ads.corr())

plt.matshow(data_ads.corr())
plt.show()
Example #35
File: 5.py Project: Ermek-93/Python
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')

df = pd.DataFrame({'a': np.random.randint(0, 50, 1000)})
df['b'] = df['a'] + np.random.normal(0, 10,
                                     1000)  # positively correlated with 'a'
df['c'] = 100 - df['a'] + np.random.normal(
    0, 5, 1000)  # negatively correlated with 'a'
df['d'] = np.random.randint(0, 50, 1000)  # not correlated with 'a'
df.corr()

plt.matshow(df.corr())
plt.xticks(range(len(df.columns)), df.columns)
plt.yticks(range(len(df.columns)), df.columns)
plt.colorbar()
plt.show()
Example #36
               Y_true_errors)

# In[31]:

test_im = X_train[15]
plt.imshow(test_im.reshape(28, 28), cmap='viridis', interpolation='none')

# In[32]:

from keras import models
layer_outputs = [layer.output for layer in model.layers[:8]]
activation_model = models.Model(inputs=model.input, outputs=layer_outputs)
activations = activation_model.predict(test_im.reshape(1, 28, 28, 1))

first_layer_activation = activations[0]
plt.matshow(first_layer_activation[0, :, :, 4], cmap='viridis')

# In[33]:

model.layers[:-1]

# In[33]:

layer_names = []
for layer in model.layers[:-1]:
    layer_names.append(layer.name)
images_per_row = 16
for layer_name, layer_activation in zip(layer_names, activations):
    if layer_name.startswith('conv'):
        n_features = layer_activation.shape[-1]
        size = layer_activation.shape[1]
Example #37
with open('../data/ilsvrc12/det_synset_words.txt') as f:
    labels_df = pd.DataFrame([{
        'synset_id':
        l.strip().split(' ')[0],
        'name':
        ' '.join(l.strip().split(' ')[1:]).split(',')[0]
    } for l in f.readlines()])
labels_df = labels_df.sort_values('synset_id')  # DataFrame.sort was removed; sort_values returns a copy
predictions_df = pd.DataFrame(np.vstack(df.prediction.values),
                              columns=labels_df['name'])
print(predictions_df.iloc[0])
print "\n"

# Let's look at the activations.
plt.gray()
plt.matshow(predictions_df.values)
plt.xlabel('Classes')
plt.ylabel('Windows')
#plt.show()

# Now let's take max across all windows and plot the top classes.
print("Top classes")

max_s = predictions_df.max(0)
max_s = max_s.sort_values(ascending=False)  # Series.sort was removed from pandas
print(max_s[:10])
print("\n")

## We pick the top-scoring person and bicycle detections.
# Find, print, and display the top detections: person and bicycle.
i = predictions_df['person'].argmax()
bridged_mask = 1*(opened_mask + bridge > 0)

#plt.matshow(bridged_mask, cmap='Greys')
ly, lx = bridged_mask.shape
padded = np.zeros((ly+200, lx))
padded[:ly,:] = bridged_mask
np.save('hallprobemask.npy', padded[3:,5:-5])

mask = padded[3:,5:-5]

sample = rgba[230:290,335:435,1].copy()
sample[:,12] = 0
sample[:,-14] = 0
cut = sample[:,13:-14]
plt.matshow(cut)
scope_scale = 50./(cut.shape[1]) #micrometers

data = loadmat('../../../katja/im_HgTe_Inv_g_n1_061112_0447.mat')
scan = data['scans'][0][0][0][::-1,:,2]
ly, lx = scan.shape
imgspacing = [.16, .73]

mly, mlx = mask.shape
x, y = np.arange(mlx)*scope_scale, np.arange(mly)*scope_scale
xg, yg = np.meshgrid(x, y)

datax = np.arange(0, mlx*scope_scale, imgspacing[1])
datay = np.arange(0, mly*scope_scale, imgspacing[0])
dxg, dyg = np.meshgrid(datax, datay)
Example #39
cvs_clf_scaled = cross_val_score(sgd_clf,
                                 X_train_scaled,
                                 y_train,
                                 cv=3,
                                 scoring='accuracy')
print('Cross Validation Score_SGD_Scaled =\n{}\n\n'.format(cvs_clf_scaled))

#--------------------------------------------------------
# Error analysis with sklearn
# page 144
#--------------------------------------------------------
y_train_pred = cross_val_predict(sgd_clf, X_train_scaled, y_train, cv=3)
conf_mx = confusion_matrix(y_train, y_train_pred)
print('Confusion Matrix =\n{}\n\n'.format(conf_mx))

plt.matshow(conf_mx, cmap=plt.cm.gray)
plt.show()

row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums

np.fill_diagonal(norm_conf_mx, 0)
plt.matshow(norm_conf_mx, cmap=plt.cm.gray)
plt.show()

cl_a, cl_b = 3, 5
X_aa = X_train[(y_train == cl_a) & (y_train_pred == cl_a)]
X_ab = X_train[(y_train == cl_a) & (y_train_pred == cl_b)]
X_ba = X_train[(y_train == cl_b) & (y_train_pred == cl_a)]
X_bb = X_train[(y_train == cl_b) & (y_train_pred == cl_b)]
Example #40
def build_focused_beam(r_in=0.0,
                       r_out=0.05,
                       dx=0.001,
                       R0=0.07,
                       f=1e6,
                       rho=1e3,
                       c=1500,
                       Ampl=1):
    Nx = int(2 * np.round(r_out / dx))
    print(r_out, dx)
    print(Nx)
    if (-1)**Nx > 0:
        x_array = np.concatenate(
            [np.arange(Nx / 2 + 1.),
             np.arange(-Nx / 2 + 1., 0.)])
    elif (-1)**Nx < 0:
        x_array = np.concatenate(
            [np.arange((Nx + 1.) / 2),
             np.arange(-(Nx - 1.) / 2, 0.)])
    x_array = x_array[:, np.newaxis]
    x_array = dx * x_array.astype(float)
    y_array = x_array.copy()
    y_array = y_array.T
    r_array = np.sqrt(x_array**2 + y_array**2)
    r_array[0, 0] = 1e-12

    K_r1 = 0.5 * (np.sign(r_array - r_in + 1.124e-10) + 1)
    K_r2 = 0.5 * (np.sign(r_out - r_array + 1.123e-10) + 1)
    K_12 = K_r1 * K_r2
    h_surf = (R0 - ((K_r2 * (R0**2 - r_array**2))**(1 / 2))) * K_r2
    Pressure = K_12 * Ampl

    N2 = int(np.round(Nx))

    if (-1)**N2 > 0:
        x_surf = np.concatenate([np.arange(-N2 / 2 + 1., N2 / 2 + 1.)])
    elif (-1)**N2 < 0:
        x_surf = np.concatenate([np.arange(-(N2 - 1.) / 2, (N2 + 1.) / 2)])
    x_surf = x_surf[:, np.newaxis]
    x_surf = dx * x_surf.astype(float)
    y_surf = x_surf.copy()
    y_surf = y_surf.T
    z_surf = R0 * 0.9
    p_flat = np.zeros((2 * int(N2), 2 * int(N2))) * (0. + 1j * 0.)
    n_z = np.sqrt(K_12 * (1 - (x_array**2 + y_array**2) / R0**2))

    def Rayleigh(x_ar, y_ar, z_ar, norm, Pr, f, c, dx, x0, y0, z0):
        r = np.sqrt((x0 - x_ar)**2 + (y0 - y_ar)**2 + (z0 - z_ar)**2)
        p_comp = -1j * f / c * Pr * dx**2 / (norm + 1e-13) * np.exp(
            1j * 2 * np.pi * f / c * r) / r
        p_nm = np.sum(np.sum(p_comp))
        return p_nm

    x_surf_test = dx * np.arange(-N2 / 2 + 1., N2 / 2 + 1.)
    x_surf_test = np.array([x_surf_test] * len(x_surf_test))
    y_surf_test = x_surf_test.T
    z_surf_test = x_surf_test * 0 + R0 * 0.9

    shape = x_surf_test.shape
    x_test = np.asarray(x_surf_test).reshape(-1)
    y_test = np.asarray(y_surf_test).reshape(-1)
    z_test = np.asarray(z_surf_test).reshape(-1)

    result = [
        Rayleigh(x_array, y_array, h_surf, n_z, Pressure, f, c, dx, x, y, z)
        for x, y, z in zip(x_test, y_test, z_test)
    ]
    p_trans = np.reshape(result, shape)

    p_flat = np.zeros((int(2 * N2), int(2 * N2)))
    p_flat[int(N2 / 2):int(3 * N2 / 2), int(N2 / 2):int(3 * N2 / 2)] = p_trans
    figure1 = plt.figure(figsize=(5, 5))
    m_z = p_flat.shape[0]
    m_x = p_flat.shape[1]
    extent = [-N2 * dx, N2 * dx, -N2 * dx, N2 * dx]

    print(np.abs(p_flat).shape)
    print(np.abs(p_flat))
    print(figure1.number)
    im1 = plt.matshow(np.abs(p_flat),
                      fignum=figure1.number,
                      extent=extent,
                      aspect='auto')
    plt.colorbar()

    return figure1, p_flat, z_surf
Example #41
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 27 13:19:34 2015

@author: matsumi
"""

import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits

# main entry point
if __name__ == '__main__':
    digits = load_digits(n_class=2)
    images = digits.images
    plt.matshow(images[4], cmap=plt.cm.gray)
    plt.show()

    # load the dataset
    X = digits.data
    t = digits.target
    t[t == 0] = -1
    num_examples = len(X)

    # define rho (try rho = 0.5 first; change the value if the iteration does not converge)
    rho = 0.5
    # maximum number of iterations
    max_iteration = 100

    # 1. define w (a random array of 64 elements)
    w = np.random.randn(64)
Example #42
# Plot the accuracy curves
plt.figure(figsize=(6, 6))
plt.plot(train_acc, 'bo')
plt.plot(test_acc, 'rx')

# Look at the final testing confusion matrix
pred = np.argmax(y.eval(feed_dict={
    x: test.reshape([-1, 1296]),
    y_: onehot_test
}),
                 axis=1)
conf = np.zeros([5, 5])
for p, t in zip(pred, np.argmax(onehot_test, axis=1)):
    conf[t, p] += 1

plt.matshow(conf)
plt.colorbar()

# Let's look at a subplot of some weights
f, plts = plt.subplots(4, 8, sharex=True)
for i in range(32):
    plts[i // 8, i % 8].matshow(W1.eval()[:, i].reshape([36, 36]))

# Examine the output weights
plt.matshow(W3.eval())
plt.colorbar()

# Save the weights
saver = tf.train.Saver()
saver.save(sess, "mpl.ckpt")
# Perform the standardization process
titanic_data_train = scaler.transform(titanic_data_train)
titanic_data_test = scaler.transform(titanic_data_test)

# Assess your model using a confusion matrix and 
# a classification report. 
nnclass1 = MLPClassifier(activation='relu', solver='sgd', 
                         hidden_layer_sizes=(30, 30))
nnclass1.fit(titanic_data_train, titanic_y_train)
nnclass1_pred = nnclass1.predict(titanic_data_test)
nnclass1_pred

cm = metrics.confusion_matrix(titanic_y_test, nnclass1_pred)
print(cm)
plt.matshow(cm)
plt.title('Confusion Matrix')
plt.xlabel('Predicted Value')
plt.ylabel('Actual Value')
plt.xticks([0, 1], ['Died','Survived'])
plt.yticks([0, 1], ['Died','Survived'])

print(metrics.classification_report(titanic_y_test, nnclass1_pred))


# Compare ANN Classification with the Classification Tree
## Create Classification Tree for titanic_data_train, 
# using min_samples_split=5 and min_samples_leaf=5

titanic_data['Class']=titanic_data['Class'].astype(np.int64)
titanic_data['Sex']=titanic_data['Sex'].astype(np.int64)
Example #44
    if ans > 0:
        flag = 0
        predict_test_list.append(flag)
    else:
        flag = 1
        predict_test_list.append(flag)
y_predict = np.array(predict_test_list)
n_correct = 0  # avoid shadowing the built-in sum()
scores_list = []

for j in range(5):
    for i in range(y_predict.shape[0]):
        if y_predict[i] == y_test[i]:
            n_correct = n_correct + 1
    score = n_correct / y_predict.shape[0]
    n_correct = 0
    scores_list.append(score)
scores = np.array(scores_list)
print("accuracy", np.mean(scores), scores)

cm = confusion_matrix(y_test, y_predict)  # don't shadow the sklearn function name
print(cm)
plt.matshow(cm)
plt.rcParams['font.sans-serif'] = ['SimHei']  # default font; SimHei is a CJK-capable font
plt.title('Confusion matrix')
plt.colorbar()
plt.ylabel('Actual class')
plt.xlabel('Predicted class')
plt.savefig('./average4.png')
plt.show()
Example #45
def heat_rna(fl, genelist, field, libfile):

    data = read.read_dat(fl)
    data = data[:-1]
    if [''] in data:
        data.remove([''])  # lists have no .delete(); remove the empty row
    lib = []
    lib2 = []
    gene = []
    for i in data:
        try:
            if i[2] not in gene:
                gene.append(i[2])
        except:
            print(i)

    libstable = read.read_dat(libfile, '\t')
    for i in data:
        try:
            if i[0] + '-' + i[1] not in lib:
                lib.append(i[0] + '-' + i[1])
                lib2.append(i[0] + '-' + i[1])

            if len(libfile) > 0:
                for ID in libstable:
                    if i[0] in ID:
                        lib2[-1] = ID[0] + '-' + ID[field]
                        break
        except:
            print(i)

    pheno = []
    pnum = -1.5
    hist = ''
    for i in lib:
        j = i.split('-')
        if j[1] != hist:
            pnum += 1
        pheno.append(pnum)
        hist = j[1]
        #print hist,pnum

    mat = np.zeros((len(gene), len(lib)), float)
    for i in data:
        mat[gene.index(i[2]), lib.index(i[0] + '-' + i[1])] = float(i[3])
    proj = np.dot(mat, pheno)
    for i in range(len(proj)):
        norm = np.sqrt(np.dot(mat[i, :], mat[i, :]))
        if norm != 0.:
            proj[i] /= norm
    #proj=np.array(proj)
    inds = proj.argsort()
    mat = mat[inds]
    gene = np.array(gene)
    gene = gene[inds]
    inds = np.any(mat != 0, axis=1)
    mat = mat[inds]
    gene = np.array(gene)
    gene = gene[inds]
    #for i in mat:
    #  if np.mean(abs(i))==0:
    #   print i
    nm = norm_max(mat)
    #for i in nm:
    #  if np.mean(abs(i))==0:
    #  print i
    genex = gene
    if len(genelist) > 0:
        lb = ens_genes(gene, genelist)
        genex = lb
    else:
        return nm, lib2, gene, genex, lib

    b = plt.matshow(nm, aspect='auto', cmap='RdYlBu')
    if len(libfile) > 0:
        plt.xticks(range(len(lib2)), lib2)
    else:
        plt.xticks(range(len(lib)), lib)
    plt.colorbar()
    plt.xticks(rotation=90)
    plt.yticks(range(len(genex)), genex)

    plt.yticks(fontsize=8)
    mytemplate(nm)
    plt.xlabel(lb)
    lb = fl.split('/')[-1][:-5]

    return nm, lib2, gene, genex, lib
Example #46
  saver.restore(sess, "/ltmp/e2c-plane-single1.ckpt")

  # test to make sure samples are actually trajectories

  def getimgs(x):
    padsize=1
    padval=.5
    ph=B+2*padsize
    pw=A+2*padsize
    img=np.ones((ph,len(x)*pw))*padval
    for t in range(len(x)):
      startc=t*pw+padsize
      img[padsize:padsize+B, startc:startc+A]=x[t][20,:].reshape((A,B))
    return img
  (x_vals,u_vals)=dataset.sample_seq(batch_size,T)
  plt.matshow(getimgs(x_vals),cmap=plt.cm.gray,vmin=0,vmax=1)
  plt.show()

  train_iters=2e5 # 200K iters
  for i in range(int(train_iters)):
    (x_vals,u_vals)=dataset.sample_seq(batch_size,T,replace=False)
    feed_dict={}
    for t in range(T):
      feed_dict[xs[t]] = x_vals[t]
    for t in range(T-1):
      feed_dict[us[t]] = u_vals[t]

    results=sess.run([loss,all_summaries,train_op],feed_dict)
    if i%1000==0:
      print("iter=%d : Loss: %f" % (i,results[0]))
      if i>2000:
Example #47
#FSA basis
# P+P on even sites
Ipe = Hamiltonian(pxp,pxp_syms)
Ipe.site_ops[1] = np.array([[0,1,0],[0,0,0],[0,0,0]])
Ipe.model = np.array([[0,1,0]])
Ipe.model_coef = np.array([1])
Ipe.gen(parity=1)
Imo = Hamiltonian(pxp,pxp_syms)
Imo.site_ops[1] = np.array([[0,0,0],[1,0,0],[0,0,0]])
Imo.model = np.array([[0,1,0]])
Imo.model_coef = np.array([1])
Imo.gen(parity=0)
Ip = H_operations.add(Ipe,Imo,np.array([1,-1]))
Ip = Ip.sector.matrix()
Im = np.conj(np.transpose(Ip))
plt.matshow(np.abs(Ip))
plt.show()

Kpe = Hamiltonian(pxp,pxp_syms)
Kpe.site_ops[1] = np.array([[0,0,1],[0,0,0],[0,0,0]])
Kpe.model = np.array([[0,1,0]])
Kpe.model_coef = np.array([1])
Kpe.gen(parity=1)
Kmo = Hamiltonian(pxp,pxp_syms)
Kmo.site_ops[1] = np.array([[0,0,0],[0,0,0],[1,0,0]])
Kmo.model = np.array([[0,1,0]])
Kmo.model_coef = np.array([1])
Kmo.gen(parity=0)
Kp = H_operations.add(Kpe,Kmo,np.array([1,-1]))
Kp = Kp.sector.matrix()
Km = np.conj(np.transpose(Kp))
plt.show()

## Plot Time Vs. Pressure
plt.title('Pressure Vs. Time')
plt.plot(no_flag.utc_time, no_flag.pressure)
plt.xlabel('Epoch time')  
plt.ylabel('Pressure(decibar)')
plt.savefig('C:/Users/User/Desktop/for_public_github/time_pressure.png')
plt.show()

## A zoomed in plot of Time Vs. Pressure, to observe some of the finer structure
plt.title('Pressure Vs. Time')
plt.plot(no_flag.utc_time, no_flag.pressure)
plt.xlim(1552100000.785, 1552107530.785)
plt.xlabel('Epoch time')  
plt.ylabel('Pressure(decibar)')
plt.savefig('C:/Users/User/Desktop/for_public_github/cut_time_pressure.png')
plt.show()

## Plot a pearson correlation matrix for all the numerical data
corr_matrix = no_flag.corr()
plt.matshow(corr_matrix)
plt.title('Pearson Correlation matrix', pad=80)
plt.xticks(np.arange(7),('utc_time', 'compass_head', 'pitch', 'pressure', 'roll', 'sound_spd', 'temp'), rotation=70)
plt.yticks(np.arange(7),('utc_time', 'compass_head', 'pitch', 'pressure', 'roll', 'sound_spd', 'temp'))
plt.savefig('C:/Users/User/Desktop/for_public_github/corr.png')
plt.show()



Example #49
print("n_features: ", X.shape[1])

from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier

param_grid = {'max_depth': [3, 5, 8, 10, 15, 20, 30],
              'max_features': [4, 8, 16, 20, 25, 40]}
grid = GridSearchCV(RandomForestClassifier(),
                    param_grid=param_grid)

# use [::10] to subsample by a factor of 10 for impatience
# could also have used StratifiedShuffleSplit(train_size=.1)
grid.fit(X_train[::10], y_train[::10])

res = pd.DataFrame(grid.cv_results_)
print(res.keys())
res_piv = pd.pivot_table(
    res, values='mean_test_score', index='param_max_depth',
    columns='param_max_features')

display(res_piv)

import matplotlib.pyplot as plt
%matplotlib inline
plt.matshow(res_piv.values)
plt.xlabel(res_piv.columns.name)
plt.xticks(range(res_piv.shape[1]), res_piv.columns)
plt.ylabel(res_piv.index.name)
plt.yticks(range(res_piv.shape[0]), res_piv.index)
plt.colorbar()
Example #50
def show_matrix(mat):
    plt.matshow(mat, fignum=1)
    # plt.imshow(image)
    plt.draw()
    plt.waitforbuttonpress()
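# Quick usage sketch (toy matrix invented for illustration; blocks until a button press):
import numpy as np
show_matrix(np.random.rand(10, 10))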
    
all_predictions1 = aspect_detector1.predict(test_tfidf1)
print( all_predictions1 )

#MultinomialNB accuracy
print( 'accuracy', accuracy_score(train_Aspects, all_predictions2))
print( 'confusion matrix\n', confusion_matrix(train_Aspects, all_predictions2))
print( '(row=expected, col=predicted)')
#BernoulliNB accuracy
print( 'accuracy', accuracy_score(train_Aspects, all_predictions3))
print( 'confusion matrix\n', confusion_matrix(train_Aspects, all_predictions3))
print( '(row=expected, col=predicted)')

#here I am choosing Multinomial Naive Bayes because MultinomialNB(98%) is greater than BernoulliNB(95%) accuracy.

plt.matshow(confusion_matrix(train_Aspects, all_predictions2), cmap=plt.cm.binary, interpolation='nearest')
plt.title('confusion matrix')
plt.colorbar()
plt.ylabel('expected Aspect')
plt.xlabel('predicted Aspect')                                                             

print(classification_report(train_Aspects, all_predictions2))
#for test data
print( 'accuracy', accuracy_score(test_Aspects, all_predictions))
print( 'confusion matrix\n', confusion_matrix(test_Aspects, all_predictions))
print( '(row=expected, col=predicted)')

print(classification_report(test_Aspects, all_predictions))

######################
reviews_cv = count_vectors.transform(reviews['words'])
Example #52
plt.show()


def gaussian(x):
    return math.exp(-x * x)


A = np.zeros((1000, 1000))

for i in range(0, 1000):
    for j in range(0, 1000):
        dist = sqrt((points[i, 0] - points[j, 0])**2 +
                    (points[i, 1] - points[j, 1])**2)
        A[i, j] = gaussian(dist)

plt.matshow(A)
plt.colorbar()
plt.show()

X = clustering.clustering(A, 3, 0.1)

for i in range(0, 1000):
    if (X[i, 0] == 1):
        plt.plot(points[i, 0], points[i, 1], 'bo')
    elif (X[i, 1] == 1):
        plt.plot(points[i, 0], points[i, 1], 'ro')
    elif (X[i, 2] == 1):
        plt.plot(points[i, 0], points[i, 1], 'go')

plt.show()
Example #53
    def toyData(self):

        #        Data representation
        #        Everything is a numpy array (or a scipy sparse matrix)!

        digits = load_digits()

        #         print the shape of the images.

        print("images shape: %s" % str(digits.images.shape))
        print("targets shape: %s" % str(digits.target.shape))

        #     plot the array

        plt.matshow(digits.images[0], cmap='gray')
        #  plt.show();

        digits.target

        # prepare the data

        X = digits.data.reshape(-1, 64)

        print("data of x")
        print(X.shape)

        y = digits.target
        print(y.shape)

        # We have 1797 data points, each an 8x8 image -> 64 dimensional vector.

        #X.shape is always (n_samples, n_feature)

        print(X)

        # Principal Component Analysis (PCA)

        # nstantiate the model. Set parameters.
        pca = PCA(n_components=2)

        #Fit the model.
        pca.fit(X)

        # Apply the model. For embeddings / decompositions, this is transform.

        X_pca = pca.transform(X, None)
        X_pca.shape

        plt.figure(figsize=(16, 10))

        plt.scatter(X_pca[:, 0], X_pca[:, 1], c=y)

        plt.show()

        # print the pca mean and component

        print(pca.mean_.shape)
        print(pca.components_.shape)

        fix, ax = plt.subplots(1, 3)
        ax[0].matshow(pca.mean_.reshape(8, 8), cmap='gray')
        ax[1].matshow(pca.components_[0, :].reshape(8, 8), cmap='gray')
        ax[2].matshow(pca.components_[1, :].reshape(8, 8), cmap='gray')

        #Isomap

        # Instantiate the model. Set parameters.

        isomap = Isomap(n_components=2, n_neighbors=20)
             

    evalues, evectors = np.linalg.eigh(hn)
    if count == kindex:
        hngood = np.copy(hn)
        evalue = evalues[edgest].real
        state0 = evectors[:, edgest]
        statek = [k1]
        stateeval = [evalue]
    for i in range(len(evalues)):
        evalue = evalues[i].real
        e.append(evalue)
    count += 1

if showmatrix:
    plt.matshow(hn.real)

defects = []

#make X which along the diagonal goes 1,2,...,2m,1,2,...,2m,1,2,...
#make Y which along the diagonal goes 1,1,...,2,2,...,2m,2m,...
X = np.zeros((size,size), complex)
Y = np.zeros((size,size), complex)
for i in range(size):
    X[i,i] = i%(2*m) + 1
    Y[i,i] = i//(2*m) + 1  # integer division, matching the 1,1,...,2,2,... pattern described above

h, defects = makeH(m,n,va1,va0,t,t20,t21,ppos,topology=0,defect=0)

pseudospectra(0,50,X,Y,h)
'''I think we'll be able to turn down epsilon once we use the lattice
clf = MultinomialNB()
clf.fit(X_train, data_train['final_senti_vote'])

# Testing the Naive Bayes model
X_test = count_vect.transform(data_test['text'])
y_test = data_test['final_senti_vote']
y_predicted = clf.predict(X_test)
print("Accuracy of Naive Bayes: %.2f" % accuracy_score(y_test, y_predicted))

# Reporting on classification performance
#print("Accuracy: %.2f" % accuracy_score(y_test,y_predicted))
classes = [0, 1]
cnf_matrix = confusion_matrix(y_test, y_predicted, labels=classes)
print("Confusion matrix Naive Bayes:")
print(cnf_matrix)
plt.matshow(cnf_matrix, cmap=plt.cm.binary, interpolation='nearest')
plt.title('confusion matrix Naive Bayes')
plt.colorbar()
plt.ylabel('expected label-Naive Bayes')
plt.xlabel('predicted label-Naive Bayes')
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes)
plt.yticks(tick_marks, classes)
plt.show()

fpr, tpr, threshold = sklearn.metrics.roc_curve(y_test, y_predicted)
roc_auc = sklearn.metrics.auc(fpr, tpr)
plt.title('Receiver Operating Characteristic Naive Bayes')
plt.plot(fpr, tpr, 'b', label='AUC_Naive_Bayes = %0.2f' % roc_auc)
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
Example #56
# get the indices where data is 1
#x,y = np.argwhere(array == 1).T
#
#plt.scatter(x,y)
#plt.show()

linearray = sum_array
#linearray = np.array(linearray)
#for row in range(len(sum_array)):
for col in reversed(range(len(sum_array[0]))):
    if col % 4 == 0:
        linearray = np.insert(linearray, col, values=3, axis=1)
#print col, linearray[0], linearray[1]

f2 = open('line.bin', 'w+')
print(linearray, file=f2)
f2.close()

import matplotlib as mpl  # matplotlib.mpl was removed; import the package itself
from matplotlib import pyplot as plt
# make a color map of fixed colors
cmap = mpl.colors.ListedColormap(['blue', 'green', 'red', 'black'])
#bounds=[-1,0.9,1.9,2.9,4]
bounds = [0, 1, 2, 3]
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)

#plt.figure(figsize=(10,10))
#plt.matshow(sum_array)
plt.matshow(linearray, cmap=cmap)
plt.savefig('books_read.png')  # save before show(); after show() the figure may be empty
plt.show()
Example #57
import pywt
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(300)
y = np.sin(2*np.pi*x*90)
coef, freqs=pywt.cwt(y,np.arange(1,300),'gaus1',2)
#print(coef)
print(freqs)
plt.matshow(coef) # doctest: +SKIP
plt.show() # doctest: +SKIP
Example #58
def warp_circle_to_rect(img, verbose=True):
    def warp_quadrant(a):
        # map each (i, j) to polar-style coordinates (u, v), clipped at index 255
        na = np.zeros(a.shape)
        for i in range(1, a.shape[0]):
            for j in range(1, a.shape[1]):
                r = np.sqrt(i**2 + j**2)
                if i**2 >= j**2:
                    u, v = int(r), int((j / i) * r)
                else:
                    u, v = int((i / j) * r), int(r)
                na[min(u, 255), min(v, 255)] = a[i, j]
        return na

    # the four quadrants receive identical treatment
    a0, a1, a2, a3 = split(img)
    na0, na1, na2, na3 = (warp_quadrant(a) for a in (a0, a1, a2, a3))

    img3 = stitch_back(na0, na1, na2, na3, False)
    if verbose:
        plt.matshow(img3, cmap='gray')
        plt.suptitle('Circle to rectangle')
        plt.show()
    return img3
Example #59
print ('y_train dimensions: ', y_train.shape)
print ('X_test dimensions: ', X_test.shape)
print ('y_test dimensions: ', y_test.shape)


model = NearestCentroid().fit(X_train,y_train.ravel())


y_train_pred = model.predict(X_train) 
print("Training Data prediction: \n",y_train_pred)
print("Training Data ground truth: \n",y_train.ravel())
matrix = metrics.confusion_matrix(y_train, y_train_pred)
print(matrix)
accuracy = round((accuracy_score(y_train,y_train_pred))*100,2)
print("Accuracy for training dataset: ", accuracy,"%")
plt.matshow(matrix)
plt.title('Confusion Matrix for Train Data')
plt.colorbar()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
y_test_pred = model.predict(X_test)
print("Testing Data Predicton: \n", y_test_pred)
print("Testing Data Ground Truth: \n", y_test.ravel())

matrix_test = confusion_matrix(y_test, y_test_pred)
print(matrix_test)
accuracy_test = (accuracy_score(y_test, y_test_pred))*100

print("Accuracy for Testing Dataset: ", accuracy_test,"%")
Example #60
import sklearn
from sklearn.model_selection import GridSearchCV
data = pd.read_csv('data.csv')
data.info()
data.isna().sum()
data.dropna(axis=0, inplace=True)
data.isna().sum()
data.hist(bins=30, figsize=(14, 16))
plt.show()
sns.boxplot(x=data['oldpeak'])
i = data[((data.oldpeak >= 6))].index
data.drop(i, inplace=True)
sns.boxplot(x=data['oldpeak'])
rcParams['figure.figsize'] = 10, 15
rcParams["figure.dpi"] = 100
plt.matshow(data.corr())
plt.yticks(np.arange(data.shape[1]), data.columns)
plt.xticks(np.arange(data.shape[1]), data.columns)
plt.colorbar()
data['target'].unique()
rcParams['figure.figsize'] = 6, 6
plt.bar(data['target'].unique(),
        data['target'].value_counts(),
        color=['red', 'green'])
plt.xticks([0, 1])
plt.xlabel('Target Classes')
plt.ylabel('Count')
plt.title('Count of each Target Class')
y = data['target']
X = data.drop(['target'], axis=1)
X_train, X_test, y_train, y_test = train_test_split(X,