Example #1
def matrixMontage(spcomps,*args, **kwargs):
    numcomps, width, height=spcomps.shape
    rowcols=int(np.ceil(np.sqrt(numcomps)));           
    for k,comp in enumerate(spcomps):        
        plt.subplot(rowcols,rowcols,k+1)       
        plt.imshow(comp,*args, **kwargs)                             
        plt.axis('off')         
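
A minimal usage sketch for matrixMontage (assuming numpy and matplotlib.pyplot are imported as np and plt, as in the snippet): build a small stack of random 2-D components and let the function lay them out on a square grid.

# Usage sketch for matrixMontage; the random data is only for illustration.
import numpy as np
import matplotlib.pyplot as plt

spcomps = np.random.rand(6, 32, 32)   # 6 spatial components of 32x32 pixels
matrixMontage(spcomps, cmap='gray')   # extra args/kwargs are forwarded to plt.imshow
plt.show()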
    def imshow_vy(self):
        v_dense = self.v_dense.cpu
        # vmin, vmax = v_dense.min(), v_dense.max()
        vmax = np.max(np.abs(v_dense))
        vmin = -vmax
        vy = v_dense[:, 1].reshape(self.nRows, self.nCols)
        plt.imshow(vy.copy(), vmin=vmin, vmax=vmax, interpolation="Nearest")
Example #3
def visual_results(Image_data, preds, Labels=None, Top=0):
    from pylab import plt
    pred_age_value = preds['age']
    pred_gender_value = preds['gender']
    pred_smile_value = preds['smile']
    pred_glass_value = preds['glass']
    Num = Image_data.shape[0] if Top == 0 else Top

    for k in range(Num):
        print(k, Num)
        plt.figure(1)
        plt.imshow(de_preprocess_image(Image_data[k]))
        title_str = 'Prediction: Age %0.1f, %s, %s, %s.' % (
            pred_age_value[k], gender_list[pred_gender_value[k]],
            glass_list[pred_glass_value[k]], smile_list[pred_smile_value[k]])
        x_label_str = 'GT: '
        try:
            x_label_str = x_label_str + 'Age %0.1f' % Labels['age'][k]
        except:
            pass
        try:
            x_label_str = x_label_str + '%s, %s, %s' % (gender_list[int(
                Labels['gender'][k])], glass_list[int(
                    Labels['glass'][k])], smile_list[int(Labels['smile'][k])])
        except:
            pass

        plt.title(title_str)
        plt.xlabel(x_label_str)
        plt.show()
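
visual_results relies on module-level label lists (gender_list, glass_list, smile_list) and a de_preprocess_image helper that are not shown above; below is a hypothetical, self-contained setup just to illustrate the expected shapes. All names in it are stand-ins, not the project's real definitions.

# Hypothetical inputs for visual_results; assumes everything lives in one script.
import numpy as np

gender_list = ['female', 'male']
glass_list = ['no glasses', 'glasses']
smile_list = ['no smile', 'smile']

def de_preprocess_image(img):
    # stand-in: assume the images are already in displayable [0, 1] range
    return img

Image_data = np.random.rand(2, 64, 64, 3)
preds = {'age': np.array([23.5, 41.2]),
         'gender': np.array([0, 1]),
         'smile': np.array([1, 0]),
         'glass': np.array([0, 1])}
Labels = {'age': np.array([25.0, 40.0]),
          'gender': np.array([0, 1]),
          'glass': np.array([0, 1]),
          'smile': np.array([1, 0])}
visual_results(Image_data, preds, Labels)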
Example #4
    def imshow_vy(self):
        v_dense = self.v_dense.cpu
#        vmin,vmax=v_dense.min(),v_dense.max()
        vmax = np.max(np.abs(v_dense))
        vmin = -vmax        
        vy=v_dense[:,1].reshape(self.nRows,self.nCols)         
        plt.imshow(vy.copy(),vmin=vmin,vmax=vmax,interpolation="Nearest")
Example #5
def matrixMontage(spcomps, *args, **kwargs):
    numcomps, width, height = spcomps.shape
    rowcols = int(np.ceil(np.sqrt(numcomps)))
    for k, comp in enumerate(spcomps):
        plt.subplot(rowcols, rowcols, k + 1)
        plt.imshow(comp, *args, **kwargs)
        plt.axis('off')
def save_images(images, path):
    fig = plt.figure()
    for i, image in enumerate(images):
        fig.add_subplot(1, len(images), i + 1)
        plt.imshow(image)
        plt.axis('off')
    plt.savefig(path, bbox_inches='tight')
    plt.close()
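
A quick sketch of calling save_images (the output filename is a placeholder):

# Usage sketch for save_images with a few random grayscale arrays.
import numpy as np

images = [np.random.rand(28, 28) for _ in range(4)]
save_images(images, 'montage.png')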
Example #7
    def imshow(self, name):
        '''
        Display a grayscale image.
        '''
        img = self.buffer2img(name)
        plt.imshow(img, cmap='gray')
        plt.axis('off')
        plt.show()
Example #8
    def imshow(self, name):
        '''
        Display a grayscale image.
        '''
        img = self.buffer2img(name)
        plt.imshow(img, cmap='gray')
        plt.axis('off')
        plt.show()
    def imshow_vx(self):
        v_dense = self.v_dense.cpu
        if v_dense is None:
            raise ValueError
#        vmin,vmax=v_dense.min(),v_dense.max()
        vmax = np.max(np.abs(v_dense))
        vmin = -vmax
        vx = v_dense[:, 0].reshape(self.nRows, self.nCols)
        plt.imshow(vx.copy(), vmin=vmin, vmax=vmax, interpolation="Nearest")
Example #10
    def imshow_vx(self):        
        v_dense = self.v_dense.cpu
        if v_dense is None:
            raise ValueError
#        vmin,vmax=v_dense.min(),v_dense.max()
        vmax = np.max(np.abs(v_dense))
        vmin = -vmax
        vx=v_dense[:,0].reshape(self.nRows,self.nCols)         
        plt.imshow(vx.copy(),vmin=vmin,vmax=vmax,interpolation="Nearest")
    def plot_data(self):
        for i, dat in enumerate(self.data):
            plt.imshow(dat, cmap=cm.jet, interpolation=None, extent=[11, 22, -3, 2])
            txt = 'plot {}'.format(self.n[i])
            txt += '\nmin {0:.2f} and max {1:.2f}'.format(self.min[i], self.max[i])
            txt += '\navg. min {0:.2f} and avg. max {1:.2f}'.format(mean(self.min), mean(self.max))
            plt.suptitle(txt, x=0.5, y=0.98, ha='center', va='top', fontsize=10)
            plt.savefig(join(self.plotpath, 'pic_oo_' + str(self.n[i]) + '.png'))
            plt.close()  # important, otherwise successive plots pile up in the same figure
Example #12
    def detect(self, img):
        bboxes = None

        # pnet
        if not self.pnet:
            return None
        bboxes = self.detect_pnet(img)

        if bboxes is None:
            return None

        ## Visualize the PNet results
        if SHOW_FIGURE:
            plt.figure()
            tmp = img.copy()
            for i in bboxes:
                x0 = int(i[0])
                y0 = int(i[1])
                x1 = x0 + int(i[2])
                y1 = y0 + int(i[3])
                cv2.rectangle(tmp, (x0, y0), (x1, y1), (0, 0, 255), 2)
            plt.imshow(tmp[:, :, ::-1])
            plt.title("pnet result")

        # rnet
        if not self.rnet:
            return bboxes
        bboxes = bboxes[:, 0:4].astype(np.int32)
        bboxes = self.detect_ronet(img, bboxes, 24)

        if bboxes is None:
            return None

        ## Visualize the RNet results
        if SHOW_FIGURE:
            plt.figure()
            tmp = img.copy()
            for i in bboxes:
                x0 = int(i[0])
                y0 = int(i[1])
                x1 = x0 + int(i[2])
                y1 = y0 + int(i[3])
                cv2.rectangle(tmp, (x0, y0), (x1, y1), (0, 0, 255), 2)
            plt.imshow(tmp[:, :, ::-1])
            plt.title("rnet result")

        #onet
        if not self.onet:
            return bboxes
        bboxes = bboxes[:, 0:4].astype(np.int32)
        bboxes = self.detect_ronet(img, bboxes, 48)

        return bboxes
def plot_scenarios(scenarios):
    nrows = len(scenarios)
    fig = plt.figure(figsize=(24, nrows))
    n_plot = nrows
    plt.axis('off')
    # plot fake samples
    for iplot in range(nrows):
        for jplot in range(24):
            ax = plt.subplot(n_plot, 24, iplot * 24 + jplot + 1)
            if iplot == 0:
                ax.annotate(f'{jplot:02d}:00',
                            xy=(0.5, 1),
                            xytext=(0, 5),
                            xycoords='axes fraction',
                            textcoords='offset points',
                            size='large',
                            ha='center',
                            va='baseline')
            im = plt.imshow(scenarios[iplot, jplot - 1, :, :],
                            cmap=plt.cm.gist_earth_r,
                            norm=LogNorm(vmin=0.01, vmax=50))
            plt.axis('off')
    fig.subplots_adjust(right=0.93)
    cbar_ax = fig.add_axes([0.93, 0.15, 0.007, 0.7])
    cbar = fig.colorbar(im, cax=cbar_ax)
    cbar.set_label('fraction of daily precipitation', fontsize=16)
    cbar.ax.tick_params(labelsize=16)

    return fig
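
plot_scenarios expects an array shaped (n_scenarios, 24, height, width) and relies on matplotlib's LogNorm being available at module level; here is a minimal sketch with random data (values and sizes are arbitrary).

# Minimal sketch for plot_scenarios; LogNorm is assumed imported as in the snippet.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm

scenarios = np.random.uniform(0.01, 1.0, size=(3, 24, 16, 16))
fig = plot_scenarios(scenarios)
fig.savefig('scenarios.png')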
Example #14
def show_data(list_dat, num=4):    
    from pylab import plt
    for dat in np.random.choice(list_dat, num):
        print(dat)
        im=cv2.imread(dat['filepath'])[:,:,::-1]
        plt.figure(1)
        plt.imshow(im)
        for bbox in dat['bboxes']:
            plt.gca().add_patch(plt.Rectangle((bbox['x1'], bbox['y1']),
                      bbox['x2'] - bbox['x1'],
                      bbox['y2'] - bbox['y1'], fill=False,
                      edgecolor='red', linewidth=1) )
        for idx, bbox in enumerate(dat['bboxes']):
            ann = np.array(Image.open(bbox['ann_path']))
            if len(ann.shape)==3: ann = ann[:,:,0] # Make sure ann is a two dimensional np array. 
            plt.figure(11+idx)
            plt.imshow(ann)
        plt.show()
Example #15
def dynamic_img_show(img,title_str='',fig_size=[14,8],hide_axes=True):
    '''Show image <img>. If called repeatedly within a cycle will dynamically redraw image.
    #DEMO
    import time

    for i in range(10):
        img = np.zeros([50,50])
        img[:i*5]=1
        dynamic_img_show(img,'iter=%s'%i)
        time.sleep(0.1)
    '''
    plt.clf()
    plt.title(title_str)
    plt.imshow(img)
    plt.xticks([]); plt.yticks([]);
    plt.gcf().set_size_inches(fig_size)
    display.display(plt.gcf())
    display.clear_output(wait=True)   
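
dynamic_img_show depends on IPython's display module, so it is meant for a Jupyter/IPython session; below are the imports it appears to assume plus the demo loop from its own docstring.

# Assumed imports for dynamic_img_show (run inside a Jupyter notebook).
import time
import numpy as np
import matplotlib.pyplot as plt
from IPython import display

for i in range(10):
    img = np.zeros([50, 50])
    img[:i * 5] = 1
    dynamic_img_show(img, 'iter=%s' % i)
    time.sleep(0.1)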
Example #16
def show_prediction_result(image, label_image, clf):
    size = (8, 8)
    plt.figure(figsize=(15, 10))
    plt.imshow(image, cmap='gray_r')
    candidates = []
    predictions = []
    for region in regionprops(label_image):
        # skip small images
        #     if region.area < 100:
        #         continue
        # draw rectangle around segmented coins
        minr, minc, maxr, maxc = region.bbox
        # make regions square
        maxwidth = np.max([maxr - minr, maxc - minc])
        minr, maxr = int(0.5 * ((maxr + minr) - maxwidth)) - 3, int(0.5 * ((maxr + minr) + maxwidth)) + 3
        minc, maxc = int(0.5 * ((maxc + minc) - maxwidth)) - 3, int(0.5 * ((maxc + minc) + maxwidth)) + 3
        rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                  fill=False, edgecolor='red', linewidth=2, alpha=0.2)
        plt.gca().add_patch(rect)
        # predict digit
        candidate = image[minr:maxr, minc:maxc]
        candidate = np.array(imresize(candidate, size), dtype=float)
        # invert
        # candidate = np.max(candidate) - candidate
        #     print im
        # rescale to 16 in integer
        candidate = (candidate - np.min(candidate))
        if np.max(candidate) == 0:
            continue
        candidate /= np.max(candidate)
        candidate[candidate < 0.2] = 0.0
        candidate *= 16
        candidate = np.array(candidate, dtype=int)
        prediction = clf.predict(candidate.reshape(1, -1))
        candidates.append(candidate)
        predictions.append(prediction)
        plt.text(minc - 10, minr - 10, "{}".format(prediction), fontsize=50)
    plt.xticks([], [])
    plt.yticks([], [])
    plt.tight_layout()
    plt.show()
    return candidates, predictions
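
show_prediction_result needs scikit-image's regionprops, matplotlib.patches as mpatches, and the now-removed scipy.misc.imresize; here is a usage sketch that trains a classifier on scikit-learn's digits set and supplies a small imresize stand-in. The stand-in and the synthetic canvas are assumptions of this illustration, not part of the original project.

# Usage sketch for show_prediction_result; imresize here is a stand-in for
# the removed scipy.misc.imresize.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from skimage.measure import label, regionprops
from skimage.transform import resize
from sklearn import datasets, svm

def imresize(img, size):
    # approximate replacement for scipy.misc.imresize
    return resize(img, size, preserve_range=True, anti_aliasing=True)

digits = datasets.load_digits()
clf = svm.SVC(gamma=0.001).fit(digits.data, digits.target)

# paste one 8x8 digit onto a blank canvas and label its connected region
image = np.zeros((40, 40))
image[10:18, 10:18] = digits.images[0]
label_image = label(image > 0)
candidates, predictions = show_prediction_result(image, label_image, clf)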
Example #18
def show_data(list_dat, num=4):
    from pylab import plt
    for dat in np.random.choice(list_dat, num):
        print(dat)
        im = cv2.imread(dat['filepath'])[:, :, ::-1]
        plt.figure(1)
        plt.imshow(im)
        for bbox in dat['bboxes']:
            plt.gca().add_patch(
                plt.Rectangle((bbox['x1'], bbox['y1']),
                              bbox['x2'] - bbox['x1'],
                              bbox['y2'] - bbox['y1'],
                              fill=False,
                              edgecolor='red',
                              linewidth=1))
        for idx, bbox in enumerate(dat['bboxes']):
            ann = cv2.imread(bbox['ann_path'], cv2.IMREAD_GRAYSCALE)
            plt.figure(11 + idx)
            plt.imshow(ann)
        plt.show()
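
show_data expects a list of dicts, each with a 'filepath' and a list of 'bboxes' whose entries carry pixel coordinates plus an 'ann_path' pointing at a mask image; a hypothetical entry showing the structure (the paths are placeholders):

# Hypothetical list_dat structure for show_data; file paths are placeholders.
list_dat = [
    {
        'filepath': 'images/0001.jpg',
        'bboxes': [
            {'x1': 10, 'y1': 20, 'x2': 110, 'y2': 220,
             'ann_path': 'masks/0001_obj0.png'},
        ],
    },
]
show_data(list_dat, num=1)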
Example #19
def generate_word_cloud(text, no, name=None, show=True):
    ''' Generates a word cloud bitmap given a
        text document (string).
        It uses the Term Frequency (TF) and
        Inverse Document Frequency (IDF) 
        vectorization approach to derive the
        importance of a word -- represented
        by the size of the word in the word cloud.
        
    Parameters
    ==========
    text: str
        text as the basis
    no: int
        number of words to be included
    name: str
        path to save the image
    show: bool
        whether to show the generated image or not
    '''
    tokens = tokenize(text)
    vec = TfidfVectorizer(min_df=2,
                          analyzer='word',
                          ngram_range=(1, 2),
                          stop_words='english')
    vec.fit_transform(tokens)
    wc = pd.DataFrame({'words': vec.get_feature_names(), 'tfidf': vec.idf_})
    words = ' '.join(wc.sort_values('tfidf', ascending=True)['words'].head(no))
    wordcloud = WordCloud(max_font_size=110,
                          background_color='white',
                          width=1024,
                          height=768,
                          margin=10,
                          max_words=150).generate(words)
    if show:
        plt.figure(figsize=(10, 10))
        plt.imshow(wordcloud, interpolation='bilinear')
        plt.axis('off')
        plt.show()
    if name is not None:
        wordcloud.to_file(name)
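
generate_word_cloud additionally needs a tokenize helper (not shown), pandas, scikit-learn's TfidfVectorizer and the wordcloud package; below is a usage sketch with a naive stand-in tokenizer. Note that recent scikit-learn versions replace get_feature_names with get_feature_names_out.

# Usage sketch for generate_word_cloud; tokenize() below is a stand-in assumption.
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer
from wordcloud import WordCloud

def tokenize(text):
    # naive stand-in: treat each sentence as one "document"
    return [s.strip() for s in text.split('.') if s.strip()]

sample_text = ('Word clouds visualize term importance. '
               'Term frequency and inverse document frequency weight the terms. '
               'Frequent but uninformative terms get a low weight.')
generate_word_cloud(sample_text, no=20, name=None, show=True)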
Example #20
def pro_adjust_data(path_list, num_class, save_path):

    #    temp_path = path_list[0] + '/' + 'frame000.jpg'
    #    with open(temp_path, 'rb') as t:
    #        mask_shape = np.array(Image.open(t))
    #        trans_mask = np.zerps(mask_shape.shape)
    if os.path.exists(path_list[0][0]):
        for image in os.listdir(path_list[0][0]):

            #            temp_path = path_list[0][0] + '/' + image
            #            mask_shape = np.array(Image.open(temp_path))
            trans_mask = np.zeros(shape=[1080, 1920])

            #            temp0 = path_list[0][0] + '/' + image
            #            temp1 = path_list[1][0] + '/' + image
            #            temp2 = path_list[2][0] + '/' + image
            #            temp3 = path_list[3][0] + '/' + image

            #            mask0 = np.array(Image.open(temp0))
            #            mask1 = np.array(Image.open(temp1))
            #            mask2 = np.array(Image.open(temp2))
            #            mask3 = np.array(Image.open(temp3))

            for i in range(num_class):
                temp = path_list[i][0] + '/' + image
                mask = np.array(Image.open(temp))
                trans_mask[mask == 255] = color_dict[i]

            plt.imshow(trans_mask)

            f, e = os.path.splitext(image)
            save_dir = save_path + '/' + f + '.jpg'
            try:
                #                plt.imshow(trans_mask)
                cv2.imwrite(save_dir, trans_mask)
            except IOError:
                print("Fail to save the mask image to file")
    else:
        print("Input path is not exist.")
Example #21
def load_mnist(path, filename='mnist.pkl.gz', plot=True):
    """
    Loads the MNIST dataset. Downloads the data if it doesn't already exist.
    This code is adapted from the deeplearning.net tutorial on classifying
    MNIST data with Logistic Regression: http://deeplearning.net/tutorial/logreg.html#logreg
    :param path: (str) Path to where data lives or should be downloaded to
    :param filename: (str) name of mnist file to download or load
    :return: train_set, valid_set, test_set
    """
    dataset = '{}/{}'.format(path, filename)
    data_dir, data_file = os.path.split(dataset)

    if data_dir == "" and not os.path.isfile(dataset):
        new_path = os.path.join(os.path.split(__file__)[0], "..", "data", dataset)
        if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':
            dataset = new_path

    if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':
        import urllib.request

        origin = ('http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz')
        print('Downloading data from {}'.format(origin))
        urllib.request.urlretrieve(origin, dataset)

    print('... loading data')
    import pickle
    f = gzip.open(dataset, 'rb')
    # the MNIST pickle was created with Python 2, hence the latin1 encoding
    train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
    f.close()

    X_train = train_set[0]
    y_train = train_set[1]
    if plot:
        for k in range(25):
            plt.subplot(5, 5, k + 1)
            plt.imshow(np.reshape(X_train[k, :], (28, 28)))
            plt.axis('off')
            plt.title(y_train[k])

    return train_set, valid_set, test_set
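
Calling load_mnist is then a one-liner; '.' below is just a placeholder cache directory.

# Usage sketch for load_mnist; downloads mnist.pkl.gz into the given directory.
train_set, valid_set, test_set = load_mnist('.', plot=True)
print(train_set[0].shape, train_set[1].shape)
plt.show()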
Example #22
def affichage(matrice):
    """permet d'afficher le terrain uniquement"""
    img = []
    for j in matrice:
        temp = []
        for k in j:
            i = 0
            for e in k:
                if e > i:
                    i = e
            if i == 0:
                temp.append((51, 153, 0))  # green
            elif i == 1:
                temp.append((255, 255, 0))  # yellow
            elif i == 2:
                temp.append((204, 153, 51))  # brown
            elif i == 3:
                temp.append((128, 0, 128))  # purple
            elif i == 4:
                temp.append((192, 192, 192))  # gray
        img.append(temp)
    plt.imshow(img)
    plt.show()
    return
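
affichage walks a 3-D structure (rows x columns x layers), takes the per-cell maximum over the layers (values 0-4) and maps it to a colour; a sketch with random data:

# Usage sketch for affichage with a random 3-layer terrain (values 0..4).
import numpy as np

matrice = np.random.randint(0, 5, size=(20, 20, 3))
affichage(matrice)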
Example #23
def show_paf(img,paf,stride = 5,thres=0.1):
    """
    @param img: ndarry, HxWx3
    @param paf: ndarry, HxWxN
    """
    paf = transform.rescale(paf,img.shape[0]/paf.shape[0])
    h,w,n = paf.shape
    mask  = (paf**2).reshape(h,w,n/2,2).sum(axis=3).sum(axis=2)<thres
    paf[mask] = 0
    
#     img_size = img.shape[:2]
    X,Y = np.meshgrid(np.arange(0,w),np.arange(0,h))
    
    plt.imshow(img, alpha=0.5)
    res = plt.quiver( X[::stride,::stride],
                Y[::stride,::stride],
                -paf[::stride,::stride,::2].sum(axis=2),
                paf[::stride,::stride,1::2].sum(axis=2),            
                scale=20,
                units='width', headaxislength=0.01,
                    alpha=.8,
                    width=0.001,
                    color='r')
    return  res
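
show_paf overlays a part-affinity field on an image as a quiver plot, with the PAF channels interleaved as (x, y) pairs; a sketch with random inputs (shapes, stride and threshold are arbitrary; skimage's transform module is assumed imported as in the function).

# Usage sketch for show_paf with random data; assumes skimage.transform is
# available as `transform`, as in the function above.
import numpy as np
import matplotlib.pyplot as plt
from skimage import transform

img = np.random.rand(128, 128, 3)
paf = np.random.randn(32, 32, 4) * 0.1   # random field, channels in (x, y) pairs
show_paf(img, paf, stride=8, thres=0.05)
plt.show()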
        # we batch all n_fake_per_real together
        cond_batch = np.repeat(cond[np.newaxis],
                               repeats=n_fake_per_real,
                               axis=0)
        generated = gen.predict([latent, cond_batch])

        # make a matrix of map plots.
        # first column: condition (daily mean), the same for every row
        # first row: real fractions per hour
        # rest of the rows: generated fractions per hour, 1 row per realization
        fig = plt.figure(figsize=(25, 12))
        n_plot = n_fake_per_real + 1
        ax = plt.subplot(n_plot, 25, 1)
        # compute unnormalized daily sum. squeeze away empty channel dimension (for plotting)
        dsum = cond.squeeze() * norm_scale
        plt.imshow(dsum, cmap=cmap, norm=plotnorm)
        plt.axis('off')
        ax.annotate('real',
                    xy=(0, 0.5),
                    xytext=(-5, 0),
                    xycoords='axes fraction',
                    textcoords='offset points',
                    size='large',
                    ha='right',
                    va='center',
                    rotation='vertical')
        ax.annotate(f'daily sum',
                    xy=(0.5, 1),
                    xytext=(0, 5),
                    xycoords='axes fraction',
                    textcoords='offset points',
# Discriminator determines validity
valid = critic([img, label])
# Defines generator model
generator_model = tf.keras.Model([z_gen, label], valid)
generator_model.compile(loss=wasserstein_loss, optimizer=optimizer)
print('finished building networks')

# plot some real samples
# plot a couple of samples
plt.figure(figsize=(25, 25))
n_plot = 30
[X_real, cond_real] = next(generate_real_samples(n_plot))
for i in range(n_plot):
    plt.subplot(n_plot, 25, i * 25 + 1)
    plt.imshow(cond_real[i, :, :].squeeze(),
               cmap=plt.cm.gist_earth_r,
               norm=LogNorm(vmin=0.01, vmax=1))
    plt.axis('off')
    for j in range(1, 24):
        plt.subplot(n_plot, 25, i * 25 + j + 1)
        plt.imshow(X_real[i, j, :, :].squeeze(),
                   vmin=0,
                   vmax=1,
                   cmap=plt.cm.hot_r)
        plt.axis('off')
plt.colorbar()
plt.savefig(f'{plotdir}/real_samples.{plot_format}')

hist = {'d_loss': [], 'g_loss': []}
print(f'start training on {n_samples} samples')
Example #26
    if SHOW_FIGURE:
        if bboxes is not None:
            plt.figure()
            tmp = img.copy()
            for i in bboxes:
                print(i)
                x0 = int(i[0])
                y0 = int(i[1])
                x1 = x0 + int(i[2])
                y1 = y0 + int(i[3])
                cv2.rectangle(tmp, (x0, y0), (x1, y1), (0, 0, 255), 2)
                for ii in range(5):
                    x = int(i[5+ii*2])
                    y = int(i[5+ii*2+1])
                    cv2.circle(tmp,(x ,y), 3, (0,0,255),-1) 
            plt.imshow(tmp[:, :, ::-1])
            plt.title("result")
            cv2.imwrite("../img/res_tiny_face_lmk.jpg",tmp)
        plt.show()

    '''
    fp = "~/dataset/GC_FACE_VAL"
    fp = os.path.expanduser(fp)
    txt = "file_list.txt"
    lines = []
    with open(os.path.join(fp, txt)) as f:
        lines = f.readlines()

    f = open("res.txt", "w")
    for i in lines:
        j = i.strip()
Example #27
    return scale_dict


if __name__ == "__main__":

    d = np.linspace(1e4, 1e5, 100)
    data = np.sin(np.outer(d, d)) * 16384
    noise = np.random.random((100,100)) / 1000
    data = data + noise
    scale_dict = quinoa_scale(data, q=0.001)
    
    data_unscaled = quinoa_unscale(scale_dict)
    
    print(scale_dict)
    print(data_unscaled)
    
    plt.figure()
    plt.subplot(2,2,1)
    plt.imshow(data)
    plt.colorbar()
    plt.subplot(2,2,2)
    plt.imshow(scale_dict["data"])
    plt.colorbar()
    plt.subplot(2,2,3)
    plt.imshow(data_unscaled)
    plt.colorbar()
    plt.subplot(2,2,4)
    plt.imshow(np.abs(data - data_unscaled) / np.max(data) * 100)
    plt.colorbar()
    plt.show()
Example #28
    def imshow_matplotlib(self, *args, **kwargs):
        plt.imshow(self, *args, interpolation='nearest', **kwargs)
Example #29
        xx = xx.astype(np.float64)
        yy = yy.astype(np.float64)
       
        dimx = float(dimx)
        dimy=float(dimy)        
        nTimesInX = np.floor(xx / M).max() + 1
 
        seg_cpu = np.floor(yy / M)  * nTimesInX + np.floor(xx / M)
        seg_cpu = seg_cpu.astype(np.int32)
        return seg_cpu


def random_permute_seg(seg):
    p=np.random.permutation(seg.max()+1)   
    seg2 = np.zeros_like(seg)
    for c in range(seg.max()+1):             
        seg2[seg==c]=p[c]
    return seg2.astype(np.int32)


if __name__ == "__main__":  
    tic = time.clock()
    seg= get_init_seg(500, 500,17,True)      
#    seg= get_init_seg(512, 512,50,False)  
    toc = time.clock()
    print toc-tic 
    print 'k = ', seg.max()+1
    plt.figure(1)
    plt.clf()  
    plt.imshow(seg,interpolation="Nearest")
    plt.axis('scaled') 
Example #30
def runAnalysis( caseDirs , resultsDir , noReweight = False):
    
    # Do a reference for each one
    for refDir in caseDirs:

        # ID of reference case
        refID = refDir.split("/")[-1]
        
        # User info
        print "Doing PCA analysis with "+refDir+" as reference"
        
        # Get the PCA limits of component 1-2 plot
        limit = 10
        with open(refDir+"/analysis/data/pca_limits_1", "r") as fi:
            limit = int(float(fi.read()))
            limit += 0.01
        
        # Go through the case dirs to plot
        for caseDir in caseDirs:
            
            print "Using "+caseDir+" as case"
            
            # ID of case
            caseID = caseDir.split("/")[-1]
            
            ## PCA PLOTTING ON REF DIR PCA COMPONENTS
            #########################################
            
            # Create & run cpptraj for plotting all cases on the axes of the first eigenvector
            # Good URLs for PCA in CPPTRAJ:
            # http://archive.ambermd.org/201404/0243.html
                        
            # PCA plotter
            pcaHandler = pcaFuncs.PCA( 
                resultsDir+"/plots/pcaComparison/PCA_"+caseID+"_on_"+refID+".pdf"
            )    
            
            # Create new submission file
            TEMPLATE = open( caseDir+"/ccptraj_analysis_pca.ptraj", 'r')
            TEMP = TEMPLATE.read().replace("[PCAREFERENCE]", refDir  )
            TEMPLATE.close()
                                  
            # Write the submission file
            FILE = open(caseDir+"/ccptraj_analysis_pca.ptraj","w");        
            FILE.write( TEMP );
            FILE.close();
            
            # Run the cpptraj utility
            os.system( "$AMBERHOME/bin/cpptraj -p "+caseDir+"/md-files/peptide_nowat.prmtop -i "+caseDir+"/ccptraj_analysis_pca.ptraj" )
        
            # Do the plots of energy landscapes & distributions
            pcaHandler.plotPCA( 
                "Case: "+caseID+". Ref case: "+refID,   # Plot Title
                caseDir+"/analysis/data/" ,        # Data Dir
                "global_pca",                      # Eigenvector file
                eigenVectorCount = 2,              # Only plot two
                plotDistibution = False,           # Do not plot the distribution
                limits = limit
            )
            
            # Save the plot
            pcaHandler.savePlot()
            
            ## REWEIGHTING OF PCA PLOTS ON RED DIR PCA COMPONENTS
            #####################################################

            # Check if we should do a reweighted version
            if noReweight == False:
                if os.path.isfile( caseDir+"/md-logs/weights.dat" ):
                    
                    # User info
                    print "aMD weights found. Now attempting 2D reweighting"   
                    
                    # Prepare input file
                    numLines = 0
                    with open(caseDir+"/analysis/data/global_pca", "r") as fi:
                        with open(caseDir+"/analysis/data/global_pca_singleColumn", "w") as fo:
                            next(fi)
                            for line in fi:
                                numLines += 1
                                fo.write( line.split()[1]+"\t"+line.split()[2]+"\n" )

                    # Set the discretization
                    reqBins = 100         
                    discretization = (2*limit) / reqBins    
                    
                    # Get the max value of normal plot
                    maxValue = math.ceil(pcaHandler.getLatestMax())
                    
                    # Run the reweighting procedure
                    command = "python $PLMDHOME/src/PyReweighting/PyReweighting-2D.py \
                                -input "+caseDir+"/analysis/data/global_pca_singleColumn \
                                -name "+caseDir+"/analysis/data/global_pca_singleColumn_reweighted \
                                -Xdim -"+str(limit)+" "+str(limit)+" \
                                -Ydim -"+str(limit)+" "+str(limit)+" \
                                -discX "+str(discretization)+" \
                                -discY "+str(discretization)+" \
                                -cutoff 10 \
                                -Emax "+str(maxValue)+" \
                                -job amdweight_CE \
                                -weight "+refDir+"/md-logs/weights.dat | tee -a reweight_variable.log"
                    print "Running command:", command
                    os.system( command )
                    
                    # Create long file for PCA module
                    with open(caseDir+"/analysis/data/global_pca_reweightedDone", "w") as fo:
                        with open(caseDir+"/analysis/data/global_pca_singleColumn_reweighted-pmf-c2.dat", "r") as fi:
                            frame = 0
                            for line in fi:
                                temp = line.split()
                                entries = int(float(temp[2])*10)
                                for i in range(0,entries):
                                    fo.write( str(frame) + "\t" + temp[0] + "\t" + temp[1] +"\n" )
                                    frame += 1

                    # Print block analysis
                    fig, ax = plt.subplots(figsize=(8, 8), nrows=1, ncols=1 )
                    font = {'weight' : 'normal','size' : 10}
                    plt.rc('font', **font)
                    
                    # Now plot the 2d histogram
                    hist = np.load(caseDir+"/analysis/data/global_pca_singleColumn_reweighted_c2EnergyHist.npy")   
                    xedges = np.load(caseDir+"/analysis/data/global_pca_singleColumn_reweighted_c2edgesX.npy")   
                    yedges = np.load(caseDir+"/analysis/data/global_pca_singleColumn_reweighted_c2edgesY.npy")   
                    
                    # Remove points above limit
                    for jy in range(len(hist[0,:])):
                        for jx in range(len(hist[:,0])):
                            if hist[jx,jy] >= maxValue:
                                hist[jx,jy] = float("inf")
                    
                    # Do plot
                    img = plt.imshow(hist.transpose(),  interpolation='nearest', origin='lower',extent=[yedges[0], yedges[-1],xedges[0], xedges[-1]] , rasterized=True )
                    
                    # create an axes on the right side of ax. The width of cax will be 5%
                    # of ax and the padding between cax and ax will be fixed at 0.05 inch.
                    divider = make_axes_locatable(ax)
                    cax = divider.append_axes("right", size="5%", pad=0.05)   
                    
                    # Create colorbar
                    colorbar = plt.colorbar(img, ax=ax, cax = cax)
                    colorbar.set_label("Kcal / mol")
                    
                    # Set title, labels etc
                    plt.legend()
                    ax.set_xlabel("PC1", fontsize=12)
                    ax.set_ylabel("PC2", fontsize=12)
                    
                    ax.set_title( "PCA. Case: "+caseID+" Reweighted. Ref case: "+refID )
                    plt.rc('font', **font) 
                    
                    # Save figure
                    fig.savefig(resultsDir+"/plots/pcaComparison/PCA_"+caseID+"_on_"+refID+"_reweighted.pdf")
                    

            ## CLUSTER PLOTS ON PCA COMPONENTS
            ##################################

            # Do both hier and dbscan
            for clusterType in ["dbscan","hier"]:            
                
                # Instantiate the class
                if os.path.isfile(caseDir+"/analysis/data/cluster_"+clusterType+"_out"):   
                    
                    print "Doing the "+clusterType+" cluster equivalent of the PCA plot"
                
                    # Start the cluster handler. Load the file declaring cluster for each frame
                    clusterHandler = cluster.clusterBase( caseDir+"/analysis/data/cluster_"+clusterType+"_out" )
                    
                    # Separate the dataset.
                    # global_pca is the projection file for this case on the ref modes
                    numPCAdataSets = clusterHandler.separateDataSet( 
                        caseDir+"/analysis/data/global_pca",            # Input file
                        caseDir+"/analysis/data/cluster_"+clusterType+"_pca_",   # Output files
                        xColumn = 1
                    ) 
                    
                    # Create lists of labels and files for plotting
                    clusterLabels = []
                    clusterFiles = []
                    offset = 1 if clusterType == "hier" else 0
                    for i in range( 0+offset, numPCAdataSets+offset):
                        clusterLabels.append( "Cluster "+str(i) )
                        clusterFiles.append( caseDir+"/analysis/data/cluster_"+clusterType+"_pca_d2_c"+str(i) )
                    
                    # First one is noise
                    if offset == 0:
                        clusterLabels[0] = "Noise"                 
                    
                    myPlot.plotData( 
                        resultsDir+"/plots/pcaComparison/" , 
                        clusterType+"_"+caseID+"_on_"+refID, 
                        clusterLabels, 
                        clusterFiles , 
                        "PC2",
                        xUnit = "PC1",
                        scatter = True,
                        legendLoc = 4,
                        figWidth = 8,
                        figHeight = 8,
                        tightXlimits = False,
                        legendFrame = 1,
                        legendAlpha = 1,
                        xLimits = [-limit,limit],
                        yLimits = [-limit,limit]
                    )
def example(tess='I',base=[2,2,2],nLevels=1,
            zero_v_across_bdry=[True]*3,
            vol_preserve=False,
           nRows=100, nCols=100,nSlices=100,
           use_mayavi=False,
           eval_v=False,
           eval_cell_idx=False):  
     
    tw = TransformWrapper(nRows=nRows,
                          nCols=nCols,
                          nSlices=nSlices,
                          nLevels=nLevels,  
                          base=base,
                          zero_v_across_bdry=zero_v_across_bdry,
                          tess=tess,
                          valid_outside=False,
                          only_local=False,
                          vol_preserve=vol_preserve)
     
     
    print_iterable(tw.ms.L_cpa_space)
    print(tw)
    
    # create some fake 3D image.
    img = np.zeros((nCols,nRows,nSlices),dtype=np.float64)
    
#    img[:]=np.random.random_integers(0,255,img.shape)
    
    # Fill the image with the x coordinates as fake values
    img[:]=tw.pts_src_dense.cpu[:,0].reshape(img.shape)
    
    img0 = CpuGpuArray(img.copy().astype(np.float64))
    img_wrapped_fwd= CpuGpuArray.zeros_like(img0)
    img_wrapped_inv= CpuGpuArray.zeros_like(img0)
    
     
    seed=0
    np.random.seed(seed)    
    
                  
    ms_Avees=tw.get_zeros_PA_all_levels()
    ms_theta=tw.get_zeros_theta_all_levels() 
    
    
    if tess == 'II' :        
        for level in range(tw.ms.nLevels): 
            cpa_space = tw.ms.L_cpa_space[level]  
            Avees = ms_Avees[level]    
#            1/0
            if level==0:
                tw.sample_gaussian(level,ms_Avees[level],ms_theta[level],mu=None)# zero mean
#                ms_theta[level].fill(0)
#                ms_theta[level][-4]=10
                cpa_space.theta2Avees(theta=ms_theta[level],Avees=Avees)
            else:
                tw.sample_from_the_ms_prior_coarse2fine_one_level(ms_Avees,ms_theta,
                                                                    level_fine=level)
    else:
        # For tess='I' in 3D, I have yet to implement the coarse-to-fine sampling.
        for level in range(tw.ms.nLevels): 
            cpa_space = tw.ms.L_cpa_space[level]
            velTess = cpa_space.zeros_velTess()
            ms_Avees[level].fill(0)
            Avees = ms_Avees[level]
            tw.sample_gaussian_velTess(level,Avees,velTess,mu=None)
    
       
    
    
    print('img shape:', img0.shape)
   
   
    # You don't have to use these. You can use any 2d array
    # that has 3 columns (regardless of the number of rows).   
    pts_src = tw.pts_src_dense       
    pts_src=CpuGpuArray(pts_src.cpu[::1].copy())
	
    # Create a buffer for the output
    pts_fwd = CpuGpuArray.zeros_like(pts_src) 
    pts_inv = CpuGpuArray.zeros_like(pts_src)  
   
   
    for level in range(tw.ms.nLevels):              
        tw.update_pat_from_Avees(ms_Avees[level],level) 
        
         
        if eval_v:
            # Evaluating the velocity field. 
            # You don't have to do it unless you want to visualize v.
            # (when evaluating the transformation, v will be internally 
            # evaluated anyway -- but its result won't be stored)
            tw.calc_v(level=level) 
        
        
        print('level', level)
        print()
        print('number of points:', len(pts_src))
        print('number of cells:', tw.ms.L_cpa_space[level].nC)
        
        
        
        # optional, if you want to time it
        timer_gpu_T_fwd = GpuTimer()           
        
        # Simply calling 
        #   tic = time.perf_counter()
        # and then 
        #   toc = time.perf_counter()
        # won't work.
        # In fact, most likely you will get that toc-tic is zero.
        # You need to use the GpuTimer object. When you do that, 
        # one side effect is that suddenly the toc-tic from above will
        # give you a more realistic result.
        
        
        tic = time.perf_counter()
        timer_gpu_T_fwd.tic()
        tw.calc_T_fwd(pts_src,pts_fwd,level=level)
        timer_gpu_T_fwd.toc()
        toc = time.perf_counter()
        

        print('Time, in sec, for computing T_fwd:')
        print(timer_gpu_T_fwd.secs)
        print(toc - tic)  # likely to be 0, unless you also used the GpuTimer.
        
        # You can also time the inv of course. Results will be similar.
        tw.calc_T_inv(pts_src,pts_inv,level=level)   
 
        
       
        if eval_cell_idx:   
            # cell_idx is computed here just for display. 
            cell_idx = CpuGpuArray.zeros(len(pts_src),dtype=np.int32)
            tw.calc_cell_idx(pts_src,cell_idx,level)
    
        tw.remap_fwd(pts_inv,img0,img_wrapped_fwd)
        tw.remap_inv(pts_fwd,img0,img_wrapped_inv)
        
         
    
        # For display purposes, do gpu2cpu transfer
        print "For display purposes, do gpu2cpu transfer"

        if eval_cell_idx:
            cell_idx.gpu2cpu()
        if eval_v:
            tw.v_dense.gpu2cpu() 
        pts_fwd.gpu2cpu()
        pts_inv.gpu2cpu()
        img_wrapped_fwd.gpu2cpu()
        img_wrapped_inv.gpu2cpu()
        
         
    
       
       
    
    
        if use_mayavi:
            ds=1 # downsampling factor
            i= 17
            pts_src_grid = pts_src.cpu.reshape(tw.nRows,tw.nCols,-1,3)
            pts_src_ds=pts_src_grid[::ds,::ds,i].reshape(-1,3)
            pts_fwd_grid = pts_fwd.cpu.reshape(tw.nRows,tw.nCols,-1,3)
            pts_fwd_ds=pts_fwd_grid[::ds,::ds,i].reshape(-1,3)
            pts_inv_grid = pts_inv.cpu.reshape(tw.nRows,tw.nCols,-1,3)
            pts_inv_ds=pts_inv_grid[::ds,::ds,i].reshape(-1,3)
        
        
            from of.my_mayavi import *
            mayavi_mlab_close_all()
            mayavi_mlab_figure_bgwhite('src')
            x,y,z=pts_src_ds.T
            mayavi_mlab_plot3d(x,y,z)
            mayavi_mlab_figure_bgwhite('fwd')
            x,y,z=pts_fwd_ds.T
            mayavi_mlab_plot3d(x,y,z)    
         
        figsize = (12,12)
        plt.figure(figsize=figsize)               
        i= 17 # some slice
        plt.subplot(131)
        plt.imshow(img0.cpu[:,:,i].astype(np.uint8),interpolation="Nearest")  
        plt.title('slice from img')
        plt.subplot(132)
        plt.imshow(img_wrapped_fwd.cpu[:,:,i].astype(np.uint8),interpolation="Nearest")  
        plt.axis('off') 
        plt.title('slice from fwd(img)')
        plt.subplot(133)
        plt.imshow(img_wrapped_inv.cpu[:,:,i].astype(np.uint8),interpolation="Nearest")    
        plt.axis('off') 
        plt.title('slice from inv(img)')
        
    
    if 0: # debug    
        
        cpa_space=tw.ms.L_cpa_space[level]
        if eval_v:
            vx=tw.v_dense.cpu[:,0].reshape(cpa_space.x_dense_grid_img.shape[1:])
            vy=tw.v_dense.cpu[:,1].reshape(cpa_space.x_dense_grid_img.shape[1:])
            vz=tw.v_dense.cpu[:,2].reshape(cpa_space.x_dense_grid_img.shape[1:])
        
        
            plt.figure()
            plt.imshow(vz[:,:,17],interpolation="Nearest");plt.colorbar()
            plt.title('vz in some slice')
     
    return tw
    stdx = 9
    stdy = 3

    sint = positionToIntensityUncertainty(img, stdx, stdy)

    # CASE2: variable position uncertainty:
    # x,y 0...15
    stdx2 = np.fromfunction(lambda x, y: x * y, img.shape)
    stdx2 /= stdx2[-1, -1] / 9
    stdy2 = np.fromfunction(lambda x, y: x * y, img.shape)
    stdy2 /= stdy2[-1, -1] / 9
    stdy2 = stdy2[::-1, ::-1]  # flip content twice

    sint2 = positionToIntensityUncertainty(img, stdx2, stdy2, 21)

    if 'no_window' not in sys.argv:
        plt.figure('input')
        plt.imshow(img)
        plt.colorbar()

        plt.figure('output for const. position uncertainty (x%s,y%s)' %
                   (stdx, stdy))
        plt.imshow(sint)
        plt.colorbar()

        plt.figure('output for var. position uncertainty 0...15')
        plt.imshow(sint2)
        plt.colorbar()

        plt.show()
Example #33
            mayavi_mlab_figure_bgwhite('src')
            mayavi_mlab_set_parallel_projection(True)
            mayavi_mlab_figure_bgwhite('transformed')
            mayavi_mlab_clf()
            mayavi_mlab_set_parallel_projection(True)
            x0,y0,z0=pts.cpu.T
            
            mayavi_mlab_figure('src')
            points3d(x0,y0,z0,scale_factor=5,color=red)
            x1,y1,z1=pts_transformed.cpu.T
            mayavi_mlab_figure('transformed')
            points3d(x1,y1,z1,scale_factor=5,color=blue)


    


#
    if 0:
        plt.close('all')
        for c in range(3):
            plt.figure(c+1)
            for i in range( min(21,pts_grid[0].shape[2])):
                plt.subplot(3,7,i+1)
                plt.imshow(v[:,c].reshape(pts_grid[0].shape)[:,:,i],
                           interpolation='Nearest',vmin=v.min(),vmax=v.max());
                           #plt.colorbar()
    print(cpa_space._calcs_gpu.kernel_filename)
    if computer.has_good_gpu_card:
        input("raw_input:")
Example #34
                                       
    
    
    
#    v_at_trajs_full = np.zeros_like(trajs_full)     
#    for _pts,_v in zip(trajs_full,v_at_trajs_full):
#        cpa_space.calc_v(pat=pat, pts=_pts, out=_v)
    
    pts_grid=cpa_space.x_dense_grid_img
#    pts_grid = np.asarray([xx,yy]).copy() 
    grid_shape = pts_grid[0].shape
                               
    fig = plt.figure()       
    plt.subplot(234)
#    plt.imshow(cell_idx.reshape(Ny,Nx))
    plt.imshow(cell_idx.cpu.reshape(grid_shape))
    
    plt.subplot(231)
    scale=[2*30,1.5*4][vol_preserve]
    
    
    
    cpa_space.quiver(pts_grid,v_dense,scale, ds=16/2)          
    config_plt()
    
   
     
    
    plt.subplot(232)
    plt.imshow(v_dense.cpu[:,0].reshape(grid_shape),interpolation='Nearest',
               vmin=v_dense.cpu[:,:].min(),vmax=v_dense.cpu[:,:].max());plt.colorbar()
Example #35
    def showGrid(grid_):
        if not showPlots:
            return
        plt.imshow(grid_[..., 0], interpolation='nearest')
        plt.show()
    generated = np.array([
        downscale_spatiotemporal(dsum, alpha, beta, 24)
        for _ in range(n_fake_per_real)
    ])

    # now the same, but showing absolute precipitation fields
    # compute absolute precipitation from fraction of daily sum.
    # this can be done with numpy broadcasting.
    # we also have to multiply with norm_scale (because cond is normalized)
    generated_scaled = generated
    real_scaled = real
    fig = plt.figure(figsize=(25, 12))
    # plot real one
    ax = plt.subplot(n_plot, 25, 1)
    im = plt.imshow(dsum, cmap=cmap, norm=plotnorm)
    plt.axis('off')
    ax.annotate('real',
                xy=(0, 0.5),
                xytext=(-5, 0),
                xycoords='axes fraction',
                textcoords='offset points',
                size='large',
                ha='right',
                va='center',
                rotation='vertical')
    ax.annotate(f'daily sum',
                xy=(0.5, 1),
                xytext=(0, 5),
                xycoords='axes fraction',
                textcoords='offset points',
def train(n_epochs, _batch_size, start_epoch=0):
    """
        train with fixed batch_size for given epochs
        make some example plots and save model after each epoch
    """
    global batch_size
    batch_size = _batch_size
    # create a dataqueue with the keras facilities. this allows
    # to prepare the data in parallel to the training
    sample_dataqueue = GeneratorEnqueuer(generate_real_samples(batch_size),
                                         use_multiprocessing=True)
    sample_dataqueue.start(workers=2, max_queue_size=10)
    sample_gen = sample_dataqueue.get()

    # targets for loss function
    gan_sample_dataqueue = GeneratorEnqueuer(
        generate_latent_points_as_generator(batch_size),
        use_multiprocessing=True)
    gan_sample_dataqueue.start(workers=2, max_queue_size=10)
    gan_sample_gen = gan_sample_dataqueue.get()

    # targets for loss function
    valid = -np.ones((batch_size, 1))
    fake = np.ones((batch_size, 1))
    dummy = np.zeros((batch_size, 1))  # Dummy gt for gradient penalty

    bat_per_epo = int(n_samples / batch_size)

    # we need to call the discriminator once in order
    # to initialize the input shapes
    [X_real, cond_real] = next(sample_gen)
    latent = np.random.normal(size=(batch_size, latent_dim))
    critic_model.predict([X_real, cond_real, latent])
    for i in trange(n_epochs):
        epoch = 1 + i + start_epoch
        # enumerate batches over the training set
        for j in trange(bat_per_epo):

            for _ in range(n_disc):
                # fetch a batch from the queue
                [X_real, cond_real] = next(sample_gen)
                latent = np.random.normal(size=(batch_size, latent_dim))
                d_loss = critic_model.train_on_batch(
                    [X_real, cond_real, latent], [valid, fake, dummy])
                # we get four losses back here: average, valid, fake, and gradient_penalty
                # we want the average of valid and fake
                d_loss = np.mean([d_loss[1], d_loss[2]])

            # train generator
            # prepare points in latent space as input for the generator
            [latent, cond] = next(gan_sample_gen)
            # update the generator via the discriminator's error
            g_loss = generator_model.train_on_batch([latent, cond], valid)
            # summarize loss on this batch
            print(f'{epoch}, {j + 1}/{bat_per_epo}, d_loss {d_loss}' + \
                  f' g:{g_loss} ')  # , d_fake:{d_loss_fake} d_real:{d_loss_real}')

            if np.isnan(g_loss) or np.isnan(d_loss):
                raise ValueError('encountered nan in g_loss and/or d_loss')

            hist['d_loss'].append(d_loss)
            hist['g_loss'].append(g_loss)

        # plot generated examples
        plt.figure(figsize=(25, 25))
        n_plot = 30
        X_fake, cond_fake = generate_fake_samples(n_plot)
        for iplot in range(n_plot):
            plt.subplot(n_plot, 25, iplot * 25 + 1)
            plt.imshow(cond_fake[iplot, :, :].squeeze(),
                       cmap=plt.cm.gist_earth_r,
                       norm=LogNorm(vmin=0.01, vmax=1))
            plt.axis('off')
            for jplot in range(1, 24):
                plt.subplot(n_plot, 25, iplot * 25 + jplot + 1)
                plt.imshow(X_fake[iplot, jplot, :, :].squeeze(),
                           vmin=0,
                           vmax=1,
                           cmap=plt.cm.hot_r)
                plt.axis('off')
        plt.colorbar()
        plt.suptitle(f'epoch {epoch:04d}')
        plt.savefig(
            f'{plotdir}/fake_samples_{params}_{epoch:04d}_{j:06d}.{plot_format}'
        )

        # plot loss
        plt.figure()
        plt.plot(hist['d_loss'], label='d_loss')
        plt.plot(hist['g_loss'], label='g_loss')
        plt.ylabel('batch')
        plt.legend()
        plt.savefig(f'{plotdir}/training_loss_{params}.{plot_format}')
        pd.DataFrame(hist).to_csv('hist.csv')
        plt.close('all')

        generator.save(f'{outdir}/gen_{params}_{epoch:04d}.h5')
        critic.save(f'{outdir}/disc_{params}_{epoch:04d}.h5')
Example #38
            dpi = notedpi

    # im.show()
    figdefaultdpi = plt.rcParams.get('figure.dpi')
    figwinchs = round(colwidthmax * (dpi / figdefaultdpi) / figdefaultdpi / 10,
                      3)
    fighinchs = round(
        rowwidth * len(dflines) * (dpi / figdefaultdpi) / figdefaultdpi / 10,
        3)
    print(f"输出图片的画布宽高(英寸):\t{(figwinchs, fighinchs)}")
    plt.figure(figsize=(figwinchs, fighinchs), dpi=dpi)
    plt.axis('off')
    # font = ImageFont.truetype("../msyh.ttf", 12)
    if title:
        plt.title(title)
    plt.imshow(im)
    imgtmppath = dirmainpath / 'img' / 'dbimgtmp.png'
    plt.axis('off')
    plt.savefig(imgtmppath)
    if not showincell:
        plt.close()

    return imgtmppath


# %% [markdown]
# ### def descdb(df)


# %%
# Show summary information about a DataFrame or Series
def example(img=None,tess='I',eval_cell_idx=True,eval_v=True,show_downsampled_pts=True,
            valid_outside=True,base=[1,1],
            scale_spatial=.1,
            scale_value=100,
            permute_cell_idx_for_display=True,
            nLevels=3,
            vol_preserve=False,
            zero_v_across_bdry=[0,0],
            use_lims_when_plotting=True):
          
    show_downsampled_pts = bool(show_downsampled_pts)
    eval_cell_idx = bool(eval_cell_idx)
    eval_v = bool(eval_v)
    valid_outside = bool(valid_outside)
    permute_cell_idx_for_display = bool(permute_cell_idx_for_display)
    vol_preserve = bool(vol_preserve)
    
    if img is None:
        img =  Img(get_std_test_img())
    else:
        img=Img(img)
        img = img[:,:,::-1] # bgr2rgb
        
        
    
    tw = TransformWrapper(nRows=img.shape[0],
                          nCols=img.shape[1],
                          nLevels=nLevels,  
                          base=base,
                          scale_spatial=scale_spatial, # controls the prior's smoothness
                          scale_value=scale_value, # controls the prior's variance
                          tess=tess,
                          vol_preserve=vol_preserve,
                          zero_v_across_bdry=zero_v_across_bdry,
                          valid_outside=valid_outside)
    print(tw)
         
     
    # You probably want to do this: pad the image border with zeros
    border_width=1
    img[:border_width]=0
    img[-border_width:]=0
    img[:,:border_width]=0
    img[:,-border_width:]=0      
    
    # The tw.calc_T_fwd (or tw.calc_T_inv) is always done in gpu.
    # After using it to compute new pts, 
    # you may want to use remap (to warp an image accordingly). 
    # If you will use tw.remap_fwd (or tw.remap_inv), which is done in gpu,
    # then the image type can be either float32 or float64.
    # But if you plan to use tw.remap_fwd_opencv (or tw.remap_inv_opencv),
    # which is done in cpu (hence slightly slower) but supports better 
    # interpolation methods, then the image type must be np.float32.
    
#    img_original = CpuGpuArray(img.copy().astype(np.float32))
    img_original = CpuGpuArray(img.copy().astype(np.float64))
    
    img_wrapped_fwd= CpuGpuArray.zeros_like(img_original)
    img_wrapped_bwd= CpuGpuArray.zeros_like(img_original)
    
     
    seed=0
    np.random.seed(seed)    
               
    ms_Avees=tw.get_zeros_PA_all_levels()
    ms_theta=tw.get_zeros_theta_all_levels() 
    
    for level in range(tw.ms.nLevels):  
        if level==0:
            tw.sample_gaussian(level,ms_Avees[level],ms_theta[level],mu=None)# zero mean
        else:
            tw.sample_from_the_ms_prior_coarse2fine_one_level(ms_Avees,ms_theta,
                                                                level_fine=level)                
    
    
    print('\nimg shape: {}\n'.format(img_original.shape))

    # You don't have to use these. You can use any 2d array 
    # that has two columns (regardless of the number of rows).
    pts_src = tw.pts_src_dense
    
    # Create buffers for the output
    pts_fwd = CpuGpuArray.zeros_like(pts_src) 
    pts_inv = CpuGpuArray.zeros_like(pts_src)  

   
    for level in range(tw.ms.nLevels):
        
        
        #######################################################################
        # instead of the tw.sample_from_the_ms_prior() above,
        # you may want to use one of the following.        
        # 1)
        # tw.sample_gaussian(level,ms_Avees[level],ms_theta[level],mu=None)# zero mean
        # 2)
        # tw.sample_gaussian(level,ms_Avees[level],ms_theta[level],mu=some_user_specified_mu)
        # The following should be used only for level>0 :
        # 3)
        # tw.sample_normal_in_one_level_using_the_coarser_as_mean(Avees_coarse=ms_Avees[level-1], 
        #                                                        Avees_fine=ms_Avees[level],
        #                                                        theta_fine=ms_theta[level], 
        #                                                        level_fine=level)
        #
        #######################################################################
        
        
#        You can also change the values this way:
#         cpa_space = tw.ms.L_cpa_space[level]
#        theta = cpa_space.get_zeros_theta()
#        theta[:] = some values
#        Avees = cpa_space.get_zeros_PA()
#        cpa_space.theta2Avees(theta,Avees)
#        cpa_space.update_pat(Avees)         
              
        
        # This step is important and must be done 
        # before you try to "use" the new values of 
        # the (vectorized) A's. 
        tw.update_pat_from_Avees(ms_Avees[level],level) 
        
     
        if eval_v:
            # Evaluating the velocity field. 
            # You don't have to do it unless you want to visualize v.
            # (when evaluating the transformation, v will be internally 
            # evaluated anyway -- but its result won't be stored)
            tw.calc_v(level=level) 
        
    

        
        
        # optional, if you want to time it
        timer_gpu_T_fwd = GpuTimer()           
        
        # Simply calling 
        #   tic = time.perf_counter()
        # and then 
        #   toc = time.perf_counter()
        # won't work.
        # In fact, most likely you will get that toc-tic is zero.
        # You need to use the GpuTimer object. When you do that, 
        # one side effect is that suddenly the toc-tic from above will
        # give you a more realistic result.
        
        
        tic = time.perf_counter()
        timer_gpu_T_fwd.tic()
        tw.calc_T_fwd(pts_src,pts_fwd,level=level)
        timer_gpu_T_fwd.toc()
        toc = time.perf_counter()

        print('Time, in sec, for computing T_fwd:')
        print(timer_gpu_T_fwd.secs)
        print(toc - tic)  # likely to be 0, unless you also used the GpuTimer.
        
        # You can also time the inv of course. Results will be similar.
        tw.calc_T_inv(pts_src,pts_inv,level=level)  
             
        if eval_cell_idx:   
            # cell_idx is computed here just for display. 
            cell_idx = CpuGpuArray.zeros(len(pts_src),dtype=np.int32)
            tw.calc_cell_idx(pts_src,cell_idx,level,
                             permute_for_disp=permute_cell_idx_for_display)
 

        # You may also want to time the remap.
        # However, the remap is usually very fast (e.g, about 2 milisec).
#            timer_gpu_remap_fwd = GpuTimer()  
#            tic = time.clock()
#            timer_gpu_remap_fwd.tic()
#        tw.remap_fwd(pts_inv=pts_inv,img=img_original,img_wrapped_fwd=img_wrapped_fwd)
        tw.remap_fwd(pts_inv=pts_inv,img=img_original,img_wrapped_fwd=img_wrapped_fwd)
#            timer_gpu_remap_fwd.toc()   
#            toc = time.clock()   

        # If the img type is np.float32, you may also use
        # tw.remap_fwd_opencv instead of tw.remap_fwd. The differences between
        # the two methods are explained above.
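        # A commented sketch of the OpenCV variant, assuming img_original holds
        # np.float32 data; the last argument is an interpolation type, as in the
        # disp() example further below:
        #   tw.remap_fwd_opencv(pts_inv, img_original, img_wrapped_fwd, interp_type)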
                   
        
        
        tw.remap_inv(pts_fwd=pts_fwd,img=img_original,img_wrapped_inv=img_wrapped_bwd)
        
    
        # For display purposes, do gpu2cpu transfer
        print ("For display purposes, do gpu2cpu transfer")
        if eval_cell_idx:        
            cell_idx.gpu2cpu()  
            


            
            
            
        if eval_v:
            tw.v_dense.gpu2cpu() 
        pts_fwd.gpu2cpu()
        pts_inv.gpu2cpu()
        img_wrapped_fwd.gpu2cpu()
        img_wrapped_bwd.gpu2cpu()
        
        figsize = (12,12)
        plt.figure(figsize=figsize)

         
        if eval_v: 
            plt.subplot(332)
            tw.imshow_vx() 
            plt.title('vx')
            plt.subplot(333)
            tw.imshow_vy()   
            plt.title('vy') 
        
        if eval_cell_idx:
            plt.subplot(331)
            cell_idx_disp = cell_idx.cpu.reshape(img.shape[0],-1)
            plt.imshow(cell_idx_disp)
            plt.title('tess (type {})'.format(tess))
        
        if show_downsampled_pts:
            ds=20
            pts_src_grid = pts_src.cpu.reshape(tw.nRows,-1,2)
            pts_src_ds=pts_src_grid[::ds,::ds].reshape(-1,2)
            pts_fwd_grid = pts_fwd.cpu.reshape(tw.nRows,-1,2)
            pts_fwd_ds=pts_fwd_grid[::ds,::ds].reshape(-1,2)
            pts_inv_grid = pts_inv.cpu.reshape(tw.nRows,-1,2)
            pts_inv_ds=pts_inv_grid[::ds,::ds].reshape(-1,2)
        
           
            use_lims=use_lims_when_plotting
#            return tw
            plt.subplot(334)    
            plt.plot(pts_src_ds[:,0],pts_src_ds[:,1],'r.')
            plt.title('pts ds')
            tw.config_plt()
            plt.subplot(335)
            plt.plot(pts_fwd_ds[:,0],pts_fwd_ds[:,1],'g.')
            plt.title('fwd(pts)')
            tw.config_plt(axis_on_or_off='on',use_lims=use_lims)
            plt.subplot(336)
            plt.plot(pts_inv_ds[:,0],pts_inv_ds[:,1],'b.')
            plt.title('inv(pts)')
            tw.config_plt(axis_on_or_off='on',use_lims=use_lims)
         
                        
        plt.subplot(337)
        plt.imshow(img_original.cpu.astype(np.uint8))
        plt.title('img')
#        plt.axis('off') 
        plt.subplot(338)
        plt.imshow(img_wrapped_fwd.cpu.astype(np.uint8))
#        plt.axis('off') 
        plt.title('fwd(img)')
        plt.subplot(339)
        plt.imshow(img_wrapped_bwd.cpu.astype(np.uint8))    
#        plt.axis('off') 
        plt.title('inv(img)')
    
    
    return tw
def detect_signal(file, window, samplerate, df, every, v, xf, f_center):
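    # Overview: look up catalogued satellite downlinks that fall inside the tuned
    # span, slice the waterfall 'v' around each of them, score every slice with
    # signal_distance(), and save the slices as PNG waterfall plots.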


    ##### detect
    # the SDR waterfall data is passed in via the 'v' argument

    # load the database of known satellite frequencies and their bandwidths
    db_satname = ["NOAA19", "NOAA15", "NOAA18", "ISS", "ISS APRS"]
    db_f = [137100000.0, 137620000.0, 137912500.0, 145800000.0, 145825000.0]
    db_f_band = [24000.0, 24000.0, 24000.0, 6000.0, 6000.0]

    f = []
    f_band = []
    for kkk in range(len(db_f)):
        if db_f[kkk] >= f_center - samplerate and db_f[kkk] <= f_center + samplerate:
            f.append(db_f[kkk])
            f_band.append(db_f_band[kkk])

    print(f)


    start = []
    bandrange = []
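    # Convert each catalogued frequency into an FFT-bin index: its offset from the
    # center frequency in bins of width df, shifted by len(xf)/2 so that bin 0 is the
    # lowest frequency (this assumes xf is an fft-shifted frequency axis with
    # resolution df). bandrange is the catalogued bandwidth expressed in bins.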
    for kkkk in range(len(f)):
        start.append(int((f[kkkk] - f_center) / df + len(xf)/2.0))
        #print(start[-1], (0), df, len(xf)/2.0)
        bandrange.append(int(f_band[kkkk]/df))
        #print("band", bandrange[-1])
        #print("test", start[-1], df, bandrange[-1])

    print(time.time(), timeit.default_timer()-time_start, "graphing start")
    foundknownsignal = []
    foundknownsignal1 = []
    for j in range(len(f)):
        w1 = []
        w2 = []
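        # w1: waterfall rows restricted to the catalogued band around this frequency;
        # w2: the same slice widened by int(60000/df) bins (~60 kHz) on each side
        #     for visual context.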
        for ll in range(len(v)):
            w1.append(v[ll][start[j] - bandrange[j]: start[j] + bandrange[j]])
            w2.append(v[ll][start[j] - (bandrange[j]+int(60000/df)): start[j] + (bandrange[j]+int(60000/df))])


        foundknownsignal.append(signal_distance(w1, every))
        foundknownsignal1.append(int(np.sum(w1)))
        filename = file+"_"+str(f[j])+"_sig"+str(foundknownsignal[j])+"_"+str(foundknownsignal1[j])+".png"
        filename1 = file+"_"+str(f[j])+"x_sig"+str(foundknownsignal[j])+"_"+str(foundknownsignal1[j])+".png"

        plt.imshow(w1, interpolation='nearest')
        plt.gca().invert_yaxis()
        plt.savefig(filename, format='png')
        #plt.show()

        plt.imshow(w2, interpolation='nearest')
        #plt.imshow(result)
        plt.gca().invert_yaxis()
        plt.savefig(filename1, format='png', dpi=100)
        #plt.show()

    print(time.time(), timeit.default_timer()-time_start, "graphing end")

    '''
    #signal_strength = signal_strength / np.max(signal_strength)
    #signal_strength1 = signal_strength1 / np.max(signal_strength1)
    print(len(signal_strength))
    print(signal_strength)
    #plt.plot(signal_strength)
    plt.plot(xf, frr)
    plt.show()
    '''
    return foundknownsignal, foundknownsignal1
Example #41
0
        yy = yy.astype(np.float64)

        dimx = float(dimx)
        dimy = float(dimy)
        nTimesInX = np.floor(xx / M).max() + 1

        seg_cpu = np.floor(yy / M) * nTimesInX + np.floor(xx / M)
        seg_cpu = seg_cpu.astype(np.int32)
        return seg_cpu


def random_permute_seg(seg):
    p = np.random.permutation(seg.max() + 1)
    seg2 = np.zeros_like(seg)
    for c in range(seg.max() + 1):
        seg2[seg == c] = p[c]
    return seg2.astype(np.int32)


if __name__ == "__main__":
    tic = time.clock()
    seg = get_init_seg(500, 500, 17, True)
    #    seg= get_init_seg(512, 512,50,False)
    toc = time.clock()
    print(toc - tic)
    print('k = ', seg.max() + 1)
    plt.figure(1)
    plt.clf()
    plt.imshow(seg, interpolation="Nearest")
    plt.axis('scaled')
def signal_substraction(file, window, samplerate, df, every, result_of_fft):

    # We assume that our satellite signals are not received continuously,
    # so we do a subtraction at the frequency level: the signal intensity of each
    # frequency bin within the window kernel is subtracted from the overall,
    # long-time average of that frequency bin.
    # This makes fluctuations easier to spot, because they add on top of the average,
    # so a superposed signal can be found more easily this way.
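    # A minimal numpy sketch of the idea, assuming result_of_fft is a 2D array with
    # rows = time windows and columns = frequency bins (the helpers meaning() and
    # substract() below implement the actual variant used here):
    #   background = np.mean(result_of_fft, axis=0)      # long-time average per bin
    #   residual = result_of_fft - background[None, :]   # fluctuations stand out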
    threshold = meaning(result_of_fft)
    signal_lowered = substract(result_of_fft, threshold)
    print(time.time(), timeit.default_timer()-time_start, "lowering done")

    v = np.zeros((len(signal_lowered), window))
    #w = np.zeros((len(signal_lowered), window))

    bandwidth = int(window / (2*2*2*2*2*2*2*2*2))
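    # 2**9 = 512, so the spectrum is processed in 512 equal sub-bands of
    # 'bandwidth' FFT bins (i.e. bandwidth*df Hz) each.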
    #print("band", bandwidth*df)

    for j in range(0, window, bandwidth):
        #print(j)

        u = []
        for kk in range(len(signal_lowered)):
            u.append(signal_lowered[kk][j:j+bandwidth])
            #u = u/np.max(u)

        #print("test", len(u), len(u[0]))
        '''
        plt.imshow(u, interpolation='nearest')
        #plt.imshow(result)
        plt.gca().invert_yaxis()
        #plt.gca().invert_xaxis()
        # #plt.gca().set_xticks(xf)
        plt.show()
        '''

        edged = signal(edge_detection1(u))
        #print(np.max(edged))
        #graved = centerofgravity(edged)
        #signal_strength.append(np.sum(edged))


        for k in range(len(edged)):
            for l in range(len(edged[k])):
                v[k][j+l] = edged[k][l]# + signaaaal

    print(time.time(), timeit.default_timer()-time_start, "edging done")

    w3 = []
    filename3 = file+"_all.png"
    for lll in range(len(v)):
        w3.append(signal_lowered[lll])

    plt.imshow(w3, interpolation='nearest')
    #plt.imshow(result)
    plt.gca().invert_yaxis()
    #plt.figure(figsize=(1200, 800))
    plt.savefig(filename3, format='png', dpi=500)
    #plt.savefig(filename, format='png', dpi=1000)
    #plt.show()

    del w3

    return v
Example #43
0
def find_parkings(img):
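    # Rough outline: detect horizontal and vertical lines in the image, sort the
    # horizontal ones, then walk through them pairing each line with its previous
    # neighbour to form parking-spot corner quadruples; the result is pickled to
    # parking_array.pickle.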
    plt.imshow(img)
    plt.show()
    hor_lines, ver_lines = lineDetection(img)
    sorted_hor_lines = sorted(sorted(hor_lines, key=lambda x: x.y1),
                              key=special_x_sort)
    parkings = []
    i = 0
    is_next_neig = False
    prev_line = sorted_hor_lines[0]
    prev_points = [(prev_line.x1, int(prev_line.y1)),
                   (prev_line.x2, int(prev_line.y2))]
    while i < len(sorted_hor_lines):
        cur_line = sorted_hor_lines[i]
        cur_x_start = cur_line.x1
        cur_x_end = cur_line.x2
        if i < len(sorted_hor_lines) - 1:
            next_line = sorted_hor_lines[i + 1]
            is_next_neig = areNeigbours(cur_line, next_line)
        is_prev_neig = areNeigbours(cur_line, prev_line)
        if is_prev_neig:
            #  is_prev_neig &&  is_next_neig => line in the middle of row
            if is_next_neig:
                cur_x1 = statistics.median(
                    [prev_points[0][0], cur_x_start, next_line.x1])
                cur_x2 = statistics.median(
                    [prev_points[1][0], cur_x_end, next_line.x2])
            # is_prev_neig && !is_next_neig => line last of row
            else:
                cur_x1 = cur_x_start if np.abs(
                    prev_points[0][0] - cur_x_start) < 5 else prev_points[0][0]
                cur_x2 = cur_x_end if np.abs(
                    prev_points[1][0] - cur_x_end) < 5 else prev_points[1][0]
        else:
            # !is_prev_neig &&  is_next_neig => line first of row
            if is_next_neig:
                cur_x1 = cur_x_start
                cur_x2 = cur_x_end
            # !is_prev_neig && !is_next_neig => not a parking!
            else:
                cur_line = None
                cur_x1 = -1000
                cur_x2 = -1000
        if cur_line:
            cur_points = [(cur_x1, int(cur_line.y1)),
                          (cur_x2, int(cur_line.y2))]
            cur_points_orig = [(cur_x_start, int(cur_line.y1)),
                               (cur_x_end, int(cur_line.y2))]
        else:
            cur_points = [(cur_x1, -1000), (cur_x2, -1000)]
            cur_points_orig = [(cur_x_start, -1000), (cur_x_end, -1000)]
        if is_prev_neig:
            parkings.append(cur_points + prev_points)
        prev_line = cur_line
        prev_points = cur_points_orig
        i += 1

    pickle_out = open("parking_array.pickle", "wb")
    pickle.dump(parkings, pickle_out)
    pickle_out.close()

    return parkings
Example #44
0
    print(text_6)
    text_7, image_7 = send_request_byimage(sign, imagefile_3)
    print(text_7)
    text_8, image_8 = send_request_byimage(sign, imagefile_4)
    print(text_8)
    score_5 = str(json.loads(text_5)["data"]["score"])
    score_6 = str(json.loads(text_6)["data"]["score"])
    score_7 = str(json.loads(text_7)["data"]["score"])
    score_8 = str(json.loads(text_8)["data"]["score"])

    # print(res.request.body)
    # print(res.request.headers)
    # print(res.text)

    plt.subplot(241).set_title("无翻拍 : %s 分" % score_1)
    plt.imshow(image_1)
    plt.axis('off')
    plt.subplot(242).set_title("翻拍 : %s 分" % score_2)
    plt.imshow(image_2)
    plt.axis('off')
    plt.subplot(243).set_title("无翻拍 : %s 分" % score_3)
    plt.imshow(image_3)
    plt.axis('off')
    plt.subplot(244).set_title("无翻拍-黑白 : %s 分" % score_4)
    plt.imshow(image_4)
    plt.axis('off')

    plt.subplot(245).set_title("电脑拍摄 : %s 分" % score_5)
    plt.imshow(image_5)
    plt.axis('off')
    plt.subplot(246).set_title("手机拍摄 : %s 分" % score_6)
Example #45
0
frameRate=0.0083;

#filename='img007.tif'
#frameRate=0.033;


#%%
filename_mc=filename[:-4]+'_mc.npz'
filename_analysis=filename[:-4]+'_analysis.npz'    
filename_traces=filename[:-4]+'_traces.npz'    

#%% load movie
m=XMovie(filename, frameRate=frameRate);

#%% example plot a frame
plt.imshow(m.mov[100],cmap=plt.cm.Greys_r)

#%% example play movie
m.playMovie(frate=.03,gain=20.0,magnification=4)

#%% example take first channel of two
channelId=1;
totalChannels=2;
m.makeSubMov(range(channelId-1,m.mov.shape[0],totalChannels))

#%% partition into two movies, up and down

m.crop(crop_top=150,crop_bottom=150,crop_left=150,crop_right=150,crop_begin=0,crop_end=0)


#%% concatenate movies (it will add to the original movie)
Example #46
0
    def disp(self,sampler,interp_type_during_visualization):
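        # Plots the sampler's diagnostics (log-likelihood, acceptance ratio),
        # applies the current theta, warps the source image with the current
        # transformation, and shows src / warped / dst together with vx and vy.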
        level=sampler.level    
        theta=sampler.theta_current
        tw=self.tw
#        interval=self.interval
#        interval_dense=self.interval_dense
        markersize = 5
        fontsize=30
        cpa_space = tw.ms.L_cpa_space[level]            
        plt.subplot(231)
        sampler.plot_ll()
        plt.title('ll',fontsize=fontsize)
        sampler.plot_wlp()
        sampler.plot_wlp_plus_ll()
        if sampler.lp_func:         
            plt.legend(['ll','wlp','ll+wlp'])
        
        plt.subplot(232)
        sampler.plot_ar()
        plt.title('accept ratio',fontsize=fontsize)
         
#        print theta
        cpa_space.theta2As(theta=theta)
        tw.update_pat_from_Avees(level=level)          
        tw.calc_v(level=level)    
        tw.v_dense.gpu2cpu()     
    
        src = self.src
#        dst = self.dst
        transformed = self.transformed
        
#        src_dense=self.src_dense
#        transformed_dense=self.transformed_dense
#        tw.calc_T(src_dense, transformed_dense, mysign=1, level=level, 
#        
#        transformed_dense.gpu2cpu()

        tw.calc_T_inv(src, transformed,  level=level, 
                  int_quality=+1)            
        transformed.gpu2cpu()
        
        if interp_type_during_visualization=='gpu_linear':
            my_dtype = np.float64
        else:
            my_dtype = np.float32 # For opencv
        
        img_src = self.signal.src.cpu.reshape(tw.nRows,tw.nCols)
        img_src = CpuGpuArray(img_src.astype(my_dtype))  
        img_wrapped = CpuGpuArray.zeros_like(img_src)

        img_dst = self.signal.dst.cpu.reshape(tw.nRows,tw.nCols)
        img_dst = CpuGpuArray(img_dst)         
        
                
        if interp_type_during_visualization=='gpu_linear':
            tw.remap_fwd(transformed,img_src,img_wrapped)
        else:
            tw.remap_fwd_opencv(transformed,img_src,img_wrapped,interp_type_during_visualization)
        img_wrapped.gpu2cpu()
             
        plt.subplot(233)   
        plt.imshow(img_src.cpu,interpolation="None")
        plt.gray()
        cpa_space.plot_cells('r')
        tw.config_plt(axis_on_or_off='on')
        plt.title(r'$I_{\mathrm{src}}$')

        
                
        
        plt.subplot(234)   
        plt.imshow(img_wrapped.cpu,interpolation="None")
        plt.gray()
#        cpa_space.plot_cells('w')
        tw.config_plt(axis_on_or_off='on')
        plt.title(r'$I_{\mathrm{src}}\circ T^{\theta}$')
        
        plt.subplot(235)   
        plt.imshow(img_dst.cpu,interpolation="None")
        plt.gray()
        plt.title(r'$I_{\mathrm{dst}}$')
        
#        cpa_space.plot_cells('w')
        tw.config_plt(axis_on_or_off='on')
        
        plt.subplot(2,6,11)
        self.tw.imshow_vx()
        pylab.jet()
        tw.config_plt(axis_on_or_off='on')
        plt.title(r'$v_x$')
        plt.subplot(2,6,12)
        self.tw.imshow_vy()
        pylab.jet()
        tw.config_plt(axis_on_or_off='on')
        plt.title(r'$v_y$')
Example #47
0
def example(tess='I',
            base=[2, 2, 2],
            nLevels=1,
            zero_v_across_bdry=[True] * 3,
            vol_preserve=False,
            nRows=100,
            nCols=100,
            nSlices=100,
            use_mayavi=False,
            eval_v=False,
            eval_cell_idx=False):

    tw = TransformWrapper(nRows=nRows,
                          nCols=nCols,
                          nSlices=nSlices,
                          nLevels=nLevels,
                          base=base,
                          zero_v_across_bdry=zero_v_across_bdry,
                          tess=tess,
                          valid_outside=False,
                          only_local=False,
                          vol_preserve=vol_preserve)

    print_iterable(tw.ms.L_cpa_space)
    print tw

    # create some fake 3D image.
    img = np.zeros((nCols, nRows, nSlices), dtype=np.float64)

    #    img[:]=np.random.random_integers(0,255,img.shape)

    # Fill the image with the x coordinates as fake values
    img[:] = tw.pts_src_dense.cpu[:, 0].reshape(img.shape)

    img0 = CpuGpuArray(img.copy().astype(np.float64))
    img_wrapped_fwd = CpuGpuArray.zeros_like(img0)
    img_wrapped_inv = CpuGpuArray.zeros_like(img0)

    seed = 0
    np.random.seed(seed)

    ms_Avees = tw.get_zeros_PA_all_levels()
    ms_theta = tw.get_zeros_theta_all_levels()

    if tess == 'II':
        for level in range(tw.ms.nLevels):
            cpa_space = tw.ms.L_cpa_space[level]
            Avees = ms_Avees[level]
            #            1/0
            if level == 0:
                tw.sample_gaussian(level,
                                   ms_Avees[level],
                                   ms_theta[level],
                                   mu=None)  # zero mean
                #                ms_theta[level].fill(0)
                #                ms_theta[level][-4]=10
                cpa_space.theta2Avees(theta=ms_theta[level], Avees=Avees)
            else:
                tw.sample_from_the_ms_prior_coarse2fine_one_level(
                    ms_Avees, ms_theta, level_fine=level)
    else:
        # For tess='I' in 3D, I have yet to implement the coarse-to-fine sampling.
        for level in range(tw.ms.nLevels):
            cpa_space = tw.ms.L_cpa_space[level]
            velTess = cpa_space.zeros_velTess()
            ms_Avees[level].fill(0)
            Avees = ms_Avees[level]
            tw.sample_gaussian_velTess(level, Avees, velTess, mu=None)

    print 'img shape:', img0.shape

    # You don't have to use these. You can use any 2d array
    # that has 3 columns (regardless of the number of rows).
    pts_src = tw.pts_src_dense
    pts_src = CpuGpuArray(pts_src.cpu[::1].copy())
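    # A hedged sketch of supplying your own points instead of the dense grid
    # (hypothetical coordinates; any (N, 3) float array should do):
    #   my_pts = np.array([[10., 20., 30.],
    #                      [50., 60., 70.]])
    #   pts_src = CpuGpuArray(my_pts)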

    # Create a buffer for the output
    pts_fwd = CpuGpuArray.zeros_like(pts_src)
    pts_inv = CpuGpuArray.zeros_like(pts_src)

    for level in range(tw.ms.nLevels):
        tw.update_pat_from_Avees(ms_Avees[level], level)

        if eval_v:
            # Evaluating the velocity field.
            # You don't have to do it unless you want to visualize v.
            # (When evaluating the transformation, v will be evaluated
            # internally anyway -- but its result won't be stored.)
            tw.calc_v(level=level)

        print 'level', level
        print
        print 'number of points:', len(pts_src)
        print 'number of cells:', tw.ms.L_cpa_space[level].nC

        # optional, if you want to time it
        timer_gpu_T_fwd = GpuTimer()

        # Simply calling
        #   tic = time.clock()
        # and then
        #   toc = time.clock()
        # won't work.
        # In fact, most likely you will get that toc-tic is zero.
        # You need to use the GpuTimer object. When you do that,
        # one side effect is that suddenly the toc-tic from above will
        # give you a more realistic result.

        tic = time.clock()
        timer_gpu_T_fwd.tic()
        tw.calc_T_fwd(pts_src, pts_fwd, level=level)
        timer_gpu_T_fwd.toc()
        toc = time.clock()

        print 'Time, in sec, for computing T_fwd:'
        print timer_gpu_T_fwd.secs
        print toc - tic  # likely to be 0, unless you also used the GpuTimer.

        # You can also time the inv of course. Results will be similar.
        tw.calc_T_inv(pts_src, pts_inv, level=level)

        if eval_cell_idx:
            # cell_idx is computed here just for display.
            cell_idx = CpuGpuArray.zeros(len(pts_src), dtype=np.int32)
            tw.calc_cell_idx(pts_src, cell_idx, level)

        tw.remap_fwd(pts_inv, img0, img_wrapped_fwd)
        tw.remap_inv(pts_fwd, img0, img_wrapped_inv)

        # For display purposes, do gpu2cpu transfer
        print "For display purposes, do gpu2cpu transfer"

        if eval_cell_idx:
            cell_idx.gpu2cpu()
        if eval_v:
            tw.v_dense.gpu2cpu()
        pts_fwd.gpu2cpu()
        pts_inv.gpu2cpu()
        img_wrapped_fwd.gpu2cpu()
        img_wrapped_inv.gpu2cpu()

        if use_mayavi:
            ds = 1  # downsampling factor
            i = 17
            pts_src_grid = pts_src.cpu.reshape(tw.nRows, tw.nCols, -1, 3)
            pts_src_ds = pts_src_grid[::ds, ::ds, i].reshape(-1, 3)
            pts_fwd_grid = pts_fwd.cpu.reshape(tw.nRows, tw.nCols, -1, 3)
            pts_fwd_ds = pts_fwd_grid[::ds, ::ds, i].reshape(-1, 3)
            pts_inv_grid = pts_inv.cpu.reshape(tw.nRows, tw.nCols, -1, 3)
            pts_inv_ds = pts_inv_grid[::ds, ::ds, i].reshape(-1, 3)

            from of.my_mayavi import *
            mayavi_mlab_close_all()
            mayavi_mlab_figure_bgwhite('src')
            x, y, z = pts_src_ds.T
            mayavi_mlab_plot3d(x, y, z)
            mayavi_mlab_figure_bgwhite('fwd')
            x, y, z = pts_fwd_ds.T
            mayavi_mlab_plot3d(x, y, z)

        figsize = (12, 12)
        plt.figure(figsize=figsize)
        i = 17  # some slice
        plt.subplot(131)
        plt.imshow(img0.cpu[:, :, i].astype(np.uint8), interpolation="Nearest")
        plt.title('slice from img')
        plt.subplot(132)
        plt.imshow(img_wrapped_fwd.cpu[:, :, i].astype(np.uint8),
                   interpolation="Nearest")
        plt.axis('off')
        plt.title('slice from fwd(img)')
        plt.subplot(133)
        plt.imshow(img_wrapped_inv.cpu[:, :, i].astype(np.uint8),
                   interpolation="Nearest")
        plt.axis('off')
        plt.title('slice from inv(img)')

    if 0:  # debug

        cpa_space = tw.ms.L_cpa_space[level]
        if eval_v:
            vx = tw.v_dense.cpu[:, 0].reshape(
                cpa_space.x_dense_grid_img.shape[1:])
            vy = tw.v_dense.cpu[:, 1].reshape(
                cpa_space.x_dense_grid_img.shape[1:])
            vz = tw.v_dense.cpu[:, 2].reshape(
                cpa_space.x_dense_grid_img.shape[1:])

            plt.figure()
            plt.imshow(vz[:, :, 17], interpolation="Nearest")
            plt.colorbar()
            plt.title('vz in some slice')

    return tw
Example #48
0
from pylab import *
from pylab import plt
import numpy as np
import sys
sys.path.append('../python2.7/site-packages')
import cv2
from skimage.measure import ransac, LineModelND
from skimage import feature
import utils
from PIL import Image, ImageEnhance
import slic

image = Image.open('./sourceData/image.jpg')
plt.imshow(image)
plt.show()
contrast = ImageEnhance.Sharpness(image)
new_img = np.array(contrast.enhance(2))
plt.imshow(new_img)
plt.show()
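# Four hand-picked point correspondences: src* are pixel locations in the photo,
# dst* their target positions in the straightened (top-down) view computed by
# utils.straightenImage below.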

src1 = [85, 137]
src2 = [69, 43]
src3 = [484, 113]
src4 = [374, 33]
dst1 = [250, 175]
dst2 = [790, 175]
dst3 = [250, 490]
dst4 = [790, 490]

# Use the same homography as for the PCA
img = utils.straightenImage(new_img, src1, src2, src3, src4, dst1, dst2, dst3,