Example #1
File: cv.py Project: cjjdzh/Carla
def find_street_lanes(image, prev_left_fit=None, prev_right_fit=None):
    DEBUG = False
    roi_image, roi_vert = roi(image)
    birds, m_inv = perspective_transform(image, roi_vert)
    binary_warped = preprocess(birds) // 255

    # if prev_left_fit is None or prev_right_fit is None:
    leftx, lefty, rightx, righty = find_lane_pixels_using_histogram(
        binary_warped)

    # else:
    #   leftx, lefty, rightx, righty = find_lane_pixels_using_prev_poly(binary_warped, prev_left_fit, prev_right_fit)

    left_fit, right_fit, left_fitx, right_fitx, ploty = fit_poly(
        binary_warped, leftx, lefty, rightx, righty)
    painted = draw_poly_lines(binary_warped, left_fitx, right_fitx, ploty)
    dewarped = cv2.warpPerspective(painted,
                                   m_inv, (image.shape[1], image.shape[0]),
                                   flags=cv2.INTER_LINEAR)
    result = np.zeros_like(image)
    result = cv2.addWeighted(image, 0.7, dewarped, 0.3, 0)
    result[dewarped == 0] = image[dewarped == 0]

    if DEBUG:
        plt.figure(figsize=(20, 6))
        plt.subplot(121)
        plt.title("birds eye view")
        plt.imshow(birds)

        plt.subplot(122)
        plt.title("binary")
        plt.imshow(binary_warped)

    return left_fit, right_fit, result
Example #2
File: cv.py Project: cjjdzh/Carla
def draw_poly_lines(binary_warped, left_fitx, right_fitx, ploty):
    DEBUG = False
    # Create an image to draw on and an image to show the selection window
    out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255
    window_img = np.zeros_like(out_img)

    points_left = np.vstack((left_fitx, ploty)).astype(np.int32)
    points_right = np.flip(np.vstack((right_fitx, ploty)).astype(np.int32), 1)

    for pt in points_left.T:
        cv2.circle(window_img, tuple(pt), 3, (255, 0, 0), 3)

    for pt in points_right.T:
        cv2.circle(window_img, tuple(pt), 3, (0, 0, 255), 3)

    points = np.hstack((points_left, points_right)).astype(np.int32).T

    cv2.fillPoly(window_img, [points], color=[0, 255, 0])

    result = cv2.addWeighted(out_img, 0.6, window_img, 0.4, 0)
    if DEBUG:
        f = plt.figure()
        f.add_subplot(121)
        plt.imshow(window_img)
        f.add_subplot(122)
        plt.imshow(out_img)
    ## End visualization steps ##
    return window_img
Example #3
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        # normalize before plotting so the heatmap matches the printed values
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
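A minimal usage sketch (not from the original project): it assumes scikit-learn is installed and that plot_confusion_matrix above, together with its numpy/matplotlib/itertools imports, is already in scope; the labels and class names are placeholders.

import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix

y_true = [0, 1, 1, 2, 0, 2]   # placeholder ground-truth labels
y_pred = [0, 1, 2, 2, 0, 1]   # placeholder predictions
cm = confusion_matrix(y_true, y_pred)

plt.figure()
plot_confusion_matrix(cm, classes=['cat', 'dog', 'bird'], normalize=True)
plt.show()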
Example #4
def plot_images(headers):
    ''' function to plot images from headers.

    It plots the images and returns nothing.
    Parameters
    ----------
        headers : databroker header object, or a list of header objects
            headers pulled out from the central file system
    '''
    # prepare header
    if type(list(headers)[1]) == str:
        header_list = list()
        header_list.append(headers)
    else:
        header_list = headers

    for header in header_list:
        uid = header.start.uid
        img_field = _identify_image_field(header)
        imgs = np.array(get_images(header, img_field))
        print('Plotting your data now...')
        for i in range(imgs.shape[0]):
            img = imgs[i]
            plot_title = '_'.join([uid, str(i)])  # str.join takes a single iterable
            # just display user uid and index of this image
            try:
                fig = plt.figure(plot_title)
                plt.imshow(img)
                plt.show()
            except Exception:
                pass  # allow matplotlib to fail without stopping the rest of the loop
Example #5
def plot_test_image(testX, image_index, predictions_array, true_binary_labels):
    """
        testX: this is the test dataset
        image_index: index of the image that we will plot from the test dataset
        predictions_array: the array containing all predictions for the test dataset, i.e. the output of model.predict(testX)
        true_binary_labels: the true labels expressed as integer values. It does not work with one-hot encodings or string labels.
    """
    single_predictions_array = predictions_array[image_index]
    true_binary_label = true_binary_labels[image_index]
    test_image = testX[image_index]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])

    plt.imshow(test_image, cmap=plt.cm.binary)

    predicted_binary_label = np.argmax(single_predictions_array)
    #print ("predicted_binary_label:", predicted_binary_label)
    #print ("true_binary_label:",true_binary_label)

    if predicted_binary_label == true_binary_label:
        color = 'blue'
    else:
        color = 'red'

    plt.xlabel("predicted: {} {:2.0f}% (true: {})".format(
        predicted_binary_label, 100 * np.max(single_predictions_array),
        true_binary_label),
               color=color)
Example #6
def vis_square(data, name_fig):
    """Take an array of shape (n, height, width) or (n, height, width, 3)
       and visualize each (height, width) thing in a grid of size approx. sqrt(n) by sqrt(n)"""

    # normalize data for display
    data = (data - data.min()) / (data.max() - data.min())

    # force the number of filters to be square
    n = int(np.ceil(np.sqrt(data.shape[0])))
    padding = (((0, n**2 - data.shape[0]), (0, 1),
                (0, 1))  # add some space between filters
               + ((0, 0), ) * (data.ndim - 3)
               )  # don't pad the last dimension (if there is one)
    data = np.pad(data, padding, mode='constant',
                  constant_values=1)  # pad with ones (white)

    # tile the filters into an image
    data = data.reshape((n, n) + data.shape[1:]).transpose(
        (0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    data = data.reshape((n * data.shape[1], n * data.shape[3]) +
                        data.shape[4:])

    plt.imshow(data)
    plt.show()
    plt.imsave(name_fig, data)
    return data
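A small usage sketch for vis_square (not part of the original snippet); the random array stands in for real convolution filters and 'filters.png' is a placeholder file name.

import numpy as np

filters = np.random.rand(10, 8, 8)           # ten fake 8x8 filters
tiled = vis_square(filters, 'filters.png')   # shows the grid and writes it to disk
print(tiled.shape)                           # (36, 36): a 4x4 grid of padded 9x9 tiles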
Example #7
def show_images(images, cols=1, titles=None):
    """Display a list of images in a single figure with matplotlib.

    Parameters
    ---------
    images: List of np.arrays compatible with plt.imshow.

    cols (Default = 1): Number of columns in figure (number of rows is
                        set to np.ceil(n_images/float(cols))).

    titles: List of titles corresponding to each image. Must have
            the same length as images.
    """
    assert ((titles is None) or (len(images) == len(titles)))
    n_images = len(images)
    if titles is None:
        titles = ['Image (%d)' % i for i in range(1, n_images + 1)]
    fig = plt.figure()
    for n, (image, title) in enumerate(zip(images, titles)):
        a = fig.add_subplot(int(np.ceil(n_images / float(cols))), cols, n + 1)  # rows, cols, index
        if image.ndim == 2:
            plt.gray()
        plt.imshow(image)
        a.set_title(title)
    fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)
    plt.show()
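A usage sketch for show_images, assuming the function above is in scope; the arrays are synthetic placeholders.

import numpy as np

imgs = [np.random.rand(32, 32),       # grayscale image
        np.random.rand(32, 32, 3)]    # RGB image
show_images(imgs, cols=2, titles=['noise (gray)', 'noise (rgb)'])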
Example #8
def plot_results(pil_img, prob, boxes, classIDs, Name_model='DeTr', Save_Images=True, image_name='test', inference_time=None):
    if Save_Images :
        os.makedirs(Name_model,exist_ok=True)

    plt.figure(figsize=(16,10))
    plt.imshow(pil_img)
    ax = plt.gca()
    ######################
    if len(idxs) > 0:
        # loop over the indexes we are keeping
        for i in idxs.flatten():

            (x, y) = (boxes[i][0], boxes[i][1])
            (w, h) = (boxes[i][2], boxes[i][3])

            # draw a bounding box rectangle and label on the image
            color = [int(c) for c in COLORS[classIDs[i]]]
            cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
            text = "{}: {:.4f}".format(LABELS[classIDs[i]], prob[i])
            print(text)
            #print(str(LABELS[classIDs[i]]) + ' : ' + str(confidences[i]))
            cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)

        cv2.putText(image, f"inference time : {inferene_time:0.2f}", (10, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [0,0,255], 1)
        cv2.putText(image, f"Number Objects : {len(idxs)}", (10, 25 +30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [0,0,255], 1)
        # plt.axis('off')
        if Save_Images :
            addr_image = os.path.join(Name_model,image_name)
            cv2.imwrite(addr_image, image)
        else:
            cv2.imshow(image_name, image)  # use the actual image name for the window title
            cv2.waitKey(0)
Example #9
def plot_confusion_matrix(cm,
                          classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues,
                          filename='viz\\confusion_matrix.png'):
    plt.figure()
    if normalize:  # normalize before plotting so the heatmap matches the cell text
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j,
                 i,
                 cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.savefig(filename)
Example #10
def plot_classification_report(cr,
                               title='Classification report ',
                               with_avg_total=False,
                               cmap=plt.cm.Blues):

    lines = cr.split('\n')

    classes = []
    plotMat = []
    for line in lines[2:(len(lines) - 3)]:
        #print(line)
        t = line.split()
        if len(t):
            classes.append(t[0])
            v = [float(x) for x in t[1:len(t) - 1]]
            print(v)
            plotMat.append(v)

    if with_avg_total:
        aveTotal = lines[len(lines) - 1].split()
        classes.append('avg/total')
        vAveTotal = [float(x) for x in aveTotal[1:len(aveTotal) - 1]]
        plotMat.append(vAveTotal)

    plt.imshow(plotMat, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    x_tick_marks = np.arange(3)
    y_tick_marks = np.arange(len(classes))
    # the support column is dropped during parsing, so only three measures are plotted
    plt.xticks(x_tick_marks, ['precision', 'recall', 'f1-score'], rotation=45)
    plt.yticks(y_tick_marks, classes)
    plt.tight_layout()
    plt.ylabel('Classes')
    plt.xlabel('Measures')
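A usage sketch, not from the original project: the parser above expects the plain-text layout of an older scikit-learn classification_report, so the report string is written out by hand here.

import matplotlib.pyplot as plt

cr = ("             precision    recall  f1-score   support\n"
      "\n"
      "        cat       0.75      0.60      0.67         5\n"
      "        dog       0.67      0.80      0.73         5\n"
      "\n"
      "avg / total       0.71      0.70      0.70        10\n")
plot_classification_report(cr)
plt.show()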
Example #11
def analyze():
    loop = True
    while loop is True:
        img1 = cv2.imread('Images.png', 0)
        img2 = cv2.imread('data.png', 0)

        orb = cv2.ORB_create()

        kp1, des1 = orb.detectAndCompute(img1, None)
        kp2, des2 = orb.detectAndCompute(img2, None)

        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

        matches = bf.match(des1, des2)
        matches = sorted(matches, key=lambda x: x.distance)

        img3 = cv2.drawMatches(img1,
                               kp1,
                               img2,
                               kp2,
                               matches[:10],
                               None,
                               flags=2)
        plt.imshow(img3)
        plt.show()
def plot_confusion_matrix(confusion_matrix,
                          class_labels,
                          normalize=False,
                          title='Confusion Matrix',
                          cmap=plt.cm.Blues):
    """ Code courtesy of Abinav Sagar: https://towardsdatascience.com/convolutional-neural-network-for-breast-cancer-classification-52f1213dcc9 """

    if normalize:
        confusion_matrix = confusion_matrix.astype(
            'float') / confusion_matrix.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    print(confusion_matrix)

    plt.imshow(confusion_matrix, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(class_labels))
    plt.xticks(tick_marks, class_labels, rotation=55)
    plt.yticks(tick_marks, class_labels)
    fmt = '.2f' if normalize else 'd'
    thresh = confusion_matrix.max() / 2.
    for i, j in itertools.product(range(confusion_matrix.shape[0]),
                                  range(confusion_matrix.shape[1])):
        plt.text(j,
                 i,
                 format(confusion_matrix[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if confusion_matrix[i, j] > thresh else "black")

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
Example #13
def plotConfusionMatrix(lbllist, predlist, classes, type):
    confusionMatrix = confusion_matrix(lbllist, predlist)

    # print(confusionMatrix)

    plt.imshow(confusionMatrix, interpolation="nearest", cmap=plt.cm.Blues)
    if type == 'train':
        plt.title("Confusion matrix training")
    elif type == 'test':
        plt.title("Confusion matrix testing")
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    fmt = "d"
    thresh = confusionMatrix.max() / 2.
    for i, j in itertools.product(range(confusionMatrix.shape[0]),
                                  range(confusionMatrix.shape[1])):
        plt.text(j,
                 i,
                 format(confusionMatrix[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if confusionMatrix[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel("True label")
    plt.xlabel("Predicted label")
    # plt.show()
    if type == 'train':
        plt.savefig(LOG_PATH + 'Confusion matrix training.png')
    elif type == 'test':
        plt.savefig(LOG_PATH + 'Confusion matrix testing.png')
    plt.close()
Example #14
def main():
    src_image_path = "src.jpg"
    des_image_path = "dst.jpg"

    img1 = cv2.imread(src_image_path, 0) / 255.0  # greyscale
    img2 = cv2.imread(des_image_path, 0) / 255.0  # greyscale

    # Part A
    corner_points1, descriptors1 = HCD.harris_corner(img1)
    corner_points2, descriptors2 = HCD.harris_corner(img2)

    print "descriptors1 length ", len(descriptors1)
    print "descriptors2 length ", len(descriptors2)

    # Part B
    matches_arr = match.match(corner_points1, corner_points2, descriptors1, descriptors2, match.hamming_metric, n=50)
    print(matches_arr.shape)
    print(matches_arr)
    

    # Part C
    H = findHomography.best_H(matches_arr)
    print(H)
    # Part D
    panorama = ws.warp_and_stitch(img1,img2,H)
    
    plt.imshow(panorama, cmap='gray')
    plt.show()
def plot_confusion_matrix(cm, classes, title='混淆矩阵', cmap=plt.cm.Greens):  # default title: "Confusion matrix"
    # imshow() draws and displays a 2D image; it takes many parameters
    # Argument 1: X, the values to display (here the confusion matrix, a 2D array)
    # Argument 2: cmap, the colormap; plt.cm.Blues is blue, plt.cm.Reds is red, plt.cm.Greens is green
    # Argument 5: interpolation, the interpolation method; common values are
    #     nearest  - nearest-neighbour interpolation
    #     bilinear - bilinear interpolation
    plt.rcParams['font.sans-serif'] = ['SimHei']  # a font that can render the Chinese labels
    plt.rcParams['axes.unicode_minus'] = False
    plt.imshow(cm, cmap=cmap, interpolation="nearest")
    plt.title(title)  # title
    plt.colorbar()  # show the colour scale
    tick_marks = np.arange(2)  # [0 1]
    plt.xticks(tick_marks, classes)  # label the classes on the x axis
    plt.yticks(tick_marks, classes)  # label the classes on the y axis

    thresh = np.mean(cm)
    for i in range(2):
        for j in range(2):
            plt.text(i,
                     j,
                     cm[j][i],
                     horizontalalignment='center',
                     # compare the same cell value that is drawn at this position
                     color='white' if cm[j][i] >= thresh else 'black')

    plt.xlabel('预测值')  # "Predicted value"
    plt.ylabel('真实值')  # "True value"
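A standalone sketch of the two imshow arguments described in the comments above (cmap and interpolation), using a toy 2x2 matrix.

import numpy as np
import matplotlib.pyplot as plt

cm = np.array([[50, 10],
               [5, 35]])
plt.imshow(cm, cmap=plt.cm.Greens, interpolation='nearest')  # colormap + interpolation method
plt.colorbar()
plt.show()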
Example #16
def plot_confusion_matrix(cm,
                          classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print("Confusion Matrix, without normalization")
    print(cm)
    # imshow displays data as an image on a 2D raster
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    # returns evenly spaced values within a given interval
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j,
                 i,
                 format(cm[i, j], fmt),
                 horizontalalignment='center',
                 color='white' if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True Label')
    plt.xlabel('Predicted Label')
Example #17
def plot_images(headers):
    ''' function to plot images from headers.

    It plots the images and returns nothing.
    Parameters
    ----------
        headers : databroker header object, or a list of header objects
            header objects obtained from a query to dataBroker
    '''
    # prepare header
    if type(list(headers)[1]) == str:
        header_list = list()
        header_list.append(headers)
    else:
        header_list = headers

    for header in header_list:
        uid = header.start.uid
        img_field = _identify_image_field(header)
        imgs = np.array(get_images(header, img_field))
        print('Plotting your data now...')
        for i in range(imgs.shape[0]):
            img = imgs[i]
            plot_title = '_'.join([uid, str(i)])  # str.join takes a single iterable
            # just display user uid and index of this image
            try:
                fig = plt.figure(plot_title)
                plt.imshow(img)
                plt.show()
            except Exception:
                pass  # allow matplotlib to fail without stopping the rest of the loop
Example #18
def imshow(image, title=None):
  if len(image.shape) > 3:
    image = tf.squeeze(image, axis=0)

  plt.imshow(image)
  if title:
    plt.title(title)
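A usage sketch for the helper above, assuming TensorFlow is available; the tensor is random stand-in data in [0, 1].

import tensorflow as tf
import matplotlib.pyplot as plt

image = tf.random.uniform((1, 224, 224, 3))   # fake batched image
imshow(image, title="random image")           # the helper squeezes the batch dimension
plt.show()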
Example #19
def visualize_image(digits, num_width, num_height, mode=None, dir=None):
    for h in range(num_height):
        for w in range(num_width):
            # concatenate the images horizontally
            if w != 0:
                tmp_img = np.hstack((tmp_img, digits[w + h * num_width]))
            else:
                tmp_img = digits[w + h * num_width]

        # concatenate the rows vertically
        if h != 0:
            img = np.vstack((img, tmp_img))
        else:
            img = tmp_img

    # display mode
    if mode == "show":
        plt.imshow(img, cmap='gray')
        plt.axis('off')
        plt.show()
    # save mode
    elif mode == "save":
        if dir is not None:  # if a file name was given
            pilImg = Image.fromarray(img)  # convert to a PIL image (uint)
            pilImg.save(dir)
def print_save_all_mean_faces(x_class_mean, global_mean, show, save):
    rows = 4
    cols = 14
    index = 1
    font_size = 10
    plt.figure(figsize=(20, 10))
    plt.subplot(rows, cols, index)
    plt.imshow(np.reshape(global_mean, (46, 56)).T, cmap='gist_gray')
    title = "Global Mean"
    plt.title(title, fontsize=font_size).set_position([0.5, 0.95])
    plt.xticks([])
    plt.yticks([])
    index = index + 1
    for i in range(0, x_class_mean.shape[1]):
        title = "Class Mean " + str(i + 1)
        plt.subplot(rows, cols, index)
        plt.imshow(np.reshape(x_class_mean[:, i], (46, 56)).T, cmap='gist_gray')
        plt.title(title, fontsize=font_size).set_position([0.5, 0.95])
        plt.xticks([])
        plt.yticks([])
        index = index + 1
    if show == 'yes':
        plt.show()
    if save == 'yes':
        plt.savefig('Global and Class Mean')
    plt.close()
def imshow(img):
    img = img / 2 + 0.5  # unnormalize: input = output * 0.5 + 0.5
    npimg = img.numpy()  # convert to a NumPy array
    # PyTorch image tensors are ordered [channel, height, width] (no batch dimension here),
    # so transpose to [height, width, channel] before handing the array to imshow
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.show()
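A usage sketch for the helper above, assuming PyTorch; the tensor mimics an image normalized with mean 0.5 and std 0.5.

import torch

img = torch.rand(3, 32, 32) * 2 - 1   # fake CHW image in [-1, 1]
imshow(img)                           # maps back to [0, 1], converts to HWC and shows it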
Example #22
def draw_image(byte_array, img_title):
    img_byte_array_len = len(np.ravel(byte_array))
    dim = int(np.sqrt(img_byte_array_len))
    if not ENABLE_IMAGE_SHOW:
        return
    plt.imshow(byte_array.reshape(dim, dim), interpolation='None', cmap=cm.gray)
    plt.title(img_title)
    show()
Example #23
def show_number(A):
    """Plot a matrix to the screen
    Expects A to be a sequence or sequence of sequences (of the same size).
    A should include only numerical values
    Return None"""
    B = np.array(A)
    ploty.imshow(B, cmap='gray')
    ploty.show()
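A tiny usage sketch, assuming show_number and its ploty alias for matplotlib.pyplot are in scope.

show_number([[0, 1],
             [1, 0]])   # plots a 2x2 checkerboard in grayscale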
Example #24
def visualize(win,
              rgb=False,
              imSize=None,
              hidDims=None,
              ordered=False,
              saveFile=None,
              normalize=False):

    if rgb:
        visualizeRGB(win,
                     imSize=imSize,
                     hidDims=hidDims,
                     saveFile=saveFile,
                     normalize=normalize,
                     ordered=ordered)
        return
    w = win - np.min(win)
    w /= np.max(w)
    numVis, numHid = w.shape
    if imSize is None:
        imSize = (int(np.sqrt(numVis)), int(np.sqrt(numVis)))
    assert (imSize[0] * imSize[1] == numVis)
    if hidDims is None:
        tmp = min(20, int(np.ceil(np.sqrt(numHid))))
        hidDims = (tmp, tmp)

    if ordered:
        valList = []
        for h in range(numHid):
            wtmp = w[:, h] - np.mean(w[:, h])
            val = wtmp.dot(wtmp)
            valList.append(val)
        order = np.argsort(valList)[::-1]

    margin = 1
    img = np.zeros(
        (hidDims[0] * (imSize[0] + margin), hidDims[1] * (imSize[1] + margin)))
    for h in range(min(hidDims[0] * hidDims[1], numHid)):
        i = h // hidDims[1]  # integer division so i stays usable as a grid row index
        j = h % hidDims[1]
        if ordered:
            hshow = order[h]
        else:
            hshow = h
        content = (np.reshape(w[:, hshow], imSize))  # - np.mean(w[:,hshow]))
        img[(i * (imSize[0] + margin)):(i * (imSize[0] + margin) + imSize[0]),
            (j * (imSize[1] + margin)):(j * (imSize[1] + margin) +
                                        imSize[1])] = content
    plt.figure()
    plt.axis('off')
    plt.imshow(img, cmap=plt.cm.Greys_r, interpolation="nearest")
    if saveFile is not None:
        plt.tight_layout()
        plt.savefig('./figures/' + saveFile + ".svg",
                    bbox_inches='tight',
                    dpi=2000)

    plt.show()
Example #25
def visualizeRGB(win,
                 imSize=None,
                 hidDims=None,
                 ordered=False,
                 saveFile=None,
                 normalize=False):
    w = win - np.min(win)
    w /= np.max(w)
    numVis, numHid = w.shape
    numVis //= 3  # three colour channels are stacked along the visible dimension

    if imSize is None:
        imSize = (int(np.sqrt(numVis)), int(np.sqrt(numVis)))
    assert (imSize[0] * imSize[1] == numVis)

    if hidDims is None:
        tmp = min(20, int(np.ceil(np.sqrt(numHid))))
        hidDims = (tmp, tmp)
    margin = 2
    img = np.zeros((hidDims[0] * (imSize[0] + margin),
                    hidDims[1] * (imSize[1] + margin), 3))

    if ordered:
        valList = []
        for h in range(numHid):
            wtmp = w[:, h] - np.mean(w[:, h])
            val = wtmp.dot(wtmp)
            valList.append(val)
        order = np.argsort(valList)[::-1]

    for h in range(min(hidDims[0] * hidDims[1], numHid)):
        i = h // hidDims[1]  # integer division so i stays usable as a grid row index
        j = h % hidDims[1]
        if ordered:
            hshow = order[h]
        else:
            hshow = h
        for co in range(3):
            tmp = np.reshape(w[(numVis * co):(numVis * (co + 1)), hshow],
                             imSize)
            if normalize:
                tmp -= tmp.min()
                tmp /= tmp.max()
            img[(i * (imSize[0] + margin)):(i * (imSize[0] + margin) +
                                            imSize[0]),
                (j * (imSize[1] + margin)):(j * (imSize[1] + margin) +
                                            imSize[1]), co] = tmp

    plt.axis('off')
    if saveFile is not None:
        plt.tight_layout()
        plt.savefig('./figures/' + saveFile + ".svg",
                    bbox_inches='tight',
                    dpi=2000)
    else:
        plt.imshow(img)

    plt.show()
Example #26
def displayImg(X, amount):
    if (X.shape[1] == 784):
        for s in range(amount):
            plt.imshow(X[s].reshape(28, 28),
                       interpolation='None',
                       cmap=cm.gray)
            plt.show()
    else:
        print("Dimension is not 784.")
Example #27
def confused_pic(class_indices, y, prediction, decision_matrix,testData):
   wrong_classify = y != prediction
   index_wrong = np.argwhere(wrong_classify == True)  # indices for misclassified example
   index_wrong= index_wrong[:, 0]
   y_wrong = y[wrong_classify]  # belonged class for the misclassified
   prediction_wrong = prediction[wrong_classify]  # the predicted class for the misclassified examples
   wrong_decision_matrix = decision_matrix[wrong_classify, :]  # only the rows that misclassified

   # mapping class to index -> the first class will map to 0 the second to 1 and go on...
   j = 0  # counter
   y_wrong_mapping = np.copy(y_wrong)
   prediction_wrong_mapping = np.copy(prediction_wrong)
   for i in class_indices:
       y_wrong_mapping[y_wrong_mapping == i] = j
       prediction_wrong_mapping[prediction_wrong_mapping == i] = j
       j = j + 1
   y_wrong_mapping = y_wrong_mapping.astype(int)
   prediction_wrong_mapping = prediction_wrong_mapping.astype(int)

   # create vector with the mistake value to the correct class from the predicted class
   mistakes_values = np.asarray([])
   for i in range(y_wrong.shape[0]):  # loop over each row of wrong_decision_matrix, all the misclassified
       mistake = wrong_decision_matrix[i, y_wrong_mapping[i]] - wrong_decision_matrix[i, prediction_wrong_mapping[i]]
       mistakes_values = np.append(mistakes_values, mistake)

   # combine mistake, belonged class, original index to one matrix
   combine = np.asarray([mistakes_values, y_wrong, index_wrong])
   combine = np.transpose(combine)
   # sorting the combined matrix by the mistake
   combine = combine[combine[:, 0].argsort()[::-1]]
   # sorting the combined matrix by the mistake and then by class
   combine = combine[combine[:, 1].argsort()]

   # find 2 largest mistakes
   example_idx = np.asarray([])
   belonged_class = np.asarray([])
   for i in range(combine.shape[0]):
       if example_idx.shape[0] >= 2:
           if belonged_class[-1] == combine[i, 1] and belonged_class[-2] == combine[i, 1]:
               continue
           else:
               example_idx = np.append(example_idx, combine[i, 2])
               belonged_class = np.append(belonged_class, combine[i, 1])
       else:
           example_idx = np.append(example_idx, combine[i, 2])
           belonged_class = np.append(belonged_class, combine[i, 1])

   # show the images
   for i in range(example_idx.shape[0]):
       cv2.imshow('',testData[example_idx.astype(int)][i])
       cv2.waitKey()
       cv2.destroyAllWindows()
       plt.imshow(testData[example_idx.astype(int)][i], cmap = 'gray')
       plt.xticks([]), plt.yticks([])  # to hide tick values on X and Y axis
       plt.show()
   return
Example #28
def plot_gradient(gradient):
    """
    This function plots the gradient after normalizing it.
    """
    # Normalize the gradient so it is between 0.0 and 1.0
    gradient_normalized = normalize_image(gradient)

    # Plot the normalized gradient.
    plt.imshow(gradient_normalized, interpolation='bilinear')
    plt.show()
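normalize_image is not shown in this snippet; a hypothetical stand-in consistent with the comment (rescale values into the 0.0-1.0 range) might look like this.

import numpy as np

def normalize_image(x):
    """Linearly rescale an array so its values lie in [0.0, 1.0] (stand-in, not the original helper)."""
    x = np.asarray(x, dtype=np.float64)
    span = x.max() - x.min()
    return (x - x.min()) / span if span > 0 else np.zeros_like(x)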
Example #29
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
    """Helper function to plot a gallery of portraits"""
    pl.figure(figsize=(1.8 * n_col, 2.4 * n_row))
    pl.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
    for i in range(n_row * n_col):
        pl.subplot(n_row, n_col, i + 1)
        pl.imshow(images[i].reshape((h, w)), cmap=pl.cm.gray)
        pl.title(titles[i], size=12)
        pl.xticks(())
        pl.yticks(())
Example #30
 def plotNNFilter(units):
     filters = 3
     fig = plt.figure(1, figsize=(20, 20))
     n_columns = 6
     n_rows = math.ceil(filters / n_columns) + 1
     for i in range(filters):
         plt.subplot(n_rows, n_columns, i + 1)
         plt.title('Filter ' + str(i))
         plt.imshow(units[0, :, :, i], interpolation="nearest", cmap="gray")
     plt.savefig('/Users/raghav/Documents/Uni/oc-nn/models/representation_sigmoid_dog.png')
Example #31
    def retrieve_input():
        text = T.get("1.0",'end-1c')

        
        #Functions: Remove stopwords, lemmatize the words, tokenize the words, remove punctuation
        def remove_stopwords(text):
          sw=stopwords.words('french')
          #sw=['de','al','se','mi','me','te','le','les','nos','os','les','tu','el','me','un','la','y''que','lo','en','es','a','no','para','una','él','pero','tien','todo','o','está','día','persona','cuando','caso','si','casa','había','muy','ella','esta']
          words = [w for w in text if not w in sw]
          return words
        def make_lower(text):
          words= text.lower()
          return words
        def remove_punc(text):
          words = [word for word in text if word.isalpha()]
          return words
        def lemmatize_words(text):
          porter=PorterStemmer()
          words=[porter.stem(word) for word in text]
          return words

        words = nltk.tokenize.word_tokenize(text)
        words=remove_punc(words)
        words=lemmatize_words(words)
        words=remove_stopwords(words)


        word_dist = nltk.FreqDist(words)
        top_N = 50
        rslt = pd.DataFrame(word_dist.most_common(top_N),
        columns=['Word', 'Frequency'])
        stringss = rslt['Word'][0]+" was your most common word!"
        gameDisplay.fill(light_blue);
        message_to_screen(stringss, black, 40, 100, 100)
        pygame.display.update() 


        
        newstr=''
        for i in words:
          newstr=newstr+' '+i
        from PIL import Image
        from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
        import matplotlib.pyplot as plt

        newstr=''
        for i in words:
          newstr=newstr+' '+i
        wordcloud = WordCloud().generate(newstr)

        # Display the generated image:
        plt.imshow(wordcloud, interpolation='bilinear')
        plt.axis("off")
        plt.show()
        wordcloud.to_file("cloud.tiff")
Example #32
def generate_plot(array, vmin, vmax, figNumber=1):
    plt.figure(figNumber)
    plt.subplot(2, 3, i)  # i, row and incAnglerad are module-level globals in the original script
    print(i)
    plt.imshow(array, vmin=vmin, vmax=vmax, interpolation=None)
    plt.xlabel('Sample')
    plt.ylabel('Line')
    plt.title(row[0])
    cb = plt.colorbar(orientation='horizontal', spacing='proportional',
                      ticks=[vmin, vmax], format='%.2f')
    cb.set_label('Reflectance / cos({0:.2f})'.format(incAnglerad * 180.0 / math.pi))
    plt.grid(True)
Example #33
def draw_circle(c,r):
	t = arange(0,1.01,.01)*2*pi
	x = r*cos(t) + c[0]
	y = r*sin(t) + c[1]
	plt.plot(x,y,'b',linewidth=2)
	plt.imshow(im)
	if circle:
		for p in locs:
			plt.draw_circle(p[:2],p[2])
	else:
		plt.plot(locs[:,0],locs[:,1],'ob')
	plt.axis('off')
Example #34
 def plot(self, output):
     plt.figure(figsize=output.fsize, dpi=output.dpi)
     for ii in range(0, len(self.v)):
         imsize = [self.t[0], self.t[-1], self.x[ii][-1], self.x[ii][0]]
         lim = amax(absolute(self.v[ii])) / output.scale_sat
         plt.imshow(self.v[ii], extent=imsize, vmin=-lim, vmax=lim, cmap=cm.gray, origin='upper', aspect='auto')
         plt.title("%s-Velocity for Trace #%i" % (self.comp.upper(), ii))
         plt.xlabel('Time (s)')
         plt.ylabel('Offset (km)')
         #plt.colorbar()
         plt.savefig("Trace_%i_v%s.pdf" % (ii, self.comp))
         plt.clf()
Example #35
def compress_kmeans(im, k=4):
    height, width, depth = im.shape

    data = im.reshape((height * width, depth))
    labels, centers = kmeans(data, k, 1e-2)
    rep = closest(data, centers)
    data_compressed = centers[rep]

    im_compressed = data_compressed.reshape((height, width, depth))
    plt.figure()
    plt.imshow(im_compressed)
    plt.show()
def plot_confusion_matrix(cm, labels, title='Confusion matrix', cmap=plt.cm.Blues, save=False):
    plt.figure()
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(labels))
    plt.xticks(tick_marks, labels, rotation=45)
    plt.yticks(tick_marks, labels)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    if save:
        plt.savefig(save)  # save before show(), which may clear the current figure
    plt.show()
Example #37
def plotColorCodedNetworkSpikes(network):
    assert network is not None, "Network is not initialised! Visualising failed."
    import matplotlib.pyplot as plt
    from matplotlib import animation
    from NetworkBuilder import sameDisparityInd
    
    cellsOutSortedByDisp = []
    spikes = []
    for disp in range(0, maxDisparity+1):
        cellsOutSortedByDisp.append([network[x][2] for x in sameDisparityInd[disp]])
        spikes.append([x.getSpikes() for x in cellsOutSortedByDisp[disp]])
    
    sortedSpikes = sortSpikesByColor(spikes)
    print(sortedSpikes)
    framesOfSpikes = generateColoredFrames(sortedSpikes)
    print(framesOfSpikes)
    
    fig = plt.figure()
    
    initialData = createInitialisingDataColoredPlot()
    
    imNet = plt.imshow(initialData[0], c=initialData[1], cmap=plt.cm.coolwarm, interpolation='none', origin='upper')
    
    plt.xticks(range(0, dimensionRetinaX)) 
    plt.yticks(range(0, dimensionRetinaY))
    plt.title("Disparity Map {0}".format(disparity))
    args = (framesOfSpikes, imNet)
    anim = animation.FuncAnimation(fig, animate, fargs=args, frames=int(simulationTime)*10, interval=30)
          
    plt.show()
Example #38
def plotRetinaSpikes(retina=None, label=""):
    
    assert retina is not None, "Network is not initialised! Visualising failed."
    import matplotlib.pyplot as plt
    from matplotlib import animation
    
    print "Visualising {0} Spikes...".format(label) 

    spikes = [x.getSpikes() for x in retina]
#     print spikes
    
    sortedSpikes = sortSpikes(spikes)
#     print sortedSpikes
    
    framesOfSpikes = generateFrames(sortedSpikes)
#     print framesOfSpikes
    
    x = range(0, dimensionRetinaX)
    y = range(0, dimensionRetinaY)
    from numpy import meshgrid
    rows, pixels = meshgrid(x,y)
    
    fig = plt.figure()
    
    initialData = createInitialisingData()
    
    imNet = plt.imshow(initialData, cmap='Greens', interpolation='none', origin='upper')  # 'green' is not a registered colormap name
    
    plt.xticks(range(0, dimensionRetinaX)) 
    plt.yticks(range(0, dimensionRetinaY))
    args = (framesOfSpikes, imNet)
    anim = animation.FuncAnimation(fig, animate, fargs=args, frames=int(simulationTime)*10, interval=30)
          
    plt.show()
Example #39
def plot_confusion_matrix(cm, classes,
    normalize=False, title='Confusion matrix',
    cmap=plt.cm.Blues, filename='viz\\confusion_matrix.png'):
  plt.figure()
  if normalize:  # normalize before plotting so the heatmap matches the cell text
      cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

  plt.imshow(cm, interpolation='nearest', cmap=cmap)
  plt.title(title)
  plt.colorbar()
  tick_marks = np.arange(len(classes))
  plt.xticks(tick_marks, classes, rotation=45)
  plt.yticks(tick_marks, classes)

  thresh = cm.max() / 2.
  for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
      plt.text(j, i, cm[i, j], horizontalalignment="center", color="white" if cm[i, j] > thresh else "black")

  plt.tight_layout()
  plt.ylabel('True label')
  plt.xlabel('Predicted label')
  plt.savefig(filename)
Example #40
    def plot(self, output):
        # Create adaptive scale
        scale_len = 100
        scale_fix = 250
        nframes = self.v.shape[2]
        scale = zeros((nframes, 1))
        win = ones((scale_len, 1))
        for ii in range(0, nframes):
            scale[ii] = amax(absolute(self.v[:, :, ii]))
        scale = convolve(squeeze(scale), squeeze(win), mode='same') / output.scale_sat
        if (self.writestep == 0):
            scale[:scale_fix] = scale[scale_fix]

        # Initialize figure
        comp = {'x': ['Y', 'Z'], 'y': ['X', 'Z'], 'z': ['X', 'Y']}
        fig = plt.figure(figsize=(output.hres / output.sres, output.hres / (output.sres * output.hratio)),
                         dpi=output.sres)
        imsize = [self.x[0], self.x[-1], self.y[-1], self.y[0]]
        vimg = plt.imshow(transpose(self.v[:, :, 0]), extent=imsize, vmin=-scale[0], vmax=scale[0], cmap=cm.RdBu)
        vtitle = plt.title('')
        plt.xlabel(comp[self.dir][0])
        plt.ylabel(comp[self.dir][1])
        plt.colorbar()

        def animate(ii):
            vimg.set_array(transpose(self.v[:, :, ii]))
            vimg.set_clim(-scale[ii], scale[ii])
            vtitle.set_text("%s for %s=%s km (t=%1.2e s)" % (self.type, self.dir, self.loc, self.t[ii]))
            return vimg, vtitle

        try:
            ani = animation.FuncAnimation(fig, animate, frames=self.v.shape[2], interval=20, blit=False, repeat=False)
            if (self.writestep == 0):
                ani.save("./%s_%s_%s.mp4" % (self.dir, self.loc, self.type), fps=30, codec='libx264', bitrate=1800)
            else:
                ani.save("./%s_%s_%s_%i.mp4" % (self.dir, self.loc, self.type, self.writestep), fps=30, codec='libx264', bitrate=1800)
        except IndexError:
            print('To render movies, make sure that ffmpeg is installed!')
        self.writestep += 1
Example #41
def plotDisparityMap(network=None, disparity=0):
    
    assert network is not None, "Network is not initialised! Visualising failed."
    assert disparity >= 0 and disparity <= maxDisparity, "No such disparity map in the network."
    import matplotlib.pyplot as plt
    from matplotlib import animation
    from NetworkBuilder import sameDisparityInd
    
    print "Visualising results for disparity value {0}...".format(disparity) 
    
    cellsOut = [network[x][2] for x in sameDisparityInd[disparity]]

    spikes = [x.getSpikes() for x in cellsOut]
#     print spikes
    
    sortedSpikes = sortSpikes(spikes)
#     print sortedSpikes
    
    framesOfSpikes = generateFrames(sortedSpikes)
#     print framesOfSpikes
    
    x = range(0, dimensionRetinaX)
    y = range(0, dimensionRetinaY)
    from numpy import meshgrid
    rows, pixels = meshgrid(x,y)
    
    fig = plt.figure()
    
    initialData = createInitialisingData()
#     print initialData
    imNet = plt.imshow(initialData, cmap='gray', interpolation='none', origin='upper')
    
    plt.xticks(range(0, dimensionRetinaX)) 
    plt.yticks(range(0, dimensionRetinaY))
    plt.title("Disparity Map {0}".format(disparity))
    args = (framesOfSpikes, imNet)
    anim = animation.FuncAnimation(fig, animate, fargs=args, frames=int(simulationTime)*10, interval=30)
          
    plt.show()
Example #42
    def plot(self, model, output):
        Z = linspace(0, model.number[2] * model.spacing[2], model.number[2]) + model.spacing[2]
        imsize = [model.origin[0], model.size[0] + model.origin[0], model.origin[1], model.size[1] + model.origin[1]]
        fig = plt.figure(figsize=(output.hres / output.sres, output.hres / (output.sres * output.hratio)),
                         dpi=output.sres)
        vimg = plt.imshow(transpose(self.v[:, :, 0]), extent=imsize, vmin=round(amin(self.v), 1) - 0.05, vmax=round(amax(self.v) + 0.05, 1),
                          cmap=cm.jet)
        vtitle = plt.title('')
        plt.xlabel('X')
        plt.ylabel('Y')
        plt.colorbar()

        def animate(ii):
            vimg.set_array(transpose(self.v[:, :, ii]))
            vtitle.set_text("%s Plot (Z = %1.2fkm)" % (self.name, Z[ii]))
            return vimg, vtitle

        try:
            ani = animation.FuncAnimation(fig, animate, frames=len(Z), interval=20, blit=False, repeat=False)
            ani.save("./%s.mp4" % self.type, fps=30, codec='libx264', bitrate=1800)
        except IndexError:
            print('To render movies, make sure that ffmpeg is installed!')
Example #43
import cv2
import matplotlib.pyplot as plt
#http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_feature2d/py_surf_intro/py_surf_intro.html
img = cv2.resize(cv2.imread('im2_1.jpg'),(800, 600))
# Create SURF object. You can specify params here or later.
# Here I set Hessian Threshold to 400
surf = cv2.SURF(400)
# In actual cases, it is better to have a value 300-500
surf.hessianThreshold = 500
# Find keypoints and descriptors directly
kp, des = surf.detectAndCompute(img,None)


img2 = cv2.drawKeypoints(img,kp,None,(255,0,0),4)
plt.imshow(img2)
plt.show()
Example #44
        return int(d)+14
    elif m == '07':
        return int(d)+34

df["date"] = [t[0:4] + t[5:7] + t[8:10] for t in df["lastupdated"]]
df["time"] = [t[11:13] + t[14:16] for t in df["lastupdated"]]
df["day"] = [date2int(t) for t in df["lastupdated"]] # day 0 is May 17, 2014
df["dayofwk"] = [(t+6)%7 for t in df["day"]] # 0 indexed Sunday
df.head()

# <codecell>

plt.figure(figsize=(10,15))

im = plt.imread('chicago.png')
implot = plt.imshow(im)

x = (df['west'] - df['west'].min())*477/(df['east'].max() - df['west'].min())
y = 798-(df['north'] - df['south'].min())*798/(df['north'].max() - df['south'].min())
s = df['currentspeed'] / df['currentspeed'].max()
plt.scatter(x,y,c=s,linewidth=0,s=1000,alpha=0.1)

#x0 = (df.ix[0]['west'] - df['west'].min())*477/(df['east'].max() - df['west'].min())
#y0 = 798-(df.ix[0]['north'] - df['south'].min())*798/(df['north'].max() - df['south'].min())
#plt.scatter(x0,y0,c='r',s=2000)
#x0 = (df.ix[0]['east'] - df['west'].min())*477/(df['east'].max() - df['west'].min())
#y0 = 798-(df.ix[0]['south'] - df['south'].min())*798/(df['north'].max() - df['south'].min())
#plt.scatter(x0,y0,c='r',s=2000)
plt.xlim(0,477)
plt.ylim(798,0)
plt.xticks([])
Example #45
	if yc > 50:
		yc = 100-yc
	x.append(xc)
	y.append(yc)
	data.append({"x":xc*11+30 , "y":yc*10.5+10, "count":1})

print(data)


# color =['m','g']

# scatter(y,x, s=100 ,marker='o', c=color)
# show()


heatmap, xedges, yedges = np.histogram2d(x, y, bins=100)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]

plt.clf()
plt.imshow(heatmap, extent=extent)  # plot the 2D histogram computed above
plt.colorbar()
plt.show()

# 	gamedata = box.find_one({"gameId":shot['gameId']})
# 	print gamedata['team1']['name'] + " vs. " + gamedata['team2']['name']

# for game in box.find({"$or":[{"team1.name":"Dallas Mavericks"}, {"team2.name":"Dallas Mavericks"}]}):
# 	if game['team1']['name'] == 'Dallas Mavericks':
# 		# print game['team1']['tto']
# 	else:
# 		# print game['team2']['tto']
Example #46
dat = pd.read_csv('Voters.csv').to_numpy()  # .as_matrix() was removed from pandas
x=dat[:,0]
y=dat[:,1]
plt.scatter(x,y)
plt.show()
plt.hist(x)
plt.hist(y,bins=15)


#images
train = pd.read_csv('test.csv')
M = train.to_numpy()  # .as_matrix() was removed from pandas
im = M[0, 1:]
im = im.reshape(28, 28)
plt.imshow(im)
plt.show()
plt.imshow(im,cmap="gray")


from scipy.stats import norm
norm.pdf(0)
norm.pdf(0,loc=5, scale=10)
r=np.random.randn(10)
norm.pdf(r)
norm.cdf(r)
r=10*np.random.randn(10000)+5
plt.hist(r,bins=100)

r=np.random.randn(10000,2)
plt.scatter(r[:,0],r[:,1])
Example #47
def train_dcgan_labeled(gen, dis, epoch0=0):
    print('CHAINER begin training');    sys.stdout.flush()

    o_gen = optimizers.Adam(alpha=0.0002, beta1=0.5)
    o_dis = optimizers.Adam(alpha=0.0002, beta1=0.5)
    print('CHAINER begin gen');    sys.stdout.flush()
    o_gen.setup(gen)
    o_dis.setup(dis)
    print('CHAINER begin add');    sys.stdout.flush()
    o_gen.add_hook(chainer.optimizer.WeightDecay(0.00001))
    o_dis.add_hook(chainer.optimizer.WeightDecay(0.00001))
    print('CHAINER begin zvis');    sys.stdout.flush()
    # zvis = (xp.random.uniform(-1, 1, (100, nz), dtype=np.float32))
    print('CHAINER begin for');    sys.stdout.flush()
    for epoch in range(epoch0,n_epoch):
        print("epoch:",epoch)
        sys.stdout.flush()
        perm = np.random.permutation(n_train)
        sum_l_dis = np.float32(0)
        sum_l_gen = np.float32(0)
        
        for i in range(0, n_train, batchsize):
            # discriminator
            # 0: from dataset
            # 1: from noise

            #print "load image start ", i
            x2 = np.zeros((batchsize, 3, 96, 96), dtype=np.float32)
            for j in range(batchsize):
                #try:
                    rnd = np.random.randint(len(dataset))
                    rnd2 = np.random.randint(2)

                    img = np.asarray(Image.open(io.BytesIO(dataset[rnd])).convert('RGB')).astype(np.float32).transpose(2, 0, 1)
                    x2[j,:,:,:] = (img[:,0:96,0:96]-128.0)/128.0
                #except:
                #    print('read image error occured', fs[rnd])
            #print "load image done"
            
            # train generator
            z = Variable(xp.random.uniform(-1, 1, (batchsize, nz), dtype=np.float32))
            x = gen(z)
            yl = dis(x)
            L_gen = F.softmax_cross_entropy(yl, Variable(xp.zeros(batchsize, dtype=np.int32)))
            L_dis = F.softmax_cross_entropy(yl, Variable(xp.ones(batchsize, dtype=np.int32)))
            
            # train discriminator
                    
            x2 = Variable(cuda.to_gpu(x2))
            yl2 = dis(x2)
            L_dis += F.softmax_cross_entropy(yl2, Variable(xp.zeros(batchsize, dtype=np.int32)))
            
            #print "forward done"

            o_gen.zero_grads()
            L_gen.backward()
            o_gen.update()
            
            o_dis.zero_grads()
            L_dis.backward()
            o_dis.update()
            
            sum_l_gen += L_gen.data.get()
            sum_l_dis += L_dis.data.get()
            
            #print "backward done"

            if i%image_save_interval==0:
                pylab.rcParams['figure.figsize'] = (16.0,16.0)
                pylab.clf()
                vissize = 100
                z = zvis
                z[50:,:] = (xp.random.uniform(-1, 1, (50, nz), dtype=np.float32))
                z = Variable(z)
                x = gen(z, test=True)
                x = x.data.get()
                for i_ in range(100):
                    tmp = ((np.vectorize(clip_img)(x[i_,:,:,:])+1)/2).transpose(1,2,0)
                    pylab.subplot(10,10,i_+1)
                    pylab.imshow(tmp)
                    pylab.axis('off')
                pylab.savefig('%s/vis_%d_%d.png'%(out_image_dir, epoch,i))
                
        serializers.save_hdf5("%s/dcgan_model_dis_%d.h5"%(out_model_dir, epoch),dis)
        serializers.save_hdf5("%s/dcgan_model_gen_%d.h5"%(out_model_dir, epoch),gen)
        serializers.save_hdf5("%s/dcgan_state_dis_%d.h5"%(out_model_dir, epoch),o_dis)
        serializers.save_hdf5("%s/dcgan_state_gen_%d.h5"%(out_model_dir, epoch),o_gen)
        print('epoch end', epoch, sum_l_gen/n_train, sum_l_dis/n_train)