Example #1
File: predict.py Project: ecvgit/deephacks
def forward_pass(args):
    ''' Runs a forward pass to segment the image. '''

    model = get_trained_model(args)

    # Load image and swap RGB -> BGR to match the trained weights
    image_rgb = np.array(Image.open(args.input_path)).astype(np.float32)
    image = image_rgb[:, :, ::-1] - args.mean
    image_size = image.shape

    # Network input shape (batch_size=1)
    net_in = np.zeros((1, input_height, input_width, 3), dtype=np.float32)

    output_height = input_height - 2 * label_margin
    output_width = input_width - 2 * label_margin

    # This simplified prediction code is correct only if the output
    # size is large enough to cover the input without tiling
    assert image_size[0] < output_height
    assert image_size[1] < output_width

    # Center pad the original image by label_margin.
    # This initial pad adds the context required for the prediction
    # according to the preprocessing during training.
    image = np.pad(image,
                   ((label_margin, label_margin),
                    (label_margin, label_margin),
                    (0, 0)), 'reflect')

    # Add the remaining margin to fill the network input width. This
    # time the image is aligned to the upper left corner though.
    margins_h = (0, input_height - image.shape[0])
    margins_w = (0, input_width - image.shape[1])
    image = np.pad(image,
                   (margins_h,
                    margins_w,
                    (0, 0)), 'reflect')

    # Run inference
    net_in[0] = image
    prob = model.predict(net_in)[0]

    # Reshape to 2D here since the network outputs a flat array per channel
    prob_edge = int(np.sqrt(prob.shape[0]))
    prob = prob.reshape((prob_edge, prob_edge, 21))

    # Upsample
    if args.zoom > 1:
        prob = interp_map(prob, args.zoom, image_size[1], image_size[0])

    # Recover the most likely prediction (actual segment class)
    prediction = np.argmax(prob, axis=2)

    # Apply the color palette to the segmented image
    color_image = np.array(pascal_palette)[prediction.ravel()].reshape(
        prediction.shape + (3,))

    print('Saving results to: ', args.output_path)
    with open(args.output_path, 'wb') as out_file:
        Image.fromarray(color_image).save(out_file)
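
Note: the two np.pad calls in Example #1 together grow the image to exactly the network input size. Below is a minimal standalone sketch of that arithmetic, assuming input_height = input_width = 900 (the net_in shape used in Example #4) and a hypothetical label_margin of 186; the image size is made up for illustration.

import numpy as np

input_height = input_width = 900   # assumed network input size
label_margin = 186                 # assumed context margin

image = np.zeros((500, 375, 3), dtype=np.float32)   # hypothetical input image

# Step 1: center pad by label_margin to add the context the network expects
image = np.pad(image, ((label_margin, label_margin),
                       (label_margin, label_margin),
                       (0, 0)), 'reflect')

# Step 2: pad the remainder so the array exactly fills the network input,
# aligned to the upper-left corner
margins_h = (0, input_height - image.shape[0])
margins_w = (0, input_width - image.shape[1])
image = np.pad(image, (margins_h, margins_w, (0, 0)), 'reflect')

assert image.shape == (input_height, input_width, 3)
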
Example #2
def makeimage(prob, image_size):
    ''' Converts a class-probability map into a color segmentation image. '''
    print(prob.shape)

    # Upsample the probability map back to the original image resolution
    if CONFIG[ds]['zoom'] > 1:
        prob = interp_map(prob, CONFIG[ds]['zoom'], image_size[1], image_size[0])

    # Most likely class per pixel (prob is in channels-first ordering)
    prediction = np.argmax(prob, axis=0)
    print(prediction.shape)

    # Map class ids to RGB colors using the dataset palette
    color_image = CONFIG[ds]['palette'][prediction.ravel()].reshape(image_size)
    return color_image
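
Note: the palette lookup in makeimage maps each predicted class id to an RGB color by fancy indexing. Here is a tiny self-contained sketch with a made-up three-class palette; the real CONFIG[ds]['palette'] is assumed to be a (num_classes, 3) array.

import numpy as np

# Made-up 3-class palette (class id -> RGB)
palette = np.array([[0, 0, 0],      # background
                    [128, 0, 0],    # class 1
                    [0, 128, 0]],   # class 2
                   dtype=np.uint8)

prediction = np.array([[0, 1],
                       [2, 1]])     # 2x2 map of class ids

# Fancy indexing replaces each class id with its RGB triple
color_image = palette[prediction.ravel()].reshape(prediction.shape + (3,))
print(color_image.shape)            # (2, 2, 3)
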
Example #3
def predict(image, model, ds):
    ''' Tiled forward pass: segments an image of arbitrary size with the model trained for dataset ds. '''

    # Subtract the dataset mean and read the padding margin from the config
    image = image.astype(np.float32) - CONFIG[ds]['mean_pixel']
    conv_margin = CONFIG[ds]['conv_margin']

    input_dims = (1,) + CONFIG[ds]['input_shape']
    batch_size, num_channels, input_height, input_width = input_dims
    model_in = np.zeros(input_dims, dtype=np.float32)

    image_size = image.shape
    output_height = input_height - 2 * conv_margin
    output_width = input_width - 2 * conv_margin
    image = cv2.copyMakeBorder(image, conv_margin, conv_margin,
                               conv_margin, conv_margin,
                               cv2.BORDER_REFLECT_101)

    # Number of output-sized tiles needed to cover the image in each dimension
    num_tiles_h = image_size[0] // output_height + (1 if image_size[0] % output_height else 0)
    num_tiles_w = image_size[1] // output_width + (1 if image_size[1] % output_width else 0)

    row_prediction = []
    for h in range(num_tiles_h):
        col_prediction = []
        for w in range(num_tiles_w):
            offset = [output_height * h,
                      output_width * w]
            tile = image[offset[0]:offset[0] + input_height,
                         offset[1]:offset[1] + input_width, :]
            margin = [0, input_height - tile.shape[0],
                      0, input_width - tile.shape[1]]
            tile = cv2.copyMakeBorder(tile, margin[0], margin[1],
                                      margin[2], margin[3],
                                      cv2.BORDER_REFLECT_101)
            model_in[0] = tile.transpose([2, 0, 1])

            prob = model.predict(model_in)[0]

            col_prediction.append(prob)

        col_prediction = np.concatenate(col_prediction, axis=2)
        row_prediction.append(col_prediction)
    prob = np.concatenate(row_prediction, axis=1)
    if CONFIG[ds]['zoom'] > 1:
        prob = interp_map(prob, CONFIG[ds]['zoom'], image_size[1], image_size[0])

    prediction = np.argmax(prob, axis=0)
    color_image = CONFIG[ds]['palette'][prediction.ravel()].reshape(image_size)

    return color_image
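
Note: the num_tiles_h / num_tiles_w expressions above implement ceiling division, adding one extra tile whenever the image size is not an exact multiple of the output size. A small check with hypothetical sizes:

import math

def num_tiles(image_extent, output_extent):
    # One output-sized tile per full multiple, plus one for any remainder
    return image_extent // output_extent + (1 if image_extent % output_extent else 0)

# Hypothetical sizes: a 1500-pixel extent covered by 528-pixel output tiles
assert num_tiles(1500, 528) == math.ceil(1500 / 528) == 3
assert num_tiles(1056, 528) == 2    # exact multiple: no extra tile
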
Example #4
def forward_pass(args):
    ''' Runs a forward pass to segment the image. '''

    model = get_trained_model(args)

    image = cv2.imread(args.input_path, 1).astype(np.float32) - args.mean
    image_size = image.shape

    # Shape: (1, 900, 900, 3)
    net_in = np.zeros((1, 900, 900, 3), dtype=np.float32)

    output_height = input_height - 2 * label_margin
    output_width = input_width - 2 * label_margin
    image = cv2.copyMakeBorder(image, label_margin, label_margin, label_margin,
                               label_margin, cv2.BORDER_REFLECT_101)

    # Tile the input to operate on arbitrarily
    # large images.
    num_tiles_h = image_size[0] // output_height + \
                  (1 if image_size[0] % output_height else 0)
    num_tiles_w = image_size[1] // output_width + \
                  (1 if image_size[1] % output_width else 0)

    prediction = []
    for h in range(num_tiles_h):
        col_prediction = []

        for w in range(num_tiles_w):
            offset = [output_height * h, output_width * w]
            tile = image[offset[0]:offset[0] + input_height,
                         offset[1]:offset[1] + input_width, :]
            margin = [
                0, input_height - tile.shape[0], 0, input_width - tile.shape[1]
            ]
            tile = cv2.copyMakeBorder(tile, margin[0], margin[1], margin[2],
                                      margin[3], cv2.BORDER_REFLECT_101)

            # Pass the tile to the network
            net_in[0] = tile
            prob = model.predict(net_in)[0]
            col_prediction.append(prob)

        col_prediction = np.concatenate(col_prediction, axis=2)
        prediction.append(col_prediction)

    prob = np.concatenate(prediction, axis=1)

    if args.zoom > 1:
        prob = prob.transpose(2, 0, 1)  # to caffe ordering
        prob = interp_map(prob, args.zoom, image_size[1], image_size[0])
        prob = prob.transpose(1, 2, 0)  # to tf ordering

    # Recover the most likely prediction (actual segment class)
    prediction = np.argmax(prob, axis=2)

    # Apply the color palette to the segmented image
    color_image = np.array(palette)[prediction.ravel()].reshape(
        prediction.shape + (3, ))
    color_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)

    print('Writing', args.output_path)
    cv2.imwrite(args.output_path, color_image)
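
Note: Example #4 transposes the probability map around the interp_map call because, in this code base, interp_map appears to expect Caffe ordering (channels, height, width) while the Keras model outputs TensorFlow ordering (height, width, channels). A minimal sketch of that round trip with made-up shapes:

import numpy as np

prob_hwc = np.zeros((66, 66, 21), dtype=np.float32)   # hypothetical TF-ordered output

prob_chw = prob_hwc.transpose(2, 0, 1)   # HWC -> CHW ("caffe ordering")
back = prob_chw.transpose(1, 2, 0)       # CHW -> HWC ("tf ordering")

assert prob_chw.shape == (21, 66, 66)
assert back.shape == prob_hwc.shape
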
Example #5
def predict(image, model, ds):
    ''' Tiled forward pass: segments an image of arbitrary size with the model trained for dataset ds. '''

    image = image.astype(np.float32) - CONFIG[ds]['mean_pixel']
    conv_margin = CONFIG[ds]['conv_margin']
    print('conv_margin =', conv_margin)

    input_dims = (1,) + CONFIG[ds]['input_shape']
    print('input dims', input_dims)
    batch_size, num_channels, input_height, input_width = input_dims
    model_in = np.zeros(input_dims, dtype=np.float32)
    print(model_in.shape)
    print(input_height, input_width)

    image_size = image.shape
    output_height = input_height - 2 * conv_margin
    output_width = input_width - 2 * conv_margin
    print(output_width, output_height)

    # Reflect-pad the image by the convolution margin
    image = cv2.copyMakeBorder(image, conv_margin, conv_margin, conv_margin,
                               conv_margin, cv2.BORDER_REFLECT_101)
    print('img_size =', image_size)

    # Number of output-sized tiles needed to cover the image in each dimension
    num_tiles_h = image_size[0] // output_height + (1 if image_size[0] %
                                                    output_height else 0)
    num_tiles_w = image_size[1] // output_width + (1 if image_size[1] %
                                                   output_width else 0)

    row_prediction = []
    count = 0
    for h in range(num_tiles_h):
        col_prediction = []
        for w in range(num_tiles_w):
            offset = [output_height * h, output_width * w]
            tile = image[offset[0]:offset[0] + input_height,
                         offset[1]:offset[1] + input_width, :]
            margin = [
                0, input_height - tile.shape[0], 0, input_width - tile.shape[1]
            ]
            print('margin', h, margin, len(margin))
            tile = cv2.copyMakeBorder(tile, margin[0], margin[1], margin[2],
                                      margin[3], cv2.BORDER_REFLECT_101)
            model_in[0] = tile.transpose([2, 0, 1])

            prob = model.predict(model_in)[0]
            print(len(prob), len(prob[0]), len(prob[0][0]))
            col_prediction.append(prob)
            count = count + 1
            print(h, w, count)

        # Stitch tiles left-to-right (width axis), then rows top-to-bottom (height axis)
        col_prediction = np.concatenate(col_prediction, axis=2)
        row_prediction.append(col_prediction)
    prob = np.concatenate(row_prediction, axis=1)
    print('stitched prob shape:', prob.shape)

    # Upsample to the original resolution
    if CONFIG[ds]['zoom'] > 1:
        prob = interp_map(prob, CONFIG[ds]['zoom'], image_size[1],
                          image_size[0])

    # Most likely class per pixel, then map class ids to palette colors
    prediction = np.argmax(prob, axis=0)
    print('prediction shape:', prediction.shape)
    print(prediction[0:100, 0:100])
    color_image = CONFIG[ds]['palette'][prediction.ravel()].reshape(image_size)
    print('color image shape:', color_image.shape)
    return color_image
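
Note: the per-tile probability maps in Examples #3 and #5 are stitched with two np.concatenate calls: tiles within a row along the width axis (axis=2 in channels-first ordering), then rows along the height axis (axis=1). A toy sketch with made-up tile shapes:

import numpy as np

def make_tile():
    # Hypothetical per-tile probability map in Caffe ordering (classes, H, W)
    return np.zeros((21, 66, 66), dtype=np.float32)

row0 = np.concatenate([make_tile(), make_tile()], axis=2)   # left-to-right
row1 = np.concatenate([make_tile(), make_tile()], axis=2)
prob = np.concatenate([row0, row1], axis=1)                 # top-to-bottom

print(prob.shape)   # (21, 132, 132)
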
Example #6
File: predict.py Project: zhfkt/GipEcaf
def forward_pass(args):
    ''' Runs a forward pass to segment the image. '''
    model = get_trained_model(args)
    imageFilePathList = [
        os.path.join(args.input_path, f) for f in os.listdir(args.input_path)
    ]

    for imageFilePath in imageFilePathList:
        # Re-scale to 480x360

        print("Now Processing: " + imageFilePath)

        oriImg = Image.open(imageFilePath)
        img = oriImg.resize((480, 360), Image.LANCZOS)

        # Load image and swap RGB -> BGR to match the trained weights
        image_rgb = np.array(img).astype(np.float32)
        image = image_rgb[:, :, ::-1] - args.mean
        image_size = image.shape

        # Network input shape (batch_size=1)
        net_in = np.zeros((1, input_height, input_width, 3), dtype=np.float32)

        output_height = input_height - 2 * label_margin
        output_width = input_width - 2 * label_margin

        # This simplified prediction code is correct only if the output
        # size is large enough to cover the input without tiling
        assert image_size[0] < output_height
        assert image_size[1] < output_width

        # Center pad the original image by label_margin.
        # This initial pad adds the context required for the prediction
        # according to the preprocessing during training.
        image = np.pad(image, ((label_margin, label_margin),
                               (label_margin, label_margin), (0, 0)),
                       'reflect')

        # Add the remaining margin to fill the network input width. This
        # time the image is aligned to the upper left corner though.
        margins_h = (0, input_height - image.shape[0])
        margins_w = (0, input_width - image.shape[1])
        image = np.pad(image, (margins_h, margins_w, (0, 0)), 'reflect')

        # Run inference
        net_in[0] = image
        prob = model.predict(net_in)[0]

        # Reshape to 2D here since the network outputs a flat array per channel
        prob_edge = int(np.sqrt(prob.shape[0]))
        prob = prob.reshape((prob_edge, prob_edge, 21))

        # Upsample
        if args.zoom > 1:
            prob = interp_map(prob, args.zoom, image_size[1], image_size[0])

        # Recover the most likely prediction (actual segment class)
        prediction = np.argmax(prob, axis=2)

        # Apply the color palette to the segmented image
        color_image = np.array(pascal_palette)[prediction.ravel()].reshape(
            prediction.shape + (3, ))

        # Re-scale to original size

        largeMaskImage = Image.fromarray(color_image).resize(
            oriImg.size, Image.LANCZOS).convert('L')
        largeMaskImage = largeMaskImage.point(lambda x: 255 if x > 0 else 0)

        #with open("temp.jpg", 'wb') as out_file:
        #	largeMaskImage.save(out_file)

        if not args.output_path:
            input_dir_name, file_name = os.path.split(imageFilePath)
            output_path = os.path.join(
                input_dir_name,
                '{}_seg.jpg'.format(os.path.splitext(file_name)[0]))
        else:

            input_dir_name, file_name = os.path.split(imageFilePath)
            output_path = os.path.join(args.output_path, file_name)

        print('Saving results to: ', output_path)

        # Apply the mask

        oriImg.putalpha(largeMaskImage)

        # Set fully transparent pixels to white

        pixdata = oriImg.load()

        width, height = oriImg.size
        for y in range(height):
            for x in range(width):
                if pixdata[x, y][3] == 0:
                    pixdata[x, y] = (255, 255, 255, 0)

        with open(output_path, 'wb') as out_file:
            oriImg.save(out_file)
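
Note: the per-pixel loop at the end of Example #6 turns every fully transparent pixel white. Below is a small self-contained sketch of the same masking idea on a made-up image, using Image.composite instead of an explicit Python loop; all names and sizes here are hypothetical.

from PIL import Image

img = Image.new('RGB', (4, 4), (200, 30, 30))   # made-up source image
mask = Image.new('L', (4, 4), 0)                # 0 = transparent, 255 = keep
mask.paste(255, (0, 0, 2, 4))                   # keep only the left half

img.putalpha(mask)                              # attach the mask as the alpha channel

# Fully transparent pixels become white, as in the loop above
white = Image.new('RGBA', img.size, (255, 255, 255, 0))
result = Image.composite(img, white, mask)
result.save('masked.png')
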