Code Example #1
# Imports required by this snippet; _do_mean_shift and _floodfill_compression
# are helper functions assumed to be defined elsewhere in the same project.
import sys
import time
from functools import partial
from multiprocessing import Pool

import numpy as np
from skimage import color


def mean_shift(image_rgb, radius=20, bandwidth=4, eps=1, proc_count=4):
    start = time.time()

    print("Mean Shift: Radius =", radius, ", Bandwidth =", bandwidth, ", EPS =", eps)
    print("Initializing ...")
    image = color.rgb2luv(image_rgb)
    coords = np.rollaxis(np.indices(image.shape[:2]), 0, 3).reshape(-1, 2)

    print("Iterating Points ...")
    partial_iterate = partial(_do_mean_shift, image=image, radius=radius, bandwidth=bandwidth, eps=eps)

    pool = Pool(proc_count)
    segmentation = np.array(pool.map(partial_iterate, coords))
    pool.close()
    pool.join()

    print("Post Processing ...")
    sys.setrecursionlimit(image.shape[0] * image.shape[1] + 100)
    segmentation = segmentation.reshape(image.shape)
    _floodfill_compression(segmentation, bandwidth)

    print("Ending ...")
    segmentation = color.luv2rgb(segmentation)

    end = time.time()
    print("Time elapsed:", end - start, "s")

    return segmentation
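
A minimal usage sketch for the function above (not part of the original project; the input file name and the saving step are assumptions):

from skimage import io, img_as_float
import numpy as np

if __name__ == "__main__":
    # mean_shift uses a multiprocessing Pool, so call it from under the main
    # guard so worker processes can be spawned safely on every platform.
    rgb = img_as_float(io.imread("example.jpg"))   # hypothetical input image
    seg = mean_shift(rgb, radius=20, bandwidth=4, eps=1, proc_count=4)
    io.imsave("example_segmented.png", (np.clip(seg, 0, 1) * 255).astype(np.uint8))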
Code Example #2
 def test_luv_rgb_roundtrip(self, channel_axis):
     img_rgb = img_as_float(self.img_rgb)
     img_rgb = np.moveaxis(img_rgb, source=-1, destination=channel_axis)
     assert_array_almost_equal(
         luv2rgb(rgb2luv(img_rgb, channel_axis=channel_axis),
                 channel_axis=channel_axis),
         img_rgb,
     )
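
The same round trip can also be checked outside the test class; a minimal standalone sketch (the random test image and the default tolerance are assumptions, not taken from the scikit-image test suite):

import numpy as np
from skimage.color import rgb2luv, luv2rgb

rng = np.random.default_rng(0)
img = rng.random((16, 16, 3))   # random RGB image with values in [0, 1]
np.testing.assert_array_almost_equal(luv2rgb(rgb2luv(img)), img)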
Code Example #3
    def luv_method(img, n_clusters, n_colors):
        img = rgb2luv(img)

        # make img array for kmeans
        X = img.reshape(-1, 3)

        n_clusters = n_clusters if n_clusters > n_colors else n_colors
        km = MiniBatchKMeans(n_clusters=n_clusters,
                             init='k-means++',
                             n_init=3,
                             random_state=0)
        labels = km.fit_predict(X)
        cluster_centers = km.cluster_centers_
        bincount = np.bincount(labels)

        # sort colors by frequency
        dominants = cluster_centers[np.argsort(bincount,
                                               axis=0)[::-1]][:n_colors]

        colors = [luv2rgb([[x]])[0][0] for x in dominants]
        return colors
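
A minimal usage sketch for luv_method (assumptions: the method can be called as a plain function, e.g. as a @staticmethod, and photo.jpg is an RGB image on disk):

from skimage import io, img_as_float

img = img_as_float(io.imread("photo.jpg"))       # hypothetical input image
palette = luv_method(img, n_clusters=8, n_colors=5)
for rgb in palette:                              # each entry is an RGB triple in [0, 1]
    print([round(float(c), 3) for c in rgb])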
Code Example #4
    def test_luv2rgb_dtype(self):
        img = self.luv_array.astype('float64')
        img32 = img.astype('float32')

        assert luv2rgb(img).dtype == img.dtype
        assert luv2rgb(img32).dtype == img32.dtype
Code Example #5
        for j in range(0, img.shape[0]-patch_size[0]+1, 1):    # with a 5x5 patch: range(0, 252, 1)
            for i in range(0, img.shape[1]-patch_size[1]+1, 1):             
                #print ('    Processing patch: ' + str(j) + ', ' + str (i))
                patchL = imgGrey[j:j+patch_size[0], i:i+patch_size[1]].reshape(1, patch_size[0]*patch_size[1])
                pixelPos = np.array([[j, i]])
                X_predict[pos, :] = np.concatenate((patchL, pixelPos), 1)
                pos += 1  

        y_predict = pipeline.predict(X_predict)

        imgUV = som._normalizer.denormalize_by(som.data_raw, som.codebook.matrix)[y_predict]

        originalGroup = mySom.getCodeword(som, originalUV.reshape(img.shape[0]*img.shape[1],2))
        refImg = np.concatenate((imgGrey.reshape(img.shape[0]*img.shape[1],1), originalGroup),1)
        refImg = refImg.reshape(img.shape[0],img.shape[1],3)
        misc.imsave('out/'+filename + '.reference.png',color.luv2rgb(refImg))

        originalGroup = mySom.getCodeword(som, originalUV.reshape(img.shape[0]*img.shape[1],2))
        midgray = np.full((img.shape[0]*img.shape[1]), 50).reshape(img.shape[0]*img.shape[1],1)
        refImg = np.concatenate((midgray, originalGroup), 1)
        refImg = refImg.reshape(img.shape[0],img.shape[1],3)
        misc.imsave('out/'+filename + '.reference_uv.png',color.luv2rgb(refImg))
            
        imgGrey = imgGrey[(patch_size[0]//2):(img.shape[0]-patch_size[0]//2), (patch_size[1]//2):(img.shape[1]-patch_size[1]//2)]     # with a 5x5 patch: [2:254, 2:254]
        newImg = np.concatenate((imgGrey.reshape(img_predict_size[0]*img_predict_size[1],1), imgUV.reshape(img_predict_size[0]*img_predict_size[1],2)),1)
        newImg = newImg.reshape(img_predict_size[0],img_predict_size[1],3)
        misc.imsave('out/'+filename + '.colored.png',color.luv2rgb(newImg))

        imgGrey = imgGrey[(patch_size[0]//2):(img.shape[0]-patch_size[0]//2), (patch_size[1]//2):(img.shape[1]-patch_size[1]//2)]     # with a 5x5 patch: [2:254, 2:254]
        midgray = np.full((img_predict_size[0]*img_predict_size[1]), 50).reshape(img_predict_size[0]*img_predict_size[1],1)
        newImg = np.concatenate((midgray, imgUV.reshape(img_predict_size[0]*img_predict_size[1], 2)), 1)
Code Example #6
File: test_colorconv.py Project: AceHao/scikit-image
 def test_luv_rgb_roundtrip(self):
     img_rgb = img_as_float(self.img_rgb)
     assert_array_almost_equal(luv2rgb(rgb2luv(img_rgb)), img_rgb)
Code Example #7
convr1Upsampled = UpSampling2D(size=(2, 2), data_format=None)(convr1)
print("convr1Upsampled.shape: ", convr1Upsampled.shape)

#elem0 = tf.add(conv2,convr1Upsampled,'elem0')
elem0 = Add()([conv2, convr1Upsampled])
print("elem0.shape: ", elem0.shape)

pred = Conv2D(2, (3, 3), padding='same', activation='softmax')(elem0)
print("pred.shape: ", pred.shape)

#Finish Model
#predictions = Dense(50, activation='softmax')(convb3)
model = Model(inputs=input_img, outputs=pred)
model.compile(optimizer='adam', loss='mse')

#Train the neural network
model.fit(x=X, y=Y, batch_size=1, epochs=10)
print(model.evaluate(X, Y, batch_size=1))

# Output colorizations
output = model.predict(X)
output = output / 100
#print ("output: ", output)
canvas = np.empty((224, 224, 3))
canvas[:, :, 0] = X[0][:, :, 0]
canvas[:, :, 1:] = output[0]
imsave("img_luv_result.png", canvas)
imsave("img_result_10.png", luv2rgb(canvas))  # convert the assembled Luv canvas back to RGB before saving
imsave("img_gray_scale.png", rgb2gray(luv2rgb(canvas)))
Code Example #8
from scipy import misc
from skimage import color
import numpy
import sompy
import pickle

for mapsize in [[3, 3], [5, 5], [10, 10], [20, 20]]:
    somFile = open('pkls/trainedSOM' + str(mapsize[0]) + 'x' + str(mapsize[1]) + '.pkl', 'rb')
    som = pickle.load(somFile)
    somFile.close()

    # recover the codebook vectors in the original (u, v) data range
    codebook = som._normalizer.denormalize_by(som.data_raw, som.codebook.matrix)

    # prepend an L channel and reshape the codebook into a small Luv image
    L = numpy.zeros(mapsize[0] * mapsize[1])
    L = L.reshape(-1, 1)

    img = numpy.concatenate((L, codebook), 1)
    img = img.reshape(mapsize[0], mapsize[1], 3)
    img[:, :, 0] = 50  # constant mid lightness so the (u, v) colors stay visible

    misc.imsave('SOM' + str(mapsize[0]) + 'x' + str(mapsize[1]) + '_L50.png', color.luv2rgb(img))

Code Example #9
    dataset = pickle.load(dataFile)
    dataFile.close()
else:
    print('Creating dataset...')
    dataset = mySom.createDatasetSom('dataset/Opencountry', 0.3)
    print('Dataset of size: ' + str(dataset.shape))
    dataFile = open('pkls/datasetSOM.pkl', 'wb')
    pickle.dump(dataset, dataFile)
    dataFile.close()

for mapsize in [[3, 3], [5, 5], [10, 10], [20, 20]]:
    print('Training SOM with mapsize: ' + str(mapsize))
    som = mySom.defineAndTrainSOM(dataset, mapsize, 5, 10)
    fileObject = open(
        'pkls/trainedSOM' + str(mapsize[0]) + 'x' + str(mapsize[1]) + '.pkl',
        'wb')
    pickle.dump(som, fileObject)
    fileObject.close()

    ### print SOM ###
    size_x, size_y = mapsize[0], mapsize[1]
    codebook = som._normalizer.denormalize_by(som.data_raw,
                                              som.codebook.matrix)
    L = numpy.zeros(size_x * size_y)
    L = L.reshape(-1, 1)
    img = numpy.concatenate((L, codebook), 1)
    img = img.reshape(size_x, size_y, 3)
    img[:, :, 0] = 50
    misc.imsave('SOM' + str(size_x) + 'x' + str(size_y) + '_L50.png',
                color.luv2rgb(img))
Code Example #10
import som as mySom
import sys
import pickle
import numpy as np
from scipy import misc
from skimage import color

if len(sys.argv) < 2:
    print('A filename is needed')
    sys.exit(1)

filename = sys.argv[1]
img = misc.imread(filename)
imgLUV = color.rgb2luv(img)
imgL = imgLUV[:, :, 0]
imgUV = imgLUV[:, :, 1:]

for mapsize in [[2, 2], [3, 3], [5, 5], [10, 10]]:
    somFile = open(
        'pkls/trainedSOM' + str(mapsize[0]) + 'x' + str(mapsize[1]) + '.pkl',
        'rb')
    som = pickle.load(somFile)
    somFile.close()

    imgCode = mySom.getCodeword(som, imgUV.reshape(256 * 256, 2))
    refImg = np.concatenate((imgL.reshape(256 * 256, 1), imgCode), 1)
    refImg = refImg.reshape(256, 256, 3)
    misc.imsave(
        filename.replace('.jpg', '') + '_reference_' + str(mapsize[0]) + 'x' +
        str(mapsize[1]) + '.png', color.luv2rgb(refImg))
Code Example #11
File: image.py Project: zshipko/imagepy
 def to_rgb_from_luv(self):
     return Image(luv2rgb(self)).convert_type(self.dtype)
Code Example #12
File: colors.py Project: athoune/Palette
    a = set(lpoints)
    for i, mask in enumerate(masks):
        near = set([(x, y) for x, y in points[mask]])
        near.remove(lpoints[i])
        print(len(near), points[i], near)



if __name__ == "__main__":
    import sys

    X = convert(colors(sys.argv[1]))

    labels, cluster_centers = mean_shift(X[:,1:])
    print(labels.shape)
    print(cluster_centers.shape)
    print("clusters", cluster_centers)
    #thresold(cluster_centers)

    lab = unconvert(cluster_centers)
    print(lab.shape)
    rgbs = luv2rgb(lab.reshape((1, lab.shape[0], 3)))
    print(rgbs)

    with open('toto.html', 'w') as f:
        f.write('<html><body><table><tr>')
        for rgb in rgbs[0] * 256:
            color = [int(c) for c in rgb]
            f.write('<td style="background:rgb(%i, %i, %i); width:64px; height:64px;">&nbsp;</td>' % tuple(color))
        f.write('</tr></table><img src="%s"/></body></html>' % sys.argv[1])
Code Example #13
                X_predict[pos, :] = patchL
                pos += 1

        #print (X_predict)
        y_predict = pipeline.predict(X_predict)

        imgUV = som._normalizer.denormalize_by(som.data_raw,
                                               som.codebook.matrix)[y_predict]

        originalGroup = mySom.getCodeword(som,
                                          originalUV.reshape(256 * 256, 2))
        refImg = np.concatenate((imgGrey.reshape(256 * 256, 1), originalGroup),
                                1)
        refImg = refImg.reshape(256, 256, 3)
        misc.imsave('out/' + filename + '.reference.png',
                    color.luv2rgb(refImg))

        originalGroup = mySom.getCodeword(som,
                                          originalUV.reshape(256 * 256, 2))
        midgray = np.full((256 * 256), 50).reshape(256 * 256, 1)
        refImg = np.concatenate((midgray, originalGroup), 1)
        refImg = refImg.reshape(256, 256, 3)
        misc.imsave('out/' + filename + '.reference_uv.png',
                    color.luv2rgb(refImg))

        imgGrey = imgGrey[(patch_size[0] // 2):(256 - patch_size[0] // 2), (
            patch_size[1] //
            2):(256 - patch_size[1] // 2)]  # with a 5x5 patch: [2:254, 2:254]
        newImg = np.concatenate(
            (imgGrey.reshape(img_predict_size[0] * img_predict_size[1], 1),
             imgUV.reshape(img_predict_size[0] * img_predict_size[1], 2)), 1)
Code Example #14
 def luv2rgb(self, imageArray):
     return color.luv2rgb(imageArray)
Code Example #15
 def test_luv_rgb_roundtrip(self):
     img_rgb = img_as_float(self.img_rgb)
     assert_array_almost_equal(luv2rgb(rgb2luv(img_rgb)), img_rgb)
Code Example #16
 def trans(self, img1, img2, img3):
     rst = np.array((img1.T, img2.T, img3.T), dtype=np.float64)
     rst -= 128
     rst = color.luv2rgb(rst.T)
     rst *= 255
     return (rst).astype(np.uint8)
Code Example #17
     _, c = sess.run([opt, cost], feed_dict={x: batch_x, y: batch_y})
     avg_cost += c / total_batch
 # evaluate model
 tst_loss = sess.run(cost, feed_dict={x: tst_x, y: tst_y})
 tr_losses.append(avg_cost / (tr_n * 64 * 64))
 tst_losses.append(tst_loss / (tst_n * 64 * 64))
 print('Epoch:', '%04d' % epoch,
       'training loss:', '{:.3f}'.format(avg_cost),
       'test loss:', '{:.3f}'.format(tst_loss))
 if (epoch + 1) % display_step == 0:
     # visualize colorization on training and test sets
     img_out = sess.run(pred, feed_dict={x: tst_x})
     img_out = np.concatenate((tst_x, img_out), axis=3)
     for i in range(tst_n):
         img = img_out[i]
         img = color.luv2rgb(img)
         fname = 'out/test_out' + str(i) + '_epoch' + str(
             epoch) + '.png'
         imsave(fname, img)
     img_out = sess.run(pred, feed_dict={x: tr_x})
     img_out = np.concatenate((tr_x, img_out), axis=3)
     for i in range(tr_n):
         img = img_out[i]
         img = color.luv2rgb(img)
         fname = 'out/tr_out' + str(i) + '_epoch' + str(epoch) + '.png'
         imsave(fname, img)
     # plot error
     plt.figure(1)
     plt.plot(tr_losses, 'r')
     plt.plot(tst_losses, 'g')
     plt.savefig('error.png')