def tile(img, sz=256, N=16):
    # Pad the image with white so both sides are a multiple of sz.
    shape = img.shape
    pad0, pad1 = (sz - shape[0] % sz) % sz, (sz - shape[1] % sz) % sz
    img = np.pad(
        img,
        [[pad0 // 2, pad0 - pad0 // 2], [pad1 // 2, pad1 - pad1 // 2], [0, 0]],
        constant_values=255)
    # Cut the padded image into non-overlapping sz x sz tiles.
    img = img.reshape(img.shape[0] // sz, sz, img.shape[1] // sz, sz, 3)
    img = img.transpose(0, 2, 1, 3, 4).reshape(-1, sz, sz, 3)
    # If there are fewer than N tiles, pad with blank (white) tiles.
    if len(img) < N:
        img = np.pad(img, [[0, N - len(img)], [0, 0], [0, 0], [0, 0]],
                     constant_values=255)
    # Keep the N darkest tiles (lowest pixel sums).
    idxs = np.argsort(img.reshape(img.shape[0], -1).sum(-1))[:N]
    img = img[idxs]
    return img
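A minimal usage sketch (the dummy array below is illustrative; in the original context img is an H x W x 3 image):

dummy = np.full((1000, 700, 3), 255, dtype=np.uint8)
tiles = tile(dummy, sz=256, N=16)
print(tiles.shape)  # (16, 256, 256, 3)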
Example #2
def main(request):
    if request.method == 'POST':
        form = form_canvas(request.POST)
        tim = request.POST.get('im')

        if form.is_valid():
            # Decode the base64-encoded canvas drawing and write it to disk.
            data = base64.b64decode(tim)
            img0 = "ml_app/test.bmp"
            with open(img0, 'wb') as f:
                f.write(data)

            o = count(name=img0)
            o.save()

            # To save each image to a separate file instead (disabled):
            '''
            img1 = Image.open(img0)
            idi = o.id
            a = "C:/Users/Yzat/Downloads/ML_Django/Credit_Approval/Credit_project/media/"
            adr = a + str(idi) + '.bmp'
            img1.save(adr)
            '''

            ####################    ML Model  ##################################
            new_model = tf.keras.models.load_model('ml_app/yzat.h5')
            # Read the BMP unchanged so the canvas alpha channel is kept.
            img = cv2.imread('ml_app/test.bmp', -1)
            b, g, r, alpha = cv2.split(img)
            # Fold the alpha channel into a 3-channel image before grayscaling.
            img_BGR = cv2.merge((r, g, alpha))
            image = cv2.cvtColor(img_BGR, cv2.COLOR_BGR2GRAY)
            image = cv2.resize(image, (28, 28))
            image = image.astype('float32')
            image = image.reshape(1, 28, 28, 1)
            image /= 255
            predictions = new_model.predict(image)
            yzat_predictions = np.argmax(predictions)
            messages.success(request, '{}'.format(yzat_predictions))
            #################### ML Model End ####################################

            return HttpResponseRedirect('/canvas')
        else:
            return HttpResponseRedirect('/canvas')
    else:
        return render(request, 'canvas.html')
Example #3
def process_images(fp):
    # w and h are expected to be defined at module level.
    imgs = []
    for f in fp:
        img = load_img(f)
        img = img.resize((w, h), Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10
        img = img_to_array(img) / 255
        img = img.transpose((2, 0, 1))  # HWC -> CHW; a bare reshape would scramble pixels
        imgs.append(img)
    return np.array(imgs)
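A usage sketch; note that w and h are module-level globals the function assumes, and the file names here are placeholders:

w, h = 64, 64
batch = process_images(['cat.jpg', 'dog.jpg'])
print(batch.shape)  # (2, 3, 64, 64)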
Example #4
def prepare_img_for_prediction():
    # Walk the "croped" directory and classify every image in it.
    for dirpath, dirnames, filenames in walk("croped"):
        for filename in filenames:
            image = color.rgb2gray(io.imread(dirpath + "/" + filename))
            image = resize(image, (28, 28), anti_aliasing=True)
            image = np.asarray(image)
            # Flatten the 28x28 image to a single 784-dimensional row.
            image = image.reshape(1, 784)
            predict_new_img(image)
Example #5
def restore_channels(image, n_channels):
    """
    Convert a 1-channel image to an n-channel image.

    :param image: an image in ndarray format
    :param n_channels: target number of channels
    :return: ndarray with the image's pixel intensities repeated per channel
    """
    height, width = image.shape[0], image.shape[1]
    arr = image.reshape(height, width)
    # Repeat every pixel n_channels times, then restore the 2-D layout.
    return np.repeat(arr, n_channels).reshape(height, width, n_channels)
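A quick usage sketch (shapes are illustrative):

gray = np.zeros((28, 28, 1), dtype=np.uint8)
rgb = restore_channels(gray, 3)
print(rgb.shape)  # (28, 28, 3)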
Example #6
    def __call__(self, sample):
        image, keypoints = sample['image'], sample['keypoints']
        # If image has no grayscale channel, add it
        if len(image.shape) == 2:
            image = image.reshape(image.shape[0], image.shape[1], 1)
        # Reshape color dimensions to torch Tensor: [channel, height, width]
        image = image.transpose((2, 0, 1))
        return {
            'image': torch.from_numpy(image),
            'keypoints': torch.from_numpy(keypoints)
        }
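A usage sketch, assuming this __call__ belongs to a ToTensor-style transform class (the enclosing class is not shown in the snippet, so the name is hypothetical):

transform = ToTensor()  # hypothetical enclosing class
sample = {'image': np.zeros((96, 96)), 'keypoints': np.zeros((68, 2))}
out = transform(sample)
print(out['image'].shape)  # torch.Size([1, 96, 96])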
Example #7
def feed_net_mnist(sess):
    x = int(input('input index:'))

    digit = mnist.test.images[x]
    img = np.array(digit)
    pixels = img.reshape((28, 28))
    plt.imshow(pixels, cmap='gray')
    plt.show()

    # prediction, x_, y_ and mnist are assumed to be defined at module level.
    result = tf.argmax(prediction, 1)
    print(sess.run(result, feed_dict={
        x_: [digit],
        y_: [mnist.test.labels[x]]
    }))
Example #8
def mouth_cluster(image_path, files_path):
    global gmm
    # GMM clustering
    pic_path = glob.glob(image_path + '/*')

    opic = []
    pic = []
    for file in pic_path:
        image = io.imread(file + '/filled_mouth_cu_center.jpg')
        opic.append(image)
        image = image[:, :, 0]
        # Flatten the single channel into one feature vector per image.
        images = image.reshape((image.shape[0] * image.shape[1]))
        pic.append(images)

    # Load the fitted GMM once and cache it in the module-level global.
    if 'gmm' not in globals():
        gmm = joblib.load(files_path + '/mouth_10.pkl')

    return gmm.predict(pic)[0]
Example #9
    def __call__(self, sample: Sample) -> Sample:
        """
        Apply transformation on provided sample.

        :param sample:
        :return: transformed sample
        """
        image, key_pts = sample['image'], sample['keypoints']
        # If image has no grayscale color channel, add one
        if len(image.shape) == 2:
            # Add that third color dim
            image = image.reshape(image.shape[0], image.shape[1], 1)
        # Swap color axis because
        # Numpy image: H x W x C
        # Torch image: C X H X W
        image = image.transpose((2, 0, 1))
        return {
            'image': torch.from_numpy(image),
            'keypoints': torch.from_numpy(key_pts)
        }
Example #10
def gen(batch_size=1, flag='train'):
    if flag == 'train':
        start = 1
        end = 3
    else:
        start = 3
        end = 4
    x_train = np.zeros((batch_size, 256 * 256), dtype='float32')
    y_train = np.zeros((batch_size, 1), dtype='float32')
    while True:
        for i in range(start, end):  # i only controls how often a batch is emitted
            for j in range(batch_size):
                # csv.reader needs a text-mode file in Python 3 ('rb' would fail).
                with open("scores.csv", "r", newline='') as f:
                    reader = csv.reader(f)
                    y_train[j, :] = float(list(reader)[j][0])
                path = "codes_GRBM/codes_GRBM/target_images/" + str(j) + ".jpg"
                img = image.load_img(path, grayscale=True)
                img = image.img_to_array(img)
                img = preprocess_input(img, mode='tf')
                # Flatten the 256x256 grayscale image into one row.
                x_train[j, :] = img.reshape(256 * 256)
            yield x_train, y_train
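A short usage sketch, assuming scores.csv and the image paths from the snippet exist:

train_gen = gen(batch_size=4, flag='train')
x_batch, y_batch = next(train_gen)
print(x_batch.shape, y_batch.shape)  # (4, 65536) (4, 1)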
Example #11
    def callback(image):
        # Save the current state of the optimisation as a 28x28 image;
        # imsave needs a recognisable extension to pick the format.
        matplotlib.image.imsave("../figures/optimizing.png", image.reshape((28, 28)))
Example #12
def create_bar(height, width, color):  # signature reconstructed; the original snippet starts mid-docstring
    """
    :param: height of the bar
    :param: width of the image
    :param: BGR pixel values of the color
    :return: tuple of bar, rgb values
    """
    bar = np.zeros((height, width, 3), np.uint8)
    bar[:] = color
    red, green, blue = int(color[2]), int(color[1]), int(color[0])
    return bar, (red, green, blue)


# Load image
img = cv2.imread('YOUR_NDVI_IMAGE')
height, width, _ = np.shape(img)

# Convert from BGR to RGB so the cluster centres come out in RGB order
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# Reshape the image to be a list of RGB pixels
image = img_rgb.reshape((height * width, 3))

# Define a cluster
num_clusters = 1
clusters = KMeans(n_clusters=num_clusters)
clusters.fit(image)

# Count the dominant colors and sort them into groups
histogram = make_histogram(clusters)

# Sort the groups, most-common first
combined = zip(histogram, clusters.cluster_centers_)
combined = sorted(combined, key=lambda x: x[0], reverse=True)
Example #13
    #Save the picture in the current directory
    #OverWrite it. Temporary solution.
    cv2.imwrite("NewPicture.jpg", frame)
    #Add delay for temp solution
    time.sleep(0.5)
    image = cv2.imread('NewPicture.jpg')
    #Take image in numpy array
    data = np.array(image)
    a2D = data.reshape(-1, data.shape[-1])
    #R G and B values of the image captured
    r = a2D[:, 0]
    g = a2D[:, 1]
    b = a2D[:, 2]

    pixels = np.float32(image.reshape(-1, 3))

    #Use Kmeans to find the dominant colors in the picture
    n_colors = 5
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 200, .1)
    flags = cv2.KMEANS_RANDOM_CENTERS

    _, labels, palette = cv2.kmeans(pixels, n_colors, None, criteria, 10,
                                    flags)
    _, counts = np.unique(labels, return_counts=True)

    #Pick the RGB color with the highest count (the dominant)

    dominant = palette[np.argmax(counts)]
    print(dominant)
Example #14
test_input_folder = "/content/sample_data/Rain-Haze/Haze"
test_output_folder = "/content/sample_data/dehazed_test_images2/"
if not os.path.exists(test_output_folder):
  os.mkdir(test_output_folder)
  
file_types = ['jpeg','jpg','png']

with tf.Session() as sess:
  #saver.restore(sess,'models/model_checkpoint_9.ckpt')
  saver.restore(sess,'/content/sample_data/TESTING_TAR/MY_WEIGHTS_USING_EEC206_DATASET/model_checkpoint_9.ckpt')
  test_image_paths = []
  for file_type in file_types:
    test_image_paths.extend(glob.glob(test_input_folder+"/*."+file_type))
  
  
  for path in test_image_paths:
    image_label = path.split(test_input_folder)[-1][1:]
    image = Image.open(path)
    image = image.resize((640, 480))
    image = np.asarray(image) / 255.0
    image = image.reshape((1,) + image.shape)
    dehazed_image = sess.run(dehazed_X,feed_dict={X:image,Y:image})
    
    
    fig, axes = plt.subplots(nrows=1, ncols=2,figsize=(10,10))
    axes[0].imshow(image[0])
    axes[1].imshow(dehazed_image[0])
    fig.tight_layout()
    
    dehazed_image = np.asarray(dehazed_image[0] * 255,dtype=np.uint8)
    mpl.image.imsave(test_output_folder + "/" + 'dehazed_' + image_label, dehazed_image)
Example #15
import numpy as np
import tensorflow as tf
from matplotlib import image
import matplotlib.pyplot as plt

# Load as grayscale: shape=(256,256), dtype=float
# (renamed to img so the matplotlib.image module is not shadowed)
img = image.imread('cameraman.png').min(2)

# Build normalised (row, col) coordinates for every pixel.
x = np.zeros((256 * 256, 2))
for i in range(256 * 256):
    x[i][0] = (i // 256) / 255
    x[i][1] = (i % 256) / 255

y = img.reshape((256 * 256, 1))

activation = tf.math.sin

model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, input_dim=2),
    tf.keras.layers.Activation(activation),
    tf.keras.layers.Dense(16),
    tf.keras.layers.Activation(activation),
    tf.keras.layers.Dense(16),
    tf.keras.layers.Activation(activation),
    tf.keras.layers.Dense(16),
    tf.keras.layers.Activation(activation),
    tf.keras.layers.Dense(1)
])

model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.03,
                                                momentum=0.9),
              loss='mse')  # loss assumed; the original example is cut off mid-call
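The source is cut off inside the compile call, so the loss above is an assumption. A hedged sketch of how such a coordinate-to-intensity model would typically be trained and inspected (epoch and batch sizes are illustrative guesses, not from the source):

model.fit(x, y, epochs=100, batch_size=1024)  # hyperparameters assumed
pred = model.predict(x).reshape((256, 256))
plt.imshow(pred, cmap='gray')
plt.show()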
Example #16
def vis(img):
    if os.path.isfile('model_{}_.h5'.format(model_version)):
        print('loading model...')
        cnn = load_cnn_model()
        cnn.load_weights('model_{}_.h5'.format(model_version))

        # selected layers to visualise.
        layers = [
            'conv_layer_1', 'conv_layer_2', 'conv_layer_3', 'output_layer'
        ]

        # visualise convnet visualisation for each layer, place them in a subplot.
        for layer_name in layers:
            print "Generating visualisation of {}".format(layer_name)
            layer_idx = [
                idx for idx, layer in enumerate(cnn.layers)
                if layer.name == layer_name
            ][0]

            if 'conv' not in layer_name:
                plt.figure()
                for idx, e in enumerate(emotions):
                    plt.subplot(6, 6, idx + 1)
                    plt.text(1, 7, '{}'.format(e))
                    img = visualize_activation(cnn,
                                               layer_idx,
                                               filter_indices=idx,
                                               max_iter=750)
                    img = array_to_img(img.reshape(3, w, h))
                    plt.axis('off')
                    plt.imshow(img)

                plt.suptitle('Visualisation of the Output Layer')
                plt.savefig('{}.png'.format(layer_name), bbox_inches='tight')
                plt.show()
                break

            filters = np.arange(get_num_filters(cnn.layers[layer_idx]))

            images = []
            for idx in filters:
                img = visualize_activation(cnn,
                                           layer_idx,
                                           tv_weight=0,
                                           verbose=False,
                                           filter_indices=idx,
                                           max_iter=750)
                img = array_to_img(img.reshape(3, w, h))
                images.append(img)

            plt.figure()
            for idx, i in enumerate(images):
                plt.subplots_adjust(wspace=0, hspace=0)
                plt.subplot(6, 6, idx + 1)
                plt.text(0, 15, 'Filter {}'.format(idx))
                plt.axis('off')
                plt.imshow(i)

            plt.suptitle('Visualisation of Convolution Layer {}'.format(
                layer_name[len(layer_name) - 1]))
            plt.savefig('{}.png'.format(layer_name), bbox_inches='tight')
            plt.show()

    else:
        print('model does not exist, train the network first.')
Example #17
import numpy as np
from math import pi, cos
from matplotlib import image
import matplotlib.pyplot as plt
import scipy.sparse.linalg

# Load as grayscale: shape=(256,256), dtype=float
# (renamed to img so the matplotlib.image module is not shadowed)
img = image.imread('cameraman.png').min(2)


x = np.zeros((256*256, 2))
for i in range(256*256):
    x[i][0] = (i//256)/255
    x[i][1] = (i % 256)/255

y = img.reshape(256*256)


def fit_cosine(x, y, deg):
    # Least-squares fit in a 2-D cosine basis:
    # u_ij(x) = cos(pi*i*x[0]) * cos(pi*j*x[1]), 0 <= i, j < deg.
    dim = deg*deg

    def generate_u(x):
        ui = np.array([cos(pi*i*x[0]) for i in range(deg)])
        uj = np.array([cos(pi*j*x[1]) for j in range(deg)])
        return np.tensordot(ui, uj, axes=0).reshape(deg*deg)

    cov = np.zeros((dim, dim))
    vec = np.zeros(dim)

    for i in range(len(x)):
        ui = generate_u(x[i])