Example #1
def overlay_mask_2(image_layer, mask_layer, channel, fraction, mask_color):
    image_layer = copy.deepcopy(image_layer)
    mask_layer = copy.deepcopy(mask_layer)
    ind = mask_layer.astype(bool)

    if image_layer.shape[2] == 1:
        image_layer = np.squeeze(image_layer)
        image_layer[ind] = image_layer[ind] * fraction
        mask_layer = mask_layer * (1 - np.max(image_layer[ind]))
        g_layer = (image_layer + mask_layer)

        if mask_color == 'cyan':
            rgb_layer = np.dstack((image_layer, g_layer, g_layer))
        elif mask_color == 'yellow':
            rgb_layer = np.dstack((g_layer, g_layer, image_layer))
        elif mask_color == 'violet':
            rgb_layer = np.dstack((g_layer, image_layer, g_layer))

    elif image_layer.shape[2] == 3:

        r_layer = np.squeeze(np.expand_dims(image_layer[:, :, 0], axis=-1))
        g_layer = np.squeeze(np.expand_dims(image_layer[:, :, 1], axis=-1))
        b_layer = np.squeeze(np.expand_dims(image_layer[:, :, 2], axis=-1))

        if mask_color == 'cyan':

            g_layer[ind] = g_layer[ind] * fraction
            b_layer[ind] = b_layer[ind] * fraction
            mask_layer = mask_layer * (1 - np.max(image_layer[ind]))
            g_layer = g_layer + mask_layer
            b_layer = b_layer + mask_layer

        elif mask_color == 'yellow':

            r_layer[ind] = r_layer[ind] * fraction
            g_layer[ind] = g_layer[ind] * fraction
            mask_layer = mask_layer * (1 - np.max(image_layer[ind]))
            r_layer = r_layer + mask_layer
            g_layer = g_layer + mask_layer

        elif mask_color == 'violet':

            r_layer[ind] = r_layer[ind] * fraction
            b_layer[ind] = b_layer[ind] * fraction
            mask_layer = mask_layer * (1 - np.max(image_layer[ind]))
            r_layer = r_layer + mask_layer
            b_layer = b_layer + mask_layer

        rgb_layer = np.dstack((r_layer, g_layer, b_layer))

    return rgb_layer
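A quick way to exercise overlay_mask_2 on synthetic data; the shapes, the square mask, and the 0.5 fraction below are illustrative assumptions, not values from the original:

import copy

import numpy as np

image = np.random.rand(64, 64, 1).astype(np.float32)  # grayscale image in [0, 1]
mask = np.zeros((64, 64), dtype=np.float32)
mask[16:48, 16:48] = 1.0  # square mask region (assumed)
rgb = overlay_mask_2(image, mask, channel=None, fraction=0.5, mask_color='cyan')
print(rgb.shape)  # (64, 64, 3); channel is unused by the function above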
Example #2
 def sample_both_batch(self, batch_type, batch_size, rf):
     latent_sample = np.random.multivariate_normal(
         np.zeros(self.latent_dim).astype(np.float32),
         np.eye(self.latent_dim).astype(np.float32), (batch_size, rf))
     if batch_type == "generator":
         return latent_sample
     lis_keys = [(k, v) for k, v in self.data.items()]
     subject_inds = np.random.randint(len(lis_keys), size=batch_size)
     real_sample = np.zeros((batch_size, rf, self.pose_dim))
     twod_sample = np.zeros((batch_size, rf, 34))
     cam_intrins = np.tile(np.expand_dims(self.cam_intrinsics, axis=0),
                           (rf, 1))
     for i in range(batch_size):
         subject, actions = lis_keys[subject_inds[i]]
         action_keys = list(actions.keys())
         action_ind = np.random.randint(len(action_keys))
         action = action_keys[action_ind]
         pose_seq = self.data[subject][action][:, self.relevant_joints, :]
         start_ind = np.random.randint(pose_seq.shape[0] - rf)
         pose_seq = pose_seq[start_ind:start_ind + rf, :, :]
         pos_2d = wrap(project_to_2d, pose_seq, cam_intrins)
         twod_sample[i, :, :] = np.reshape(pos_2d, (rf, 34))
         pose_seq[:, 1:, :] = pose_seq[:, 1:, :] - np.tile(
             np.expand_dims(pose_seq[:, 0, :], axis=1), (1, 16, 1))
         real_sample[i, :, :] = np.reshape(pose_seq, (rf, self.pose_dim))
     return latent_sample, real_sample, twod_sample, cam_intrins
Example #3
def extract_group2(filename, refpix):
    """Read in the PEDESTAL values from a *fitopt.fits file

    Parameters
    ----------
    filename : str
        Name of uncalibrated file. This should be a *uncal.fits file.

    refpix : tuple
        4-element tuple (left, right, bottom, top) giving the number of
        outer columns and rows that are reference pixels

    Returns
    -------
    group2 : numpy.ndarray
        3D array (nint, y, x) containing group 2 of each integration,
        with the reference pixels cropped
    """
    with fits.open(filename) as hdulist:
        dims = hdulist['SCI'].data.shape
        if len(dims) == 4:
            group2 = hdulist['SCI'].data[:, 1, :, :]
        elif len(dims) == 3:
            group2 = np.expand_dims(hdulist['SCI'].data[1, :, :], axis=0)
        else:
            raise ValueError('Unexpected SCI data shape: {}'.format(dims))
        nint, ydim, xdim = group2.shape
        # Crop the reference pixels
        left, right, bottom, top = refpix
        group2 = group2[:, bottom:ydim - top, left:xdim - right]

    return group2
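A hedged usage sketch for the function above (which assumes from astropy.io import fits and import numpy as np); the filename and the 4-pixel reference border are placeholders, not values from the original:

group2 = extract_group2('my_exposure_uncal.fits', refpix=(4, 4, 4, 4))
print(group2.shape)  # (nint, ydim - 8, xdim - 8) after cropping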
Example #4
    def get_image(self, img_name_list, idx, flip):
        img_name = os.path.join(self.dataset_path, img_name_list.iloc[idx])
        image = io.imread(img_name)
        
        # if gray scale, convert to 3-channel image
        if image.ndim == 2:
            image = np.repeat(np.expand_dims(image, 2), axis=2, repeats=3)
        
        if flip:
            image = np.flip(image, 1)

        # get image size
        im_size = np.asarray(image.shape)
        
        # convert to torch Variable
        image = np.expand_dims(image.transpose((2,0,1)),0)
        image = torch.Tensor(image.astype(np.float32))
        image_var = Variable(image,requires_grad=False)
        
        # Resize image using bilinear sampling with identity affine tnf
        image = self.affineTnf(image_var).data.squeeze(0)
        
        im_size = torch.Tensor(im_size.astype(np.float32))
        
        return (image, im_size)
Example #5
    def get_image(self, img_name_list, idx, flip):
        img_name = os.path.join(self.dataset_path, img_name_list.iloc[idx])
        image = io.imread(img_name)

        # if gray scale, convert to 3-channel image
        if image.ndim == 2:
            image = np.repeat(np.expand_dims(image, 2), axis=2, repeats=3)

        boundary = None
        if self.random_crop:
            h, w, c = image.shape
            top = np.random.randint(h // 4)
            bottom = int(3 * h / 4 + np.random.randint(h // 4))
            left = np.random.randint(w // 4)
            right = int(3 * w / 4 + np.random.randint(w // 4))
            boundary = (top, bottom, left, right)
            image = image[top:bottom, left:right, :]

        if flip:
            image = np.flip(image, 1)

        # get image size
        im_size = np.asarray(image.shape)

        # convert to torch Variable
        image = np.expand_dims(image.transpose((2, 0, 1)), 0)
        image = torch.Tensor(image.astype(np.float32))
        image_var = Variable(image, requires_grad=False)

        # Resize image using bilinear sampling with identity affine tnf
        image = self.affineTnf(image_var).data.squeeze(0)

        im_size = torch.Tensor(im_size.astype(np.float32))

        return (image, im_size, boundary)
Example #6
 def predict(self, x, hidden):
     x = torch.FloatTensor(x.astype(np.float32))
     if self.use_cuda:
         x = x.contiguous().cuda()
     x = x.unsqueeze(0)  # add batch dimension (torch equivalent of np.expand_dims)
     self.eval()
     with torch.no_grad():
         pi, v, hidden = self.forward(x)
     return pi.data.cpu().numpy()[0], v.data.cpu().numpy()[0]
Example #7
def dimX(x, ts):
    # Simple way to expand dimension
    # x.shape = (batch_size, num_feature)
    batch_size, num_feature = x.shape
    x = np.expand_dims(x, axis=1)
    x = np.repeat(x, repeats=ts, axis=1)
    # x.shape = (batch_size, timestep, num_feature)
    assert x.shape == (batch_size, ts, num_feature)
    return x
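A quick shape check for dimX with made-up sizes:

import numpy as np

x = np.arange(6, dtype=np.float32).reshape(2, 3)  # (batch_size=2, num_feature=3)
y = dimX(x, ts=4)
print(y.shape)  # (2, 4, 3)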
Example #8
 def np2torch(img):
     img = img.astype(np.float32)
     if len(img.shape) == 2:
         img = np.expand_dims(img, axis=0)  # HW -> 1HW
     elif len(img.shape) == 3:
         img = img.transpose(2, 0, 1)  # HWC -> CHW
     elif len(img.shape) == 4:
         img = img.transpose(0, 3, 1, 2)  # NHWC -> NCHW
     else:
         raise NotImplementedError
     img = torch.from_numpy(img)
     return img
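For example, with synthetic arrays to show each branch of the dimension handling (the shapes are made up):

import numpy as np
import torch

gray = np.random.rand(32, 32)  # HW  -> 1HW
rgb = np.random.rand(32, 32, 3)  # HWC -> CHW
batch = np.random.rand(8, 32, 32, 3)  # NHWC -> NCHW
print(np2torch(gray).shape, np2torch(rgb).shape, np2torch(batch).shape)
# torch.Size([1, 32, 32]) torch.Size([3, 32, 32]) torch.Size([8, 3, 32, 32])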
Example #9
    def setInit(self, X, Z, U=None, P=None, G=None, R=None, Q=None):
        """
		pval=0.1, qval=1e-4, rval=0.1
		"""
        if len(X.shape) == 1:
            X = np.expand_dims(X, axis=1)
        if len(X.shape) == 2:
            self._stateDim = X.shape[0]
        if len(Z.shape) == 1:
            Z = np.expand_dims(Z, axis=1)
        if len(Z.shape) == 2:
            self._sensDim = Z.shape[0]
        global I
        I = np.eye(self._stateDim)
        self.X = X
        self.Z = Z
        self.U = np.zeros((self._stateDim, 1)) if U is None else U
        self.P = np.eye(self._stateDim) if P is None else P
        self.G = np.zeros((self._stateDim, self._sensDim)) if G is None else G
        self.R = np.eye(self._sensDim) if R is None else R
        self.Q = np.zeros((self._stateDim, self._stateDim)) if Q is None else Q
Example #10
 def setInit(self,
             X,
             Z,
             U=None,
             A=None,
             B=None,
             C=None,
             P=None,
             G=None,
             R=None,
             Q=None):
     if len(X.shape) == 1:
         X = np.expand_dims(X, axis=1)
     if len(X.shape) == 2:
         self._stateDim = X.shape[0]
     if len(Z.shape) == 1:
         Z = np.expand_dims(Z, axis=1)
     if len(Z.shape) == 2:
         self._sensDim = Z.shape[0]
     global I
     I = np.eye(self._stateDim)
     self.X = X
     self.Z = Z
     self.U = np.zeros((self._stateDim, 1)) if U is None else U
     self.A = np.eye(self._stateDim) if A is None else A
     self.B = np.eye(self._stateDim) if B is None else B
     if self._stateDim == self._sensDim:
         self.C = np.eye(self._stateDim) if C is None else C
     else:
         if C is None:
             self.C = np.zeros((self._sensDim, self._stateDim))
             print(
                 "[WARNING] Please assign sensor contribution for Matrix C")
         else:
             self.C = C
     self.P = np.eye(self._stateDim) if P is None else P
     self.G = np.zeros((self._stateDim, self._sensDim)) if G is None else G
     self.R = np.eye(self._sensDim) if R is None else R
     self.Q = np.zeros((self._stateDim, self._stateDim)) if Q is None else Q
Example #11
def traj_plot(demo_traj, fitted_traj, objects, xlim, ylim):
    if type(demo_traj) is list: demo_traj = np.array(demo_traj)
    if len(demo_traj.shape) == 2:
        demo_traj = np.expand_dims(demo_traj, axis=0)

    plt.plot(objects[:, 0], objects[:, 1], 'ko')
    for traj in demo_traj:
        plt.plot(traj[:, 0], traj[:, 1], 'r-')

    if fitted_traj is not None:
        if type(fitted_traj) is list: fitted_traj = np.array(fitted_traj)
        if len(fitted_traj.shape) == 2:
            fitted_traj = np.expand_dims(fitted_traj, axis=0)
        for traj in fitted_traj:
            plt.plot(traj[:, 0], traj[:, 1], 'b-')

    ## plt.plot(self.start_state[0], self.start_state[1], 'rx')
    ## plt.plot(self.goal_state[0], self.goal_state[1], 'r^')
    plt.xlim(xlim)
    plt.ylim(ylim)
    plt.show()
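A minimal call with made-up data (the function itself assumes import numpy as np and import matplotlib.pyplot as plt); traj_plot's expand_dims handling turns the single (T, 2) array into a batch of one:

import numpy as np
import matplotlib.pyplot as plt

demo = np.random.rand(50, 2)  # one (T, 2) trajectory
objects = np.array([[0.5, 0.5]])  # a single object marker
traj_plot(demo, None, objects, xlim=(0, 1), ylim=(0, 1))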
Example #12
import argparse

import numpy as np
import tensorflow as tf


parser = argparse.ArgumentParser("evaluate tflite model")
parser.add_argument("-tflite_file", type=str, required=True)
parser.add_argument("-quantize", action='store_true')
parser.add_argument("-n_test", type=int, default=50)
args = parser.parse_args()

tflite_file = args.tflite_file
n_test = args.n_test

test_x = np.load("sv_set/voxc1/fbank64/dev/merged/test_500.npy")
test_y = np.load("sv_set/voxc1/fbank64/dev/merged/test_500_label.npy")
test_x = np.expand_dims(test_x[:n_test], 2) / 24
test_y = test_y[:n_test]

def quantize(detail, data):
    shape = detail['shape']
    dtype = detail['dtype']
    a, b = detail['quantization']

    return (data/a + b).astype(dtype).reshape(shape)

def dequantize(detail, data):
    a, b = detail['quantization']

    return (data - b)*a
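The script above defines its quantize/dequantize helpers but stops before running the model; a minimal inference loop might look like the sketch below. The per-sample batching and the accuracy bookkeeping are assumptions for illustration, not part of the original:

interpreter = tf.lite.Interpreter(model_path=tflite_file)
interpreter.allocate_tensors()
in_detail = interpreter.get_input_details()[0]
out_detail = interpreter.get_output_details()[0]

n_correct = 0
for x, y in zip(test_x, test_y):
    x = np.expand_dims(x, 0).astype(np.float32)  # add batch dimension
    if args.quantize:
        x = quantize(in_detail, x)
    interpreter.set_tensor(in_detail['index'], x)
    interpreter.invoke()
    out = interpreter.get_tensor(out_detail['index'])
    if args.quantize:
        out = dequantize(out_detail, out)
    n_correct += int(np.argmax(out) == y)
print('accuracy: {:.3f}'.format(n_correct / len(test_y)))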

Example #13
test_image = cv2.imread(r'F:\hdataset\prain\cars\carsgraz_139.bmp')
test_image = cv2.cvtColor(test_image, cv2.COLOR_BGR2GRAY)
test_image = cv2.resize(test_image, (128, 128))
test_image = np.array(test_image)
test_image = test_image.astype('float32')
test_image = test_image / 255
print(test_image.shape)
if num_channels == 1:
    if k.image_dim_ordering() == 'th':
        test_image = np.expand_dims(test_image, axis=0)
        test_image = np.expand_dims(test_image, axis=0)
        print(test_image.shape)
    else:
        test_image = np.expand_dims(test_image, axis=0)
        test_image = np.expand_dims(test_image, axis=3)  # (1, 128, 128, 1), channels-last
        print(test_image.shape)
else:
    if k.image_dim_ordering() == 'th':
        test_image = np.rollaxis(test_image, 2, 0)
        test_image = np.expand_dims(test_image, axis=0)
        print(test_image.shape)
    else:
        test_image = np.expand_dims(test_image, axis=0)
        print(test_image.shape)
print(model.predict(test_image))
print(model.predict_classes(test_image))
from sklearn.metrics import confusion_matrix
Y_pred = model.predict(X_test)
print(Y_pred)
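The confusion_matrix import above is never used; a hedged completion, assuming Y_test and Y_pred are both one-hot encoded:

print(confusion_matrix(np.argmax(Y_test, axis=1), np.argmax(Y_pred, axis=1)))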
Example #14
plt.show()

# Plot the first X test images, their predicted label, and the true label
# Color correct predictions in blue, incorrect predictions in red
num_rows = 5
num_cols = 3
num_images = num_rows * num_cols
plt.figure(figsize=(2 * 2 * num_cols, 2 * num_rows))
for i in range(num_images):
    plt.subplot(num_rows, 2 * num_cols, 2 * i + 1)
    plot_image(i, predictions, test_labels, test_images)
    plt.subplot(num_rows, 2 * num_cols, 2 * i + 2)
    plot_value_array(i, predictions, test_labels)
plt.show()

# Grab an image from the test dataset
img = test_images[0]
print(img.shape)

# Add the image to a batch where it's the only member.
img = np.expand_dims(img, 0)
print(img.shape)

predictions_single = model.predict(img)
print(predictions_single)

plot_value_array(0, predictions_single, test_labels)
_ = plt.xticks(range(10), class_names, rotation=45)

np.argmax(predictions_single[0])
Example #15
def process(self, S, X):
    X = np.expand_dims(X, axis=2)
    self.S1 = np.append(S[:, :, 1:], X, axis=2)
Example #16
6. Predicting on data
    Once the model is trained, you will want to try predicting on some data; the code below shows how.
1.  The first three lines are Colab-specific and let us upload images.
2.  np.expand_dims adds a new dimension along axis=0.
3.  np.vstack stacks arrays along the vertical axis.
    The key point is to make sure the input data matches the data type and shape the model expects.
'''

from google.colab import files
from keras.preprocessing import image
import numpy as np

upload = files.upload()
for fn in upload.keys():
    path = '/content/' + fn
    img = image.load_img(path, target_size=(300,300))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    
    images = np.vstack([x])
    classes = model.predict(images, batch_size=10)

    print(classes[0])
    if classes[0] > 0.5:
        print(fn + ' is a human')
    else:
        print(fn + ' is a horse')