Example #1
def predict(image_file):
    image = preprocess_image(image_file, [256, 256, 3])
    # Load the trained model and run inference
    model = tflearn.DNN(resnet())
    model.load(pjoin(MODEL_PATH, MODEL_NAME))
    y_pred = model.predict([image])
    label = np.argmax(y_pred[0])
    return 'Cat' if label == 0 else 'Dog'
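Every example on this page calls a project-specific preprocess_image helper that is never shown. A minimal sketch of the variant assumed here (load, resize to [H, W, C], scale pixels to [0, 1]) might look like this; the PIL-based implementation is an assumption, not the source's code:

import numpy as np
from PIL import Image

def preprocess_image(image_file, shape):
    # Hypothetical helper: resize to the target (H, W) and scale to [0, 1]
    img = Image.open(image_file).convert('RGB')
    img = img.resize((shape[1], shape[0]))  # PIL expects (width, height)
    return np.asarray(img, dtype=np.float32) / 255.0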
Example #2
    def predict(self, img):
        img = image_utils.preprocess_image(img)

        input_data = img[np.newaxis, :, :, :]  # Add a batch dimension for Keras
        prediction = self.model.predict(input_data)[0]

        top5 = self.get_top5(prediction)
        return top5
Example #3
    def classify(self, data):

        image = self.convertImg(data)
        image = preprocess_image(image, dbg=False)
        # Debug hook (disabled): save the preprocessed image once
        #if not self.hasSaved:
        #  self.hasSaved = True
        #  np.save(self.encoding,image)
        # Classifier stubbed out; always report False (the live call appears in Example #6)
        shouldTurn = False

        pub.publish(shouldTurn)
Example #4
    def predict_cropped(self, img):
        img = image_utils.preprocess_image(img)
        # Predict on the crops plus the full image, then average the results
        cropped = image_utils.crop_split(img)
        cropped.append(img)
        input_data = np.stack(cropped, axis=0)

        predictions = self.model.predict(input_data)
        avg_predictions = np.mean(predictions, axis=0)

        top5 = self.get_top5(avg_predictions)
        return top5
Example #5
    def recognize_face(self, img, conf=0.6):
        with self.graph.as_default():
            with tf.Session(graph=self.graph) as sess:
                # Get facenet embeddings
                emb_array = np.zeros((1, self.embedding_size))
                image = np.array([preprocess_image(img, False, False, 160)])
                feed_dict = {
                    self.images_placeholder: image,
                    self.phase_train_placeholder: False
                }
                emb_array[0, :] = sess.run(self.embeddings,
                                           feed_dict=feed_dict)
                print(emb_array)
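Note the conf threshold goes unused in this excerpt and the embedding is only printed. A hedged sketch of the matching step that typically follows (the helper name match_embedding and the known_embeddings dict are assumptions, with conf treated as a Euclidean-distance threshold):

import numpy as np

def match_embedding(emb, known_embeddings, conf=0.6):
    # Hypothetical helper: pick the nearest known face by Euclidean distance
    best_name, best_dist = None, float('inf')
    for name, known_emb in known_embeddings.items():
        dist = np.linalg.norm(emb - known_emb)
        if dist < best_dist:
            best_name, best_dist = name, dist
    return best_name if best_dist < conf else 'unknown'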
Example #6
    def classify(self, data):
        image = self.convertImg(data)
        image = preprocess_image(image, dbg=False)
        print(image.shape)
        raw_input('image.shape')  # Python 2 debug pause; press Enter to continue
        shouldTurn, duration = self._classifier.predict(image, verbose=True)
        #if not self.hasSaved:
        #  self.hasSaved = True
        #  np.save(self.encoding,image)
        # Run classifier
        # shouldTurn = False

        pub.publish(shouldTurn)
Example #7
def read_data():
    X = []
    Y = []
    for f in glob.glob(TRAIN_DATA + '/*.jpg'):
        fname = os.path.basename(f)
        # 0 for cat, 1 for dog
        label = 0 if fname.startswith('cat') else 1
        image = preprocess_image(f, [256, 256, 3])
        X.append(image)
        Y.append(label)
    # Split into training and validation sets
    X, X_test, y, y_test = train_test_split(X,
                                            Y,
                                            test_size=0.2,
                                            random_state=42)
    return (X, y), (X_test, y_test)
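For context, a hedged training sketch that pairs read_data() with the model from Example #1 (reusing resnet(), MODEL_PATH, and MODEL_NAME; the epoch count is an assumption):

(X, y), (X_test, y_test) = read_data()
model = tflearn.DNN(resnet())
model.fit(X, y, n_epoch=10, validation_set=(X_test, y_test), show_metric=True)
model.save(pjoin(MODEL_PATH, MODEL_NAME))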
Example #8
File: app.py  Project: Holmedy/api
    def encode_image(
        self,
        image_file: Optional[Union[PurePath, str]] = None,
        image_array: Optional[np.ndarray] = None,
    ) -> np.ndarray:
        """
        Generate CNN encoding for a single image.
        Args:
            image_file: Path to the image file.
            image_array: Optional, used instead of image_file. Image typecast to numpy array.
        Returns:
            encoding: Encodings for the image in the form of numpy array.
        Example:
        ```
        from imagededup.methods import CNN
        myencoder = CNN()
        encoding = myencoder.encode_image(image_file='path/to/image.jpg')
        OR
        encoding = myencoder.encode_image(image_array=<numpy array of image>)
        ```
        """
        if isinstance(image_file, str):
            image_file = Path(image_file)

        if isinstance(image_file,
                      (PurePath, urllib.request.http.client.HTTPResponse)):

            if isinstance(image_file, PurePath) and not image_file.is_file():
                raise ValueError(
                    "Please provide either image file path or image array!")

            image_pp = load_image(image_file=image_file,
                                  target_size=self.target_size,
                                  grayscale=False)

        elif isinstance(image_array, np.ndarray):
            image_pp = preprocess_image(image=image_array,
                                        target_size=self.target_size,
                                        grayscale=False)
        else:
            raise ValueError(
                "Please provide either image file path or image array!")

        return (self._get_cnn_features_single(image_pp) if isinstance(
            image_pp, np.ndarray) else None)
Example #9
def batched_calibration_fn():
    """Function to pass as an argument to the builder function;
    it is used to optimize the network based on the given examples."""
    print("[CALIBRATION] Starting calibration process...")

    images_found = sorted(os.listdir(IMAGES_PATH))
    print("[CALIBRATION] Obtaining calibration images from {}"
          .format(IMAGES_PATH))
    print("[CALIBRATION] Done! Found {} images for calibration"
          .format(len(images_found)))

    print("[CALIBRATION] Starting image yielding...")
    start_calibration = time.time()
    batched_input = np.zeros((len(images_found), INPUT_SIZE, INPUT_SIZE, 3), dtype=np.uint8)
    for input_value in range(len(images_found)):
        start_adding = time.time()

        # Read and resize the image
        input_image = imgutils.read_image_from_cv2(IMAGES_PATH + images_found[input_value])
        image_data = imgutils.preprocess_image(input_image, (INPUT_SIZE, INPUT_SIZE))

        # Add a new axis to match the requested shape
        final_image = image_data[np.newaxis, ...].astype("uint8")

        # Add the image to the batched input
        batched_input[input_value, :] = final_image

        print("[CALIBRATION] Adding image {} from {}".format(input_value + 1, images_found[input_value]))
        print("[CALIBRATION] Image adding step done, it took {} ms"
              .format((time.time() - start_adding) * 1000))

    # Yield the full calibration batch
    print("[CALIBRATION] Yielding batched input")
    start_yielding = time.time()
    yield (batched_input,)
    print("[CALIBRATION] Image yielding done, it took {} ms"
          .format((time.time() - start_yielding) * 1000))

    calibration_time = (time.time() - start_calibration) * 1000
    print("[CALIBRATION] Calibration process finished, it took {} ms"
          .format(calibration_time))
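A hedged sketch of how such a calibration function is typically passed to the TF-TRT converter (TF 2.x API; the SavedModel paths are placeholders):

from tensorflow.python.compiler.tensorrt import trt_convert as trt

converter = trt.TrtGraphConverterV2(
    input_saved_model_dir='saved_model_dir',  # placeholder path
    precision_mode=trt.TrtPrecisionMode.INT8,
    use_calibration=True)
# INT8 calibration consumes the batches yielded by batched_calibration_fn
converter.convert(calibration_input_fn=batched_calibration_fn)
converter.save('trt_saved_model_dir')  # placeholder path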
Example #10
def DataGenerator(data_loader, batch_size=32, use_augment=True):
    while True:
        datas = []
        labels = []
        while len(datas) < batch_size:
            im = data_loader.random_im()
            img = data_loader.get_image(im)
            label = data_loader.get_label(im)

            img = image_utils.preprocess_image(img)

            data = [img]
            if use_augment:
                augmentations = augment.get_augmentations(img)
                data.extend(augmentations)
                data = random.sample(data, 4)

            datas.extend(data)
            labels.extend([label] * len(data))

        data = np.stack(datas[:batch_size], axis=0)
        label = np.stack(labels[:batch_size], axis=0)
        yield (data, label)
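A hedged usage sketch with the classic Keras generator API (the model and the steps_per_epoch value are assumptions):

train_gen = DataGenerator(data_loader, batch_size=32, use_augment=True)
model.fit_generator(train_gen, steps_per_epoch=100, epochs=10)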
Example #11
def style_transfer(content_image,
                   style_image,
                   image_size,
                   style_size,
                   content_layer,
                   content_weight,
                   style_layers,
                   style_weights,
                   tv_weight,
                   init_random=False):
    """Run style transfer!
    
    Inputs:
    - content_image: filename of content image
    - style_image: filename of style image
    - image_size: size of smallest image dimension (used for content loss and generated image)
    - style_size: size of smallest style image dimension
    - content_layer: layer to use for content loss
    - content_weight: weighting on content loss
    - style_layers: list of layers to use for style loss
    - style_weights: list of weights to use for each layer in style_layers
    - tv_weight: weight of total variation regularization term
    - init_random: initialize the starting image to uniform random noise
    """
    # Extract features from the content image
    content_img = preprocess_image(load_image(content_image, size=image_size))
    feats = model.extract_features(model.image)
    content_target = sess.run(feats[content_layer],
                              {model.image: content_img[None]})

    # Extract features from the style image
    style_img = preprocess_image(load_image(style_image, size=style_size))
    style_feat_vars = [feats[idx] for idx in style_layers]
    style_target_vars = []
    # Compute list of TensorFlow Gram matrices
    for style_feat_var in style_feat_vars:
        style_target_vars.append(gram_matrix(style_feat_var))
    # Compute list of NumPy Gram matrices by evaluating the TensorFlow graph on the style image
    style_targets = sess.run(style_target_vars, {model.image: style_img[None]})

    # Initialize generated image to content image

    if init_random:
        img_var = tf.Variable(tf.random_uniform(content_img[None].shape, 0, 1),
                              name="image")
    else:
        img_var = tf.Variable(content_img[None], name="image")

    # Extract features on generated image
    feats = model.extract_features(img_var)
    # Compute loss
    c_loss = content_loss(content_weight, feats[content_layer], content_target)
    s_loss = style_loss(feats, style_layers, style_targets, style_weights)
    t_loss = tv_loss(img_var, tv_weight)
    loss = c_loss + s_loss + t_loss

    # Set up optimization hyperparameters
    initial_lr = 3.0
    decayed_lr = 0.1
    decay_lr_at = 180
    max_iter = 200

    # Create and initialize the Adam optimizer
    lr_var = tf.Variable(initial_lr, name="lr")
    # Create train_op that updates the generated image when run
    with tf.variable_scope("optimizer") as opt_scope:
        train_op = tf.train.AdamOptimizer(lr_var).minimize(loss,
                                                           var_list=[img_var])
    # Initialize the generated image and optimization variables
    opt_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                 scope=opt_scope.name)
    sess.run(tf.variables_initializer([lr_var, img_var] + opt_vars))
    # Create an op that will clamp the image values when run
    clamp_image_op = tf.assign(img_var, tf.clip_by_value(img_var, -1.5, 1.5))

    f, axarr = plt.subplots(1, 2)
    axarr[0].axis('off')
    axarr[1].axis('off')
    axarr[0].set_title('Content Source Img.')
    axarr[1].set_title('Style Source Img.')
    axarr[0].imshow(deprocess_image(content_img))
    axarr[1].imshow(deprocess_image(style_img))
    plt.savefig('orig_imgs')
    plt.show()
    plt.figure()

    # Run the optimization loop
    for t in range(max_iter):
        # Take an optimization step to update img_var
        sess.run(train_op)
        if t < decay_lr_at:
            sess.run(clamp_image_op)
        if t == decay_lr_at:
            sess.run(tf.assign(lr_var, decayed_lr))
        if t % 100 == 0:
            print('Iteration {}'.format(t))
            img = sess.run(img_var)
            plt.imshow(deprocess_image(img[0], rescale=True))
            plt.axis('off')
            plt.savefig('iteration{}'.format(t))
            plt.show()
    print('Iteration {}'.format(t))
    img = sess.run(img_var)
    plt.imshow(deprocess_image(img[0], rescale=True))
    plt.axis('off')
    plt.savefig('style_transfer.png')
    plt.show()
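A hedged invocation sketch (the file names, layer indices, and weights are illustrative placeholders, not values from the source):

style_transfer(content_image='styles/tubingen.jpg',
               style_image='styles/starry_night.jpg',
               image_size=192,
               style_size=512,
               content_layer=3,
               content_weight=5e-2,
               style_layers=[1, 4, 6, 7],
               style_weights=[200000, 800, 12, 1],
               tv_weight=5e-2)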
Example #12

# Shallow feature reconstruction
filename = 'kitten.jpg'
layer = 3  # layers are 0-indexed, so these are the features after 4 convolutions
img = imresize(imread(filename), (64, 64))

plt.imshow(img)
plt.gcf().set_size_inches(3, 3)
plt.title('Original image')
plt.axis('off')
plt.show()

# Preprocess the image before passing it to the network:
# subtract the mean, add a dimension, etc
img_pre = preprocess_image(img, data['mean_image'])

# Extract features from the image
feats, _ = model.forward(img_pre, end=layer)

# Invert the features
kwargs = {
    'num_iterations': 400,
    'learning_rate': 5000,
    'l2_reg': 1e-8,
    'show_every': 100,
    'blur_every': 10,
}
X = invert_features(feats, layer, model, **kwargs)

Example #13

# Deep feature reconstruction
def style_transfer(content_img_path,
                   img_size,
                   style_img_path,
                   style_size,
                   content_layer,
                   content_weight,
                   style_layers,
                   style_weights,
                   tv_weight,
                   init_random=False):
    """Perform style transfer from style image to source content image
    
    Args:
        content_img_path (str): File location of the content image.
        img_size (int): Size of the smallest content image dimension.
        style_img_path (str): File location of the style image.
        style_size (int): Size of the smallest style image dimension.
        content_layer (int): Index of the layer to use for content loss.
        content_weight (float): Scalar weight for content loss.
        style_layers ([]int): Indices of layers to use for style loss.
        style_weights ([]float): List of scalar weights to use for each layer in style_layers.
        tv_weight (float): Scalar weight of total variation regularization term.
        init_random (boolean): Whether to initialize the starting image to uniform random noise.
    """
    tf.reset_default_graph()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    try:
        model = SqueezeNet(ckpt_path=CKPT_PATH, sess=sess)
    except NotFoundError:
        raise ValueError('Checkpoint file not found; please check %s' %
                         CKPT_PATH)

    # Extract features from content image
    content_img = preprocess_image(load_image(content_img_path, size=img_size))
    content_feats = model.extract_features(model.image)

    # Create content target
    content_target = sess.run(content_feats[content_layer],
                              {model.image: content_img[None]})

    # Extract features from style image
    style_img = preprocess_image(load_image(style_img_path, size=style_size))
    style_feats_by_layer = [content_feats[i] for i in style_layers]

    # Create style targets
    style_targets = []
    for style_feats in style_feats_by_layer:
        style_targets.append(gram_matrix(style_feats))
    style_targets = sess.run(style_targets, {model.image: style_img[None]})

    if init_random:
        generated_img = tf.Variable(tf.random_uniform(content_img[None].shape,
                                                      0, 1),
                                    name="image")
    else:
        generated_img = tf.Variable(content_img[None], name="image")

    # Extract features from generated image
    current_feats = model.extract_features(generated_img)

    loss = content_loss(content_weight, current_feats[content_layer], content_target) + \
        style_loss(current_feats, style_layers, style_targets, style_weights) + \
        total_variation_loss(generated_img, tv_weight)

    # Set up optimization parameters
    init_learning_rate = 3.0
    decayed_learning_rate = 0.1
    max_iter = 200

    learning_rate = tf.Variable(init_learning_rate, name="lr")
    with tf.variable_scope("optimizer") as opt_scope:
        train_op = tf.train.AdamOptimizer(learning_rate).minimize(
            loss, var_list=[generated_img])

    opt_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                 scope=opt_scope.name)
    sess.run(
        tf.variables_initializer([learning_rate, generated_img] + opt_vars))

    # Create an op that will clamp the image values when run
    clamp_image_op = tf.assign(generated_img,
                               tf.clip_by_value(generated_img, -1.5, 1.5))

    display_content_and_style(content_img, style_img)

    for t in range(max_iter):
        sess.run(train_op)
        if t < int(0.90 * max_iter):
            sess.run(clamp_image_op)
        elif t == int(0.90 * max_iter):
            sess.run(tf.assign(learning_rate, decayed_learning_rate))

        if t % 20 == 0:
            current_loss = sess.run(loss)
            print('Iteration %d: %f' % (t, current_loss))

    img = sess.run(generated_img)
    plt.imshow(deprocess_image(img[0], rescale=True))
    plt.axis('off')
    plt.show()
Example #14
    for patch_index, start in enumerate(starts):
        print(
            f"Processing patch {patch_index + 1} of {len(starts)} for image {image_id}..."
        )

        end = start[0] + n0, start[1] + n1

        patch_image = image[start[0]:end[0], start[1]:end[1], :]

        downsampled_patch = resize(patch_image, (r0, r1),
                                   anti_aliasing=config['image_antialiasing'],
                                   order=3,
                                   preserve_range=True)

        downsampled_patch = preprocess_image(downsampled_patch, model_type)

        downsampled_mask = np.squeeze(
            model.predict(downsampled_patch[np.newaxis, :], batch_size=1))

        # Upsample the predicted mask to match the original patch
        patch_mask = resize(downsampled_mask, (n0, n1),
                            anti_aliasing=False,
                            order=3,
                            preserve_range=True)

        # Add the patch prediction back into the full mask, and update the overlap count
        mask_image[start[0]:end[0], start[1]:end[1]] += patch_mask
        count_image[start[0]:end[0], start[1]:end[1]] += 1.0

    # Average the predictions where patches overlap
    mask_image = mask_image / count_image
Example #15
def style_transfer(content_img, style_img, content_size, style_size,
                   content_layer, style_layers, content_weight, style_weights,
                   tv_weight, init_random=False):
    content_pre_img = preprocess_image(load_image(content_img, size=content_size))
    # Extract features of every layer from the input image
    feats = model.extract_features(model.image)
    content_targets = sess.run(feats[content_layer],
                               {model.image: content_pre_img[None]})
    style_pre_img = preprocess_image(load_image(style_img, size=style_size))
    style_feats = [feats[idx] for idx in style_layers]
    # Compute the Gram matrix targets for the style layers
    style_target = []
    for style_feat_var in style_feats:
        style_target.append(gram_matrix(style_feat_var))
    style_targets = sess.run(style_target, {model.image: style_pre_img[None]})

    if init_random:
        img_var = tf.Variable(tf.random_uniform(content_pre_img[None].shape, 0, 1),
                              name="image")
    else:
        img_var = tf.Variable(content_pre_img[None], name="image")

    # Compute the loss on the generated image
    feat = model.extract_features(img_var)
    conloss = content_loss(content_weight, feat[content_layer], content_targets)
    styloss = style_loss(style_weights, feat, style_layers, style_targets)
    tvloss = TV_loss(img_var, tv_weight)
    loss = conloss + styloss + tvloss

    # Optimization hyperparameters
    initial_lr = 3.0
    decayed_lr = 0.1
    decayed_lr_at = 180
    max_iters = 200

    lr_var = tf.Variable(initial_lr, name="lr")
    # Create train_op that updates the generated image when run
    with tf.variable_scope("optimizer") as opt_scope:
        train_op = tf.train.AdamOptimizer(lr_var).minimize(loss, var_list=[img_var])
    # Initialize the generated image and optimization variables
    opt_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=opt_scope.name)
    sess.run(tf.variables_initializer([lr_var, img_var] + opt_vars))
    # Create an op that will clamp the image values when run
    clamp_image = tf.assign(img_var, tf.clip_by_value(img_var, -1.5, 1.5))

    # Plot the content and style source images
    f, s = plt.subplots(1, 2)
    s[0].axis('off')
    s[1].axis('off')
    s[0].set_title('content source img')
    s[1].set_title('style source img')
    s[0].imshow(deprocess_image(content_pre_img))
    s[1].imshow(deprocess_image(style_pre_img))
    plt.show()
    plt.figure()

    for i in range(max_iters):
        # Take an optimization step to update the image
        sess.run(train_op)
        if i < decayed_lr_at:
            sess.run(clamp_image)
        if i == decayed_lr_at:
            sess.run(tf.assign(lr_var, decayed_lr))
        if i % 100 == 0:
            print('Iteration:{}'.format(i))
            img = sess.run(img_var)
            plt.imshow(deprocess_image(img[0], rescale=True))
            plt.axis('off')
            plt.show()
    print('Iteration:{}'.format(i))
    img = sess.run(img_var)
    plt.imshow(deprocess_image(img[0], rescale=True))
    plt.axis('off')
    plt.show()
Example #16
def check_scipy():
    import scipy
    # Compare (major, minor) so the check also passes on scipy >= 1.0
    major, minor = (int(v) for v in scipy.__version__.split('.')[:2])
    assert (major, minor) >= (0, 16), "You must install scipy >= 0.16.0"
check_scipy()

from squeezenet import SqueezeNet
import tensorflow as tf

tf.reset_default_graph()
sess = get_session()
SAVE_PATH = 'datasets/squeezenet.ckpt'
print(SAVE_PATH)
#if not os.path.exists(SAVE_PATH):
    #raise ValueError("You need to download SqueezeNet!")
model = SqueezeNet(save_path=SAVE_PATH, sess=sess)

# Load data for testing
content_img_test = preprocess_image(load_image('tubingen.jpg', size=192))[None]
style_img_test = preprocess_image(load_image('starry_night.jpg', size=192))[None]
answers = np.load('style-transfer-checks.npz')


def content_loss(content_weight, content_curr, content_orig):
    return content_weight * tf.reduce_sum(tf.squared_difference(content_curr, content_orig))


def gram_matrix(features, normalize=True):
    """Inputs: the shape of features is (1, H, W, C)"""
    features = tf.transpose(features, [0, 3, 1, 2])
    shape = tf.shape(features)
    features = tf.reshape(features, (shape[0], shape[1], -1))
    transpose_features = tf.transpose(features, [0, 2, 1])
    output = tf.matmul(features, transpose_features)
    if normalize:
        output = tf.div(output, tf.cast(shape[0] * shape[1] * shape[2] * shape[3], tf.float32))
    return output
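A quick sanity check for gram_matrix (a sketch; for features of shape (1, H, W, C) the Gram matrix should come out as (1, C, C)):

feats = tf.zeros((1, 8, 8, 16))
gram = gram_matrix(feats)
print(sess.run(gram).shape)  # expected: (1, 16, 16)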
Example #17
def style_transfer(content_image, style_image, image_size, style_size, content_layer, content_weight,
                   style_layers, style_weights, tv_weight, model, sess, out_path, init_random = False, 
                   max_iter = 100):
    """Run style transfer!
    
    Inputs:
    - content_image: filename of content image
    - style_image: filename of style image
    - image_size: size of smallest image dimension (used for content loss and generated image)
    - style_size: size of smallest style image dimension
    - content_layer: layer to use for content loss
    - content_weight: weighting on content loss
    - style_layers: list of layers to use for style loss
    - style_weights: list of weights to use for each layer in style_layers
    - tv_weight: weight of total variation regularization term
    - init_random: initialize the starting image to uniform random noise
    """
    # Extract features from the content image
    content_img = preprocess_image(load_image(content_image, size=image_size))
    feats = model.extract_features(model.image)
    content_target = sess.run(feats[content_layer],
                              {model.image: content_img[None]})

    # Extract features from the style image
    style_img = preprocess_image(load_image(style_image, size=style_size))
    style_feat_vars = [feats[idx] for idx in style_layers]
    style_target_vars = []
    # Compute list of TensorFlow Gram matrices
    for style_feat_var in style_feat_vars:
        style_target_vars.append(gram_matrix(style_feat_var))
    # Compute list of NumPy Gram matrices by evaluating the TensorFlow graph on the style image
    style_targets = sess.run(style_target_vars, {model.image: style_img[None]})

    # Initialize generated image to content image
    
    if init_random:
        img_var = tf.Variable(tf.random_uniform(content_img[None].shape, 0, 1), name="image")
    else:
        img_var = tf.Variable(content_img[None], name="image")

    # Extract features on generated image
    feats = model.extract_features(img_var)
    # Compute loss
    c_loss = content_loss(content_weight, feats[content_layer], content_target)
    s_loss = style_loss(feats, style_layers, style_targets, style_weights)
    t_loss = tv_loss(img_var, tv_weight)
    loss = c_loss + s_loss + t_loss
    
    # Set up optimization hyperparameters
    initial_lr = 3.0
    decayed_lr = 0.1
    decay_lr_at = 180
    

    # Create and initialize the Adam optimizer
    lr_var = tf.Variable(initial_lr, name="lr")
    # Create train_op that updates the generated image when run
    with tf.variable_scope("optimizer") as opt_scope:
        train_op = tf.train.AdamOptimizer(lr_var).minimize(loss, var_list=[img_var])
    # Initialize the generated image and optimization variables
    opt_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=opt_scope.name)
    sess.run(tf.variables_initializer([lr_var, img_var] + opt_vars))
    # Create an op that will clamp the image values when run
    clamp_image_op = tf.assign(img_var, tf.clip_by_value(img_var, -1.5, 1.5))
    


    # Run the optimization loop
    for t in range(max_iter):
        # Take an optimization step to update img_var
        sess.run(train_op)
        if t < decay_lr_at:
            sess.run(clamp_image_op)
        if t == decay_lr_at:
            sess.run(tf.assign(lr_var, decayed_lr))

    img = sess.run(img_var)        
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    plt.axis('off')
    plt.imshow(deprocess_image(img[0], rescale=True))
    extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
    if out_path is not None:
        plt.savefig(out_path, bbox_inches=extent, pad_inches=0)
    else:
        out_path = re.split(r"/|\.", content_image)[-2]
        out_path = "output/" + out_path + "_out.jpg"
        plt.savefig(out_path, bbox_inches=extent, pad_inches=0)
Example #18
    error = rel_error(correct, output)
    print('Error is {}'.format(error))


def tv_loss_test(correct):
    tv_weight = 2e-2
    t_loss = tv_loss(model.image, tv_weight)
    output = sess.run(t_loss, {model.image: content_img_test})
    error = rel_error(correct, output)
    print('Error is {}'.format(error))


if __name__ == '__main__':
    tf.reset_default_graph()  # remove all existing variables in the graph
    sess = get_session()  # start a new Session
    model = load_model(sess)

    content_img_test = preprocess_image(
        load_image('styles/tubingen.jpg', size=192))[None]
    style_img_test = preprocess_image(
        load_image('styles/starry_night.jpg', size=192))[None]
    answers = np.load('style-transfer-checks-tf.npz')

    content_loss_test(answers['cl_out'])

    gram_matrix_test(answers['gm_out'])

    style_loss_test(answers['sl_out'])

    tv_loss_test(answers['tv_out'])