Example #1
    def rgb_callback(self, data):
        # Convert the incoming ROS Image message to an OpenCV array
        img = self.bridge.imgmsg_to_cv2(data, desired_encoding="passthrough")
        img = cv2.resize(img, (320, 240))
        rows, cols, _ = img.shape

        # Rotate the image 180 degrees about its center
        M = cv2.getRotationMatrix2D((cols / 2, rows / 2), 180, 1)
        img = cv2.warpAffine(img, M, (cols, rows))

        #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        # Apply the same resize and rotation to the latest depth frame
        depth = cv2.resize(self.depth, (320, 240))
        depth = cv2.warpAffine(depth, M, (cols, rows))

        # Scale depth to meters (assuming a millimeter-encoded sensor)
        depth = depth.astype(np.float32) / 1000

        tf = transforms.ToTensor()
        source = tf(img)
        # Mask marks pixels where at least one RGB channel is non-zero
        mask = (torch.sum(source[:3, :, :], 0) > 0).float().unsqueeze(0)
        source_depth = tf(np.expand_dims(depth, 2).astype(np.float32) / 128.0 * 255)
        mask = torch.cat([source_depth, mask], 0)

        # Run the model on the masked input and publish the reconstruction
        self.imgv.data.copy_(source)
        self.maskv.data.copy_(mask)
        recon = self.model(self.imgv, self.maskv)
        goggle_img = (recon.data.clamp(0, 1).cpu().numpy()[0].transpose(1, 2, 0) * 255).astype(np.uint8)
        goggle_msg = self.bridge.cv2_to_imgmsg(goggle_img, encoding="rgb8")
        self.image_pub.publish(goggle_msg)

        depth_msg = self.bridge.cv2_to_imgmsg(depth, encoding="passthrough")
        self.depth_pub.publish(depth_msg)
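
The resize-and-rotate preprocessing above can be exercised on its own; a minimal sketch, using a dummy array in place of the decoded ROS image:

import cv2
import numpy as np

img = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in camera frame
img = cv2.resize(img, (320, 240))
rows, cols, _ = img.shape
# Rotate 180 degrees about the image center, keeping scale 1
M = cv2.getRotationMatrix2D((cols / 2, rows / 2), 180, 1)
img = cv2.warpAffine(img, M, (cols, rows))
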
Example #2
def standarize_image(image):
    # ImageNet channel statistics
    mean = (0.485, 0.456, 0.406)
    std = (0.229, 0.224, 0.225)
    # Scale uint8 pixels to [0, 1], then normalize per channel
    tf = ToFloat(max_value=255.0)
    norm = Normalize(mean=mean, std=std, max_pixel_value=1.0)
    image = tf(image=image)['image']
    image = norm(image=image)['image']
    return image
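
A quick way to exercise the helper above; a minimal sketch, assuming the albumentations ToFloat and Normalize transforms it uses are importable:

import numpy as np
from albumentations import Normalize, ToFloat

rgb = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
out = standarize_image(rgb)
print(out.dtype, out.shape)  # float32, per-channel ImageNet-normalized
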
Example #3
def hmpatch_only_corners(x, y, alpha, edge, scale=1):
    # Compose: center on (x, y), rotate by alpha degrees, scale, then shift
    # so the patch occupies an edge x edge window
    tf1 = skimage.transform.SimilarityTransform(translation=[-x, -y])
    tf2 = skimage.transform.SimilarityTransform(rotation=np.deg2rad(alpha))
    tf3 = skimage.transform.SimilarityTransform(scale=scale)
    tf4 = skimage.transform.SimilarityTransform(
        translation=[+edge / 2, +edge / 2])
    tf = (tf1 + (tf2 + (tf3 + tf4))).inverse
    # Map the four patch corners plus its center back into map coordinates
    corners = tf(np.array([[0, 0], [1, 0], [1, 1], [0, 1], [0.5, 0.5]]) * edge)
    return corners
Example #4
def hmpatch(hm, x, y, alpha, edge, scale=1):
    tf1 = skimage.transform.SimilarityTransform(translation=[-x, -y])
    tf2 = skimage.transform.SimilarityTransform(rotation=np.deg2rad(alpha))
    tf3 = skimage.transform.SimilarityTransform(scale=scale)
    tf4 = skimage.transform.SimilarityTransform(
        translation=[+edge / 2, +edge / 2])
    tf = (tf1 + (tf2 + (tf3 + tf4))).inverse
    corners = tf(np.array([[0, 0], [1, 0], [1, 1], [0, 1], [0.5, 0.5]]) * edge)
    # warp() expects the inverse map, i.e. patch coordinates -> heightmap coordinates
    patch = skimage.transform.warp(hm,
                                   tf,
                                   output_shape=(edge, edge),
                                   mode="edge")
    return patch, corners
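
The patch extraction can be checked on a synthetic heightmap; a minimal sketch (with scale=1 the patch center should map back to the query point):

import numpy as np
import skimage.transform

hm = np.random.rand(200, 200)  # synthetic heightmap
patch, corners = hmpatch(hm, x=100, y=100, alpha=45, edge=60)
print(patch.shape)  # (60, 60)
print(corners[4])   # ~[100, 100]: the patch center in heightmap coordinates
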
Example #5
def hmpatch_only_corners(x, y, alpha, edge, scale=1):
    # Cut out a patch from the image, centered on (x, y), rotated by alpha
    # degrees (0 means the bottom of hm remains the bottom of the patch,
    # 90 means the bottom of hm becomes the right side of the patch),
    # with a specified edge size (in pixels) and scale (relative).
    tf1 = skimage.transform.SimilarityTransform(translation=[-x, -y])
    tf2 = skimage.transform.SimilarityTransform(rotation=np.deg2rad(alpha))
    tf3 = skimage.transform.SimilarityTransform(scale=scale)
    tf4 = skimage.transform.SimilarityTransform(
        translation=[+edge / 2, +edge / 2])
    tf = (tf1 + (tf2 + (tf3 + tf4))).inverse
    corners = tf(np.array([[0, 0], [1, 0], [1, 1], [0, 1], [0.5, 0.5]]) * edge)
    #patch = skimage.transform.warp(hm, tf,output_shape=(edge,edge),mode="edge")
    return corners
Example #6
    def sentences_intersection(self, sent1, sent2, content):

        # Split each sentence into a set of words/tokens
        total_score = 0
        s1 = set(sent1.split(" "))
        s2 = set(sent2.split(" "))
        score = 0
        # Guard against empty input: with no words there is nothing to score
        if (len(s1) + len(s2)) == 0:
            return 0

        common_words = s1.intersection(s2)

        list_words = list(common_words)
        for word in list_words:
            # Weight each shared word by its tf-idf score in the document
            score = idf(word, content) * tf(word, content)
            total_score += score

        # Return the summed tf-idf weight of the words the sentences share
        return total_score
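
The `tf` and `idf` helpers are not defined in this snippet; a minimal sketch with stand-in term-frequency and (single-document) inverse-document-frequency functions, which the original project may define differently:

import math

def tf(word, content):
    # term frequency: occurrences of word relative to document length
    words = content.split()
    return words.count(word) / float(len(words))

def idf(word, content):
    # degenerate single-document idf over sentences; a corpus-level idf differs
    sentences = content.split(".")
    containing = sum(1 for s in sentences if word in s.split())
    return math.log(len(sentences) / (1.0 + containing))
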
Example #8
def test_fitted_model_full_map_stride_cnn(fitted, heightmap_png, edges, resize,
                                          rad_ori, stride):
    print("Generating traversability image for orientation " + str(rad_ori) +
          " rads")
    #hm=skimage.color.rgb2gray(skimage.io.imread(heightmap_png))
    #hm = hm * height_scale_factor
    hm = read_image(heightmap_png)
    X_mp = []
    #y=[]
    hm_cols = int((np.shape(hm)[0] - edges) / stride)
    hm_rows = int((np.shape(hm)[1] - edges) / stride)

    full_data = pd.DataFrame()
    full_data["patch"] = range(0, hm_cols * hm_rows)
    full_data.set_index("patch")
    print("Filling patches (by stride)")
    full_data["hm_x"] = [
        int((edges / 2) + (j * stride)) for i in range(0, hm_rows)
        for j in range(0, hm_cols)
    ]
    full_data["hm_y"] = [
        int((edges / 2) + (i * stride)) for i in range(0, hm_rows)
        for j in range(0, hm_cols)
    ]
    full_data["G"] = [rad_ori for i in range(0, hm_cols * hm_rows)]

    total_samples = len(full_data.index)

    startTime = time.time()
    print("Cropping patches for feature extraction (only patch cropping)")
    if not multiprocessing:  # module-level flag from the surrounding script
        for i, d in full_data.iterrows():
            print("\rProcessing " + str(i) + "/" + str(total_samples), end='')
            patch = hmpatch(hm,
                            d["hm_x"],
                            d["hm_y"],
                            np.rad2deg(d["G"]),
                            edges,
                            scale=1)[0]
            patch = transform_patch(patch, resize)
            X_mp.append(patch[:, :, np.newaxis])
        print("\rProcessed " + str(total_samples) + "/" + str(total_samples))
    else:
        print("\rProcessing " + str(total_samples) + " [multiprocessing]",
              end='')
        multiprocessing_hm = read_image(heightmap_png)
        X_mp = Parallel(n_jobs=4)(delayed(mc_extract_features_cnn)(
            d["hm_x"], d["hm_y"], np.rad2deg(d["G"]), edges, resize, scale=1)
                                  for idx, d in full_data.iterrows())
    endTime = time.time()
    #calculate the total time it took to complete the work
    workTime = endTime - startTime
    print("-- time: " + str(workTime))
    print("Estimating traversability for all the patches")
    startTime = time.time()

    X = np.array(X_mp).astype('float32')
    y_pred = fitted.predict(X)

    endTime = time.time()
    workTime = endTime - startTime
    print("-- time: " + str(workTime))
    fig, ax1 = plt.subplots(figsize=(9, 9))
    ax1.imshow(hm / height_scale_factor, cmap="viridis")  #cmap="gray")
    fig.savefig(heightmap_png[:-4] + '_out_viridis_base' + '.png', dpi=fig.dpi)

    cax1 = ax1.imshow(hm / height_scale_factor, cmap="viridis")  #cmap="gray")
    cbar = fig.colorbar(
        cax1, ticks=[round(np.amin(hm) + .01, 2),
                     round(np.amax(hm), 2)])
    fig.savefig(heightmap_png[:-4] + '_out_viridis__bar_base' + '.png',
                dpi=fig.dpi)

    fig, ax1 = plt.subplots(figsize=(9, 9))
    ax1.imshow(hm / height_scale_factor, cmap="gray")
    fig.savefig(heightmap_png[:-4] + '_out_gray_base' + '.png', dpi=fig.dpi)

    #draw a white canvas for the traversability results
    # remove this if you want the overlay results
    #ax1.fill([0,0,np.shape(hm)[0],np.shape(hm)[0],0],[0,np.shape(hm)[1],np.shape(hm)[1],0,0],'w',alpha=1.0)

    # draw traversability results on a white, fully transparent RGBA canvas
    sk_hm = np.ones((np.shape(hm)[0], np.shape(hm)[1], 4), dtype='float64')
    sk_hm[:, :, 3] = np.zeros((np.shape(hm)[0], np.shape(hm)[1]),
                              dtype='float64')

    print("Drawing predictions on patchs for current orientation")
    startTime = time.time()

    tf = skimage.transform.SimilarityTransform(
        translation=[edges / 2, edges / 2], rotation=-rad_ori)
    tf_sk = skimage.transform.SimilarityTransform(translation=[10, 10],
                                                  rotation=-rad_ori)
    arrow_points = tf(np.array([[0, 0], [edges / 2, 0]]))
    arrow_points_sk = tf_sk(np.array([[-5, 0], [-10, 5], [5, 0], [-10, -5]
                                      ]))  #15px arrowhead for skimage plotting

    ax1.arrow(arrow_points[0][0],
              arrow_points[0][1],
              arrow_points[1][0] - arrow_points[0][0],
              arrow_points[1][1] - arrow_points[0][1],
              length_includes_head=True,
              width=3)
    patches_squares = []
    patches_colors = []
    for i, d in full_data.iterrows():
        print("\rProcessing " + str(i) + "/" + str(total_samples), end='')
        corners = hmpatch_only_corners(d["hm_x"],
                                       d["hm_y"],
                                       np.rad2deg(d["G"]),
                                       stride,
                                       scale=1)
        color_box_sk = [0.0, 1.0, 0.0,
                        y_pred[i][1]]  # green with prob of non-trav in alpha
        # if we only want to draw the patch without its orientations, use this (avoids holes between patches)
        s_patch = skimage.draw.polygon([
            corners[4, 1] - stride / 2, corners[4, 1] + stride / 2,
            corners[4, 1] + stride / 2, corners[4, 1] - stride / 2,
            corners[4, 1] - stride / 2
        ], [
            corners[4, 0] - stride / 2, corners[4, 0] - stride / 2,
            corners[4, 0] + stride / 2, corners[4, 0] + stride / 2,
            corners[4, 0] - stride / 2
        ])
        skimage.draw.set_color(sk_hm, (s_patch[0], s_patch[1]), color_box_sk)
    # for plotting with skimage
    sk_hm_pure = np.copy(sk_hm)
    s_patch = skimage.draw.polygon(arrow_points_sk[[0, 1, 2, 3, 0], 1],
                                   arrow_points_sk[[0, 1, 2, 3, 0], 0])
    skimage.draw.set_color(sk_hm, (s_patch[0], s_patch[1]),
                           [0.0, 0.0, 1.0, 1.0])
    ax1.imshow(sk_hm)

    #fig.savefig(heightmap_png[:-4] + '_out_' + ("%.3f" % rad_ori) + '.png', dpi=fig.dpi)
    skimage.io.imsave(heightmap_png[:-4] + '_out_' + ("%.3f" % rad_ori) +
                      '.png',
                      sk_hm_pure)  # save sk_hm instead to keep the arrow overlay

    endTime = time.time()
    workTime = endTime - startTime
    print("\rProcessed " + str(total_samples) + "/" + str(total_samples))
    print("-- time: " + str(workTime))
    #plt.show()
    return sk_hm_pure
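
The per-patch drawing loop above relies on skimage.draw.polygon plus set_color; a minimal sketch of the same technique on a small RGBA canvas:

import numpy as np
import skimage.draw

canvas = np.ones((100, 100, 4))
canvas[:, :, 3] = 0  # white canvas, fully transparent
rr, cc = skimage.draw.polygon([20, 20, 60, 60], [20, 60, 60, 20])
# green square whose alpha channel encodes a probability of 0.8
skimage.draw.set_color(canvas, (rr, cc), [0.0, 1.0, 0.0, 0.8])
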
Example #9
            pass

        @LogFileWriter(ex)
        def hello(self, argument):
            with tf.Session() as s:
                tf.summary.FileWriter(argument, s.graph)

    @ex.main
    def run_experiment(_run):
        assert _run.info.get("tensorflow", None) is None
        foo = FooClass()
        with tf.Session() as s:
            swr = tf.summary.FileWriter(TEST_LOG_DIR, s.graph)
            assert swr is not None
            # Because FileWriter was not called in an annotated function
            assert _run.info.get("tensorflow", None) is None
        foo.hello(TEST_LOG_DIR2)
        # Because foo.hello was annotated
        assert _run.info["tensorflow"]["logdirs"] == [TEST_LOG_DIR2]

        with tf.Session() as s:
            swr = tf.summary.FileWriter(TEST_LOG_DIR, s.graph)
            # Nothing should be added, because FileWriter was again not called in an annotated function
            assert _run.info["tensorflow"]["logdirs"] == [TEST_LOG_DIR2]

    ex.run()


if __name__ == "__main__":
    test_log_file_writer(ex(), tf())
Example #10
                         PoseStamped,
                         self.publish,
                         queue_size=1)
        while not rospy.is_shutdown():
            pass

    def publish(self, data):
        broadcaster = tf2_ros.StaticTransformBroadcaster()

        static_transformStamped = TransformStamped()
        static_transformStamped.header.stamp = rospy.Time.now()
        static_transformStamped.header.frame_id = 'station'
        static_transformStamped.child_frame_id = 'world'

        static_transformStamped.transform.translation.x = data.pose.position.x
        static_transformStamped.transform.translation.y = data.pose.position.y
        static_transformStamped.transform.translation.z = data.pose.position.z

        static_transformStamped.transform.rotation.x = data.pose.orientation.x
        static_transformStamped.transform.rotation.y = data.pose.orientation.y
        static_transformStamped.transform.rotation.z = data.pose.orientation.z
        static_transformStamped.transform.rotation.w = data.pose.orientation.w

        broadcaster.sendTransform(static_transformStamped)
        print('spinning')
        rospy.spin()


if __name__ == '__main__':
    tf()
Example #11
def setup_model(args, phrase_plh, region_plh, train_phase_plh, labels_plh,
                num_boxes_plh, is_conf_plh, neg_region_plh, gt_plh):
    """Describes the computational graph and returns the losses and outputs.

    Arguments:
    args -- command line arguments passed into the main function
    phrase_plh -- tensor containing the phrase features
    region_plh -- tensor containing the region features
    train_phase_plh -- indicator whether model is in training mode
    labels_plh -- indicates positive (1), negative (-1), or ignore (0)
    num_boxes_plh -- number of boxes per example in the batch
    is_conf_plh -- indicator passed through to the distance computations
    neg_region_plh -- tensor containing the negative region features
    gt_plh -- tensor containing the ground-truth region features

    Returns:
    total_loss -- weighted combination of the region and concept loss
    region_loss -- logistic loss for phrase-region prediction
    concept_loss -- L1 loss for the output of the concept weight branch
    region_prob -- each row contains the probability a region is associated with a phrase
    """
    final_embed = args.dim_embed
    embed_dim = final_embed * 4
    phrase_embed = embedding_branch(phrase_plh, embed_dim, train_phase_plh,
                                    'phrase')
    input_region_feature = tf.concat([region_plh, neg_region_plh, gt_plh], 1)
    region_embed_raw = embedding_branch(input_region_feature, embed_dim,
                                        train_phase_plh, 'region')
    region_embed = region_embed_raw[:, :args.max_boxes, :]
    neg_region_embed = region_embed_raw[:, args.max_boxes:-1, :]
    gt_region_embed = region_embed_raw[:, -1, :]
    # dgt_p = tf.expand_dims(phrase_embed, 1) * tf.expand_dims(gt_region_embed, 1)
    # dp_neg = tf.expand_dims(phrase_embed, 1)
    # NOTE: `tf` below shadows the tensorflow module; in the original project
    # it is presumably a distance helper analogous to cos_distance
    dgt_p = tf(phrase_embed, gt_region_embed, is_conf_plh)
    dneg_p = cos_distance(tf.expand_dims(phrase_embed, 1), neg_region_embed,
                          is_conf_plh)

    LossTrp = tf.maximum(0.00, 0.02 + dgt_p - dneg_p)
    concept_weights = embedding_branch(phrase_plh,
                                       embed_dim,
                                       train_phase_plh,
                                       'concept_weight',
                                       do_l2norm=False,
                                       outdim=args.num_embeddings)
    concept_loss = tf.reduce_mean(tf.norm(concept_weights, axis=1, ord=1))
    concept_weights = tf.nn.softmax(concept_weights)

    elementwise_prod = tf.expand_dims(phrase_embed, 1) * region_embed

    joint_embed_1 = add_fc(elementwise_prod, embed_dim, train_phase_plh,
                           'joint_embed_1')
    joint_embed_2 = concept_layer(joint_embed_1, final_embed, train_phase_plh,
                                  1, concept_weights)
    for concept_id in range(2, args.num_embeddings + 1):
        joint_embed_2 += concept_layer(joint_embed_1, final_embed,
                                       train_phase_plh, concept_id,
                                       concept_weights)

    joint_embed_2 = tf.reshape(
        joint_embed_2,
        [tf.shape(joint_embed_2)[0], num_boxes_plh, final_embed])
    joint_embed_3 = tf.contrib.layers.fully_connected(
        joint_embed_2,
        1,
        activation_fn=None,
        weights_regularizer=tf.contrib.layers.l2_regularizer(0.005),
        scope='joint_embed_3')
    joint_embed_3 = tf.squeeze(joint_embed_3, [2])
    region_prob = 1. / (1. + tf.exp(-joint_embed_3))

    ind_labels = tf.abs(labels_plh)
    num_samples = tf.reduce_sum(ind_labels)
    region_loss = tf.reduce_sum(
        tf.log(1 + tf.exp(-joint_embed_3 * labels_plh)) *
        ind_labels) / num_samples
    total_loss = region_loss + concept_loss * args.embed_l1 + LossTrp
    return total_loss, region_loss, concept_loss, region_prob, dneg_p, dgt_p, LossTrp, phrase_embed, gt_region_embed, neg_region_embed
Example #12
def test_fitted_model_full_map_stride_cnn(fitted, heightmap_png, edges, resize,
                                          rad_ori, stride):
    print("Generating traversability image for orientation " + str(rad_ori) +
          " rads")
    #hm=skimage.color.rgb2gray(skimage.io.imread(heightmap_png))
    #hm = hm * height_scale_factor
    hm = read_image(heightmap_png)
    X_mp = []
    #y=[]
    hm_cols = int((np.shape(hm)[0] - edges) / stride)
    hm_rows = int((np.shape(hm)[1] - edges) / stride)

    full_data = pd.DataFrame()
    full_data["patch"] = range(0, hm_cols * hm_rows)
    full_data.set_index("patch")
    print("Filling patches (by stride)")
    full_data["hm_x"] = [
        int((edges / 2) + (j * stride)) for i in range(0, hm_rows)
        for j in range(0, hm_cols)
    ]
    full_data["hm_y"] = [
        int((edges / 2) + (i * stride)) for i in range(0, hm_rows)
        for j in range(0, hm_cols)
    ]
    full_data["G"] = [rad_ori for i in range(0, hm_cols * hm_rows)]

    total_samples = len(full_data.index)

    startTime = time.time()
    print("Cropping patches for feature extraction (only patch cropping)")
    if not multiprocessing:  # module-level flag from the surrounding script
        for i, d in full_data.iterrows():
            print("\rProcessing " + str(i) + "/" + str(total_samples), end='')
            patch = hmpatch(hm,
                            d["hm_x"],
                            d["hm_y"],
                            np.rad2deg(d["G"]),
                            edges,
                            scale=1)[0]
            patch = transform_patch(patch, resize)
            #features=skimage.feature.hog(skimage.transform.resize(patch,(resize_patch_size,resize_patch_size)))
            X_mp.append(patch[:, :, np.newaxis])
        print("\rProcessed " + str(total_samples) + "/" + str(total_samples))
    else:
        print("\rProcessing " + str(total_samples) + " [multiprocessing]",
              end='')
        multiprocessing_hm = read_image(heightmap_png)
        X_mp = Parallel(n_jobs=4)(delayed(mc_extract_features_cnn)(
            d["hm_x"], d["hm_y"], np.rad2deg(d["G"]), edges, resize, scale=1)
                                  for idx, d in full_data.iterrows())
    endTime = time.time()
    #calculate the total time it took to complete the work
    workTime = endTime - startTime

    print("-- time: " + str(workTime))

    print("Estimating traversability for all the patches")
    startTime = time.time()

    X = np.array(X_mp).astype('float32')

    y_pred = fitted.predict(X)

    endTime = time.time()
    workTime = endTime - startTime
    print("-- time: " + str(workTime))

    fig, ax1 = plt.subplots(figsize=(9, 9))
    #plt.show(block=False)
    ax1.imshow(hm / height_scale_factor, cmap="rainbow")  #cmap="gray")
    fig.savefig(heightmap_png[:-4] + '_out_rainbow_base' + '.png', dpi=fig.dpi)

    cax1 = ax1.imshow(hm / height_scale_factor, cmap="rainbow")  #cmap="gray")
    cbar = fig.colorbar(
        cax1, ticks=[round(np.amin(hm) + .01, 2),
                     round(np.amax(hm), 2)])
    fig.savefig(heightmap_png[:-4] + '_out_rainbow__bar_base' + '.png',
                dpi=fig.dpi)

    fig, ax1 = plt.subplots(figsize=(9, 9))
    ax1.imshow(hm / height_scale_factor, cmap="gray")
    fig.savefig(heightmap_png[:-4] + '_out_gray_base' + '.png', dpi=fig.dpi)

    # use a white skimage canvas to draw traversability results
    sk_hm = np.ones((np.shape(hm)[0], np.shape(hm)[1]), dtype='float64')
    sk_hm = skimage.color.gray2rgb(sk_hm)

    # to store only true traversability values (for use later in a visualization)
    true_t_hm = np.zeros((np.shape(hm)[0], np.shape(hm)[1]), dtype='float64')

    print("Drawing predictions on patchs for current orientation")
    startTime = time.time()
    '''
    The documentation of SimilarityTransform states that the rotation angle
    is counter-clockwise, but it does not behave that way in image
    coordinates. Angles from the dataset are in radians and counter-clockwise,
    so PI/2 means left orientation and 3PI/2 means right orientation; if we
    do not invert the orientation, the skimage transformation does the
    opposite.

    The Gazebo orientation frame also differs from V-REP: 0 degrees points
    right and angles grow counter-clockwise.
    '''
    tf = skimage.transform.SimilarityTransform(
        translation=[edges / 2, edges / 2], rotation=-rad_ori)
    tf_sk = skimage.transform.SimilarityTransform(translation=[10, 10],
                                                  rotation=-rad_ori)
    arrow_points = tf(np.array([[0, 0], [edges / 2, 0]]))
    arrow_points_sk = tf_sk(np.array([[-5, 0], [-10, 5], [5, 0], [-10, -5]
                                      ]))  #15px arrowhead for skimage plotting
    # plot arrow_points
    ax1.arrow(arrow_points[0][0],
              arrow_points[0][1],
              arrow_points[1][0] - arrow_points[0][0],
              arrow_points[1][1] - arrow_points[0][1],
              length_includes_head=True,
              width=3)
    patches_squares = []
    patches_colors = []
    for i, d in full_data.iterrows():
        print("\rProcessing " + str(i) + "/" + str(total_samples), end='')
        corners = hmpatch_only_corners(d["hm_x"],
                                       d["hm_y"],
                                       np.rad2deg(d["G"]),
                                       stride,
                                       scale=1)
        color_box = '#cc0000'  # red
        color_box_sk = [1.0, 0.0, 0.0]  # red
        alpha_box = y_pred[i][0]
        if y_pred[i][1] > 0.5:
            color_box = '#73d216'  # green
            color_box_sk = [0.0, 1.0, 0.0]  # green
            alpha_box = y_pred[i][1]
        # plot the respective traversability patches on an image
        s_patch = skimage.draw.polygon([
            corners[4, 1] - stride / 2, corners[4, 1] + stride / 2,
            corners[4, 1] + stride / 2, corners[4, 1] - stride / 2,
            corners[4, 1] - stride / 2
        ], [
            corners[4, 0] - stride / 2, corners[4, 0] - stride / 2,
            corners[4, 0] + stride / 2, corners[4, 0] + stride / 2,
            corners[4, 0] - stride / 2
        ])
        skimage.draw.set_color(sk_hm, (s_patch[0], s_patch[1]),
                               color_box_sk,
                               alpha=alpha_box)
    # for plotting with skimage
    sk_hm_pure = np.copy(sk_hm)
    s_patch = skimage.draw.polygon(arrow_points_sk[[0, 1, 2, 3, 0], 1],
                                   arrow_points_sk[[0, 1, 2, 3, 0], 0])
    skimage.draw.set_color(sk_hm, (s_patch[0], s_patch[1]), [0.0, 0.0, 1.0],
                           alpha=1.0)
    ax1.imshow(sk_hm)

    skimage.io.imsave(
        heightmap_png[:-4] + '_out_' + ("%.3f" % rad_ori) + '.png', sk_hm)

    endTime = time.time()
    workTime = endTime - startTime
    print("\rProcessed " + str(total_samples) + "/" + str(total_samples))
    print("-- time: " + str(workTime))
    #plt.show()
    return sk_hm_pure
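
The rotation-direction caveat in the docstring above is easy to verify; a minimal sketch:

import numpy as np
import skimage.transform

rot = skimage.transform.SimilarityTransform(rotation=np.pi / 2)
print(rot(np.array([[1.0, 0.0]])))  # ~[[0, 1]]
# +y points down in image coordinates, so this mathematically counter-clockwise
# rotation appears clockwise on screen; hence the -rad_ori used above.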