Example #1
0
def build_input(params, image_to_process, caption):
    """Build the image and caption inputs for the model.

    Args:
        params: config object providing img_size, vocab_size,
            embedding_dim and max_length.
        image_to_process: raw image tensor to preprocess.
        caption: integer token-id tensor for the caption.

    Returns:
        Tuple of (processed_image, caption_input), where caption_input
        is the embedded caption with position encodings added.
    """
    # Preprocess in inference mode (is_training=False) and give the
    # result an explicit graph name so it can be looked up later.
    preprocessed = image_processing.image_processing(
        image_to_process, params.img_size, False)
    processed_image = tf.identity(preprocessed, name='processed_image')

    # Look up learned token embeddings, then add fixed position encodings.
    embeddings = tf.get_variable(
        'embedding_weights', [params.vocab_size, params.embedding_dim])
    caption_embedded = tf.nn.embedding_lookup(embeddings, caption)
    positions = transformer.position_encoding(params.embedding_dim,
                                              params.max_length)

    return processed_image, caption_embedded + positions
Example #2
0
    def process_raw_image(self, raw_image: Image):
        """Convert a ROS image message, run detection, and publish the mask.

        Publishes the masked image when a region was found (area > 0),
        otherwise republishes the unmodified frame. Once the detected
        area exceeds the configured threshold, the frame is written to
        disk and the detected flag is set.
        """
        frame = self.bridge.imgmsg_to_cv2(raw_image,
                                          desired_encoding="passthrough")
        area, _, masked = image_processing(frame)

        # Publish the mask when something was found, otherwise the raw frame.
        outgoing = masked if area > 0 else frame
        self.mask_pub.publish(
            self.bridge.cv2_to_imgmsg(outgoing, encoding="bgr8"))

        if area > self.detect_thresh:
            self.detected = True
            cv2.imwrite("./final_view.jpg", masked)
    def parse_function(self, data):
        """Parse one serialized TFRecord example into (image, caption).

        Args:
            data: serialized tf.Example proto string.

        Returns:
            Tuple of (preprocessed image tensor, processed caption tensor).
        """
        feature_spec = {
            'image': tf.FixedLenFeature([], tf.string),
            'caption': tf.FixedLenFeature([self.config.max_length],
                                          tf.int64),
        }
        parsed = tf.parse_single_example(data, features=feature_spec)

        # Decode, then resize/augment according to the training flag.
        image = self.image_decode(parsed['image'])
        image = image_processing.image_processing(image, self.config.img_size,
                                                  self.is_training)
        caption = self.caption_processing(parsed['caption'], self.is_training)

        return image, caption
Example #4
0
def main():
    """Line-following control loop: sample frames, derive an angle, drive.

    Each cycle processes image_cnt frames, tallies stop votes and missed
    detections, and either stops, skips the cycle, or steers based on
    the refined average angle.
    """
    global Stop
    print("Begin!")
    """
    make a thread code
    """
    while True:
        angles = list()
        stop_votes = 0
        missed = 0

        # Image processing -> line trace; also count stop votes.
        for _ in range(image_cnt):
            angle, stop_vote = image_processing(img)
            stop_votes += stop_vote
            if angle == error_signal:
                missed += 1
            else:
                # Clamp the measured angle into [-turning_max, turning_max].
                angles.append(max(-turning_max, min(turning_max, angle)))

        # Stop state, or the road wasn't found in any frame.
        if stop_votes > (image_cnt / 2):
            # Majority of frames signalled a stop.
            cw.stop()
            continue
        if missed == image_cnt:
            # No frame contained a detectable line; skip this cycle.
            continue

        angle = dataRefining(angles, len(angles))
        if -approach_angle < angle < approach_angle:
            # Close enough to center: drive straight.
            print("straight mode :")
            print(angle)
            cw.turn_straight()
        else:
            print("turning mode : ")
            print(angle)
            cw.turn(angle)
Example #5
0
def get_realtime_predict():
    """Grab one camera frame, preprocess it, and return the predicted class.

    Shows the raw and processed frames in OpenCV windows, scales the
    processed image into [-1, 1], adds batch and channel dimensions,
    and returns the argmax of the model's prediction.
    """
    _, frame = camera.read()
    cv2.imshow("camera", frame)

    processed = image_processing(frame)
    cv2.imshow("image_process", processed)

    # Normalize pixel values from [0, 255] into [-1, 1].
    processed = (processed / 255. - 0.5) * 2.

    # Add batch (axis 0) and channel (axis 3) dimensions for the model.
    batch = np.expand_dims(processed, axis=0)
    batch = np.expand_dims(batch, axis=3)

    prediction = model.predict(batch)
    return np.argmax(prediction)
Example #6
0
from image_processing import image_processing
import cv2
# Continuously run image_processing on the capture handle and print results.
# NOTE(review): `img` is the cv2.VideoCapture object itself, not a frame —
# presumably image_processing() calls .read() internally; confirm.
img = cv2.VideoCapture(0)
while True:
    print(image_processing(img))
Example #7
0
import os
import cv2
from image_processing import image_processing

# Mirror the folder structure of image_data into image_data_processed,
# writing a processed copy of every image.
if not os.path.isdir('image_data_processed'):
    os.mkdir('image_data_processed')

for folder in os.listdir('image_data'):
    src_dir = os.path.join('image_data', folder)
    dst_dir = os.path.join('image_data_processed', folder)
    if not os.path.isdir(dst_dir):
        os.mkdir(dst_dir)

    for name in os.listdir(src_dir):
        # Read, process, and write under the same relative path.
        processed = image_processing(cv2.imread(os.path.join(src_dir, name)))
        cv2.imwrite(os.path.join(dst_dir, name), processed)
Example #8
0
import cv2

from ai import NeuralNetwork
from image_processing import image_processing

# Capture from camera
cap = cv2.VideoCapture(0)
cv2.startWindowThread()

# Load model (singleton is initialized on first access).
NeuralNetwork.instance()

# Show processed frames until ESC (key code 27) is pressed.
while True:
    _, img = cap.read()
    output = image_processing(img)
    cv2.imshow("output", output)

    key = cv2.waitKey(10)
    if key == 27:
        break

cv2.destroyAllWindows()
# The repeated waitKey(1) calls let HighGUI process pending window events
# so the windows actually close (a common workaround, notably on macOS).
cv2.waitKey(1)
cv2.waitKey(1)
cv2.waitKey(1)
cv2.waitKey(1)
cv2.waitKey(1)
# BUG FIX: release the capture we opened. The original created a brand-new
# VideoCapture(0) and released that one, leaving `cap` (and the camera) open.
cap.release()
cv2.waitKey(1)
Example #9
0
def shape_decoding(s):
    """Map a numeric shape class index to its name.

    Args:
        s: integer class index.

    Returns:
        "circle", "ellipse" or "etc" for indices 0-2; "Wrong!" otherwise.
    """
    names = {0: "circle", 1: "ellipse", 2: "etc"}
    return names.get(s, "Wrong!")


# Load the input image from the command line, normalize its size, then
# classify its characters, shape and color.
image = cv2.imread(sys.argv[1])
image1 = cv2.resize(image, (420, 560), interpolation=cv2.INTER_LINEAR_EXACT)

imgcolorc, imgcolorf, imgshape, colorname = image_processing(image1)
_, thres = findtext(imgcolorc, imgcolorf)

# Decode one character per thresholded region, in order.
c_arr = ''.join(
    char_decoding(char_test(make_square(th))[0]) for th in thres)

# Classify the overall shape.
s_arr = shape_decoding(shape_test(imgshape)[0])

result = {'character': c_arr, 'shape': s_arr, 'color': colorname}
Example #10
0
# Timing/diagnostic driver for the indexing + deduplication pipeline.
print("Folders walked")
print(time.time() - start_time)

# get photo and video files lists
image_files_list, video_files_list = index_folder_files(path=path,
                                                        max_depth=4,
                                                        indexing_type="all")

# print(image_files_list)
# print(video_files_list)

print("Files indexed")

# video_processing(video_files_list)
print(len(image_files_list))
image_processing(image_files_list)
# Average processing time per indexed image.
print((time.time() - start_time) / len(image_files_list))

with db_session():
    image_files_query = Image.get_descriptors()

feature_description(images_list=image_files_query)
print((time.time() - start_time) / len(image_files_query))
# NOTE(review): deliberate early abort — everything below this raise is
# unreachable debugging leftover; remove the raise to run the duplicate
# lookup below.
raise Exception

print(len(image_files_list))
print(len(image_files_query))

# get certain image all duplicates
result = get_image_duplicates(image_id=8, similarity_threshold=150)
print(result)
Example #11
0
    def run(self):
        """Index the selected folder, process new images, and fill the table.

        Walks the folder (recursing to the configured depth when the
        tree checkbox is set), runs image processing on the indexed
        files, persists them, then populates IMAGE_PATH_DICT and the
        image list table row by row. Emits finishedTrigger when done.
        """
        # start indexing folder
        images, videos = index_folder_files(
            path=self.folderField.text(),
            max_depth=json_settings.user_json_read("folderDepth")
            if self.folderTreeCheckbox.isChecked() else 0,
        )

        # processing new files
        processed_files = image_processing(images)

        # save new files
        with db_session():
            save_new_files(indexed_files=processed_files, file_type="image")
            db_flush()
            # get available images from DB
            images = Image.all()

        for idx, image in enumerate(images):
            str_image_idx = str(idx)

            IMAGE_PATH_DICT[str_image_idx] = {
                "id": image.id,
                "name": image.image_name,
                "additional_attrs": {
                    "height": image.image_height,
                    "width": image.image_width
                },
                "folder": image.image_path,
                "type": (image.image_name.split(".")[-1]).lower(),
                "full_path": image.full_path(),
            }
            # BUG FIX: the original sized the table to `idx` rows and wrote
            # into row `idx - 1`, so the first setItem targeted row -1
            # (silently ignored by Qt) and the last image never appeared.
            # Size to idx + 1 rows and write row idx instead.
            self.imageListTable.setRowCount(idx + 1)
            self.imageListTable.setItem(idx, 0,
                                        QTableWidgetItem(str_image_idx))
            self.imageListTable.setItem(idx, 1,
                                        QTableWidgetItem(image.image_name))
            self.imageListTable.setItem(
                idx, 2,
                QTableWidgetItem(IMAGE_PATH_DICT[str_image_idx]["type"]))

            duplicateIcon = QTableWidgetItem()
            duplicateIcon.setIcon(QIcon("gui/static/icon_view_duplicates.png"))
            self.imageListTable.setItem(idx, 3, duplicateIcon)

        # TODO add video to DB and processing logic
        """
        for video in videos:
            rowVideos += 1
            videoId = str(rowVideos)
            VIDEO_PATH_DICT[videoId] = [
                video[0],
                (video[0].split(".")[-1]).lower(),
                os.path.join(video[1], video[0]),
            ]
            self.videoListTable.setRowCount(rowVideos)
            self.videoListTable.setItem(rowVideos - 1, 0, QTableWidgetItem(videoId))
            self.videoListTable.setItem(rowVideos - 1, 1, QTableWidgetItem(video[0]))
            self.videoListTable.setItem(
                rowVideos - 1, 2, QTableWidgetItem(VIDEO_PATH_DICT[videoId][1])
            )

            duplicateIcon = QTableWidgetItem()
            duplicateIcon.setIcon(
                QWidget().style().standardIcon(QStyle.SP_FileDialogContentsView)
            )
            self.videoListTable.setItem(rowVideos, 3, duplicateIcon)

        """

        self.finishedTrigger.emit()
Example #12
0
import os
from commands import (png_to_pnm, pnm_to_csv, csv_to_stl)
from image_processing import image_processing, get_image_file

# Resolve the images directory next to this script.
full_path = os.path.dirname(os.path.abspath(__file__))
path = os.path.abspath(os.path.join(full_path, 'images'))

# Renamed from `file` to avoid shadowing the builtin.
image_file = get_image_file(path)
if not image_file:
    # BUG FIX: typo in the user-facing message ("and" -> "an").
    print("Upload an image first!")
    exit(1)

try:
    image_processing(path, image_file)
except Exception as e:
    # Best-effort diagnostics: the underlying failure is typically a
    # missing native dependency, so point the user at the likely cause.
    print(e)
    print(
        "Make sure you have python3 installed with numpy and opencv libraries and try again"
    )
    exit(1)

# Convert the processed image through the PNG -> PNM -> CSV -> STL pipeline.
png_to_pnm(path)
pnm_to_csv(path)
csv_to_stl(path)