Code example #1
	def __init__(self):
		self.ui = Ui_SettingsWindow()
		self.image_handler = ImageHandler()
		self.ui.set_image_handler(self.image_handler)
		self.clicked_square = ""
		self.main_window_video_open = False
		self.main_window = None
Code example #2
File: Model.py Project: oliverj96/open-stylizer
            def closure():
                # correct the values of updated input image
                input_img.data.clamp_(0, 1)

                optimizer.zero_grad()
                model(input_img)
                style_score = 0
                content_score = 0

                for sl in style_losses:
                    style_score += sl.loss
                for cl in content_losses:
                    content_score += cl.loss

                style_score *= style_weight
                content_score *= content_weight

                loss = style_score + content_score
                loss.backward()

                run[0] += 1
                if run[0] % 5 == 0:
                    print("run {}:".format(run))
                    print('Style Loss : {:4f} Content Loss: {:4f}'.format(
                        style_score.item(), content_score.item()))
                    print()
                    if prev:
                        img_handler.imshow(input_img,
                                           unloader,
                                           title='Run {}'.format(run))
                    if runs:
                        run_save = os.path.join(run_path, f'run{run}.jpg')
                        img_handler.imsave(run_save, unloader, input_img)
                return style_score + content_score
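
This closure has the shape that torch.optim.LBFGS expects: step() may re-evaluate it several times per iteration, which is why the counter lives in the mutable run[0] list. A minimal driver sketch, assuming input_img and closure are set up as above; num_steps and the optimizer setup are assumptions, not part of the snippet:

# Hypothetical driver loop for the closure above (num_steps is assumed).
import torch.optim as optim

optimizer = optim.LBFGS([input_img.requires_grad_()])
run = [0]
while run[0] <= num_steps:
    optimizer.step(closure)
# clamp once more after the final step so pixels stay in [0, 1]
input_img.data.clamp_(0, 1)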
Code example #3
    def recognize_text(self, img, task=""):
        word_list = []

        print("Image Processing Started")
        img_handler = ImageHandler()

        words = img_handler.split_text(img, task)

        for word in words:
            img = cv2.cvtColor(word, cv2.COLOR_BGR2GRAY)
            # img = img_handler.preprocess_normal_handwriting(img)
            img = img_handler.preprocess(img, self.img_size)
            word_list.append(img)
            # cv2.imshow('word', word)
            # cv2.waitKey(0)
        print("Image Processing Finished")

        print("Recognizing Text Started")
        batch = Batch(None, word_list)
        recognized_list = self.model.batch_test(batch)
        print('Image Text: ', recognized_list)

        # join the recognized words with single spaces
        text = ' '.join(recognized_list)

        return text
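
A hedged usage sketch: recognize_text expects a BGR image, which is what cv2.imread returns by default. The recognizer instance and file path below are illustrative, not from the snippet.

import cv2

img = cv2.imread('scanned_form.png')  # BGR array, as the method expects
text = recognizer.recognize_text(img)  # `recognizer`: whatever class defines the method above
print(text)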
Code example #4
    def predict(self, model_path='./model/the_model.ckpt'):
        n_input = 784
        n_output = 10
        n_hidden1 = 256
        n_hidden2 = 128
        n_hidden3 = 64
        net_input = tf.placeholder(tf.float32, [None, n_input])
        y_true = tf.placeholder(tf.float32, [None, n_output])
        W1 = tf.Variable(tf.truncated_normal([n_input, n_hidden1]))
        b1 = tf.Variable(tf.truncated_normal([n_hidden1]))
        W2 = tf.Variable(tf.truncated_normal([n_hidden1, n_hidden2]))
        b2 = tf.Variable(tf.truncated_normal([n_hidden2]))
        W3 = tf.Variable(tf.truncated_normal([n_hidden2, n_hidden3]))
        b3 = tf.Variable(tf.truncated_normal([n_hidden3]))
        W4 = tf.Variable(tf.truncated_normal([n_hidden3, n_output]))
        b4 = tf.Variable(tf.truncated_normal([n_output]))
        # the model
        hidden1_res = tf.nn.tanh(tf.add(tf.matmul(net_input, W1), b1))
        hidden2_res = tf.nn.tanh(tf.add(tf.matmul(hidden1_res, W2), b2))
        hidden3_res = tf.nn.sigmoid(tf.add(tf.matmul(hidden2_res, W3), b3))
        # NB: keep_prob stays at 0.8 even at prediction time; 1.0 would be
        # the usual choice for inference
        hidden3_res = tf.nn.dropout(hidden3_res, 0.8)
        net_output = tf.add(tf.matmul(hidden3_res, W4), b4)
        # prediction and actual using the argmax as the predicted label
        correct_prediction = tf.equal(tf.argmax(net_output, 1),
                                      tf.argmax(y_true, 1))
        # And now we can look at the mean of our network's correct guesses
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        cost = tf.reduce_sum(
            tf.nn.softmax_cross_entropy_with_logits(logits=net_output,
                                                    labels=y_true))
        eta = 0.02
        # optimizer = tf.train.AdamOptimizer(eta).minimize(cost)
        optimizer = tf.train.GradientDescentOptimizer(eta).minimize(cost)
        # optimizer = tf.train.RMSPropOptimizer(eta).minimize(cost)
        # ### load model from file ###
        with tf.Session() as sess:
            # initialize the locally built graph, then restore the saved
            # variables from disk
            sess.run(tf.global_variables_initializer())
            saver = tf.train.import_meta_graph(model_path + '.meta')
            saver.restore(sess, model_path)
            parser = ImageHandler()
            images, labels = parser.parse_images()
            print("Accuracy for self uploaded images: {}".format(
                sess.run(accuracy,
                         feed_dict={
                             net_input: images,
                             y_true: labels
                         })))

            pred = sess.run(net_output, feed_dict={net_input: images})
            return pred
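
One caveat in the graph above: dropout keeps a hard-coded keep probability of 0.8 even at prediction time, which injects noise into every forecast. A minimal TF1-style sketch of the usual remedy, with illustrative shapes (this is not the snippet's actual graph):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 64])      # illustrative input
keep_prob = tf.placeholder(tf.float32)
h = tf.nn.dropout(x, keep_prob)  # feed keep_prob=0.8 for training, 1.0 for prediction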
Code example #5
File: PetCare.py Project: zhw278/tritonPetCare
def delete_post():
    if session.get('logged_in') is not None:
        accountDao = AccountDao()
        ownerMatched = accountDao.remove_account_post(session['logged_in'],
                                                      request.form['postId'])
        if not ownerMatched:
            return redirect(url_for('PetCare.list_posts'))
        postDao = PostDao()
        prevImages = postDao.remove_post(request.form['postId'])
        for img in prevImages:
            ImageHandler.delete_image(img)
    return redirect(url_for('PetCare.list_posts'))
Code example #6
File: PymageMain.py Project: edson-gonzales/PymageC
    def read_format_to_export_an_image(self):
        """
        This method reads the format to export an image file from one format to another 

        Return:
        new_format.- String with the new format to convert the image
        """
        image_handler = ImageHandler()
        msg_to_display = "Insert the format to export an image file from one format to another: "
        new_format = self.read_value_from_user(msg_to_display)
        if image_handler.validate_format_inserted(new_format):
            return new_format
        else:
            return "Incorrect format"
Code example #7
class PoolTableFinder:
    def __init__(self):
        self.imageHandler = ImageHandler()
        self.rawImagePath = ConfigHandler.getRawImagePath()
        self.croppedImagePath = ConfigHandler.getCroppedImagePath()

    def saveCroppedImage(self):
        Logger.info("Saving cropped image")

        currentImage = self.imageHandler.readImage(self.rawImagePath)
        if CommandHandler.areBoundsSet():
            bounds = CommandHandler.getBounds()
            croppedImage = self.imageHandler.cropAndRotateRectangleInImage(
                currentImage, bounds)
        else:
            bounds = self.getBoundingRectForTable(currentImage)
            croppedImage = self.imageHandler.cropImageByBoundingRect(
                currentImage, bounds)

        cv.imwrite(self.croppedImagePath, croppedImage)

    def getMaskForTable(self, poolImage):
        self.th_green_low = numpy.array(CommandHandler.getThGreenLow())
        self.th_green_high = numpy.array(CommandHandler.getThGreenHigh())

        hsvImage = cv.cvtColor(poolImage, cv.COLOR_BGR2HSV)
        return self.getMask(hsvImage)

    def getBoundingRectForTable(self, poolImage):
        mask = self.getMaskForTable(poolImage)
        contours = self.getContours(mask)
        return self.getLargestBoundingRect(contours)

    def getMask(self, hsvImage):
        mask = cv.inRange(hsvImage, self.th_green_low, self.th_green_high)

        return mask

    def getContours(self, mask):
        # findContours returns (image, contours, hierarchy) in OpenCV 3 and
        # (contours, hierarchy) in OpenCV 4; [-2] selects contours in both
        contours = cv.findContours(mask.copy(), cv.RETR_EXTERNAL,
                                   cv.CHAIN_APPROX_SIMPLE)[-2]
        if len(contours) == 0:
            raise PoolTableException("No contours found")

        return contours

    def getLargestBoundingRect(self, contours):
        area = max(contours, key=cv.contourArea)
        return cv.boundingRect(area)
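
For reference, a self-contained sketch of the same find-the-green-table idea with hard-coded HSV thresholds; the real bounds come from CommandHandler above, so the values and path here are illustrative only.

import cv2 as cv
import numpy

th_low = numpy.array([40, 40, 40])      # rough band for green felt (assumption)
th_high = numpy.array([80, 255, 255])

img = cv.imread('table.jpg')            # illustrative path
hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
mask = cv.inRange(hsv, th_low, th_high)
contours = cv.findContours(mask, cv.RETR_EXTERNAL,
                           cv.CHAIN_APPROX_SIMPLE)[-2]
x, y, w, h = cv.boundingRect(max(contours, key=cv.contourArea))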
Code example #8
    def make_post_output(postInfo):
        postInfo['start_date_year_first'] = time.strftime(
            '%Y-%m-%d', time.localtime(postInfo['start_date']))
        postInfo['end_date_year_first'] = time.strftime(
            '%Y-%m-%d', time.localtime(postInfo['end_date']))

        postInfo['start_date'] = time.strftime(
            '%m/%d/%Y', time.localtime(postInfo['start_date']))
        postInfo['end_date'] = time.strftime(
            '%m/%d/%Y', time.localtime(postInfo['end_date']))
        if 'post_date' in postInfo:
            postInfo['post_date'] = time.strftime(
                '%m/%d/%Y %H:%M:%S', time.localtime(postInfo['post_date']))
        if 'gender' in postInfo:
            postInfo['gender'] = 'Male' if postInfo['gender'] == 1 else 'Female'
        if 'age' in postInfo:
            if postInfo['age'] == 0:
                postInfo['age'] = '<1 year'
            elif postInfo['age'] == 10:
                postInfo['age'] = '>= 10 years'
            else:
                postInfo['age'] = str(postInfo['age']) + ' years'
        if 'interested' in postInfo:
            postInfo['interested'] = ([] if postInfo['interested'] is None
                                      else postInfo['interested'].split(','))
        for i in ['image1', 'image2', 'image3']:
            if i in postInfo and postInfo[i] is not None:
                postInfo[i] = ImageHandler.get_image_full_path(postInfo[i])
        return postInfo
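
The date handling above is a plain strftime/localtime conversion over epoch seconds; a quick standalone check (the timestamp is illustrative, and the output depends on the local timezone):

import time

start_date = 1609459200  # illustrative epoch value
print(time.strftime('%Y-%m-%d', time.localtime(start_date)))
print(time.strftime('%m/%d/%Y %H:%M:%S', time.localtime(start_date)))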
Code example #9
    def get_next(self):

        labels = []
        imgs = []

        # clamp the range so a final partial batch cannot index past the samples
        batch_range = range(self.current_index,
                            min(self.current_index + self.batch_size,
                                len(self.samples)))

        # reuse one handler for the whole batch instead of creating one per image
        img_handler = ImageHandler()
        for i in batch_range:
            sample = self.samples[i]

            labels.append(sample.label)
            img = cv2.imread(sample.file_path, cv2.IMREAD_GRAYSCALE)
            img = img_handler.preprocess(img, self.img_size)
            imgs.append(img)

        self.current_index += self.batch_size
        return Batch(labels, imgs)
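
A hedged sketch of the consuming loop, assuming the loader exposes samples, batch_size, and current_index as above; train_on_batch is a stand-in for whatever actually consumes the Batch.

loader.current_index = 0
while loader.current_index < len(loader.samples):
    batch = loader.get_next()
    train_on_batch(batch)  # hypothetical consumer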
Code example #10
File: PymageMain.py Project: edson-gonzales/PymageC
    def read_degrees_to_rotate_image(self):
        """
        This method reads from the user the number of degrees to rotates an image file
        Evaluate the number inserted and returns the number of degrees if it is correct
        otherwise print an error message

        Return:
        degrees.- Number of degrees to rotate the image
                  If the number inserted is out of the valid range, the method returns 9999
        """
        image_handler = ImageHandler()
        msg_to_display = "Insert the number of degrees to rotate the image: "
        degrees = self.read_value_from_user(msg_to_display)
        degrees = int(degrees)
        if image_handler.validate_degrees_inserted(degrees) == True:
            return degrees
        else:
            return 9999
Code example #11
    def test_address(self, img, task=""):
        word_list = []

        img_handler = ImageHandler()

        words = img_handler.split_text(img, task)

        for word in words:
            img = cv2.cvtColor(word, cv2.COLOR_BGR2GRAY)
            # img = img_handler.preprocess_normal_handwriting(img)
            img = img_handler.preprocess(img, self.img_size)
            word_list.append(img)
            # cv2.imshow('word', word)
            # cv2.waitKey(0)

        batch = Batch(None, word_list)
        recognized_list = self.model.batch_test(batch)

        return recognized_list
Code example #12
File: jupyter.py Project: Isan-Rivkin/MNIST_MODEL
 def predict(self, load_path):
     #graph = tf.Graph()
     with tf.Session() as sess:
         sess.run(tf.global_variables_initializer())
         new_saver = tf.train.import_meta_graph(load_path + '.meta')
         new_saver.restore(sess, load_path)
         parser = ImageHandler()
         if SRC is not None and DEST is not None:
             images, labels = parser.parse_images(SRC, DEST)
         else:
             images, labels = parser.parse_images()
         print("Accuracy for self uploaded images: {}".format(
             sess.run(accuracy, feed_dict={
                 net_input: images,
                 y_true: labels
             })))
         predictions = sess.run(net_output, feed_dict={net_input: images})
         #self.print_predictions(predictions)
     self.print_to_console(labels, predictions)
Code example #13
File: main.py Project: usfsoar/NSL_16-17_CV
    def __init__(self):
        """
        Sets up program variables, resets files, does not tell arduino to start
        """
        GPIO.setmode(GPIO.BCM)
        self.ei = ErrorIndicator(False)
        self.ih = ImageHandler(False, self.ei)
        self.processor = ImageProcessor(self.ih, self.ei)
        self.arduino = Arduino(self.ei)
        GPIO.setup(24, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.setup(25, GPIO.IN)
        GPIO.setwarnings(False)

        if os.path.exists("out/"):
            shutil.rmtree("out")
        os.makedirs("out")
        if os.path.exists("log.txt"):
            os.remove("log.txt")

        Log.setup_log()
Code example #14
File: Entities.py Project: zhw278/tritonPetCare
 def make_post_output(postInfo):
     postInfo['start_date'] = time.strftime(
         '%m/%d/%Y', time.localtime(postInfo['start_date']))
     postInfo['end_date'] = time.strftime(
         '%m/%d/%Y', time.localtime(postInfo['end_date']))
     if 'post_date' in postInfo:
         postInfo['post_date'] = time.strftime(
             '%m/%d/%Y %H:%M:%S', time.localtime(postInfo['post_date']))
     for i in ['image1', 'image2', 'image3']:
         if i in postInfo and postInfo[i] is not None:
             postInfo[i] = ImageHandler.get_image_full_path(postInfo[i])
     return postInfo
Code example #15
class PoolTableChecker:
    def __init__(self):
        self.tableFinder = PoolTableFinder()
        self.imageComparator = ImageComparator()
        self.imageHandler = ImageHandler()

    def isTableFree(self, imagePathA, imagePathB):
        imageA = self.imageHandler.readImage(imagePathA)
        imageB = self.imageHandler.readImage(imagePathB)

        if CommandHandler.areBoundsSet():
            bounds = CommandHandler.getBounds()
            croppedA = self.imageHandler.cropAndRotateRectangleInImage(
                imageA, bounds)
            croppedB = self.imageHandler.cropAndRotateRectangleInImage(
                imageB, bounds)
        else:
            tableABounds = self.tableFinder.getBoundingRectForTable(imageA)
            tableBBounds = self.tableFinder.getBoundingRectForTable(imageB)

            largestBounds = self.imageHandler.getLargestBoundingRect(
                tableABounds, tableBBounds)

            croppedA = self.imageHandler.cropImageByBoundingRect(
                imageA, largestBounds)
            croppedB = self.imageHandler.cropImageByBoundingRect(
                imageB, largestBounds)

        return self.imageComparator.areImagesEqual(croppedA, croppedB)
Code example #16
def recur(site, interval, process=True):
    wh = WebHandler(visual=False)
    ih = ImageHandler(ImageMagick, os.path.join(tempDir, 'temp.png'))

    print('starting process...')
    i = 1
    while True:
        currentTime = time.localtime()
        print('\nstarting frame at %s...' %
              time.strftime("%H:%M:%S", currentTime))
        filename = '%s.png' % time.strftime("%H-%M-%S", currentTime)
        path = os.path.join(screenshotDir, filename)
        wh.screenshot(site, path)

        # process the image if desired
        if process:
            hour = time.strftime("%H", currentTime)
            minute = time.strftime("%M", currentTime)
            outFile = os.path.join(outputDir, '%04d.png' % i)
            ih.processImage(path, outFile, [hour, minute])
            i += 1
        time.sleep(round(60 * interval))
Code example #17
class Notifications(object):
    instance = None

    @classmethod
    def get_instance(cls):
        if not cls.instance:
            cls.instance = Notifications()
        return cls.instance

    def __init__(self):
        print("Creating Notifications instance")
        self.notification_subscriber = rospy.Subscriber(
            NOTIFICATION_TOPIC, String, self.notification_callback)
        self.notifications = []
        self.notification_cleanup = 0

    def cleanup_notifications(self):
        if self.notification_cleanup < time.time() - 5:
            bak = self.notifications
            self.notifications = []
            for t, nt in bak:
                if t > time.time() - 15:
                    self.notifications.append((t, nt))

    def get_notifications(self, since):
        self.cleanup_notifications()
        return [y for x, y in self.notifications if x >= since]

    def notification_callback(self, msg):
        self.log("Notification received: %s" % (repr(msg)))
        try:
            t = time.time()
            notification = json.loads(msg.data)
            notification['received'] = t
            self.notifications.append((t, notification))
        except ValueError:
            # malformed JSON: surface the problem as a notification itself
            self.notifications.append((time.time(), {
                "message": "Incorrectly formed notification received: \"%s\"" % msg.data,
                "level": "HIGH",
                "timeout": 4000,
                "received": time.time()
            }))

        self.log("Generating video")
        ih = ImageHandler.get_instance()
        ih.generate_video()
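
A hedged example of a message this subscriber would accept; NOTIFICATION_TOPIC is whatever constant the surrounding module defines, and the payload keys mirror the fallback dict above.

import json
import rospy
from std_msgs.msg import String

rospy.init_node('notification_sender')  # node name is illustrative
pub = rospy.Publisher(NOTIFICATION_TOPIC, String, queue_size=1)
pub.publish(String(data=json.dumps({
    "message": "table occupied",
    "level": "LOW",
    "timeout": 4000
})))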
Code example #18
class Client(tornado.websocket.WebSocketHandler):
    def __init__(self, url, timeout):
        self.url = url
        self.timeout = timeout
        self.ioloop = IOLoop.instance()
        self.ws = None
        self.imgHandler = ImageHandler()
        self.connect()
        PeriodicCallback(self.keep_alive, 20000).start()
        self.ioloop.start()

    @gen.coroutine
    def connect(self):
        print ("trying to connect")
        try:
            self.ws = yield websocket_connect(self.url)
        except Exception:
            print(Exception)
        else:
            print ("connected")
            self.run()

    @gen.coroutine
    def run(self):
        while True:
            # MARK: improve
            msg = yield self.ws.read_message()
            if msg is None:
                print("connection closed")
                self.ws = None
                break
            self.imgHandler.handleImageBlob(msg)

    def keep_alive(self):
        if self.ws is None:
            self.connect()
        else:
            self.ws.write_message("keep alive")
Code example #19
File: Entities.py Project: zhw278/tritonPetCare
 def make_post_info(userId, input, prevPost=None):
     postInfo = input.form.copy()
     if not Entities.attrNotNull(postInfo, [
             'name', 'species', 'gender', 'age', 'vaccination',
             'start_date', 'end_date', 'criteria'
     ]):
         return False
     if not Entities.attrNotZeroLength(postInfo,
                                       ['name', 'species', 'vaccination']):
         return False
     postInfo['owner_id'] = userId
     postInfo['start_date'] = int(
         time.mktime(time.strptime(postInfo['start_date'], '%Y-%m-%d')))
     postInfo['end_date'] = int(
         time.mktime(time.strptime(postInfo['end_date'], '%Y-%m-%d')))
     postInfo['post_date'] = int(time.time())
     # FIXME: what strategy to be used for uploading pictures
     for i in ['image1', 'image2', 'image3']:
         if i in input.files and len(input.files[i].filename) > 0:
             postInfo[i] = ImageHandler.save_image(input.files[i])
             if prevPost is not None and i in prevPost:
                 ImageHandler.delete_image(prevPost[i])
     return postInfo
Code example #20
File: main.py Project: iansan5653/SOAR_NSL_CV
    def __init__(self):
        """
        Sets up program variables, resets files, does not tell arduino to start
        """
        GPIO.setmode(GPIO.BCM)
        self.ei = ErrorIndicator(False)
        self.ih = ImageHandler(False, self.ei)
        self.processor = ImageProcessor(self.ih, self.ei)
        self.arduino = Arduino(self.ei)
        GPIO.setup(24, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.setup(25, GPIO.IN)
        GPIO.setwarnings(False)

        if os.path.exists("out/"):
            shutil.rmtree("out")
        os.makedirs("out")
        if os.path.exists("log.txt"):
            os.remove("log.txt")

        logging.basicConfig(filename='log.txt',
                            level=logging.DEBUG,
                            format='%(asctime)s %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S %p')
Code example #21
File: jupyter.py Project: Isan-Rivkin/MNIST_MODEL
    def train(self, model_path):
        ## cut
        saver = tf.train.Saver()
        sess = tf.Session()
        sess.run(tf.global_variables_initializer())

        batch_size = 128
        n_epochs = 30
        l_acc = list()
        for epoch_i in range(n_epochs):
            for batch_i in range(0, mnist.train.num_examples, batch_size):
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                sess.run(optimizer, feed_dict={
                    net_input: batch_xs,
                    y_true: batch_ys
                })
            val_acc = sess.run(accuracy, feed_dict={
                net_input: mnist.validation.images,
                y_true: mnist.validation.labels
            })
            print('Validation accuracy for epoch {} is: {}'.format(
                epoch_i + 1, val_acc))
            l_acc.append(val_acc)

        # save model
        save_path = saver.save(sess, model_path)
        # test predictions on self-uploaded images
        handler = ImageHandler()
        images, labels = handler.parse_images()
        predictions = sess.run(net_output, feed_dict={net_input: images})
        self.print_predictions(predictions)
        print("Accuracy for test set: {}".format(
            sess.run(accuracy, feed_dict={
                net_input: images,
                y_true: labels
            })))
Code example #22
File: Predictor.py Project: LaurentiuM1234/ML
    def __init__(self,
                 train_path,
                 pc_number,
                 class_threshold=20000,
                 projection_threshold=20000):
        """
        Function that initialises the image handler and the pc handler and sets the classification
        thresholds
        :param train_path: path to training data
        :param pc_number: number of principal components
        :param class_threshold: classification scalar
        :param projection_threshold: classification scalar
        """
        Predictor._image_handler = ImageHandler(train_path)
        Predictor._image_handler.load_image_matrix()
        Predictor._image_handler.load_avg_image()
        Predictor._image_handler.load_c_image_matrix()

        Predictor._pc_handler = PCHandler(pc_number)
        Predictor._pc_handler.load_pc_matrix(
            Predictor._image_handler.get_c_image_matrix())

        Predictor._class_threshold = class_threshold
        Predictor._projection_threshold = projection_threshold
Code example #23
class VideoHandler:

    def __init__(self, *args, **kwargs):
        self.clip_name = args[0].input_video
        self.subclip_length = args[0].subclip_length
        self.out_clip = None
        self.out_clip_name = args[0].output_video
#         self.video_clip = VideoFileClip(self.clip_name).subclip(0, self.subclip_length)
#         self.video_clip = VideoFileClip(self.clip_name).subclip(20, 25)
#         self.video_clip = VideoFileClip(self.clip_name).subclip(38, 40)
        self.video_clip = VideoFileClip(self.clip_name)
        self.frame_counter = 0
        self.image_handler = ImageHandler()
    
    def process_video(self):
        # NOTE: fl_image expects color images!
        self.out_clip = self.video_clip.fl_image(self.process_image)
        self.out_clip.write_videofile(self.out_clip_name, audio=False)
    
    def process_image(self, img):
        self.frame_counter += 1
        out_img = self.image_handler.process_image(img)
#         print("Processing frame: {} of shape {}".format(self.frame_counter, img.shape))
        return out_img
Code example #24
File: text.py Project: watarus-nt/mock_ebook_app
class SimpleCalculatorTests(unittest.TestCase):
    def setUp(self):
        # set up appium
        desired_caps = {}
        desired_caps[
            "app"] = "jp.co.sharp.ebook.ebookapp_h35559jr9hy9m!jp.co.sharp.ebook.ebookapp"
        desired_caps["deviceName"] = "WindowsPC"
        desired_caps["platformName"] = "Windows"
        # desired_caps["app"] = "Microsoft.WindowsCalculator_8wekyb3d8bbwe"
        self.driver = webdriver.Remote(
            command_executor='http://127.0.0.1:4723/wd/hub',
            desired_capabilities=desired_caps)

        self.imageHandler = ImageHandler()
        self.mouseHandler = MouseHandler()

    def tearDown(self):
        for window in self.driver.window_handles:
            self.driver.switch_to.window(window)
            self.driver.close()

    def test_initialize(self):
        time.sleep(3)
        x, y = self.imageHandler.get_image_location(
            '../../images/book3title.PNG')
        self.mouseHandler.click_at_coordinate(x, y - 80)
        time.sleep(3)

        self.switch_window()
        self.driver.find_element_by_accessibility_id('button-bookmark').click()
        time.sleep(5)

    def switch_window(self):
        current = self.driver.current_window_handle
        for window in self.driver.window_handles:
            if window != current:
                self.driver.switch_to.window(window)
Code example #25
class ImageqHandlerTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.block_thresholds = {
            1: (0, 0, 77, 78),
            2: (77, 0, 154, 78),
            3: (154, 0, 231, 78),
            4: (231, 0, 308, 78),
            5: (308, 0, 385, 78),
            6: (385, 0, 462, 78),
            7: (462, 0, 539, 78),
            8: (539, 0, 616, 78),
            9: (0, 78, 77, 156),
            10: (77, 78, 154, 156),
            11: (154, 78, 231, 156),
            12: (231, 78, 308, 156),
            13: (308, 78, 385, 156),
            14: (385, 78, 462, 156),
            15: (462, 78, 539, 156),
            16: (539, 78, 616, 156),
            17: (0, 156, 77, 234),
            18: (77, 156, 154, 234),
            19: (154, 156, 231, 234),
            20: (231, 156, 308, 234),
            21: (308, 156, 385, 234),
            22: (385, 156, 462, 234),
            23: (462, 156, 539, 234),
            24: (539, 156, 616, 234),
            25: (0, 234, 77, 312),
            26: (77, 234, 154, 312),
            27: (154, 234, 231, 312),
            28: (231, 234, 308, 312),
            29: (308, 234, 385, 312),
            30: (385, 234, 462, 312),
            31: (462, 234, 539, 312),
            32: (539, 234, 616, 312),
            33: (0, 312, 77, 390),
            34: (77, 312, 154, 390),
            35: (154, 312, 231, 390),
            36: (231, 312, 308, 390),
            37: (308, 312, 385, 390),
            38: (385, 312, 462, 390),
            39: (462, 312, 539, 390),
            40: (539, 312, 616, 390),
            41: (0, 390, 77, 468),
            42: (77, 390, 154, 468),
            43: (154, 390, 231, 468),
            44: (231, 390, 308, 468),
            45: (308, 390, 385, 468),
            46: (385, 390, 462, 468),
            47: (462, 390, 539, 468),
            48: (539, 390, 616, 468),
            49: (0, 468, 77, 546),
            50: (77, 468, 154, 546),
            51: (154, 468, 231, 546),
            52: (231, 468, 308, 546),
            53: (308, 468, 385, 546),
            54: (385, 468, 462, 546),
            55: (462, 468, 539, 546),
            56: (539, 468, 616, 546),
            57: (0, 546, 77, 624),
            58: (77, 546, 154, 624),
            59: (154, 546, 231, 624),
            60: (231, 546, 308, 624),
            61: (308, 546, 385, 624),
            62: (385, 546, 462, 624),
            63: (462, 546, 539, 624),
            64: (539, 546, 616, 624)
        }

        cls.block_id_threshold_Dictionary = {
            'f8': (385, 0, 462, 78),
            'g5': (462, 234, 539, 312),
            'c3': (154, 390, 231, 468),
            'f7': (385, 78, 462, 156),
            'c5': (154, 234, 231, 312),
            'e3': (308, 390, 385, 468),
            'f4': (385, 312, 462, 390),
            'a3': (0, 390, 77, 468),
            'h4': (539, 312, 616, 390),
            'a7': (0, 78, 77, 156),
            'h7': (539, 78, 616, 156),
            'e2': (308, 468, 385, 546),
            'c1': (154, 546, 231, 624),
            'b2': (77, 468, 154, 546),
            'a4': (0, 312, 77, 390),
            'h2': (539, 468, 616, 546),
            'a2': (0, 468, 77, 546),
            'd2': (231, 468, 308, 546),
            'd1': (231, 546, 308, 624),
            'b6': (77, 156, 154, 234),
            'a1': (0, 546, 77, 624),
            'd3': (231, 390, 308, 468),
            'f3': (385, 390, 462, 468),
            'g7': (462, 78, 539, 156),
            'f6': (385, 156, 462, 234),
            'h6': (539, 156, 616, 234),
            'b4': (77, 312, 154, 390),
            'f2': (385, 468, 462, 546),
            'h1': (539, 546, 616, 624),
            'a8': (0, 0, 77, 78),
            'd8': (231, 0, 308, 78),
            'd7': (231, 78, 308, 156),
            'g2': (462, 468, 539, 546),
            'h5': (539, 234, 616, 312),
            'd4': (231, 312, 308, 390),
            'b1': (77, 546, 154, 624),
            'a5': (0, 234, 77, 312),
            'f1': (385, 546, 462, 624),
            'h8': (539, 0, 616, 78),
            'e1': (308, 546, 385, 624),
            'e5': (308, 234, 385, 312),
            'b3': (77, 390, 154, 468),
            'b5': (77, 234, 154, 312),
            'c2': (154, 468, 231, 546),
            'd6': (231, 156, 308, 234),
            'c6': (154, 156, 231, 234),
            'e4': (308, 312, 385, 390),
            'c7': (154, 78, 231, 156),
            'g4': (462, 312, 539, 390),
            'c4': (154, 312, 231, 390),
            'g3': (462, 390, 539, 468),
            'b7': (77, 78, 154, 156),
            'g1': (462, 546, 539, 624),
            'd5': (231, 234, 308, 312),
            'e6': (308, 156, 385, 234),
            'f5': (385, 234, 462, 312),
            'c8': (154, 0, 231, 78),
            'g6': (462, 156, 539, 234),
            'g8': (462, 0, 539, 78),
            'e7': (308, 78, 385, 156),
            'e8': (308, 0, 385, 78),
            'h3': (539, 390, 616, 468),
            'a6': (0, 156, 77, 234),
            'b8': (77, 0, 154, 78)
        }

        cls.image_handler = ImageHandler()
        cls.image_handler.load_captured_image()
        cls.image_handler.set_thresholds()

    @classmethod
    def tearDownClass(cls):
        os.remove(os.getcwd() + r"\Resources\CapturedImage\cropped_board.png")

    def test_load_captured_image(self):

        self.assertNotEqual(len(self.image_handler.captured_image), 0)

    def test_set_thresholds(self):
        self.assertEqual(len(self.image_handler.crop_thresholds), 2)

    def test_crop_and_save(self):
        self.image_handler.crop_and_save()
        self.assertTrue(
            os.path.exists(os.getcwd() +
                           r"\Resources\CapturedImage\cropped_board.png"))

    def test_slice_image(self):
        self.image_handler.slice_image()
        self.assertEqual(len(self.image_handler.block_thresholds), 64)

    def test_create_block_id_threshold_dictionary(self):
        self.image_handler.block_thresholds = self.block_thresholds
        self.image_handler.create_block_id_threshold_dictionary()
        self.assertEqual(len(self.image_handler.block_id_threshold_dictionary),
                         64)

    def test_iterate_blocks(self):
        self.image_handler.block_id_threshold_dictionary = self.block_id_threshold_Dictionary
        self.assertTrue(type(self.image_handler.iterate_blocks()) is dict)

    def test_ml_iterate_blocks(self):
        os.chdir(os.getcwd() + '\\..\\')  # change directory to load graph
        load_model()
        self.image_handler.block_id_threshold_dictionary = self.block_id_threshold_Dictionary
        os.chdir(os.getcwd() + '\\Tests\\')  # change directory back to Tests
        output = self.image_handler.ml_iterate_blocks()
        self.assertTrue(type(output) is dict)
        self.assertNotEqual(len(output), 0)
        self.assertNotEqual(
            len(self.image_handler.get_second_square_value_dict()), 0)
Code example #26
    # tile each image into four 64-column bands: the RGB copy first, then
    # one colour channel per band
    for i in range(imgAr.shape[0]):
        newImg[i, :, :64, :] = imgAr[i, :, :, :]
        newImg[i, :, 64:2 * 64, 0] = imgAr[i, :, :, 0]
        newImg[i, :, 2 * 64:3 * 64, 1] = imgAr[i, :, :, 1]
        newImg[i, :, 3 * 64:4 * 64, 2] = imgAr[i, :, :, 2]

    return newImg


if __name__ == '__main__':
    print("python msrnet.py mod.h5 None|relu|sigmoid aa.npz")
    MODEL_FILE_NAME = sys.argv[1]
    actFunc = sys.argv[2]
    npzSaveFile = sys.argv[3]

    img_hndlr = ImageHandler((64, 64))

    path = "dataset"

    if not (os.path.exists(path + "/dark/")
            and os.path.exists(path + "/true/")):
        img_hndlr.create_dataset(path)

    X = img_hndlr.load_images(path + "/dark/")
    Y = img_hndlr.load_images(path + "/true/")

    X = img_hndlr.preprocess_images(X)
    Y = img_hndlr.preprocess_images(Y)
    print("XY shapes", X.shape, Y.shape)

    print("python msrnet.py")
Code example #27
    if FH_ENABLE:
        FH = []
        FHNRT = []
        DH = utils.DirsHandler(DIRS)
        FH.append(
            FilesHandlerRT(DH.all_dirs['diff_detection'],
                           run_compression=['_main', '_debug', 'mov'],
                           run_NN=['_mov'],
                           delete_org=['_main', '_debug', 'mov']).start())
        FH.append(
            FilesHandlerRT(DH.all_dirs['no_diff_detection'],
                           run_compression=['_main'],
                           delete_org=['_main', '_debug']).start())
        FHNRT.append(
            FilesHandlerNonRT(
                dir_key='diff_detection',
                max_history=FILES['max_history_detected']).start())
        FHNRT.append(
            FilesHandlerNonRT(
                dir_key='no_diff_detection',
                max_history=FILES['max_history_not_detected']).start())

    IH = ImageHandler(debug=True).start()
    if SHOW_STREAM:
        IS = ImageShow(IH, debug=True).start()

    TLGA = TelegramAlerts()

    LOGGER.info('AMIR IPCAM VERSION {} IS READY'.format(VERSION))
    # keep the main thread alive; the handler threads started above do the work
    while True:
        time.sleep(10000)
Code example #28
File: GreyBitmapProcessor.py Project: pancur/khf
 def calculateMassCenterAndMotionVector(self, greyBitmapWithDirectionInfo, box):
     image = ImageHandler().createEmptyGreyScaleBitmap(greyBitmapWithDirectionInfo.size)
     image.paste(greyBitmapWithDirectionInfo.crop(box), box)
     first = self._calculateMassCenter(image, 1)
     second = self._calculateMassCenter(image, 2)
     return first, second
Code example #29
File: PymageMain.py Project: edson-gonzales/PymageC
        input_values = []
        image_path = self.read_image_path()
        if file_manager.validate_type_of_image(image_path):
            new_format = self.read_format_to_export_an_image()
            if (new_format != "Incorrect format"):
                input_values = [image_path, new_format]
                return input_values
            else:
                return input_values
        else:
            return input_values



main = PymageMain()
image_handler = ImageHandler()
#result = main.read_all_values_to_rotate()
#if (len(result) != 0):
#    print result
#    image = image_handler.rotate_image(result)
#    main.save_image_file(image, result[2])

#else:
#    print "False"


input_values = main.read_all_values_to_export_an_image_file_to_another_format()
if len(input_values) != 0:
#    print input_values
    image_handler.export_an_image_file_to_another_format(input_values)
else:
    pass  # nothing to export: the image path or format failed validation