Example #1
import builtins

def ord(char):
    """ Return the Unicode code point of *char*, given as a str or UTF-8 bytes.

        Assumes *char* encodes exactly one character.
    """
    if isinstance(char, str):
        return builtins.ord(char)
    return builtins.ord(str(char, 'utf-8'))
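A minimal usage sketch of the wrapper above, assuming it is in scope (the real builtin stays reachable through builtins.ord):

# str input falls through to the builtin
assert ord('A') == 65
# a single UTF-8 encoded character may arrive as bytes and is decoded first
assert ord('é'.encode('utf-8')) == 0xE9  # b'\xc3\xa9' -> 'é' -> 233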
Example #2
def _fold_md4_or_md5(digest):
    # XOR the first half of the digest with the second half,
    # folding 16 bytes down to 8.
    if len(digest) < 16:
        raise ValueError('digest is too short')
    result = b''
    for i in range(0, 8):
        one = ord(bytes([digest[i]]))
        two = ord(bytes([digest[i + 8]]))
        result = result + bytes([one ^ two])
    return result
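A quick demonstration of the fold; hashlib is used here only to produce a 16-byte digest:

import hashlib

digest = hashlib.md5(b'hello').digest()   # 16 bytes
folded = _fold_md4_or_md5(digest)         # byte i of the result is digest[i] ^ digest[i + 8]
assert len(folded) == 8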
Example #3
def _long_from_raw(thehash):
    """Fold a raw digest, supplied as bytes, into a single big-endian integer."""
    hashnum = 0
    for h in thehash:
        hashnum <<= 8
        hashnum |= ord(bytes([h]))
    return hashnum
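The loop is equivalent to int.from_bytes with big-endian byte order; a sanity check (hashlib only supplies sample input):

import hashlib

digest = hashlib.md5(b'hello').digest()
assert _long_from_raw(digest) == int.from_bytes(digest, 'big')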
Example #4
def symbol_ords(symbol):
    u"""
    Convert a string into a string of the decimal ordinal of each
    character, separated by commas.

    :param symbol: string to convert
    :type symbol: :class:`str`

    :returns: decimal ordinal representations of each input character
    :rtype: :class:`str` (integers separated by commas)
    """
    return u', '.join(str(ord(x)) for x in symbol)
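For example:

assert symbol_ords(u'ab') == u'97, 98'
assert symbol_ords(u'é') == u'233'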
Example #5
def a1_to_py_convert(pair):
    """ Converts chess coordinates to a list index with origin top-left.

    Chess has its origin bottom-left and counts columns a to h and rows numbered 1 to 8.
    Computers index differently, from the top-left, and count from 0 rather than 1.

    Example: 'a8' returns [0,0]. 'a1' returns [7,0].

    Note: No error checking yet - assumes the user enters valid input
    """
    # print "Checking pair: " + pair
    col = ord(pair[0].lower()) - 97  # In ASCII, 'a' = 97
    # print "Letter is " + str(pair[0]) + " -> " + str(col)
    row = 8 - int(pair[1])  # Chess counts from 1, not 0 (the row component of coords 'e2' is 1, not 2).
    return [row, col]
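For example:

assert a1_to_py_convert('a8') == [0, 0]
assert a1_to_py_convert('e2') == [6, 4]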
Example #6
import builtins as _  # the untouched originals
import operator as op
from functools import reduce, wraps
from random import random


def r(f, g, p=1 / 6):
    # With probability p, call the sabotaged variant g instead of f.
    @wraps(f)
    def i(*a, **k):
        if random() < p:
            return g(*a, **k)
        return f(*a, **k)

    return i


int = r(_.int, lambda *a: _.int(*a) - 1)
float = r(_.float, lambda v: _.float(v) + 0.001)
str = r(_.str, lambda *a, **k: _.str(*a, **k)[::-1])
bool = r(_.bool, lambda v: not (_.bool(v)))
len = r(_.len, lambda v: _.len(v) - 1)
ord = r(_.ord, lambda v: _.ord(v.lower() if v.isupper() else v.upper()))

abs = r(_.abs, lambda v: -_.abs(v))
pow = r(_.pow, lambda v, p, *a: _.pow(v, p + 1, *a))
min = r(_.min, lambda *a: _.max(*a))
max = r(_.max, lambda *a: _.min(*a))
sum = r(_.sum, lambda v, *a: reduce(op.__sub__, v))

hasattr = r(_.hasattr, lambda o, n: not (_.hasattr(o, n)))

sorted = r(_.sorted, lambda *a, **k: list(_.reversed(*a, **k)))
reversed = r(_.reversed, lambda v: _.sorted(v))
enumerate = r(_.enumerate, lambda v, *a:
              ((i + 1, _v) for i, _v in _.enumerate(v, *a)))

globals = r(_.globals, locals)
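A deterministic demo of r(): with p=1.0 the sabotaged variant always runs, since random() is always < 1.0:

broken_len = r(_.len, lambda v: _.len(v) - 1, p=1.0)
assert broken_len('abc') == 2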
Example #7

def __getattr__(self, item):
    # builtins.ord() is used in case the user has shadowed the
    # name ord, e.g. with ord = Ord()
    return builtins.ord(item)
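A hedged sketch of how this method is presumably used; the class name Ord comes from the comment above, and the import is assumed:

import builtins

class Ord:
    def __getattr__(self, item):
        return builtins.ord(item)

ord = Ord()          # shadow the builtin, as the comment suggests
assert ord.a == 97   # attribute access doubles as ord('a')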
Example #8

    def detection(self, detection_graph, category_index):
        print("> Building Graph")
        # Session config: allow separate GPU/CPU addressing and limit memory allocation
        config = tf.ConfigProto(allow_soft_placement=True,
                                log_device_placement=self.config['log_device'])
        config.gpu_options.allow_growth = self.config['allow_memory_growth']

        with detection_graph.as_default():
            with tf.Session(graph=detection_graph, config=config) as sess:
                # Define input and output tensors
                image_tensor = detection_graph.get_tensor_by_name(
                    'image_tensor:0')
                detection_boxes = detection_graph.get_tensor_by_name(
                    'detection_boxes:0')
                detection_scores = detection_graph.get_tensor_by_name(
                    'detection_scores:0')
                detection_classes = detection_graph.get_tensor_by_name(
                    'detection_classes:0')
                num_detections = detection_graph.get_tensor_by_name(
                    'num_detections:0')
                if self.config['split_model']:
                    score_out = detection_graph.get_tensor_by_name(
                        'Postprocessor/convert_scores:0')
                    expand_out = detection_graph.get_tensor_by_name(
                        'Postprocessor/ExpandDims_1:0')
                    score_in = detection_graph.get_tensor_by_name(
                        'Postprocessor/convert_scores_1:0')
                    expand_in = detection_graph.get_tensor_by_name(
                        'Postprocessor/ExpandDims_1_1:0')
                    # Threading
                    self.gpu_worker = SessionWorker('GPU', detection_graph,
                                                    config)
                    self.cpu_worker = SessionWorker('CPU', detection_graph,
                                                    config)
                    gpu_opts = [score_out, expand_out]
                    cpu_opts = [
                        detection_boxes, detection_scores, detection_classes,
                        num_detections
                    ]
                    gpu_counter = 0
                    cpu_counter = 0
                # Start Video Stream and FPS calculation
                self.fps = FPS(self.config['fps_interval']).start()
                self.video_stream = WebcamVideoStream(
                    self.config['video_input'], self.config['width'],
                    self.config['height']).start()

                self.socket_client = ThreadedSocketClient(
                    category_index, self.config['det_th'])
                self.socket_client.start()

                print('> Press \'q\' to Exit')
                print('> Starting Detection')
                while self.video_stream.is_active():
                    # actual Detection
                    if self.config['split_model']:
                        # split model in separate gpu and cpu session threads
                        if self.gpu_worker.is_sess_empty():
                            image = self.video_stream.read()
                            image_expanded = np.expand_dims(cv2.cvtColor(
                                image, cv2.COLOR_BGR2RGB),
                                                            axis=0)
                            # put new queue
                            gpu_feeds = {image_tensor: image_expanded}
                            if self.config['visualize']:
                                gpu_extras = image  # for visualization frame
                            else:
                                gpu_extras = None
                            self.gpu_worker.put_sess_queue(
                                gpu_opts, gpu_feeds, gpu_extras)

                        g = self.gpu_worker.get_result_queue()
                        if g is None:
                            # gpu thread has no output queue. ok skip, let's check cpu thread.
                            gpu_counter += 1
                        else:
                            # gpu thread has output queue.
                            gpu_counter = 0
                            score, expand, image = (
                                g['results'][0], g['results'][1], g['extras'])

                            if self.cpu_worker.is_sess_empty():
                                # When cpu thread has no next queue, put new queue.
                                # else, drop gpu queue.
                                cpu_feeds = {
                                    score_in: score,
                                    expand_in: expand
                                }
                                cpu_extras = image
                                self.cpu_worker.put_sess_queue(
                                    cpu_opts, cpu_feeds, cpu_extras)

                        c = self.cpu_worker.get_result_queue()
                        if c is None:
                            # cpu thread has no output queue. ok, nothing to do. continue
                            cpu_counter += 1
                            time.sleep(0.005)
                            continue  # If CPU RESULT has not been set yet, no fps update
                        else:
                            cpu_counter = 0
                            boxes, scores, classes, num, image = (
                                c["results"][0], c["results"][1],
                                c["results"][2], c["results"][3], c["extras"])
                    else:
                        # default session
                        image = self.video_stream.read()
                        image_expanded = np.expand_dims(cv2.cvtColor(
                            image, cv2.COLOR_BGR2RGB),
                                                        axis=0)
                        boxes, scores, classes, num = sess.run(
                            [
                                detection_boxes, detection_scores,
                                detection_classes, num_detections
                            ],
                            feed_dict={image_tensor: image_expanded})

                    # Pass results to socket client
                    boxes = np.squeeze(boxes)
                    classes = np.squeeze(classes)
                    scores = np.squeeze(scores)

                    self.socket_client.boxes = boxes
                    self.socket_client.scores = scores
                    self.socket_client.classes = classes

                    # Visualization of the results of a detection.
                    if self.config['visualize']:
                        vis_util.visualize_boxes_and_labels_on_image_array(
                            image,
                            boxes,
                            classes.astype(np.int32),
                            scores,
                            category_index,
                            use_normalized_coordinates=True,
                            line_thickness=4)
                        if self.config['vis_text']:
                            cv2.putText(image,
                                        "fps: {}".format(self.fps.fps_local()),
                                        (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
                                        0.75, (77, 255, 9), 2)
                        cv2.imshow('object_detection', image)
                        # Exit Option
                        if cv2.waitKey(1) & 0xFF == ord('q'):
                            break

                    self.fps.update()
Example #9
import builtins
import functools

# wrap() is a helper defined elsewhere in the originating module.
next = functools.update_wrapper(
    lambda *args, **kwargs: builtins.next(*args, **kwargs), builtins.next)
next._ = functools.update_wrapper(
    lambda *args, **kwargs: wrap(builtins.next)(*args, **kwargs),
    builtins.next)
oct = functools.update_wrapper(
    lambda *args, **kwargs: builtins.oct(*args, **kwargs), builtins.oct)
oct._ = functools.update_wrapper(
    lambda *args, **kwargs: wrap(builtins.oct)(*args, **kwargs), builtins.oct)
open = functools.update_wrapper(
    lambda *args, **kwargs: builtins.open(*args, **kwargs), builtins.open)
open._ = functools.update_wrapper(
    lambda *args, **kwargs: wrap(builtins.open)(*args, **kwargs),
    builtins.open)
ord = functools.update_wrapper(
    lambda *args, **kwargs: builtins.ord(*args, **kwargs), builtins.ord)
ord._ = functools.update_wrapper(
    lambda *args, **kwargs: wrap(builtins.ord)(*args, **kwargs), builtins.ord)
pow = functools.update_wrapper(
    lambda *args, **kwargs: builtins.pow(*args, **kwargs), builtins.pow)
pow._ = functools.update_wrapper(
    lambda *args, **kwargs: wrap(builtins.pow)(*args, **kwargs), builtins.pow)
print = functools.update_wrapper(
    lambda *args, **kwargs: builtins.print(*args, **kwargs), builtins.print)
print._ = functools.update_wrapper(
    lambda *args, **kwargs: wrap(builtins.print)(*args, **kwargs),
    builtins.print)
range = functools.update_wrapper(
    lambda *args, **kwargs: builtins.range(*args, **kwargs), builtins.range)
range._ = functools.update_wrapper(
    lambda *args, **kwargs: wrap(builtins.range)(*args, **kwargs),
    builtins.range)
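Each builtin is re-exported through a pass-through lambda, and functools.update_wrapper copies the builtin's metadata onto it (wrap is a helper from the originating module and is not reproduced here). A standalone illustration of the pattern:

import builtins
import functools

ord = functools.update_wrapper(
    lambda *args, **kwargs: builtins.ord(*args, **kwargs), builtins.ord)
assert ord('A') == 65
assert ord.__name__ == 'ord'  # metadata copied from the builtin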
Example #10
def ord(x=None):
    # With no argument, return a pipeline stage wrapping ord;
    # otherwise delegate to the builtin.
    if x is None:
        return bpipe(ord)
    else:
        return builtins.ord(x)
Example #11
import builtins
from typing import List, Union, cast


def ord(stack: List[object], stash: List[object]) -> None:
    """c -- ord(c)"""
    stack.append(builtins.ord(cast(Union[str, bytes], stack.pop())))
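Usage follows the stack convention the docstring describes ("c -- ord(c)"): the character is popped and its code point is pushed:

stack: list = ['A']
ord(stack, [])        # pops 'A', pushes 65
assert stack == [65]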
Example #12
def __init__(self, keep=string.digits):
    # Build a translation table mapping each kept character's
    # code point to the character itself.
    self.comp = dict((ord(c), c) for c in keep)
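This looks like the classic str.translate() mapping-object recipe; a hedged completion (the class name Keep and the __getitem__ method are assumptions, not part of the original snippet):

import string

class Keep:
    def __init__(self, keep=string.digits):
        self.comp = dict((ord(c), c) for c in keep)

    def __getitem__(self, k):
        # str.translate() deletes characters whose lookup returns None
        return self.comp.get(k)

assert 'a1b2c3'.translate(Keep()) == '123'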
Example #13
def __char2idx(self, ch):
    # Map 'a'..'z' to 0..25.
    return ord(ch) - ord('a')
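For instance, written as a free function (the original is a private method):

def char2idx(ch):
    return ord(ch) - ord('a')

assert [char2idx(c) for c in 'abz'] == [0, 1, 25]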
Example #14
import cv2

# Open the default camera (index 0 is an assumption; the original
# snippet starts inside an already-running capture loop).
cap = cv2.VideoCapture(0)

while True:
    # cap.read() of the VideoCapture class returns two objects:
    # 1) a boolean (True or False): True if the current frame was
    #    loaded without errors. We store it in the variable ret.
    # 2) the current frame read from the video. We store it in frame.
    ret, frame = cap.read()

    #frame = cv2.flip(frame, -1)  # Flip camera vertically

    # cvtColor() converts an image to the requested color space. It takes
    # the image object and the conversion name; here grayscale, to reduce
    # the resources needed to display the captured video on screen.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # cv2.imshow() displays a single image (an OpenCV object)
    # on screen in a separate window.
    cv2.imshow('frame', frame)
    cv2.imshow('gray', gray)

    # To end the WHILE loop when the "q" key is pressed, the IF body calls
    # break, which exits the loop so the script continues past it (the
    # condition fires if and only if "q" is pressed, not "Q", "й", or "Й").
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release the capture device and the memory held by cap
cap.release()

# Close all windows opened by the script
cv2.destroyAllWindows()
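A note on the exit check: cv2.waitKey() returns -1 on timeout, and on some platforms the returned code carries extra flag bits above the low byte, so masking with 0xFF before comparing against ord('q') (code point 113) keeps the test reliable:

assert ord('q') == 113
assert (-1 & 0xFF) == 255  # the timeout value can never match a key's code point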