def auth_request():
    """
    Captcha prediction endpoint with authentication.

    :return: (JSON-encoded response body, HTTP status code) tuple, or an
        aborted 400 response when no image payload was supplied.
    """
    started = time.time()
    payload = request.json
    if not payload or 'image' not in payload:
        abort(400)

    # Bail out early when no model has been deployed yet.
    if interface_manager.total == 0:
        logger.info('There is currently no model deployment and services are not available.')
        return json.dumps({"message": "", "success": False, "code": -999})

    bytes_batch, response = ImageUtils.get_bytes_batch(payload['image'])

    if not bytes_batch:
        logger.error('Type[{}] - Site[{}] - Response[{}] - {} ms'.format(
            payload.get('model_type'),
            payload.get('model_site'),
            response,
            (time.time() - started) * 1000,
        ))
        return json.dumps(response), 200

    sample = bytes_batch[0]
    dims = ImageUtils.size_of_image(sample)
    size_string = "{}x{}".format(dims[0], dims[1])

    # Resolve the model interface: site > type > name > plain size lookup.
    if 'model_site' in payload:
        interface = interface_manager.get_by_sites(payload['model_site'], size_string, strict=system_config.strict_sites)
    elif 'model_type' in payload:
        interface = interface_manager.get_by_type_size(size_string, payload['model_type'])
    elif 'model_name' in payload:
        interface = interface_manager.get_by_name(payload['model_name'])
    else:
        interface = interface_manager.get_by_size(size_string)

    # Fall back to the model's own delimiter when none was supplied.
    split_char = payload.get('split_char', interface.model_conf.split_char)

    if payload.get('need_color'):
        # Strip all but the requested color channel before prediction.
        target_color = color_map[payload['need_color']]
        bytes_batch = [interface.separate_color(img, target_color) for img in bytes_batch]

    image_batch, response = ImageUtils.get_image_batch(interface.model_conf, bytes_batch)

    if not image_batch:
        logger.error('[{}] - Size[{}] - Type[{}] - Site[{}] - Response[{}] - {} ms'.format(
            interface.name,
            size_string,
            payload.get('model_type'),
            payload.get('model_site'),
            response,
            (time.time() - started) * 1000,
        ))
        return json.dumps(response), 200

    result = interface.predict_batch(image_batch, split_char)
    logger.info('[{}] - Size[{}] - Type[{}] - Site[{}] - Predict Result[{}] - {} ms'.format(
        interface.name,
        size_string,
        payload.get('model_type'),
        payload.get('model_site'),
        result,
        (time.time() - started) * 1000,
    ))
    response['message'] = result
    return json.dumps(response), 200
    def post(self):
        """Handle a captcha prediction request posted with routing parameters."""
        began = time.time()
        params = self.parse_param()
        if 'image' not in params:
            raise tornado.web.HTTPError(400)

        # Sanitize the optional routing / formatting parameters.
        model_type = ParamUtils.filter(params.get('model_type'))
        model_site = ParamUtils.filter(params.get('model_site'))
        model_name = ParamUtils.filter(params.get('model_name'))
        split_char = ParamUtils.filter(params.get('split_char'))
        need_color = ParamUtils.filter(params.get('need_color'))

        bytes_batch, response = ImageUtils.get_bytes_batch(params['image'])

        if not bytes_batch:
            elapsed = (time.time() - began) * 1000
            logger.error('Type[{}] - Site[{}] - Response[{}] - {} ms'.format(
                model_type, model_site, response, elapsed))
            return self.finish(json_encode(response))

        first = bytes_batch[0]
        dims = ImageUtils.size_of_image(first)
        size_string = "{}x{}".format(dims[0], dims[1])

        # Resolve the model interface: explicit site, then type, then name,
        # falling back to a lookup by image size alone.
        if 'model_site' in params:
            interface = interface_manager.get_by_sites(model_site, size_string)
        elif 'model_type' in params:
            interface = interface_manager.get_by_type_size(size_string, model_type)
        elif 'model_name' in params:
            interface = interface_manager.get_by_name(model_name)
        else:
            interface = interface_manager.get_by_size(size_string)

        # Use the model's own delimiter when the client supplied none.
        if 'split_char' not in params:
            split_char = interface.model_conf.split_char

        image_batch, response = ImageUtils.get_image_batch(interface.model_conf, bytes_batch, color=need_color)

        if not image_batch:
            elapsed = (time.time() - began) * 1000
            logger.error('[{}] - Size[{}] - Type[{}] - Site[{}] - Response[{}] - {} ms'.format(
                interface.name, size_string, model_type, model_site, response, elapsed))
            return self.finish(json_encode(response))

        result = interface.predict_batch(image_batch, split_char)
        elapsed = (time.time() - began) * 1000
        logger.info('[{}] - Size[{}] - Type[{}] - Site[{}] - Predict Result[{}] - {} ms'.format(
            interface.name, size_string, model_type, model_site, result, elapsed))
        response['message'] = result
        return self.write(json_encode(response))
示例#3
0
    def predict(self, request, context):
        """
        gRPC captcha prediction handler.

        :param request: protobuf request carrying image bytes and optional
            model_site / model_name / model_type / need_color / split_char.
        :param context: gRPC servicer context (unused).
        :return: a grpc_pb2.PredictResult message.
        """
        start_time = time.time()

        # Check availability first so an unavailable service does not pay
        # the cost of decoding the posted image bytes.
        if interface_manager.total == 0:
            logger.info(
                'There is currently no model deployment and services are not available.'
            )
            # BUG FIX: a gRPC handler must return a protobuf message; the
            # previous plain-dict return fails at response serialization.
            return grpc_pb2.PredictResult(result="", success=False, code=-999)

        bytes_batch, status = ImageUtils.get_bytes_batch(request.image)

        if not bytes_batch:
            return grpc_pb2.PredictResult(result="",
                                          success=status['success'],
                                          code=status['code'])

        image_sample = bytes_batch[0]
        image_size = ImageUtils.size_of_image(image_sample)
        size_string = "{}x{}".format(image_size[0], image_size[1])
        # Interface resolution priority: site -> name -> type -> size lookup.
        if request.model_site:
            interface = interface_manager.get_by_sites(request.model_site,
                                                       size_string)
        elif request.model_name:
            interface = interface_manager.get_by_name(request.model_name)
        elif request.model_type:
            interface = interface_manager.get_by_type_size(
                size_string, request.model_type)
        else:
            interface = interface_manager.get_by_size(size_string)
        if not interface:
            logger.info('Service is not ready!')
            # BUG FIX: return a protobuf message instead of a dict here too.
            return grpc_pb2.PredictResult(result="", success=False, code=999)

        if request.need_color:
            # Strip all but the requested color channel before prediction.
            bytes_batch = [
                color_extract.separate_color(_, color_map[request.need_color])
                for _ in bytes_batch
            ]

        image_batch, status = ImageUtils.get_image_batch(
            interface.model_conf, bytes_batch)

        if not image_batch:
            return grpc_pb2.PredictResult(result="",
                                          success=status['success'],
                                          code=status['code'])

        result = interface.predict_batch(image_batch, request.split_char)
        logger.info(
            '[{}] - Size[{}] - Type[{}] - Site[{}] - Predict Result[{}] - {} ms'
            .format(interface.name, size_string, request.model_type,
                    request.model_site, result,
                    (time.time() - start_time) * 1000))
        return grpc_pb2.PredictResult(result=result,
                                      success=status['success'],
                                      code=status['code'])
示例#4
0
def auth_request():
    """
    This api is used for captcha prediction with authentication
    :return: (JSON-encoded response body, HTTP status code) tuple.
    """
    start_time = time.time()
    if not request.json or 'image' not in request.json:
        abort(400)

    bytes_batch, response = ImageUtils.get_bytes_batch(request.json['image'])

    if not bytes_batch:
        logger.error('Type[{}] - Site[{}] - Response[{}] - {} ms'.format(
            request.json.get('model_type'), request.json.get('model_site'),
            response, (time.time() - start_time) * 1000))
        return json.dumps(response), 200

    image_sample = bytes_batch[0]
    image_size = ImageUtils.size_of_image(image_sample)
    size_string = "{}x{}".format(image_size[0], image_size[1])

    # Interface resolution priority: site -> type -> name -> size lookup.
    if 'model_site' in request.json:
        interface = interface_manager.get_by_sites(request.json['model_site'],
                                                   size_string)
    elif 'model_type' in request.json:
        interface = interface_manager.get_by_type_size(
            size_string, request.json['model_type'])
    elif 'model_name' in request.json:
        # BUG FIX: get_by_name takes only the model name at every other call
        # site in this codebase; size_string was a stray extra argument.
        interface = interface_manager.get_by_name(request.json['model_name'])
    else:
        interface = interface_manager.get_by_size(size_string)

    # Fall back to the model's own delimiter when none was supplied.
    split_char = request.json[
        'split_char'] if 'split_char' in request.json else interface.model_conf.split_char

    image_batch, response = ImageUtils.get_image_batch(interface.model_conf,
                                                       bytes_batch)

    if not image_batch:
        logger.error(
            '[{}] - Size[{}] - Type[{}] - Site[{}] - Response[{}] - {} ms'.
            format(interface.name, size_string, request.json.get('model_type'),
                   request.json.get('model_site'), response,
                   (time.time() - start_time) * 1000))
        return json.dumps(response), 200

    result = interface.predict_batch(image_batch, split_char)
    logger.info(
        '[{}] - Size[{}] - Type[{}] - Site[{}] - Predict Result[{}] - {} ms'.
        format(interface.name, size_string, request.json.get('model_type'),
               request.json.get('model_site'), result,
               (time.time() - start_time) * 1000))
    response['message'] = result
    return json.dumps(response), 200
示例#5
0
    def post(self):
        """
        Captcha prediction handler (tornado, coroutine-style).

        Decodes the posted image batch, resolves a model interface from the
        request parameters, and delegates inference to ``self.predict``.
        NOTE(review): the body uses ``yield``, so this presumably runs under
        a tornado coroutine decorator applied outside this view — confirm.
        """
        start_time = time.time()
        data = self.parse_param()
        if 'image' not in data.keys():
            raise tornado.web.HTTPError(400)

        # Sanitize the optional routing / formatting parameters.
        model_type = ParamUtils.filter(data.get('model_type'))
        model_site = ParamUtils.filter(data.get('model_site'))
        model_name = ParamUtils.filter(data.get('model_name'))
        split_char = ParamUtils.filter(data.get('split_char'))
        need_color = ParamUtils.filter(data.get('need_color'))
        # Reject early when no model has been deployed at all.
        if interface_manager.total == 0:
            logger.info('There is currently no model deployment and services are not available.')
            return self.finish(json_encode({"message": "", "success": False, "code": -999}))
        bytes_batch, response = ImageUtils.get_bytes_batch(data['image'])

        if not bytes_batch:
            logger.error('[{} {}] | Type[{}] - Site[{}] - Response[{}] - {} ms'.format(
                self.request.remote_ip, self.request.uri, model_type, model_site, response,
                (time.time() - start_time) * 1000)
            )
            return self.finish(json_encode(response))

        image_sample = bytes_batch[0]
        image_size = ImageUtils.size_of_image(image_sample)
        size_string = "{}x{}".format(image_size[0], image_size[1])
        # Interface resolution priority: site -> type -> name -> size lookup.
        if 'model_site' in data and data['model_site']:
            interface = interface_manager.get_by_sites(model_site, size_string, strict=system_config.strict_sites)
        elif 'model_type' in data and data['model_type']:
            interface = interface_manager.get_by_type_size(size_string, model_type)
        elif 'model_name' in data and data['model_name']:
            interface = interface_manager.get_by_name(model_name)
        else:
            interface = interface_manager.get_by_size(size_string)
        if not interface:
            logger.info('Service is not ready!')
            return self.finish(json_encode({"message": "", "success": False, "code": 999}))

        # Fall back to the model's own delimiter when none was supplied.
        split_char = split_char if 'split_char' in data else interface.model_conf.split_char

        if need_color:
            # Strip all but the requested color channel before prediction.
            bytes_batch = [color_extract.separate_color(_, color_map[need_color]) for _ in bytes_batch]

        image_batch, response = ImageUtils.get_image_batch(interface.model_conf, bytes_batch)

        if not image_batch:
            logger.error('[{} {}] | [{}] - Size[{}] - Type[{}] - Site[{}] - Response[{}] - {} ms'.format(
                self.request.remote_ip, self.request.uri, interface.name, size_string, model_type, model_site, response,
                round((time.time() - start_time) * 1000))
            )
            return self.finish(json_encode(response))
        # Escape "</" so the JSON payload stays safe to embed in <script> tags.
        response['message'] = yield self.predict(interface, image_batch, split_char, size_string, model_type, model_site, start_time)
        return self.finish(json.dumps(response, ensure_ascii=False).replace("</", "<\\/"))
    def post(self):
        """Predict a captcha posted as the raw request body (no parameters)."""
        t0 = time.time()

        if interface_manager.total == 0:
            logger.info(
                'There is currently no model deployment and services are not available.'
            )
            unavailable = {
                self.message_key: "",
                self.status_bool_key: False,
                self.status_code_key: -999,
            }
            return self.finish(json_encode(unavailable))

        bytes_batch, response = self.image_utils.get_bytes_batch(self.request.body)

        if not bytes_batch:
            logger.error('Response[{}] - {} ms'.format(
                response, (time.time() - t0) * 1000))
            return self.finish(json_encode(response))

        dims = ImageUtils.size_of_image(bytes_batch[0])
        size_string = "{}x{}".format(dims[0], dims[1])

        # Raw-body requests carry no routing hints, so match purely on size.
        interface = interface_manager.get_by_size(size_string)
        if not interface:
            logger.info('Service is not ready!')
            not_ready = {
                self.message_key: "",
                self.status_bool_key: False,
                self.status_code_key: 999,
            }
            return self.finish(json_encode(not_ready))

        image_batch, response = ImageUtils.get_image_batch(
            interface.model_conf, bytes_batch, param_key=None)

        if not image_batch:
            logger.error(
                '[{}] | [{}] - Size[{}] - Response[{}] - {} ms'.format(
                    self.request.remote_ip, interface.name, size_string,
                    response, (time.time() - t0) * 1000))
            return self.finish(json_encode(response))

        result = interface.predict_batch(image_batch, None)
        logger.info('[{}] | [{}] - Size[{}] - Predict[{}] - {} ms'.format(
            self.request.remote_ip, interface.name, size_string, result,
            (time.time() - t0) * 1000))
        response[self.message_key] = result
        # Escape "</" so the JSON stays safe to embed in <script> contexts.
        return self.write(
            json.dumps(response, ensure_ascii=False).replace("</", "<\\/"))
    def post(self):
        """Predict a captcha sent as the raw request body, matched by size only."""
        t0 = time.time()

        if interface_manager.total == 0:
            logger.info(
                'There is currently no model deployment and services are not available.'
            )
            return self.finish(json_encode({"message": "", "success": False, "code": -999}))

        bytes_batch, response = ImageUtils.get_bytes_batch(self.request.body)

        if not bytes_batch:
            logger.error('Response[{}] - {} ms'.format(
                response, (time.time() - t0) * 1000))
            return self.finish(json_encode(response))

        dims = ImageUtils.size_of_image(bytes_batch[0])
        size_string = "{}x{}".format(dims[0], dims[1])

        # No parameters accompany a raw body, so route purely on image size.
        interface = interface_manager.get_by_size(size_string)
        if not interface:
            logger.info('Service is not ready!')
            return self.finish(json_encode({"message": "", "success": False, "code": 999}))

        image_batch, response = ImageUtils.get_image_batch(interface.model_conf, bytes_batch)

        if not image_batch:
            logger.error('[{}] - Size[{}] - Response[{}] - {} ms'.format(
                interface.name, size_string, response, (time.time() - t0) * 1000))
            return self.finish(json_encode(response))

        result = interface.predict_batch(image_batch, None)
        logger.info('[{}] - Size[{}] - Predict[{}] - {} ms'.format(
            interface.name, size_string, result, (time.time() - t0) * 1000))
        response['message'] = result
        return self.write(json_encode(response))
示例#8
0
def processFile(filePath, filename):
    """Crop faces from one raw image file and persist crops plus features in the DB."""
    rawFilePath = filePath
    print("---------开始处理: " + rawFilePath + " ---------")
    alreadyInDB = ImageUtils.getFaceInDBByRawImage(rawFilePath)
    if len(alreadyInDB) != 0:
        print("     " + rawFilePath + " 已处理过了")
    else:
        resultData = cropFaces.cropFaces2(rawFilePath)
        if resultData is not None:
            faceList = resultData['croppedImageList']
            featureList = resultData['croppedFeatureList']
            if len(faceList) == 0:
                # No face detected: still register the raw file so it is not
                # re-processed on the next run.
                faceId = FaceUtils.createNewFaceForPerson('', rawFilePath, Constants.PERSON_ID_UNNAMED)
                FaceUtils.updateFaceFeatureFile(faceId, '')
            else:
                for faceIndex in range(len(faceList)):
                    faceImage = faceList[faceIndex]
                    featureData = featureList[faceIndex]

                    faceFileName = os.path.join(
                        Constants.DATA_ROOT_PATH, Constants.FACE_IMG_FILE_PATH,
                        "face_" + filename + "_" + str(faceIndex) + ".bmp")
                    cv.imwrite(faceFileName, faceImage)

                    faceId = FaceUtils.createNewFaceForPerson(faceFileName, rawFilePath, Constants.PERSON_ID_UNNAMED)
                    faceFeaturePath = os.path.join(
                        Constants.DATA_ROOT_PATH, Constants.FEATURE_FILE_PATH,
                        'faceFeature_' + str(faceId) + '.npy')
                    print("开始保存feature:" + faceFeaturePath)
                    np.save(faceFeaturePath, np.array(featureData))
                    FaceUtils.updateFaceFeatureFile(faceId, faceFeaturePath)

                    updateMostSimilarPerson(faceId)

    print("---------结束处理: " + rawFilePath + " ---------")
示例#9
0
def cropFaces(imagePath):
    """Detect faces with a Haar cascade; return crops and a highlighted image."""
    image_raw = ImageUtils.createImageFromPath(imagePath)

    # Haar cascades operate on grayscale input.
    gray = cv.cvtColor(image_raw, cv.COLOR_BGR2GRAY)

    cascade_file = (Constants.DATA_ROOT_PATH + '/' + Constants.PROGRAM_DATA_PATH +
                    '/' + 'haarcascade_frontalface_default.xml')
    haar_cascade_face = cv.CascadeClassifier(cascade_file)

    faces_rects = haar_cascade_face.detectMultiScale(gray,
                                                     scaleFactor=1.2,
                                                     minNeighbors=5)

    print('Faces found: ', len(faces_rects))

    crops = []
    for (x, y, w, h) in faces_rects:
        crops.append(image_raw[y:(y + h), x:(x + w)])
        # Draw a green box on the original so callers can display detections.
        cv.rectangle(image_raw, (x, y), (x + w, y + h), (0, 255, 0), 2)

    return {
        'croppedImageList': crops,
        'imageHighlighed': image_raw,
    }
示例#10
0
 def LoadImage(self, request, context):
     """Load docker images from the request payload and report their attributes."""
     g = GenerateData(request)
     response = Manager.LoadImage_Response()
     try:
         loaded = ImageUtils.LoadImage(g)
     except docker.errors.ImageLoadError:
         response.exit_code = Manager.LoadImage_Response.ExitCode.ERROR
         return response
     response.exit_code = Manager.LoadImage_Response.ExitCode.SUCCESS
     for image_obj in loaded:
         image = ImageUtils.GetImage(image_obj.id)
         image_attr = response.image_attr.add()
         for tag in image['RepoTags']:
             # BUG FIX: the old code did `temp = image_attr.repoTags.add();
             # temp = tag`, which only rebound the local and never stored the
             # tag. Repeated scalar protobuf fields are filled with append()
             # (as BuildImage / ListImages already do).
             image_attr.repoTags.append(tag)
         image_attr.created = image['Created']
         image_attr.size = int(image['Size'])
         image_attr.author = image['Author']
     return response
示例#11
0
 def BuildImage(self, request, context):
     """Build a docker image from an in-memory Dockerfile and describe it."""
     response = Manager.BuildImage_Response()
     # Spool the Dockerfile bytes into a temp file the docker SDK can read.
     dockerfile = TemporaryFile()
     dockerfile.write(request.dockerfile)
     dockerfile.seek(0)
     # NOTE(review): build failures (e.g. docker.errors.APIError) are not
     # caught here; they will propagate out of this handler.
     image_obj = ImageUtils.BuildImageByFile(dockerfile, request.tag)
     response.exit_code = Manager.BuildImage_Response.ExitCode.SUCCESS
     image = ImageUtils.GetImage(image_obj.id)
     attr = response.image_attr
     attr.id = image['Id']
     for tag in image['RepoTags']:
         attr.repoTags.append(tag)
     attr.created = image['Created']
     attr.size = int(image['Size'])
     attr.author = image['Author']
     return response
示例#12
0
    def predict(self, request, context):
        """
        gRPC captcha prediction handler.

        :param request: protobuf request with image bytes and optional
            model_site / model_name / model_type / split_char fields.
        :param context: gRPC servicer context (unused).
        :return: a grpc_pb2.PredictResult message.
        """
        start_time = time.time()
        bytes_batch, status = ImageUtils.get_bytes_batch(request.image)
        if not bytes_batch:
            return grpc_pb2.PredictResult(result="",
                                          success=status['success'],
                                          code=status['code'])

        image_sample = bytes_batch[0]
        image_size = ImageUtils.size_of_image(image_sample)
        size_string = "{}x{}".format(image_size[0], image_size[1])
        # Interface resolution priority: site -> name -> type -> size lookup.
        if request.model_site:
            interface = interface_manager.get_by_sites(request.model_site,
                                                       size_string)
        elif request.model_name:
            interface = interface_manager.get_by_name(request.model_name)
        elif request.model_type:
            interface = interface_manager.get_by_type_size(
                size_string, request.model_type)
        else:
            interface = interface_manager.get_by_size(size_string)
        if not interface:
            logger.info('Service is not ready!')
            # BUG FIX: a gRPC handler must return a protobuf message; the
            # previous plain-dict return fails at response serialization.
            return grpc_pb2.PredictResult(result="", success=False, code=999)
        image_batch, status = ImageUtils.get_image_batch(
            interface.model_conf, bytes_batch)

        if not image_batch:
            return grpc_pb2.PredictResult(result="",
                                          success=status['success'],
                                          code=status['code'])

        result = interface.predict_batch(image_batch, request.split_char)
        logger.info(
            '[{}] - Size[{}] - Type[{}] - Site[{}] - Predict Result[{}] - {} ms'
            .format(interface.name, size_string, request.model_type,
                    request.model_site, result,
                    (time.time() - start_time) * 1000))
        return grpc_pb2.PredictResult(result=result,
                                      success=status['success'],
                                      code=status['code'])
示例#13
0
 def PullImage(self, request, context):
     """Pull an image from a registry, defaulting to the 'latest' tag."""
     if request.tag == '':
         request.tag = 'latest'
     auth_config = None
     # BUG FIX: a proto3 sub-message field is never None, so the previous
     # `is not None` test was always true and credentials (possibly empty
     # strings) were always sent. HasField detects actual presence.
     if request.HasField('auth_config'):
         auth_config = {
             'username': request.auth_config.username,
             'password': request.auth_config.password,
         }
     ImageUtils.PullImage(repository=request.repository,
                          tag=request.tag,
                          auth_config=auth_config)
     return Manager.PullImage_Response()
示例#14
0
 def ListImages(self, request, context):
     """Return attributes for every local docker image."""
     response = Manager.ListImages_Response()
     for image in ImageUtils.ListImages():
         attr = response.images.add()
         attr.id = image['Id']
         for tag in image['RepoTags']:
             attr.repoTags.append(tag)
         attr.created = image['Created']
         attr.size = int(image['Size'])
         attr.author = image['Author']
     return response
示例#15
0
def check_exit(src, limit=4):
    """
    Poll the screen for template image *src*, taking up to *limit* screenshots.

    :param src: template file name relative to img_path.
    :param limit: maximum number of screenshot attempts before giving up.
    :return: the matched position, or None when not found within the limit.
    """
    count = 0
    while True:
        c.screenshot(tmp_file)
        time.sleep(1)
        pos = ImageUtils.find_position(tmp_file, img_path + "/" + src)
        count = count + 1
        if pos is not None:
            # BUG FIX: these were Python-2 print statements; parenthesizing
            # makes the module valid Python 3 with identical output.
            print('[' + str(count) + ']' + str(pos))
            return pos
        elif count == limit:
            print('[' + str(count) + '] %s Not Found' % src)
            return None
示例#16
0
def cropFaces2(imagePath):
    """
    Detect faces with dlib; return crops, feature vectors and a highlighted copy.

    Returns None when the image cannot be loaded.
    """
    result_data = None
    image_raw = ImageUtils.createImageFromPath(imagePath)

    if image_raw is not None:
        cloned_image = ImageUtils.cloneImage(image_raw)
        model_dir = Constants.DATA_ROOT_PATH + '/' + Constants.PROGRAM_DATA_PATH + '/'
        predictor_path = model_dir + 'shape_predictor_68_face_landmarks.dat'
        face_rec_model_path = model_dir + 'dlib_face_recognition_resnet_model_v1.dat'
        # dlib models: landmark predictor, face embedder, frontal-face detector.
        sp = dlib.shape_predictor(predictor_path)
        facerec = dlib.face_recognition_model_v1(face_rec_model_path)
        detector = dlib.get_frontal_face_detector()

        crops = []
        features = []
        for k, det in enumerate(detector(image_raw, 1)):
            x, y, w, h = det.left(), det.top(), det.width(), det.height()
            crops.append(image_raw[y:(y + h), x:(x + w)])
            # Highlight the detection on the clone, keeping the raw image clean.
            cv.rectangle(cloned_image, (x, y), (x + w, y + h), (0, 255, 0), 2)

            shape = sp(cloned_image, det)
            descriptor = facerec.compute_face_descriptor(image_raw, shape)
            vec = np.array(descriptor)
            print('--------------------------')
            print(str(imagePath) + ':' + str(k))
            print(vec)
            print('--------------------------')
            features.append(vec)

        result_data = {
            'croppedImageList': crops,
            'croppedFeatureList': features,
            'imageHighlighed': cloned_image,
        }
    return result_data
示例#17
0
 def GetImage(self, request, context):
     """Look up one docker image by id and return its attributes."""
     response = Manager.GetImage_Response()
     try:
         image = ImageUtils.GetImage(image_id=request.image_id)
     except docker.errors.ImageNotFound:
         response.exit_code = Manager.GetImage_Response.ExitCode.NOTFOUND
         return response
     response.exit_code = Manager.GetImage_Response.ExitCode.SUCCESS
     response.image_attr.id = image['Id']
     for tag in image['RepoTags']:
         # BUG FIX: `temp = repoTags.add(); temp = tag` only rebound the
         # local and never stored the tag. Repeated scalar protobuf fields
         # are filled with append() (as BuildImage / ListImages already do).
         response.image_attr.repoTags.append(tag)
     response.image_attr.created = image['Created']
     response.image_attr.size = int(image['Size'])
     response.image_attr.author = image['Author']
     return response
示例#18
0
def start_recognize_faces_stream():
    """Run a webcam loop: detect, embed and classify faces, drawing results.

    Opens the default capture device and, for every frame, extracts faces,
    computes embeddings, classifies them and overlays bounding boxes, labels
    and per-stage timing text. Press 'q' in the display window to stop.
    """
    capture = cv2.VideoCapture(0)
    while True:
        _, frame = capture.read()
        pil_frame = ImageUtils.cv2_to_pillow_image(frame)
        time_label = []
        current_time = time.time()
        faces, bounding_boxes = FaceExtracter.extracts(pil_frame, return_bounding_boxes=True)
        extract_time = time.time() - current_time
        time_label.append("extract_time: {}".format(extract_time))

        if faces:
            for bounding_box in bounding_boxes:
                # BUG FIX: `numpy.int` was a deprecated alias removed in
                # NumPy 1.24; the builtin int is the documented replacement.
                left, top, right, bottom = bounding_box.astype(int)
                cv2.rectangle(frame, (left, top), (right, bottom), (255, 255, 255))

            current_time = time.time()
            faces_embeddings = ImageEmbedder.embeds(faces)
            embed_time = time.time() - current_time
            time_label.append("embed_time: {}".format(embed_time))

            current_time = time.time()
            labels, confidents = FaceClassifier.classifies(faces_embeddings)
            classify_time = time.time() - current_time
            time_label.append("classify_time: {}".format(classify_time))

            for (label, confident, bounding_box) in zip(labels, confidents, bounding_boxes):
                left, top, right, bottom = bounding_box.astype(int)
                cv2.putText(frame,  f"{label}[{confident}]", (left, max(top - 5, 0)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))

        cv2.putText(frame, ",".join(time_label), (8, 24), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

        cv2.imshow("Video Stream", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # When everything done, release the capture
    capture.release()
    cv2.destroyAllWindows()
示例#19
0
    def handle(self, message, from_name_full, msg_obj):
        """Answer a chat command by querying Wolfram|Alpha and posting images.

        NOTE(review): this is Python 2 code (`print` statement,
        `urllib.urlretrieve`) — it will not run under Python 3 without
        porting; confirm which interpreter hosts this bot.

        message        -- tokenized command; everything after the first token
                          becomes the query string
        from_name_full -- display name used to address the reply
        msg_obj        -- incoming message object; `msg_obj['from'].bare`
                          identifies the room to notify
        """
        args = ' '.join(message[1:])
        res = self.client.get_query(args)

        # Debug dump of every pod id returned by the query.
        for pod in res.pods:
            print(pod.id)

        notify_message = "%s, This is what I found for you:" % from_name_full

        for pod in res.pods:
            if pod.img is not None:
                img_url = pod.img
                pod_id = pod.id
                # download the image to see its size
                img_file_name = str(uuid.uuid1()) + '.gif'
                urllib.urlretrieve(img_url, img_file_name)
                (width, height) = ImageUtils.get_image_size(img_file_name)
                print "Width: %d Height: %d" % (width, height)
                # Only embed images that fit under the height cap; oversized
                # or unreadable images are skipped but still cleaned up.
                if width is not None and height is not None and height < MAX_IMG_HEIGHT:
                    notify_message += "<br><strong>%s</strong><br><img src='%s'/>" % (pod_id, img_url)
                os.remove(img_file_name)

        self.bot.notify_room_html(notify_message, msg_obj['from'].bare)
        return
 def __init__(self, application, request, **kwargs):
     """Initialize the handler with its image utils, worker pool and
     default error response template."""
     super().__init__(application, request, **kwargs)
     self.image_utils = ImageUtils(system_config)
     self.executor = ThreadPoolExecutor(workers)
     self.exception = Response(system_config.response_def_map)
    def post(self):
        """Simple prediction endpoint: raw image bytes in the request body.

        Routes the image to a model interface selected purely by image size,
        rejects models configured with a multi-entry ExecuteMap (this API
        takes no param_key), and writes the prediction JSON. Error paths
        finish early with a status code: -999 no models loaded, 999 no
        matching interface, 474 unsupported ExecuteMap.
        """
        uid = str(uuid.uuid1())
        param_key = None
        start_time = time.time()
        if interface_manager.total == 0:
            logger.info(
                'There is currently no model deployment and services are not available.'
            )
            return self.finish(
                json_encode({
                    self.uid_key: uid,
                    self.message_key: "",
                    self.status_bool_key: False,
                    self.status_code_key: -999
                }))

        # The whole request body is the image payload (no JSON envelope).
        bytes_batch, response = self.image_utils.get_bytes_batch(
            self.request.body)

        if not bytes_batch:
            logger.error('Response[{}] - {} ms'.format(
                response, (time.time() - start_time) * 1000))
            return self.finish(json_encode(response))

        # Model routing is by the first image's WxH size string.
        image_sample = bytes_batch[0]
        image_size = ImageUtils.size_of_image(image_sample)
        size_string = "{}x{}".format(image_size[0], image_size[1])

        interface = interface_manager.get_by_size(size_string)
        if not interface:
            logger.info('Service is not ready!')
            return self.finish(
                json_encode({
                    self.message_key: "",
                    self.status_bool_key: False,
                    self.status_code_key: 999
                }))

        exec_map = interface.model_conf.exec_map
        if exec_map and len(exec_map.keys()) > 1:
            # Multiple exec entries would require a param_key, which this
            # endpoint does not accept — refuse the request.
            logger.info(
                '[{}] - [{} {}] | [{}] - Size[{}] - Error[{}] - {} ms'.format(
                    uid, self.request.remote_ip, self.request.uri,
                    interface.name, size_string,
                    "The model is configured with ExecuteMap, but the api do not support this param.",
                    round((time.time() - start_time) * 1000)))
            return self.finish(
                json_encode({
                    self.message_key: "the api do not support [ExecuteMap].",
                    self.status_bool_key: False,
                    self.status_code_key: 474
                }))
        elif exec_map and len(exec_map.keys()) == 1:
            # Exactly one entry: use it implicitly.
            param_key = list(interface.model_conf.exec_map.keys())[0]

        image_batch, response = ImageUtils.get_image_batch(
            interface.model_conf, bytes_batch, param_key=param_key)

        if not image_batch:
            logger.error(
                '[{}] - [{}] | [{}] - Size[{}] - Response[{}] - {} ms'.format(
                    uid, self.request.remote_ip, interface.name, size_string,
                    response, (time.time() - start_time) * 1000))
            return self.finish(json_encode(response))

        result = interface.predict_batch(image_batch, None)
        logger.info(
            '[{}] - [{}] | [{}] - Size[{}] - Predict[{}] - {} ms'.format(
                uid, self.request.remote_ip, interface.name, size_string,
                result, (time.time() - start_time) * 1000))
        response[self.uid_key] = uid
        response[self.message_key] = result
        # Escape "</" so the JSON payload is safe to embed in HTML contexts.
        return self.write(
            json.dumps(response, ensure_ascii=False).replace("</", "<\\/"))
    def post(self):
        """Parameterized prediction endpoint (tornado coroutine).

        Parses the request body for the input image plus optional model_name,
        output_split, need_color and param_key; enforces the per-IP request
        limit; routes to a model interface by name or image size; handles
        colour separation, multi-crop models and ExecuteMap validation; then
        predicts and finishes with the JSON response. Status codes: -999 no
        models, -444 request limit, 999 no interface, 474 missing param_key.
        """
        uid = str(uuid.uuid1())
        start_time = time.time()
        data = self.parse_param()
        request_def_map = system_config.request_def_map
        input_data_key = request_def_map['InputData']
        model_name_key = request_def_map['ModelName']
        if input_data_key not in data.keys():
            raise tornado.web.HTTPError(400)

        model_name = ParamUtils.filter(data.get(model_name_key))
        output_split = ParamUtils.filter(data.get('output_split'))
        need_color = ParamUtils.filter(data.get('need_color'))
        param_key = ParamUtils.filter(data.get('param_key'))

        request_incr = self.request_incr
        request_count = " - Count[{}]".format(request_incr)
        log_params = " - ParamKey[{}]".format(param_key) if param_key else ""
        log_params += " - NeedColor[{}]".format(
            need_color) if need_color else ""

        if interface_manager.total == 0:
            logger.info(
                'There is currently no model deployment and services are not available.'
            )
            return self.finish(
                json_encode({
                    self.uid_key: uid,
                    self.message_key: "",
                    self.status_bool_key: False,
                    self.status_code_key: -999
                }))
        bytes_batch, response = self.image_utils.get_bytes_batch(
            data[input_data_key])

        if not bytes_batch:
            logger.error('[{}] - [{} {}] | - Response[{}] - {} ms'.format(
                uid, self.request.remote_ip, self.request.uri, response,
                (time.time() - start_time) * 1000))
            return self.finish(json_encode(response))

        # auxiliary_result = None

        # Model routing key: WxH of the first image in the batch.
        image_sample = bytes_batch[0]
        image_size = ImageUtils.size_of_image(image_sample)
        size_string = "{}x{}".format(image_size[0], image_size[1])

        if request_limit != -1 and request_incr > request_limit:
            logger.info(
                '[{}] - [{} {}] | Size[{}]{}{} - Error[{}] - {} ms'.format(
                    uid, self.request.remote_ip, self.request.uri, size_string,
                    request_count, log_params,
                    "Maximum number of requests exceeded",
                    round((time.time() - start_time) * 1000)))
            return self.finish(
                json_encode({
                    self.uid_key: uid,
                    self.message_key:
                    "The maximum number of requests has been exceeded",
                    self.status_bool_key: False,
                    self.status_code_key: -444
                }))
        # Explicit model name wins over size-based routing.
        if model_name_key in data and data[model_name_key]:
            interface = interface_manager.get_by_name(model_name)
        else:
            interface = interface_manager.get_by_size(size_string)
        if not interface:
            logger.info('Service is not ready!')
            return self.finish(
                json_encode({
                    self.uid_key: uid,
                    self.message_key: "",
                    self.status_bool_key: False,
                    self.status_code_key: 999
                }))

        output_split = output_split if 'output_split' in data else interface.model_conf.output_split

        if need_color:
            bytes_batch = [
                color_extract.separate_color(_, color_map[need_color])
                for _ in bytes_batch
            ]

        # A corp-configured model splits each image into multiple crops.
        if interface.model_conf.corp_params:
            bytes_batch = corp_to_multi.parse_multi_img(
                bytes_batch, interface.model_conf.corp_params)

        if interface.model_conf.exec_map and not param_key:
            logger.info(
                '[{}] - [{} {}] | [{}] - Size[{}]{}{} - Error[{}] - {} ms'.
                format(
                    uid, self.request.remote_ip, self.request.uri,
                    interface.name, size_string, request_count, log_params,
                    "The model is missing the param_key parameter because the model is configured with ExecuteMap.",
                    round((time.time() - start_time) * 1000)))
            return self.finish(
                json_encode({
                    self.uid_key: uid,
                    self.message_key: "Missing the parameter [param_key].",
                    self.status_bool_key: False,
                    self.status_code_key: 474
                }))

        if interface.model_conf.external_model and interface.model_conf.corp_params:
            # Each corp_param group is predicted by its own size-matched
            # sub-interface; results are joined with the output separator.
            result = []
            len_of_result = []
            pre_corp_num = 0
            for corp_param in interface.model_conf.corp_params:
                corp_size = corp_param['corp_size']
                corp_num_list = corp_param['corp_num']
                corp_num = corp_num_list[0] * corp_num_list[1]
                sub_bytes_batch = bytes_batch[pre_corp_num:pre_corp_num +
                                              corp_num]
                # BUG FIX: the offset must accumulate across groups; the
                # original `pre_corp_num = corp_num` sliced the wrong crops
                # whenever there were three or more corp_params (identical
                # behavior for one or two groups).
                pre_corp_num += corp_num
                size_string = "{}x{}".format(corp_size[0], corp_size[1])

                sub_interface = interface_manager.get_by_size(size_string)

                image_batch, response = ImageUtils.get_image_batch(
                    sub_interface.model_conf,
                    sub_bytes_batch,
                    param_key=param_key)

                text = yield self.predict(sub_interface,
                                          image_batch,
                                          output_split,
                                          size_string,
                                          start_time,
                                          log_params,
                                          request_count,
                                          uid=uid)
                result.append(text)
                len_of_result.append(
                    len(result[0].split(
                        sub_interface.model_conf.category_split)))

            response[
                self.message_key] = interface.model_conf.output_split.join(
                    result)
            if interface.model_conf.corp_params and interface.model_conf.output_coord:
                # final_result = auxiliary_result + "," + response[self.message_key]
                # if auxiliary_result else response[self.message_key]
                final_result = response[self.message_key]
                response[self.message_key] = corp_to_multi.get_coordinate(
                    label=final_result,
                    param_group=interface.model_conf.corp_params,
                    title_index=[i for i in range(len_of_result[0])])
            return self.finish(
                json.dumps(response, ensure_ascii=False).replace("</", "<\\/"))
        else:
            image_batch, response = ImageUtils.get_image_batch(
                interface.model_conf, bytes_batch, param_key=param_key)

        # if interface.model_conf.batch_model:
        #     auxiliary_index = list(interface.model_conf.batch_model.keys())[0]
        #     auxiliary_name = list(interface.model_conf.batch_model.values())[0]
        #     auxiliary_interface = interface_manager.get_by_name(auxiliary_name)
        #     auxiliary_image_batch, response = ImageUtils.get_image_batch(
        #         auxiliary_interface.model_conf,
        #         bytes_batch,
        #         param_key=param_key
        #     )
        #     auxiliary_result = yield self.predict(
        #         auxiliary_interface,
        #         auxiliary_image_batch[auxiliary_index: auxiliary_index+1],
        #         output_split,
        #         size_string,
        #         start_time
        #     )
        #     image_batch = np.delete(image_batch, auxiliary_index, axis=0).tolist()

        if not image_batch:
            logger.error(
                '[{}] - [{} {}] | [{}] - Size[{}] - Response[{}] - {} ms'.
                format(uid, self.request.remote_ip, self.request.uri,
                       interface.name, size_string, response,
                       round((time.time() - start_time) * 1000)))
            response[self.uid_key] = uid
            return self.finish(json_encode(response))

        response[self.message_key] = yield self.predict(interface,
                                                        image_batch,
                                                        output_split,
                                                        size_string,
                                                        start_time,
                                                        log_params,
                                                        request_count,
                                                        uid=uid)
        response[self.uid_key] = uid
        # Persist the sample asynchronously; never blocks the response.
        self.executor.submit(self.save_image, uid, response[self.message_key],
                             bytes_batch[0])
        # if interface.model_conf.corp_params and interface.model_conf.output_coord:
        #     # final_result = auxiliary_result + "," + response[self.message_key]
        #     # if auxiliary_result else response[self.message_key]
        #     final_result = response[self.message_key]
        #     response[self.message_key] = corp_to_multi.get_coordinate(
        #         label=final_result,
        #         param_group=interface.model_conf.corp_params,
        #         title_index=[0]
        #     )
        return self.finish(
            json.dumps(response, ensure_ascii=False).replace("</", "<\\/"))
 def __init__(self, **kwargs):
     """Set up the handler and bind an ImageUtils helper to the global config."""
     super().__init__(**kwargs)
     self.image_utils = ImageUtils(system_config)
    def post(self):
        """Fully-guarded prediction endpoint (tornado coroutine).

        Like the other parameterized handler but with additional gates,
        checked in order: per-size request limits, IP whitelist, global
        request limit, blacklist (with automatic blacklisting of repeat
        offenders), per-IP limit. Then routes by model name or image size,
        validates param_key against the model's ExecuteMap, handles
        multi-crop external models, predicts and finishes with JSON.
        Status codes: -999 no models, -250 bad size, -111 whitelist,
        -555 global limit, -110 blacklist, -444 IP limit, 999 no
        interface, 474 param_key errors.
        """
        uid = str(uuid.uuid1())
        start_time = time.time()
        data = self.parse_param()
        request_def_map = system_config.request_def_map
        input_data_key = request_def_map['InputData']
        model_name_key = request_def_map['ModelName']
        if input_data_key not in data.keys():
            raise tornado.web.HTTPError(400)

        model_name = ParamUtils.filter(data.get(model_name_key))
        output_split = ParamUtils.filter(data.get('output_split'))
        need_color = ParamUtils.filter(data.get('need_color'))
        param_key = ParamUtils.filter(data.get('param_key'))
        extract_rgb = ParamUtils.filter(data.get('extract_rgb'))

        request_incr = self.request_incr
        global_count = self.global_request_incr
        request_count = " - Count[{}]".format(request_incr)
        log_params = " - ParamKey[{}]".format(param_key) if param_key else ""
        log_params += " - NeedColor[{}]".format(
            need_color) if need_color else ""

        if interface_manager.total == 0:
            # Roll back the counters incremented above: this request is
            # rejected and should not count toward the limits.
            self.request_desc()
            self.global_request_desc()
            logger.info(
                'There is currently no model deployment and services are not available.'
            )
            return self.finish(
                json_encode({
                    self.uid_key: uid,
                    self.message_key: "",
                    self.status_bool_key: False,
                    self.status_code_key: -999
                }))
        bytes_batch, response = self.image_utils.get_bytes_batch(
            data[input_data_key])

        if not bytes_batch:
            logger.error('[{}] - [{} {}] | - Response[{}] - {} ms'.format(
                uid, self.request.remote_ip, self.request.uri, response,
                (time.time() - start_time) * 1000))
            return self.finish(json_encode(response))

        image_sample = bytes_batch[0]
        image_size = ImageUtils.size_of_image(image_sample)
        size_string = "{}x{}".format(image_size[0], image_size[1])
        if system_config.request_size_limit and size_string not in system_config.request_size_limit:
            self.request_desc()
            self.global_request_desc()
            logger.info(
                '[{}] - [{} {}] | Size[{}] - [{}][{}] - Error[{}] - {} ms'.
                format(uid, self.request.remote_ip, self.request.uri,
                       size_string, global_count, log_params,
                       "Image size is invalid.",
                       round((time.time() - start_time) * 1000)))
            msg = system_config.request_size_limit.get("msg")
            msg = msg if msg else "The size of the picture is wrong. " \
                                  "Only the original image is supported. " \
                                  "Please do not take a screenshot!"
            return self.finish(
                json.dumps(
                    {
                        self.uid_key: uid,
                        self.message_key: msg,
                        self.status_bool_key: False,
                        self.status_code_key: -250
                    },
                    ensure_ascii=False))

        if system_config.use_whitelist:
            assert_whitelist = self.match_whitelist(self.request.remote_ip)
            if not assert_whitelist:
                logger.info(
                    '[{}] - [{} {}] | Size[{}]{}{} - Error[{}] - {} ms'.format(
                        uid, self.request.remote_ip, self.request.uri,
                        size_string, request_count, log_params,
                        "Whitelist limit",
                        round((time.time() - start_time) * 1000)))
                return self.finish(
                    json.dumps(
                        {
                            self.uid_key: uid,
                            self.message_key:
                            "Only allow IP access in the whitelist",
                            self.status_bool_key: False,
                            self.status_code_key: -111
                        },
                        ensure_ascii=False))

        if global_request_limit != -1 and global_count > global_request_limit:
            logger.info(
                '[{}] - [{} {}] | Size[{}]{}{} - Error[{}] - {} ms'.format(
                    uid, self.request.remote_ip, self.request.uri, size_string,
                    global_count, log_params,
                    "Maximum number of requests exceeded (G)",
                    round((time.time() - start_time) * 1000)))
            return self.finish(
                json.dumps(
                    {
                        self.uid_key: uid,
                        self.message_key: system_config.exceeded_msg,
                        self.status_bool_key: False,
                        self.status_code_key: -555
                    },
                    ensure_ascii=False))

        assert_blacklist = self.match_blacklist(self.request.remote_ip)
        if assert_blacklist:
            logger.info(
                '[{}] - [{} {}] | Size[{}]{}{} - Error[{}] - {} ms'.format(
                    uid, self.request.remote_ip, self.request.uri, size_string,
                    request_count, log_params,
                    "The ip is on the risk blacklist (IP)",
                    round((time.time() - start_time) * 1000)))
            return self.finish(
                json.dumps(
                    {
                        self.uid_key: uid,
                        self.message_key: system_config.exceeded_msg,
                        self.status_bool_key: False,
                        self.status_code_key: -110
                    },
                    ensure_ascii=False))
        if request_limit != -1 and request_incr > request_limit:
            # Over the per-IP limit: record a risk hit and, after enough
            # hits, move the IP onto the persistent blacklist.
            self.risk_ip_count(self.request.remote_ip)
            assert_blacklist_trigger = system_config.blacklist_trigger_times != -1
            if self.risk_ip(
                    self.request.remote_ip
            ) > system_config.blacklist_trigger_times and assert_blacklist_trigger:
                if self.request.remote_ip not in blacklist():
                    set_blacklist(self.request.remote_ip)
                    update_blacklist()
            logger.info(
                '[{}] - [{} {}] | Size[{}]{}{} - Error[{}] - {} ms'.format(
                    uid, self.request.remote_ip, self.request.uri, size_string,
                    request_count, log_params,
                    "Maximum number of requests exceeded (IP)",
                    round((time.time() - start_time) * 1000)))
            return self.finish(
                json.dumps(
                    {
                        self.uid_key: uid,
                        self.message_key: system_config.exceeded_msg,
                        self.status_bool_key: False,
                        self.status_code_key: -444
                    },
                    ensure_ascii=False))
        # Explicit model name wins over size-based routing.
        if model_name_key in data and data[model_name_key]:
            interface = interface_manager.get_by_name(model_name)
        else:
            interface = interface_manager.get_by_size(size_string)
        if not interface:
            self.request_desc()
            self.global_request_desc()
            logger.info('Service is not ready!')
            return self.finish(
                json_encode({
                    self.uid_key: uid,
                    self.message_key: "",
                    self.status_bool_key: False,
                    self.status_code_key: 999
                }))

        output_split = output_split if 'output_split' in data else interface.model_conf.output_split

        if interface.model_conf.corp_params:
            bytes_batch = corp_to_multi.parse_multi_img(
                bytes_batch, interface.model_conf.corp_params)

        # Validate param_key against the model's ExecuteMap configuration.
        exec_map = interface.model_conf.exec_map
        if exec_map and len(exec_map.keys()) > 1 and not param_key:
            self.request_desc()
            self.global_request_desc()
            logger.info(
                '[{}] - [{} {}] | [{}] - Size[{}]{}{} - Error[{}] - {} ms'.
                format(
                    uid, self.request.remote_ip, self.request.uri,
                    interface.name, size_string, request_count, log_params,
                    "The model is missing the param_key parameter because the model is configured with ExecuteMap.",
                    round((time.time() - start_time) * 1000)))
            return self.finish(
                json_encode({
                    self.uid_key: uid,
                    self.message_key: "Missing the parameter [param_key].",
                    self.status_bool_key: False,
                    self.status_code_key: 474
                }))
        elif exec_map and param_key and param_key not in exec_map:
            self.request_desc()
            self.global_request_desc()
            logger.info(
                '[{}] - [{} {}] | [{}] - Size[{}]{}{} - Error[{}] - {} ms'.
                format(uid, self.request.remote_ip, self.request.uri,
                       interface.name, size_string, request_count, log_params,
                       "The param_key parameter is not support in the model.",
                       round((time.time() - start_time) * 1000)))
            return self.finish(
                json_encode({
                    self.uid_key: uid,
                    self.message_key: "Not support the parameter [param_key].",
                    self.status_bool_key: False,
                    self.status_code_key: 474
                }))
        elif exec_map and len(exec_map.keys()) == 1:
            param_key = list(interface.model_conf.exec_map.keys())[0]

        if interface.model_conf.external_model and interface.model_conf.corp_params:
            # Each corp_param group is predicted by its own size-matched
            # sub-interface; results are joined with the output separator.
            result = []
            len_of_result = []
            pre_corp_num = 0
            for corp_param in interface.model_conf.corp_params:
                corp_size = corp_param['corp_size']
                corp_num_list = corp_param['corp_num']
                corp_num = corp_num_list[0] * corp_num_list[1]
                sub_bytes_batch = bytes_batch[pre_corp_num:pre_corp_num +
                                              corp_num]
                # BUG FIX: the offset must accumulate across groups; the
                # original `pre_corp_num = corp_num` sliced the wrong crops
                # whenever there were three or more corp_params (identical
                # behavior for one or two groups).
                pre_corp_num += corp_num
                size_string = "{}x{}".format(corp_size[0], corp_size[1])

                sub_interface = interface_manager.get_by_size(size_string)

                image_batch, response = ImageUtils.get_image_batch(
                    sub_interface.model_conf,
                    sub_bytes_batch,
                    param_key=param_key)

                text = yield self.predict(sub_interface,
                                          image_batch,
                                          output_split,
                                          size_string,
                                          start_time,
                                          log_params,
                                          request_count,
                                          uid=uid)
                result.append(text)
                len_of_result.append(
                    len(result[0].split(
                        sub_interface.model_conf.category_split)))

            response[
                self.message_key] = interface.model_conf.output_split.join(
                    result)
            if interface.model_conf.corp_params and interface.model_conf.output_coord:
                # final_result = auxiliary_result + "," + response[self.message_key]
                # if auxiliary_result else response[self.message_key]
                final_result = response[self.message_key]
                response[self.message_key] = corp_to_multi.get_coordinate(
                    label=final_result,
                    param_group=interface.model_conf.corp_params,
                    title_index=[i for i in range(len_of_result[0])])
            return self.finish(
                json.dumps(response, ensure_ascii=False).replace("</", "<\\/"))
        else:
            image_batch, response = ImageUtils.get_image_batch(
                interface.model_conf,
                bytes_batch,
                param_key=param_key,
                extract_rgb=extract_rgb)

        if not image_batch:
            self.request_desc()
            self.global_request_desc()
            logger.error(
                '[{}] - [{} {}] | [{}] - Size[{}] - Response[{}] - {} ms'.
                format(uid, self.request.remote_ip, self.request.uri,
                       interface.name, size_string, response,
                       round((time.time() - start_time) * 1000)))
            response[self.uid_key] = uid
            return self.finish(json_encode(response))

        # NOTE(review): this call passes fewer arguments than the other
        # self.predict call sites (no size_string/start_time/log_params/
        # request_count/uid) — confirm predict() defaults them as intended.
        predict_result = yield self.predict(interface, image_batch,
                                            output_split)

        # if need_color:
        #     # only support six label and size [90x35].
        #     color_batch = np.resize(image_batch[0], (90, 35, 3))
        #     need_index = color_extract.predict_color(image_batch=[color_batch], color=color_map[need_color])
        #     predict_result = "".join([v for i, v in enumerate(predict_result) if i in need_index])

        uid_str = "[{}] - ".format(uid)
        logger.info(
            '{}[{} {}] | [{}] - Size[{}]{}{} - Predict[{}] - {} ms'.format(
                uid_str, self.request.remote_ip, self.request.uri,
                interface.name, size_string, request_count, log_params,
                predict_result, round((time.time() - start_time) * 1000)))
        response[self.message_key] = predict_result
        response[self.uid_key] = uid
        # Persist the sample asynchronously; never blocks the response.
        self.executor.submit(self.save_image, uid, response[self.message_key],
                             bytes_batch[0])
        if interface.model_conf.corp_params and interface.model_conf.output_coord:
            # final_result = auxiliary_result + "," + response[self.message_key]
            # if auxiliary_result else response[self.message_key]
            final_result = response[self.message_key]
            response[self.message_key] = corp_to_multi.get_coordinate(
                label=final_result,
                param_group=interface.model_conf.corp_params,
                title_index=[0])
        return self.finish(
            json.dumps(response, ensure_ascii=False).replace("</", "<\\/"))
# Bootstrap paths for the global configuration object.
conf_path = 'config.yaml'
model_path = 'model'
graph_path = 'graph'

system_config = Config(
    conf_path=conf_path, model_path=model_path, graph_path=graph_path)

# Map each handler class to its URL route, as declared in the config file.
route_map = {entry['Class']: entry['Route'] for entry in system_config.route_map}

# Register the request-signing credential pair from the config.
_auth_pair = {
    'accessKey': system_config.access_key,
    'secretKey': system_config.secret_key,
}
sign.set_auth([_auth_pair])

logger = system_config.logger
interface_manager = InterfaceManager()
image_utils = ImageUtils(system_config)


@app.after_request
def after_request(response):
    """Attach a permissive CORS header to every outgoing response."""
    response.headers["Access-Control-Allow-Origin"] = "*"
    return response


@app.errorhandler(400)
def server_error(error=None):
    """Render the JSON body for HTTP 400 errors.

    `error` is the exception Flask passes to error handlers. The original
    dereferenced `error.code` unconditionally, which raised AttributeError
    when the handler was invoked with the default `error=None`; fall back
    to 400 in that case.
    """
    message = "Bad Request"
    code = error.code if error is not None else 400
    return jsonify(message=message, code=code, success=False)


@app.errorhandler(500)
示例#26
0
def _write_annot_datasets(annots, example_id):
    """Write the action/startFrame/endFrame/bboxes datasets for one example.

    The nested indexing mirrors the MATLAB-style layout of the loaded
    ``annotations`` structure (module-level global).
    """
    record = annotations['annot'][0][example_id][2][0][0]
    annots.create_dataset('action', data=record[2][0][0])
    annots.create_dataset('startFrame', data=record[1][0][0])
    annots.create_dataset('endFrame', data=record[0][0][0])
    annots.create_dataset('bboxes', data=record[3])


def Convert(actionPath, example, action, test_set, id, tiny_set):
    """Convert one example directory into HDF5 groups (rgb, flow, annot).

    :param actionPath: directory holding all examples of this action
    :param example: name of the example sub-directory
    :param action: action label, used only for the optical-flow output path
    :param test_set: collection of ids belonging to the test split
    :param id: numeric id of this example (shadows builtin ``id``; kept
               for interface compatibility with existing callers)
    :param tiny_set: when truthy, also copy the example into ``tiny_datset``
    """
    print("Started: ", example)
    examplePath = os.path.join(actionPath, example)

    frames = []
    images = []
    compressedFlowImages = []
    for frame in sorted(glob(examplePath + "/*.jpg")):
        # Read each JPEG once: keep the raw bytes for storage and a decoded
        # array for optical-flow computation. A context manager closes the
        # file handle deterministically (the original leaked it).
        with open(frame, 'rb') as fh:
            im = fh.read()
        frames.append(im)
        images.append(np.array(Image.open(io.BytesIO(im))))

    if not os.path.exists(os.path.join(flowDir, action, example)):
        os.makedirs(os.path.join(flowDir, action, example))

    flowFrames = ImageUtils.ComputeOpticalFlow(
        np.array(images), os.path.join(flowDir, action, example))
    for i, ff in enumerate(flowFrames):
        r, buf = cv2.imencode('.jpg', ff)
        # tobytes() replaces the deprecated (and since-removed) tostring().
        compressedFlowImages.append(buf.tobytes())

    # Route the example into the test or train HDF5 group (module globals).
    if id in test_set:
        ex = test.create_group(example)
    else:
        ex = train.create_group(example)

    if debug:
        pdb.set_trace()

    ex.create_dataset("rgb", data=frames)
    ex.create_dataset("flow", data=compressedFlowImages)

    annots = ex.create_group("annot")

    # NOTE: the rgb/flow datasets and the (possibly empty) annot group are
    # already created at this point; this order is preserved so the on-disk
    # layout for examples without annotations matches the original.
    if example not in annot_mapping:
        print("EXCEPTION: ", example)
        return

    example_id = annot_mapping[example]
    _write_annot_datasets(annots, example_id)

    if tiny_set:
        # Duplicate the example into the tiny dataset; annotation writing
        # shares the helper above instead of repeating the indexing.
        tiny_ex = tiny_datset.create_group(example)
        tiny_ex.create_dataset("rgb", data=frames)
        tiny_ex.create_dataset("flow", data=compressedFlowImages)
        _write_annot_datasets(tiny_ex.create_group("annot"), example_id)

    # Bug fix: the original printed ``id in test_set``, which is True for
    # *test* examples — the opposite of what the "IsTrain" label claims.
    print(example, ", IsTrain: ", id not in test_set)
示例#27
0
import time
import cv2
import numpy as np
import airsim
from utils import ImageUtils

# Target frame rate for the environment's step pacing (see Gym.__init__).
DEFAULT_FPS = 8
# Shared image-processing helper instance (project-local utils.ImageUtils).
utils = ImageUtils()


class Gym:
    """Gym-style environment wrapper around the AirSim car simulator.

    NOTE(review): this snippet looks truncated — ``reset`` ends abruptly
    after the sleep with no return value or state re-initialization;
    confirm against the original source before relying on it.
    """

    def __init__(self, show_cam=True):
        # connect to the AirSim simulator
        self.client = airsim.CarClient()
        self.client.confirmConnection()
        self.client.enableApiControl(True)
        self.car_controls = airsim.CarControls()

        # Wall-clock time at which the current episode started.
        self.episode_start = time.time()
        # Whether to display the camera feed (flag only; usage not visible here).
        self.SHOW_CAM = show_cam

        # Step pacing: period is the target number of seconds between actions.
        self.fps = DEFAULT_FPS
        self.period = 1 / DEFAULT_FPS
        self.previous_action_time = None
        self.step_num = 0

        self.previous_distance = None

    def reset(self):
        # Reset the simulator and give it a brief moment to settle.
        self.client.reset()
        time.sleep(0.1)
示例#28
0
import module as _module
import numpy as np
import constant
import sys

sys.path.append('../')

import utils.ImageUtils as IGU

utils = IGU.ImageUtils()

# Smoke test: run the model on a single random 400x400 RGB image
# (batch of one, values in [0, 1)).
random_image = np.random.rand(1, 400, 400, 3)

predict = _module.build_model.predict(random_image)

# Convert raw prediction scores to class indices, then map them to
# human-readable labels via the label file referenced in ``constant``.
y_predict = utils.getlistargmax(predict)
y_predict_label = utils.getLabel(constant.root, constant.filename,
                                 np.absolute(y_predict))

print(y_predict_label)
示例#29
0
 def PruneImages(self, request, context):
     """RPC handler: prune unused images, then reply with an empty
     ``PruneImages_Response`` message."""
     ImageUtils.PruneImages()
     reply = Manager.PruneImages_Response()
     return reply
示例#30
0
 def RemoveImage(self, request, context):
     """RPC handler: remove the image named in the request (honoring its
     ``force`` flag), then reply with an empty ``RemoveImage_Response``."""
     ImageUtils.RemoveImage(image_id=request.image_id, force=request.force)
     reply = Manager.RemoveImage_Response()
     return reply
示例#31
0
# Bug fix: this script calls ``cv2`` and ``plt`` but never imported them,
# so it raised NameError when run standalone.
import cv2
import matplotlib.pyplot as plt

import utils.ImageUtils as iu

# Apply a mosaic (pixelation) effect to every face detected in the image.

# Path to the pre-trained Haar cascade for frontal-face detection.
cascade_file = "../download/haarcascade_frontalface_alt.xml"
# Build the cascade classifier from the file.
cascade = cv2.CascadeClassifier(cascade_file)

# Load the image and convert it to grayscale for detection.
img = cv2.imread("subaru.jpg")
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Run face detection on the grayscale image.
face_list = cascade.detectMultiScale(img_gray)

# Abort if no faces were found.
if len(face_list) == 0:
    print("失敗")
    quit()

# Pixelate each detected face region (mosaic block size 10).
for (x, y, w, h) in face_list:
    img = iu.mosaic(img, (x, y, x + w, y + h), 10)

# Save the result, then display it (convert BGR -> RGB for matplotlib).
cv2.imwrite("opencv/face_match.jpg", img)
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.show()