Ejemplo n.º 1
0
def _colorize(url):
    """Colorize the images in url.

    Args:
        url (str): a url to an image, or a path to an image, or a dir for images.
    """

    if is_url(url):
        _colorize_one_img(url)
        return

    if CMD_CWD != '':
        # Resolve the path relative to the dir of the command which invoked
        # this script; restore the working dir even if resolution fails.
        oldwd = os.getcwd()
        os.chdir(CMD_CWD)
        try:
            url = os.path.abspath(os.path.expanduser(url))
        finally:
            os.chdir(oldwd)
    else:
        url = os.path.abspath(os.path.expanduser(url))

    if os.path.isdir(url):
        # Colorize every image file found in the directory.
        for img in os.listdir(url):
            _colorize_one_img(os.path.join(url, img))
    else:
        _colorize_one_img(url)
Ejemplo n.º 2
0
def _read_cv_image_from(url):
    """Read an image from url or file as grayscale opencv image.

    Args:
        url (str): a URL or a local file path of the image.

    Returns:
        The decoded grayscale image as a numpy array, or None when
        cv.imdecode cannot decode the bytes.
    """

    # Read the raw bytes with a context manager so the handle is always
    # closed (the original toolz pipe leaked the opened file object).
    if is_url(url):
        with urllib.request.urlopen(url) as stream:
            data = stream.read()
    else:
        with open(url, 'rb') as stream:
            data = stream.read()

    buf = np.asarray(bytearray(data), dtype="uint8")
    return cv.imdecode(buf, cv.IMREAD_GRAYSCALE)
Ejemplo n.º 3
0
def azface_detect(client, img_url, **kwargs):
    """Detect faces using Azure face API.

    Args:
        client: an authenticated Azure face client.
        img_url (str): a URL or a local file path of a photo.
        **kwargs: extra keyword arguments forwarded to the detect call
            (e.g. return_face_attributes as a FaceAttributeType or a
            list of strings).

    Returns:
        The faces detected by the API.
    """

    if not is_url(img_url):
        # Local photo: upload its contents as a binary stream.
        with open(img_url, 'rb') as stream:
            return client.face.detect_with_stream(stream, **kwargs)

    # Remote photo: let Azure fetch it from the URL directly.
    return client.face.detect_with_url(img_url, **kwargs)
Ejemplo n.º 4
0
def read_cv_image_from(url):
    """Read an image from url or file as grayscale opencv image.

    **Note**: OpenCV return the image as numpy array, which can also directly
    be used by other Python image libraries.  However, the color space in
    OpenCV is BGR instead of the popular RGB.

    Args:
        url (str): a URL or a local file path of the image.

    Returns:
        The decoded color (BGR) image as a numpy array, or None when
        cv.imdecode cannot decode the bytes.
    """

    # Read the raw bytes with a context manager so the handle is always
    # closed (the original toolz pipe leaked the opened file object).
    if is_url(url):
        with urllib.request.urlopen(url) as stream:
            data = stream.read()
    else:
        with open(url, 'rb') as stream:
            data = stream.read()

    buf = np.asarray(bytearray(data), dtype="uint8")
    return cv.imdecode(buf, cv.IMREAD_COLOR)
Ejemplo n.º 5
0
def azface_add(client, img_url, name, person=None):
    """Add the face in img_url to the person name.

    Args:
        client: an authenticated Azure face client.
        img_url (str): a URL or a local file path of a photo.
        name (str): the person name, also used as the person group ID
            and the person group name.
        person: an existing person object; looked up (and created if
            absent) when not given.

    Returns:
        The person the face was added to.
    """

    display(read_cv_image_from(img_url), frombgr=True)

    # Use the person name as person group ID and person group name

    person_group_id = name
    person_group_name = name
    person_name = name

    if not person:  # Get the person information.  Create it if not available

        # Get the list of person groups

        person_groups = client.person_group.list()
        person_groups = [x.person_group_id for x in person_groups]

        # Create the person group first if not available, so that listing
        # its persons below cannot fail with a group-not-found error.

        if person_group_id not in person_groups:
            client.person_group.create(person_group_id, name=person_group_name)

        # Get the list of persons in the person group

        person_list = client.person_group_person.list(person_group_id)

        # Create a person belongs to the person group if not available

        try:
            person = next(x for x in person_list if x.name == person_name)
        except StopIteration:
            person = client.person_group_person.create(person_group_id,
                                                       name=person_name)

    # Add face for the person

    if is_url(img_url):  # Photo from URL
        client.person_group_person.add_face_from_url(person_group_id,
                                                     person.person_id, img_url)
    else:  # Photo from a file
        with open(img_url, 'rb') as file:
            # Use add_face_from_stream for file objects; the original
            # passed the stream to add_face_from_url, which expects a URL.
            client.person_group_person.add_face_from_stream(
                person_group_id, person.person_id, file)

    return person
Ejemplo n.º 6
0
# Create client.

client = ComputerVisionClient(endpoint, credentials)

# Check the URL supplied or path exists and is an image.

# Send provided image (url or path) to azure to extract text.

# ----------------------------------------------------------------------
# URL or path
# ----------------------------------------------------------------------

path = args.path

# ----------------------------------------------------------------------
# Objects
# ----------------------------------------------------------------------

if is_url(path):
    analysis = client.detect_objects(path)
else:
    # Local file: resolve relative to the invoking command's dir and stream.
    path = os.path.join(get_cmd_cwd(), path)
    with open(path, 'rb') as fstream:
        analysis = client.detect_objects_in_stream(fstream)

# Print each bounding box as "x1 y1 x2 y2".  Use `obj`, not `object`,
# to avoid shadowing the builtin.
for obj in analysis.objects:
    rect = obj.rectangle
    print(f"{rect.x} {rect.y} {rect.x + rect.w} {rect.y + rect.h}")
Ejemplo n.º 7
0
# ----------------------------------------------------------------------

subscription_key, endpoint = azkey(KEY_FILE, SERVICE, verbose=False)

# Set credentials.

credentials = CognitiveServicesCredentials(subscription_key)

# Create client.

client = ComputerVisionClient(endpoint, credentials)

# Check the URL supplied or path exists and is an image.

# Send provided image (url or path) to azure to extract text.

url = args.path

# Ask Azure for the image tags, either directly from the URL or by
# streaming the local file.
features = [VisualFeatureTypes.tags]

if is_url(url):
    analysis = client.analyze_image(url, visual_features=features)
else:
    path = os.path.join(get_cmd_cwd(), url)
    with open(path, 'rb') as fstream:
        analysis = client.analyze_image_in_stream(fstream,
                                                  visual_features=features)

# Report confidence,name for each sufficiently confident tag.
for tag in analysis.tags:
    if tag.confidence > 0.2:
        print("{:4.2f},{}".format(round(tag.confidence, 2), tag.name))
Ejemplo n.º 8
0
# Command-line interface: a single positional photo path or URL.

parser = argparse.ArgumentParser(
    prog='detect',
    description='Detect faces in an image.')

parser.add_argument(
    'path',
    type=str,
    help='path or URL of a photo where faces will be detected')

args = parser.parse_args()

# ----------------------------------------------------------------------
# Setup
# ----------------------------------------------------------------------

# URLs are passed through as-is; local paths are made absolute.
img_url = args.path if is_url(args.path) else get_abspath(args.path)
face_attrs = ['age', 'gender', 'glasses', 'emotion', 'occlusion']

# ----------------------------------------------------------------------
# Call face API to detect and describe faces
# ----------------------------------------------------------------------

# Request subscription key and endpoint from user.
subscription_key, endpoint = get_private()

credentials = CognitiveServicesCredentials(subscription_key)  # Set credentials
client = FaceClient(endpoint, credentials)  # Setup Azure face API client
faces = azface_detect(client, img_url, return_face_attributes=face_attrs)
print_detection_results(faces)
Ejemplo n.º 9
0
)

parser.add_argument(
    'target',
    help='path or URL of a photo of the faces to be found')

parser.add_argument(
    'candidate',
    help='path or URL of a photo to find expected target faces')

args = parser.parse_args()

# ----------------------------------------------------------------------
# Setup
# ----------------------------------------------------------------------

# URLs are used directly; local paths are made absolute.
# Get the photo of target faces.
target_url = args.target if is_url(args.target) else get_abspath(args.target)
# Get the photo to be checked.
candidate_url = (args.candidate if is_url(args.candidate)
                 else get_abspath(args.candidate))

# Directories are not supported -- exactly one photo per argument.
if os.path.isdir(target_url) or os.path.isdir(candidate_url):
    stop("Only one photo allowed!")

# ----------------------------------------------------------------------
# Prepare Face API client
# ----------------------------------------------------------------------

# Request subscription key and endpoint from user.
subscription_key, endpoint = get_private()

credentials = CognitiveServicesCredentials(subscription_key)  # Set credentials
client = FaceClient(endpoint, credentials)  # Setup Azure face API client
Ejemplo n.º 10
0
    prog='similar',
    parents=[option_parser],
    description='Find similar faces between images.')

parser.add_argument(
    'target',
    help='path or URL of a photo of the faces to be found')

parser.add_argument(
    'candidate',
    help='path or URL of a photo to find expected target faces')

args = parser.parse_args()

# ----------------------------------------------------------------------
# Setup
# ----------------------------------------------------------------------

# URLs are used directly; local paths are made absolute.
# Get the photo of target faces.
target_url = (args.target if is_url(args.target)
              else get_abspath(args.target))
# Get the photo to be checked.
candidate_url = (args.candidate if is_url(args.candidate)
                 else get_abspath(args.candidate))
subscription_key, endpoint = args.key, args.endpoint

# Directories are not supported -- exactly one photo per argument.
if os.path.isdir(target_url) or os.path.isdir(candidate_url):
    stop("Only one photo allowed!")

# ----------------------------------------------------------------------
# Prepare Face API client
# ----------------------------------------------------------------------

# Fall back to the key file when key/endpoint were not given on the
# command line.
if not subscription_key or not endpoint:
    subscription_key, endpoint = get_face_api_key_endpoint(
        *azkey(args.key_file, SERVICE, verbose=False))
Ejemplo n.º 11
0
def main():
    """Run DeepSpeech transcription on a wav file given as a path or URL."""

    # -----------------------------------------------------------------------
    # Load pre-built models
    # -----------------------------------------------------------------------

    scorer = os.path.join(os.getcwd(), "deepspeech-0.9.3-models.scorer")
    model = os.path.join(os.getcwd(), "deepspeech-0.9.3-models.pbmm")

    parser = argparse.ArgumentParser(
        description='Running DeepSpeech inference.')

    parser.add_argument('--model',
                        default=model,
                        help='Path to the model (protocol buffer binary file)')
    parser.add_argument('--scorer',
                        default=scorer,
                        help='Path to the external scorer file')
    parser.add_argument('audio', help='path or url to wav file')
    parser.add_argument('--beam_width',
                        type=int,
                        help='Beam width for the CTC decoder')
    parser.add_argument(
        '--lm_alpha',
        type=float,
        help='Language model weight (lm_alpha). If not specified, '
             'use default from the scorer package.')
    parser.add_argument(
        '--lm_beta',
        type=float,
        help='Word insertion bonus (lm_beta). If not specified, '
             'use default from the scorer package.')
    parser.add_argument('--extended',
                        required=False,
                        action='store_true',
                        help='Output string from extended metadata')
    parser.add_argument(
        '--json',
        required=False,
        action='store_true',
        help='Output json from metadata with timestamp of each word')
    parser.add_argument(
        '--candidate_transcripts',
        type=int,
        default=3,
        help='Number of candidate transcripts to include in JSON output')
    parser.add_argument('--hot_words',
                        type=str,
                        help='Hot-words and their boosts.')
    args = parser.parse_args()

    path = args.audio

    if is_url(path):
        # Existence check only: stream so we do not download the whole
        # audio file just to read the status code.
        response = requests.get(path, stream=True)
        try:
            if response.status_code != 200:
                print(
                    f"The URL does not appear to exist. Please check.\n    {path}")
                # Exit non-zero: the bare sys.exit() exited with status 0,
                # signalling success on failure.
                sys.exit(1)
        finally:
            response.close()
    else:
        path = os.path.join(get_cmd_cwd(), path)

    ds, desired_sample_rate = utils.load(args.model, args.scorer, False,
                                         args.beam_width, args.lm_alpha,
                                         args.lm_beta, args.hot_words)
    utils.deepspeech(ds, desired_sample_rate, path, "transcribe", False,
                     args.extended, args.json, args.candidate_transcripts)