Example 1
# ======================

parser = get_base_parser(
    'Efficiently Trainable Text-to-Speech System Based on ' +
    'Deep Convolutional Networks with Guided Attention', SENTENCE,
    SAVE_WAV_PATH)
# overwrite the --input argument defined by the base parser
parser.add_argument('--input',
                    '-i',
                    metavar='TEXT',
                    default=SENTENCE,
                    help='input text')
parser.add_argument('--ailia_audio',
                    action='store_true',
                    help='use ailia audio library')
args = update_parser(parser, check_input_type=False)

if args.ailia_audio:
    from pytorch_dc_tts_utils_ailia import get_test_data, save_to_wav
else:
    from pytorch_dc_tts_utils import get_test_data, save_to_wav


# ======================
# Main function
# ======================
def preprocess(SENTENCE):
    # L: padded character-index encoding of the input sentence
    L = get_test_data([SENTENCE], len(SENTENCE))
    # Y: mel-spectrogram buffer (80 mel bins), initialised to zeros and
    # filled step by step during synthesis
    zeros = np.zeros((1, 80, 1), np.float32)
    Y = zeros
    # A: attention matrix, produced later by the network
    A = None
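
# Usage sketch for the parser configured above; the script name below is
# assumed for illustration and is not given in this snippet:
#
#   python3 pytorch-dc-tts.py --input "Hello world." --ailia_audio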
Example 2
MODEL_LISTS = ['small', 'large']

SLEEP_TIME = 0

# ======================
# Argument Parser Config
# ======================
parser = get_base_parser('ImageNet classification Model', IMAGE_PATH, None)
parser.add_argument('-a',
                    '--arch',
                    metavar='ARCH',
                    default='small',
                    choices=MODEL_LISTS,
                    help='model lists: ' + ' | '.join(MODEL_LISTS) +
                    ' (default: small)')
args = update_parser(parser)

# ======================
# Parameters 2
# ======================
WEIGHT_PATH = f'mobilenetv3_{args.arch}.onnx'
MODEL_PATH = WEIGHT_PATH + '.prototxt'
REMOTE_PATH = 'https://storage.googleapis.com/ailia-models/mobilenetv3/'
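
# A minimal sketch of how these paths are typically consumed in ailia-models
# samples; the helper name and call signatures below are assumptions, not
# part of this example:
#
#   check_and_download_models(WEIGHT_PATH, MODEL_PATH, REMOTE_PATH)
#   net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)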


# ======================
# Main functions
# ======================
def recognize_from_image():
    # prepare input data
    input_data = load_image(args.input, (IMAGE_HEIGHT, IMAGE_WIDTH),
Example 3
# ======================
# Argument Parser Config
# ======================

parser = get_base_parser('CAIN', IMAGE_PATH, SAVE_IMAGE_PATH)
parser.add_argument('-i2',
                    '--input2',
                    metavar='IMAGE2',
                    default=None,
                    help='The second input image path.')
parser.add_argument('-hw',
                    metavar='HEIGHT,WIDTH',
                    default="256,448",
                    help='Specify the size to resize on video mode.')
args = update_parser(parser, large_model=True)
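
# The '-hw' option arrives as a single "HEIGHT,WIDTH" string; a minimal
# sketch of turning it into integers for the video path (the variable names
# are illustrative, not taken from the original sample):
#
#   height, width = map(int, args.hw.split(','))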

# ======================
# Main functions
# ======================


def preprocess(img):
    im_h, im_w, _ = img.shape

    ow, oh = im_w, im_h
    if im_w % (1 << 7) != 0:
        ow = (((im_w >> 7) + 1) << 7)
    if im_h % (1 << 7) != 0:
        oh = (((im_h >> 7) + 1) << 7)
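
    # The bit shifts above round each dimension up to the next multiple of
    # 128 (1 << 7 == 128), presumably so the network's internal down- and
    # upsampling divides evenly; an equivalent, more explicit form would be
    # math.ceil(im_w / 128) * 128. For example, im_w = 448 becomes ow = 512,
    # while im_w = 512 is left unchanged.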