Example #1
    finally:
        clean_all([input_path, output_path])


if __name__ == '__main__':
    global upload_directory
    global results_img_directory
    global image_colorizer

    upload_directory = '/data/upload/'
    create_directory(upload_directory)

    results_img_directory = '/data/result_images/'
    create_directory(results_img_directory)

    model_directory = '/data/models/'
    create_directory(model_directory)

    artistic_model_url = 'https://www.dropbox.com/s/zkehq1uwahhbc2o/ColorizeArtistic_gen.pth?dl=0'
    get_model_bin(artistic_model_url,
                  os.path.join(model_directory, 'ColorizeArtistic_gen.pth'))

    image_colorizer = get_image_colorizer(artistic=True)
    image_colorizer.results_dir = Path(results_img_directory)

    port = 5000
    host = '0.0.0.0'

    app.run(host=host, port=port, threaded=False)
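
Every example on this page leans on the same small helpers (create_directory, get_model_bin, clean_all) whose implementations are not part of the snippets. A minimal sketch of what they plausibly look like, assuming get_model_bin streams the file over HTTP and skips downloads that are already on disk:

import os
import shutil

import requests  # assumed dependency; any HTTP client would do


def create_directory(path):
    # Create the directory (and any parents) if it does not exist yet.
    os.makedirs(path, exist_ok=True)


def get_model_bin(url, output_path):
    # Download a binary file to output_path, skipping files already on disk.
    if os.path.exists(output_path):
        return output_path
    if os.path.dirname(output_path):
        create_directory(os.path.dirname(output_path))
    with requests.get(url, stream=True) as response:
        response.raise_for_status()
        with open(output_path, 'wb') as f:
            shutil.copyfileobj(response.raw, f)
    return output_path


def clean_all(paths):
    # Remove the temporary files/directories produced while serving a request.
    for path in paths:
        if os.path.isdir(path):
            shutil.rmtree(path, ignore_errors=True)
        elif os.path.exists(path):
            os.remove(path)

Note that the Dropbox links in Examples #1 and #5 end in ?dl=0, which serves an HTML preview page; the real helper presumably requests the ?dl=1 direct-download variant to get the actual weights.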
Example #2
    global args

    result_directory = '/src/results/'
    create_directory(result_directory)

    upload_directory = '/src/UGATIT/dataset/selfie2anime/testA/'
    create_directory(upload_directory)

    create_directory('/src/UGATIT/dataset/selfie2anime/testB/')
    create_directory('/src/UGATIT/dataset/selfie2anime/trainA/')
    create_directory('/src/UGATIT/dataset/selfie2anime/trainB/')

    model_directory = '/src/checkpoint/'
    create_directory(model_directory)

    url_prefix = 'http://pretrained-models.auth-18b62333a540498882ff446ab602528b.storage.gra5.cloud.ovh.net/image/ugatit/selfie2anime/'

    model_file_zip = 'ugatit-selfie2anime-pretrained.zip'

    get_model_bin(url_prefix + model_file_zip,
                  os.path.join('/src', model_file_zip))

    unzip(model_file_zip)

    args = parse_args()

    port = 5000
    host = '0.0.0.0'

    app.run(host=host, port=port, threaded=True)
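
Example #2 downloads a zip archive and hands it to an unzip helper that is not shown. A minimal sketch, assuming it extracts the archive next to the file (or into the current working directory, which is /src in these images) with the standard zipfile module:

import os
import zipfile


def unzip(zip_path, extract_to=None):
    # Extract the archive; by default unpack next to the archive itself.
    extract_to = extract_to or os.path.dirname(os.path.abspath(zip_path)) or '.'
    with zipfile.ZipFile(zip_path) as archive:
        archive.extractall(extract_to)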
Example #3
    create_directory(upload_directory)

    create_directory('/src/UGATIT/dataset/selfie2anime/testB/')
    create_directory('/src/UGATIT/dataset/selfie2anime/trainA/')
    create_directory('/src/UGATIT/dataset/selfie2anime/trainB/')

    model_directory = '/src/checkpoint/'
    create_directory(model_directory)

    url_prefix = 'http://pretrained-models.auth-18b62333a540498882ff446ab602528b.storage.gra5.cloud.ovh.net/image/'

    model_file_rar = 'UGATIT_selfie2anime_lsgan_4resblock_6dis_1_1_10_10_1000_sn_smoothing.rar'

    haarcascade_file = 'haarcascade_frontalface_default.xml'

    get_model_bin(url_prefix + "ugatit/selfie2anime/" + model_file_rar,
                  os.path.join('/src', model_file_rar))
    unrar(model_file_rar, model_directory)

    get_model_bin(url_prefix + "haarcascade/" + haarcascade_file,
                  os.path.join('/src', haarcascade_file))

    args = parse_args()

    sess = tf.InteractiveSession(config=tf.ConfigProto(
        allow_soft_placement=True, inter_op_parallelism_threads=1))

    gan = UGATIT(sess, args)
    gan.build_model()

    gan.test_endpoint_init()
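
Examples #3 and #9 unpack .rar archives through an unrar helper that is likewise not shown. A plausible sketch, assuming the unrar command-line tool is installed in the container:

import subprocess


def unrar(rar_path, destination):
    # "x" extracts with full paths, "-o+" overwrites existing files.
    subprocess.run(['unrar', 'x', '-o+', rar_path, destination], check=True)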
Example #4
        traceback.print_exc()
        return {'message': 'input error'}, 400

    finally:
        clean_all([
            input_path
            ])

        shutil.rmtree(os.path.join(img_output_dir, args.img_name))

if __name__ == '__main__':
    global upload_directory, weight_file
    global ALLOWED_EXTENSIONS
    ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])

    upload_directory = '/src/upload/'
    create_directory(upload_directory)

    weight_directory = '/src/'
    weight_file = 'imagenet-vgg-verydeep-19.mat'

    url_prefix = 'http://pretrained-models.auth-18b62333a540498882ff446ab602528b.storage.gra.cloud.ovh.net/image/neural-style-tf/'

    get_model_bin(url_prefix + weight_file, weight_directory + weight_file)

    port = 5000
    host = '0.0.0.0'

    app.run(host=host, port=port, threaded=True)

Example #5
    finally:
        clean_all([
            input_path,
            output_path
            ])

if __name__ == '__main__':
    global upload_directory
    global results_video_directory
    global video_colorizer
    
    upload_directory = '/data/upload/'
    create_directory(upload_directory)

    results_video_directory = '/data/video/result/'
    create_directory(results_video_directory)

    model_directory = '/data/models/'
    create_directory(model_directory)
    
    video_model_url = 'https://www.dropbox.com/s/336vn9y4qwyg9yz/ColorizeVideo_gen.pth?dl=0'
    get_model_bin(video_model_url, os.path.join(model_directory, 'ColorizeVideo_gen.pth'))

    video_colorizer = get_video_colorizer()
    
    port = 5000
    host = '0.0.0.0'

    app.run(host=host, port=port, threaded=False)
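
The request handlers in Examples #1 and #5 are truncated, but the colorization step itself reduces to a single DeOldify call. A sketch for the video case, assuming the uploaded file has already been placed where the colorizer expects it and using an illustrative render factor:

def colorize_video(file_name, render_factor=21):
    # colorize_from_file_name looks for file_name inside the colorizer's
    # source folder and returns the path of the rendered video under the
    # colorizer's result folder; the render factor here is only a guess.
    return video_colorizer.colorize_from_file_name(file_name,
                                                   render_factor=render_factor)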
Example #6
    global deblur
    global train_dir
    global graph
    global sess
    global ALLOWED_EXTENSIONS
    ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])

    upload_directory = '/src/upload/'
    create_directory(upload_directory)

    checkpoint_dir = "/src/checkpoints/"

    create_directory(checkpoint_dir)

    url_prefix = 'http://pretrained-models.auth-18b62333a540498882ff446ab602528b.storage.gra.cloud.ovh.net/image/SRN-Deblur/'

    model_zip = "srndeblur_models.zip"

    get_model_bin(url_prefix + model_zip, checkpoint_dir + model_zip)

    os.system("cd " + checkpoint_dir + " && unzip " + model_zip)

    checkpoint_dir = os.path.join(checkpoint_dir, args.model)

    deblur = model.DEBLUR(args)

    port = 5000
    host = '0.0.0.0'

    app.run(host=host, port=port, threaded=True)
Example #7
    except:
        traceback.print_exc()
        return {'message': 'input error'}, 400

    finally:
        clean_all([
            input_path,
            output_path
            ])

if __name__ == '__main__':
    global upload_directory
    global model, net
    
    upload_directory = 'upload'
    create_directory(upload_directory)

    model_name = "frozen_east_text_detection.pb"
    model_url = "https://storage.gra5.cloud.ovh.net/v1/AUTH_18b62333a540498882ff446ab602528b/pretrained-models/" + model_name

    get_model_bin(model_url, model_name)

    net = cv2.dnn.readNet(model_name)

    port = 5000
    host = '0.0.0.0'

    app.run(host=host, port=port, threaded=True)
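
Example #7 only loads the EAST model; the detection step (not shown) feeds a blob through the net loaded above and reads its two output layers. A sketch following the usual EAST conventions, with the input size treated as an assumption:

import cv2


def detect_text(image_path, width=320, height=320):
    # EAST expects dimensions that are multiples of 32 and the ImageNet mean.
    image = cv2.imread(image_path)
    blob = cv2.dnn.blobFromImage(image, 1.0, (width, height),
                                 (123.68, 116.78, 103.94),
                                 swapRB=True, crop=False)
    net.setInput(blob)
    scores, geometry = net.forward([
        'feature_fusion/Conv_7/Sigmoid',  # text/no-text confidence map
        'feature_fusion/concat_3',        # bounding-box geometry
    ])
    return scores, geometry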

Example #8
    upload_directory = '/src/upload/'
    create_directory(upload_directory)

    mobile_net_directory = '/src/models/mobile_net/'
    xception_directory = '/src/models/xception/'
    create_directory(mobile_net_directory)
    create_directory(xception_directory)

    url_prefix = 'http://pretrained-models.auth-18b62333a540498882ff446ab602528b.storage.gra.cloud.ovh.net/image/'

    for i in [
            "frozen_inference_graph.pb",
            "model.ckpt-30000.data-00000-of-00001", "model.ckpt-30000.index"
    ]:
        get_model_bin(url_prefix + "mobile-net/" + i, mobile_net_directory + i)

    for i in [
            "frozen_inference_graph.pb", "model.ckpt.data-00000-of-00001  ",
            "model.ckpt.index"
    ]:
        get_model_bin(url_prefix + "xception/" + i, xception_directory + i)

    #fast_graph_def = tf.GraphDef.FromString(open(mobile_net_directory + "frozen_inference_graph.pb", "rb").read())
    with open(xception_directory + "frozen_inference_graph.pb", "rb") as f:
        slow_graph_def = tf.GraphDef.FromString(f.read())

    tf.import_graph_def(slow_graph_def, name='')

    sess = tf.Session()
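
Example #8 stops right after importing the frozen Xception graph. Running inference against it typically looks like the sketch below; the tensor names are the ones DeepLab's export script uses, but treat them as an assumption here:

import numpy as np
from PIL import Image


def segment(image_path, sess, input_size=513):
    # Resize so the longer side matches DeepLab's export size, then run the graph.
    image = Image.open(image_path).convert('RGB')
    ratio = input_size / max(image.size)
    resized = image.resize((int(ratio * image.size[0]),
                            int(ratio * image.size[1])), Image.LANCZOS)
    batch = np.expand_dims(np.asarray(resized), axis=0)
    seg_map = sess.run('SemanticPredictions:0',
                       feed_dict={'ImageTensor:0': batch})
    return resized, seg_map[0]  # per-pixel class ids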
Example #9
    global rdn
    global ALLOWED_EXTENSIONS
    ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])

    result_directory = '/src/results/'
    create_directory(result_directory)

    upload_directory = '/src/upload/'
    create_directory(upload_directory)
    
    model_directory = '/src/weights/'
    create_directory(model_directory)


    url_prefix = 'http://pretrained-models.auth-18b62333a540498882ff446ab602528b.storage.gra.cloud.ovh.net/image/'

    model_file_rar = 'weights.rar'


    get_model_bin(url_prefix + 'super-resolution/' + model_file_rar,
                  os.path.join('/src', model_file_rar))
    unrar(model_file_rar, '/src')

    rdn = RDN(arch_params={'C':6, 'D':20, 'G':64, 'G0':64, 'x':2})
    rdn.model.load_weights('weights/sample_weights/rdn-C6-D20-G64-G064-x2/ArtefactCancelling/rdn-C6-D20-G64-G064-x2_ArtefactCancelling_epoch219.hdf5')

    port = 5000
    host = '0.0.0.0'

    app.run(host=host, port=port, threaded=False)
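
Example #9 loads idealo's ISR RDN model; upscaling an uploaded image then comes down to one predict call on a numpy array. A sketch with the file handling assumed:

import numpy as np
from PIL import Image


def upscale(input_path, output_path):
    # RDN.predict takes an HxWx3 uint8 array and returns the x2 upscaled array.
    lr_img = np.array(Image.open(input_path).convert('RGB'))
    sr_img = rdn.predict(lr_img)
    Image.fromarray(sr_img).save(output_path)
    return output_path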

Example #10
BOT_TOKEN = os.environ["BOT_TOKEN"]

# Set upload directory and create if not exists
upload_directory = '/data/upload'
create_directory(upload_directory)

# Set result images directory and create if not exists
results_img_directory = '/data/result_images'
create_directory(results_img_directory)

# Set data model directory and create if not exists
model_directory = '/data/models'
create_directory(model_directory)

# Only download the model binary if it is not already present in /data/models
get_model_bin(artistic_model_url,
              os.path.join(model_directory, "ColorizeArtistic_gen.pth"))
image_colorizer = get_image_colorizer(artistic=True)

get_model_bin(video_model_url,
              os.path.join(model_directory, "ColorizeVideo_gen.pth"))
video_colorizer = get_video_colorizer()
video_colorizer.result_folder = Path(results_img_directory)


def color(file_path, chat_id):
    # set input and output file paths
    input_path = file_path
    output_path = os.path.join(results_img_directory,
                               os.path.basename(input_path))

    try:
Example #11
    finally:
        clean_all([
            input_path,
            output_path
            ])

if __name__ == '__main__':
    global upload_directory
    global config, res_sizes

    upload_directory = '/src/upload/'
    create_directory(upload_directory)

    config = tf.ConfigProto(device_count={'GPU': 0})

    # get all available image resolutions
    res_sizes = utils.get_resolutions()

    url_prefix = 'http://pretrained-models.auth-18b62333a540498882ff446ab602528b.storage.gra.cloud.ovh.net/image/deep-photo-enhancement/'

    for i in ["blackberry_orig.data-00000-of-00001", "blackberry_orig.index",
              "iphone_orig.data-00000-of-00001", "iphone_orig.index",
              "sony_orig.data-00000-of-00001", "sony_orig.index"]:
        get_model_bin(url_prefix + i, "models_orig/" + i)

    get_model_bin(url_prefix + "imagenet-vgg-verydeep-19.mat" , "models/imagenet-vgg-verydeep-19.mat")
    
    port = 5000
    host = '0.0.0.0'

    app.run(host=host, port=port, threaded=True)

Example #12
    upload_directory = '/src/upload/'
    create_directory(upload_directory)

    model_directory = '/src/model/'
    create_directory(model_directory)

    model_name = 'efficientnet-b5'
    model = EfficientNet.from_pretrained(model_name)
    model.eval()

    model_url = "https://storage.gra.cloud.ovh.net/v1/AUTH_18b62333a540498882ff446ab602528b/pretrained-models/image/EfficientNet-PyTorch/"

    labels_file = 'labels_map.txt'

    get_model_bin(model_url + labels_file, model_directory + labels_file)

    labels_map = json.load(open(model_directory + labels_file))
    labels_map = [labels_map[str(i)] for i in range(1000)]

    # Preprocess image
    tfms = transforms.Compose([
        transforms.Resize(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])

    port = 5000
    host = '0.0.0.0'

    app.run(host=host, port=port, threaded=True)
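
With the transforms and labels from Example #12 in place, classifying an upload is a standard EfficientNet-PyTorch forward pass; the top-k handling below is an illustrative assumption:

import torch
from PIL import Image


def classify(image_path, topk=5):
    # Preprocess, add a batch dimension and run the network without gradients.
    img = tfms(Image.open(image_path).convert('RGB')).unsqueeze(0)
    with torch.no_grad():
        logits = model(img)
    probs = torch.softmax(logits, dim=1)
    top_probs, top_idxs = probs.topk(topk)
    return [(labels_map[i], p.item())
            for i, p in zip(top_idxs[0].tolist(), top_probs[0])]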
Example #13
    ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])
    

    upload_directory = '/src/upload/'
    create_directory(upload_directory)

    model_path = "/src/models/"
    model_file = "resnet152_weights_tf.h5"
    weights_file = "model.96-0.89.hdf5"

    url_prefix = "https://storage.gra.cloud.ovh.net/v1/AUTH_18b62333a540498882ff446ab602528b/pretrained-models/image/"



    for i in [model_file, weights_file]:
        get_model_bin(url_prefix + "car-classifier/v0/" + i , model_path + i)

    model, graph = load_my_model(model_path + model_file, model_path + weights_file)


    img_width, img_height = 224, 224

    cars_meta = scipy.io.loadmat('devkit/cars_meta')
    class_names = cars_meta['class_names']  # shape=(1, 196)
    class_names = np.transpose(class_names)

    port = 5000
    host = '0.0.0.0'

    app.run(host=host, port=port, threaded=True)
Example #14
if __name__ == '__main__':
    global upload_directory, model_directory
    global encoder, decoder, start_text, hidden

    upload_directory = 'upload/'
    create_directory(upload_directory)

    model_directory = 'model_weights/'
    create_directory(model_directory)

    encoder_file = 'encoder_resnet34_0.061650436371564865.pt'
    decoder_file = 'decoder_resnet34_0.061650436371564865.pt'

    model_url = "https://storage.gra.cloud.ovh.net/v1/AUTH_18b62333a540498882ff446ab602528b/pretrained-models/image/sketch2code/"

    get_model_bin(model_url + encoder_file,
                  os.path.join(model_directory, encoder_file))
    get_model_bin(model_url + decoder_file,
                  os.path.join(model_directory, decoder_file))

    encoder = torch.load(os.path.join(model_directory, encoder_file))
    decoder = torch.load(os.path.join(model_directory, decoder_file))

    start_text = '<START>'
    hidden = decoder.init_hidden()

    port = 5000
    host = '0.0.0.0'

    app.run(host=host, port=port, threaded=True)
Example #15
    result_directory = '/src/results/'
    create_directory(result_directory)

    upload_directory = '/src/upload/'
    create_directory(upload_directory)

    prewarm = os.getenv('PREWARM', 'TRUE') == 'TRUE'

    if prewarm:
        # load the pretrained models trained on ADE20K, Cityscapes and Pascal VOC 2012
        model_scene_parsing = pretrained.pspnet_50_ADE_20K()
        model_cityscapes = pretrained.pspnet_101_cityscapes()
        model_visual_object = pretrained.pspnet_101_voc12()
    else:
        get_model_bin(
            "https://www.dropbox.com/s/0uxn14y26jcui4v/pspnet50_ade20k.h5?dl=1",
            "/root/.keras/dataset/pspnet50_ade20k.h5")
        get_model_bin(
            "https://www.dropbox.com/s/c17g94n946tpalb/pspnet101_cityscapes.h5?dl=1",
            "/root/.keras/dataset/pspnet101_cityscapes.h5")
        get_model_bin(
            "https://www.dropbox.com/s/uvqj2cjo4b9c5wg/pspnet101_voc2012.h5?dl=1",
            "/root/.keras/dataset/pspnet101_voc2012.h5")

    port = 5000
    host = '0.0.0.0'

    app.run(host=host, port=port, threaded=False)
Example #16
        return json.dumps(results), 200

    except:
        traceback.print_exc()
        return {'message': 'input error'}, 400

    finally:
        clean_all([input_path])


if __name__ == '__main__':
    global model, graph

    upload_directory = '/src/upload/'
    create_directory(upload_directory)

    model_directory = '/src/models/'
    create_directory(model_directory)

    model_url_prefix = "http://pretrained-models.auth-18b62333a540498882ff446ab602528b.storage.gra.cloud.ovh.net/text/programming-language/"
    get_model_bin(model_url_prefix + "save_tmp.h5",
                  model_directory + "save_tmp.h5")

    model = keras.models.load_model(model_directory + "save_tmp.h5")
    graph = tf.get_default_graph()

    port = 5000
    host = '0.0.0.0'

    app.run(host=host, port=port, threaded=True)
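
Example #16 keeps the default TensorFlow graph next to the Keras model because Flask serves requests on worker threads; predictions then have to run with that graph re-entered. A minimal sketch of the pattern, with the feature preprocessing assumed:

def predict_language(features):
    # Re-enter the graph captured at startup before calling the Keras model,
    # otherwise TF1 raises "Tensor ... is not an element of this graph" when
    # the prediction happens on a different thread.
    with graph.as_default():
        return model.predict(features)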