def process():
    """Split an audio file fetched from a URL into stems and return them zipped.

    Expects a JSON request body: ``{"url": <audio url>, "nb_stems": 2|4|5}``.
    Downloads the audio, runs the matching pretrained separator, writes one
    audio file per instrument, zips them, and streams the archive back.

    Returns:
        (flask response, 200) with the zip archive on success, or
        ({'message': 'input error'}, 400) on any failure.
    """
    input_path = generate_random_filename(upload_directory, "mp3")
    folder_random = str(uuid4())
    output_path = os.path.join('/src/output', folder_random)
    create_directory(output_path)
    zip_output_path = generate_random_filename(upload_directory, "zip")
    try:
        url = request.json["url"]
        # "nb_stems" selects the pretrained model: 2, 4 or 5 stems.
        nb_stems = request.json["nb_stems"]
        download(url, input_path)
        separator = separators[int(nb_stems)]
        waveform, rate = load_audio(input_path)
        result = separator.separate(waveform)

        # Write one file per instrument, then add them all to a single
        # archive.  The archive is opened exactly once: the original code
        # re-opened it in 'w' mode on every os.walk iteration (clobbering
        # previously added entries) and also leaked an extra, never-closed
        # ZipFile created at zip_output_path + '.zip'.
        for instrument, data in result.items():
            save_audio(output_path, instrument, data, rate)
        with ZipFile(zip_output_path, 'w') as archive:
            for root, _dirs, files in os.walk(output_path):
                for file in files:
                    archive.write(os.path.join(root, file), basename(file))

        callback = send_file(zip_output_path, mimetype='application/zip')
        return callback, 200
    except Exception:
        # Any failure (bad JSON, download error, unknown stem count, ...) is
        # reported to the client as an input error; traceback goes to the log.
        traceback.print_exc()
        return {'message': 'input error'}, 400
    finally:
        # NOTE(review): send_file may stream the file lazily; deleting
        # zip_output_path here could race with the response being sent —
        # confirm against the web framework's streaming semantics.
        clean_all([input_path, output_path, zip_output_path])
except: traceback.print_exc() return {'message': 'input error'}, 400 finally: pass clean_all([input_path, output_path]) if __name__ == '__main__': global upload_directory global results_img_directory global image_colorizer upload_directory = '/data/upload/' create_directory(upload_directory) results_img_directory = '/data/result_images/' create_directory(results_img_directory) model_directory = '/data/models/' create_directory(model_directory) artistic_model_url = 'https://www.dropbox.com/s/zkehq1uwahhbc2o/ColorizeArtistic_gen.pth?dl=0' get_model_bin(artistic_model_url, os.path.join(model_directory, 'ColorizeArtistic_gen.pth')) image_colorizer = get_image_colorizer(artistic=True) image_colorizer.results_dir = Path(results_img_directory) port = 5000
except: traceback.print_exc() return {'message': 'input error'}, 400 finally: clean_all([input_path, output_path]) if __name__ == '__main__': global upload_directory global model_directory global args global gan result_directory = '/src/results/' create_directory(result_directory) upload_directory = '/src/UGATIT/dataset/selfie2anime/testA/' create_directory(upload_directory) create_directory('/src/UGATIT/dataset/selfie2anime/testB/') create_directory('/src/UGATIT/dataset/selfie2anime/trainA/') create_directory('/src/UGATIT/dataset/selfie2anime/trainB/') model_directory = '/src/checkpoint/' create_directory(model_directory) url_prefix = 'http://pretrained-models.auth-18b62333a540498882ff446ab602528b.storage.gra5.cloud.ovh.net/image/' model_file_rar = 'UGATIT_selfie2anime_lsgan_4resblock_6dis_1_1_10_10_1000_sn_smoothing.rar'
traceback.print_exc() return {'message': 'input error'}, 400 finally: clean_all([ input_path, output_path ]) if __name__ == '__main__': global upload_directory global results_video_directory global video_colorizer upload_directory = '/data/upload/' create_directory(upload_directory) results_video_directory = '/data/video/result/' create_directory(results_video_directory) model_directory = '/data/models/' create_directory(model_directory) video_model_url = 'https://www.dropbox.com/s/336vn9y4qwyg9yz/ColorizeVideo_gen.pth?dl=0' get_model_bin(video_model_url, os.path.join(model_directory, 'ColorizeVideo_gen.pth')) video_colorizer = get_video_colorizer() port = 5000 host = '0.0.0.0'
traceback.print_exc() return {'message': 'input error'}, 400 finally: clean_all([ input_path ]) shutil.rmtree(os.path.join(img_output_dir, args.img_name)) if __name__ == '__main__': global upload_directory, weight_file global ALLOWED_EXTENSIONS ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg']) upload_directory = '/src/upload/' create_directory(upload_directory) weight_directory = '/src/' weight_file = 'imagenet-vgg-verydeep-19.mat' url_prefix = 'http://pretrained-models.auth-18b62333a540498882ff446ab602528b.storage.gra.cloud.ovh.net/image/neural-style-tf/' get_model_bin(url_prefix + weight_file , weight_directory + weight_file) port = 5000 host = '0.0.0.0' app.run(host=host, port=port, threaded=True)
finally: clean_all([input_path, output_path]) if __name__ == '__main__': global upload_directory global checkpoint_dir global deblur global train_dir global graph global sess global ALLOWED_EXTENSIONS ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg']) upload_directory = '/src/upload/' create_directory(upload_directory) checkpoint_dir = "/src/checkpoints/" create_directory(checkpoint_dir) url_prefix = 'http://pretrained-models.auth-18b62333a540498882ff446ab602528b.storage.gra.cloud.ovh.net/image/SRN-Deblur/' model_zip = "srndeblur_models.zip" get_model_bin(url_prefix + model_zip, checkpoint_dir + model_zip) os.system("cd " + checkpoint_dir + " && unzip " + model_zip) checkpoint_dir = os.path.join(checkpoint_dir, args.model)
except: traceback.print_exc() return {'message': 'input error'}, 400 finally: clean_all([input_path, output_path]) if __name__ == '__main__': global upload_directory global fast_graph_def, slow_graph_def global ALLOWED_EXTENSIONS ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg']) upload_directory = '/src/upload/' create_directory(upload_directory) mobile_net_directory = '/src/models/mobile_net/' xception_directory = '/src/models/xception/' create_directory(mobile_net_directory) create_directory(xception_directory) url_prefix = 'http://pretrained-models.auth-18b62333a540498882ff446ab602528b.storage.gra.cloud.ovh.net/image/' todo = [] for i in [ "frozen_inference_graph.pb", "model.ckpt-30000.data-00000-of-00001", "model.ckpt-30000.index" ]: get_model_bin(url_prefix + "mobile-net/" + i, mobile_net_directory + i)
traceback.print_exc() return {'message': 'input error'}, 400 finally: clean_all([input_path]) if __name__ == '__main__': global upload_directory, model_directory global model, labels_map global tfms global ALLOWED_EXTENSIONS ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg']) upload_directory = '/src/upload/' create_directory(upload_directory) model_directory = '/src/model/' create_directory(model_directory) model_name = 'efficientnet-b5' model = EfficientNet.from_pretrained(model_name) model.eval() model_url = "https://storage.gra.cloud.ovh.net/v1/AUTH_18b62333a540498882ff446ab602528b/pretrained-models/image/EfficientNet-PyTorch/" labels_file = 'labels_map.txt' get_model_bin(model_url + labels_file, model_directory + labels_file) labels_map = json.load(open(model_directory + labels_file))
traceback.print_exc() return {'message': 'input error'}, 400 finally: clean_all([input_path, output_path, zip_output_path]) if __name__ == '__main__': global separator global model_directory global audio_loader global upload_directory global separators upload_directory = "/src/upload/" create_directory(upload_directory) model_directory = "/src/pretrained_models/" create_directory(model_directory) model_url_prefix = 'http://pretrained-models.auth-18b62333a540498882ff446ab602528b.storage.gra.cloud.ovh.net/sound/spleeter/' separators = dict() for model in ["2stems", "4stems", "5stems"]: separators[int(model[0])] = Separator('spleeter:' + model) download(model_url_prefix + model + '.tar.gz', model_directory + model + '.tar.gz') create_directory(model_directory + model + '/') cmd = 'tar zxvf ' + model_directory + model + '.tar.gz' + ' -C ' + model_directory + model
finally: clean_all([input_path, output_path]) if __name__ == '__main__': global upload_directory global result_directory global model_scene_parsing, model_cityscapes, model_visual_object global ALLOWED_EXTENSIONS global prewarm ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg']) result_directory = '/src/results/' create_directory(result_directory) upload_directory = '/src/upload/' create_directory(upload_directory) prewarm = True if os.getenv('PREWARM', 'TRUE') == 'TRUE' else False if prewarm: model_scene_parsing = pretrained.pspnet_50_ADE_20K( ) # load the pretrained model trained on ADE20k dataset model_cityscapes = pretrained.pspnet_101_cityscapes( ) # load the pretrained model trained on Cityscapes dataset model_visual_object = pretrained.pspnet_101_voc12( ) # load the pretrained model trained on Pascal VOC 2012 dataset else: get_model_bin(