from flask import Flask, request, redirect, url_for, jsonify, send_file
from werkzeug.utils import secure_filename
import os
import random
import string
import json
from datetime import datetime

UPLOAD_FOLDER = 'uploads'

app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 1024 * 1024 * 1024  # 1 GB
app.debug = True

import generate
z = generate.load_all()


@app.route('/upload', methods=['GET', 'POST'])
def upload_file():
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            return jsonify({'result': False, 'message': 'no file'})
        file = request.files['file']
        if file.filename == '':
            return jsonify({'result': False, 'message': 'no file name'})
        if file:
            # NOTE: the original snippet is truncated here; this is a minimal
            # completion that saves the upload into UPLOAD_FOLDER and reports success.
            filename = secure_filename(file.filename)
            if not os.path.exists(UPLOAD_FOLDER):
                os.makedirs(UPLOAD_FOLDER)
            file.save(os.path.join(UPLOAD_FOLDER, filename))
            return jsonify({'result': True, 'filename': filename})
    # fall-through for GET requests (the route accepts GET but the original code did not handle it)
    return jsonify({'result': False, 'message': 'use POST to upload a file'})
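A minimal client-side sketch for exercising the /upload route, assuming the app runs on the Flask default of localhost:5000; the host, port, and file name photo.jpg are illustrative assumptions, not part of the snippet above:

import requests

# POST a file under the 'file' form field, matching request.files['file'] in the route
with open('photo.jpg', 'rb') as f:
    resp = requests.post('http://localhost:5000/upload', files={'file': f})
print(resp.json())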
import os

import torch
from torch.autograd import Variable
from torch.optim import Adam

# utils, Vgg16, generate_story_loss and load_all are project-local modules;
# their import paths are not shown in this snippet.


def optimize(args):
    """
    Gatys et al. CVPR 2016
    ref: Image Style Transfer Using Convolutional Neural Networks
    """
    z = load_all()

    # load the content and style target
    content_image = utils.tensor_load_rgbimage(args.content_image,
                                               size=args.content_size,
                                               keep_asp=True)
    content_image = content_image.unsqueeze(0)
    content_image = Variable(utils.preprocess_batch(content_image), requires_grad=False)
    content_image = utils.subtract_imagenet_mean_batch(content_image)
    style_image = utils.tensor_load_rgbimage(args.style_image, size=args.style_size)
    style_image = style_image.unsqueeze(0)
    style_image = Variable(utils.preprocess_batch(style_image), requires_grad=False)
    style_image = utils.subtract_imagenet_mean_batch(style_image)

    # load the pre-trained vgg-16 and extract features
    vgg = Vgg16()
    utils.init_vgg16(args.vgg_model_dir)
    vgg.load_state_dict(torch.load(os.path.join(args.vgg_model_dir, "vgg16.weight")))
    if args.cuda:
        content_image = content_image.cuda()
        style_image = style_image.cuda()
        vgg.cuda()
    features_content = vgg(content_image)
    f_xc_c = Variable(features_content[1].data, requires_grad=False)
    features_style = vgg(style_image)
    gram_style = [utils.gram_matrix(y) for y in features_style]

    # init optimizer: the output image itself is the variable being optimized
    output = Variable(content_image.data, requires_grad=True)
    optimizer = Adam([output], lr=args.lr)
    mse_loss = torch.nn.MSELoss()

    # optimizing the images
    for e in range(args.iters):
        utils.imagenet_clamp_batch(output, 0, 255)
        temp = utils.add_imagenet_mean_batch(output)
        utils.tensor_save_bgrimage(temp.data[0], 'output/temp' + str(e) + '.jpg', args.cuda)
        optimizer.zero_grad()
        features_y = vgg(output)
        content_loss = args.content_weight * mse_loss(features_y[1], f_xc_c)

        # generate a story embedding for the current intermediate image
        skip_vec, bneg, bpos = generate_story_loss(z, 'output/temp' + str(e) + '.jpg')
        skip_vec = Variable(torch.from_numpy(skip_vec), requires_grad=False)
        bpos = Variable(torch.from_numpy(bpos), requires_grad=False)
        # style_loss = args.style_weight * mse_loss(skip_vec, bpos).cuda()
        # print("Content Loss:" + str(content_loss) + " Style Loss:" + str(style_loss))

        # accumulate the Gram-matrix style loss over all VGG feature layers
        style_loss = 0.
        for m in range(len(features_y)):
            gram_y = utils.gram_matrix(features_y[m])
            gram_s = Variable(gram_style[m].data, requires_grad=False)
            style_loss += args.style_weight * mse_loss(gram_y, gram_s)

        total_loss = content_loss + style_loss
        if (e + 1) % args.log_interval == 0:
            print(total_loss.data.cpu().numpy()[0])
        total_loss.backward()
        optimizer.step()

    # save the image
    output = utils.add_imagenet_mean_batch(output)
    utils.tensor_save_bgrimage(output.data[0], args.output_image, args.cuda)
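For reference, a minimal sketch of the Gram-matrix helper the style loss above relies on; the project's own utils.gram_matrix may normalize differently, so treat this as an illustration rather than the actual implementation:

def gram_matrix(y):
    # y: feature batch of shape (b, ch, h, w)
    (b, ch, h, w) = y.size()
    features = y.view(b, ch, w * h)
    features_t = features.transpose(1, 2)
    # per-sample Gram matrix, normalized by the feature map size
    gram = features.bmm(features_t) / (ch * h * w)
    return gram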
def loading():
    import generate
    z = generate.load_all()
    return z
import sys
import time
import argparse

import config
import generate

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_cache_path', help='model cache path')
    parser.add_argument('--type', help='train or inference', default='inference')
    parser.add_argument('--input', help='input file')
    parser.add_argument('--style', help='use style')
    parser.add_argument('--condition_count', type=int, default=100)
    parser.add_argument('--beamwidth', type=int, default=50)
    args = parser.parse_args()
    print(args)

    if args.type == 'inference':
        config.init(args.model_cache_path)
        z = generate.load_all()
        if args.style:
            s = generate.story(z, args.input, args.condition_count, args.beamwidth, lyric=True)
        else:
            s = generate.story(z, args.input, args.condition_count, args.beamwidth)
        # s = generate.story(z, args.input)
        output_file = '/data/output/{}.txt'.format(str(int(time.time())))
        with open(output_file, "w") as f:
            f.write('{}'.format(s))
        print(output_file)
import generate


def get(filename):
    z = generate.load_all()
    output = generate.story(z, filename)
    return output