Example #1
def tower_loss(scope):
    reader = read.Reader(path=FLAGS.buckets + 'wavFile_train_frame_60.tfr', batch_size=FLAGS.batch_size,
                         window_size=FLAGS.frequency // FLAGS.frame_count, kwidth=FLAGS.kwidth)
    logits = inference.Inference(reader.wav_raw, FLAGS.kwidth, 2, FLAGS.isTrain, scope=scope).build_model()
    loss.loss(logits=logits, labels=reader.label)
    losses = tf.get_collection('losses', scope)
    total_loss = tf.add_n(losses, name='total_loss')
    tf.add_to_collection('summary', tf.summary.scalar(scope + 'loss', losses[0]))
    return total_loss
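tower_loss only builds the per-tower graph; the caller is expected to replicate it across devices and collect gradients. A minimal sketch of that driving loop (the GPU count, the optimizer, and its learning-rate flag are assumptions, not part of the original example):

opt = tf.train.AdamOptimizer(FLAGS.learning_rate)
tower_grads = []
for i in range(2):  # assumed number of GPUs
    with tf.device('/gpu:%d' % i):
        with tf.name_scope('tower_%d' % i) as scope:
            # Build this tower's loss and collect its gradients;
            # variables are reused by the towers that follow.
            total_loss = tower_loss(scope)
            tf.get_variable_scope().reuse_variables()
            tower_grads.append(opt.compute_gradients(total_loss))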
Example #2
def analyzePdf():
    """Handler to analyze (classify and summarize) a EULA PDF input.
    
    Returns:
        flask.Response: a JSON response containing classification, summarization, and error information

    """
    # Check if the file exists
    if 'file' not in request.files:
        return jsonify({
            'classification': 'None',
            'summary': 'None',
            'error': 'File not included in request.'
        })

    uploaded_file = request.files['file']
    filename = uploaded_file.filename

    # Check if file has a name
    if filename == '':
        return jsonify({
            'classification': 'None',
            'summary': 'None',
            'error': 'File either has no name or no file uploaded.'
        })

    # Check if the file extension is .pdf
    if '.' not in filename or filename.rsplit('.', 1)[1].lower() != 'pdf':
        return jsonify({
            'classification': 'None',
            'summary': 'None',
            'error': 'File must have file extension .pdf'
        })

    # Save file
    uploaded_file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))

    # Read/Convert file
    path = os.path.join(UPLOAD_FOLDER, filename)
    #text = convert_pdf(path)

    # Create EULA class and run inference
    eula = inference.EULA(None, path)
    infer = inference.Inference()
    classification = infer.get_ethicality_classification(eula)
    summary = infer.get_EULA_summary(eula)

    # Delete file
    os.remove(path)

    # Format return message
    return jsonify({
        'classification': classification,
        'summary': summary,
        'error': 'None'
    })
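The handler above reads the upload from request.files['file'], so a client has to send a multipart form with that field name. A minimal sketch with the requests library (the endpoint URL and file name are placeholders, since the route decorator is not shown in this snippet):

import requests

with open('terms.pdf', 'rb') as f:  # hypothetical input file
    resp = requests.post('http://localhost:5000/analyzePdf',  # assumed route
                         files={'file': ('terms.pdf', f, 'application/pdf')})
print(resp.json())  # {'classification': ..., 'summary': ..., 'error': 'None'}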
Example #3
    def train_on_single_gpu(self):
        global_step = tf.get_variable(
            'global_step', [],
            initializer=tf.constant_initializer(0), trainable=False)
        wav_data = tf.placeholder(tf.float32,
                                  [self.bach_size, self.canvas_size - self.window_size - 1, self.window_size])

        generate = inference.Inference(wav_data, self.kwidth, self.stride, self.is_train)
        generator = generate.build_ae_model()

        lpca = tf.placeholder(tf.float32, [self.window_size, None], name="lpca")
        label_data = tf.placeholder(tf.float32, [self.bach_size, self.canvas_size - self.window_size - 1, 1],
                                    name="label")

        loss = losses.Losses(generator, lpca, label_data).get_loss()
        tf.summary.scalar('losses', loss)

        train_op = self.get_optimizer(self.optimizer).minimize(loss, var_list=tf.trainable_variables(),
                                                               global_step=global_step)

        tf.global_variables_initializer().run()
        tf.train.start_queue_runners()

        saver = tf.train.Saver()
        summary_writer = tf.summary.FileWriter(self.save_path, graph=self.sess.graph)
        summary_op = tf.summary.merge_all()

        for i in range(self.max_step):

            train_collect, label_collect = self.sess.run([self.wav_data, self.label_data])
            generate_data = generator.eval(feed_dict={
                wav_data: train_collect
            })
            lpca_data = generate.get_lpc_a(generate_data)
            train_op.run(feed_dict={
                wav_data: train_collect,
                lpca: lpca_data,
                label_data: label_collect
            })

            if i % self.summary_step == 0 or i + 1 == self.max_step:
                summary_data = summary_op.eval(feed_dict={
                    wav_data: train_collect,
                    lpca: lpca_data,
                    label_data: label_collect
                })
                summary_writer.add_summary(summary_data, global_step.eval())
            if i % self.saver_step == 0 or i + 1 == self.max_step:
                saver.save(self.sess, self.save_path + 'model.ckpt', global_step)
Example #4
def analyzeText():
    """Handler to analyze (classify and summarize) a EULA text input.
    
    Returns:
        flask.Response: a JSON response containing classification, summarization, and error information

    """
    request_data = request.get_json()
    print(request_data)

    if 'text' not in request_data:
        return jsonify({
            'classification': 'None',
            'summary': 'None',
            'error': 'Text not included in request.'
        })

    text = request_data['text']

    if not isinstance(text, str):
        return jsonify({
            'classification': 'None',
            'summary': 'None',
            'error': 'Data in incorrect format'
        })
    elif text == '':
        return jsonify({
            'classification': 'None',
            'summary': 'None',
            'error': 'No text received'
        })

    # Create EULA class and run inference
    eula = inference.EULA(text, None)
    infer = inference.Inference()
    classification = infer.get_ethicality_classification(eula)
    summary = infer.get_EULA_summary(eula)

    # Format return message
    return jsonify({
        'classification': classification,
        'summary': summary,
        'error': 'None'
    })
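Unlike the PDF handler, this one expects a JSON body with a 'text' key. A minimal client sketch (the endpoint URL is again a placeholder, since the route decorator is not shown):

import requests

resp = requests.post('http://localhost:5000/analyzeText',  # assumed route
                     json={'text': 'By installing this software you agree to ...'})
print(resp.json())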
Example #5
tf.flags.DEFINE_integer('hidden_2_size', 256, 'Number of neurons in hidden layer 2')
tf.flags.DEFINE_integer('output_size', 2, 'Number of outputs')

# Build the reader
reader = read.CTRReader(path=FLAGS.buckets,
                        pattem='test.csv',
                        is_training=False,
                        num_classes=FLAGS.output_size)

# Get the data and labels
datas, labels = reader.read()

# Build the neural network
inference = inference.Inference(data_input=datas,
                                h1_size=FLAGS.hidden_1_size,
                                h2_size=FLAGS.hidden_2_size,
                                num_classes=FLAGS.output_size,
                                is_training=False)

logits = inference.get_softmax()

# Initialization
sess = tf.Session()
saver = tf.train.Saver()
tf.train.start_queue_runners(sess)
sess.run(tf.global_variables_initializer())
# Restore the saved model
saver.restore(sess=sess,
              save_path=os.path.join(FLAGS.checkpointDir, 'CTR.model'))

# Compute AUC
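The example breaks off right at the AUC step. A minimal sketch of what typically follows, using sklearn (it assumes the labels are integer class ids and that column 1 of the softmax output is the positive-class probability; neither is confirmed by the snippet):

from sklearn.metrics import roc_auc_score

# Run one batch through the restored model and score it.
probs, true_labels = sess.run([logits, labels])
print('AUC:', roc_auc_score(true_labels, probs[:, 1]))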
Example #6
tf.flags.DEFINE_string('summaryDir', 'logs', "TensorBoard log directory")
tf.flags.DEFINE_integer('batch_size', 1, 'Batch size')
tf.flags.DEFINE_integer('frame_count', 60, "Number of frames")
tf.flags.DEFINE_integer('frequency', 16000, "Sampling rate")
tf.flags.DEFINE_integer('kwidth', 18, 'Window width')
tf.flags.DEFINE_integer('num_train', 1000, "Number of training steps")
tf.flags.DEFINE_float('learning_rate', 3e-4, "Learning rate")
tf.flags.DEFINE_float('beta1', 0.5, "Adam momentum (beta1)")

sess = tf.InteractiveSession()
coord = tf.train.Coordinator()
reader = read.Reader(path=FLAGS.test_file, batch_size=FLAGS.batch_size,
                     window_size=FLAGS.frequency // FLAGS.frame_count, kwidth=FLAGS.kwidth)
tf.train.start_queue_runners(sess=sess, coord=coord)

logits = inference.Inference(reader.wav_raw, FLAGS.kwidth, 2, isTrain=False).build_model()
loss_val = loss.loss(logits=logits, labels=reader.label)

saver = tf.train.Saver()

tf.global_variables_initializer().run()

saver.restore(sess, os.path.join(FLAGS.checkpointDir))

tf.train.start_queue_runners(sess=sess, coord=coord)
labels = tf.reshape(reader.label, [-1])
logits_predict, ground_truth = sess.run([logits, labels])

plt.figure(1, [20, 12])
plt.subplot(411)
plt.title('predict')
Example #7
    def __init__(self):
        self.inf = inference.Inference()
        self.type_mappings = {}
Example #8
if __name__ == "__main__":
	p = argparse.ArgumentParser()
	p.add_argument("source")
	args = p.parse_args()

	with open(args.source) as f:
		source = f.read()

	ast = parsing.parse(source)

	lowerer = Lowerer()
	lowerer.add_code_block(lowerer.top_level.root_block, ast)

	print utils.pretty(lowerer.top_level)

	print "=" * 20, "Doing inference."

	# Define a global typing context with an entry for nil.
	import prelude
	root_gamma = prelude.make_gamma()

	# Do inference.
	inf = inference.Inference()
	inf.infer_code_block(root_gamma, lowerer.top_level.root_block)

	print "=" * 20, "Inference complete."

	print utils.pretty(lowerer.top_level)

Example #9
import boto3
import inference
from flask import Flask, render_template, request
from gevent.pywsgi import WSGIServer

# Some utilities
import numpy as np
from util import base64_to_pil

TCP_PORT = 5000
aws_access_key_id = ''
aws_secret_access_key = ''
# Declare a flask app
app = Flask(__name__)

s3 = boto3.client('s3',
                  aws_access_key_id=aws_access_key_id,
                  aws_secret_access_key=aws_secret_access_key)
inference_handler = inference.Inference(s3)
print('Model loaded. Check http://127.0.0.1:' + str(TCP_PORT))


@app.route('/', methods=['GET'])
def index():
    # Main page
    return render_template('index.html')


@app.route('/predict', methods=['GET', 'POST'])
def predict():
    if request.method == 'POST':
        img = base64_to_pil(request.json)
        hash_value = str(hex(hash(img.tobytes())))
        response = inference_handler.predict(img)
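predict() hands request.json straight to base64_to_pil, which suggests the client posts a base64-encoded image (typically a data-URL string) as the raw JSON body. A minimal client sketch under that assumption (the URL, image file, and payload format are all assumptions):

import base64
import requests

with open('photo.png', 'rb') as f:  # hypothetical input image
    payload = 'data:image/png;base64,' + base64.b64encode(f.read()).decode('ascii')
resp = requests.post('http://localhost:5000/predict', json=payload)  # assumed payload format
print(resp.status_code)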
Example #10
max_label_length = config.MAX_LABEL_LENGTH

train_dir = config.TRAIN_DIR
valid_dir = config.VALID_DIR

check_dir = config.CHECKPOINT_DIR
check_path = os.path.join(check_dir, '024-loss:0.233-val_loss:2.540.h5')
char_map_dict = config.CHAR_MAP_DICT


dataset = load_data.TextData(valid_dir, char_map_dict, None, max_label_length, None)
indexs = dataset.__index__

infer = inference.Inference(phase='test',
                            hidden_num=256,
                            layers_num=20,
                            num_classes=37,
                            input_tensor_shape=(32, None, 3),
                            check_path=check_path,
                            char_map_dict=char_map_dict)
total = 0
for i in indexs:
    image_raw, _, groundtruth = dataset.__getitem__(i)
    _, pred, _ = infer.predict(image_raw)
    print(i)
#    edit = editDistance(pred, groundtruth)
#    print(i, edit, groundtruth, pred)
#    total += edit
#mean_edit = total / float(len(indexs))
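The commented-out evaluation above relies on an editDistance helper that is not shown in this snippet. A minimal Levenshtein-distance sketch that could stand in for it (an assumption about what the original helper computes):

def editDistance(a, b):
    # Classic dynamic-programming Levenshtein distance between two strings.
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        cur = [i]
        for j, cb in enumerate(b, 1):
            cur.append(min(prev[j] + 1,                  # deletion
                           cur[j - 1] + 1,               # insertion
                           prev[j - 1] + (ca != cb)))    # substitution
        prev = cur
    return prev[-1]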
Example #11
        self.id += 1

    def stream(self):
        print('Listening')
        buf = b''
        while True:
            (clientsocket, address) = self.server.accept()
            print('Connected', address)
            while True:
                chunk = clientsocket.recv(1024)
                if chunk == b'':
                    print('Disconnected', address)
                    break
                else:
                    buf += chunk
                if len(buf) >= self.block_length:
                    yield buf[0:self.block_length]
                    buf = buf[self.block_length:]


if __name__ == '__main__':
    #infer = inference.Inference('./checkpoint/checkpoint_frame_batch_64/iter_5000.pth')
    infer = inference.Inference(
        './checkpoint/checkpoint_batch_128/iter_13000.pth')
    server = AudioSocket('0.0.0.0', 8001, 1)
    for block in server.stream():
        print(len(block))
        #server.save(block)
        block = np.frombuffer(block, dtype=np.short) / 32768.
        infer.inference(None, block)
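The server above just consumes raw bytes from a TCP socket and reinterprets them as 16-bit PCM, so any client that writes np.short samples to the port will do. A minimal sketch (the host, port, and synthetic test signal are assumptions):

import socket
import numpy as np

samples = (np.sin(np.linspace(0, 100 * np.pi, 16000)) * 32767).astype(np.short)  # 1 s test tone
with socket.create_connection(('127.0.0.1', 8001)) as conn:
    conn.sendall(samples.tobytes())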
Example #12
sess = tf.InteractiveSession()

# Build the input reading pipeline
read = reader.Reader(path=FLAGS.buckets,
                     pattem='*.tfr',
                     batch_size=FLAGS.batch_size,
                     is_training=FLAGS.isTrain,
                     num_threads=FLAGS.threads)
# Get the data and labels
ids, labels, datas = read.read()
labels_one_hot = tf.one_hot(labels, FLAGS.num_classes)

# Build the network
inference = inference.Inference(data_input=datas,
                                h1_size=FLAGS.hidden_1_size,
                                h2_size=FLAGS.hidden_2_size,
                                is_training=FLAGS.isTrain,
                                num_classes=FLAGS.num_classes)

logits = inference.get_inference()
logits_softmax = inference.get_softmax()

# Build the loss
losses = losses.Losses(logits=logits, labels=labels_one_hot).get_losses()

# Build the optimizer
train_op = tf.train.AdamOptimizer(
    learning_rate=FLAGS.learning_rate).minimize(losses)

# Initialization
summary = tf.summary.FileWriter(FLAGS.summaryDir, graph=sess.graph)
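The snippet stops right after wiring up the summary writer. A minimal sketch of how such a graph is typically driven afterwards (the step count, logging interval, and the fact that nothing else needs to be fed are assumptions):

summary_op = tf.summary.merge_all()
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)

for step in range(1000):
    # One optimization step; the reader's queue runners supply the data.
    _, loss_value = sess.run([train_op, losses])
    if step % 100 == 0:
        summary.add_summary(sess.run(summary_op), global_step=step)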