Example #1
def handle_document(message):
    global users
    if users.get(message.chat.id):
        if os.path.exists(f'predict{message.chat.id}.csv'):
            os.remove(f'predict{message.chat.id}.csv')
        if users[message.chat.id].is_document:
            file_id = message.document.file_id  # avoid shadowing the built-in id()
            document_URL = bot.get_file_url(file_id)
            users[message.chat.id].document_url = document_URL
            users[message.chat.id].is_document = False

        if users[message.chat.id].document_url != '':
            bot.send_message(
                message.chat.id,
                "Данные получены, теперь надо немного потерпеть...")

            result = model.read_document(users[message.chat.id].document_url,
                                         message.chat.id)
            if users[message.chat.id].mode == 'csv':
                model.get_predict(predict_model, result, 'csv',
                                  message.chat.id)
                with open(f'predict{message.chat.id}.csv', 'rb') as predict:
                    bot.send_document(message.chat.id, predict)
            else:
                predict = model.get_predict(predict_model, result, 'текст',  # 'текст' = 'text' output mode
                                            message.chat.id)
                if len(predict) > 30:
                    bot.send_message(
                        message.chat.id,
                        "Слишком много данных, посылаю первые 30 предсказаний")
                    for value in predict[:30]:
                        bot.send_message(message.chat.id, value)
                else:
                    for value in predict:
                        bot.send_message(message.chat.id, value)
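
Example #1 assumes a pyTelegramBotAPI (telebot) bot plus a module-level users dict and predict_model; a minimal sketch of the surrounding wiring (the token placeholder, users structure, and registration call are assumptions, not shown in the original):

import telebot

bot = telebot.TeleBot('YOUR_BOT_TOKEN')  # hypothetical token placeholder
users = {}  # chat.id -> per-user state with is_document / document_url / mode

# Route incoming documents to the handler above; telebot passes the Message
# whose .document.file_id the handler reads.
bot.register_message_handler(handle_document, content_types=['document'])
bot.infinity_polling()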
Example #2
def run_training():
    # Get the sets of images and labels for training, validation, and test.
    # Tell TensorFlow that the model will be built into the default Graph.

    # Create model directory
    print('loading and init vgg16.........')
    vgg = vgg16.Vgg16()
    with tf.Graph().as_default():
        global_step = tf.get_variable('global_step', [],
                                      initializer=tf.constant_initializer(0),
                                      trainable=False)
        images_placeholder, sc_labels_placeholder, keep_pro = placeholder_inputs(
            FLAGS.batch_size * gpu_num)
        tower_grads1 = []
        tower_grads2 = []
        tower_grads3 = []
        sc_logits = []

        learning_rate = tf.train.exponential_decay(
            1e-5,
            global_step,
            decay_steps=FLAGS.max_steps / 50,
            decay_rate=0.99,
            staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)
        opt_sc = tf.train.AdamOptimizer(learning_rate)
        with tf.variable_scope('var_name') as var_scope:

            sc_fea_weights = {
                'w1':
                _variable_with_weight_decay('sc_w1', [4096, 2048], 0.005),
                'out':
                _variable_with_weight_decay('sc_feawout', [2048, 100], 0.005)
            }
            sc_fea_biases = {
                'b1': _variable_with_weight_decay('sc_b1', [2048], 0.000),
                'out': _variable_with_weight_decay('sc_feabout', [100], 0.000),
            }
            ac_fea_weights = {
                'w1':
                _variable_with_weight_decay('ac_w1', [4096, 2048], 0.005),
                'out':
                _variable_with_weight_decay('ac_feawout', [2048, 100], 0.005)
            }
            ac_fea_biases = {
                'b1': _variable_with_weight_decay('ac_b1', [2048], 0.000),
                'out': _variable_with_weight_decay('ac_feabout', [100], 0.000),
            }
            mc_fea_weights = {
                'w1':
                _variable_with_weight_decay('mc_w1', [4096, 2048], 0.005),
                'out':
                _variable_with_weight_decay('mc_feawout', [2048, 256], 0.005)
            }
            mc_fea_biases = {
                'b1': _variable_with_weight_decay('mc_b1', [2048], 0.000),
                'out': _variable_with_weight_decay('mc_feabout', [256], 0.000),
            }

        for gpu_index in range(0, gpu_num):
            with tf.device('/gpu:%d' % gpu_index):

                varlist1 = list(sc_fea_weights.values()) + list(sc_fea_biases.values())

                vgg.build(images_placeholder[gpu_index *
                                             FLAGS.batch_size:(gpu_index + 1) *
                                             FLAGS.batch_size, :, :, :])
                train_features = vgg.relu7

                sc_logit = model.get_predict(train_features, keep_pro,
                                             FLAGS.batch_size, sc_fea_weights,
                                             sc_fea_biases)

                loss_name_scope = ('gpud_%d_loss' % gpu_index)

                sc_loss = tower_loss(
                    loss_name_scope, sc_logit,
                    sc_labels_placeholder[gpu_index *
                                          FLAGS.batch_size:(gpu_index + 1) *
                                          FLAGS.batch_size], 100)
                grads1 = opt_sc.compute_gradients(sc_loss, varlist1)
                tower_grads1.append(grads1)
                sc_logits.append(sc_logit)

        sc_logits = tf.concat(sc_logits, 0)
        sc_accuracy = topk_acc(sc_logits, sc_labels_placeholder, 5)
        #sc_accuracy = tower_acc(sc_logits, sc_labels_placeholder)
        tf.summary.scalar('sc_accuracy', sc_accuracy)

        grads1 = average_gradients(tower_grads1)

        apply_gradient_sc = opt_sc.apply_gradients(grads1,
                                                   global_step=global_step)

        train_sc = tf.group(apply_gradient_sc)

        null_op = tf.no_op()

        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver(list(sc_fea_weights.values()) +
                               list(sc_fea_biases.values()))
        init = tf.global_variables_initializer()

        # Create a session for running Ops on the Graph.
        sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        sess.run(init)

        # Create summary writer
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(
            './visual_logs/baseline_sc_visual_logs/train', sess.graph)
        test_writer = tf.summary.FileWriter(
            './visual_logs/baseline_sc_visual_logs/test', sess.graph)
        for step in range(FLAGS.max_steps + 1):

            start_time = time.time()
            train_actions, train_images, train_ac_labels, train_sc_labels, train_mc_labels, _, _ = input_data(
                filename='./list/train.list',
                batch_size=FLAGS.batch_size * gpu_num,
                start_pos=-1,
                shuffle=True)

            sess.run(train_sc,
                     feed_dict={
                         images_placeholder: train_images,
                         sc_labels_placeholder: train_sc_labels,
                         keep_pro: 0.5
                     })

            duration = time.time() - start_time
            print('Batchnum %d: %.3f sec' % (step, duration))

            if step % 50 == 0 or (step + 1) == FLAGS.max_steps:

                print('Step %d/%d: %.3f sec' %
                      (step, FLAGS.max_steps, duration))
                print('Training Data Eval:')
                summary, sc_acc, sc_loss_value = sess.run(
                    [merged, sc_accuracy, sc_loss],
                    feed_dict={
                        images_placeholder: train_images,
                        sc_labels_placeholder: train_sc_labels,
                        keep_pro: 1
                    })

                print("sc_accuracy: " + "{:.5f}".format(sc_acc))
                print('sc_loss= %.2f' % np.mean(sc_loss_value))
                train_writer.add_summary(summary, step)

            if step % 100 == 0 or (step + 1) == FLAGS.max_steps:
                print('Validation Data Eval:')
                val_actions, val_images, val_ac_labels, val_sc_labels, val_mc_labels, _, _ = input_data(
                    filename='./list/test.list',
                    start_pos=-1,
                    batch_size=FLAGS.batch_size * gpu_num,
                    shuffle=True)
                summary, sc_acc, sc_loss_value = sess.run(
                    [merged, sc_accuracy, sc_loss],
                    feed_dict={
                        images_placeholder: val_images,
                        sc_labels_placeholder: val_sc_labels,
                        keep_pro: 1
                    })
                print("sc_accuracy: " + "{:.5f}".format(sc_acc))
                print('sc_loss= %.2f' % np.mean(sc_loss_value))
                test_writer.add_summary(summary, step)
            # Save the model checkpoint periodically.
            if step > 1 and step % 5000 == 0:
                checkpoint_path = os.path.join('./models/baseline_sc_models',
                                               'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=global_step)

    print("done")
Example #3
def week_prediction(state, district, market, variety, mode, obs_no):
    # prices = np.random.randint(10000, 35000, obs_no).tolist() # change this line
    prices = model.get_predict(market, variety, mode, obs_no)
    return jsonify(prices)
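
The jsonify call implies a Flask app; a minimal sketch of how such an endpoint might be wired (the app object and route path are assumptions, not shown in the snippet):

from flask import Flask, jsonify

app = Flask(__name__)

# Hypothetical route; the original decorator is not shown in the snippet.
@app.route('/predict/<state>/<district>/<market>/<variety>/<mode>/<int:obs_no>')
def predict_route(state, district, market, variety, mode, obs_no):
    return week_prediction(state, district, market, variety, mode, obs_no)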
Example #4
def run_training():
  # Get the sets of images and labels for training, validation, and test.
  # Tell TensorFlow that the model will be built into the default Graph.

  # Create model directory
  print('loading and init vgg16.........')
  vgg = vgg16.Vgg16()
  with tf.Graph().as_default():
    global_step = tf.get_variable(
                    'global_step',
                    [],
                    initializer=tf.constant_initializer(0),
                    trainable=False
                    )
    images_placeholder, mc_labels_placeholder, keep_pro = placeholder_inputs(
                    FLAGS.batch_size * gpu_num
                    )
    tower_grads1 = []
    tower_grads2 = []
    tower_grads3 = []
    mc_logits = []

    learning_rate = tf.train.exponential_decay(1e-4,
                                               global_step,
                                               decay_steps=FLAGS.max_steps / 50,
                                               decay_rate=0.99,
                                               staircase=True)
    tf.summary.scalar('learning_rate', learning_rate)
    opt_mc = tf.train.AdamOptimizer(learning_rate)
    with tf.variable_scope('var_name') as var_scope:
      
      sc_fea_weights = {
              'w1': _variable_with_weight_decay('sc_w1', [4096, 2048], 0.005),
              'out': _variable_with_weight_decay('sc_feawout', [2048, 100], 0.005)
              }
      sc_fea_biases = {
              'b1': _variable_with_weight_decay('sc_b1', [2048], 0.000),
              'out': _variable_with_weight_decay('sc_feabout', [100], 0.000),
              }
      ac_fea_weights = {
              'w1': _variable_with_weight_decay('ac_w1', [4096, 2048], 0.005),
              'out': _variable_with_weight_decay('ac_feawout', [2048, 100], 0.005)
              }
      ac_fea_biases = {
              'b1': _variable_with_weight_decay('ac_b1', [2048], 0.000),
              'out': _variable_with_weight_decay('ac_feabout', [100], 0.000),
              }
      mc_fea_weights = {
              'w1': _variable_with_weight_decay('mc_w1', [4096, 2048], 0.005),
              'out': _variable_with_weight_decay('mc_feawout', [2048, 256], 0.005)
              }
      mc_fea_biases = {
              'b1': _variable_with_weight_decay('mc_b1', [2048], 0.000),
              'out': _variable_with_weight_decay('mc_feabout', [256], 0.000),
              }
    for gpu_index in range(0, gpu_num):
      with tf.device('/gpu:%d' % gpu_index):

        varlist1 = list(mc_fea_weights.values()) + list(mc_fea_biases.values())
        
        vgg.build(images_placeholder[gpu_index * FLAGS.batch_size:(gpu_index + 1) * FLAGS.batch_size, :, :, :])
        train_features = vgg.relu7
        
        mc_logit = model.get_predict(
                        train_features,
                        keep_pro,
                        FLAGS.batch_size,
                        mc_fea_weights,
                        mc_fea_biases
                        )
        
        
        loss_name_scope = ('gpud_%d_loss' % gpu_index)
        '''
        regularizer = tf.contrib.layers.l1_regularizer(0.1)
        with tf.variable_scope('var', initializer=tf.random_normal_initializer(), 
        regularizer=regularizer):
            weight = tf.get_variable('weight', shape=[8], initializer=tf.ones_initializer())
        with tf.variable_scope('var2', initializer=tf.random_normal_initializer(), 
        regularizer=regularizer):
            weight2 = tf.get_variable('weight', shape=[8], initializer=tf.ones_initializer())

        regularization_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
        '''

        mc_loss = tower_loss(
                        loss_name_scope+'_scene',
                        mc_logit,
                        mc_labels_placeholder[gpu_index * FLAGS.batch_size:(gpu_index + 1) * FLAGS.batch_size]
                        )
        grads1 = opt_mc.compute_gradients(mc_loss, varlist1)
        tower_grads1.append(grads1)
        mc_logits.append(mc_logit)

    mc_logits = tf.concat(mc_logits, 0)
    predictions = tf.nn.top_k(tf.nn.softmax(mc_logits), 5)
    #mc_accuracy = tower_acc(mc_logits, mc_labels_placeholder)
    mc_accuracy = topk_acc(tf.nn.softmax(mc_logits), mc_labels_placeholder, 5)
    
    grads1 = average_gradients(tower_grads1)
    
    apply_gradient_mc = opt_mc.apply_gradients(grads1, global_step=global_step)

    '''
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    '''
    train_mc = tf.group(apply_gradient_mc)
    
    null_op = tf.no_op()

    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver(list(mc_fea_weights.values()) +
                           list(mc_fea_biases.values()))
    init = tf.global_variables_initializer()

    # Create a session for running Ops on the Graph.
    sess = tf.Session(
                    config=tf.ConfigProto(allow_soft_placement=True)
                    )
    sess.run(init)

    # Create summary writter
    merged = tf.summary.merge_all()
    
  ckpt = tf.train.get_checkpoint_state(pre_model_save_dir)
  if ckpt and ckpt.model_checkpoint_path:
    print("loading checkpoint, waiting......")
    saver.restore(sess, ckpt.model_checkpoint_path)
    print("load complete!")
    
  next_start_pos = 0
  predict_labels = []
  for step in range(FLAGS.max_steps):
      
    start_time = time.time()
    print('TEST Data Eval:')
    val_actions, val_images, val_ac_labels, val_sc_labels, val_mc_labels, next_start_pos, _ = input_data(
                        filename='./list/test.list',
                        start_pos=next_start_pos,
                        batch_size=FLAGS.batch_size * gpu_num,
                        shuffle=False)
    predict, mc_acc, mc_loss_value = sess.run(
        [predictions, mc_accuracy, mc_loss],
        feed_dict={
            images_placeholder: val_images,
            mc_labels_placeholder: val_mc_labels,
            keep_pro: 1
        })
    print("mc_accuracy: " + "{:.5f}".format(mc_acc))
    print('mc_loss= %.2f' % np.mean(mc_loss_value))
    for i in range(FLAGS.batch_size):
        predict_labels.append(predict[1][i])

    duration = time.time() - start_time
    print('Batchnum %d: %.3f sec' % (step+1, duration))
    #print predict_labels
    #print val_mc_labels

  print("get_predict_label_done!")
  return predict_labels
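
average_gradients is also defined elsewhere; multi-GPU TF1 code of this shape usually averages per-tower gradients the way the TensorFlow CIFAR-10 multi-GPU tutorial does. A sketch under that assumption:

def average_gradients(tower_grads):
    # tower_grads: one list of (gradient, variable) pairs per GPU tower
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # Stack this variable's gradients from every tower and average them
        grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
        grad = tf.reduce_mean(tf.concat(grads, 0), 0)
        # The variable is shared across towers, so take the first tower's
        average_grads.append((grad, grad_and_vars[0][1]))
    return average_grads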
Example #5
def run_training():
    # Get the sets of images and labels for training, validation, and test.
    # Tell TensorFlow that the model will be built into the default Graph.

    # Create model directory
    print('loading and init vgg16.........')
    vgg = vgg16.Vgg16()
    with tf.Graph().as_default():
        global_step = tf.get_variable('global_step', [],
                                      initializer=tf.constant_initializer(0),
                                      trainable=False)
        images_placeholder, sc_labels_placeholder, ac_labels_placeholder, mc_labels_placeholder, keep_pro = placeholder_inputs(
            FLAGS.batch_size * gpu_num)
        tower_grads1 = []
        tower_grads2 = []
        tower_grads3 = []
        multi_logits = []

        learning_rate = tf.train.exponential_decay(
            1e-4,
            global_step,
            decay_steps=FLAGS.max_steps / 50,
            decay_rate=0.99,
            staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)
        opt_multi = tf.train.AdamOptimizer(learning_rate)
        with tf.variable_scope('var_name') as var_scope:

            multi_fea_weights = {
                'w1':
                _variable_with_weight_decay('multi_w1', [4096, 2048], 0.005),
                'out':
                _variable_with_weight_decay('multi_feawout', [2048, 456],
                                            0.005)
            }
            multi_fea_biases = {
                'b1': _variable_with_weight_decay('multi_b1', [2048], 0.000),
                'out': _variable_with_weight_decay('multi_feabout', [456],
                                                   0.000),
            }
            sc_fea_weights = {
                'w1':
                _variable_with_weight_decay('sc_w1', [4096, 2048], 0.005),
                'out':
                _variable_with_weight_decay('sc_feawout', [2048, 100], 0.005)
            }
            sc_fea_biases = {
                'b1': _variable_with_weight_decay('sc_b1', [2048], 0.000),
                'out': _variable_with_weight_decay('sc_feabout', [100], 0.000),
            }
            ac_fea_weights = {
                'w1':
                _variable_with_weight_decay('ac_w1', [4096, 2048], 0.005),
                'out':
                _variable_with_weight_decay('ac_feawout', [2048, 100], 0.005)
            }
            ac_fea_biases = {
                'b1': _variable_with_weight_decay('ac_b1', [2048], 0.000),
                'out': _variable_with_weight_decay('ac_feabout', [100], 0.000),
            }
            mc_fea_weights = {
                'w1':
                _variable_with_weight_decay('mc_w1', [4096, 2048], 0.005),
                'out':
                _variable_with_weight_decay('mc_feawout', [2048, 256], 0.005)
            }
            mc_fea_biases = {
                'b1': _variable_with_weight_decay('mc_b1', [2048], 0.000),
                'out': _variable_with_weight_decay('mc_feabout', [256], 0.000),
            }

        for gpu_index in range(0, gpu_num):
            with tf.device('/gpu:%d' % gpu_index):

                varlist1 = (list(multi_fea_weights.values()) +
                            list(multi_fea_biases.values()))

                vgg.build(images_placeholder[gpu_index *
                                             FLAGS.batch_size:(gpu_index + 1) *
                                             FLAGS.batch_size, :, :, :])
                train_features = vgg.fc7

                multi_logit = model.get_predict(train_features, keep_pro,
                                                FLAGS.batch_size,
                                                multi_fea_weights,
                                                multi_fea_biases)

                loss_name_scope = ('gpud_%d_loss' % gpu_index)

                multi_loss = tower_loss(
                    'multi', multi_logit,
                    sc_labels_placeholder[gpu_index *
                                          FLAGS.batch_size:(gpu_index + 1) *
                                          FLAGS.batch_size],
                    ac_labels_placeholder[gpu_index *
                                          FLAGS.batch_size:(gpu_index + 1) *
                                          FLAGS.batch_size],
                    mc_labels_placeholder[gpu_index *
                                          FLAGS.batch_size:(gpu_index + 1) *
                                          FLAGS.batch_size])
                grads1 = opt_multi.compute_gradients(multi_loss, varlist1)
                tower_grads1.append(grads1)
                multi_logits.append(multi_logit)

        multi_logits = tf.concat(multi_logits, 0)
        sc_logits = tf.slice(multi_logits, [0, 0], [6, 100])  # 6 is presumably FLAGS.batch_size * gpu_num, hardcoded
        sc_predictions = tf.nn.top_k(tf.nn.softmax(sc_logits), 5)
        sc_accuracy = topk_acc(sc_logits, sc_labels_placeholder, 5)
        #sc_accuracy = tower_acc(sc_logits, sc_labels_placeholder)
        tf.summary.scalar('sc_accuracy', sc_accuracy)
        ac_logits = tf.slice(multi_logits, [0, 100], [6, 100])
        ac_predictions = tf.nn.top_k(tf.nn.softmax(ac_logits), 5)
        ac_accuracy = topk_acc(ac_logits, ac_labels_placeholder, 5)
        #ac_accuracy = tower_acc(ac_logits, ac_labels_placeholder)
        tf.summary.scalar('ac_accuracy', ac_accuracy)
        mc_logits = tf.slice(multi_logits, [0, 200], [6, 256])
        mc_predictions = tf.nn.top_k(tf.nn.softmax(mc_logits), 5)
        mc_accuracy = topk_acc(mc_logits, mc_labels_placeholder, 5)
        #mc_accuracy = tower_acc(mc_logits, mc_labels_placeholder)
        tf.summary.scalar('mc_accuracy', mc_accuracy)

        grads1 = average_gradients(tower_grads1)

        apply_gradient_multi = opt_multi.apply_gradients(
            grads1, global_step=global_step)

        train_multi = tf.group(apply_gradient_multi)

        null_op = tf.no_op()

        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver(list(multi_fea_weights.values()) +
                               list(multi_fea_biases.values()))
        init = tf.global_variables_initializer()

        # Create a session for running Ops on the Graph.
        sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        sess.run(init)

    ckpt = tf.train.get_checkpoint_state(pre_model_save_dir)
    if ckpt and ckpt.model_checkpoint_path:
        print "loading checkpoint,waiting......"
        saver.restore(sess, ckpt.model_checkpoint_path)
        print "load complete!"
    next_start_pos = 0
    sc_predict_labels = []
    ac_predict_labels = []
    mc_predict_labels = []
    for step in range(FLAGS.max_steps):

        start_time = time.time()
        print('TEST Data Eval:')
        val_actions, val_images, val_ac_labels, val_sc_labels, val_mc_labels, next_start_pos, _ = input_data(
            filename='./list/test.list',
            start_pos=next_start_pos,
            batch_size=FLAGS.batch_size * gpu_num,
            shuffle=False)

        sc_predict, ac_predict, mc_predict, sc_acc, ac_acc, mc_acc = sess.run(
            [
                sc_predictions, ac_predictions, mc_predictions, sc_accuracy,
                ac_accuracy, mc_accuracy
            ],
            feed_dict={
                images_placeholder: val_images,
                ac_labels_placeholder: val_ac_labels,
                sc_labels_placeholder: val_sc_labels,
                mc_labels_placeholder: val_mc_labels,
                keep_pro: 1
            })
        #print (ac_predict)
        for i in range(FLAGS.batch_size):
            sc_predict_labels.append(sc_predict[1][i])
            ac_predict_labels.append(ac_predict[1][i])
            mc_predict_labels.append(mc_predict[1][i])

        duration = time.time() - start_time
        print('Batchnum %d: %.3f sec' % (step + 1, duration))
        #print predict_labels
        #print val_mc_labels

    print("get_predict_label_done!")
    return sc_predict_labels, ac_predict_labels, mc_predict_labels
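
_variable_with_weight_decay follows the naming of the TensorFlow CIFAR-10 tutorial helper; a minimal sketch of that pattern (the initializer choice is an assumption):

def _variable_with_weight_decay(name, shape, wd):
    # Create the variable; when wd > 0, add an L2 penalty to the 'losses'
    # collection so tower_loss can fold it into the total loss.
    var = tf.get_variable(
        name, shape, initializer=tf.truncated_normal_initializer(stddev=0.04))
    if wd is not None and wd > 0:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var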
Example #6
                          aspect_max_len=22,
                          embedding_matrix=embedding_matrix,
                          position_embedding_matrix=position_matrix,
                          num_words=5144)

    evaluator = Evaluator(true_labels=test_true_labels,
                          sentences=test_sentence_inputs,
                          aspects=test_aspect_text_inputs)

    epoch = 1
    while epoch <= 50:
        model = m.train_model(sentence_inputs=train_sentence_inputs,
                              position_inputs=train_positions,
                              aspect_input=train_aspects,
                              labels=train_aspect_labels,
                              model=model)
        results = m.get_predict(sentence_inputs=test_sentence_inputs,
                                position_inputs=test_positions,
                                aspect_input=test_aspects,
                                model=model)
        print("\n--epoch"+str(epoch)+"--")
        F, acc = evaluator.get_macro_f1(predictions=results, epoch=epoch)
        if epoch % 2 == 0:
            print("current max f1 score"+str(evaluator.max_F1))
            print("max f1 is gained in epoch"+str(evaluator.max_F1_epoch))
            print("current max acc"+str(evaluator.max_acc))
            print("max acc is gained in epoch"+str(evaluator.max_acc_epoch))
        print("happy ending")

        if acc > 0.8000 or F > 0.7100:
            model.save_weights(model_path + "_acc_" + str(acc * 100) +
                               "_F_" + str(F * 100) + "_" + str(epoch))
        epoch += 1
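
Evaluator is external to the snippet; its get_macro_f1 presumably argmaxes the class scores and compares them with the stored true labels. A minimal sketch of that computation with scikit-learn (the function below is illustrative, not the original class method):

import numpy as np
from sklearn.metrics import accuracy_score, f1_score

def macro_f1_and_acc(true_labels, predictions):
    # predictions: per-class scores of shape (n_samples, n_classes)
    pred_classes = np.argmax(predictions, axis=-1)
    return (f1_score(true_labels, pred_classes, average='macro'),
            accuracy_score(true_labels, pred_classes))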