# MLSQL sklearn training script: fit an SVC on the full training set,
# report a score on the validation split (if any), and persist the model.
# NOTE(review): the original line breaks were lost (script collapsed onto one
# physical line); layout reconstructed to match the upstream MLSQL sklearn
# example, with the model saved unconditionally at the end — confirm against
# the canonical example if behavior without validation data matters.
import mlsql_model
import mlsql
from sklearn.svm import SVC

model = SVC()
# The MLSQL runtime injects user-configured hyperparameters into the estimator.
mlsql.sklearn_configure_params(model)

train_X, train_y = mlsql.sklearn_all_data()
model.fit(train_X, train_y)

val_X, val_y = mlsql.get_validate_data()
if len(val_X) > 0:
    validation_score = model.score(val_X, val_y)
    # Keep this exact prefix/format string: the surrounding platform
    # presumably scrapes it from stdout — verify before changing.
    print("mlsql_validation_score:%f" % validation_score)

mlsql_model.sk_save_model(model)
# NOTE(review): TRUNCATED fragment of a TensorFlow 1.x graph-mode training
# script, collapsed onto a single physical line. It is cut off inside the
# final `sess.run(...)` feed_dict and is NOT valid standalone Python; the
# nested loop/if indentation cannot be recovered with certainty, so the code
# is left byte-identical. Visible behavior: defines an accuracy metric and
# scalar summary, merges summaries, initializes variables (FileWriter setup is
# commented out), creates a Saver, fetches validation data via mlsql
# (presumably Spark vectors — `.toArray()` per item), then runs an
# epochs x batches training loop, evaluating train accuracy/summary/loss every
# `print_interval` global steps. `rd`, `epochs`, `batch_size`, `input_col`,
# `label_col`, `input_x`, `input_y`, `train_step`, `global_step`, `xent`,
# `sess` are all defined outside this fragment — TODO restore original
# line breaks before any further edit.
accurate = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name="accuracy") tf.summary.scalar("accuracy", accurate) summ = tf.summary.merge_all() sess.run(tf.global_variables_initializer()) # writer = tf.summary.FileWriter(TENSOR_BORAD_DIR) # writer.add_graph(sess.graph) # # writer0 = tf.summary.FileWriter(TENSOR_BORAD_DIR + "/0") # writer0.add_graph(sess.graph) saver = tf.train.Saver() TEST_X, TEST_Y = mlsql.get_validate_data() TEST_Y = [item.toArray() for item in TEST_Y] for ep in range(epochs): for items in rd(max_records=batch_size): X = [item[input_col].toArray() for item in items] Y = [item[label_col].toArray() for item in items] _, gs = sess.run([train_step, global_step], feed_dict={ input_x: X, input_y: Y }) if gs % print_interval == 0: [train_accuracy, s, loss] = sess.run([accurate, summ, xent], feed_dict={ input_x: X, input_y: Y
# NOTE(review): TRUNCATED fragment of a second TensorFlow 1.x training script
# variant, collapsed onto one physical line. It opens with a stray `)` whose
# matching construct precedes this view, and it ends mid
# `sess.run([accurate, summ, xent], ...)` call — NOT valid standalone Python,
# so the code is left byte-identical. Visible behavior: logs an `xent`
# summary, builds an Adam(0.001) train step on `xent`, defines argmax-based
# accuracy + summary, initializes variables, fetches validation data via
# mlsql, then trains over epochs/batches, guarding against empty batches
# ("bad news ..." message) and evaluating train metrics every `print_interval`
# steps. Differs from the sibling fragment on line 2 by the empty-batch guard
# and the test-set evaluation. TODO restore original line breaks before
# editing further.
) tf.summary.scalar("xent", xent) with tf.name_scope("train"): train_step = tf.train.AdamOptimizer(0.001).minimize(xent, global_step=global_step) with tf.name_scope("accuracy"): correct_prediction = tf.equal(tf.argmax(_logits, 1), tf.argmax(input_y, 1)) accurate = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name="accuracy") tf.summary.scalar("accuracy", accurate) summ = tf.summary.merge_all() sess.run(tf.global_variables_initializer()) TEST_X, TEST_Y = mlsql.get_validate_data() TEST_Y = [item.toArray() for item in TEST_Y] for ep in range(epochs): for items in rd(max_records=batch_size): X = [item[input_col].toArray() for item in items] Y = [item[label_col].toArray() for item in items] if len(X) == 0: print("bad news , this round no message fetched") if len(X) > 0: _, gs = sess.run([train_step, global_step], feed_dict={input_x: X, input_y: Y}) if gs % print_interval == 0: [train_accuracy, s, loss] = sess.run([accurate, summ, xent], feed_dict={input_x: X, input_y: Y}) [test_accuracy, test_s, test_lost] = sess.run([accurate, summ, xent],
# NOTE(review): TRUNCATED fragment of a third TensorFlow 1.x training script
# variant, collapsed onto one physical line and cut off immediately after the
# first `sess.run` in the training loop — NOT valid standalone Python, so the
# code is left byte-identical. Visible behavior: builds an Adam(0.001) train
# step on `xent`, argmax-based accuracy + scalar summary, merges summaries,
# initializes variables. Unlike the fragment on line 3, validation data here
# arrives as row items from which both `input_col` and `label_col` are
# extracted (`.toArray()` per item — presumably Spark vectors). Then loops
# over epochs/batches with the same empty-batch guard. `_logits`, `input_x`,
# `input_y`, `xent`, `global_step`, `sess`, `rd`, `mlsql` and the various
# hyperparameter names are defined outside this fragment. TODO restore
# original line breaks before editing further.
with tf.name_scope("train"): train_step = tf.train.AdamOptimizer(0.001).minimize( xent, global_step=global_step) with tf.name_scope("accuracy"): correct_prediction = tf.equal(tf.argmax(_logits, 1), tf.argmax(input_y, 1)) accurate = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name="accuracy") tf.summary.scalar("accuracy", accurate) summ = tf.summary.merge_all() sess.run(tf.global_variables_initializer()) test_items = mlsql.get_validate_data() TEST_X = [item[input_col].toArray() for item in test_items] TEST_Y = [item[label_col].toArray() for item in test_items] for ep in range(epochs): for items in rd(max_records=batch_size): X = [item[input_col].toArray() for item in items] Y = [item[label_col].toArray() for item in items] if len(X) == 0: print("bad news , this round no message fetched") if len(X) > 0: _, gs = sess.run([train_step, global_step], feed_dict={ input_x: X, input_y: Y })