Code example #1
# Standard-library imports for this excerpt; `regression` and `create_json`
# are project helpers assumed to be importable from the application module.
import json
import random
import time
import unittest
from datetime import datetime, timedelta


class TestPickleReadingAndForecast(unittest.TestCase):
    # Simulating backend
    days = random.randint(1, 13)

    start_time = time.time()
    state_data, forecast_dates = regression(days)
    end_time = time.time() - start_time

    create_json(state_data, forecast_dates)

    with open('forecast_data.json') as f:
        data = json.load(f)

    def test_forecast(self):
        # 29 days from the last submission date, which is a day less
        # than the current date
        thirty_days_out = datetime.today() + timedelta(29)
        thirty_days_out_str = datetime.strftime(thirty_days_out, '%Y-%m-%d')
        all_dates_thirty_out = True

        for i, state in enumerate(self.__class__.state_data):
            date = self.__class__.data[i]['data'][29]['date'].split(' ')[0]
            all_dates_thirty_out = all_dates_thirty_out and (
                date == thirty_days_out_str)

        self.assertTrue(all_dates_thirty_out, 'Should be True')

    def test_pickle_loading(self):
        self.assertTrue(self.__class__.end_time < 60.0, 'Should be True')
Code example #2
File: main.py  Project: xingjunxia725/deepLearning
# Imports for this excerpt; `regression` and `cnn` are the project's model
# builders, assumed to be defined elsewhere in main.py.
import numpy as np
import tensorflow as tf
from flask import jsonify, request


def mnist():

    sess = tf.Session()
    x = tf.placeholder(dtype=tf.float32, shape=[None, 28, 28, 1])
    with tf.variable_scope("reg"):
        pred_reg, variables_reg = regression(x)
    saver = tf.train.Saver(variables_reg)
    saver.restore(sess, "my_net/reg_net.ckpt")

    with tf.variable_scope("cnn"):
        keep_prob = tf.placeholder(dtype=tf.float32)
        pred_cnn, variables_cnn = cnn(x, keep_prob)
    saver = tf.train.Saver(variables_cnn)
    saver.restore(sess, "my_net/cnn_net.ckpt")

    def calc_reg(input):
        return sess.run(pred_reg, feed_dict={x: input}).flatten().tolist()

    def calc_cnn(input):
        return sess.run(pred_cnn, feed_dict={
            x: input,
            keep_prob: 1
        }).flatten().tolist()

    input = ((255 - np.array(request.json, dtype=np.uint8)) / 255.0).reshape(
        1, 28, 28, 1)
    output1 = calc_reg(input)
    print(output1)
    output2 = calc_cnn(input)
    print(output2)
    sess.close()
    return jsonify(results=[output1, output2])
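In the full main.py this handler is presumably exposed as a Flask route so that request.json carries the drawn image; a minimal, hypothetical wiring sketch (the route path and app setup are not shown in the excerpt):

from flask import Flask

app = Flask(__name__)

# Hypothetical route registration; the actual URL path is not in the excerpt.
app.add_url_rule('/api/mnist', 'mnist', mnist, methods=['POST'])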
Code example #3
    def test_retrain(self):
        days = 14  # two weeks

        start_time = time.time()
        state_data, forecast_dates = regression(days)
        end_time = time.time() - start_time

        self.assertTrue(end_time >= 60.0, 'Should be True')
Code example #4
    def __init__(self, model_name, checkpoint_dir):
        self.graph = tf.Graph()  # create a separate graph for each instance
        self.model_name = model_name

        with self.graph.as_default():
            self.x = tf.placeholder(tf.float32, [None, 784])
            self.keep_prob = tf.placeholder(tf.float32)

            if self.model_name == 'regression':
                self.output = model.regression(self.x)
            else:
                self.output = model.cnn(self.x, self.keep_prob)
            self.saver = tf.train.Saver()

        self.sess = tf.Session(graph=self.graph)
        with self.graph.as_default():
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                self.saver.restore(self.sess, ckpt.model_checkpoint_path)
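The constructor above gives each instance its own tf.Graph and tf.Session, so two checkpoints can be restored side by side without variable-name collisions. A minimal usage sketch, assuming the enclosing class is named Model (the class name is not shown in the excerpt) and placeholder checkpoint directories:

import numpy as np

reg = Model('regression', 'checkpoints/regression')  # hypothetical paths
cnn = Model('cnn', 'checkpoints/cnn')

image = np.zeros((1, 784), dtype=np.float32)  # one flattened 28x28 image
reg_scores = reg.sess.run(reg.output, feed_dict={reg.x: image})
cnn_scores = cnn.sess.run(cnn.output,
                          feed_dict={cnn.x: image, cnn.keep_prob: 1.0})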
Code example #5
File: test-U029.py  Project: SJJacques/myCovidTracker
class TestModel(unittest.TestCase):
    # `regression` and the `states` list used below are assumed to be
    # imported from the project module under test.
    state_data = regression()

    def test_accuracy(self):
        avg_accuracy = sum([
            state.metrics.model_accuracy for state in self.__class__.state_data
        ]) / 50
        self.assertTrue(avg_accuracy >= 0.95, 'Should be True')

    def test_forecast_length(self):
        forecasts_thirty_days = True

        for state in self.__class__.state_data:
            forecasts_thirty_days = forecasts_thirty_days and len(
                state.metrics.predictions) == 30

        self.assertTrue(forecasts_thirty_days, 'Should be True')

    def test_each_state_has_own_object(self):
        all_states_have_data = True

        for i, state in enumerate(states):
            all_states_have_data = all_states_have_data and self.__class__.state_data[
                i].name == state

        self.assertTrue(all_states_have_data, 'Should be True')

    def test_each_state_has_pickle(self):
        all_states_have_pickle = True
        dir_path = os.path.dirname(os.path.realpath(__file__))

        for state in states:
            pickle_path = dir_path + '/pickle_files/' + state + '_model.pickle'
            all_states_have_pickle = all_states_have_pickle and os.path.exists(
                pickle_path)

        self.assertTrue(all_states_have_pickle)

    def test_existence_of_json(self):
        dir_path = os.path.dirname(os.path.realpath(__file__))
        path_to_json = dir_path + '/forecast_data.json'
        self.assertTrue(os.path.exists(path_to_json), 'Should be True')
Code example #6
import tensorflow as tf
import model


x = tf.placeholder(tf.float32, [None, 784])

sess = tf.Session()

with tf.variable_scope("regression"):
    y1, variables = model.regression(x)

saver = tf.train.Saver(variables)
saver.restore(sess, "../models/regression.ckpt")

with tf.variable_scope("convolutional"):
    keep_prob = tf.placeholder(tf.float32)
    y2, variables = model.convolutional(x, keep_prob)

saver = tf.train.Saver(variables)
saver.restore(sess, "../models/convolutional.ckpt")


def regression(input):
    return sess.run(y1, feed_dict={x: input}).flatten().tolist()


def convolutional(input):
    return sess.run(y2, feed_dict={x: input, keep_prob: 1.0}).flatten().tolist()
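With both checkpoints restored, the module-level regression and convolutional helpers accept any batch of flattened 28x28 images. A minimal sketch (the all-zero input is illustrative only):

import numpy as np

sample = np.zeros((1, 784), dtype=np.float32)  # [batch, 784], as the placeholder expects
print(regression(sample))     # ten class scores from the linear model
print(convolutional(sample))  # ten class scores from the CNN, dropout disabled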
Code example #7
# -*- coding: utf-8 -*-
# author: yaoyao  time: 2019/7/24
"""Convolution"""

import os
import input_data
import tensorflow as tf
import model
# 下载数据集
data = input_data.read_data_sets("MNIST_data", one_hot=True)

#创建模型

with tf.variable_scope("regession"):
    x = tf.placeholder(tf.float32, [None, 784])
    y, varibles = model.regression(x)

# 训练
y_ = tf.placeholder("float", [None, 10])
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Save the trained parameters
saver = tf.train.Saver(variables)

#开始训练
with tf.Session() as sess:
    #初始化所有的变量
    sess.run(tf.global_variables_initializer())
Code example #8
File: regression.py  Project: pzs741/piggyrush
import os

import tensorflow as tf
from model import regression
from tensorflow.examples.tutorials.mnist import input_data

data = input_data.read_data_sets("MNIST_data", one_hot=True)

# model (shared variables)
with tf.variable_scope("regression"):
    x = tf.placeholder(tf.float32, [None, 784])
    # softmax function
    y, variables = regression(x)

# train
y_ = tf.placeholder("float", [None, 10])
# 交叉熵
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
# 梯度下降
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
'''
tf.argmax(input, axis=None, name=None, dimension=None)
Computes the index of the largest value along the rows or columns of a matrix.

Parameters
input: the input Tensor
axis: 0 takes the argmax down each column, 1 along each row
name: an optional name for the operation
dimension: same purpose as axis; axis takes precedence if both are given (a newly added field)
'''
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
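The docstring above describes tf.argmax; here is a quick standalone check of the axis semantics (a minimal sketch, separate from the training script):

import tensorflow as tf

m = tf.constant([[1.0, 9.0, 3.0],
                 [7.0, 2.0, 5.0]])
with tf.Session() as s:
    print(s.run(tf.argmax(m, 0)))  # per column -> [1 0 1]
    print(s.run(tf.argmax(m, 1)))  # per row    -> [1 0]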
Code example #9
import os
import model
import tensorflow as tf

from tensorflow.examples.tutorials.mnist import input_data
data = input_data.read_data_sets("/tmp/data/", one_hot=True)

# model
with tf.variable_scope("regression"):
    x = tf.placeholder(tf.float32, [None, 784])
    y, variables = model.regression(x)

# train
y_ = tf.placeholder("float", [None, 10])
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

saver = tf.train.Saver(variables)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(1000):
        batch_xs, batch_ys = data.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

    print(sess.run(accuracy, feed_dict={x: data.test.images, y_: data.test.labels}))

    path = saver.save(
        sess, os.path.join(os.path.dirname(__file__), 'data', 'regression.ckpt'),
        write_meta_graph=False, write_state=False)
Code example #10
import tensorflow as tf
import input_data
import model
import os

os.environ['CUDA_VISIBLE_DEVICES'] = '1'

mnist = input_data.read_data_sets('mnist_data/', one_hot=True)

x = tf.placeholder(tf.float32, [None, 784])
y = model.regression(x)
y_ = tf.placeholder(tf.float32, [None, 10])

cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(1000):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
        print(
            i,
            sess.run(accuracy,
                     feed_dict={
                         x: mnist.test.images,
                         y_: mnist.test.labels
                     }))
Code example #11
import tensorflow as tf

import model  # the project's model definitions (assumed importable)
from tensorflow.examples.tutorials.mnist import input_data
data = input_data.read_data_sets(
    "/home/shiva/junk/tf-learning/mnist/mnist/input_data", one_hot=True)

LEARNING_RATE = 1e-4
N_TRAIN_STEPS = 20000
BATCH_SIZE = 100
# model
x = tf.placeholder(tf.float32, [None, 784])

with tf.variable_scope("perceptron"):
    y_percep, perceptron_variables = model.multilayer_perceptron(x)

with tf.variable_scope("regression"):
    y_reg, regression_variables = model.regression(x)

with tf.variable_scope("convolutional"):
    keep_prob = tf.placeholder(tf.float32)
    y_conv, conv_variables = model.convolutional(x, keep_prob)

with tf.variable_scope("rnn"):
    y_rnn, _ = model.rnn_network(x)
rnn_variables = tf.get_collection(
    tf.GraphKeys.GLOBAL_VARIABLES, scope='rnn')  # every variable created under the 'rnn' scope
# train
y_ = tf.placeholder(tf.float32, [None, 10])

cross_entropy_reg = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_reg))
cross_entropy_conv = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
Code example #12
import os
import model
import tensorflow as tf

from tensorflow.examples.tutorials.mnist import input_data
data = input_data.read_data_sets("/tmp/data/", one_hot=True)

# model
with tf.variable_scope("regression"):
    x = tf.placeholder(tf.float32, [None, 784])
    logits, variables = model.regression(x)
    y = tf.nn.softmax(logits)

# train
y_ = tf.placeholder("float", [None, 10])
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_,
                                                        logits=logits)
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

saver = tf.train.Saver(variables)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(10000):
        batch_xs, batch_ys = data.train.next_batch(100)
        if i % 1000 == 0:
            train_accuracy = accuracy.eval(feed_dict={
                x: batch_xs,
                y_: batch_ys
            })
            print('step %d, training accuracy %g' % (i, train_accuracy))
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
Code example #13
def plot_trend(roi):
    '''Take a dataframe, smooth the actual data, run the regression, and plot the predicted values'''

    import numpy as np
    from scipy.interpolate import interp1d

    #smoothing actual data
    x = roi['t']
    y = roi['vol']

    x_smooth = np.linspace(x.min(), x.max(), 500)

    f = interp1d(x, y, kind='quadratic')
    y_smooth = f(x_smooth)

    # import plotting libraries
    import matplotlib.pyplot as plt
    import seaborn as sns

    #generate predict dataframe
    predict = regression(roi)

    #set up figure and alter settings
    fig, ax1 = plt.subplots(figsize=(20, 8.5))
    plt.rcParams.update({'font.size': 14})
    sns.set()

    #plot smoothed actuals and regression with upper and lower bounds
    ax1.plot(x_smooth,
             y_smooth,
             color='xkcd:cobalt',
             label='VOL',
             linewidth=3.5)
    ax1.plot(predict['t'],
             predict['prediction'],
             color='xkcd:darkgreen',
             label='Prediction',
             linestyle=':',
             linewidth=2.5)
    ax1.plot(predict['t'],
             predict['Lower Bound'],
             color='xkcd:darkgreen',
             label='Lower Bound',
             linestyle=':',
             linewidth=1)
    ax1.plot(predict['t'],
             predict['Upper Bound'],
             color='xkcd:darkgreen',
             label='Upper Bound',
             linestyle=':',
             linewidth=1)
    ax1.set_xlabel('Time Period (Weeks)')
    ax1.set_ylabel('Volume')
    ax1.set_xlim(0, 52)
    ax1.set_ylim(0, 350000)
    ax1.set_xticks(range(0, 53, 2))

    # set title, legend, and save the figure
    plt.title('Final Target Contest Trend')
    plt.legend(loc='lower center')
    fig.savefig('data/trend.png', dpi=fig.dpi)
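A hedged usage sketch for plot_trend: it expects a dataframe with 't' and 'vol' columns, and relies on the project's regression(roi) returning a dataframe with 't', 'prediction', 'Lower Bound', and 'Upper Bound' columns (inferred from the plotting calls above). The synthetic data below is illustrative only:

import numpy as np
import pandas as pd

weeks = np.arange(0, 53)
roi = pd.DataFrame({
    't': weeks,
    'vol': 150000 + 2000 * weeks + 5000 * np.random.randn(53),  # fake volumes
})
plot_trend(roi)  # writes data/trend.png (the data/ directory must exist)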
Code example #14
import os
# import input_data  # a local module that also downloads the data
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import model  # the project's model.py

# Download the data
data = input_data.read_data_sets('MNIST_data/', one_hot=True)  # downloads into MNIST_data/

# 建立回归模型
with tf.variable_scope("regression"):
    x = tf.placeholder(tf.float32, [None, 784])   # placeholder占位符-待用户输入
    y, variables = model.regression(x) # model.regression引用函数

# 训练
y_ = tf.placeholder('float', [None, 10])
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(
    cross_entropy)  # 优化器GradientDescentOptimizer
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) # 预测
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # 准确率

# Save the trained parameters
saver = tf.train.Saver(variables)

# Start training
with tf.Session() as sess:
    # Initialize all variables
    sess.run(tf.global_variables_initializer())
    # Train for 1000 steps
    for _ in range(1000):
        batch_xs, batch_ys = data.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})