Code Example #1
import os

from helper.data_helper import DataHelper


class EnvSetup:
    SITE = os.getenv('SITE', 'https://unsplash.com')
    API = os.getenv('API', 'https://unsplash.com/napi')
    TOKEN = os.getenv(
        'TOKEN',
        '42ef72d98b48573570f34df9496bdfc643607580f0246152413601e1bd2d5a04')
    PAGE_LOAD_TIMEOUT_SECONDS = 60
    SELENIUM_TIMEOUT_SECONDS = 60

    HEADLESS = DataHelper.str_to_bool(os.getenv('HEADLESS', 'False'))
    NO_SANDBOX = DataHelper.str_to_bool(os.getenv('NO_SANDBOX', 'True'))
    BROWSER_NAME = os.getenv('BROWSER', 'Chrome')
    PLATFORM = os.getenv('PLATFORM', 'WINDOWS')

    SAUCE_LABS = DataHelper.str_to_bool(os.getenv('SAUCE_LABS', 'True'))
    BUILD_TAG = os.getenv('BUILD_TAG', 'Default Sauce Labs Build')
    SAUCE_LABS_RDC_USER = '******'
    SAUCE_LABS_RDC_KEY = '26fcdbb3-0a93-4f51-bc44-8f00e81f46bf'
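
The class above only collects settings. A minimal usage sketch follows, assuming a local Chrome driver; the selenium wiring here is illustrative and not part of the source snippet.

from selenium import webdriver

options = webdriver.ChromeOptions()
if EnvSetup.HEADLESS:
    options.add_argument('--headless')
if EnvSetup.NO_SANDBOX:
    options.add_argument('--no-sandbox')
driver = webdriver.Chrome(options=options)
driver.set_page_load_timeout(EnvSetup.PAGE_LOAD_TIMEOUT_SECONDS)
driver.get(EnvSetup.SITE)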
Code Example #2
import tensorflow as tf

from helper.data_helper import DataHelper

imdb = tf.keras.datasets.imdb

vocab_size = 10000
embed_size = 32
seq_len = 256
batch_size = 512
nb_epoch = 500

(train_x, train_y), (test_x, test_y) = imdb.load_data(num_words=vocab_size)

train_x = tf.keras.preprocessing.sequence.pad_sequences(train_x,
                                                        maxlen=seq_len)
test_x = tf.keras.preprocessing.sequence.pad_sequences(test_x, maxlen=seq_len)

data_helper = DataHelper(train_x, train_y)

# Input
X = tf.placeholder(dtype=tf.int32, shape=[None, seq_len])
Y = tf.placeholder(dtype=tf.int32, shape=[None, 1])

global_step = tf.Variable(0, trainable=False)

# Model
embedding = tf.Variable(tf.truncated_normal([vocab_size, embed_size]))
inputs = tf.nn.embedding_lookup(embedding, X)  # [batch_size, 256, 32]

# Pooling
pooling = tf.layers.max_pooling1d(inputs, 2, strides=1,
                                  padding="valid")  # [batch_size, 255, 32]
global_pooling = tf.reduce_mean(pooling, 1)  # [batch_size, 32]
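
The snippet stops after the pooling layer. A minimal continuation sketch, not from the source: a one-unit sigmoid head and a TF1-style training loop. DataHelper.next_batch is an assumed interface; the real helper may differ.

logits = tf.layers.dense(global_pooling, 1)  # [batch_size, 1]
loss = tf.losses.sigmoid_cross_entropy(
    multi_class_labels=tf.cast(Y, tf.float32), logits=logits)
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss, global_step=global_step)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        batch_x, batch_y = data_helper.next_batch(batch_size)  # assumed API
        sess.run(train_op, feed_dict={X: batch_x,
                                      Y: batch_y.reshape(-1, 1)})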
Code Example #3
import tensorflow as tf
from tensorflow import keras

from helper.data_helper import DataHelper

(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
y_train = keras.utils.to_categorical(y_train)
y_test = keras.utils.to_categorical(y_test)

data_helper = DataHelper(x_train, y_train)

n_input = 28  # MNIST data input (img shape: 28*28)
n_steps = 28  # timesteps
n_hidden = 128  # hidden layer num of features
n_classes = 10  # MNIST total classes (0-9 digits)

learning_rate = 0.001
training_iters = 100000
batch_size = 100
display_step = 10

x = tf.placeholder(tf.float32, [None, n_steps, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])

weights = tf.Variable(tf.truncated_normal([n_hidden, n_classes], stddev=0.1))
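
The snippet ends at the output weights. One plausible continuation, sketched here under the assumption that the model is the classic row-by-row LSTM over MNIST images; the source does not confirm this architecture.

biases = tf.Variable(tf.constant(0.1, shape=[n_classes]))

cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden)
outputs, _ = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32)  # [batch, 28, 128]
logits = tf.matmul(outputs[:, -1, :], weights) + biases    # last timestep only
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits))
train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)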
Code Example #4
import numpy as np
import tensorflow as tf
from tensorflow import keras

from helper.data_helper import DataHelper

(train_data, train_labels), (test_data, test_labels) = \
    keras.datasets.boston_housing.load_data()

mean = train_data.mean(axis=0)
std = train_data.std(axis=0)

train_data = (train_data - mean) / std
test_data = (test_data - mean) / std

data_helper = DataHelper(train_data, train_labels)

feature_num = train_data.shape[1]

x = tf.placeholder(dtype=tf.float32, shape=[None, feature_num], name='feature')
y = tf.placeholder(dtype=tf.float32, shape=[None, 1], name='target')

global_step = tf.Variable(0, trainable=False, name='global_step')

l1 = tf.layers.dense(x, 64, activation=tf.nn.relu)
l2 = tf.layers.dense(l1, 64, activation=tf.nn.relu)
pred = tf.layers.dense(l2, 1)

mae, mae_op = tf.metrics.mean_absolute_error(labels=y, predictions=pred)
loss = tf.losses.mean_squared_error(labels=y, predictions=pred)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
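
A sketch of the training loop that would typically follow. Note that tf.metrics.mean_absolute_error creates local metric variables, so tf.local_variables_initializer() is required alongside the global one. DataHelper.next_batch and the batch size are assumed, not from the source.

train_op = optimizer.minimize(loss, global_step=global_step)

with tf.Session() as sess:
    sess.run([tf.global_variables_initializer(),
              tf.local_variables_initializer()])  # metrics use local variables
    for step in range(500):
        batch_x, batch_y = data_helper.next_batch(32)  # assumed API
        _, _, cur_loss = sess.run(
            [train_op, mae_op, loss],
            feed_dict={x: batch_x, y: batch_y.reshape(-1, 1)})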
Code Example #5
import numpy as np
import tensorflow as tf

from helper.data_helper import DataHelper

mnist = tf.keras.datasets.mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
m_train = x_train.shape[0]
m_test = x_test.shape[0]

x_train = np.reshape(x_train, [m_train, -1])
x_test = np.reshape(x_test, [m_test, -1])

# One-hot encode the integer labels to match the [None, 10] placeholder below.
y_train = tf.keras.utils.to_categorical(y_train)
y_test = tf.keras.utils.to_categorical(y_test)

data_helper = DataHelper(data=x_train, label=y_train)

X = tf.placeholder(tf.float32, [None, 784], name="X")
Y_truth = tf.placeholder(tf.float32, [None, 10], name="Y")
PKeep = tf.placeholder(tf.float32)

W1 = tf.Variable(tf.truncated_normal([784, 512], stddev=0.1), name='W1')
B1 = tf.Variable(tf.ones([512]) / 10, name='B1')
W2 = tf.Variable(tf.truncated_normal([512, 128], stddev=0.1), name='W2')
B2 = tf.Variable(tf.ones([128]) / 10, name='B2')
W3 = tf.Variable(tf.truncated_normal([128, 10], stddev=0.1), name='W3')
B3 = tf.Variable(tf.ones([10]) / 10, name='B3')

Y1 = tf.nn.relu(tf.matmul(X, W1) + B1)
Y1 = tf.nn.dropout(Y1, PKeep)
Y2 = tf.nn.relu(tf.matmul(Y1, W2) + B2)
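
The snippet cuts off after the second hidden layer. A minimal continuation sketch (not from the source) completing the three-layer network and its loss, following the pattern of Y1:

Y2 = tf.nn.dropout(Y2, PKeep)
logits = tf.matmul(Y2, W3) + B3
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y_truth, logits=logits))
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)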
Code Example #6
import tensorflow as tf
from tensorflow import keras

from helper.data_helper import DataHelper

(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0  # normalize to [0, 1]
# x_train = x_train[:20]
# x_test = x_test[:20]
# y_train = y_train[:20]
# y_test = y_test[:20]

y_train = keras.utils.to_categorical(y_train)
y_test = keras.utils.to_categorical(y_test)

data_helper = DataHelper(x_train, y_train)

X = tf.placeholder(tf.float32, shape=[None, 32, 32, 3], name='image')
Y = tf.placeholder(tf.float32, shape=[None, 10], name='label')
pKeep = tf.placeholder(tf.float32)

K = 6   # feature maps in the first conv layer
L = 12  # feature maps in the second conv layer
M = 18  # feature maps in the third conv layer

# convolutional network weights and biases
W1 = tf.Variable(tf.truncated_normal([5, 5, 3, K], stddev=0.1))
B1 = tf.Variable(tf.ones([K]) / 10)
W2 = tf.Variable(tf.truncated_normal([4, 4, K, L], stddev=0.1))
B2 = tf.Variable(tf.ones([L]) / 10)
W3 = tf.Variable(tf.truncated_normal([3, 3, L, M], stddev=0.1))
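
The snippet ends mid-definition. A sketch of how these filters are typically wired into a three-layer convolutional stack; the strides and the dense head are assumptions, not the source's confirmed architecture.

B3 = tf.Variable(tf.ones([M]) / 10)

Y1 = tf.nn.relu(tf.nn.conv2d(X, W1, strides=[1, 1, 1, 1],
                             padding='SAME') + B1)   # [batch, 32, 32, K]
Y2 = tf.nn.relu(tf.nn.conv2d(Y1, W2, strides=[1, 2, 2, 1],
                             padding='SAME') + B2)   # [batch, 16, 16, L]
Y3 = tf.nn.relu(tf.nn.conv2d(Y2, W3, strides=[1, 2, 2, 1],
                             padding='SAME') + B3)   # [batch, 8, 8, M]
YY = tf.reshape(Y3, [-1, 8 * 8 * M])
logits = tf.layers.dense(tf.nn.dropout(YY, pKeep), 10)
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=logits))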