# Example #1
# 0
# In[4]:

# Build the training graph for Example #1 (TF1-style graph construction).
# Resetting the default graph makes this cell re-runnable without creating
# duplicate ops in the same kernel session.
# NOTE(review): assumes `tf`, `data`, `cnn`, `classifier`, `paths`, `os`,
# and `shutil` were imported/defined in earlier cells — confirm on a
# Restart & Run All pass.
tf.reset_default_graph()
# Scalar placeholder with a default of 16 so the batch size can be
# overridden at feed time without being mandatory.
batch_size = tf.placeholder_with_default(16, [], name='batch_size')
# Class-balanced ("even") batch producer; returns the batch tensors plus
# the underlying producer handles (presumably queue/iterator ops — verify
# against data.ops.get_even_batch_producer).
(input_op, seq_len,
 label), input_prods = data.ops.get_even_batch_producer(paths=paths,
                                                        batch_size=batch_size)

# Four conv layers; a scalar kernel_sizes/pool_sizes value presumably
# broadcasts to all layers — TODO confirm against cnn.model's signature.
# pool_sizes=1 means no pooling inside the CNN block.
cnn_params = {
    'out_dims': [256, 256, 256, 128],
    'kernel_sizes': 64,
    'pool_sizes': 1
}
c = cnn.model(seq_len=seq_len, input_op=input_op, **cnn_params)

# Alternative temporal aggregation kept for reference: top-k (k=8) pooling
# over the time axis via transpose/top_k/transpose. Currently disabled in
# favor of global average pooling below.
#a = tf.transpose(c.output, perm=[0, 2, 1])
#a = tf.nn.top_k(a, k=8, sorted=False, name='MAX_POOL').values
#a = tf.transpose(a, perm=[0, 2, 1])
# Global average pooling; axis=1 is presumably the time/sequence axis of
# the CNN output — TODO confirm c.output's layout.
a = tf.reduce_mean(c.output, axis=1)
# Classifier head with no hidden FC layers (fc_sizes=[]), i.e. a single
# linear projection to logits.
fc = classifier.model(input_op=a, fc_sizes=[])

logits = fc.logits
pred = fc.pred

# Checkpoint/summary directory derived from the model names so different
# architectures don't collide.
# NOTE(review): hardcoded /tmp path — consider a configurable base dir.
MODEL_PATH = '/tmp/balanced/' + c.name + fc.name
MODEL_EXISTS = os.path.exists(MODEL_PATH)
if MODEL_EXISTS:
    # NOTE(review): the message says "not empty" but the check is mere
    # existence; also MODEL_EXISTS stays True after the rmtree, so any
    # later reader of this flag sees a stale value — confirm downstream use.
    print('Model directory is not empty, removing old files')
    shutil.rmtree(MODEL_PATH)
# Example #2
# 0
# Normalize the input signal in place by its global standard deviation.
# NOTE(review): no mean subtraction here — presumably the data is already
# zero-centered; confirm with the loading cell. In-place `/=` also mutates
# the original array, so re-running this cell rescales it again.
data /= data.std()

# ### Set up predictor

# Build the inference graph (TF1 style). Batch size defaults to 1 and
# dropout keep_prob defaults to 1.0 (no dropout) for prediction.
print('Building model graph...')
tf.reset_default_graph()
batch_size = tf.placeholder_with_default(1, [], name='batch_size')
keep_prob = tf.placeholder_with_default(1., [], name='keep_prob')
# Single example with a variable-length time dimension.
input_op = tf.placeholder(tf.float32, [1, None])
seq_len = tf.placeholder(tf.float32, [1])

# Three conv layers; scalar kernel_sizes/pool_sizes presumably broadcast
# to all layers — TODO confirm against cnn.model. pool_sizes=1 = no pooling.
cnn_params = {'out_dims': [32, 64, 64], 'kernel_sizes': 64, 'pool_sizes': 1}

c = cnn.model(seq_len=seq_len,
              input_op=input_op,
              keep_prob=keep_prob,
              model_name='CNN_block',
              **cnn_params)

# Temporal downsampling factor applied between residual stages.
RESIDUAL_POOL = 4
# Insert a singleton axis so the (batch, time, channels) CNN output becomes
# 4-D (batch, time, 1, channels) as required by max_pool2d — presumably
# NHWC layout with width=1; TODO confirm c.output's shape.
residual_input = c.output[..., None, :]

# Stack three residual CNN stages, max-pooling time by RESIDUAL_POOL
# before each stage. (Loop body continues beyond this excerpt.)
for i in range(1, 4):
    residual_input = tf.contrib.layers.max_pool2d(
        residual_input,
        kernel_size=[RESIDUAL_POOL, 1],
        stride=[RESIDUAL_POOL, 1])

    c = cnn.model(seq_len=seq_len,
                  input_op=residual_input,
                  residual=True,