Example #1
# Imports assumed by this excerpt (standalone Keras on the TensorFlow 1.x backend):
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.models import Sequential
from keras.layers import LSTM, Dropout, Dense

    # Tail of a data-vectorization helper (its enclosing def is not shown here):
    # reshape the flat word vector to (words_to_read, embedding_size) and return
    # the feature/label pair.
    # print "X shape : ",X.get_shape()
    # i+=1
    x = np.reshape(x, (words_to_read, embedding_size))
    # print x.shape
    return x, y


# nX = len(X)

# print "DATA VECTORIZED............."

# X = np.reshape(X,(nX,words_to_read,1))
# print X.get_shape(),Y.get_shape()

# Create an explicit TF session and register it as the Keras backend session,
# so the layers built below run in this session.
sess = tf.Session()
K.set_session(sess)

# Two stacked 256-unit LSTM layers with dropout, followed by a Dense layer
# that maps to vocab_size outputs.
model = Sequential()
model.add(
    LSTM(256,
         input_shape=(int(X.get_shape()[1]), int(X.get_shape()[2])),
         return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(256, return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(vocab_size))  # , activation="softmax"
# model.compile(loss="categorical_crossentropy",optimizer="adam")

# model = LSTM(256,input_shape=(X.get_shape()[1],X.get_shape()[2]),init='uniform',return_sequences=True)(X)
# model = Dropout(0.2)(model)
# model = LSTM(256)(model)
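
A minimal, self-contained sketch of the same pattern, using toy shapes and data that are not taken from the original script: create a tf.Session, register it with Keras via K.set_session, then build, compile, and fit a small model as usual.

import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.models import Sequential
from keras.layers import LSTM, Dropout, Dense

sess = tf.Session()
K.set_session(sess)  # Keras layers below are built and run inside this session

model = Sequential()
model.add(LSTM(32, input_shape=(10, 8)))   # 10 timesteps, 8 features (toy sizes)
model.add(Dropout(0.2))
model.add(Dense(4, activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam")

x = np.random.random((16, 10, 8)).astype(np.float32)                # toy inputs
y = np.eye(4)[np.random.randint(0, 4, size=16)].astype(np.float32)  # one-hot toy targets
model.fit(x, y, epochs=1, batch_size=8, verbose=0)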
Example #2
# Imports assumed by this excerpt (TensorFlow 1.x, Keras, and Chainer):
import threading
import chainer
import tensorflow as tf
from keras import backend as K
from keras.models import load_model

def ini_chainer():
    # Pin the Chainer models to the configured GPU; this runs in a background
    # thread so the TensorFlow/Keras setup below can proceed in parallel.
    chainer.cuda.get_device(chainer_ID).use()
    google_net.to_gpu(chainer_ID)
    paintschainer.to_gpu(chainer_ID)
    print('chainer initialized')


chainer_thread = threading.Thread(target=ini_chainer)
chainer_thread.start()

# Restrict TensorFlow to the chosen GPU and to a fraction of its memory (leaving
# the rest for Chainer), then register the session with Keras.
session = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(
    visible_device_list=str(tensorflow_GPU_ID),
    per_process_gpu_memory_fraction=k_between_tf_and_chainer)))
K.set_session(session)
EPS = 1e-12
lr = 1e-6
beta1 = 0.5

# Load the pretrained generator so its variables live under the "generator" scope.
with tf.variable_scope("generator"):
    base_generator = load_model('base_generator.net')

# Inference-time inputs: a 1-channel sketch, a 3-channel local hint map, and
# feature-hint vectors of width 64/192/256/320.
sketch_ref_input_448 = tf.placeholder(dtype=tf.float32,
                                      shape=(None, None, None, 1))
local_hint_input_448 = tf.placeholder(dtype=tf.float32,
                                      shape=(None, None, None, 3))
hint_s57c64_0 = tf.placeholder(dtype=tf.float32, shape=(None, 64))
hint_s29c192_0 = tf.placeholder(dtype=tf.float32, shape=(None, 192))
hint_s29c256_0 = tf.placeholder(dtype=tf.float32, shape=(None, 256))
hint_s29c320_0 = tf.placeholder(dtype=tf.float32, shape=(None, 320))
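
The placeholders above are fed through the shared session at inference time. A hedged sketch of such a call, in which the output tensor generated_image and the all-zero input arrays are hypothetical stand-ins (the graph wiring that produces the output is not part of this excerpt):

import numpy as np

sketch = np.zeros((1, 448, 448, 1), dtype=np.float32)  # grayscale sketch
hints = np.zeros((1, 448, 448, 3), dtype=np.float32)   # RGB local hint map
feed = {
    sketch_ref_input_448: sketch,
    local_hint_input_448: hints,
    hint_s57c64_0: np.zeros((1, 64), dtype=np.float32),
    hint_s29c192_0: np.zeros((1, 192), dtype=np.float32),
    hint_s29c256_0: np.zeros((1, 256), dtype=np.float32),
    hint_s29c320_0: np.zeros((1, 320), dtype=np.float32),
}
result = session.run(generated_image, feed_dict=feed)  # generated_image: hypothetical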