def layer_init(self):
        """Instantiate the three gated dilated-conv stages and the output
        projection, registering each trainable layer through
        ``self.layer_const`` (construction order is preserved).
        """
        self.layers = []
        # One gated stage per tuple: (stage index, kernel size, dilation).
        for stage, kernel, dilation in ((1, 2, 1), (2, 3, 2), (3, 2, 4)):
            setattr(
                self, "x_layer_%d" % stage,
                self.layer_const(
                    Conv1d(32, kernel, dilation_rate=dilation,
                           name="Atrous_tanh_%d" % stage)))
            setattr(
                self, "y_layer_%d" % stage,
                self.layer_const(
                    Conv1d(32, kernel, dilation_rate=dilation,
                           name="Atrous_sigmoid_%d" % stage)))
            setattr(
                self, "z_dense_tan_%d" % stage,
                self.layer_const(Dense(32, name="z_dense_tan_%d" % stage)))
            setattr(
                self, "z_dense_sig_%d" % stage,
                self.layer_const(Dense(32, name="z_dense_sig_%d" % stage)))
            setattr(
                self, "lambda%d" % stage,
                Lambda(self.gated_activation, name='gate_%d' % stage))

        # 1x1 conv mapping gated features back to the state dimension.
        self.last = self.layer_const(
            Conv1d(self.state_dim, 1, name='last_layer'))
    def build_network(self):
        """Build the autoregressive state-prediction VAE graph.

        Encoder: conv net over the future state window ``x_plus`` ->
        latent mean ``mu`` and (softplus) variance ``var``.  Decoder:
        for each of the ``H`` prediction steps, slice an H-long window
        of the concatenated actions and the states generated so far,
        run the dilated causal conv stack conditioned on ``z``, and
        append the step's prediction.  The same decoder is unrolled a
        second time on a caller-supplied ``sampled_z`` as a generator.

        Returns the placeholders, loss tensors, the two RMSProp train
        ops, the generator ``K.function`` and the ``sampled_z`` input.
        """
        # H-step windows of future/past states and actions.
        x_plus_ph = Input(shape=[self.H, self.state_dim], name="x_plus")
        x_m = Input(shape=[self.H, self.state_dim], name="x_min")
        u_plus = Input(shape=[self.H, self.action_dim], name="u_plus")
        u_m = Input(shape=[self.H, self.action_dim], name="u_min")

        # encoder

        h_1 = Conv1d(32, 2, activation='relu')(x_plus_ph)
        h_2 = Conv1d(16, 2, strides=2, activation='relu')(h_1)
        h_z = Flatten()(h_2)
        mu = Dense(self.z_dim)(h_z)
        # Softplus keeps the output positive; used as variance below.
        var = Dense(self.z_dim, activation="softplus")(h_z)

        def sampling(t):
            # Reparameterization trick: z = mu + sqrt(var) * eps.
            z_mean, z_var = t
            z_std = K.sqrt(z_var)
            eps = K.random_normal(shape=(self.z_dim, ), mean=0.0, stddev=1.0)
            return z_mean + eps * z_std

        def slicing(t, ix):
            # Cut an H-long window starting at time `ix` out of the
            # concatenated action and state sequences, then join the
            # two windows on the feature axis.
            c_u, c_x = t
            begin_index = tf.constant([0, ix, 0])
            size_index = tf.constant([-1, self.H, -1])
            u = tf.slice(c_u, begin_index, size_index)
            x = tf.slice(c_x, begin_index, size_index)
            k = tf.concat([u, x], axis=-1)
            return k

        def vae_loss_f(x_original, x_generated):
            # Reconstruction MSE plus KL(q(z|x) || N(0, I)); closes
            # over `mu` and `var` from the encoder above.
            square_loss = K.mean((x_original - x_generated)**2)
            kl_loss = K.sum((-0.5 * K.log(var)) +
                            ((K.square(mu) + var) / 2.0) - 0.5)
            return square_loss + kl_loss

        z = Lambda(sampling,
                   output_shape=(self.z_dim, ),
                   name="sampling_lambda")([mu, var])

        # decoder

        # Past + future actions form a 2H-long conditioning sequence;
        # the state sequence starts as the past window and grows by one
        # predicted step per iteration.
        connected_u = concatenate([u_m, u_plus], axis=1)
        connected_x = x_m

        x_plus = []
        for idx in xrange(self.H):
            arg = {"ix": idx}
            # NOTE(review): the same layer name 'slicing_lambda' is
            # reused on every iteration — confirm the Keras version in
            # use tolerates duplicate layer names.
            in_px = Lambda(slicing,
                           arguments=arg,
                           name='slicing_lambda',
                           output_shape=(self.H,
                                         self.state_dim + self.action_dim))(
                                             [connected_u, connected_x])

            atrous_out = self.dilated_causal_conv(in_px, z)

            x_plus.append(atrous_out)
            if self.stop_grad:
                # Feed the prediction back without letting gradients
                # flow through earlier steps.
                print "stop grad"
                stopped_atrous = tf.stop_gradient(atrous_out)
                connected_x = concatenate([connected_x, stopped_atrous],
                                          axis=1)
            else:
                print "use grad"
                connected_x = concatenate([connected_x, atrous_out], axis=1)

        x_plus = concatenate(x_plus, axis=1)

        mse_loss = tf.reduce_mean(mse(x_plus_ph, x_plus))
        vae_loss = tf.reduce_mean(vae_loss_f(x_plus_ph, x_plus))

        tf.summary.scalar("mse_loss", mse_loss)

        # Two alternative train ops: plain reconstruction vs full VAE.
        vae_mse = tf.train.RMSPropOptimizer(0.001).minimize(mse_loss)
        vae = tf.train.RMSPropOptimizer(0.001).minimize(vae_loss)

        # generator

        # Second unrolling of the decoder, conditioned on an external
        # latent; predictions are always fed back with gradients.
        sampled_z = Input(shape=(self.z_dim, ))

        connected_u = concatenate([u_m, u_plus], axis=1)
        connected_x = x_m

        g_out = []
        for idx in xrange(self.H):
            arg = {"ix": idx}
            in_px = Lambda(slicing, arguments=arg,
                           name="slicing_lambda")([connected_u, connected_x])

            atrous_out = self.dilated_causal_conv(in_px, sampled_z)
            connected_x = concatenate([connected_x, atrous_out], axis=1)

            g_out.append(atrous_out)

        g_out = concatenate(g_out, axis=1)

        generator_mse = tf.reduce_mean(mse(x_plus_ph, g_out))

        generator = K.function(
            [K.learning_phase(), x_m, u_plus, u_m, sampled_z], [g_out])

        return x_m, x_plus_ph, u_m, u_plus, mse_loss, vae_loss, generator_mse, vae_mse, vae, generator, sampled_z
# Text-classification CNN: pad the tokenized sentences, one-hot encode the
# labels, train a Conv1d model and plot the loss curves.
A = tokenizer.textSentences
A = pad_sequences(A)

# encode
# NOTE(review): `b` must hold the raw labels here — it is not assigned in
# this snippet; confirm it is set upstream.
Length = preprocessing.LabelEncoder()
b = Length.fit_transform(b)
# Was `Caterogical(b)` (undefined); one-hot encode the integer labels.
# NOTE(review): requires `from keras.utils import to_categorical`.
b = to_categorical(b)

# Train/test split (was `train_train_split` and `random=` typos).
x_train, x_test, y_train, y_test = train_test_split(
    A, b, test_size=0.5, random_state=2000)

# Convolutional Neural Network Layers
model = Sequential()
model.add(Embedding(2000, A.shape[1]))  # was lowercase `embedding`
model.add(Dropout(0.5))
# Was `filter=20, kernelSize=5` — neither is a valid Conv1d kwarg.
model.add(Conv1d(20, 5, activation='relu'))
model.add(GlobalMaxPooling1D())
model.add(Dense(50, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(b.shape[1], activation='softmax'))
# Was `optemizer=` / `metrics=["Accuracy"]` (Keras metric names are
# lowercase).
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
# Was `eposh=10, verbose=true, validationData=...` and fed the undefined
# name `x_train` while the split produced `X_train`.
history = model.fit(x_train, y_train, epochs=10, verbose=True,
                    validation_data=(x_test, y_test))

# gathering accuracy score

ACC_score = model.evaluate(x_test, y_test)
print("Accuracy Model " + str(ACC_score[1] * 100))

# plotting of the loss model (Keras history keys are 'loss'/'val_loss',
# not 'Loss'/'Validation Loss')
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
# Beispiel #4 (scraped-example marker; commented out — bare text is a syntax error)
# 0
    def build_network(self):
        """Action-sequence VAE.

        Encodes the future action window ``u_plus`` into a latent ``z``
        and decodes the past action window ``u_m`` through three
        z-conditioned gated dilated-conv stages into predicted future
        actions.  The decoder layers are instantiated once and reused,
        so the second unrolling (driven by an external ``sampled_z``)
        shares weights with the VAE path.

        Returns ``(vae, generator, vae_loss)``.
        """
        u_plus_ph = Input(shape=[self.H, self.action_dim], name="u_plus")
        u_m = Input(shape=[self.H, self.action_dim], name="u_min")

        #encoder

        h_1 = Conv1d(12, 2, activation='relu')(u_plus_ph)
        # NOTE(review): `subsample_length` is the Keras 1.x spelling of
        # `strides` — confirm the Keras version in use accepts it.
        h_2 = Conv1d(12, 2, subsample_length=2, activation='relu')(h_1)
        h_z = Flatten()(h_2)
        mu = Dense(self.z_dim)(h_z)
        # Softplus output is fed straight into `sampling` as the std
        # dev (no sqrt, unlike the variance-based variant elsewhere).
        sigma = Dense(self.z_dim, activation="softplus")(h_z)

        def sampling(t):
            # Reparameterization trick: z = mu + sigma * eps.
            z_mean, z_std = t
            eps = K.random_normal(shape=(self.z_dim, ), mean=0., stddev=1.0)
            return z_mean + eps * z_std

        z = Lambda(sampling, output_shape=(self.z_dim, ))([mu, sigma])

        #layer instantiation
        # Created once so the VAE and generator unrollings share weights.
        tan_layer_1 = Conv1d(32, 2, dilation_rate=1, name="Atrous_tanh_1")
        sig_layer_1 = Conv1d(32, 2, dilation_rate=1, name="Atrous_sigmoid_1")
        tan_dense_1 = Dense(32, name="z_dense_tan_1")
        sig_dense_1 = Dense(32, name="z_dense_sigm_1")
        lambda1 = Lambda(self.gated_activation, name='gate_1')

        tan_layer_2 = Conv1d(32, 3, dilation_rate=2, name="Atrous_tanh_2")
        sig_layer_2 = Conv1d(32, 3, dilation_rate=2, name="Atrous_sigmoid_2")
        tan_dense_2 = Dense(32, name="z_dense_tan_2")
        sig_dense_2 = Dense(32, name="z_dense_sigm_2")
        lambda2 = Lambda(self.gated_activation, name='gate_2')

        tan_layer_3 = Conv1d(32, 2, dilation_rate=4, name="Atrous_tanh_3")
        sig_layer_3 = Conv1d(32, 2, dilation_rate=4, name="Atrous_sigmoid_3")
        tan_dense_3 = Dense(32, name="z_dense_tan_3")
        sig_dense_3 = Dense(32, name="z_dense_sigm_3")
        lambda3 = Lambda(self.gated_activation, name='gate_3')

        # 1x1 conv producing the flattened H*action_dim output per step.
        last_layer = Conv1d(self.H * self.action_dim, 1, name='last_layer')
        #decoder

        # Stage 1: gated activation of conv(u_m), conditioned on z.
        tanh_elem = tan_layer_1(u_m)
        sigm_elem = sig_layer_1(u_m)
        tanh_z = tan_dense_1(z)
        sigm_z = sig_dense_1(z)
        px_h = lambda1([tanh_elem, sigm_elem, tanh_z, sigm_z])

        # Stage 2.
        tanh_elem = tan_layer_2(px_h)
        sigm_elem = sig_layer_2(px_h)
        tanh_z = tan_dense_2(z)
        sigm_z = sig_dense_2(z)
        px_h = lambda2([tanh_elem, sigm_elem, tanh_z, sigm_z])

        # Stage 3.
        tanh_elem = tan_layer_3(px_h)
        sigm_elem = sig_layer_3(px_h)
        tanh_z = tan_dense_3(z)
        sigm_z = sig_dense_3(z)
        px_h = lambda3([tanh_elem, sigm_elem, tanh_z, sigm_z])

        u_plus = last_layer(px_h)
        u_plus = Reshape((self.H, self.action_dim))(u_plus)

        def vae_loss(x_original, x_generated):
            # Reconstruction MSE + KL term; closes over mu/sigma above.
            square_loss = K.mean((x_original - x_generated)**2)
            kl_loss = K.sum(-K.log(sigma) +
                            (K.square(mu) + K.square(sigma)) / 2 - 0.5)
            return square_loss + kl_loss

        vae = Model([u_plus_ph, u_m], u_plus)

        # NOTE(review): compiled with plain "mse"; `vae_loss` is only
        # returned to the caller, not used here — confirm intent.
        vae.compile(optimizer='rmsprop', loss="mse")

        # Generator: same decoder layers driven by an external latent.
        sampled_z = Input(shape=(self.z_dim, ))

        tanh_elem = tan_layer_1(u_m)
        sigm_elem = sig_layer_1(u_m)
        tanh_z = tan_dense_1(sampled_z)
        sigm_z = sig_dense_1(sampled_z)
        px_h = lambda1([tanh_elem, sigm_elem, tanh_z, sigm_z])

        tanh_elem = tan_layer_2(px_h)
        sigm_elem = sig_layer_2(px_h)
        tanh_z = tan_dense_2(sampled_z)
        sigm_z = sig_dense_2(sampled_z)
        px_h = lambda2([tanh_elem, sigm_elem, tanh_z, sigm_z])

        tanh_elem = tan_layer_3(px_h)
        sigm_elem = sig_layer_3(px_h)
        tanh_z = tan_dense_3(sampled_z)
        sigm_z = sig_dense_3(sampled_z)
        px_h = lambda3([tanh_elem, sigm_elem, tanh_z, sigm_z])

        u_plus = last_layer(px_h)
        u_plus = Reshape((self.H, self.action_dim))(u_plus)

        generator = Model([u_m, sampled_z], u_plus)

        return vae, generator, vae_loss
# Beispiel #5 (scraped-example marker; commented out — bare text is a syntax error)
# 0
    def build_network(self):
        """Single-step state VAE.

        Encodes the future state window ``x_plus`` into a latent
        (``mu``, ``var``), decodes ``concat(x_m, u_m)`` through three
        z-conditioned gated dilated-conv stages, and reshapes the
        result to a single state vector.  A generator reusing the same
        layers is driven by an external ``sampled_z``.

        Returns ``(vae, generator, vae_loss)``.
        """
        x_plus_ph = Input(shape=[self.H, self.state_dim], name="x_plus")
        x_m = Input(shape=[self.H, self.state_dim], name="x_min")
        #u_plus = Input(shape=[self.H, self.action_dim], name="u_plus")
        u_m = Input(shape=[self.H, self.action_dim], name="u_min")

        #encoder

        h_1 = Conv1d(32, 2, activation='relu')(x_plus_ph)
        h_2 = Conv1d(16, 2, strides=2, activation='relu')(h_1)
        h_z = Flatten()(h_2)
        mu = Dense(self.z_dim)(h_z)
        # Softplus keeps the output positive; treated as variance.
        var = Dense(self.z_dim, activation="softplus")(h_z)

        #sigma is sigma^2

        def sampling(t):
            # Reparameterization trick: z = mu + sqrt(var) * eps.
            z_mean, z_var = t
            z_std = K.sqrt(z_var)
            eps = K.random_normal(shape=(self.z_dim, ), mean=0.0, stddev=1.0)
            return z_mean + eps * z_std

        z = Lambda(sampling, output_shape=(self.z_dim, ))([mu, var])

        #decoder

        #layer instantiate
        # Created once so the VAE and generator paths share weights.

        x_layer_1 = Conv1d(32, 2, dilation_rate=1, name="Atrous_tanh_1")
        y_layer_1 = Conv1d(32, 2, dilation_rate=1, name="Atrous_sigmoid_1")

        z_dense_tan_1 = Dense(32, name="z_dense_tan_1")
        z_dense_sig_1 = Dense(32, name="z_dense_sig_1")

        lambda1 = Lambda(self.gated_activation, name='gate_1')

        x_layer_2 = Conv1d(32, 3, dilation_rate=2, name="Atrous_tanh_2")
        y_layer_2 = Conv1d(32, 3, dilation_rate=2, name="Atrous_sigmoid_2")

        z_dense_tan_2 = Dense(32, name="z_dense_tan_2")
        z_dense_sig_2 = Dense(32, name="z_dense_sig_2")

        lambda2 = Lambda(self.gated_activation, name='gate_2')

        x_layer_3 = Conv1d(32, 2, dilation_rate=4, name="Atrous_tanh_3")
        y_layer_3 = Conv1d(32, 2, dilation_rate=4, name="Atrous_sigmoid_3")

        z_dense_tan_3 = Dense(32, name="z_dense_tan_3")
        z_dense_sig_3 = Dense(32, name="z_dense_sig_3")

        lambda3 = Lambda(self.gated_activation, name='gate_3')

        # 1x1 conv mapping gated features back to the state dimension.
        last = Conv1d(self.state_dim, 1, name='last_layer')

        # Decoder input: past states and actions joined on the feature
        # axis.
        in_px = concatenate([x_m, u_m], -1)
        #in_px = Lambda(lambda x: K.concatenate, name="concat")([x_m, u_plus, u_m])

        # Stage 1: gated activation of conv(in_px), conditioned on z.
        xx1 = x_layer_1(in_px)
        yy1 = y_layer_1(in_px)

        z1 = z_dense_tan_1(z)
        z2 = z_dense_sig_1(z)

        px_h1 = lambda1([xx1, yy1, z1, z2])

        # Stage 2.
        xx2 = x_layer_2(px_h1)
        yy2 = y_layer_2(px_h1)

        z1 = z_dense_tan_2(z)
        z2 = z_dense_sig_2(z)

        px_h2 = lambda2([xx2, yy2, z1, z2])

        # Stage 3.
        xx3 = x_layer_3(px_h2)
        yy3 = y_layer_3(px_h2)

        z1 = z_dense_tan_3(z)
        z2 = z_dense_sig_3(z)

        atrous_out = lambda3([xx3, yy3, z1, z2])

        atrous_out = last(atrous_out)

        # Debug leftover (Python 2 print statement).
        print atrous_out.shape

        # Collapses the conv output into one state vector; assumes the
        # stack leaves exactly state_dim elements — TODO confirm.
        x_plus = Reshape((self.state_dim, ))(atrous_out)

        vae = Model([x_plus_ph, x_m, u_m], x_plus)

        def vae_loss(x_original, x_generated):
            # Reconstruction MSE + KL term; closes over mu/var above.
            square_loss = K.mean((x_original - x_generated)**2)
            kl_loss = K.sum((-0.5 * K.log(var)) +
                            ((K.square(mu) + var) / 2.0) - 0.5)
            return square_loss + kl_loss

        def mean_squared(y_true, y_pred):
            # NOTE(review): defined but unused (see compile below).
            #assert K.ndim(y_true) == 3
            #y_true = K.reshape(y_true, (K.shape(y_true)[0], K.shape(y_true)[1]*K.shape(y_true)[2]))
            #y_pred = K.reshape(y_true, (K.shape(y_pred)[0], K.shape(y_pred)[1]*K.shape(y_pred)[2]))
            return K.mean(K.square(y_pred - y_true), axis=-1)

        # NOTE(review): this optimizer is never passed to compile —
        # the model trains with "adam" below; likely a leftover.
        optimize = keras.optimizers.RMSprop(lr=0.001,
                                            rho=0.9,
                                            epsilon=1e-08,
                                            decay=0.0)

        vae.compile(optimizer="adam", loss='mean_squared_logarithmic_error')

        #generator
        # Reuses the stage-1 conv tensors (xx1/yy1 computed on in_px
        # above); only the z-dense layers see the external latent.
        sampled_z = Input(shape=(self.z_dim, ))

        z1 = z_dense_tan_1(sampled_z)
        z2 = z_dense_sig_1(sampled_z)

        px_z1 = lambda1([xx1, yy1, z1, z2])

        xxz2 = x_layer_2(px_z1)
        yyz2 = y_layer_2(px_z1)

        z1 = z_dense_tan_2(sampled_z)
        z2 = z_dense_sig_2(sampled_z)

        px_z2 = lambda2([xxz2, yyz2, z1, z2])

        xxz3 = x_layer_3(px_z2)
        yyz3 = y_layer_3(px_z2)

        z1 = z_dense_tan_3(sampled_z)
        z2 = z_dense_sig_3(sampled_z)

        g_out = lambda3([xxz3, yyz3, z1, z2])

        g_out = last(g_out)

        g_out = Reshape((self.state_dim, ))(g_out)

        generator = Model([x_m, u_m, sampled_z], g_out)

        return vae, generator, vae_loss
    def build_network(self):
        x_plus_ph = Input(shape=[self.H, self.state_dim], name="x_plus")
        x_m = Input(shape=[self.H, self.state_dim], name="x_min")
        u_m = Input(shape=[self.H, self.action_dim], name="u_min")

        #encoder

        h_1 = Conv1d(12, 2, activation='relu')(x_plus_ph)
        h_2 = Conv1d(12, 2, subsample_length=2, activation='relu')(h_1)
        h_z = Flatten()(h_2)
        mu = Dense(self.z_dim)(h_z)
        sigma = Dense(self.z_dim, activation="softplus")(h_z)

        def sampling(t):
            z_mean, z_std = t
            eps = K.random_normal(shape=(self.z_dim, ), mean=0., stddev=1.0)
            return z_mean + eps * z_std

        z = Lambda(sampling, output_shape=(self.z_dim, ))([mu, sigma])

        #decoder

        in_px = merge([x_m, u_m], mode="concat", concat_axis=-1)

        h = Conv1d(16, 2, strides=1, activation='relu')(in_px)
        h = Conv1d(16, 3, strides=2, activation='relu')(h)
        hz = Flatten()(h)

        muz = Dense(hz)

        sigmaz = K.variable(1.0)

        policy_z = sampling([muz, sigmaz])

        tanh_elem = Conv1d(32, 2, dilation_rate=1, name="Atrous_tanh_1")(u_m)
        sigm_elem = Conv1d(32, 2, dilation_rate=1,
                           name="Atrous_sigmoid_1")(u_m)

        tanh_z = Dense(32, name="z_dense_tan_1")(policy_z)
        sigm_z = Dense(32, name="z_dense_sigm_1")(policy_z)

        px_h = Lambda(self.gated_activation,
                      name='gate_1')([tanh_elem, sigm_elem, tanh_z, sigm_z])

        tanh_elem = Conv1d(32, 3, dilation_rate=2, name="Atrous_tanh_2")(px_h)
        sigm_elem = Conv1d(32, 3, dilation_rate=2,
                           name="Atrous_sigmoid_2")(px_h)

        tanh_z = Dense(32, name="z_dense_tan_2")(policy_z)
        sigm_z = Dense(32, name="z_dense_sigm_2")(policy_z)

        px_h = Lambda(self.gated_activation,
                      name='gate_2')([tanh_elem, sigm_elem, tanh_z, sigm_z])

        tanh_elem = Conv1d(32, 2, dilation_rate=4, name="Atrous_tanh_3")(px_h)
        sigm_elem = Conv1d(32, 2, dilation_rate=4,
                           name="Atrous_sigmoid_3")(px_h)

        tanh_z = Dense(32, name="z_dense_tan_3")(policy_z)
        sigm_z = Dense(32, name="z_dense_sigm_3")(policy_z)

        px_h = Lambda(self.gated_activation,
                      name='gate_3')([tanh_elem, sigm_elem, tanh_z, sigm_z])

        last = Conv1d(110, 1, name='last_layer')(px_h)