def fnm_text(inputnewstext):
    manual_variable_initialization(True)
    # Clean the raw article text: keep letters only, lowercase, stem, drop English stopwords.
    stemnews = re.sub('[^a-zA-Z]', ' ', inputnewstext)
    stemnews = stemnews.lower()
    stemnews = stemnews.split()
    ps = PorterStemmer()
    stemnews = [ps.stem(word) for word in stemnews
                if word not in stopwords.words('english')]
    stemnews = ' '.join(stemnews)[0:1000]
    # One-hot encode and pad to a fixed sequence length of 1000 tokens.
    vocabulary_size = 10000
    onehot_repr = [one_hot(stemnews, vocabulary_size)]
    embedded_text = pad_sequences(onehot_repr, padding='pre', maxlen=1000)
    # Rebuild the bidirectional-LSTM classifier and load the trained weights.
    embedding_vector_features = 50
    tmodel = Sequential()
    tmodel.add(Embedding(vocabulary_size, embedding_vector_features, input_length=1000))
    tmodel.add(Bidirectional(LSTM(100)))
    tmodel.add(Dense(1, activation='sigmoid'))
    tmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    tmodel.load_weights(os.path.join(settings.MODEL_ROOT, 'textweights.h5'))
    x = embedded_text.reshape(1, 1000)
    result = tmodel.predict_classes(x)
    return result[0][0]
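A hedged usage sketch, not part of the original source: it assumes the Django-style `settings.MODEL_ROOT` points at the saved `textweights.h5`, and the meaning of the 0/1 label is a guess.

# Hedged example call (label mapping assumed, not confirmed by the source).
label = fnm_text("Scientists discover water on the moon, officials confirm.")
print("predicted class:", label)  # 0 or 1 from the sigmoid output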
def __init__(self, flags, summary_writer):
    # Hyperparameters taken from the command-line flags.
    self.lr = flags.learning_rate
    self.er = flags.entropy_rate
    self.flags = flags
    self.summary_writer = summary_writer
    self.N_STEP_RETURN = 40
    self.GAMMA = .99
    self.LAMBDA = 1
    self.eps = .2
    self.ssize = 32
    self.isize = len(U.useful_actions)
    self.custom_input_size = 1 + len(U.useful_actions)
    self.stop_signal = False
    # Shared training queue: one list per element of a transition, guarded by a lock.
    self.lock_queue = threading.Lock()
    self.train_queue = [[], [], [], [], [], [], [], []]
    self.counter_lock = threading.Lock()
    self.training_counter = 0
    # Build the network inside a GPU-enabled session and freeze the graph so
    # worker threads cannot modify it afterwards.
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    self.session = tf.Session(config=config)
    K.set_session(self.session)
    K.manual_variable_initialization(True)
    self.build_net('/gpu:0')
    self.build_model('/gpu:0')
    self.session.run(tf.global_variables_initializer())
    self.default_graph = tf.get_default_graph()
    self.default_graph.finalize()
    self.summary_writer.add_graph(self.default_graph)
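A hedged sketch of how the eight-slot `train_queue` might be filled by worker threads; the helper name and argument order are assumptions, not from the original class.

# Hedged sketch (assumed helper): push one n-step transition under lock_queue.
def train_push(self, s, a, r, s_, done, value, h_state, c_state):
    with self.lock_queue:
        for slot, item in zip(self.train_queue,
                              (s, a, r, s_, done, value, h_state, c_state)):
            slot.append(item)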
def __init__(self):
    self.session = tf.Session()
    K.set_session(self.session)
    K.manual_variable_initialization(True)
    self.model = self._build_model()
    self.graph = self._build_graph(self.model)
    self.session.run(tf.global_variables_initializer())
    self.default_graph = tf.get_default_graph()
    self.default_graph.finalize()  # avoid modifications
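A hedged sketch, with an assumed method name, of why the graph is finalized: worker threads can still run inference by entering the stored default graph explicitly instead of mutating it.

# Hedged sketch (method name assumed): thread-safe inference on the finalized graph.
def predict(self, state):
    with self.default_graph.as_default():
        return self.model.predict(state)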
def _build_model(self):
    # Two input branches over a window of 5 time steps: the agent state and a
    # 625-dimensional auxiliary view.
    first_input = Input(shape=(5, self.state_size))
    layer1 = Dense(64, activation='relu')(first_input)
    layer2 = Dense(10, activation='relu')(layer1)
    second_input = Input(shape=(5, 625))
    layer1_b = Dense(100, activation='relu')(second_input)
    layer2_b = Dense(100, activation='relu')(layer1_b)
    # Merge the branches along the feature axis and summarise the sequence with an LSTM.
    merge = concatenate([layer2, layer2_b])
    lstm = LSTM(110)(merge)
    layer3 = Dense(self.action_size, activation='linear')(lstm)
    model = Model(inputs=[first_input, second_input], outputs=layer3)
    model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
    manual_variable_initialization(True)
    return model
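A hedged usage sketch, not from the original: it assumes `model` is the network returned by `_build_model()` above and that `state_size` is 4; the two-input model expects a list of the state window and the auxiliary window.

# Hedged example (state_size assumed to be 4).
import numpy as np
dummy_state = np.zeros((1, 5, 4))    # (batch, timesteps, state_size)
dummy_aux = np.zeros((1, 5, 625))    # (batch, timesteps, 625)
q_values = model.predict([dummy_state, dummy_aux])  # shape (1, action_size)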
def __init__(self, network, input_shape, output_shape, summary_writer):
    self.input_shape = input_shape
    self.output_shape = output_shape
    self.training_counter = 0
    self.learning_rate = 1e-3
    self.discount = .99
    self.LAMBDA = 1
    self.summary_writer = summary_writer
    # Create the session, bind Keras to it, then build the A2C network and its loss.
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    self.session = tf.Session(config=config)
    K.set_session(self.session)
    K.manual_variable_initialization(True)
    (self.input, self.value, self.policy,
     self.h_state, self.c_state,
     self.h_state_out, self.c_state_out,
     self.state_shape) = CNN('A2C', input_shape, output_shape, network)
    self.buildLoss('A2C')
    self.session.run(tf.global_variables_initializer())
    # Freeze the graph once everything is built.
    self.default_graph = tf.get_default_graph()
    self.default_graph.finalize()
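A hedged sketch of how the tensors built above might be evaluated; the method name and the exact feed structure are assumptions, not from the original class.

# Hedged sketch (feed keys assumed): run the value/policy heads and LSTM state outputs.
def predict(self, observation, h_state, c_state):
    return self.session.run(
        [self.value, self.policy, self.h_state_out, self.c_state_out],
        feed_dict={self.input: observation,
                   self.h_state: h_state,
                   self.c_state: c_state})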
from tensorflow.keras import optimizers
import numpy as np
from numpy import asfarray
from numpy import asarray_chkfinite
import datetime
from scipy.optimize import rosen, differential_evolution
from random import random
from random import seed
import time
import math
import tensorflow.keras.backend as K
from tensorflow.keras.models import model_from_json
from tensorflow.keras.models import model_from_yaml
from tensorflow.keras.backend import manual_variable_initialization

manual_variable_initialization(True)


# The candidate vector x holds the neural-network weight array in its first
# len(x) - 6 entries, followed by extra entries for the number of epochs,
# the learning rate, and the remaining hyperparameters.
def funcc(x):
    x = asarray_chkfinite(x)
    size = len(x) - 6
    extra1 = x[size]
    epoc = int(extra1)
    extra2 = x[size + 1]
    extra3 = x[size + 2]
    extra5 = x[size + 3]
    extra6 = x[size + 4]
    extra7 = x[size + 5]
    OP = int(extra7)
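A hedged sketch, not from the original, of how `funcc` could be handed to SciPy's `differential_evolution`, which the imports above suggest. It assumes the truncated remainder of `funcc` builds the model and returns a scalar loss; the weight-vector length and every bound below are assumptions.

# Hedged wiring example (all sizes and ranges assumed).
n_weights = 50                                    # assumed length of the weight array
bounds = [(-1.0, 1.0)] * n_weights + [
    (1, 100),       # extra1: number of epochs
    (1e-4, 1e-1),   # extra2: learning rate (assumed)
    (0, 1), (0, 1), (0, 1),   # extra3, extra5, extra6 (assumed)
    (0, 3),         # extra7: optimizer index OP (assumed)
]
result = differential_evolution(funcc, bounds, maxiter=10, seed=1)
print(result.x, result.fun)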