# Imports needed by this fragment:
import numpy as np
from keras.layers import Input, Dense, Activation
from keras.models import Model
from keras.optimizers import SGD

action_ = np.array([actions[i]])
state_ = np.array([states[i]])
# Store the (state, action) pair in the experience memory; the reward slot is unused here.
experience.insert(state_, action_, state_, np.array([0]))

# 1 input, two hidden layers (128 and 64 units) with ReLU activations.
input = Input(shape=(1,))
input.trainable = True
print("Input ", input)
network = Dense(128, kernel_initializer='uniform')(input)
print("Network: ", network)
network = Activation('relu')(network)
network = Dense(64, kernel_initializer='uniform')(network)
network = Activation('relu')(network)
# 1 output, linear activation
network = Dense(1, kernel_initializer='uniform')(network)
network = Activation('linear')(network)
model = Model(inputs=input, outputs=network)
sgd = SGD(lr=0.01, momentum=0.9)
print("Decay: ", sgd.decay)
model.compile(loss='mse', optimizer=sgd)
print("Loss ", model.total_loss)
weights = [input] + model.trainable_weights  # weight tensors, with the input tensor prepended
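# A minimal sketch (not from the original code) of one way the `weights` list above can be
# used: building a backend function that returns gradients of the compiled model's loss with
# respect to the input tensor and the trainable weights. The single (state_, action_) batch
# and the sample weight of 1.0 are illustrative assumptions.
import keras.backend as K

gradients = K.gradients(model.total_loss, weights)
get_gradients = K.function(
    inputs=model.inputs + model.targets + model.sample_weights,
    outputs=gradients)
# Example call on one training pair:
# grads = get_gradients([state_, action_, np.ones((1,))])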
# Imports needed by this fragment:
from keras.layers import (Input, Dense, Activation, Lambda, Reshape,
                          Flatten, Concatenate, Conv1D)
from keras.models import Model

def __init__(self, n_in, n_out, state_bounds, action_bounds, reward_bound, settings_):
    super(DeepCNNKeras, self).__init__(n_in, n_out, state_bounds, action_bounds,
                                       reward_bound, settings_)
    ### Apparently after the first layer the batch axis is left out for most of the Keras stuff...
    input = Input(shape=(self._state_length,))
    input.trainable = True
    print("Input ", input)
    ## Custom slice layers; Keras does not have a built-in slicing layer, so Lambda is used.
    taskFeatures = Lambda(
        lambda x: x[:, 0:self._settings['num_terrain_features']],
        output_shape=(self._settings['num_terrain_features'],))(input)
    characterFeatures = Lambda(
        lambda x: x[:, self._settings['num_terrain_features']:self._state_length],
        output_shape=(self._state_length - self._settings['num_terrain_features'],))(input)
    ## Keras/TensorFlow expects the channel axis at the end.
    network = Reshape((self._settings['num_terrain_features'], 1))(taskFeatures)
    network = Conv1D(filters=16, kernel_size=8)(network)
    network = Activation('relu')(network)
    network = Conv1D(filters=16, kernel_size=8)(network)
    network = Activation('relu')(network)
    self._critic_task_part = network
    network = Flatten()(network)
    ## After the 'valid' convolutions, network.shape is (None, 16 * (num_terrain_features - 14));
    ## characterFeatures.shape is (None, state_length - num_terrain_features).
    print("characterFeatures ", characterFeatures)
    print("network ", network)
    network = Concatenate(axis=1)([network, characterFeatures])
    network = Dense(128, kernel_initializer='uniform')(network)
    print("Network: ", network)
    network = Activation('relu')(network)
    network = Dense(64, kernel_initializer='uniform')(network)
    network = Activation('relu')(network)
    network = Dense(32, kernel_initializer='uniform')(network)
    network = Activation('relu')(network)
    # 1 output, linear activation
    network = Dense(1, kernel_initializer='uniform')(network)
    network = Activation('linear')(network)
    self._critic = Model(inputs=input, outputs=network)

    inputAct = Input(shape=(self._state_length,))
    inputAct.trainable = True
    print("Input ", inputAct)
    ## Custom slice layers for the actor, mirroring the critic above.
    taskFeaturesAct = Lambda(
        lambda x: x[:, 0:self._settings['num_terrain_features']],
        output_shape=(self._settings['num_terrain_features'],))(inputAct)
    characterFeaturesAct = Lambda(
        lambda x: x[:, self._settings['num_terrain_features']:self._state_length],
        output_shape=(self._state_length - self._settings['num_terrain_features'],))(inputAct)
    ## Keras/TensorFlow likes the channels to be at the end.
    networkAct = Reshape((self._settings['num_terrain_features'], 1))(taskFeaturesAct)
    networkAct = Conv1D(filters=16, kernel_size=8)(networkAct)
    networkAct = Activation('relu')(networkAct)
    networkAct = Conv1D(filters=16, kernel_size=8)(networkAct)
    networkAct = Activation('relu')(networkAct)
    self._actor_task_part = networkAct
    networkAct = Flatten()(networkAct)
    networkAct = Concatenate(axis=1)([networkAct, characterFeaturesAct])
    networkAct = Dense(128, kernel_initializer='uniform')(networkAct)
    print("Network: ", networkAct)
    networkAct = Activation('relu')(networkAct)
    networkAct = Dense(64, kernel_initializer='uniform')(networkAct)
    networkAct = Activation('relu')(networkAct)
    networkAct = Dense(32, kernel_initializer='uniform')(networkAct)
    networkAct = Activation('relu')(networkAct)
    # self._action_length outputs, linear activation
    networkAct = Dense(self._action_length, kernel_initializer='uniform')(networkAct)
    networkAct = Activation('linear')(networkAct)
    self._actor = Model(inputs=inputAct, outputs=networkAct)
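# A self-contained sketch (not from the original class) of the Lambda slicing pattern used
# above, with made-up sizes: state_length = 20, num_terrain_features = 16. It verifies that
# the two slices carry the expected shapes.
from keras.layers import Input, Lambda
from keras.models import Model

inp = Input(shape=(20,))
terrain = Lambda(lambda x: x[:, 0:16], output_shape=(16,))(inp)    # first 16 features
character = Lambda(lambda x: x[:, 16:20], output_shape=(4,))(inp)  # remaining 4 features
slicer = Model(inputs=inp, outputs=[terrain, character])
print(slicer.output_shape)  # [(None, 16), (None, 4)]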
# Imports needed by this fragment:
import numpy as np
from keras.layers import Input, Dense, Dropout, Activation, LSTM, concatenate
from keras.models import Model

video_input = Input(shape=(maxlen, vecin.shape[1]), name='video_input')
video_embed = Dropout(dropout_rate)(
    Dense(128, activation='softmax', name='video_dense')(video_input))
# alternative tried in the original: Dense(128, kernel_regularizer=regularizers.l2(0.01),
#                                          activation='softmax', name='video_dense')
texts_input = Input(shape=(maxlen, len(word_indices)), name='texts_input')
texts_lstm = LSTM(lstm_size, return_sequences=True,
                  dropout=dropout_rate, recurrent_dropout=dropout_rate,
                  name='LSTM_input')(texts_input)
layer_0 = Dropout(dropout_rate)(concatenate(
    [topic_embed, video_embed, texts_lstm], axis=2, name='merged_input'))
layer_1 = LSTM(lstm_size, return_sequences=False,
               dropout=dropout_rate, recurrent_dropout=dropout_rate,
               name='meta-LSTM')(layer_0)
layer_2 = Dense(len(word_indices), name='word_selector')(layer_1)
output = Activation('softmax', name='output')(layer_2)
# Note: setting .trainable on tensors is a no-op in Keras; trainability belongs to layers.
topic_input.trainable = True
topic_embed.trainable = True
video_input.trainable = True
video_embed.trainable = True
texts_input.trainable = True
texts_lstm.trainable = True
layer_1.trainable = True
model_video = Model(inputs=[topic_input, video_input, texts_input], outputs=[output])
model_video.compile(loss='categorical_crossentropy', optimizer=optimiser)
params = np.repeat(np.atleast_2d(video_param), maxlen, axis=0)
Writing = True
while Writing or Loop:
    model_video.load_weights(brain_file)
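    # A hypothetical sketch of the generation step that would follow here (not part of the
    # original excerpt): one greedy forward pass through model_video. topic_x and the
    # zero-initialised text buffer are illustrative placeholders.
    topic_x = np.zeros((1,) + model_video.input_shape[0][1:])  # placeholder topic sequence
    video_x = params[np.newaxis, :, :]                         # (1, maxlen, video features)
    texts_x = np.zeros((1, maxlen, len(word_indices)))         # words generated so far
    probs = model_video.predict([topic_x, video_x, texts_x])[0]
    next_word = int(np.argmax(probs))  # greedy choice over the softmax vocabulary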