def _build_model(self):
    """Return the two-headed (value, policy) network, loading from disk when possible.

    If ``self.load()`` yields nothing, a fresh residual CNN is assembled:
    one convolutional block built from ``self.layers_metadata[0]``, one
    residual block per remaining metadata entry, then a value head and a
    policy head on top of the final block.
    """
    model = self.load()
    if model is None:
        main_input = Input(shape=self.input_dim, name='main_input')
        # First stage: plain convolutional block from the leading entry.
        first = self.layers_metadata[0]
        block = self._add_conv_block(prev_block=main_input,
                                     filters=first['filters'],
                                     kernel_size=first['kernel_size'])
        # Each remaining entry stacks one residual block.
        for metadata in self.layers_metadata[1:]:
            block = self._add_residual_block(prev_block=block,
                                             filters=metadata['filters'],
                                             kernel_size=metadata['kernel_size'])
        value_head = self._add_value_head(prev_block=block)
        policy_head = self._add_policy_head(prev_block=block)
        model = Model(inputs=main_input, outputs=[value_head, policy_head])
        # Equal-weighted two-headed loss: MSE for the value head,
        # cross-entropy for the policy head.
        model.compile(
            loss={'value_head': 'mse',
                  'policy_head': 'categorical_crossentropy'},
            optimizer=SGD(lr=self.learning_rate),
            loss_weights={'value_head': 0.5, 'policy_head': 0.5},
            metrics={'value_head': 'mse', 'policy_head': 'acc'})
    # Build the predict/train/test functions up front so the model can be
    # shared safely across threads (they are lazily created otherwise).
    model._make_predict_function()
    model._make_train_function()
    model._make_test_function()
    return model
Example #2
0
    def build_local_model(self):
        """Create per-thread copies of the actor and critic networks.

        The copies share the architecture of the global networks and are
        initialised from the global weights before being returned.
        """
        state_in = Input(shape=self.state_size)

        # Two conv stages followed by a fully connected layer.
        features = Conv2D(16, (8, 8), strides=(4, 4), activation='relu')(state_in)
        features = Conv2D(32, (4, 4), strides=(2, 2), activation='relu')(features)
        features = Flatten()(features)
        hidden = Dense(256, activation='relu')(features)

        # Policy head (action probabilities) and value head (state value).
        policy_out = Dense(self.action_size, activation='softmax')(hidden)
        value_out = Dense(1, activation='linear')(hidden)

        local_actor = Model(inputs=state_in, outputs=policy_out)
        local_critic = Model(inputs=state_in, outputs=value_out)

        # Pre-build the predict functions so the models are thread-safe.
        local_actor._make_predict_function()
        local_critic._make_predict_function()

        # Synchronise the local copies with the global networks.
        local_actor.set_weights(self.actor.get_weights())
        local_critic.set_weights(self.critic.get_weights())

        local_actor.summary()
        local_critic.summary()

        return local_actor, local_critic
Example #3
0
    def build_model(self):
        """Build a Gaussian-policy actor (outputs mu, sigma) and a scalar-value critic sharing the state input."""
        state = Input(batch_shape=(None, self.state_size))
        actor_input = Dense(30, input_dim=self.state_size, activation='relu', kernel_initializer='he_uniform')(state)
        # actor_hidden = Dense(self.hidden2, activation='relu')(actor_input)
        # tanh keeps the raw mean in [-1, 1]
        mu_0 = Dense(self.action_size, activation='tanh', kernel_initializer='he_uniform')(actor_input)
        # softplus gives output in [0, inf) and its derivative is sigmoid
        sigma_0 = Dense(self.action_size, activation='softplus', kernel_initializer='he_uniform')(actor_input)
        # mean is doubled -- presumably to fit an action space of [-2, 2]; TODO confirm
        mu = Lambda(lambda x: x * 2)(mu_0)
        # small epsilon keeps sigma strictly positive
        sigma = Lambda(lambda x: x + 0.0001)(sigma_0)
        # critic: maps the same state to a scalar state value
        critic_input = Dense(30, input_dim=self.state_size, activation='relu', kernel_initializer='he_uniform')(state)
        # value_hidden = Dense(self.hidden2, activation='relu')(critic_input)
        state_value = Dense(1, activation='linear', kernel_initializer='he_uniform')(critic_input)

        actor = Model(inputs=state, outputs=(mu, sigma))
        critic = Model(inputs=state, outputs=state_value)
        # predict functions must be built before use from other threads
        actor._make_predict_function()
        critic._make_predict_function()

        actor.summary()
        critic.summary()

        return actor, critic
Example #4
0
    def build_model(self, actor_only=False):
        """Build the actor network and, unless ``actor_only``, the critic too.

        Args:
            actor_only: when True, return only the actor model.

        Returns:
            The actor model, or an ``(actor, critic)`` tuple.
        """
        state = Input(shape=self.state_size)
        # Normalise the raw state before the first dense layer.
        # Bug fix: the original applied the first Dense directly to `state`,
        # leaving this BatchNormalization layer disconnected from the graph.
        state_process = BatchNormalization()(state)
        state_process = Dense(100, activation='elu')(state_process)
        state_process = BatchNormalization()(state_process)
        state_process = Dense(100, activation='elu')(state_process)
        state_process = BatchNormalization()(state_process)
        # Small uniform init keeps the initial policy/value outputs near zero.
        policy = Dense(self.action_size,
                       activation='tanh',
                       kernel_initializer=tf.random_uniform_initializer(
                           minval=-3e-3, maxval=3e-3))(state_process)
        value = Dense(1,
                      activation='linear',
                      kernel_initializer=tf.random_uniform_initializer(
                          minval=-3e-3, maxval=3e-3))(state_process)

        actor = Model(inputs=state, outputs=policy, name='Actor')
        # Pre-build the predict function for multi-threaded use.
        actor._make_predict_function()

        if actor_only:
            return actor

        critic = Model(inputs=state, outputs=value, name='Critic')
        critic._make_predict_function()

        return actor, critic
Example #5
0
    def _initModel(self, hyperparameters, shape):
        """Build and compile an InceptionV3-based single-output model.

        A top-less InceptionV3 base is frozen; a global-average-pooling
        layer, one fully-connected layer and a single-unit output are the
        only trainable parts.

        Args:
            hyperparameters: dict providing "fc_size", "fc_activation",
                "loss" and "metrics".
            shape: input image shape passed to InceptionV3.

        Returns:
            The compiled Keras model.
        """

        print(shape)
        # NOTE(review): weights=None gives a randomly initialised base, yet
        # the base is frozen below -- confirm this is intended rather than
        # weights='imagenet'.
        base_model = InceptionV3(weights=None,
                                 include_top=False,
                                 input_shape=shape)

        # add a global spatial average pooling layer
        x = base_model.output
        x = GlobalAveragePooling2D()(x)

        # add a fully-connected layer
        x = Dense(hyperparameters["fc_size"],
                  activation=hyperparameters["fc_activation"])(x)

        # add a logistic layer
        predictions = Dense(1, kernel_initializer='normal')(x)

        # train this model
        model = Model(inputs=base_model.input, outputs=predictions)
        # Freeze the InceptionV3 base; only the new top layers train.
        for layer in base_model.layers:
            layer.trainable = False

        # compile the model (should be done *after* setting layers to non-trainable)
        model.compile(optimizer='adam',
                      loss=hyperparameters["loss"],
                      metrics=hyperparameters["metrics"])

        # fix model loading bug
        # https://github.com/keras-team/keras/issues/2397
        model._make_predict_function()

        print(model.summary())

        return model
Example #6
0
    def _build_model(self):
        """Build the policy/value CNN and store it on ``self.model``.

        The model maps an observation to a softmax policy over
        ``self.action_count`` actions and a scalar state value.
        """
        with tf.variable_scope(self.name):
            input_layer = Input(shape=self.input_shape)
            cnn_input_layer = Conv2D(input_shape=self.input_shape,
                                     filters=self.filter_count,
                                     kernel_size=self.filter_size,
                                     padding='same',
                                     activation='relu',
                                     name='cnn_input')(input_layer)
            prev_layer = cnn_input_layer
            # TODO: adjust cnn layer count.  (Currently 0, so no hidden conv
            # layers are added and the loop below is a no-op.)
            cnn_layer_count = 0
            for i in range(cnn_layer_count):
                hidden_layer = Conv2D(filters=self.filter_count,
                                      kernel_size=self.filter_size,
                                      padding='same',
                                      activation='relu',
                                      name='conv{}'.format(i))(prev_layer)
                prev_layer = hidden_layer

            flatten_layer = Flatten()(prev_layer)

            output_policy_layer = Dense(units=self.action_count,
                                        activation='softmax')(flatten_layer)
            output_value_layer = Dense(1, activation='linear')(flatten_layer)

            # Bug fix: use the supported ``inputs=`` keyword; the legacy
            # ``input=`` keyword is deprecated and rejected by newer Keras.
            model = Model(inputs=[input_layer],
                          outputs=[output_policy_layer, output_value_layer])

            # Since this model will not be evaluated by the a3c algorithm,
            # expressly build the predict function so its vars are available.
            model._make_predict_function()

            self.model = model
Example #7
0
    def build_model(self):
        """Build a recurrent actor-critic pair for a continuous action space.

        The actor maps a state sequence to an action scaled to [-pi, pi];
        the critic maps (state sequence, action) to a scalar Q-value.  Both
        share the TimeDistributed-conv + LSTM state encoder.

        Returns:
            (actor, critic) tuple.
        """
        state_size = list(self.state_size)
        state_size.append(1)
        state = Input(shape=self.state_size)
        # Add a trailing channel axis so Conv2D can run on each time step.
        reshape = Reshape(state_size)(state)

        conv = TimeDistributed(Conv2D(16, (8, 8), strides=(4, 4), activation='relu'))(reshape)
        conv = TimeDistributed(Conv2D(32, (4, 4), strides=(2, 2), activation='relu'))(conv)
        conv = TimeDistributed(Flatten())(conv)

        lstm_state = LSTM(512, activation='relu')(conv)
        action_output = Dense(self.action_size, activation='tanh')(lstm_state)
        # Scale the tanh output from [-1, 1] to [-pi, pi].
        actor_output = Lambda(lambda x: x * np.pi)(action_output)

        # Bug fix: the original built the scaling Lambda but wired the model
        # to the unscaled `action_output`, leaving `actor_output` unused.
        actor = Model(inputs=state, outputs=actor_output)

        action = Input([self.action_size])
        state_action = Concatenate()([lstm_state, action])
        fc = Dense(512, activation='relu')(state_action)
        Q_output = Dense(1)(fc)

        critic = Model(inputs=[state, action], outputs=Q_output)

        # Pre-build predict functions for multi-threaded use.
        actor._make_predict_function()
        critic._make_predict_function()

        if VERBOSE:
            actor.summary()
            critic.summary()

        return actor, critic
Example #8
0
    def build_model(self):
        """Construct the Gaussian-policy actor and the state-value critic."""
        state = Input(batch_shape=(None, self.state_size))

        # --- actor branch: two hidden layers feeding mean and std heads ---
        a_hidden = Dense(self.hidden1,
                         input_dim=self.state_size,
                         activation='relu')(state)
        a_hidden = Dense(self.hidden2, activation='relu')(a_hidden)
        raw_mu = Dense(self.action_size, activation='tanh')(a_hidden)
        raw_sigma = Dense(self.action_size, activation='softplus')(a_hidden)

        # Rescale the mean to [-2, 2]; keep sigma strictly positive.
        mu = Lambda(lambda t: t * 2)(raw_mu)
        sigma = Lambda(lambda t: t + 0.0001)(raw_sigma)

        # --- critic branch: independent hidden layers to a scalar value ---
        c_hidden = Dense(self.hidden1,
                         input_dim=self.state_size,
                         activation='relu')(state)
        c_hidden = Dense(self.hidden2,
                         activation='relu',
                         kernel_initializer='he_uniform')(c_hidden)
        state_value = Dense(1,
                            activation='linear',
                            kernel_initializer='he_uniform')(c_hidden)

        actor = Model(inputs=state, outputs=(mu, sigma))
        critic = Model(inputs=state, outputs=state_value)

        # Build predict functions ahead of multi-threaded use.
        actor._make_predict_function()
        critic._make_predict_function()

        actor.summary()
        critic.summary()

        return actor, critic
Example #9
0
    def build_model(self):
        """Build an LSTM-based actor-critic pair over a 2-step state sequence.

        The state is presented as (batch, 2, state_size / 2); an LSTM plus a
        shared dense layer feed separate policy and value heads.
        """
        state = Input(batch_shape=(None, 2, int(self.state_size / 2)))
        lstm_layer = LSTM(self.hidden0,
                          activation='tanh',
                          kernel_initializer='glorot_uniform')(state)
        shared = Dense(self.hidden1,
                       activation='relu',
                       kernel_initializer='glorot_uniform')(lstm_layer)

        # Policy head: softmax over the discrete actions.
        actor_hidden = Dense(self.hidden2,
                             activation='relu',
                             kernel_initializer='glorot_uniform')(shared)
        action_prob = Dense(self.action_size,
                            activation='softmax',
                            kernel_initializer='glorot_uniform')(actor_hidden)

        # Value head: scalar state value.
        value_hidden = Dense(self.hidden2,
                             activation='relu',
                             kernel_initializer='he_uniform')(shared)
        state_value = Dense(1,
                            activation='linear',
                            kernel_initializer='he_uniform')(value_hidden)

        actor = Model(inputs=state, outputs=action_prob)
        critic = Model(inputs=state, outputs=state_value)

        # Pre-build predict functions for multi-threaded use.
        actor._make_predict_function()
        critic._make_predict_function()

        actor.summary()
        critic.summary()

        return actor, critic
Example #10
0
    def build_actor(self):
        """Build a Gaussian-policy actor producing (mu, sigma_sq) heads."""

        #base for two heads of mean and variance
        base = Input(batch_shape=(None, self.state_size), name='states')
        net = Dense(units=self.hidden_size, use_bias=False,
                    activation='relu')(base)

        #mu head
        mu = Dense(units=1, activation='tanh')(net)
        #custom layer scales the tanh mean to [-2, 2] for Pendulum
        mu = Lambda(lambda x: x * 2)(mu)

        #sigma head
        sigma_sq = Dense(units=1, activation='softplus')(net)
        #custom layer to ensure non-zero variance
        sigma_sq = Lambda(lambda x: x + 0.0001)(sigma_sq)

        actor = Model(inputs=base, outputs=(mu, sigma_sq))

        #prep the predict function before use from other threads
        actor._make_predict_function()

        actor.summary()

        return actor
Example #11
0
    def build_local_model(self):
        """Build per-thread actor/critic copies of the DQN-style CNN and
        sync them with the global networks."""
        input = Input(shape=self.state_size)
        conv = Conv2D(32, (8, 8), strides=(4, 4), activation='relu')(input)
        conv = Conv2D(64, (4, 4), strides=(2, 2), activation='relu')(conv)
        # Simplifying network from original DQN
        conv = Conv2D(64, (3, 3), strides=(1, 1), activation='relu')(conv)
        conv = Flatten()(conv)
        # Simplifying network from original DQN
        fc = Dense(512, activation='relu')(conv)
        #fc = Dense(256)(conv)

        # Policy head (softmax action probabilities) and value head.
        policy = Dense(self.action_size, activation='softmax')(fc)
        value = Dense(1, activation='linear')(fc)

        local_actor = Model(inputs=input, outputs=policy)
        local_critic = Model(inputs=input, outputs=value)

        # Pre-build predict functions for thread safety.
        local_actor._make_predict_function()
        local_critic._make_predict_function()

        # Synchronizing with global network
        local_actor.set_weights(self.actor.get_weights())
        local_critic.set_weights(self.critic.get_weights())

        local_actor.summary()
        local_critic.summary()

        return local_actor, local_critic
Example #12
0
    def build_model(self):
        """Build a recurrent actor-critic pair: each frame is flattened, a
        GRU runs over time, and softmax-policy / value heads sit on top."""
        input = Input(shape=self.state_size)
        # conv = TimeDistributed(Conv2D(64, (8, 8), strides=(4, 4), padding='same', activation='elu', kernel_initializer='he_normal'))(input)
        # conv = TimeDistributed(Conv2D(32, (4, 4), strides=(2, 2), activation='elu', kernel_initializer='he_normal'))(conv)
        # conv = TimeDistributed(Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal'))(conv)
        # conv = TimeDistributed(Conv2D(16, (1, 1), activation='elu', kernel_initializer='he_normal'))(conv)
        conv = TimeDistributed(Flatten())(input)
        batch_norm = BatchNormalization()(conv)
        gru = GRU(256, activation='tanh',
                  kernel_initializer='he_normal')(batch_norm)
        policy = Dense(self.action_size,
                       activation='softmax',
                       kernel_initializer='he_normal')(gru)
        value = Dense(1, activation='linear',
                      kernel_initializer='he_normal')(gru)

        actor = Model(inputs=input, outputs=policy)
        critic = Model(inputs=input, outputs=value)

        # Pre-build predict functions for multi-threaded use.
        actor._make_predict_function()
        critic._make_predict_function()

        if self.verbose:
            actor.summary()
            critic.summary()

        return actor, critic
Example #13
0
    def build_local_model(self, state_size, action_size):
        """Build per-thread actor/critic copies with an LSTM encoder and
        sync them with the global networks.

        Args:
            state_size: dimensionality of a single state vector.
            action_size: number of discrete actions.

        Returns:
            (local_actor, local_critic) tuple.
        """
        input = Input(shape=(1, state_size))
        d = LSTM(48, kernel_initializer='he_uniform')(input)
        # input = Input(shape=(state_size,))
        # d = Dense(48, activation='relu',
        #         kernel_initializer='he_uniform')(input)
        # d = Dense(48, activation='relu',
        #         kernel_initializer='he_uniform')(d)
        # d = Dense(24, activation='relu',
        #         kernel_initializer='he_uniform')(d)

        policy = Dense(action_size, activation='softmax')(d)
        value = Dense(1, activation='linear')(d)

        local_actor = Model(inputs=input, outputs=policy)
        local_critic = Model(inputs=input, outputs=value)

        # Pre-build predict functions for thread safety.
        local_actor._make_predict_function()
        local_critic._make_predict_function()

        # Synchronise with the global networks.
        local_actor.set_weights(self.actor.get_weights())
        local_critic.set_weights(self.critic.get_weights())

        local_actor.summary()
        local_critic.summary()

        return local_actor, local_critic
Example #14
0
    def build_local_model(self):
        """Build per-thread actor/critic copies (two dense layers plus
        dropout) and sync them with the global networks."""
        input = Input(shape=(self.state_size, ))
        dropout = 0.2
        fc = Dense(64, activation='relu',
                   kernel_initializer='lecun_uniform')(input)
        fc = Dense(64, activation='relu',
                   kernel_initializer='lecun_uniform')(fc)
        fc = Dropout(dropout)(fc)

        policy = Dense(self.action_size, activation='softmax')(fc)
        value = Dense(1, activation='linear')(fc)

        local_actor = Model(inputs=input, outputs=policy)
        local_critic = Model(inputs=input, outputs=value)

        # Pre-build predict functions for thread safety.
        local_actor._make_predict_function()
        local_critic._make_predict_function()

        # Synchronise with the global networks.
        local_actor.set_weights(self.actor.get_weights())
        local_critic.set_weights(self.critic.get_weights())

        local_actor.summary()
        local_critic.summary()

        return local_actor, local_critic
Example #15
0
    def build_model(self):
        """Build the global actor and critic sharing a small CNN encoder."""

        input = Input(self.state_size)
        #############  CNN  #################
        conv = Conv2D(16, (4, 4), strides=(2, 2),
                      activation='relu')(input)  # output = 7x14x16
        conv = Conv2D(32, (3, 3), strides=(1, 1),
                      activation='relu')(conv)  # output = 5x10x32
        conv = Flatten()(conv)  # output = 1600
        fc = Dense(256, activation='relu')(conv)
        #####################################

        policy = Dense(self.action_size, activation='softmax')(fc)
        value = Dense(1, activation='linear')(fc)

        # actor: takes a state and computes each action's probability
        actor = Model(inputs=input, outputs=policy)

        # critic: takes a state and computes the state's value
        critic = Model(inputs=input, outputs=value)

        # avoids the error Keras raises when used with multi-threading
        actor._make_predict_function()
        critic._make_predict_function()

        actor.summary()
        critic.summary()

        return actor, critic
Example #16
0
    def build_local_model(self):
        """Build per-thread actor/critic copies of the CNN model and sync
        them with the global networks."""
        input = Input(shape=self.state_size)
        #############  CNN  #################
        conv = Conv2D(16, (4, 4), strides=(2, 2),
                      activation='relu')(input)  # output = 7x14x16
        conv = Conv2D(32, (3, 3), strides=(1, 1),
                      activation='relu')(conv)  # output = 5x10x32
        conv = Flatten()(conv)  # output = 1600
        fc = Dense(256, activation='relu')(conv)
        #####################################

        policy = Dense(self.action_size, activation='softmax')(fc)
        value = Dense(1, activation='linear')(fc)

        # actor: takes a state and computes each action's probability
        # critic: takes a state and computes the state's value
        local_actor = Model(inputs=input, outputs=policy)
        local_critic = Model(inputs=input, outputs=value)

        # Pre-build predict functions (avoids Keras multi-threading errors).
        local_actor._make_predict_function()
        local_critic._make_predict_function()

        # Synchronise with the global networks.
        local_actor.set_weights(self.actor.get_weights())
        local_critic.set_weights(self.critic.get_weights())

        local_actor.summary()
        local_critic.summary()

        return local_actor, local_critic
Example #17
0
    def build_model(self):
        """Build an actor and a critic that share the first hidden layer."""
        state = Input(batch_shape=(None, self.state_size))

        # Shared representation of the state.
        shared = Dense(self.hidden1,
                       input_dim=self.state_size,
                       activation='relu',
                       kernel_initializer='he_uniform')(state)

        # Policy head: hidden layer then softmax over the actions.
        policy_hidden = Dense(self.hidden2,
                              activation='relu',
                              kernel_initializer='he_uniform')(shared)
        action_prob = Dense(self.action_size,
                            activation='softmax',
                            kernel_initializer='he_uniform')(policy_hidden)

        # Value head: hidden layer then a single linear unit.
        value_hidden = Dense(self.hidden2,
                             activation='relu',
                             kernel_initializer='he_uniform')(shared)
        state_value = Dense(1,
                            activation='linear',
                            kernel_initializer='he_uniform')(value_hidden)

        actor = Model(inputs=state, outputs=action_prob)
        critic = Model(inputs=state, outputs=state_value)

        # Build predict functions before threads start using the models.
        actor._make_predict_function()
        critic._make_predict_function()

        actor.summary()
        critic.summary()

        return actor, critic
Example #18
0
    def create_model(self):
        """Build and compile an LSTM model predicting angles, pressures and
        reward from an action-space sequence.

        Returns:
            A compiled Keras model with three outputs: angles (linear),
            pressure (sigmoid) and reward (linear).
        """
        input_layer = Input(shape=(None, self.action_space_size))
        lstm_layer_1 = LSTM(10, return_sequences=True)(input_layer)
        lstm_layer_2 = LSTM(10, return_sequences=True)(lstm_layer_1)

        sub_last_layer = Dense(10)(lstm_layer_2)

        ### splitting last layer into three task-specific heads
        output_angles = Dense(self.angle_vect_size)(sub_last_layer)  #analog output
        output_pressure = Dense(
            self.pressure_vect_size,
            activation='sigmoid')(sub_last_layer)  #binary output
        output_reward = Dense(self.reward_vect_size)(
            sub_last_layer)  #analog output

        model = Model(
            inputs=input_layer,
            outputs=[output_angles, output_pressure, output_reward])
        # Per-output losses follow the order of `outputs`; reward carries
        # the smallest weight.  NOTE(review): `lr=` is the legacy Keras
        # argument name (newer versions use `learning_rate=`).
        model.compile(
            optimizer=SGD(lr=0.00001),
            loss=['mse', 'binary_crossentropy', 'mse'],
            metrics=['accuracy'],
            loss_weights=[0.10, 0.06, 0.04])

        # Pre-build the predict function for multi-threaded use.
        model._make_predict_function()
        return model
Example #19
0
    def _build_model(self):
        """Build the two-headed (policy, value) network.

        Uses the convolutional tower from ``get_conv_layers`` when
        ``self.conv`` is set; otherwise stacks LeakyReLU dense layers as
        described by ``self.layers``.

        Returns:
            A Keras model with outputs [policy, value].
        """
        if self.conv:
            main_input, x = self.get_conv_layers()
            # Value head: 1x1 conv, flatten, then a single dense unit.
            out_value = self.conv_layer(x, 1, (1, 1))
            out_value = Flatten()(out_value)
            out_value = self.dense_layer(out_value, 1)
            out_actions = self.policy_head(x)
        else:
            main_input = Input(batch_shape=(None, self.stateCnt))

            x = main_input
            # Hidden dense stack; empty self.layers leaves x = main_input.
            if len(self.layers) > 0:
                for h in self.layers:
                    x = self.dense_layer(x, h['size'], reg=None)
                    x = LeakyReLU()(x)

            out_value = self.dense_layer(x, 1, reg=None)
            out_actions = self.dense_layer(x,
                                           self.actionCnt,
                                           'softmax',
                                           'policy_head',
                                           reg=None)

        model = Model(inputs=[main_input], outputs=[out_actions, out_value])
        model._make_predict_function()  # have to initialize before threading

        return model
Example #20
0
    def build_model(self):
        """Build a DDPG-style actor and critic.

        The critic reuses the actor's first hidden layer (``fc1``) and
        merges it with a processed action via element-wise Add.
        """
        state = Input([self.state_size])
        fc1 = Dense(100, activation='elu')(state)
        # fc1 = BatchNormalization()(fc1)
        # fc1 = Dense(100, activation='elu')(fc1)
        action_output = Dense(self.action_size, activation='tanh')(fc1)

        actor = Model(inputs=state, outputs=action_output)

        # state = Input([self.state_size], batch_shape=[self.batch_size, self.state_size])
        action = Input([self.action_size])
        action_process = Dense(100, activation='elu')(action)
        # action_process = BatchNormalization()(action_process)
        # state_process  = Dense(100, activation='elu')(state)
        # state_process  = BatchNormalization()(state_process)
        # state_process  = Dense(100, activation='elu')(state_process)
        # Merge shared state features with the processed action.
        state_action = Add()([fc1, action_process])
        fc2 = Dense(50, activation='elu')(state_action)
        Q_output = Dense(1)(fc2)

        critic = Model(inputs=[state, action], outputs=Q_output)
        # action_grad = tf.gradients(critic.output, action)

        # Pre-build predict functions for multi-threaded use.
        actor._make_predict_function()
        critic._make_predict_function()

        if VERBOSE:
            actor.summary()
            critic.summary()

        return actor, critic
Example #21
0
def load_model(input_shape):
    """Build the Siamese signature-verification network and load its weights.

    Creates two weight-sharing branches of the SigNet base network, joins
    them with a Euclidean-distance Lambda layer, downloads the pretrained
    weights when they are missing locally, and stores the resulting model
    in the module-level ``model`` variable.

    Args:
        input_shape: shape of a single input image, fed to both branches.
    """
    global model

    # load the pre-trained keras model
    base_network = create_base_network_signet(input_shape)
    base_network.summary()

    input_a = Input(shape=(input_shape))
    input_b = Input(shape=(input_shape))

    # because we re-use the same instance `base_network`,
    # the weights of the network will be shared across the two branches
    processed_a = base_network(input_a)
    processed_b = base_network(input_b)

    distance = Lambda(euclidean_distance, output_shape=eucl_dist_output_shape)(
        [processed_a, processed_b])

    model_path = 'model_v4.hdf5'

    if not os.path.isfile(model_path):
        print('Beginning file download with wget module...')
        url = "https://docs.google.com/uc?export=download&id=1Z_dmvFepAjJvlJ-ut0OywnOsFw4n4Onh"
        # Bug fix: save explicitly under `model_path` -- wget otherwise
        # derives the filename from the URL, so the load_weights call below
        # would not find the downloaded file.
        wget.download(url, out=model_path)

    model = Model(inputs=[input_a, input_b], outputs=distance)
    model.load_weights(model_path)
    # Pre-build the predict function (fixes multi-threaded prediction,
    # see https://github.com/keras-team/keras/issues/2397).
    model._make_predict_function()
Example #22
0
    def build_model(self):
        """Build a fixed-batch-size actor and critic; the critic consumes
        the concatenated (state, action) pair."""
        state = Input([self.state_size],
                      batch_shape=[self.batch_size, self.state_size])
        fc1 = Dense(256, activation='relu',
                    kernel_initializer='he_normal')(state)
        action_output = Dense(self.action_size,
                              activation='linear',
                              kernel_initializer='he_normal')(fc1)

        actor = Model(inputs=state, outputs=action_output)

        # state = Input([self.state_size], batch_shape=[self.batch_size, self.state_size])
        action = Input([self.action_size],
                       batch_shape=[self.batch_size, self.action_size])
        state_action = Concatenate()([state, action])
        fc2 = Dense(256, activation='relu',
                    kernel_initializer='he_normal')(state_action)
        Q_output = Dense(1,
                         activation='linear',
                         kernel_initializer='he_normal')(fc2)

        critic = Model(inputs=[state, action], outputs=Q_output)
        # action_grad = tf.gradients(critic.output, action)

        # Pre-build predict functions for multi-threaded use.
        actor._make_predict_function()
        critic._make_predict_function()

        if VERBOSE:
            actor.summary()
            critic.summary()

        return actor, critic
Example #23
0
def loadNet(NET):
    """Load a pretrained ImageNet feature extractor.

    Args:
        NET: "RES" for ResNet50 (flattened features) or "RESV2" for
            InceptionResNetV2 (average-pooled features).

    Returns:
        A Keras Model mapping an input image to its feature vector.

    Raises:
        ValueError: if NET is not a recognised network name.
    """
    # Let TensorFlow grow GPU memory on demand instead of grabbing it all.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))

    if (NET == "RES"):
        net = keras.applications.resnet50.ResNet50(include_top=True,
                                                   weights='imagenet',
                                                   input_tensor=None,
                                                   input_shape=None,
                                                   pooling=None,
                                                   classes=1000)
        # Cut off the classifier: expose the flattened features instead.
        inP = net.input
        ouT = net.get_layer('flatten_1').output
        net = Model(inputs=inP, outputs=ouT)
    elif (NET == "RESV2"):
        net = keras.applications.inception_resnet_v2.InceptionResNetV2(
            include_top=True,
            weights='imagenet',
            input_tensor=None,
            input_shape=None,
            pooling=None,
            classes=1000)
        inP = net.input
        ouT = net.get_layer('avg_pool').output
        net = Model(inputs=inP, outputs=ouT)
    else:
        # Bug fix: previously an unknown NET fell through to
        # `net._make_predict_function()` and raised a confusing NameError.
        raise ValueError("Unknown network name: {!r}".format(NET))

    # Pre-build the predict function for multi-threaded use.
    net._make_predict_function()

    return net
Example #24
0
    def build_model(self):
        """Build the global actor and critic on a shared DQN-style encoder."""
        frames = Input(shape=self.state_size)

        # DQN-style convolutional encoder (simplified from the original DQN).
        x = Conv2D(32, (8, 8), strides=(4, 4), activation='relu')(frames)
        x = Conv2D(64, (4, 4), strides=(2, 2), activation='relu')(x)
        x = Conv2D(64, (3, 3), strides=(1, 1), activation='relu')(x)

        x = Flatten()(x)
        x = Dense(512, activation='relu')(x)
        #fc = Dense(256)(conv)

        # Softmax turns the logits into action probabilities.
        policy = Dense(self.action_size, activation='softmax')(x)
        value = Dense(1, activation='linear')(x)

        actor = Model(inputs=frames, outputs=policy)
        critic = Model(inputs=frames, outputs=value)

        # Build predict functions up front -- not for training itself, but
        # to prevent errors when the models are used from multiple threads.
        actor._make_predict_function()
        critic._make_predict_function()

        actor.summary()
        critic.summary()

        return actor, critic
Example #25
0
 def __build_actor_critic__(self):
     """Build the actor (softmax policy) and critic (state value) networks.

     Both heads sit on a shared encoder of three Conv-BN-ReLU stages
     followed by two dense layers.
     """
     frames = Input(shape=self.input_shape_)

     x = frames
     # Three identical Conv -> BatchNorm -> ReLU stages.
     for _ in range(3):
         x = Conv2D(filters=32, kernel_size=(8, 8), padding="valid", strides=(2, 2))(x)
         x = BatchNormalization()(x)
         x = Activation('relu')(x)

     x = Flatten()(x)

     x = Dense(units=512)(x)
     x = Activation('relu')(x)
     x = Dense(units=128)(x)
     x = Activation('relu')(x)

     policy = Dense(units=self.output_shape_, activation='softmax')(x)
     value = Dense(units=1, activation='linear')(x)

     actor = Model(inputs=frames, outputs=policy)
     critic = Model(inputs=frames, outputs=value)

     # Build predict functions ahead of multi-threaded use.
     actor._make_predict_function()
     critic._make_predict_function()

     return actor, critic
Example #26
0
class PredictionModel:
    """VGG-style CNN feeding stacked LSTMs for per-timestep classification.

    The network maps a (128, 128, 1) input through four conv/pool blocks,
    reshapes the feature map into an 8-step sequence, runs two LSTMs and a
    softmax over 4 classes. Class index 3 doubles as the "reject" label
    returned whenever no class probability clears ``self.threshold``.
    """

    def __init__(self):
        _NUM_CLASSES = 4
        input_shape = (128, 128, 1,)
        input_layer = Input(shape=input_shape)
        # NOTE(review): axis=2 normalizes over the width dimension rather
        # than the channel dimension — unusual; confirm this is intended.
        bn_axis = 2

        # Block 1
        x = BatchNormalization(axis=bn_axis)(input_layer)
        x = Conv2D(64, (3, 3), strides=(1, 1), activation='relu', padding='same', name='conv1')(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='pool1')(x)

        # Block 2
        x = BatchNormalization(axis=bn_axis)(x)
        x = Conv2D(128, (3, 3), strides=(1, 1), activation='relu', padding='same', name='conv2')(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='pool2')(x)

        # Block 3
        x = BatchNormalization(axis=bn_axis)(x)
        x = Conv2D(256, (3, 3), strides=(1, 1), activation='relu', padding='same', name='conv3/conv3_1')(x)
        x = Conv2D(256, (3, 3), strides=(1, 1), activation='relu', padding='same', name='conv3/conv3_2')(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='pool3')(x)

        # Block 4
        x = BatchNormalization(axis=bn_axis)(x)
        x = Conv2D(512, (3, 3), strides=(1, 1), activation='relu', padding='same', name='conv4/conv4_1')(x)
        x = Conv2D(512, (3, 3), strides=(1, 1), activation='relu', padding='same', name='conv4/conv4_2')(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='pool4')(x)

        # x = GlobalAveragePooling2D()(x)
        # Collapse the spatial grid into an 8-step sequence for the LSTMs
        # (assumes pool4 leaves 8 rows — TODO confirm).
        x = Reshape((8, -1))(x)
        # Previously the LSTM layers were 64 nodes wide.
        x = LSTM(128, return_sequences=True, kernel_regularizer=regularizers.l2(0.01), stateful=False)(x)
        x = LSTM(128, return_sequences=True, kernel_regularizer=regularizers.l2(0.01), stateful=False)(x)
        x = Dropout(0.5)(x)
        # x = Dense(64, activation='relu')(x)
        x = Dense(_NUM_CLASSES, activation='softmax')(x)

        self.model = Model(inputs=input_layer, outputs=x)
        # NOTE(review): `weights_path` is resolved from module scope —
        # confirm it is defined wherever this class is imported.
        self.model.load_weights(weights_path)
        # Pre-build the predict function so the model is safe to call
        # from multiple threads.
        self.model._make_predict_function()
        # Minimum probability for a prediction to be accepted; anything
        # at or below it maps to the reject label 3.
        self.threshold = 0.0

    def predict(self, x):
        """Return an int label array of shape (batch, time).

        Each entry is the argmax class of the corresponding timestep, or 3
        when that class's probability does not exceed ``self.threshold``.
        """
        # Add the trailing channel axis expected by the model.
        x = np.expand_dims(x, axis=-1)
        predictions = self.model.predict(x)
        # Vectorized replacement of the former per-element Python loop:
        # keep the argmax only where its probability clears the threshold,
        # otherwise fall back to the reject label 3.
        best = np.argmax(predictions, axis=-1)
        best_prob = np.take_along_axis(predictions, best[..., None], axis=-1)[..., 0]
        labels = np.where(best_prob > self.threshold, best, 3)
        return labels

    def predict_one_label(self, x):
        """Predict a single label for *x* by majority vote over timesteps."""
        labels = self.predict(x)
        print(labels)
        labels = np.squeeze(labels, axis=0)
        # NOTE(review): `class_mapping` comes from module scope — verify.
        return class_mapping[np.argmax(np.bincount(labels))]
Example #27
0
    def build_model(self, net_type='DNN', in_pa=1, ou_pa=1, time_leg=1):
        """Build actor and critic Keras models over a shared feature trunk.

        Args:
            net_type: Feature extractor topology — 'DNN', 'CNN', 'LSTM'
                or 'CLSTM'.
            in_pa: Number of input parameters (features per step).
            ou_pa: Number of output actions.
            time_leg: Sequence length for the CNN/LSTM/CLSTM variants.

        Returns:
            Tuple ``(actor, critic)`` sharing one input tensor; the actor
            ends in a softmax policy head, the critic in a linear value.

        Raises:
            ValueError: If ``net_type`` is not a supported topology name.
        """
        from keras.layers import Dense, Input, Conv1D, MaxPooling1D, LSTM, Flatten
        from keras.models import Model
        # 8 16 32 64 128 256 512 1024 2048
        if net_type == 'DNN':
            state = Input(batch_shape=(None, in_pa))
            shared = Dense(32,
                           input_dim=in_pa,
                           activation='relu',
                           kernel_initializer='glorot_uniform')(state)
            # shared = Dense(48, activation='relu', kernel_initializer='glorot_uniform')(shared)

        elif net_type in ('CNN', 'LSTM', 'CLSTM'):
            state = Input(batch_shape=(None, time_leg, in_pa))
            if net_type == 'CNN':
                shared = Conv1D(filters=10,
                                kernel_size=3,
                                strides=1,
                                padding='same')(state)
                shared = MaxPooling1D(pool_size=2)(shared)
                shared = Flatten()(shared)

            elif net_type == 'LSTM':
                shared = LSTM(32, activation='relu')(state)
                shared = Dense(64)(shared)

            else:  # 'CLSTM'
                shared = Conv1D(filters=10,
                                kernel_size=3,
                                strides=1,
                                padding='same')(state)
                shared = MaxPooling1D(pool_size=2)(shared)
                shared = LSTM(8)(shared)

        else:
            # Fail fast: previously an unknown net_type fell through and
            # crashed later with UnboundLocalError on `shared`.
            raise ValueError('Unsupported net_type: {!r}'.format(net_type))

        # ----------------------------------------------------------------------------------------------------
        # Common output network
        actor_hidden = Dense(64,
                             activation='relu',
                             kernel_initializer='glorot_uniform')(shared)
        action_prob = Dense(ou_pa,
                            activation='softmax',
                            kernel_initializer='glorot_uniform')(actor_hidden)

        value_hidden = Dense(32,
                             activation='relu',
                             kernel_initializer='he_uniform')(shared)
        state_value = Dense(1,
                            activation='linear',
                            kernel_initializer='he_uniform')(value_hidden)

        actor = Model(inputs=state, outputs=action_prob)
        critic = Model(inputs=state, outputs=state_value)

        # Pre-build predict functions for thread-safe inference.
        actor._make_predict_function()
        critic._make_predict_function()

        return actor, critic
Example #28
0
def load_detect_model(model_path):
    """Load the OCR text-detection model weights from ``model_path``.

    Builds a VGG16-UNet graph producing region and affinity score maps
    over an arbitrarily sized 3-channel image, loads the saved weights,
    and returns the ready-to-use Keras model.
    """
    print('loading saved ocr detection model from - {}'.format(model_path))
    image_in = Input(shape=(None, None, 3), name='image', dtype=tf.float32)
    region_map, affinity_map = VGG16_UNet(input_tensor=image_in, weights=None)
    detector = Model(inputs=[image_in], outputs=[region_map, affinity_map])
    detector.load_weights(model_path)
    # Pre-build the predict function so the model can be shared by threads.
    detector._make_predict_function()

    return detector
Example #29
0
def build_network(state_dim):
    """Build a small MLP value network for a ``state_dim``-sized input.

    Returns a tuple ``(model, trainable_weights, input_tensor)`` so the
    caller can wire custom training ops against the raw graph.
    """
    obs = Input((state_dim, ))
    # Shrinking ReLU stack: 64 -> 32 -> 16.
    x = obs
    for width in (64, 32, 16):
        x = Dense(width, activation='relu')(x)
    value = Dense(1, activation='linear')(x)
    net = Model(obs, value)
    # Pre-build the predict function for thread-safe inference.
    net._make_predict_function()
    return net, net.trainable_weights, obs
    def build_model(self, net_type='DNN', in_pa=1, ou_pa=1, time_leg=1):
        """Build the actor, critic and combined networks for a topology.

        Args:
            net_type: Feature extractor — 'DNN', 'CNN', 'LSTM' or 'CLSTM'.
            in_pa: Number of input parameters (features per step).
            ou_pa: Number of output actions.
            time_leg: Sequence length for the CNN/LSTM/CLSTM variants.

        Returns:
            Tuple ``(actor, critic, fully_model)`` sharing one input
            tensor. The actor head is tanh when the module-level MULTTACT
            flag is set, softmax otherwise.

        Raises:
            ValueError: If ``net_type`` is not a supported topology name.
        """
        # 8 16 32 64 128 256 512 1024 2048
        if net_type == 'DNN':
            state = Input(batch_shape=(None, in_pa))
            shared = Dense(32, input_dim=in_pa, activation='relu', kernel_initializer='glorot_uniform')(state)
            shared = Dense(64, activation='relu', kernel_initializer='glorot_uniform')(shared)
            shared = Dense(70, activation='relu', kernel_initializer='glorot_uniform')(shared)

        elif net_type in ('CNN', 'LSTM', 'CLSTM'):
            state = Input(batch_shape=(None, time_leg, in_pa))
            if net_type == 'CNN':
                shared = Conv1D(filters=10, kernel_size=3, strides=1, padding='same')(state)
                shared = MaxPooling1D(pool_size=3)(shared)
                shared = Flatten()(shared)
                shared = Dense(64)(shared)
                shared = Dense(70)(shared)

            elif net_type == 'LSTM':
                shared = LSTM(32)(state)
                shared = Dense(64)(shared)

            else:  # 'CLSTM'
                shared = Conv1D(filters=10, kernel_size=5, strides=1, padding='same')(state)
                shared = MaxPooling1D(pool_size=3)(shared)
                shared = LSTM(12)(shared)
                shared = Dense(24)(shared)

        else:
            # Fail fast: previously an unknown net_type fell through and
            # crashed later with UnboundLocalError on `shared`.
            raise ValueError('Unsupported net_type: {!r}'.format(net_type))

        # ----------------------------------------------------------------------------------------------------
        # Common output network
        # actor_hidden = Dense(64, activation='relu', kernel_initializer='glorot_uniform')(shared)

        if MULTTACT:
            actor_hidden = Dense(24, activation='relu', kernel_initializer='glorot_uniform')(shared)
            action_prob = Dense(ou_pa, activation='tanh', kernel_initializer='glorot_uniform')(actor_hidden)
        else:
            actor_hidden = Dense(24, activation='sigmoid', kernel_initializer='glorot_uniform')(shared)
            action_prob = Dense(ou_pa, activation='softmax', kernel_initializer='glorot_uniform')(actor_hidden)

        # value_hidden = Dense(32, activation='relu', kernel_initializer='he_uniform')(shared)
        value_hidden = Dense(12, activation='sigmoid', kernel_initializer='he_uniform')(shared)
        state_value = Dense(1, activation='linear', kernel_initializer='he_uniform')(value_hidden)

        actor = Model(inputs=state, outputs=action_prob)
        critic = Model(inputs=state, outputs=state_value)

        # Combined model exposing both heads for joint training.
        fully_model = Model(inputs=state, outputs=[action_prob, state_value])

        print('Make {} Network'.format(net_type))

        # Pre-build predict functions for thread-safe inference.
        actor._make_predict_function()
        critic._make_predict_function()

        actor.summary(print_fn=logging.info)
        critic.summary(print_fn=logging.info)

        return actor, critic, fully_model
    def build_model(self):
        """Build the actor and critic over a shared conv feature trunk.

        Returns a tuple ``(actor, critic)``: the actor outputs a softmax
        policy over ``self.action_size`` actions, the critic a single
        linear state value; both share the same input and conv layers.
        """
        state_in = Input(shape=self.state_size)
        features = Conv2D(16, (8, 8), strides=(4, 4), activation='relu')(state_in)
        features = Conv2D(32, (4, 4), strides=(2, 2), activation='relu')(features)
        features = Flatten()(features)
        hidden = Dense(256, activation='relu')(features)

        policy_out = Dense(self.action_size, activation='softmax')(hidden)
        value_out = Dense(1, activation='linear')(hidden)

        actor = Model(inputs=state_in, outputs=policy_out)
        critic = Model(inputs=state_in, outputs=value_out)

        # Pre-build predict functions so the models can be shared by threads.
        actor._make_predict_function()
        critic._make_predict_function()

        actor.summary()
        critic.summary()

        return actor, critic
    def build_model(self):
        """Build dense actor and critic models over one shared hidden layer.

        Returns a tuple ``(actor, critic)`` sharing the same input and
        first hidden layer; the actor ends in a softmax over
        ``self.action_size`` actions, the critic in a linear value.
        """
        state_in = Input(batch_shape=(None,  self.state_size))
        trunk = Dense(self.hidden1, input_dim=self.state_size, activation='relu', kernel_initializer='glorot_uniform')(state_in)

        # Actor branch: hidden layer then softmax policy head.
        policy_hidden = Dense(self.hidden2, activation='relu', kernel_initializer='glorot_uniform')(trunk)
        policy_out = Dense(self.action_size, activation='softmax', kernel_initializer='glorot_uniform')(policy_hidden)

        # Critic branch: hidden layer then scalar linear value head.
        value_fc = Dense(self.hidden2, activation='relu', kernel_initializer='he_uniform')(trunk)
        value_out = Dense(1, activation='linear', kernel_initializer='he_uniform')(value_fc)

        actor = Model(inputs=state_in, outputs=policy_out)
        critic = Model(inputs=state_in, outputs=value_out)

        # Pre-build predict functions so the models can be shared by threads.
        actor._make_predict_function()
        critic._make_predict_function()

        actor.summary()
        critic.summary()

        return actor, critic
Example #33
0
# Separate optimizers for the discriminator, classifier and generator.
dis_op = Adam(lr=0.0002, beta_1=0.5, beta_2=0.999, epsilon=1e-08)
cls_op = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
gen_op = Adam(lr=0.0002, beta_1=0.5, beta_2=0.999, epsilon=1e-08)
#gen_op = RMSprop(lr=0.001, rho=0.9, epsilon=1e-06)

## MODEL
# get_models (defined elsewhere) returns the generator plus the
# discriminator/classifier pair for the given dataset and size flag.
(gen, (dis, cls)) = get_models(dataset,smallmodel)
# Freeze the discriminator BEFORE compiling the stacked gen->dis model so
# generator updates do not also train the discriminator weights.
dis.trainable = False
x = Input(shape=(gen_dim,))
h = gen(x)
y = dis(h)
disgen = Model(input=x, output=y)
disgen.compile(loss='binary_crossentropy', optimizer=gen_op, metrics=['accuracy'])
# Eagerly build the train/test/predict functions so the model can be used
# from multiple threads without lazy-build races.
disgen._make_train_function()
disgen._make_test_function()
disgen._make_predict_function()

#noise_batch = np.random.uniform(-1, 1, (batch_size, gen_dim)).astype('float32')
#labels2 = np.ones((batch_size, 1)).astype('float32')
#g_loss = disgen.train_on_batch(noise_batch, labels2)

# Re-enable training and compile the discriminator for its own updates;
# the earlier disgen compile already captured the frozen state.
dis.trainable = True
dis.compile(loss='binary_crossentropy', optimizer=dis_op, metrics=['accuracy'])

# Optionally compile the auxiliary classifier head.
if classify:
    cls.trainable = True
    cls.compile(loss='categorical_crossentropy', optimizer=cls_op, metrics=['accuracy'])

# Generator compiled standalone (MSE) — used for pretraining/inspection,
# while adversarial updates flow through disgen above.
gen.compile(loss='mean_squared_error', optimizer=gen_op)