def runRnnlstm(trainX, testX, trainY, testY, scal):
    # create and fit the LSTM network
    model = Sequential()
    scale = normVale(scal)  # normVale (defined elsewhere) maps scal to the per-layer init scales
    model.add(
        LSTM(2,
             init=lambda shape, name: initializations.normal(
                 shape, scale=scale[0], name="Layer1"),
             input_dim=1,
             return_sequences=True))
    model.add(
        LSTM(2,
             init=lambda shape, name: initializations.normal(
                 shape, scale=scale[1], name="Layer2")))
    print("==============================")
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    model.fit(trainX, trainY, nb_epoch=100, batch_size=1, verbose=False)

    # make predictions
    trainPredict = model.predict(trainX)
    testPredict = model.predict(testX)

    # invert predictions back to the original scale
    # (`scaler` is the module-level MinMaxScaler that was fit on the data)
    trainPredict = scaler.inverse_transform(trainPredict)
    trainY = scaler.inverse_transform([trainY])
    testPredict = scaler.inverse_transform(testPredict)
    testY = scaler.inverse_transform([testY])

    # calculate root mean squared error
    trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:, 0]))
    print('Train Score: %.2f RMSE' % (trainScore))
    testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:, 0]))
    print('Test Score: %.2f RMSE' % (testScore))
    return (1000 - trainScore)
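The snippet assumes two names defined elsewhere in its module: a global `scaler` (used above to invert the predictions) and `normVale`, which evidently maps `scal` to the two per-layer init scales. A hypothetical calling sketch under those assumptions:

import numpy as np
from sklearn.preprocessing import MinMaxScaler

# Hypothetical setup (not from the source): fit the module-level scaler and
# build sliding windows shaped (samples, timesteps, features=1) for Keras 1.x.
scaler = MinMaxScaler(feature_range=(0, 1))
data = scaler.fit_transform(np.arange(120, dtype='float32').reshape(-1, 1)).ravel()

look_back = 3
X = np.array([data[i:i + look_back] for i in range(len(data) - look_back)])
y = data[look_back:]

split = int(len(X) * 0.67)
trainX = X[:split].reshape(-1, look_back, 1)
testX = X[split:].reshape(-1, look_back, 1)

score = runRnnlstm(trainX, testX, y[:split], y[split:], scal=0.01)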
Example #2
	def buildmodel(self):
		print("Now we build the model")
		model = Sequential()
		model.add(Convolution2D(32, 8, 8, subsample=(4,4),init=lambda shape, name,dim_ordering=None: normal(shape, scale=0.01, name=name), border_mode='same',input_shape=(img_channels,img_rows,img_cols)))
		model.add(Activation('relu'))
		model.add(Convolution2D(64, 4, 4, subsample=(2,2),init=lambda shape, name,dim_ordering=None: normal(shape, scale=0.01, name=name), border_mode='same'))
		model.add(Activation('relu'))
		model.add(Convolution2D(64, 3, 3, subsample=(1,1),init=lambda shape, name, dim_ordering=None: normal(shape, scale=0.01, name=name), border_mode='same'))
		model.add(Activation('relu'))
		model.add(Flatten())
		model.add(Dense(512, init=lambda shape, name: normal(shape, scale=0.01, name=name)))
		model.add(Activation('relu'))
		# two outputs: the predicted long-term value estimate for each of the two input states (i.e. actions)
		model.add(Dense(2, init=lambda shape, name: normal(shape, scale=0.01, name=name)))

		try:
			model.load_weights("model.h5")
		except:
			# no saved weights yet; start training from scratch
			pass

		adam = Adam(lr=1e-6)
		model.compile(loss='mse',optimizer=adam)
		print("We finish building the model")
		return model
def model_create():
    input_shape = (img_rows, img_cols, opt.hist_len)

    model = Sequential()
    model.add(
        Convolution2D(
            128,
            8,
            8,
            subsample=(8, 8),
            activation='relu',
            init=lambda shape, name: normal(shape, scale=0.01, name=name),
            border_mode='same',
            input_shape=input_shape))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(
        Dense(128,
              activation='relu',
              init=lambda shape, name: normal(shape, scale=0.01, name=name)))
    model.add(
        Dense(opt.act_num,
              init=lambda shape, name: normal(shape, scale=0.01, name=name),
              activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=1e-5),
                  sample_weight_mode=None)
    return model
def target_model():
    input_shape = (img_rows, img_cols, opt.hist_len)

    model = Sequential()
    model.add(
        Convolution2D(
            32,
            8,
            8,
            subsample=(4, 4),
            activation='relu',
            init=lambda shape, name: normal(shape, scale=0.01, name=name),
            border_mode='same',
            input_shape=input_shape))

    model.add(Flatten())
    model.add(
        Dense(32,
              activation='relu',
              init=lambda shape, name: normal(shape, scale=0.01, name=name)))
    model.add(
        Dense(opt.act_num,
              init=lambda shape, name: normal(shape, scale=0.01, name=name)))

    model.compile(loss='mse', optimizer=Adam(lr=1e-6))
    return model
 def create_actor_network(self, state_size, action_dim):
     print("create_actor_network")
     S = Input(shape=[state_size])
     # used 2 hidden layers with 300 and 600 hidden units respectively
     h0 = Dense(HIDDEN1_UNITS, activation='relu')(S)
     h1 = Dense(HIDDEN2_UNITS, activation='relu')(h0)
     # To bound the policy network's action outputs, tanh is used for steering and sigmoid for acceleration and brake as squashing functions.
     # The output consists of 3 continuous actions: Steering, a single unit with tanh activation (-1 means max right turn, +1 means max left turn);
     # Acceleration, a single unit with sigmoid activation (0 means no gas, 1 means full gas);
     # Brake, another single unit with sigmoid activation (0 means no brake, 1 means full brake).
     Steering = Dense(
         1,
         activation='tanh',
         init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)
     Acceleration = Dense(
         1,
         activation='sigmoid',
         init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)
     Brake = Dense(
         1,
         activation='sigmoid',
         init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)
     # concatenate the three bounded heads into the action vector
     V = merge([Steering, Acceleration, Brake], mode='concat')
     model = Model(input=S, output=V)
     return model, model.trainable_weights, S
     """
Example #6
    def create_generator(self, feat_dim, aux_dim, encode_dim):
        feats = Input(shape=[feat_dim[0], feat_dim[1], feat_dim[2]])
        x = Convolution2D(256, 3, 3)(feats)
        x = LeakyReLU()(x)
        x = Convolution2D(256, 3, 3, subsample=(2, 2))(x)
        x = LeakyReLU()(x)
        x = Flatten()(x)
        auxs = Input(shape=[aux_dim])
        h = merge([x, auxs], mode='concat')
        h = Dense(256)(h)
        h = LeakyReLU()(h)
        h = Dense(128)(h)
        encodes = Input(shape=[encode_dim])
        c = Dense(128)(encodes)
        h = merge([h, c], mode='sum')
        h = LeakyReLU()(h)

        steer = Dense(1, activation='tanh', init=lambda shape, name:
                      normal(shape, scale=1e-4, name=name))(h)
        accel = Dense(1, activation='sigmoid', init=lambda shape, name:
                             normal(shape, scale=1e-4, name=name))(h)
        brake = Dense(1, activation='sigmoid', init=lambda shape, name:
                      normal(shape, scale=1e-4, name=name))(h)
        actions = merge([steer, accel, brake], mode='concat')
        model = Model(input=[feats, auxs, encodes], output=actions)
        return model, model.trainable_weights, feats, auxs, encodes
Example #8
 def create_actor_network(self, state_size,action_dim):
     print("Now we build the model")
     S = Input(shape=[state_size])   
     h0 = Dense(HIDDEN1_UNITS, activation='relu')(S)
     h1 = Dense(HIDDEN2_UNITS, activation='relu')(h0)
     Steering = Dense(1,activation='tanh',init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)  
     Acceleration = Dense(1,activation='sigmoid',init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)   
     Brake = Dense(1,activation='sigmoid',init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1) 
     V = merge([Steering,Acceleration,Brake],mode='concat')          
     model = Model(input=S,output=V)
     return model, model.trainable_weights, S
Example #9
def unitary_svd_init(shape, name=None):
    assert shape[0] == shape[1]

    Re = initializations.normal(shape, scale=1.0, name=name).get_value()
    Im = initializations.normal(shape, scale=1.0, name=name).get_value()
    X = Re + 1j * Im
    [U, S, V] = np.linalg.svd(X)
    X = np.dot(U, V)  # drop the singular values: the closest unitary matrix to X
    ReX = np.real(X)
    ImX = np.imag(X)
    Xaug = np.concatenate([ReX, ImX], axis=0)
    return K.variable(Xaug, name=name)
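Discarding the singular values (`X = np.dot(U, V)`) projects the random complex matrix onto the nearest unitary matrix; its real and imaginary parts are then stacked along axis 0 into a (2n, n) block. A quick check of that property (hypothetical usage, any Keras 1.x backend):

import numpy as np
from keras import backend as K

n = 4
W = unitary_svd_init((n, n), name='U_aug')        # Keras variable, shape (2n, n)
Xaug = K.get_value(W)
X = Xaug[:n] + 1j * Xaug[n:]                      # rebuild the complex matrix
print(np.allclose(X.conj().T.dot(X), np.eye(n)))  # True: X is unitary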
Example #10
 def create_critic_network(self, state_size, image_size, action_dim):
     print("Now we build cnn model")
     I = Input(shape=image_size)
     I0 = Convolution2D(
         32,
         8,
         8,
         subsample=(4, 4),
         activation='relu',
         init=lambda shape, name: normal(shape, scale=0.01, name=name),
         border_mode='same',
         input_shape=image_size)(I)
     I1 = Convolution2D(
         64,
         4,
         4,
         subsample=(2, 2),
         activation='relu',
         init=lambda shape, name: normal(shape, scale=0.01, name=name),
         border_mode='same')(I0)
     I2 = Convolution2D(
         64,
         3,
         3,
         subsample=(1, 1),
         activation='relu',
         init=lambda shape, name: normal(shape, scale=0.01, name=name),
         border_mode='same')(I1)
     I2_5 = Flatten()(I2)
     I3 = Dense(
         512,
         activation='relu',
         init=lambda shape, name: normal(shape, scale=0.01, name=name))(
             I2_5)
     I4 = Dense(
         HIDDEN2_UNITS,
         activation='relu',
         init=lambda shape, name: normal(shape, scale=0.01, name=name))(I3)
     print("Now we build the model")
     S = Input(shape=[state_size])
     A = Input(shape=[action_dim], name='action2')
     w1 = Dense(HIDDEN1_UNITS, activation='relu')(S)
     a1 = Dense(HIDDEN2_UNITS, activation='linear')(A)
     h1 = Dense(HIDDEN2_UNITS, activation='linear')(w1)
     h2 = merge([h1, a1, I4], mode='sum')
     h3 = Dense(HIDDEN2_UNITS, activation='relu')(h2)
     V = Dense(action_dim, activation='linear')(h3)
     model = Model(input=[S, A, I], output=V)
     adam = Adam(lr=self.LEARNING_RATE)
     model.compile(loss='mse', optimizer=adam)
     return model, A, S, I
Example #11
def buildmodel():
    # Build the model using the same specifications as the DeepMind paper.
    # Keras is a good library for prototyping a quick model and will later
    # be modified/used in conjunction with pure TensorFlow code.
    # TODO: Need to figure out how to run on GPU

    print("Building CNN Model")
    model = Sequential()

    # 1st Convolutional layer
    model.add(Convolution2D(32, 8, 8,
                            subsample=(4, 4),
                            init=my_init,
                            border_mode='same',
                            input_shape=(img_rows, img_cols, img_channels)))
    model.add(Activation('relu'))

    # 2nd Convolutional layer
    model.add(Convolution2D(64, 4, 4,
                            subsample=(2, 2),
                            init=my_init,
                            border_mode='same'))
    model.add(Activation('relu'))

    # 3rd Convolutional layer
    model.add(Convolution2D(64, 3, 3,
                            subsample=(1, 1),
                            init=my_init,
                            border_mode='same'))
    model.add(Activation('relu'))

    # Flatten the CNN tensors into a long vector to feed into a Dense (Fully Connected Layer)
    model.add(Flatten())

    # Connect the flattened vector to a fully connected layer
    model.add(Dense(512, init=lambda shape, name: normal(shape, scale=0.01, name=name)))
    model.add(Activation('relu'))

    # The number of outputs is equal to the number of valid actions, e.g. NUM_ACTIONS = 2 (up, down)
    model.add(Dense(NUM_ACTIONS, init=lambda shape, name: normal(shape, scale=0.01, name=name)))

    # Use the Adam optimizer for gradient descent
    adam = Adam(lr=1e-6)

    # Compile the model
    model.compile(loss='mse', optimizer=adam)

    print("CNN Model Complete")
    return model
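A hypothetical smoke test for the builder above (`img_rows`, `img_cols`, `img_channels`, and `NUM_ACTIONS` are the module-level constants it already assumes): one forward pass yields one Q-value per action.

import numpy as np

model = buildmodel()
state = np.zeros((1, img_rows, img_cols, img_channels), dtype='float32')
q_values = model.predict(state)        # shape (1, NUM_ACTIONS)
action = int(np.argmax(q_values))      # greedy action selection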
Example #12
    def create_actor_network(self, state_size, action_dim):
        print("Now we build the model")
        S = Input(shape=[state_size])
        h0 = Dense(HIDDEN1_UNITS, activation='relu')(S)
        h1 = Dense(HIDDEN2_UNITS, activation='relu')(h0)
        Steering = Dense(1, activation='tanh', init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)
        Acceleration = Dense(1, activation='sigmoid', init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)
        Brake = Dense(1, activation='sigmoid', init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)
        V = merge([Steering, Acceleration, Brake], mode='concat')
        model = Model(input=S, output=V)

        # NOTE: `model` is immediately overwritten by the Sequential CNN below,
        # so the functional actor above is dead code except for the returned `S`;
        # Convolution2D also expects a 3-D input shape rather than [state_size].
        model = Sequential()

        # CNN hyperparameters
        nb_filters = 32       # number of convolutional filters to use
        pool_size = (2, 2)    # size of pooling area for max pooling
        kernel_size = (3, 3)  # convolution kernel size

        nb_epoch = 100
        batch_size = 100

        model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                                border_mode='valid',
                                input_shape=[state_size]))
        model.add(Activation('relu'))
        model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=pool_size))
        model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(HIDDEN1_UNITS))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
        #model.add(Dense(nb_classes))
        #model.add(Activation('softmax'))

        '''
        model.compile(loss='categorical_crossentropy',
                      optimizer='adadelta',
                      metrics=['accuracy'])

        model.fit(S, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
                  verbose=1)
        '''
        return model, model.trainable_weights, S
Example #13
    def create_network(self, state_dim, action_dim, hidden1, hidden2):
        S = Input(shape=[state_dim])
        h1 = Dense(hidden1)(S)
        h2 = Dense(hidden2)(h1)
        steer = Dense(
            1,
            activation="tanh",
            init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h2)
        accel = Dense(
            1,
            activation="sigmoid",
            init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h2)
        V = merge([steer, accel], mode="concat")
        model = Model(input=S, output=V)

        return model, model.trainable_weights, S
Example #14
def getModel():
    model = Sequential()
    model.add(Convolution2D(32, 3, 3, subsample=(2, 2), init=lambda shape, name: normal(shape, scale=0.01, name=name), border_mode='same', input_shape=(imgChannel, imgRow, imgCol)))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, subsample=(2, 2), init=lambda shape, name: normal(shape, scale=0.01, name=name), border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, subsample=(2, 2), init=lambda shape, name: normal(shape, scale=0.01, name=name), border_mode='same'))
    model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense(512, init=lambda shape, name: normal(shape, scale=0.01, name=name)))
    model.add(Activation('relu'))
    model.add(Dense(actionNum, init=lambda shape, name: normal(shape, scale=0.01, name=name)))

    adam = Adam(lr=1e-5)
    model.compile(loss='mse', optimizer=adam)
    return model
 def __init__(self):
     self.x = Input(shape=(227, 227, 3))
     y1 = Convolution2D(48, 11, 11, subsample=(4, 4), W_regularizer=l2(0.0005), border_mode='valid',
                        activation='relu', init=self.alexnet_norm)(self.x)
     y2 = Convolution2D(48, 11, 11, subsample=(4, 4), W_regularizer=l2(0.0005), border_mode='valid',
                        activation='relu', init=self.alexnet_norm)(self.x)
     y1 = LRN2D()(y1)
     y2 = LRN2D()(y2)
     y1 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), border_mode='valid')(y1)
     y2 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), border_mode='valid')(y2)
     y1 = Convolution2D(128, 5, 5, W_regularizer=l2(0.0005), border_mode='same', activation='relu',
                        init=self.alexnet_norm)(y1)
     # biases are 0.1
     y2 = Convolution2D(128, 5, 5, W_regularizer=l2(0.0005), border_mode='same', activation='relu',
                        init=self.alexnet_norm)(y2)
     # biases are 0.1
     y = merge(inputs=[y1, y2], mode='concat', concat_axis=3)
     y = LRN2D()(y)
     y = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), border_mode='valid')(y)
     y1 = Convolution2D(192, 3, 3, W_regularizer=l2(0.0005), border_mode='same', activation='relu',
                        init=self.alexnet_norm)(y)
     y2 = Convolution2D(192, 3, 3, W_regularizer=l2(0.0005), border_mode='same', activation='relu',
                        init=self.alexnet_norm)(y)
     y1 = Convolution2D(192, 3, 3, W_regularizer=l2(0.0005), border_mode='same', activation='relu',
                        init=self.alexnet_norm)(y1)
     # biases are 0.1
     y2 = Convolution2D(192, 3, 3, W_regularizer=l2(0.0005), border_mode='same', activation='relu',
                        init=self.alexnet_norm)(y2)
     # biases are 0.1
     y1 = Convolution2D(128, 3, 3, W_regularizer=l2(0.0005), border_mode='same', activation='relu',
                        init=self.alexnet_norm)(y1)
     # biases are 0.1
     y2 = Convolution2D(128, 3, 3, W_regularizer=l2(0.0005), border_mode='same', activation='relu',
                        init=self.alexnet_norm)(y2)
     # biases are 0.1
     y = merge(inputs=[y1, y2], mode='concat', concat_axis=3)
     y = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), border_mode='valid')(y)
     y = Flatten()(y)
     y = Dense(4096, activation='relu', W_regularizer=l2(0.0005),
               init=lambda shape, name: normal(shape, scale=0.005, name=name))(y)  # biases are 0.1
     y = Dropout(0.5)(y)
     y = Dense(4096, activation='relu', W_regularizer=l2(0.0005),
               init=lambda shape, name: normal(shape, scale=0.005, name=name))(y)  # biases are 0.1
     y = Dropout(0.5)(y)
     self.y = Dense(1000, activation='softmax', W_regularizer=l2(0.0005),
                    init=lambda shape, name: normal(shape, scale=0.01, name=name))(y)
     self.y_ = Input(shape=(1000,))
Example #16
 def init_normal(self, shape, name=None):
     """
 Custom normal initializer for nce
 embedding. Shrink stddev.
 """
     return init.normal(shape=shape,
                        scale=1 / np.sqrt(self._emb_dim),
                        name=name)
Example #17
    def build_model(self):
        print("Building the model")
        self.model = Sequential()
        self.model.add(
            Convolution2D(
                32,
                8,
                8,
                subsample=(4, 4),
                init=lambda shape, name: normal(shape, scale=0.01, name=name),
                border_mode='same',
                input_shape=(IMAGE_NUM_OF_CHANNELS, IMAGE_WIDTH,
                             IMAGE_HEIGHT)))
        self.model.add(Activation('relu'))
        self.model.add(
            Convolution2D(
                64,
                4,
                4,
                subsample=(2, 2),
                init=lambda shape, name: normal(shape, scale=0.01, name=name),
                border_mode='same'))
        self.model.add(Activation('relu'))
        self.model.add(
            Convolution2D(
                64,
                3,
                3,
                subsample=(1, 1),
                init=lambda shape, name: normal(shape, scale=0.01, name=name),
                border_mode='same'))
        self.model.add(Activation('relu'))
        self.model.add(Flatten())
        self.model.add(
            Dense(
                512,
                init=lambda shape, name: normal(shape, scale=0.01, name=name)))
        self.model.add(Activation('relu'))
        self.model.add(
            Dense(
                NUM_OF_ACTIONS,
                init=lambda shape, name: normal(shape, scale=0.01, name=name)))

        adam = Adam(lr=1e-6)
        self.model.compile(loss='mse', optimizer=adam)
        print("The model was successfully built")
Example #18
def my_init2(shape, name="Random2"):
    global initValue2
    global checker2
    if checker2 is False:
        # compute and cache the scale the first time the initializer is called
        checker2 = True
        initValue2 = get_scale()
        print("initValue2:" + str(initValue2))
    return initializations.normal(shape, scale=initValue2, name=name)
Example #19
def buildmodel():
    print("Now we build the model")
    model = Sequential()
    model.add(
        Convolution2D(
            32,
            8,
            8,
            subsample=(4, 4),
            init=lambda shape, name: normal(shape, scale=0.01, name=name),
            border_mode='same',
            input_shape=(img_channels, img_rows, img_cols)))
    model.add(Activation('relu'))
    model.add(
        Convolution2D(
            64,
            4,
            4,
            subsample=(2, 2),
            init=lambda shape, name: normal(shape, scale=0.01, name=name),
            border_mode='same'))
    model.add(Activation('relu'))
    model.add(
        Convolution2D(
            64,
            3,
            3,
            subsample=(1, 1),
            init=lambda shape, name: normal(shape, scale=0.01, name=name),
            border_mode='same'))
    model.add(Activation('relu'))
    model.add(Flatten())
    model.add(
        Dense(512,
              init=lambda shape, name: normal(shape, scale=0.01, name=name)))
    model.add(Activation('relu'))
    model.add(
        Dense(ACTIONS,
              init=lambda shape, name: normal(shape, scale=0.01, name=name)))

    adam = Adam(lr=1e-6)
    model.compile(loss='mse', optimizer=adam)
    #model.summary()
    print("We finish building the model")
    plot(model, to_file='model.png', show_shapes=True)
    return model
    def createModel(self):
        model = Sequential()
        model.add(
            Convolution2D(
                32,
                8,
                8,
                subsample=(4, 4),
                init=lambda shape, name: normal(shape, scale=0.01, name=name),
                border_mode='same',
                input_shape=(4, 84, 84)))
        model.add(Activation('relu'))
        model.add(
            Convolution2D(
                64,
                4,
                4,
                subsample=(2, 2),
                init=lambda shape, name: normal(shape, scale=0.01, name=name),
                border_mode='same'))
        model.add(Activation('relu'))
        model.add(
            Convolution2D(
                64,
                3,
                3,
                subsample=(1, 1),
                init=lambda shape, name: normal(shape, scale=0.01, name=name),
                border_mode='same'))
        model.add(Activation('relu'))
        model.add(Flatten())
        model.add(
            Dense(
                512,
                init=lambda shape, name: normal(shape, scale=0.01, name=name)))
        model.add(Activation('relu'))
        model.add(
            Dense(
                self.actionCnt,
                init=lambda shape, name: normal(shape, scale=0.01, name=name)))

        adam = Adam(lr=1e-6)
        model.compile(loss='mse', optimizer=adam)

        return model
Example #21
def buildmodel():
    print("Now we build the model")
    # see the accompanying article for explanations of the code below
    model = Sequential()
    model.add(
        Convolution2D(
            32,
            8,
            8,
            subsample=(4, 4),
            init=lambda shape, name: normal(shape, scale=0.01, name=name),
            border_mode='same',
            input_shape=(img_channels, img_rows, img_cols)))
    model.add(Activation('relu'))
    model.add(
        Convolution2D(
            64,
            4,
            4,
            subsample=(2, 2),
            init=lambda shape, name: normal(shape, scale=0.01, name=name),
            border_mode='same'))
    model.add(Activation('relu'))
    model.add(
        Convolution2D(
            64,
            3,
            3,
            subsample=(1, 1),
            init=lambda shape, name: normal(shape, scale=0.01, name=name),
            border_mode='same'))
    model.add(Activation('relu'))
    model.add(Flatten())
    model.add(
        Dense(512,
              init=lambda shape, name: normal(shape, scale=0.01, name=name)))
    model.add(Activation('relu'))
    model.add(
        Dense(2,
              init=lambda shape, name: normal(shape, scale=0.01, name=name)))

    adam = Adam(lr=1e-6)
    model.compile(loss='mse', optimizer=adam)  # use mean squared error as the loss and Adam as the optimizer
    print("We finish building the model")
    return model
Example #22
def my_init1(shape, name="Random1"):

    global checker1
    if checker1 is False:
        # compute and cache the scale the first time the initializer is called
        checker1 = True
        global initValue1
        initValue1 = get_scale()
        print("initValue1:" + str(initValue1))
    return initializations.normal(shape, scale=initValue1, name=name)
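In Keras 1.x, `init` accepts any callable with a `(shape, name)` signature, so the cached-scale initializer plugs straight into a layer. A minimal usage sketch (assumed, not from the source):

from keras.models import Sequential
from keras.layers import Dense

checker1 = False                                      # module-level flag read by my_init1
model = Sequential()
model.add(Dense(512, init=my_init1, input_dim=100))   # first call caches the scale
model.add(Dense(10, init=my_init1))                   # later calls reuse initValue1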
Example #23
def buildmodel():
    print("Now we build the model")
    model = Sequential()
    model.add(Convolution2D(32, 8, 8, subsample=(4,4),init=lambda shape, name: normal(shape, scale=0.01, name=name), border_mode='same',input_shape=(img_channels,img_rows,img_cols)))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 4, 4, subsample=(2,2),init=lambda shape, name: normal(shape, scale=0.01, name=name), border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, subsample=(1,1),init=lambda shape, name: normal(shape, scale=0.01, name=name), border_mode='same'))
    model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense(512, init=lambda shape, name: normal(shape, scale=0.01, name=name)))
    model.add(Activation('relu'))
    model.add(Dense(2,init=lambda shape, name: normal(shape, scale=0.01, name=name)))

    adam = Adam(lr=1e-6)
    model.compile(loss='mse',optimizer=adam)
    print("We finish building the model")
    return model
Example #24
 def create_actor_network(self, state_size,action_dim):
     print("Now we build the model")
     S = Input(shape=[state_size])
     h0 = Dense(HIDDEN1_UNITS, activation='relu')(S)
     h1 = Dense(HIDDEN2_UNITS, activation='relu')(h0)
     bicep = Dense(1,activation='tanh',init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)  
     
     V = bicep          
     model = Model(input=S,output=V)
     return model, model.trainable_weights, S
 def generate_model(self):
     input_layer = Input(shape=[self.state_size])
     h0 = Dense(self.hidden_units[0], activation="relu")(input_layer)
     h1 = Dense(self.hidden_units[1], activation="relu")(h0)
     output_layer = Dense(
         self.action_size,
         activation="tanh",
         init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)
     model = Model(input=input_layer, output=output_layer)
     return model, input_layer
Example #26
def loadModel(target, source, sourceIndex, predLabel, path):
    mmdNetLayerSizes = [25, 25]
    l2_penalty = 1e-2
    init = lambda shape, name: initializations.normal(
        shape, scale=.1e-4, name=name)
    space_dim = target.X.shape[1]

    calibInput = Input(shape=(space_dim, ))
    block1_bn1 = BatchNormalization()(calibInput)
    block1_a1 = Activation('relu')(block1_bn1)
    block1_w1 = Dense(mmdNetLayerSizes[0],
                      activation='linear',
                      W_regularizer=l2(l2_penalty),
                      init=init)(block1_a1)
    block1_bn2 = BatchNormalization()(block1_w1)
    block1_a2 = Activation('relu')(block1_bn2)
    block1_w2 = Dense(space_dim,
                      activation='linear',
                      W_regularizer=l2(l2_penalty),
                      init=init)(block1_a2)
    block1_output = merge([block1_w2, calibInput], mode='sum')
    block2_bn1 = BatchNormalization()(block1_output)
    block2_a1 = Activation('relu')(block2_bn1)
    block2_w1 = Dense(mmdNetLayerSizes[1],
                      activation='linear',
                      W_regularizer=l2(l2_penalty),
                      init=init)(block2_a1)
    block2_bn2 = BatchNormalization()(block2_w1)
    block2_a2 = Activation('relu')(block2_bn2)
    block2_w2 = Dense(space_dim,
                      activation='linear',
                      W_regularizer=l2(l2_penalty),
                      init=init)(block2_a2)
    block2_output = merge([block2_w2, block1_output], mode='sum')
    block3_bn1 = BatchNormalization()(block2_output)
    block3_a1 = Activation('relu')(block3_bn1)
    block3_w1 = Dense(mmdNetLayerSizes[1],
                      activation='linear',
                      W_regularizer=l2(l2_penalty),
                      init=init)(block3_a1)
    block3_bn2 = BatchNormalization()(block3_w1)
    block3_a2 = Activation('relu')(block3_bn2)
    block3_w2 = Dense(space_dim,
                      activation='linear',
                      W_regularizer=l2(l2_penalty),
                      init=init)(block3_a2)
    block3_output = merge([block3_w2, block2_output], mode='sum')

    calibMMDNet = Model(input=calibInput, output=block3_output)

    calibMMDNet.load_weights(
        os.path.join(io.DeepLearningRoot(),
                     path + '/ResNet' + str(sourceIndex) + '.h5'))

    return calibMMDNet
Example #27
 def build_actor(self):
     print("build actor network")
     input = Input(shape=[29])
     h1 = Dense(300, activation='relu')(input)
     h2 = Dense(600, activation='relu')(h1)
     steer = Dense(
         1,
         activation='tanh',
         init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h2)
     accel = Dense(
         1,
         activation='sigmoid',
         init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h2)
     brake = Dense(
         1,
         activation='sigmoid',
         init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h2)
     action = merge([steer, accel, brake], mode='concat')
     actor = Model(input=input, output=action)
     return actor
Example #28
def mnist_irnn_model(inputShape, nb_classes):
    # inputShape 2dim
    model = Sequential()
    model.add(SimpleRNN(output_dim=100,
                        init=lambda shape, name: normal(shape, scale=0.001, name=name),
                        inner_init=lambda shape, name: identity(shape, scale=1.0, name=name),
                        activation='relu',
                        input_shape=inputShape))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    model.summary()
    return model
Example #29
    def build(self, input_shapes):
        vid_input_shape = input_shapes[0]
        seq_input_shape = input_shapes[1]
        self.seq_d_in = seq_input_shape[2]
        self.vd_d_in = vid_input_shape[1]
        self.input_spec = [InputSpec(shape=vid_input_shape), InputSpec(shape=seq_input_shape)]
        self.states = [None, None]

        # GRU weights
        self.W_z = normal((self.seq_d_in, self.seq_d_out), scale=0.01)
        self.U_z = self.inner_init((self.seq_d_out, self.seq_d_out))
        self.C_z = normal((self.vd_d_in, self.seq_d_out), scale=0.01)
        self.b_z = K.zeros((self.seq_d_out,))

        self.W_r = normal((self.seq_d_in, self.seq_d_out), scale=0.01)
        self.U_r = self.inner_init((self.seq_d_out, self.seq_d_out))
        self.C_r = normal((self.vd_d_in, self.seq_d_out), scale=0.01)
        self.b_r = K.zeros((self.seq_d_out,))

        self.W = normal((self.seq_d_in, self.seq_d_out), scale=0.01)
        self.U = self.inner_init((self.seq_d_out, self.seq_d_out))
        self.C = normal((self.vd_d_in, self.seq_d_out), scale=0.01)
        self.b = K.zeros((self.seq_d_out,))

        self.trainable_weights = [self.W_z, self.U_z, self.C_z, self.b_z,
                                  self.W_r, self.U_r, self.C_r, self.b_r,
                                  self.W, self.U, self.C, self.b]
Example #30
def constructMMD(target):

    mmdNetLayerSizes = [25, 25]
    l2_penalty = 1e-2
    init = lambda shape, name: initializations.normal(
        shape, scale=.1e-4, name=name)
    space_dim = target.X.shape[1]

    calibInput = Input(shape=(space_dim, ))
    block1_bn1 = BatchNormalization()(calibInput)
    block1_a1 = Activation('relu')(block1_bn1)
    block1_w1 = Dense(mmdNetLayerSizes[0],
                      activation='linear',
                      W_regularizer=l2(l2_penalty),
                      init=init)(block1_a1)
    block1_bn2 = BatchNormalization()(block1_w1)
    block1_a2 = Activation('relu')(block1_bn2)
    block1_w2 = Dense(space_dim,
                      activation='linear',
                      W_regularizer=l2(l2_penalty),
                      init=init)(block1_a2)
    block1_output = merge([block1_w2, calibInput], mode='sum')
    block2_bn1 = BatchNormalization()(block1_output)
    block2_a1 = Activation('relu')(block2_bn1)
    block2_w1 = Dense(mmdNetLayerSizes[1],
                      activation='linear',
                      W_regularizer=l2(l2_penalty),
                      init=init)(block2_a1)
    block2_bn2 = BatchNormalization()(block2_w1)
    block2_a2 = Activation('relu')(block2_bn2)
    block2_w2 = Dense(space_dim,
                      activation='linear',
                      W_regularizer=l2(l2_penalty),
                      init=init)(block2_a2)
    block2_output = merge([block2_w2, block1_output], mode='sum')
    block3_bn1 = BatchNormalization()(block2_output)
    block3_a1 = Activation('relu')(block3_bn1)
    block3_w1 = Dense(mmdNetLayerSizes[1],
                      activation='linear',
                      W_regularizer=l2(l2_penalty),
                      init=init)(block3_a1)
    block3_bn2 = BatchNormalization()(block3_w1)
    block3_a2 = Activation('relu')(block3_bn2)
    block3_w2 = Dense(space_dim,
                      activation='linear',
                      W_regularizer=l2(l2_penalty),
                      init=init)(block3_a2)
    block3_output = merge([block3_w2, block2_output], mode='sum')

    calibMMDNet = Model(input=calibInput, output=block3_output)

    return calibMMDNet, block3_output
Example #31
 def create_actor_network(self, state_size, action_dim):
     print("Now we build the model")
     S = Input(shape=[state_size])
     h0 = Dense(HIDDEN1_UNITS, activation='relu')(S)
     h1 = Dense(HIDDEN2_UNITS, activation='relu')(h0)
     Steering = Dense(
         1,
         activation='tanh',
         init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)
     Acceleration = Dense(
         1,
         activation='sigmoid',
         init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)
     Brake = Dense(
         1,
         activation='sigmoid',
         init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)
     V = merge([Steering, Acceleration, Brake], mode='concat')
     model = Model(input=S, output=V)
     adam = Adam(lr=self.LEARNING_RATE)
     model.compile(loss='mse', optimizer=adam)
     return model, V, S
def get_q_network(weights_path):
    model = Sequential()
    model.add(
        Dense(1024,
              init=lambda shape, name: normal(shape, scale=0.01, name=name),
              input_shape=(25112, )))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(
        Dense(1024,
              init=lambda shape, name: normal(shape, scale=0.01, name=name)))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(
        Dense(6,
              init=lambda shape, name: normal(shape, scale=0.01, name=name)))
    model.add(Activation('linear'))
    adam = Adam(lr=1e-6)
    model.compile(loss='mse', optimizer=adam)
    if weights_path != "0":
        model.load_weights(weights_path)
    return model
    def create_actor_network(self, state_size, action_dim):
        print("Actor model: state_size", state_size, "action_dim", action_dim)
        S = Input(shape = state_size)
        # S_in = Lambda(lambda img: img / 255.0)(S)
        #S1 = Lambda(lambda img: img[:,-64*64*3:])(S)
        #S_in = Reshape((64,64,3))(S1)
        batch_norm0 = BatchNormalization()(S)
        conv1 = Convolution2D(16, nb_row=4, nb_col=4, subsample=(4,4), activation='relu')(batch_norm0)
        batch_norm1 = BatchNormalization()(conv1)
        conv2 = Convolution2D(32, nb_row=4, nb_col=4, subsample=(2,2), activation='relu')(batch_norm1)
        batch_norm2 = BatchNormalization()(conv2)
        conv3 = Convolution2D(32, nb_row=4, nb_col=4, subsample=(2,2), activation = 'relu')(batch_norm2)
        batch_norm3 = BatchNormalization()(conv3)
        flat = Flatten()(batch_norm3)
        h0 = Dense(HIDDEN1_UNITS, activation='relu')(flat)
        h1 = Dense(HIDDEN2_UNITS, activation='relu')(h0)

        Steering = Dense(1,activation='tanh',init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)  
        Acceleration = Dense(1,activation='tanh',init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)
        V = merge([Steering,Acceleration],mode='concat')          
        model = Model(input=S,output=V)
        return model, model.trainable_weights, S
def create_layers(params):
    custom_init = lambda shape, name: initializations.normal(shape, scale=0.01, name=name)
    if params.rnn:
        x = Input(shape=(params.rnn_steps,) + params.observation_space_shape)
    else:
        x = Input(shape=params.observation_space_shape)
    if params.batch_norm:
        h = BatchNormalization()(x)
    else:
        h = x
    for i, hidden_size in zip(range(len(params.hidden_size)), params.hidden_size):
        if params.rnn:
            if i == len(params.hidden_size)-1:
                h = GRU(hidden_size, activation=params.activation, init=custom_init)(h)
            else:
                h = TimeDistributed(Dense(hidden_size, activation=params.activation, init=custom_init))(h)
        else:
            h = Dense(hidden_size, activation=params.activation, init=custom_init)(h)

        if params.batch_norm and i != len(params.hidden_size) - 1:
            h = BatchNormalization()(h)
    n = params.action_space_size
    y = Dense(n + 1)(h)

    # dueling aggregation: y[:, 0] is the state value V(s), y[:, 1:] the advantages A(s, a)
    if params.advantage == 'avg':
        z = Lambda(lambda a: K.expand_dims(a[:, 0], dim=-1) + a[:, 1:] - K.mean(a[:, 1:], keepdims=True),
                   output_shape=(n,))(y)
    elif params.advantage == 'max':
        z = Lambda(lambda a: K.expand_dims(a[:, 0], dim=-1) + a[:, 1:] - K.max(a[:, 1:], keepdims=True),
                   output_shape=(n,))(y)
    elif params.advantage == 'naive':
        z = Lambda(lambda a: K.expand_dims(a[:, 0], dim=-1) + a[:, 1:], output_shape=(n,))(y)
    else:
        assert False

    return x, z
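The three `Lambda` branches implement the dueling-network aggregation: unit 0 of `y` is the state value V(s) and the remaining units are advantages A(s, a). A small numeric check of the 'avg' variant (illustrative only):

import numpy as np

y = np.array([[2.0, 1.0, 3.0, 5.0]])      # [V, A_1, A_2, A_3]
q = y[:, :1] + y[:, 1:] - y[:, 1:].mean(axis=1, keepdims=True)
print(q)                                   # [[0. 2. 4.]] -> one Q-value per action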
Example #35
 def alexnet_norm(shape, name, dim_ordering):
     return normal(shape, scale=0.01, name=name, dim_ordering=dim_ordering)
Example #37
def normal(shape, name=None):
    return initializations.normal(shape, scale=0.05, name=name)
Example #38
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)

print('Evaluate IRNN...')
model = Sequential()
model.add(SimpleRNN(output_dim=hidden_units,
                    init=lambda shape, name: normal(shape, scale=0.001, name=name),
                    inner_init=lambda shape, name: identity(shape, scale=1.0, name=name),
                    activation='relu',
                    input_shape=X_train.shape[1:]))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
rmsprop = RMSprop(lr=learning_rate)
model.compile(loss='categorical_crossentropy',
              optimizer=rmsprop,
              metrics=['accuracy'])

model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epochs,
          verbose=1, validation_data=(X_test, Y_test))

scores = model.evaluate(X_test, Y_test, verbose=0)
print('IRNN test score:', scores[0])
Example #39
def my_init(shape, name=None):
    return initializations.normal(shape, scale=1.2, name=name)
def conv2D_init(shape, name=None):
    return initializations.normal(shape, scale=0.02, name=name)
Example #41
class LossHistory(keras.callbacks.Callback):
    def on_train_begin(self, logs={}):
        self.losses = []
        self.times = []
        self.start_time = time.time()

    def on_epoch_end(self, epoch, logs={}):
        self.times.append(time.time() - self.start_time)
        self.losses.append(logs.get("val_acc"))  # records validation accuracy per epoch
history = LossHistory()

print('Evaluate %s...' % ClassModel.__name__)
model = Sequential()
model.add(ClassModel(input_dim=1, output_dim=hidden_units,
                    init=lambda shape: normal(shape, scale=0.001),
                    inner_init=lambda shape: identity(shape, scale=1.0),
                    activation='relu', truncate_gradient=BPTT_truncate))
model.add(Dense(hidden_units, nb_classes))
model.add(Activation('sigmoid'))
rmsprop = RMSprop(lr=learning_rate)
model.compile(loss='categorical_crossentropy', optimizer=rmsprop)

model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epochs,
          show_accuracy=True, verbose=1, validation_data=(X_test, Y_test),  callbacks=[history])

scores = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)
print('%s test score:' % ClassModel.__name__, scores[0])
print('%s test accuracy:' % ClassModel.__name__, scores[1])

record_file = FileIO()
Example #42
def gaussian_init(shape, name=None, dim_ordering=None):
    return initializations.normal(shape, scale=0.001, name=name, dim_ordering=dim_ordering)