Example #1
    def __create_model(self, load_model=True):
        hidden_size = 100

        self.model = Sequential()
        self.model.add(
            Dense(hidden_size,
                  input_shape=(2, ),
                  activation='relu',
                  kernel_regularizer=l1(0.01)))
        self.model.add(
            Dense(hidden_size, activation='relu', kernel_regularizer=l1(0.01)))
        # self.model.add(Dense(hidden_size, activation='relu'))
        self.model.add(Dense(self.num_actions))
        self.model.compile(sgd(lr=0.0001), "mse")

        if load_model and os.path.exists("model.dqlearning1"):
            self.model.load_weights("model.dqlearning1")

        self.model2 = Sequential()
        self.model2.add(
            Dense(hidden_size,
                  input_shape=(2, ),
                  activation='relu',
                  kernel_regularizer=l1(0.01)))
        self.model2.add(
            Dense(hidden_size, activation='relu', kernel_regularizer=l1(0.01)))
        # self.model2.add(Dense(hidden_size, activation='relu'))
        self.model2.add(Dense(self.num_actions))
        self.model2.compile(sgd(lr=0.0001), "mse")

        if load_model and os.path.exists("model.dqlearning2"):
            self.model.load_weights("model.dqlearning2")
Example #2
def build_model():
    '''
    Returns four initialized objects: the model, the agent model, the environment, and the replay.
    '''
    model = Sequential()
    model.add(
        Dense(hidden_size, input_shape=(state_space, ),
              activation='relu'))  # the trailing comma makes (state_space,) a 1-tuple
    model.add(Dense(hidden_size, activation='relu'))
    model.add(Dense(action_space))
    model.compile(sgd(lr=.04, clipvalue=3.0), "mse")

    agent_model = Sequential()
    agent_model.add(
        Dense(hidden_size, input_shape=(state_space, ),
              activation='relu'))  # the trailing comma makes (state_space,) a 1-tuple
    agent_model.add(Dense(hidden_size, activation='relu'))
    agent_model.add(Dense(action_space))
    agent_model.compile(sgd(lr=.04, clipvalue=3.0), "mse")

    # Define environment/game
    # JEN: I'm not sure the untrained model can simply be passed in as a starting
    # model and act as functionally random. Make this a copy (see the sketch
    # after this example).
    env = World(agent_model)

    # Initialize experience replay object
    exp_replay = ExperienceReplay(max_memory=max_memory)

    return model, agent_model, env, exp_replay
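The note above asks for a copy of the untrained model. A minimal sketch of one way to do that, assuming the legacy Keras 2 clone_model helper and the World class from this example:

# Sketch: hand World an independent copy of the untrained agent model, so that
# later training of agent_model does not mutate the environment's copy.
from keras.models import clone_model

env_model = clone_model(agent_model)              # same architecture, freshly initialized
env_model.set_weights(agent_model.get_weights())  # copy the (random) starting weights too
env = World(env_model)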
Example #3
def cnn(shape, no_classes):
    """convolutional neural network model"""

    model = Sequential()

    model.add(Conv2D(4, 3, padding='same', input_shape=shape))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(8, 3, padding='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(100))
    model.add(Activation('relu'))

    model.add(Dense(20))
    model.add(Activation('relu'))

    if no_classes > 2:
        model.add(Dense(no_classes))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer=sgd(lr=0.01),
                      metrics=['categorical_accuracy'])
    else:
        model.add(Dense(no_classes - 1))
        model.add(Activation('sigmoid'))
        model.compile(loss='binary_crossentropy',
                      optimizer=sgd(lr=0.01),
                      metrics=['accuracy'])

    return model
Example #4
def build_model():
    '''
    Returns four initialized objects: the model, the agent model, the environment, and the replay.
    '''

    model = Sequential()
    model.add(Dense(state_space, input_shape=(state_space,), activation='relu',kernel_regularizer=l2(0.0001)))
    model.add(Dense(hidden_size, activation='relu', kernel_regularizer=l2(0.0001)))
    model.add(Dense(action_space, kernel_regularizer=l2(0.0001)))
    model.compile(sgd(lr=.04, clipvalue=3.0), "mse")

    agent_model = Sequential()
    agent_model.add(Dense(state_space, input_shape=(state_space,), activation='relu',kernel_regularizer=l2(0.0001)))
    agent_model.add(Dense(hidden_size, activation='relu', kernel_regularizer=l2(0.0001)))
    agent_model.add(Dense(action_space, kernel_regularizer=l2(0.0001)))
    agent_model.compile(sgd(lr=.04, clipvalue=3.0), "mse")

    # Define environment/game
    env = World()

    # Initialize experience replay object
    exp_replay = ExperienceReplay(max_memory=max_memory)


    return model, agent_model, env, exp_replay
Example #5
    def __init__(self, dataset, *args, **kwargs):
        super(VGG, self).__init__(*args, **kwargs)

        model = VGG16(include_top=False, input_shape=dataset.input_shape)
        x = Flatten(name='flatten')(model.output)
        x = Dense(1024, activation='relu', name='fc1')(x)
        x = Dropout(0.5)(x)
        x = Dense(1024, activation='relu', name='fc2')(x)
        x = Dropout(0.5)(x)
        x = Dense(dataset.output_size,
                  activation='softmax',
                  name='predictions')(x)
        self.model = Model(inputs=model.input, outputs=x)
        opt = optimizers.sgd(lr=0.0001)
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=opt,
                           metrics=['accuracy'])

        probabilistic_model = VGG16(include_top=False,
                                    input_shape=dataset.input_shape)
        x = Flatten(name='flatten')(probabilistic_model.output)
        x = Dense(1024, activation='relu', name='fc1')(x)
        x = Dropout(0.5)(x)
        x = Dense(1024, activation='relu', name='fc2')(x)
        x = Dropout(0.5)(x)
        x = Dense(dataset.output_size,
                  activation='softmax',
                  name='predictions')(x)
        self.probabilistic_model = Model(inputs=probabilistic_model.input,
                                         outputs=x)
        opt = optimizers.sgd(lr=0.0001)
        self.probabilistic_model.compile(loss='categorical_crossentropy',
                                         optimizer=opt,
                                         metrics=['accuracy'])
Example #6
    def __init__(self, dataset, *args, **kwargs):
        super(CNN, self).__init__(*args, **kwargs)

        model = Sequential()
        model.add(
            Conv2D(32, (3, 3), padding='same',
                   input_shape=dataset.input_shape))
        model.add(Activation('relu'))
        model.add(Conv2D(32, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, (3, 3), padding='same'))
        model.add(Activation('relu'))
        model.add(Conv2D(64, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(512))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
        model.add(Dense(dataset.output_size))
        model.add(Activation('softmax'))
        # opt = optimizers.rmsprop(lr=0.0001, decay=1e-6)
        # opt = optimizers.adam()
        opt = optimizers.sgd(lr=0.0001)
        model.compile(loss='categorical_crossentropy',
                      optimizer=opt,
                      metrics=['accuracy'])
        self.model = model

        probabilistic_model = Sequential()
        probabilistic_model.add(
            Conv2D(32, (3, 3), padding='same',
                   input_shape=dataset.input_shape))
        probabilistic_model.add(Activation('relu'))
        probabilistic_model.add(Conv2D(32, (3, 3)))
        probabilistic_model.add(Activation('relu'))
        probabilistic_model.add(MaxPooling2D(pool_size=(2, 2)))
        probabilistic_model.add(BayesianDropout(0.25))
        probabilistic_model.add(Conv2D(64, (3, 3), padding='same'))
        probabilistic_model.add(Activation('relu'))
        probabilistic_model.add(Conv2D(64, (3, 3)))
        probabilistic_model.add(Activation('relu'))
        probabilistic_model.add(MaxPooling2D(pool_size=(2, 2)))
        probabilistic_model.add(BayesianDropout(0.25))
        probabilistic_model.add(Flatten())
        probabilistic_model.add(Dense(512))
        probabilistic_model.add(Activation('relu'))
        probabilistic_model.add(BayesianDropout(0.5))
        probabilistic_model.add(Dense(dataset.output_size))
        probabilistic_model.add(Activation('softmax'))
        # opt = optimizers.rmsprop(lr=0.0001, decay=1e-6)
        # opt = optimizers.adam()
        opt = optimizers.sgd(lr=0.0001)
        probabilistic_model.compile(loss='categorical_crossentropy',
                                    optimizer=opt,
                                    metrics=['accuracy'])
        self.probabilistic_model = probabilistic_model
Example #7
def tryPerceptron(fName, learningRate, epochs):
    data = pd.read_csv("../Data/{}.csv".format(fName))
    data.columns = list(range(1, len(data.columns) + 1))
    yVals = data.iloc[:, -1]  # concrete has 3 outputs; keep the last column as the target
    xVals = data.iloc[:, :-1]  # drop the target column from the x inputs
    xVals.insert(loc=0,
                 column=0,
                 value=[1 for x in range(0, len(xVals.values))],
                 allow_duplicates=True)
    yVals = pd.DataFrame(preprocessing.scale(yVals.to_numpy().reshape(-1, 1)))
    yVals = yVals.iloc[:, -1]
    # scale the inputs as well
    xVals = pd.DataFrame(preprocessing.scale(xVals))
    optimizerDict = {
        "Adam": optimizers.adam(lr=learningRate),
        "SGD": optimizers.sgd(lr=learningRate),
        "SGD_Moment": optimizers.sgd(lr=learningRate, momentum=.2)
    }
    for item in optimizerDict.keys():
        for i in range(0, len(activationArray)):
            print("\t{} {} {} ".format(item, activationArray[i],
                                       modelsToUse[0]))
            #another for loop for models after this
            currentParams = {
                'model': modelsToUse[0],
                'optimizer': optimizerDict[item],
                'activation': activationArray[i],
                'epochs': epochs,
                'lr': learningRate
            }
            if (item == "SGD" or item
                    == "SGD_Moment") and activationArray[i] == 'linear':
                if item == "SGD_Moment":
                    currentParams['optimizer'] = optimizers.sgd(
                        lr=learningRate, momentum=.5, clipnorm=1.)
                else:
                    currentParams['optimizer'] = optimizers.sgd(
                        lr=learningRate, clipnorm=1.)

            myCols, myNumFeat, rVals, rCV, rAdj = GeneralModel.forwardSelectAll(
                xVals, yVals, currentParams)
            #base case with all features.

            print("\t\tColumn Index used:{}".format(myCols))
            print("\t\tNumFeatures forwardSelect Chose:{}".format(myNumFeat))
            print("\t\t rCVVales:  {}".format(rCV))
            print("\t\t rBarVaues:  {}".format(rAdj))
            GeneralModel.graphAttsVRVals(rVals, rCV, rAdj, myNumFeat, item,
                                         currentParams, 0, fName)
Example #8
def build_model_VAD_M2():
    """
    Layered DNN (includes the age and gender sub-attributes, but does not output them);
    learns the three attributes V, A, D in parallel.
    :return: model
    """
    main_input = Input(shape=(1, 6373), name='main_input')

    x = Dense(64, activation='sigmoid')(main_input)

    x = Dropout(rate=0.5)(x)

    x = Flatten()(x)

    v = Dense(1, activation='relu', name='v_out')(x)
    a = Dense(1, activation='relu', name='a_out')(x)
    d = Dense(1, activation='relu', name='d_out')(x)

    model = Model(inputs=[main_input], outputs=[v, a, d])
    sgd = optimizers.sgd(lr=1e-4)
    model.compile(loss={
        'v_out': 'mse',
        'a_out': 'mse',
        'd_out': 'mse'
    },
                  loss_weights={
                      'v_out': 0.2,
                      'a_out': 0.6,
                      'd_out': 0.2
                  },
                  optimizer=sgd,
                  metrics=['accuracy'])

    model.summary()

    return model
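The dict form of compile attaches one loss to each named output, and loss_weights makes the training objective the weighted sum 0.2*MSE(v_out) + 0.6*MSE(a_out) + 0.2*MSE(d_out). A hypothetical training call for this model; X, v_y, a_y and d_y are assumed arrays with matching first dimensions:

# Hypothetical fit call; targets are keyed by the output layer names.
model = build_model_VAD_M2()
history = model.fit({'main_input': X},
                    {'v_out': v_y, 'a_out': a_y, 'd_out': d_y},
                    epochs=10, batch_size=32)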
Example #9
    def __init__(self, input_dim=0, output_dim=0, lr=0.01):
        self.input_dim = input_dim
        self.lr = lr

        # LSTM network
        self.model = Sequential()

        self.model.add(
            LSTM(256,
                 input_shape=(1, input_dim),
                 return_sequences=True,
                 stateful=False,
                 dropout=0.5))
        self.model.add(BatchNormalization())
        self.model.add(
            LSTM(256, return_sequences=True, stateful=False, dropout=0.5))
        self.model.add(BatchNormalization())
        self.model.add(
            LSTM(256, return_sequences=False, stateful=False, dropout=0.5))
        self.model.add(BatchNormalization())
        self.model.add(Dense(output_dim))
        self.model.add(Activation('sigmoid'))

        self.model.compile(optimizer=sgd(lr=lr), loss='mse')
        self.prob = None
Example #10
def create_model():
    nb_filters = 8
    nb_conv = 5
    model = Sequential()

    # add a convolutional model first
    model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
                            border_mode='valid', input_shape=(480, 640, 3)))
    model.add(Activation('relu'))

    model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
    model.add(Activation('relu'))

    model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
    model.add(Activation('relu'))

    model.add(Flatten())
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(1))
    model.add(Activation('linear')) #regression model, hence linear activation

    model.compile(loss='mean_squared_error', optimizer=sgd())
    model.summary()
    return model
Example #11
def mlp_binary(x, y, para):
    nb_features = x.shape[1]
    nb_classes = y.shape[1]
    # model
    early_stop = EarlyStopping(monitor='val_loss',
                               min_delta=0,
                               patience=100,
                               verbose=1,
                               mode='auto')
    callbacks_list = [early_stop]
    model = Sequential(name=para['model_name'])
    model.add(Dense(30, input_dim=nb_features, use_bias=True))
    model.add(Activation('relu'))
    model.add(Dropout(para['drop_rate']))
    model.add(Dense(units=nb_classes))
    model.add(Activation('softmax'))
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizers.sgd(lr=5e-3, momentum=0.5),
                  metrics=['accuracy'])
    print(model.summary())
    history = model.fit(x,
                        y,
                        batch_size=para['size_of_batch'],
                        epochs=para['nb_epoch'],
                        validation_split=0.33,
                        shuffle=True,
                        callbacks=callbacks_list)
    return history, model
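Example #11 trains a softmax output with binary_crossentropy, which runs but mixes the two conventions Example #3 keeps apart. A sketch of the usual pairings, under the same legacy Keras API and hypothetical layer sizes:

from keras.models import Sequential
from keras.layers import Dense
from keras import optimizers

# Binary head: a single sigmoid unit trained with binary_crossentropy.
binary = Sequential()
binary.add(Dense(30, input_dim=10, activation='relu'))
binary.add(Dense(1, activation='sigmoid'))
binary.compile(loss='binary_crossentropy',
               optimizer=optimizers.sgd(lr=5e-3, momentum=0.5),
               metrics=['accuracy'])

# Multi-class head: softmax over all classes with categorical_crossentropy.
multi = Sequential()
multi.add(Dense(30, input_dim=10, activation='relu'))
multi.add(Dense(3, activation='softmax'))
multi.compile(loss='categorical_crossentropy',
              optimizer=optimizers.sgd(lr=5e-3, momentum=0.5),
              metrics=['accuracy'])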
Example #12
def getModel(numClasses, train_features, train_labels, validation_features,
             validation_labels):
    with graph.as_default():
        print('getModel()..........numClasses: {}'.format(numClasses))
        model = models.Sequential()
        model.add(layers.Dense(512, activation='relu', input_dim=1 * 1 * 2048))
        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(numClasses, activation='softmax'))
        model.summary()

        model.compile(
            loss='categorical_crossentropy',
            # optimizer=optimizers.RMSprop(lr=2e-4),
            optimizer=optimizers.sgd(),
            metrics=['acc'])

        early_stopping = EarlyStopping(patience=15,
                                       mode='auto',
                                       monitor='val_loss')
        history = model.fit(train_features,
                            train_labels,
                            epochs=500,
                            batch_size=200,
                            validation_data=(validation_features,
                                             validation_labels),
                            callbacks=[early_stopping])

        model.save(MODEL_NAME)
        return model
Example #13
def get_model(num_inputs, num_actions, hidden_size):
    model = Sequential()
    model.add(Dense(hidden_size, input_dim=num_inputs, activation='relu'))
    model.add(Dense(hidden_size, activation='relu'))
    model.add(Dense(num_actions))
    model.compile(sgd(lr=.00001), loss="mse")
    return model
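A hypothetical call of this helper, assuming a 4-dimensional observation and 2 actions:

import numpy as np

# Hypothetical usage: predict returns one Q-value per action.
model = get_model(num_inputs=4, num_actions=2, hidden_size=100)
q_values = model.predict(np.zeros((1, 4)))  # shape (1, 2)
action = int(np.argmax(q_values[0]))        # greedy action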
Example #14
def train_canterpillar_with_generator(name):
    model = canterpillar_net()
    model.summary()
    optimiser = sgd(momentum=0.9, nesterov=True)

    model.compile(optimizer=optimiser, loss=mean_squared_error)

    x_train, x_test = prepare_data_for_canterpillar(segment_len=None)
    batch_size = 20
    steps_per_epoch = 15
    print("батчей за эпоху будет:" + str(steps_per_epoch))
    print("в одном батче " + str(batch_size) + " кардиограмм.")
    train_generator = ecg_batches_generator(segment_len=ecg_segment_len,
                                            batch_size=batch_size,
                                            ecg_dataset=x_train)
    test_generator = ecg_batches_generator(segment_len=ecg_segment_len,
                                           batch_size=batch_size,
                                           ecg_dataset=x_test)

    tb_callback = TensorBoard(log_dir='./caterpillar_logs',
                              histogram_freq=5,
                              write_graph=True,
                              write_grads=True)
    y_test = next(test_generator)

    history = model.fit_generator(generator=train_generator,
                                  steps_per_epoch=steps_per_epoch,
                                  epochs=50,
                                  validation_data=y_test,
                                  validation_steps=2,
                                  callbacks=[tb_callback])

    save_history(history, name)
    model.save(name + '.h5')
    return model
Example #15
    def get_hyperparameter(self):
        hyper = dict()
        ############################
        '''
        (1) Adjust the parameter values below
        '''
        hyper['batch_size'] = 16  # batch size: 16

        hyper['epochs'] = 20  # set epochs to at most 20!!

        #hyper['learning_rate'] = 1.0  # learning rate
        #hyper['learning_rate'] = 0.1
        #hyper['learning_rate'] = 0.01
        #hyper['learning_rate'] = 0.001
        hyper['learning_rate'] = 0.0001  # learning rate

        # choose an optimizer [sgd, rmsprop, adagrad, adam, etc.]
        #hyper['optimizer'] = optimizers.Adadelta(lr=hyper['learning_rate'])  # default: SGD
        hyper['optimizer'] = optimizers.sgd(lr=hyper['learning_rate'],
                                            decay=1e-6)
        #hyper['optimizer'] = optimizers.RMSprop(lr=hyper['learning_rate'], decay=1e-6)
        #hyper['optimizer'] = optimizers.Adamax(lr=hyper['learning_rate'])
        #hyper['optimizer'] = optimizers.Nadam(lr=hyper['learning_rate'])
        #hyper['optimizer'] = optimizers.Adagrad(lr=hyper['learning_rate'])
        ############################
        return hyper
Example #16
def train_model(model_name):

	model, name=model_name
	if name=='emotion_bn':
		model.compile(loss=losses.categorical_crossentropy,\
		optimizer=optimizers.sgd(lr=1e-4), \
		metrics=['accuracy'])


		checkpoint = ModelCheckpoint(filepath='/Users/nex03343/Desktop/CS231N/project/model_emotion_custom.hdf5', monitor='val_acc',\
									verbose=1, save_best_only=True, mode='max')
		csv_logger = CSVLogger('/Users/nex03343/Desktop/CS231N/project/training_emotion_custom.log')
		history=model.fit(x_train,y_train, 
			epochs=epochs , 
			batch_size=batch_size,
			shuffle=True,
			verbose=1, 
			validation_data=(x_val,y_val),
			callbacks=[csv_logger, checkpoint])
	

		return history
Example #17
def main():
    # load dataset
    print('Loading dataset...')
    (x_train, y_train), (x_test, y_test) = load_data()

    # build model
    print('Building model...')
    model = build_model()

    # compile model
    print('Compiling model...')
    optimizer = sgd(0.01, 0.9, 0.0005, nesterov=True)
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    #output = model.layers[1].output
    #output = output.eval(session=K.get_session())

    # train model
    for i in range(20):
        print(i + 1)
        print('Training model...')
        model.fit(x_train,
                  y_train,
                  batch_size=batch_size,
                  epochs=epochs,
                  validation_data=(x_test, y_test),
                  shuffle=True)

        # evaluate model
        print('Evaluating model...')
        score = model.evaluate(x_test, y_test)
        print('Test accuracy: ', score[1])
        print('Test loss: ', score[0])
Example #18
def setup_model(args):
    ''' Build and compile the models. '''

    # Build the models.
    gen, dis, full = build_model(args)

    # The full model is to train the generator, so freeze the discriminator.
    # Due to a bug in preserving the trainable state of a sub model after
    # saving and loading a model, each layer has to be set to untrainable
    # instead of just setting the discriminator sub model to be untrainable.
    for layer in full.get_layer('discriminator').layers:
        layer.trainable = False

    full.get_layer('discriminator').set_weights(dis.get_weights())

    # Compile the models for training.
    dis.compile(optimizer=adam(lr=args.dis_lr), loss=discriminator_loss)
    full.compile(optimizer=sgd(lr=args.gen_lr), loss=generator_loss)

    # Show the model architectures.
    print('discriminator')
    dis.summary()

    print('stacked')
    full.summary()

    return gen, dis, full
Example #19
    def __init__(self,
                 name=None,
                 width=4,
                 height=4,
                 weights_directory="model_weights"):
        """Initialization: properties of the model"""

        self.epsilon = .1  # exploration
        self.num_actions = 4  # [up, right, down, left]
        self.hidden_size = 100
        self.name = name

        self.model = Sequential()
        self.model.add(
            Dense(self.hidden_size,
                  input_shape=(width * height, ),
                  activation='relu'))
        self.model.add(Dense(self.hidden_size, activation='relu'))
        self.model.add(Dense(self.num_actions))
        self.model.compile(sgd(lr=.2), "mse")

        self.weights_directory = weights_directory

        if name:
            suffix = f"-{name}"
        else:
            suffix = ""

        self.model_file = os.path.join(weights_directory,
                                       f"model_weights{suffix}.h5")
Example #20
File: rpiter.py Project: xianyue360/test
def get_optimizer(opt_name):
    if opt_name == 'sgd':
        return optimizers.sgd(lr=SGD_LEARNING_RATE, momentum=0.5)
    elif opt_name == 'adam':
        return optimizers.adam(lr=ADAM_LEARNING_RATE)
    else:
        return opt_name
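A hypothetical usage sketch; SGD_LEARNING_RATE and ADAM_LEARNING_RATE are assumed to be module-level constants in the project. Note that unrecognized names fall through the else branch as plain strings, which Keras compile() also accepts (with that optimizer's default settings):

# Hypothetical usage of get_optimizer; the constants are assumptions.
SGD_LEARNING_RATE = 0.01
ADAM_LEARNING_RATE = 0.001

opt = get_optimizer('sgd')        # an optimizers.sgd instance (lr=0.01, momentum=0.5)
opt2 = get_optimizer('rmsprop')   # falls through: returns the string 'rmsprop'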
Example #21
def build_model_emotion1():
    """
    Build the model: three hidden layers using the sigmoid activation;
    the last layer uses softmax.
    :return: model
    """
    main_input = Input(shape=(1, 6373), name='main_input')
    x = Dense(64, activation='sigmoid')(main_input)
    x = Dense(64, activation='sigmoid')(x)
    x = Dense(64, activation='sigmoid')(x)

    x = Flatten()(x)

    y_emotion = Dense(8, activation='softmax')(x)

    model = Model(inputs=[main_input], outputs=[y_emotion])

    sgd = optimizers.sgd(lr=0.01)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
    model.summary()

    return model
Example #22
def get_model(img_x, img_y):
    model = Sequential()
    model.add(
        Conv2D(64,
               kernel_size=(5, 5),
               padding='same',
               activation='relu',
               input_shape=(img_x, img_y, 1)))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(1, 1)))

    model.add(
        Conv2D(128, kernel_size=(5, 5), padding='same', activation='relu'))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(1, 1)))

    model.add(
        Conv2D(256, kernel_size=(5, 5), padding='same', activation='relu'))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(1, 1)))

    model.add(Flatten())
    model.add(Dense(300, activation='relu'))
    model.add(Dropout(rate=0.5))
    model.add(Dense(8, activation='softmax'))

    opt = optimizers.sgd(lr=0.01, momentum=0.9)
    model.compile(
        optimizer=opt,
        loss='categorical_crossentropy',
        # loss='mse',
        metrics=['accuracy'])

    return model
Example #23
def main():
    # load dataset
    print('Loading dataset...')
    (x_train, y_train), (x_test, y_test) = load_data()
    
    # build model
    print('Building model...')
    model = build_model()

    #plot_model(model, to_file='model.png')
    #exit()    
    # compile model
    print('Compiling model...')
    optimizer = sgd(0.01, 0.9, 0.0005)
    #optimizer = keras.optimizers.rmsprop(lr=0.0001,decay=1e-6)
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])

    # train model
    for i in range(20):
        print(i+1)
        print('Training model...')
        model.fit(x_train, y_train,
                  batch_size=batch_size,
                  epochs=epochs,
                  validation_data=(x_test, y_test),
                  shuffle=True)
    
        # evaluate model
        print('Evaluating model...')
        score = model.evaluate(x_test, y_test)
        print('Test accuracy: ', score[1])
        print('Test loss: ', score[0])
Example #24
File: mlp.py Project: chingheng113/ml_farm
def ann(x_data, y_data):
    nb_features = x_data.shape[1]
    nb_classes = 2
    batch_size = int(round(x_data.shape[0]*0.1, 0))
    nb_neuron_1 = int(round(nb_features*2/3))
    nb_neuron_2 = int(round(nb_neuron_1*2/3))
    nb_neuron_3 = int(round(nb_neuron_2*2/3))

    early_stop = EarlyStopping(monitor='val_loss', min_delta=0, patience=100, verbose=1, mode='auto')
    callbacks_list = [early_stop]

    model = Sequential()
    model.add(Dense(nb_neuron_1, input_dim=nb_features))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_neuron_2))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    # model.add(Dense(nb_neuron_3))
    # model.add(Activation('tanh'))
    model.add(Dense(nb_classes, activation='softmax'))
    model.compile(optimizer=optimizers.sgd(lr=1e-2), loss=losses.mse, metrics=['accuracy'])

    history = model.fit(x_data, to_categorical(y_data),
                        batch_size=batch_size,
                        epochs=500,
                        shuffle=True,
                        validation_split=0.33,
                        callbacks=callbacks_list)
    return model, history
Example #25
    def __init__(self,
                 allowed_actions,
                 height,
                 width,
                 name=None,
                 learning_rate=0.01,
                 epsilon_decay=0.9999):
        self.action_size = len(allowed_actions)
        self.actions = allowed_actions
        self.state_size = 400
        self.gamma = 0.9
        self.epsilon = 1.0
        self.epsilon_min = 0.01
        self.epsilon_decay = epsilon_decay
        self.learning_rate = learning_rate

        if name is not None and os.path.isfile("model-" + name):
            model = load_model("model-" + name)
        else:
            model = Sequential()
            model.add(
                Dense(24, input_shape=(self.state_size, ), activation='relu'))
            model.add(Dense(24, activation="relu"))
            model.add(Dense(self.action_size, activation='linear'))
            model.compile(loss='mse', optimizer=sgd(lr=self.learning_rate))

        self.model = model
Example #26
    def _build_model(self):
        model = Sequential()
        model.add(Dense(100, input_dim=self.state_size, activation='relu'))  # widened from 24 units
        model.add(Dense(80, activation='relu'))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(loss='mse', optimizer=sgd(lr=self.learning_rate))
        return model
Example #27
    def _build_model(self):
        model = Sequential()
        model.add(Dense(24, input_dim=self.state_size, activation='relu'))
        model.add(Dense(24, activation='relu'))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(loss='mse', optimizer=sgd(lr=self.learning_rate))
        return model
Example #28
    def __init__(self, input_dim, output_dim=0, lr=0.01):
        self.input_dim = input_dim
        self.lr = lr

        # LSTM network
        self.model = Sequential()

        self.model.add(
            LSTM(256,
                 input_shape=(5, 15),
                 return_sequences=True,
                 stateful=False,
                 dropout=0.5))

        # original LSTM model
        # self.model.add(LSTM(256, input_shape=input_dim,
        #                     return_sequences=True, stateful=False, dropout=0.5))
        self.model.add(BatchNormalization())
        self.model.add(
            LSTM(256, return_sequences=True, stateful=False, dropout=0.5))
        self.model.add(BatchNormalization())
        self.model.add(
            LSTM(256, return_sequences=False, stateful=False, dropout=0.5))
        self.model.add(BatchNormalization())
        self.model.add(Dense(3))
        # self.model.add(Dense(units=3, activation='softmax'))
        self.model.add(Activation('linear'))

        self.model.compile(optimizer=sgd(lr=lr),
                           loss='mse',
                           metrics=['accuracy'])
        self.prob = None
Example #29
    def __init__(self, input_dim=0, output_dim=0, lr=0.01):
        self.input_dim = input_dim
        self.lr = lr

        # LSTM network
        self.model = Sequential()

        self.model.add(
            LSTM(256,
                 input_shape=(1, input_dim),
                 return_sequences=True,
                 stateful=False,
                 dropout=0.5))
        self.model.add(BatchNormalization())
        self.model.add(
            LSTM(256, return_sequences=True, stateful=False, dropout=0.5))
        self.model.add(BatchNormalization())
        self.model.add(
            LSTM(256, return_sequences=False, stateful=False, dropout=0.5))
        self.model.add(Dense(output_dim))
        self.model.add(Activation('sigmoid'))
        # print("help")
        # print(help(Adadelta))
        # adadelta = Adadelta(lr=lr, rho=0.95, epsilon=None, decay=0.0)
        # adam = Adam(lr=self.lr,beta_1=0.9,beta_2=0.999)
        # self.model.compile(optimizer=Adam(lr=self.lr,beta_1=0.9,beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False),metrics=['accuracy'],loss='mse')
        self.model.compile(optimizer=sgd(lr=lr), loss='mse')
        self.prob = None
Example #30
    def __init__(self):

        # "name": behavior instance
        self.agents = {}

        # Internal episode counter.
        self.epcount = 1

        # Go through agent_config and create an agent with the corresponding model.
        for key in agent_config.keys():

            # Create NN:
            # This is a bad model. I just want to see if instantiating this works.
            # I should also probably clean this up. A lot.
            nn = Sequential()
            nn.add(Dense(20, input_shape=(16, ), activation="relu"))
            nn.add(Dense(20, activation="tanh"))
            nn.add(Dense(20, activation="tanh"))
            nn.add(Dense(3, activation="linear"))
            nn.compile(sgd(lr=0.01), "mse")

            agent_instance = agent(nn)

            # Warm up the agent and immediately reset:
            # TODO: maybe fix act()? During training we might not necessarily want
            # to act on an actual stock.
            agent_instance.act(1, np.ones((1, 16)), np.ones((1, 16)))
            agent_instance.reset()

            behavior_instance = agent_config[key]()
            self.agents[key] = (agent_instance, behavior_instance)
Example #31
def baseline_model(grid_size, num_actions, hidden_size):
    # setting up the model with keras
    model = Sequential()
    model.add(Dense(hidden_size, input_shape=(grid_size,), activation='relu'))
    model.add(Dense(hidden_size, activation='relu'))
    model.add(Dense(num_actions))
    model.compile(sgd(lr=.01), "mse")
    return model
Example #32
def get_model ():
    m = Sequential ()
    m.add (Dense (hidden_size, input_shape=(4,), activation='relu'))
    m.add (Dense (hidden_size, activation='relu'))
    m.add (Dense (hidden_size, activation='relu'))
    m.add (Dense (num_actions))
    m.compile(sgd(lr=.01), "mse")
    return m
Example #33
    def compile(self, opt):
        """
        Optimization and Loss definition
        """
        self.model.compile(
            optimizer=sgd(),
            loss=["mse", "categorical_crossentropy"]
        )
Example #34
File: autoencoder.py Project: kleach/ML
def train(images, labels, weights):
    model = Sequential()
    model.add(Dense(32, input_dim=64, init='uniform', weights=weights))
    model.add(Activation('relu'))
    model.add(Dense(output_dim=10))
    model.add(Activation('softmax'))
    _sgd = sgd(lr=0.0228)
    model.compile(loss='categorical_crossentropy', optimizer=_sgd, metrics=['accuracy'])
    model.fit(images, labels, validation_split=0.3, nb_epoch=1488)
Example #35
File: autoencoder.py Project: kleach/ML
def auto_encode():
    model = Sequential()
    model.add(Dense(32, input_dim=64, init='uniform'))
    model.add(Activation('relu'))
    model.add(Dense(output_dim=64))
    model.add(Activation('relu'))
    _sgd = sgd(lr=0.0228)
    model.compile(loss='mse', optimizer=_sgd, metrics=['accuracy'])
    return model
Example #36
    def __init__ (self, model, max_memory=2000, discount=0.7, unfreeze_count=5):
        self.max_memory = max_memory
        self.discount = discount
        self.unfreeze_count = unfreeze_count
        self.memory = []
        self.buffer = []

        self.frozen_model = Sequential.from_config (model.get_config ())
        self.frozen_model.compile (sgd(lr=.01), "mse")
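Sequential.from_config copies only the architecture, so the frozen model starts from fresh random weights. A minimal sketch of the periodic weight sync such a frozen target network needs (an assumption on my part; no unfreeze method is shown in the example):

    # Sketch: copy the live model's weights into the frozen target network.
    def unfreeze(self, model):
        self.frozen_model.set_weights(model.get_weights())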
Example #37
    def get_model(self):
        m = Sequential()
        m.add(Dense(self.hidden_size, input_shape=(self.num_obs,), activation='relu'))
        m.add(Dense(self.hidden_size, activation='tanh'))
        m.add(Dense(self.num_actions))
        #with open("model.json", "r") as outfile:
        #    model = model_from_json(json.load(outfile))
        #model.load_weights("model.h5")
        m.compile(sgd(lr=.003), "mse")
        return m
Example #38
File: normalization.py Project: kleach/ML
def train(images, labels):
    model = Sequential()
    model.add(Dense(64, input_dim=64, init='uniform'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dense(output_dim=10))
    model.add(BatchNormalization())
    model.add(Activation('softmax'))
    _sgd = sgd(lr=0.0228, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=_sgd, metrics=['accuracy'])
    model.fit(images, labels, validation_split=0.3, nb_epoch=1488, batch_size=750)
Example #39
    def __init__(self, model, max_memory, discount, unfreeze_count, num_actions):
        self.max_memory = max_memory
        self.discount = discount
        self.unfreeze_count = unfreeze_count
        self.num_actions = num_actions
        self.memory = list ()

        # TODO: don't assume a Sequential model
        # note: the compile settings don't matter here because the frozen model is never trained
        self.frozen_model = Sequential.from_config (model.get_config ())
        self.frozen_model.compile (sgd(lr=.01), "mse")
Example #40
    def __init__(self, regressor=None, gamma=0.99):
        self.na = 3  # set when calling the update function the first time
        self.nEpochs = 3000
        self.hidden_size = 8
        if regressor is None:
            regressor = Sequential()
            regressor.add(Dense(self.hidden_size, input_shape=(2,), activation='tanh',
                                init='glorot_uniform'))  # cheating a little: I know there are 2 inputs
            regressor.add(Dense(self.hidden_size, activation='tanh', init='glorot_uniform'))
            regressor.add(Dense(3))  # cheating: I already know the number of actions
            regressor.compile(sgd(lr=0.000001, momentum=0.9, nesterov=True), "mse")
        self.Q = regressor
        self.gamma = gamma
        self.t = 0  # current iteration
Example #41
def get_optimizer(config):
    if(config['optimizer'] == 'rmsprop'):
        opti = optimizers.rmsprop(lr=config['learning_rate'],
                                  clipvalue=config['grad_clip'],
                                  decay=config['decay_rate'])
        return opti
    elif(config['optimizer'] == 'adadelta'):
        opti = optimizers.adadelta(lr=config['learning_rate'],
                                   clipvalue=config['grad_clip'])
        return opti
    elif(config['optimizer'] == 'sgd'):
        opti = optimizers.sgd(lr=config['learning_rate'],
                              momentum=config['momentum'],
                              decay=config['learning_rate_decay'])
        return opti
    else:
        raise KeyError('optimizer name error')
Example #42
def run():
    global ACTION_LIST
    # parameters
    epsilon = .1  # exploration
    num_actions = len(ACTION_LIST)  # [buy, hold, sell]
    transcation_cost = 0.0005
    epoch = 1000
    max_memory = 5000
    hidden_size = 300
    batch_size = 50
    look_back_term = 100
    training_period = 3000
    
    # log
    time_start_epoch = datetime.datetime.now()
    time_start = strftime("%Y-%m-%d-%H:%M:%S", gmtime())
    log_name = '../log/DRL_Trading_Learning_v1_' + time_start + '.log'
    logging.basicConfig(filename=log_name,level=logging.DEBUG)
    logging.info("Time start : " + str(time_start))
    logging.info("Parameter setting :")
    logging.info("epsilon = " + str(epsilon))
    logging.info("transaction_cost = " + str(transcation_cost))
    logging.info("epoch ="  + str(epoch))
    logging.info("max_memory = " + str(max_memory))
    #logging.info("batch_size = " + str(batch_size))
    logging.info("look back term = " + str(look_back_term))
    logging.info("hidden_size = " + str(hidden_size))
    logging.info("training period = " + str(training_period))
    print "log start"
    
    
    # import return data
    data = pd.read_csv("../Data/GBPUSD30.csv",header=None)
    close = data[5].values
    ret = (close[1:] - close[:-1])[:training_period]
    train_percent = 1
    ret_train = ret[:len(ret) * train_percent]
    ret_test = ret[len(ret) :]
    model = Sequential()
    model.add(Dense(hidden_size, input_shape=(look_back_term,), activation='relu'))
    model.add(Dense(hidden_size, activation='relu'))
    model.add(Dense(hidden_size, activation='relu'))
    model.add(Dense(num_actions))
    model.compile(sgd(lr=.2), "mse")
    env = FX_Market(ret_train = ret_train, look_back_term = look_back_term, transaction_cost = transcation_cost)
    trading_his = Trading_Memory(max_memory = max_memory)
    # Train
    return_list = []
    for e in range(epoch):
        print "epoch : " + str(e)
        env.reset()
        accumulate_ret = [0.0]
        for t in range(look_back_term - 1 , len(ret_train) - 2) :
            state = env.get_state(t)
            # decide action
            if np.random.rand() < epsilon:
                action = np.random.randint(0, num_actions, size=1)
            else:

                q = model.predict(state)
                action = np.argmax(q[0])

            new_state, reward = env.act(t, action)

            accumulate_ret.append(accumulate_ret[-1]  + reward)

            trading_his.memory(state, new_state, action, reward)
            inputs, targets = trading_his.get_batch(model, batch_size=batch_size)
            model.train_on_batch(inputs, targets)
        print "accumulate return : " + str(accumulate_ret[-1])
        return_list.append(accumulate_ret[-1])
        logging.info("accumulate return : " + str(accumulate_ret[-1]))
        loop_time = datetime.datetime.now() - time_start_epoch
        time_left = float(loop_time.seconds) / 3600.0 / float(e+1) * float(epoch - e + 1)
        print "left time : " + str(time_left) + " hours"
    
    #plt.plot(range(len(return_list)),return_list)
    #plt.show()
    
    print "finished"
Example #43
#screen = pygame.display.set_mode(size)

grid_size = 15
hidden_size = 100
nb_frames = 1

gameMap = np.zeros((nb_frames, grid_size, grid_size))
gameMap[0][cheesePos[0]/20][cheesePos[1]/20] = 1
gameMap[0][mousePos[0]/20][mousePos[1]/20] = 2

nn = Sequential()
nn.add(Flatten(input_shape=(nb_frames, grid_size, grid_size)))
nn.add(Dense(hidden_size, activation='relu'))
nn.add(Dense(hidden_size, activation='relu'))
nn.add(Dense(4))
nn.compile(sgd(lr=.2), "mse")

memory = ExperienceReplay(100)
loss = 0.

def nnEvalState(pos, gameMap, exploRate, loss):
    mouseState = np.array(pos / 20)
    nnoutputs = nn.predict(np.array([gameMap]), batch_size=1)
    nnoutputs = nnoutputs[0]
    actionId = 0
    if random.random() < exploRate:
        actionId = random.randrange(4)
    else:
        actionId = random.choice(np.argwhere(nnoutputs == nnoutputs.max()).flatten())

    Rpoints, newMouseState = reward(mouseState, actions[actionId])
Example #44
# --------------------------------------------
# Design the DNN model
# --------------------------------------------

model = Sequential()
model.add(Dense(output_dim=hidden_size,
                #init=lambda shape, name: normal(shape, scale=0.001, name=name),
                #inner_init=lambda shape, name: identity(shape, scale=1.0, name=name),
                activation='linear',
                input_shape=x_train.shape[1:]))
# model.add(Dense(hidden_size, activation='relu'))
model.add(Dense(hidden_size, activation='tanh'))
model.add(Dense(1, activation='linear'))
model.compile(sgd(lr=0.05), "mse")

# design DNN and fit
model.fit(x_train, x_label, batch_size=mb, nb_epoch=15)

# save the weights
model.save_weights(root_folder_path + 'nn_keras_%d' % split + ".h5", overwrite=True)
with open(root_folder_path + 'nn_keras_%d' % split + ".json", "w") as outfile:
    json.dump(model.to_json(), outfile)

stop = timeit.default_timer()

print("The run took %r min" % ((stop - start) / 60))
Example #45
def run():
    global ACTION_LIST
    
    # parameters
    epsilon = .1  # exploration
    num_actions = len(ACTION_LIST)  # [buy, hold, sell]
    transcation_cost = 0.0005
    epoch = 200
    max_memory = 1000
    batch_size = max_memory
    look_back_term = 100
    hidden_size = look_back_term
    act_function = "relu"
    learning_rate = .2
    
    # import return data
    data = pd.read_csv("../Data/GBPUSD30.csv",header=None)
    close = data[5].values
    ret = (close[1:] - close[:-1])[:1000]
    train_percent = 1
    ret_train = ret[:len(ret) * train_percent]

    
    #model.add(Dense(hidden_size, input_shape=(look_back_term,), activation=act_function))
    #model.add(Dense(hidden_size, activation=act_function))
    #model.add(Dense(hidden_size, activation=act_function))
    #model.add(Dense(num_actions))
    #model.compile(sgd(lr=learning_rate), "mse")
    
    model = Sequential()
    #model.add(Embedding(look_back_term, embedding_dims, input_length=maxlen, dropout=0.2))
    model.add(Convolution1D(hidden_size, 2, input_shape=(look_back_term,), activation=act_function))
    model.add(Dense(num_actions))
    model.compile(sgd(lr=learning_rate), "mse")
    

    env = FX_Market(ret_train = ret_train, look_back_term = look_back_term, transaction_cost = transcation_cost)

    trading_his = Trading_Memory(max_memory = max_memory)
    
    logging.basicConfig(filename='DRL_Trading_Learning_v2.log',level=logging.INFO)
    logging.info("Start time : " + strftime("%Y-%m-%d %H:%M:%S", gmtime()))
    logging.info("Parameter setting :")
    logging.info("epsilon = " + str(epsilon))
    logging.info("transaction_cost = " + str(transcation_cost))
    logging.info("epoch ="  + str(epoch))
    logging.info("max_memory = " + str(max_memory))
    logging.info("batch_size = " + str(batch_size))
    logging.info("look back term = " + str(look_back_term))
    logging.info("hidden_size = " + str(hidden_size))
    logging.info("activation function = " + act_function)
    logging.info("learning rate" + str(learning_rate))

    # Train
    return_list = []
    for e in range(epoch):
        print "epoch : " + str(e)
        env.reset()
        accumulate_ret = [0.0]
        for t in range(look_back_term - 1 , len(ret_train) - 2) :
            state = env.get_state(t)
            # decide action
            if np.random.rand() < epsilon:
                action = np.random.randint(0, num_actions, size=1)
            else:
                q = model.predict(state)
                action = np.argmax(q[0])

            new_state, reward = env.act(t, action)

            accumulate_ret.append(accumulate_ret[-1] + reward)

            trading_his.memory(state, new_state, action, reward)

            inputs, targets = trading_his.get_batch(model, batch_size=batch_size)

            model.train_on_batch(inputs, targets)

        print "accumulate return : " + str(accumulate_ret[-1])
        logging.info("accumulate return : " + str(accumulate_ret[-1]))
        return_list.append(accumulate_ret[-1])
        
#===============================================================================
#     result = pd.DataFrame()
#     result["accumulate return"] = return_list
#     result.to_csv("./DRL_result_1_14052016.csv")
# 
#     model.save_weights("./model2.h5", overwrite=True)
#     with open("model2.json", "w") as outfile:
#         json.dump(model.to_json(), outfile)
#===============================================================================

    plt.plot(range(len(return_list)),return_list,"r.")
    plt.show()
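One caveat: Convolution1D expects 3-D input (samples, steps, channels), so input_shape=(look_back_term,) will be rejected when the layer is built. A sketch of a shape the layer accepts, treating the look-back window as a single channel; this is an assumption, not the author's fix:

# Sketch: a Conv1D head over the look-back window as one channel (legacy Keras).
from keras.models import Sequential
from keras.layers import Convolution1D, Flatten, Dense
from keras.optimizers import sgd

look_back_term, num_actions = 100, 3
model = Sequential()
model.add(Convolution1D(64, 2, input_shape=(look_back_term, 1), activation='relu'))
model.add(Flatten())
model.add(Dense(num_actions))
model.compile(sgd(lr=0.2), "mse")
# states must then be shaped (batch, look_back_term, 1), e.g. state.reshape(1, -1, 1)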
Example #46
if __name__ == "__main__":
    # parameters
    epsilon = .1  # exploration
    num_actions = 3  # [move_left, stay, move_right]
    epoch = 1000
    max_memory = 500
    hidden_size = 100
    batch_size = 50
    grid_size = 10

    model = Sequential()
    model.add(Dense(hidden_size, input_shape=(grid_size**2,), activation='relu'))
    model.add(Dense(hidden_size, activation='relu'))
    model.add(Dense(num_actions))
    model.compile(sgd(lr=.2), "mse")

    # If you want to continue training from a previous model, just uncomment the line below
    # model.load_weights("model.h5")

    # Define environment/game
    env = Catch(grid_size)

    # Initialize experience replay object
    exp_replay = ExperienceReplay(max_memory=max_memory)

    # Train
    win_cnt = 0
    for e in range(epoch):
        loss = 0.
        env.reset()
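        # The original example is truncated here. The following is a sketch of
        # how the canonical Catch training loop continues from this point,
        # assuming the observe/act/remember/get_batch interface used by the
        # Catch and ExperienceReplay classes elsewhere on this page.
        game_over = False
        input_t = env.observe()
        while not game_over:
            input_tm1 = input_t
            # epsilon-greedy action selection
            if np.random.rand() <= epsilon:
                action = np.random.randint(0, num_actions, size=1)
            else:
                q = model.predict(input_tm1)
                action = np.argmax(q[0])
            # apply the action and store the transition
            input_t, reward, game_over = env.act(action)
            if reward == 1:
                win_cnt += 1
            exp_replay.remember([input_tm1, action, reward, input_t], game_over)
            # train on a replayed minibatch
            inputs, targets = exp_replay.get_batch(model, batch_size=batch_size)
            loss += model.train_on_batch(inputs, targets)
        print("Epoch {:03d}/{:d} | Loss {:.4f} | Win count {}".format(e, epoch, loss, win_cnt))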
Example #47
def run():
    global ACTION_LIST
    # parameters
    epsilon = 0.1  # exploration
    num_actions = len(ACTION_LIST)  # [buy, hold, sell]
    transcation_cost = 0.0005
    epoch = 150
    max_memory = 1000000
    hidden_size = 600
    batch_size = 50
    look_back_term = 200
    training_period_start = 0
    training_period_stop = 10000
    learning_rate = 0.1
    discount_rate = 0.95
    step_size = 10 # iterate step to update target_model
    input_data = "GBPUSD240.csv"

    # log
    time_start_epoch = datetime.datetime.now()
    time_start = strftime("%Y-%m-%d-%H-%M-%S", gmtime())
    log_name = '../log/DRL_Learning_v6_' + time_start + '.log'
    logging.basicConfig(filename=log_name,level=logging.DEBUG)
    logging.info("Time start : " + str(time_start))
    logging.info("Input data :" + input_data)
    logging.info("Parameter setting :")
    logging.info("epsilon = " + str(epsilon))
    logging.info("transaction_cost = " + str(transcation_cost))
    logging.info("epoch ="  + str(epoch))
    logging.info("max_memory = " + str(max_memory))
    logging.info("batch_size = " + str(batch_size))
    logging.info("look back term = " + str(look_back_term))
    logging.info("hidden_size = " + str(hidden_size))
    logging.info("training period = " + str(training_period_start) + " ~ " + str(training_period_stop))
    logging.info("learning rate = " + str(learning_rate))
    logging.info("discount rate = " + str(discount_rate))
    logging.info("step_size = " + str(step_size))
    print "log start"

    # import return data
    data = pd.read_csv("../Data/" + input_data,header=None)
    close = data[5].values
    ret_train = (close[1:] - close[:-1])[training_period_start : training_period_stop]
    
    #ret_train_shared = shared(np.asarray(ret_train), name="ret_train")
    #print ret_train_shared.get_value()
    
    model = Sequential()
    model.add(Dense(hidden_size, input_shape=(look_back_term,), activation='relu'))
    model.add(Dense(hidden_size, activation='relu'))
    model.add(Dense(hidden_size, activation='relu'))
    model.add(Dense(num_actions))
    model.compile(sgd(lr=learning_rate), "mse")
    
    target_model = copy.deepcopy(model)
    
    env = FX_Market(ret_train = ret_train, look_back_term = look_back_term, transaction_cost = transcation_cost)
    trading_his = Trading_Memory(max_memory = max_memory, discount=discount_rate)
    

    # Train
    return_list = []
    for e in range(epoch):
        print "epoch : " + str(e)
        env.reset()
        accumulate_ret = [0.0]
        if e % step_size == 0:
            target_model = copy.deepcopy(model)
        for t in range(look_back_term - 1 , len(ret_train) - 2) :
            state = env.get_state(t)
            # decide action
            if np.random.rand() < epsilon:
                action = np.random.randint(0, num_actions, size=1)
            else:
                q = target_model.predict(state)
                action = np.argmax(q[0])

            new_state, reward = env.act(t, action)

            accumulate_ret.append(accumulate_ret[-1]  + reward)

            trading_his.memory(state, new_state, action, reward)
            
            inputs, targets = trading_his.get_batch(target_model, model,batch_size=batch_size)
            
            model.train_on_batch(inputs, targets)
        print "accumulate return : " + str(accumulate_ret[-1])
        return_list.append(accumulate_ret[-1])
        logging.info("accumulate return : " + str(accumulate_ret[-1]))
        loop_time = datetime.datetime.now() - time_start_epoch
        time_left = float(loop_time.seconds) / 3600.0 / float(e+1) * float(epoch - e + 1)
        print "left time : " + str(time_left) + " hours"
        
    result = pd.DataFrame()
    result["accumulate return"] = return_list
    result.to_csv("../Result_Data/DRL_v6_result_" + time_start + ".csv")

    model.save_weights("../Model/DRL_model_v6_" + time_start + ".h5", overwrite=True)
    with open("../Model/DRL_model_v6_" + time_start + ".json", "w") as outfile:
        json.dump(model.to_json(), outfile)

    #plt.plot(range(len(return_list)),return_list,"r.")
    #plt.show()
    #test(model, ret_test)

    time_used = datetime.datetime.now() - time_start_epoch
    time_used = float(time_used.seconds) / 3600.0
    logging.info("Processing time : " + str(time_used) + " hours")
    print "finished"
Example #48
def run(optimizer_list):
    global ACTION_LIST
    global floatX
    global time_get_batch
    # parameters
    for optimizer in optimizer_list :
        version = str(7)
        epsilon = 0.1  # exploration
        num_actions = len(ACTION_LIST)  # [buy, hold, sell]
        transcation_cost = 0.0005
        epoch = 500
        max_memory = 1000000
        hidden_size = 600
        batch_size = 300
        look_back_term = 300
        training_period_start = -10500
        training_period_stop = -500
        learning_rate = 0.1
        discount_rate = 0.000009
        step_size = 10 # iterate step to update target_model
        act_function = "relu"
        
        comment = "Learning rate test ( " + optimizer + " )"
        #frame_skip = 4 # train the model with some frames intervals
        input_data = "GBPUSD240.csv"
        #input_data = "GBP_USD240_oanda.csv"
    
        # log
        time_start_epoch = datetime.datetime.now()
        time_start = strftime("%Y-%m-%d-%H-%M-%S", gmtime())
        log_name = '../log/DRL_Learning_v' + version + '_' + time_start + '.log'
        logging.basicConfig(filename=log_name,level=logging.DEBUG)
        logging.info("Version : " + version)
        logging.info("Time start : " + str(time_start))
        logging.info("Input data :" + input_data)
        logging.info("Parameter setting :")
        logging.info("epsilon = " + str(epsilon))
        logging.info("transaction_cost = " + str(transcation_cost))
        logging.info("epoch ="  + str(epoch))
        logging.info("max_memory = " + str(max_memory))
        logging.info("batch_size = " + str(batch_size))
        logging.info("look back term = " + str(look_back_term))
        logging.info("hidden_size = " + str(hidden_size))
        logging.info("training period = " + str(training_period_start) + " ~ " + str(training_period_stop))
        logging.info("learning rate = " + str(learning_rate))
        logging.info("discount rate = " + str(discount_rate))
        logging.info("step_size = " + str(step_size))
        logging.info("activation function = " + act_function)
        logging.info("comment :" + comment)
        #logging.info("frame_skip" + str(frame_skip))
        print "log start"
    
        # import return data
        data = pd.read_csv("../Data/" + input_data,header=None)
        close = data[5].values
        
        # import return data from oanda data
        #data = pd.read_csv("../Data/" + input_data,header=0)
        #print data
        #close = data["closeAsk"].values
        #print close
        
        ret_train = (close[1:] - close[:-1])[training_period_start : training_period_stop]
        
        
        #build model : online mode and target model
        model = Sequential()
        model.add(Dense(hidden_size, input_shape=(look_back_term,), activation=act_function))
        model.add(Dense(hidden_size, activation=act_function))
        model.add(Dense(hidden_size, activation=act_function))
        model.add(Dense(num_actions))
        if optimizer == "SGD" :
            opti = sgd(lr=learning_rate)
        elif optimizer == "RMSprop" :
            opti = keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08)
        elif optimizer == "Adam" :
            opti = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
        elif optimizer == "Adadelta" :
            opti = keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08)
        elif optimizer == "Adagrad":
            opti = keras.optimizers.Adagrad(lr=0.01, epsilon=1e-08)
        elif optimizer == "Adamax" :
            opti = keras.optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
        
        model.compile(opti, "mse")
        
        write_model(model, version, time_start)
        target_model = read_model(version, time_start)
        
        # create market
        env = FX_Market(ret_train = ret_train, look_back_term = look_back_term, transaction_cost = transcation_cost)
        # create memory
        trading_his = Trading_Memory(max_memory = max_memory, discount=discount_rate)
        
        # Train
        return_list = []
        
        for e in range(epoch):
            print "epoch : " + str(e)
            env.reset()
            accumulate_ret = [0.0] # pips earn from fx market
            if e % step_size == 0:
                write_model(model, version, time_start)
                target_model = read_model(version, time_start)
                
            #time_cal = list(np.zeros(6))
            time_get_batch = list(np.zeros(7))
            
            for t in range(look_back_term - 1 , len(ret_train) - 2) :
                #if np.random.random_integers(1,frame_skip) == 4 :
                state = env.get_state(t)
                # decide action
                if np.random.rand() < epsilon:
                    action = np.random.randint(0, num_actions, size=1)
                else:
                    q = target_model.predict(state)
                    action = np.argmax(q[0])
                
                new_state, reward = env.act(t, action)
    
                accumulate_ret.append(accumulate_ret[-1]  + reward)
    
                trading_his.memory(state, new_state, action, reward)
                
                #save_variable("memory_" + time_start, trading_his.get_memory())
                
                inputs, targets = trading_his.get_batch(target_model, model,batch_size=batch_size)
    
                model.train_on_batch(inputs, targets)
            
            #for i in range(len(time_cal)) :
            #    print "process " + str(i) + " : " + str(time_cal[i]/60) + " minutes"
            
            print "accumulate return : " + str(accumulate_ret[-1])
            
            return_list.append(accumulate_ret[-1])
            logging.info("accumulate return : " + str(accumulate_ret[-1]))
            loop_time = datetime.datetime.now() - time_start_epoch
            time_left = float(loop_time.seconds) / 3600.0 / float(e+1) * float(epoch - e + 1)
            print "left time : " + str(time_left) + " hours" + ", optimizer = " + optimizer
        
        time_used = datetime.datetime.now() - time_start_epoch
        time_used = float(time_used.seconds) / 3600.0
        logging.info("Processing time : " + str(time_used) + " hours")
        
        #save_variable("../Temp/memory_" + time_start + ".mem", trading_his.get_memory())
        
        #output accumulate data
        result = pd.DataFrame()
        result["accumulate return"] = return_list
        result.to_csv("../Result_Data/DRL_v" + version + "_result_" + time_start + ".csv")
        
        #output model
        model.save_weights("../Model/DRL_model_v" + version + "_" + time_start + ".h5", overwrite=True)
        with open("../Model/DRL_model_v" + version + "_" + time_start + ".json", "w") as outfile:
            json.dump(model.to_json(), outfile)
        outfile.close()
    
        #plt.plot(range(len(return_list)),return_list,"r.")
        #plt.show()
        #test(model, ret_test)
    
    print "finished"
Example #49
            self.count = self.min_space
            print 'disturbed'
            #return np.random.randint (self.action_space)
            # always disturb right MUHAHAHAHA
            return 0

        return default

    def __call__ (self, default):
        return self.disturb (default)
disturb = Disturb ()

with open("model3.json", "r") as outfile:
    model = model_from_json (json.load (outfile))
model.load_weights ("model3.h5")
model.compile(sgd(lr=.01), "mse")

def get_env ():
    e = gym.make ('CartPole-v0')
    return e
env = get_env ()

def eval_iter (disturbance=False):
    frame_count = 0
    loss = 0
    done = False
    obs_t = env.reset ()

    while not done:
        # render
        env.render ()
Example #50
def run():
    global ACTION_LIST
    # parameters
    epsilon = .1  # exploration
    num_actions = len(ACTION_LIST)  # [buy, hold, sell]
    transcation_cost = 0.0005
    epoch = 500
    max_memory = 6000
    #batch_size = 50
    look_back_term = 50
    hidden_size = 300
    act_function = "sigmoid"
    learning_rate = 1.0
    training_period = 100

    # log
    time_start_epoch = datetime.datetime.now()
    time_start = strftime("%Y-%m-%d-%H:%M:%S", gmtime())
    log_name = '../log/DRL_Trading_Learning_v1_' + time_start + '.log'
    logging.basicConfig(filename=log_name,level=logging.DEBUG)
    logging.info("Time start : " + str(time_start))
    logging.info("Parameter setting :")
    logging.info("epsilon = " + str(epsilon))
    logging.info("transaction_cost = " + str(transcation_cost))
    logging.info("epoch ="  + str(epoch))
    logging.info("max_memory = " + str(max_memory))
    #logging.info("batch_size = " + str(batch_size))
    logging.info("look back term = " + str(look_back_term))
    logging.info("hidden_size = " + str(hidden_size))
    logging.info("activation function = " + act_function)
    logging.info("learning rate = " + str(learning_rate))
    logging.info("training period = " + str(training_period))
    print "log start :" + str(time_start)

    # import return data
    data = pd.read_csv("../Data/GBPUSD30.csv",header=None)
    close = data[5].values
    ret = (close[1:] - close[:-1])[:training_period]
    train_percent = 1
    ret_train = ret[:len(ret) * train_percent]

    model = Sequential()
    model.add(Dense(hidden_size, input_shape=(look_back_term,), activation=act_function))
    model.add(Dense(hidden_size, activation=act_function))
    model.add(Dense(hidden_size, activation=act_function))
    model.add(Dense(num_actions))
    model.compile(sgd(lr=learning_rate), "mse")

    env = FX_Market(ret_train = ret_train, look_back_term = look_back_term, transaction_cost = transcation_cost)

    trading_his = Trading_Memory(max_memory = max_memory)

    # Train
    return_list = []
    for e in range(epoch):
        #loop_start = datetime.datetime.now()
        print "epoch : " + str(e)
        env.reset()
        trading_his.memory_reset()
        accumulate_ret = [0.0]
        for t in range(look_back_term - 1 , len(ret_train) - 2) :
            state = env.get_state(t)
            # decide action
            if np.random.rand() < epsilon:
                action = np.random.randint(0, num_actions, size=1)
            else:
                q = model.predict(state)
                action = np.argmax(q[0])

            new_state, reward = env.act(t, action)

            accumulate_ret.append(accumulate_ret[-1] + reward)

            trading_his.memory(state, new_state, action, reward)

            inputs, targets = trading_his.get_batch(model)

            model.train_on_batch(inputs, targets)

        print "accumulate return : " + str(accumulate_ret[-1])
        logging.info("accumulate return : " + str(accumulate_ret[-1]))
        return_list.append(accumulate_ret[-1])
        loop_time = datetime.datetime.now() - time_start_epoch
        time_left = float(loop_time.seconds) / 3600.0 / float(e+1) * float(epoch - e + 1)
        print "left time : " + str(time_left) + " hours"


    result = pd.DataFrame()
    result["accumulate return"] = return_list
    result.to_csv("../Result_Data/DRL_result_" + time_start + ".csv")

    model.save_weights("../Model/DRL_v1_model_" + time_start + ".h5", overwrite=True)
    with open("../Model/DRL_v1_model_" + time_start + ".json", "w") as outfile:
        json.dump(model.to_json(), outfile)

    #plt.plot(range(len(return_list)),return_list,"r.")
    #plt.show()
    #test(model, ret_test)

    time_used = datetime.datetime.now() - time_start_epoch
    time_used = float(time_used.seconds) / 3600.0
    logging.info("Processing time : " + str(time_used) + " hours")

    print "finished !"