Code Example #1
    def build_local_critic(self):
        local_critic = Sequential()
        local_critic.add(
            Dense(24,
                  input_shape=(self.state_size, ),
                  activation='relu',
                  kernel_initializer='he_uniform'))
        local_critic.add(
            Dense(12, activation='relu', kernel_initializer='he_uniform'))
        local_critic.add(
            Dense(self.value_size,
                  activation='linear',
                  kernel_initializer='he_uniform'))

        local_critic._make_predict_function()
        local_critic.set_weights(self.g_critic.get_weights())

        return local_critic
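
Every example in this listing calls _make_predict_function(), a private Keras helper (TF 1.x backend) that builds the prediction function ahead of time so the model can later be called from worker threads; in newer Keras/TF 2 releases this private method no longer exists. A minimal standalone sketch of that pattern, assuming the old keras API:

# Minimal sketch of the thread-safe prediction pattern these examples rely on
# (assumes Keras with the TF 1.x backend, where _make_predict_function() exists).
import threading
import numpy as np
from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(4, input_shape=(8,), activation='linear')])
model._make_predict_function()  # build the predict function on the main thread, before workers start

def worker():
    x = np.random.rand(1, 8)     # dummy input batch
    print(model.predict(x))      # safe to call from a worker thread now

threads = [threading.Thread(target=worker) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()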
Code Example #2
File: model.py Project: rocks6/hackumbc18
    def __init__(self, *args, **kwargs):
        super(MyServer, self).__init__(*args, **kwargs)
        total_word_count = 0
        with open('../../data/anthem.txt', 'r') as f:
            anthem_text, count = self.text_to_data(f.read(), "ayn rand")
        total_word_count += count

        with open('../../data/mobydick.txt', 'r') as f:
            mobydick_text, count = self.text_to_data(f.read(), "herman melville")
        total_word_count += count

        with open('../../data/alice.txt', 'r') as f:
            alice_text, count = self.text_to_data(f.read(), 'lewis carroll')
        total_word_count += count

        total_arr = anthem_text + mobydick_text + alice_text
        np.random.seed(53894343)
        np.random.shuffle(total_arr)

        self.total_dict = {
            "text": [x[0] for x in total_arr],
            "author": [x[1] for x in total_arr]
        }
        total_df = pd.DataFrame(self.total_dict)
        #print(total_df.describe())
        self.train_input, self.test_input, self.train_label, self.test_label, self.x_train, self.x_test, self.y_train, self.y_test, self.tokenizer, self.text_labels, self.num_classes = self.init_data(
            total_df, total_word_count, .7)
        model = Sequential([
            Dense(512),
            Activation('sigmoid'),
            Dense(self.num_classes),
            Activation('softmax')
        ])
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
        model.fit(self.x_train,
                  self.y_train,
                  epochs=20,
                  batch_size=64,
                  verbose=1,
                  validation_split=0.05)
        model._make_predict_function()
        self.model = model
Code Example #3
    def _build_model(self):
        # Neural Net for Deep-Q learning Model
        model = Sequential()
        model.add(Dense(24, input_dim=self.state_size, activation='tanh'))
        model.add(Dense(24, activation='tanh'))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))

        target_model = Sequential()
        target_model.add(
            Dense(24, input_dim=self.state_size, activation='tanh'))
        target_model.add(Dense(24, activation='tanh'))
        target_model.add(Dense(self.action_size, activation='linear'))
        target_model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))

        model._make_predict_function()
        target_model._make_predict_function()

        return model, target_model
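
A hedged sketch of how the model/target_model pair built above might drive epsilon-greedy action selection; the agent attributes and the epsilon value are assumptions, not part of the original snippet.

# Hypothetical epsilon-greedy action selection using the online model built above;
# `agent.model`, `agent.action_size`, and `epsilon` are assumptions for illustration.
import random
import numpy as np

def act(agent, state, epsilon=0.1):
    if random.random() < epsilon:
        return random.randrange(agent.action_size)         # explore
    q_values = agent.model.predict(state.reshape(1, -1))   # shape (1, action_size)
    return int(np.argmax(q_values[0]))                     # exploit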
Code Example #4
def get_model():
    global model
    global graph

    vgg16_model = keras.applications.vgg16.VGG16()
    model = Sequential()

    for i in vgg16_model.layers:
        model.add(i)

    for layer in model.layers:
        layer.trainable = False

    model.add(Dense(4, activation='softmax'))

    model.load_weights("model_weights.h5")

    model._make_predict_function()
    graph = tf.get_default_graph()
    print("Model loaded!")
Code Example #5
    def build_model(self):
        actor = Sequential()
        actor.add(Dense(self.network_structure[0], input_shape=(self.input_size,), init='lecun_uniform'))
        actor.add(LeakyReLU(alpha=0.01))

        for idx in range(1, len(self.network_structure)):
            layerSize = self.network_structure[idx]
            actor.add(Dense(layerSize, init="lecun_uniform"))
            actor.add(LeakyReLU(alpha=0.01))
        
        actor.add(LSTM(self.network_structure[-1], dropout_W=0.2, dropout_U=0.2))  # note: an LSTM layer expects 3-D (batch, timesteps, features) input
        critic = actor  # critic and actor refer to the same Sequential object, so the layers added below end up in both

        actor.add(Dense(self.output_size, init='lecun_uniform'))
        actor.add(Activation("softmax"))

        critic.add(Dense(1))
        critic.add(Activation("linear"))
        '''
        input = Input(shape=(self.input_size,))
        conv = Conv2D(16, (8, 8), strides=(4, 4), activation='relu')(input) # transfer from convolution networks to dense networks
        conv = Conv2D(32, (4, 4), strides=(2, 2), activation='relu')(conv)
        conv = Flatten()(conv)
        fc = Dense(256, activation='relu')(conv)
        ls = LSTM(256, dropout_W=0.2, dropout_U=0.2)(fc)    # add LSTM cells
        policy = Dense(self.output_size, activation='softmax')(ls)
        value = Dense(1, activation='linear')(ls)

        actor = Model(inputs=input, outputs=policy)
        critic = Model(inputs=input, outputs=value)
        '''
        actor._make_predict_function()
        critic._make_predict_function()

        actor.summary()
        critic.summary()

        return actor, critic
Code Example #6
from keras.preprocessing import image
import numpy as np
import os
import tensorflow as tf
from PIL import Image
images = []
path1=r"G:\study\machine learning\competition\analytics Vidya Computer Vision\test_real\\"
for img in os.listdir(r"G:\study\machine learning\competition\analytics Vidya Computer Vision\test_real"):
    img = image.load_img(path1+img, target_size=(192, 192))
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    images.append(img)

# stack up images list to pass for prediction
images = np.vstack(images)

classifier = tf.keras.models.load_model(r"G:\study\machine learning\competition\analytics Vidya Computer Vision\model3.h5")
classifier._make_predict_function()
classes=classifier.predict(images)

pred=[]
for i in range(0,len(classes)):
    pred.append(classes[i].argmax(axis=0)+1)

import pandas as pd
z=os.listdir(r"G:\study\machine learning\competition\analytics Vidya Computer Vision\test_real")
data=pd.DataFrame(list(zip(z,pred)),columns=['image','category']).to_csv(r"G:\study\machine learning\competition\analytics Vidya Computer Vision\Solution3.csv")




Code Example #7
class StockLSTM(BaseModel):
    def __init__(self,
                 num_classes,
                 window_length,
                 weights_file='weights/lstm.h5'):
        self.model = None
        self.weights_file = weights_file
        self.num_classes = num_classes
        self.window_length = window_length

    def build_model(self, load_weights=True):
        """ Load training history from path

        Args:
            load_weights (Bool): True to resume training from file or just deploying.
                                 Otherwise, training from scratch.

        Returns:

        """
        if load_weights:
            self.model = keras.models.load_model(self.weights_file)
            print('Successfully loaded model')
        else:
            self.model = Sequential()
            self.model.add(
                keras.layers.LSTM(20,
                                  input_shape=(self.num_classes,
                                               self.window_length)))
            self.model.add(Dense(64))
            self.model.add(Dropout(0.5))
            self.model.add(Dense(64))
            self.model.add(Dropout(0.5))
            self.model.add(Dense(self.num_classes, activation='softmax'))

            self.model.compile(loss='categorical_crossentropy',
                               optimizer=Adam(lr=1e-4),
                               metrics=['accuracy'])
            print('Built model from scratch')
        self.model._make_predict_function()
        self.graph = tf.get_default_graph()

    def train(self, X_train, Y_train, X_val, Y_val, verbose=True):
        continue_train = True
        while continue_train:
            self.model.fit(X_train,
                           Y_train,
                           batch_size=128,
                           epochs=50,
                           validation_data=(X_val, Y_val),
                           shuffle=True,
                           verbose=verbose)
            save_weights = input('Type True to save weights\n')
            if save_weights.strip().lower() == 'true':
                self.model.save(self.weights_file)
            continue_train = input(
                "True to continue train, otherwise stop training...\n").strip().lower() == 'true'
        print('Finish.')

    def evaluate(self, X_test, Y_test, verbose=False):
        return self.model.evaluate(X_test, Y_test, verbose=verbose)

    def predict(self, X_test, verbose=False):
        return self.model.predict(X_test, verbose=verbose)

    def predict_single(self, observation):
        """ Predict the action of a single observation

        Args:
            observation: (num_stocks + 1, window_length)

        Returns: a single action array with shape (num_stocks + 1,)

        """
        action = np.zeros((self.num_classes, ))
        obsX = observation[:, -self.window_length:,
                           3] / observation[:, -self.window_length:, 0]
        obsX = normalize(obsX)
        obsX = np.expand_dims(obsX, axis=0)
        with self.graph.as_default():
            current_action_index = self.model.predict_classes(obsX,
                                                              verbose=False)
        action[current_action_index] = 1.0
        return action
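
A hedged driver for the StockLSTM class above; the stock count, window length, and random observation are assumptions, and the normalize helper is assumed to come from the original module.

# Hypothetical usage of StockLSTM above; shapes and the weights path are assumptions,
# and `normalize` is assumed to be provided by the original module.
import numpy as np

agent = StockLSTM(num_classes=17, window_length=50, weights_file='weights/lstm.h5')
agent.build_model(load_weights=False)   # build from scratch; True would load weights/lstm.h5
obs = np.random.rand(17, 50, 4)         # (num_stocks + 1, window_length, price features)
print(agent.predict_single(obs))        # one-hot action array of shape (17,)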
Code Example #8
def predictValue():

    img_width, img_height = 210, 210
    epochs = 10
    batch_size = 32

    if K.image_data_format() == 'channels_first':
        input_shape = (3, img_width, img_height)
    else:
        input_shape = (img_width, img_height, 3)


    # In[3]:



    model = Sequential()

    model.add(Conv2D(32, (3, 3), input_shape=(210,210,3)))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(5))
    model.add(Activation('softmax'))


    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.summary()

    train_datagen = ImageDataGenerator(
        rescale=1. / 255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=False)

    # this is the augmentation configuration we will use for testing
    test_datagen = ImageDataGenerator(rescale=1. / 255)



    model.load_weights("model/Final.h5")
    model._make_predict_function()

    # In[6]:


    classList = ['corn gray leaf spot', 'corn common rust', 'corn healthy', 'peach bacterial spot', 'peach healthy']



    datagen = ImageDataGenerator(rescale=1. / 255)
    generator = datagen.flow_from_directory(
            'imagedata',
            target_size=(img_width, img_height),
            batch_size=batch_size,
            class_mode='categorical',  # only data, no labels
            shuffle=False)  # keep data in same order as labels

    op = model.predict_generator(generator,1)
    maxprobability = max(op[0])

    for i in range(len(op[0])):
        if maxprobability == op[0][i]:
            maxprobabilityclass = i
            break

    K.clear_session()
    del model
    if maxprobability < 0.5:
        return str("image is not discernible")
    return str(classList[maxprobabilityclass])
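
A hedged sketch of exposing predictValue() over HTTP; the route, upload handling, and the imagedata/ directory layout read by flow_from_directory are assumptions.

# Hypothetical Flask wiring for predictValue() above; the route, upload handling,
# and the imagedata/upload/ folder expected by flow_from_directory are assumptions.
import os
from flask import Flask, request

app = Flask(__name__)

@app.route('/classify', methods=['POST'])
def classify():
    os.makedirs('imagedata/upload', exist_ok=True)
    # flow_from_directory('imagedata', ...) will pick the image up from this subfolder
    request.files['file'].save('imagedata/upload/input.jpg')
    return predictValue()   # rebuilds the model, predicts, then clears the Keras session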
Code Example #9

# Plot the train and val curve

# In[15]:


acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)



model._make_predict_function()
#Train and validation accuracy
#plt.plot(epochs, acc, 'b', label='Training accuracy')
#plt.plot(epochs, val_acc, 'r', label='Validation accuracy')
#plt.title('Training and Validation accuracy')
#plt.legend()

#plt.figure()
#Train and validation loss
#plt.plot(epochs, loss, 'b', label='Training loss')
#plt.plot(epochs, val_loss, 'r', label='Validation loss')
#plt.title('Training and Validation loss')
#plt.legend()
#plt.show()

Code Example #10
File: playercontrol.py Project: noname72/holdemq
    def __init__(self,
                 playerID,
                 game,
                 is_ai=True,
                 model=None,
                 train=None,
                 nb_frames=None,
                 name='Alice',
                 stack=5000):
        self.game = game
        self.daemon = True

        if model is not None:
            self.model = model
        elif is_ai:
            grid_size = 66
            hidden_size = 32
            nb_frames = 5
            lstm_size = 64
            batch_size = 5000

            model = Sequential()
            model.add(
                LSTM(lstm_size,
                     return_sequences=True,
                     input_shape=(nb_frames, grid_size)))
            model.add(Dropout(0.2))
            model.add(LSTM(int(lstm_size / 2), return_sequences=True))
            model.add(Dropout(0.2))
            model.add(LSTM(int(lstm_size / 4)))
            model.compile(RMSprop(), 'MSE')
            model._make_predict_function()

        if model is not None:
            assert len(
                model.output_shape
            ) == 2, "Model's output shape should be (nb_samples, nb_actions)."

            if not nb_frames and not model.input_shape[1]:
                raise Exception("Missing argument : nb_frames not provided")
            elif not nb_frames:
                nb_frames = model.input_shape[1]
            elif model.input_shape[
                    1] and nb_frames and model.input_shape[1] != nb_frames:
                raise Exception(
                    "Dimension mismatch : time dimension of model should be equal to nb_frames."
                )

        self.model = model
        self.nb_frames = nb_frames
        self.frames = None
        self.train = train
        self.nb_actions = 3

        self.playerID = playerID

        self._name = name
        self._stack = stack
        self._originalstack = stack
        self._hand = []
        self.add_player()
        self.last_S = None
Code Example #11
model=Sequential()
for layer in v_model.layers[:-1]:
    model.add(layer)
for layer in model.layers:
    layer.trainable=False

model.add(Dropout(0.4))
model.add(Dense(2,activation="softmax"))

model.load_weights('models/weights.h5')

# Load your trained model
#model = Sequential()

#model = load_model(MODEL_PATH)	
model._make_predict_function()          # Necessary
print('Model loaded. Start serving...')

# You can also use pretrained model from Keras
# Check https://keras.io/applications/
#from keras.applications.resnet50 import ResNet50
#model = ResNet50(weights='imagenet')
print('Model loaded. Check http://127.0.0.1:5000/')


def model_predict(img_path, model):
    img = image.load_img(img_path, target_size=(224, 224))

    # Preprocessing the image
    x = image.img_to_array(img)
    # x = np.true_divide(x, 255)
Code Example #12
class DeepQNetwork(Representation):

    def __init__(self,gridsize=5,actionspaceperagent=5,numberofagent=2,
                 hidden_unit=[12,12],learning_rate=0.1,
                 batch_size=32,trainpass=25,experiencebuffer=128,
                 train_period=1,
                 gamma = 0.99,
                 model_reset_counter=32,
                 memory_type = "SumTree"):

        self.Gamma = gamma
        self.batchsize = batch_size
        self.trainPass = trainpass
        self.hidden_unit = hidden_unit
        self.learningrate = learning_rate
        self.size_of_input_units = gridsize * gridsize * numberofagent
        self.gridsize = gridsize

        if memory_type=="Uniform":
            self.memory = Memory_UniformRandom(experiencebuffer)
        elif memory_type == "SumTree":
            self.memory = Memory_SumTree(experiencebuffer)

        self.fresh_experience_counter = 0
        self.actionspaceforagent = actionspaceperagent
        self.numberofagent = numberofagent
        self.output_unit = actionspaceperagent**numberofagent
        self.trainingepochtotal = 0
        self.train_period = train_period # After how many new experiences we run fitting/training.
        self.counter_experience = 0 # A counter of how many experience tuples have been seen
        self.counter_modelReset = model_reset_counter

        # create model
        self.model = Sequential()
        self.model.add(Dense(self.hidden_unit[0], activation='tanh', input_dim = self.size_of_input_units ))
        for i in range(1, len(hidden_unit)):
            self.model.add(Dense(self.hidden_unit[i], activation='tanh'))
        self.model.add(Dense(self.output_unit, activation=LeakyReLU(0.3)))

        # Compile model
        self.model.compile(loss='mse', optimizer='sgd', metrics=['accuracy'])
        self.model._make_predict_function()

        #save the TensorFlow graph:
        self.graph = tf.get_default_graph()

        self.model.summary()
        self.modelFreezed = self.model  # note: this is a reference to the live model, not a frozen copy
        self.modelFreezed.summary()

        if os.path.isfile("modeltrained.h5"):
            self.model.load_weights("modeltrained.h5")
            print("###############################")
            print("Existing model loaded.......")
            print("###############################")

        self.model.save_weights("modelinit.h5")

        # Reset the batch
        self.batchSamplesX = np.array([], dtype=np.float).reshape(0, self.size_of_input_units)
        self.batchSamplesY = np.array([], dtype=np.float).reshape(0, self.output_unit)

    def Convert_State_To_Input(self,state):

        # Create zero filled output value
        outValue = np.zeros(shape=(self.numberofagent,self.gridsize,self.gridsize))

        # Convert list to numpy array
        state = np.reshape(state,(1,state.shape[0]))

        # Decompose states
        states = np.split(state[0],self.numberofagent)

        # Fill output tensor with agents position
        for i in range(self.numberofagent) :
            row = int(states[i][0])
            col = int(states[i][1])
            outValue[i,row,col] = 1

        return outValue.flatten()

    def Get_Greedy_Pair(self,state):
        # Backward compatibility: Preprocess state input if needed.
        if np.size(state) != self.size_of_input_units:
            values = self.ForwardPass(self.Convert_State_To_Input(state))
        else:
            values = self.ForwardPass(state)

        # Get the maximums
        arg = values.argmax()
        valmax = values.max()

        return arg,valmax

    def Get_Value(self,state,action):

        values = self.ForwardPass(self.Convert_State_To_Input(state))
        index = self.Get_Action_Index(action)
        temp = values[index]

        return temp


    def ForwardPass(self,input):

        # Form Input Values
        input = np.reshape(input,(1,input.shape[0]))

        # Prediction of the model
        with self.graph.as_default():
            hypothesis = self.modelFreezed.predict(input)

        values = np.asarray(hypothesis).reshape(self.output_unit)

        return values

    def ForwardPassQ2(self,input):

        # Form Input Values
        input = np.reshape(input,(1,input.shape[0]))

        # Prediction of the model
        with self.graph.as_default():
            hypothesis = self.model.predict(input)

        values = np.asarray(hypothesis).reshape(self.output_unit)

        return values

    def Get_Greedy_PairQ2(self,state):
        # Backward compatibility: Preprocess state input if needed.
        if np.size(state) != self.size_of_input_units:
            values = self.ForwardPassQ2(self.Convert_State_To_Input(state))
        else:
            values = self.ForwardPassQ2(state)

        # Get the maximums
        arg = values.argmax()
        valmax = values.max()

        return arg,valmax

    def Set_Value(self,state,action,value):

        # Preprocess State
        inputVal = self.Convert_State_To_Input(state)

        # Update label
        values = self.ForwardPass(inputVal)
        index = self.Get_Action_Index(action)
        values[index] = value

        # Append new sample to Memory of Experiences
        # Don't worry about its size, since it is a queue
        self.memory.add((inputVal, values))

        #if self.fresh_experience_counter == self.batchsize :
        if len(self.memory) >= self.batchsize :

            self.trainingepochtotal += self.trainPass
            # print('Training Epoch:', self.trainingepochtotal)

            # Get Unique Samples from memory as much as batchsize
            minibatch = random.sample(list(self.memory), self.batchsize)

            for i in np.arange(len(minibatch)):
                X, Y = minibatch[i]
                self.batchSamplesX = np.vstack((self.batchSamplesX, X))
                self.batchSamplesY = np.vstack((self.batchSamplesY, Y))

            with self.graph.as_default():
                self.model.fit(self.batchSamplesX, self.batchSamplesY, epochs=self.trainPass, batch_size= self.batchsize, verbose=0)

            # Reset the batch
            self.batchSamplesX = np.array([], dtype=np.float).reshape(0, self.size_of_input_units)
            self.batchSamplesY = np.array([], dtype=np.float).reshape(0, self.output_unit)

    def Get_Action_Index(self, action):
        sizeOfAction = action.shape[0]
        temp = 0

        for i in np.arange(sizeOfAction):
            temp = temp + action[i] * (self.actionspaceforagent**(sizeOfAction-i-1))

        return temp

    def Add_Experience(self,state,action,nextstate,reward,status):

        self.counter_experience += 1

        if self.counter_experience % self.counter_modelReset == 0:
            self.modelFreezed = self.model

        # WORKING
        arg_Qmax, Qmax = self.Get_Greedy_Pair(nextstate)
        QValue = reward + self.Gamma * Qmax

        self.Set_Value(state,action,QValue)

        # WORKING
        # state = self.Convert_State_To_Input(state)
        # nextstate = self.Convert_State_To_Input(nextstate)
        # action = self.Get_Action_Index(action)
        # values=0
        # self.memory.append((state,action,reward,nextstate,status,values))
        #
        # if len(self.memory) >= self.batchsize:
        #
        #     self.trainingepochtotal += self.trainPass
        #
        #     minibatch = self.Memory_Sample_UniformRandom()
        #
        #     for state, action, reward, nextstate, status, values in minibatch :
        #
        #         if status==1 :
        #             value = reward
        #         else :
        #             # arg_Qmax, Qmax = self.Get_Greedy_Pair(nextstate)
        #
        #             arg_Q2max, Q2max = self.Get_Greedy_PairQ2(nextstate)
        #
        #             Q_Update = self.ForwardPass(nextstate)[arg_Q2max]
        #
        #             value = reward + self.Gamma * Q_Update
        #
        #         values = self.ForwardPass(state)
        #         values[action] = value
        #
        #         self.batchSamplesX = np.vstack((self.batchSamplesX, state))
        #         self.batchSamplesY = np.vstack((self.batchSamplesY, values))
        #
        #     with self.graph.as_default():
        #         self.model.fit(self.batchSamplesX, self.batchSamplesY, epochs=self.trainPass,
        #                        batch_size=self.batchsize, verbose=0)
        #
        #     # Reset the batch
        #     self.batchSamplesX = np.array([], dtype=np.float).reshape(0, self.size_of_input_units)
        #     self.batchSamplesY = np.array([], dtype=np.float).reshape(0, self.output_unit)


        # NOT WORKING
        # time_Full = time.time()
        # self.memory.append((state,action,reward,nextstate,status))
        #
        # # if self.fresh_experience_counter == self.batchsize :
        # if len(self.memory) >= self.batchsize:
        #
        #     self.trainingepochtotal += self.trainPass
        #
        #
        #     # Get Unique Samples from memory as much as batchsize
        #     minibatch = random.sample(list(self.memory), self.batchsize)
        #
        #     time_Loop = time.time()
        #
        #     for i in np.arange(len(minibatch)):
        #         state, action, reward, nextstate, status = minibatch[i]
        #
        #         arg_Qmax, Qmax = self.Get_Greedy_Pair(nextstate)
        #         QValue = reward + self.Gamma * Qmax
        #
        #         processed_state = self.Convert_State_To_Input(state)
        #         values = self.ForwardPass(processed_state)
        #
        #         index = self.Get_Action_Index(action)
        #         values[index] = QValue
        #
        #         self.batchSamplesX = np.vstack((self.batchSamplesX, processed_state))
        #         self.batchSamplesY = np.vstack((self.batchSamplesY, values))
        #
        #     print("For Loop: %s seconds" % (time.time() - time_Loop))  # 0.12662458419799805 seconds
        #
        #     time_Fit = time.time()
        #     with self.graph.as_default():
        #         self.model.fit(self.batchSamplesX, self.batchSamplesY, epochs=self.trainPass, batch_size=self.batchsize,
        #                        verbose=0)
        #     print("Get_Fit: %s seconds" % (time.time() - time_Fit))  # 0.12662458419799805 seconds
        #
        #     # Reset the batch
        #     self.batchSamplesX = np.array([], dtype=np.float).reshape(0, self.size_of_input_units)
        #     self.batchSamplesY = np.array([], dtype=np.float).reshape(0, self.output_unit)
        #
        # print("Get_Full: %s seconds" % (time.time() - time_Full))  # 0.12662458419799805 seconds


            # NOT WORKING
        # Transform our states to new form then store them.
        # state = self.Convert_State_To_Input(state)
        # nextstate = self.Convert_State_To_Input (nextstate)


        # self.memory.append((state,action,reward,nextstate,status))
        # self.counter_experience+=1
        #
        # if len(self.memory) >= self.batchsize and self.counter_experience % self.train_period == 0 :
        #     self.Network_Train()

    def Network_Train(self):
        print("Network Train Called..")

        # NOT WORKING #2
        # minibatch = self.Memory_Sample_UniformRandom()
        # for i in range(self.batchsize):
        #
        #     state, action, reward, nextstate, status = minibatch[i]
        #
        #     action_id = self.Get_Action_Index(action)
        #
        #     Qtarget = self.ForwardPass(self.Convert_State_To_Input(state))
        #
        #     if status == 1:
        #         Qtarget[action_id] = reward
        #     else:
        #         arg_Qmax, Qmax = self.Get_Greedy_Pair(nextstate)
        #         Qtarget[action_id] =  reward + self.Gamma * Qmax
        #
        #     self.batchSamplesX = np.vstack((self.batchSamplesX, self.Convert_State_To_Input(state)))
        #     self.batchSamplesY = np.vstack((self.batchSamplesY, Qtarget))
        #
        # with self.graph.as_default():
        #     self.model.fit(self.batchSamplesX, self.batchSamplesY, epochs=self.trainPass, batch_size=self.batchsize,verbose=0)
        #
        # # Reset the batch
        # self.batchSamplesX = np.array([], dtype=np.float).reshape(0, self.size_of_input_units)
        # self.batchSamplesY = np.array([], dtype=np.float).reshape(0, self.output_unit)



        # NOT WORKING #1
        # minibatch = self.Memory_Sample_UniformRandom()
        # #states, actions, rewards, nextstates, stats = zip(*minibatch)
        #
        # states          = np.vstack(np.array(minibatch)[:, 0])
        # actions         = np.vstack(np.array(minibatch)[:, 1])
        # rewards         = np.vstack(np.array(minibatch)[:, 2])
        # nextstates      = np.vstack(np.array(minibatch)[:, 3])
        # stats           = np.vstack(np.array(minibatch)[:, 4])
        #
        # action_ids = np.zeros(shape=(self.batchsize, 1))
        # for i in range(self.batchsize):
        #     action_ids[i, 0] = self.Get_Action_Index(actions[i])
        #
        # # Prediction of the model
        # with self.graph.as_default():
        #
        #     # Predictions
        #     Qpredicted   = self.model.predict(nextstates)
        #     Qtarget      = self.model.predict(states)
        #
        #     for i in range(self.batchsize):
        #
        #         action_id = int(action_ids[i, 0])
        #
        #         if stats[i] == 1:
        #             Qtarget[i, action_id] = rewards[i, 0]
        #         else:
        #             arg_Qmax = np.argmax(Qpredicted[i, :])
        #             Qmax = np.max(Qpredicted[i, :])
        #             reward = rewards[i, 0]
        #
        #             Qtarget[i, action_id] =  reward + self.Gamma * Qmax
        #
        #     self.model.fit(states, Qtarget, epochs=self.trainPass, batch_size=self.batchsize,
        #                    verbose=0)

    def Memory_Sample_UniformRandom(self):
        return random.sample(list(self.memory),self.batchsize)

    def Memory_Sample_PrioritizationProportional(self):
        raise NotImplementedError()

    def Memory_Sample_PrioritizationRankBased(self):
        raise NotImplementedError()

    def Save_Model(self):
        with self.graph.as_default():
            self.model.save_weights("modelOutput.h5")
            print("###############################")
            print("Model saved.......")
            print("###############################")

    def __del__(self):
        print('Representation object died.')
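
A hedged driver for the DeepQNetwork class above; the two-agent 5x5 grid state layout, action encoding, and reward are assumptions, and the Memory_* classes are assumed to come from the original module.

# Hypothetical driver for DeepQNetwork above; state/action encodings are assumptions,
# and Memory_UniformRandom / Memory_SumTree are assumed to be provided by the original module.
import numpy as np

dqn = DeepQNetwork(gridsize=5, actionspaceperagent=5, numberofagent=2,
                   memory_type="Uniform")
state      = np.array([0, 0, 4, 4])   # (row, col) for each of the two agents
action     = np.array([1, 3])         # one action index per agent
next_state = np.array([0, 1, 4, 3])
dqn.Add_Experience(state, action, next_state, reward=0.0, status=0)
arg, value = dqn.Get_Greedy_Pair(next_state)
print('greedy joint action index:', arg, 'value:', value)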
Code Example #13
File: projectTA.py Project: GunturW/interfaceTA
def load_model_klasifikasi():
    global klasifikasi, train_set, test_set, datanya, kelasnya, LOKASI_TRAINING, LOKASI_TESTING
    global MODENYA, productionEpochnya, isLoadedDariModel
    # Initializing the CNN
    klasifikasi = Sequential()

    # input the first convolution with 32 filters, a (5x5) kernel, and input shape (64,64,3)
    klasifikasi.add(
        Convolution2D(
            32,  # number of filter layers
            5,  # y dimension of kernel (we're going for a 5x5 kernel)
            5,  # x dimension of kernel
            input_shape=(64, 64, 3),
            init='he_normal'))

    # input activation function relu
    klasifikasi.add(Activation('relu'))

    # input subsampling layer (maxpooling) (2x2) for reduce data
    klasifikasi.add(MaxPooling2D(pool_size=(2, 2)))

    # input the second convolution with 64 filters and a (5x5) kernel
    klasifikasi.add(
        Convolution2D(
            64,  # number of filter layers
            5,  # y dimension of kernel (we're going for a 5x5 kernel)
            5,  # x dimension of kernel
            init='he_normal'))

    # input activation function relu
    klasifikasi.add(Activation('relu'))

    # input subsampling layer (maxpooling) (2x2) for reduce data
    klasifikasi.add(MaxPooling2D(pool_size=(2, 2)))

    #Input Flatten
    klasifikasi.add(Flatten())
    klasifikasi.add(Dense(150, activation='relu', init='he_normal'))
    klasifikasi.add(Dropout(0.5))
    klasifikasi.add(Dense(84, activation='relu', init='he_normal'))
    klasifikasi.add(Dropout(0.5))

    klasifikasi.add(Dense(jumlahKelas, activation='softmax', init='he_normal'))
    print("Full Connection Between Hidden Layers and Output Layers Completed")

    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)

    test_datagen = ImageDataGenerator(rescale=1. / 255)

    train_set = train_datagen.flow_from_directory(LOKASI_TRAINING,
                                                  target_size=(64, 64),
                                                  batch_size=20,
                                                  class_mode='categorical')

    test_set = test_datagen.flow_from_directory(LOKASI_TESTING,
                                                target_size=(64, 64),
                                                batch_size=20,
                                                class_mode='categorical')

    if isLoadedDariModel == True:
        namaFilenya = "modelKlasifikasi" + str(productionEpochnya) + ".h5"
        if os.path.exists(namaFilenya):
            klasifikasi = load_model(namaFilenya)
            datanya = klasifikasi.compile(optimizer='adam',
                                          loss='categorical_crossentropy',
                                          metrics=['accuracy'])
        else:
            raise ValueError(
                'Error: file does not exist; please run training first before using the model'
            )
    else:
        # compile CNN
        klasifikasi.compile(optimizer='adam',
                            loss='categorical_crossentropy',
                            metrics=['accuracy'])
        print(klasifikasi.summary())
        print("Compiling Initiated")

        if MODENYA == 'production':

            tensorboard = TensorBoard(log_dir="logs/{}".format(time()))

            datanya = klasifikasi.fit_generator(
                train_set,
                steps_per_epoch=hitungGambar(LOKASI_TRAINING),
                epochs=productionEpochnya,
                validation_data=test_set,
                validation_steps=hitungGambar(LOKASI_TESTING),
                callbacks=[tensorboard])
            klasifikasi.save("modelKlasifikasi" + str(productionEpochnya) +
                             ".h5")

            test_steps_per_epoch = numpy.math.ceil(test_set.samples /
                                                   test_set.batch_size)

            predictions = klasifikasi.predict_generator(
                test_set, steps=test_steps_per_epoch)

            predicted_classes = numpy.argmax(predictions, axis=1)

            true_classes = test_set.classes
            class_labels = list(test_set.class_indices.keys())

            report = metrics.classification_report(true_classes,
                                                   predicted_classes,
                                                   target_names=class_labels)
            print(report)
        else:
            print('Model not saved')

        gambarHasilLatih()

    klasifikasi._make_predict_function()
    print("Compiling Completed")
Code Example #14
    Embedding(vocab_size, embedding_size, input_length=max_len),
    LSTM(256, return_sequences=True),
    TimeDistributed(Dense(300))
])

final_model = Sequential([
    Merge([image_model, caption_model], mode='concat', concat_axis=1),
    Bidirectional(LSTM(256, return_sequences=False)),
    Dense(vocab_size),
    Activation('softmax')
])

#final_model.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy'])

final_model.load_weights(path + 'time_inceptionV3.h5')
final_model._make_predict_function()
graph1 = tf.get_default_graph()


def predict_caption(image):
    start_word = ["<start>"]
    while True:
        par_caps = [word2idx[i] for i in start_word]
        par_caps = sequence.pad_sequences([par_caps],
                                          maxlen=max_len,
                                          padding='post')
        e = encode(image)
        global graph1
        with graph1.as_default():
            preds = final_model.predict([np.array([e]), np.array(par_caps)])
        word_pred = idx2word[np.argmax(preds[0])]
Code Example #15
for layer_dim in reversed([im_shape[-1]] + layer_dims[0:-1]):
    AE.add(
        Conv2DTranspose(layer_dim,
                        kernel_size,
                        strides=(2, 2),
                        padding='same',
                        activation='relu'))

#rmsprop works well for <=8 layers and < 8 base dim
AE.compile(optimizer='rmsprop', loss='mean_squared_error')
#adam works well with 8 layers 8 base dim
#AE.compile(optimizer='adam',loss='mean_squared_error')

#AE.compile(optimizer='sgd',loss='mean_squared_error')

AE._make_predict_function()

graph = K.get_session().graph

assert seq_len == 1, "I broke it and I don't care right now"


#Define image pre-processing function
def pre_process_im(im):
    im = cv2.resize(im.copy(), im_shape[0:2])
    im = (im.reshape((1, ) + im_shape).astype(np.float32) / 127.5) - 1
    return im


def de_process_im(im):
    #im = cv2.resize(im.copy(),win_size[0:2])
Code Example #16
class StockCNN(BaseModel):
    def __init__(self,
                 nb_classes,
                 window_length,
                 weights_file='weights/cnn.h5'):
        self.model = None
        self.weights_file = weights_file
        self.nb_classes = nb_classes
        self.window_length = window_length

    def build_model(self, load_weights=True):
        """ Load training history from path

        Args:
            load_weights (Bool): True to resume training from file or just deploying.
                                 Otherwise, training from scratch.

        Returns:

        """
        if load_weights:
            self.model = load_model(self.weights_file)
            print('Successfully loaded model')
        else:
            self.model = Sequential()

            self.model.add(
                Conv2D(filters=32,
                       kernel_size=(1, 3),
                       input_shape=(self.nb_classes, self.window_length, 1),
                       activation='relu'))
            self.model.add(Dropout(0.5))
            self.model.add(
                Conv2D(filters=32,
                       kernel_size=(1, self.window_length - 2),
                       activation='relu'))
            self.model.add(Dropout(0.5))
            self.model.add(Flatten())
            self.model.add(Dense(64, activation='relu'))
            self.model.add(Dropout(0.5))
            self.model.add(Dense(64, activation='relu'))
            self.model.add(Dropout(0.5))
            self.model.add(Dense(self.nb_classes, activation='softmax'))
            self.model.compile(loss='categorical_crossentropy',
                               optimizer=Adam(lr=1e-3),
                               metrics=['accuracy'])
            print('Built model from scratch')
        self.model._make_predict_function()
        self.graph = tf.get_default_graph()

    def train(self, X_train, Y_train, X_val, Y_val, verbose=True):
        #continue_train = True
        #while continue_train:
        for episodes in range(1, 21):
            print('Episode ', episodes)
            self.model.fit(X_train,
                           Y_train,
                           batch_size=128,
                           epochs=10,
                           validation_data=(X_val, Y_val),
                           shuffle=True,
                           verbose=verbose)
        #    save_weights = input('Type True to save weights\n')
        #    if save_weights:
        #        self.model.save(self.weights_file)
        #    continue_train = input("True to continue train, otherwise stop training...\n")
        self.model.save(self.weights_file)
        print('Finish.')

    def evaluate(self, X_test, Y_test, verbose=False):
        return self.model.evaluate(X_test, Y_test, verbose=verbose)

    def predict(self, X_test, verbose=False):
        return self.model.predict(X_test, verbose=verbose)

    def predict_single(self, observation):
        """ Predict the action of a single observation

        Args:
            observation: (num_stocks + 1, window_length)

        Returns: a single action array with shape (num_stocks + 1,)

        """
        obsX = observation[:, -self.window_length:,
                           3:4] / observation[:, -self.window_length:, 0:1]
        obsX = normalize(obsX)
        obsX = np.expand_dims(obsX, axis=0)
        with self.graph.as_default():
            return np.squeeze(self.model.predict(obsX), axis=0)
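
A hedged batched-prediction driver for the StockCNN class above; the input shape mirrors the Conv2D input_shape, and the module-level imports (Conv2D, Dense, Adam, tf) are assumed to be present as in the original file.

# Hypothetical batched prediction with StockCNN above; the input shape
# (samples, nb_classes, window_length, 1) matches the Conv2D input_shape.
import numpy as np

cnn = StockCNN(nb_classes=17, window_length=50, weights_file='weights/cnn.h5')
cnn.build_model(load_weights=False)   # build from scratch instead of loading weights/cnn.h5
X = np.random.rand(4, 17, 50, 1)      # 4 dummy observations
print(cnn.predict(X).shape)           # (4, 17) softmax portfolio weights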
Code Example #17
    def train(self, tsData, nb_epochs=40, batchSize=32):
        """
        Trains the LSTM on the initial TimeSeries data. 
        
        tsData can be pandas object with DateTimeIndex or a numpy array
        tsData: list with numbers or pandas series
        nb_epochs: 20 maybe better
        batchSize: 32 ok
        """
        self.in_training = True
        if self.batch_size != batchSize:
            self.batch_size = batchSize

        # if self.model is not None:
        #     print('Model is already trained')
        #     return

        # TODO input validation
        temp_df = pd.DataFrame(tsData).copy()
        temp_df.columns = ['load']

        temp_df['diff'] = temp_df.load.diff()
        temp_df['outVal'] = temp_df.load.shift(-1)

        # Scale the input train data
        temp_scaler_load = StandardScaler()
        temp_df['scaledLoad'] = temp_scaler_load.fit_transform(
            temp_df[['load']])
        temp_df['outVal'] = temp_scaler_load.transform(temp_df[['outVal']])

        temp_scaler_diff = StandardScaler()
        temp_df['scaledDiff'] = temp_scaler_diff.fit_transform(
            temp_df[['diff']])

        # Reshape the input to the desired form
        df_train = temp_df[['scaledLoad', 'scaledDiff',
                            'outVal']].dropna().values
        X, y = self.split_sequences(df_train, self.n_steps)

        # Create LSTM Network
        temp_model = Sequential()

        temp_model.add(
            LSTM(units=50,
                 activation='relu',
                 return_sequences=True,
                 input_shape=(self.n_steps, self.n_features)))
        temp_model.add(Dropout(0.2))

        temp_model.add(LSTM(units=50, activation='relu',
                            return_sequences=True))
        temp_model.add(Dropout(0.2))

        temp_model.add(LSTM(units=50, activation='relu',
                            return_sequences=True))
        temp_model.add(Dropout(0.2))

        temp_model.add(LSTM(units=50, activation='relu'))
        temp_model.add(Dropout(0.2))

        temp_model.add(Dense(units=1))

        temp_model.compile(optimizer='adam', loss='mean_squared_error')

        temp_model.fit(X,
                       y,
                       epochs=nb_epochs,
                       batch_size=self.batch_size,
                       verbose=2)
        temp_model._make_predict_function()
        while self.in_forecast:
            print(
                'Training is waiting for forecast finish to change the model')
        self.in_change = True
        self.model = temp_model
        self.df = temp_df
        self.scaler_diff = temp_scaler_diff
        self.scaler_load = temp_scaler_load
        self.in_change = False
        self.in_training = False
        '''
Code Example #18
vgg_model_m._make_predict_function()
input_shape_m = vgg_model_m.output_shape[1]
model_m = Sequential()
model_m.add(vgg_model_m)
model_m.add(InputLayer(input_shape=(input_shape_m, )))
model_m.add(Dense(512, activation='relu', input_dim=input_shape_m))
model_m.add(Dropout(0.3))
model_m.add(Dense(512, activation='relu'))
model_m.add(Dropout(0.3))
model_m.add(Dense(multi_types, activation='softmax'))
sgd = keras.optimizers.SGD(lr=multi_lr)
model_m.compile(loss='categorical_crossentropy',
                optimizer=sgd,
                metrics=['accuracy'])
model_m.load_weights("fit_multi.h5")
model_m._make_predict_function()

vgg_s = vgg16.VGG16(include_top=False,
                    weights='imagenet',
                    input_shape=input_shape)
output_s = vgg_s.layers[-1].output
output_s = keras.layers.Flatten()(output_s)
vgg_model_s = Model(vgg_s.input, output_s)
vgg_model_s._make_predict_function()
input_shape_s = vgg_model_s.output_shape[1]
model_s = Sequential()
model_s.add(InputLayer(input_shape=(input_shape_s, )))
model_s.add(Dense(512, activation='relu', input_dim=input_shape_s))
model_s.add(Dropout(0.3))
model_s.add(Dense(512, activation='relu'))
model_s.add(Dropout(0.3))
Code Example #19
class DeepQNetwork_PrioritizedReplay_Target_LearnerThread(Representation):
    def __init__(self,
                 gridsize=5,
                 actionspaceperagent=5,
                 numberofagent=2,
                 hidden_unit=[12, 12],
                 learning_rate=0.1,
                 batch_size=32,
                 trainpass=25,
                 experiencebuffer=128,
                 train_period=1,
                 gamma=0.99,
                 model_reset_counter=32,
                 statePreprocessType="Tensor",
                 convolutionLayer=False,
                 modelId="noid",
                 logfolder=""):

        print("###############################")
        print("DeepQNetwork_PrioritizedReplay_Target_LearnerThread")
        print("###############################")

        self.Gamma = gamma
        self.batchsize = batch_size
        self.trainPass = trainpass
        self.hidden_unit = hidden_unit
        self.learningrate = learning_rate
        self.statePreprocessType = statePreprocessType
        self.convolutionLayer = convolutionLayer
        self.mutex = Lock()

        if (statePreprocessType == "Tensor"):
            self.size_of_input_units = gridsize * gridsize * numberofagent
        elif (statePreprocessType == "Vector"):
            self.size_of_input_units = 7 * numberofagent
            # (x,y,a) for each agent
        self.gridsize = gridsize

        self.experiencebuffersize = experiencebuffer

        self.memory = Memory_SumTree(experiencebuffer)

        self.fresh_experience_counter = 0
        self.actionspaceforagent = actionspaceperagent
        self.numberofagent = numberofagent
        self.output_unit = actionspaceperagent**numberofagent
        self.trainingepochtotal = 0
        self.train_period = train_period  # After how many new experiences we run fitting/training.
        self.counter_experience = 0  # A counter of how many experience tuples have been seen
        self.counter_modelReset = model_reset_counter
        self.modelId = modelId
        self.logfolder = logfolder

        self.update_target_interval = 10000
        self.experience_counter = 0

        self.model = None
        self.model_target = None

        print("log/" + logfolder + "/model_0.h5")
        if os.path.isfile("log/" + self.logfolder + "/model_0.h5"):
            print("###############################")
            print("Existing model is being loaded.......")
            print("###############################")
            self.model = load_model("log/" + self.logfolder + "/model_0.h5")
        else:
            print("###############################")
            print("Not Any Existing model found.......")
            print("###############################")

        if self.model is None:

            # create model
            self.model = Sequential()

            if self.convolutionLayer == True:
                self.convolution_input = (self.numberofagent, self.gridsize,
                                          self.gridsize)
                self.model.add(
                    Conv2D(16, (2, 2),
                           strides=(1, 1),
                           activation='relu',
                           input_shape=(self.convolution_input),
                           data_format='channels_first'))
                self.model.add(
                    Conv2D(32, (2, 2), strides=(1, 1), activation='relu'))
                #self.model.add(Conv2D(64, (3, 3), activation='relu'))
                self.model.add(Flatten())
                self.model.add(Dense(self.hidden_unit[0], activation='tanh'))
            else:
                self.model.add(
                    Dense(self.hidden_unit[0],
                          activation='tanh',
                          input_dim=self.size_of_input_units))

            for i in range(1, len(hidden_unit)):
                self.model.add(Dense(self.hidden_unit[i], activation='tanh'))
            self.model.add(Dense(self.output_unit, activation='relu'))

            # Compile model
            self.model.compile(loss='mse',
                               optimizer='sgd',
                               metrics=['accuracy'])
            self.model._make_predict_function()

            if os.path.isfile("log/" + self.logfolder + "/model_weight_0.h5"):
                self.model.load_weights("log/" + self.logfolder +
                                        "/model_weight_0.h5")
                print("###############################")
                print("Existing model params are loaded.......")
                print("###############################")

        # save the TensorFlow graph:
        self.graph = tf.get_default_graph()

        self.model.summary()

        self.model_target = clone_model(self.model)
        self.Update_target()

        # self.Save_Model()
        self.model.save_weights("log/" + self.logfolder + "/modelinit_" +
                                self.modelId + ".h5")

        # Reset the batch
        self.Reset_Batch()

        # Initialize thread parameters
        self.flag_continue = True

        #_thread.start_new_thread(self.Learner, ())
        self._thread = Thread(target=self.Learner, args=())
        self._thread.start()

    def Update_target(self):
        print('Updating Target Network')
        model_weights = self.model.get_weights()
        self.mutex.acquire(1)
        self.model_target.set_weights(model_weights)
        self.mutex.release()

    def Convert_State_To_Input(self, state):

        if (self.statePreprocessType == "Tensor"):
            # Create zero filled output value
            outValue = np.zeros(shape=(self.numberofagent, self.gridsize,
                                       self.gridsize))

            # Convert list to numpy array
            state = np.reshape(state, (1, state.shape[0]))

            # Decompose states
            states = np.split(state[0], self.numberofagent)

            # Fill output tensor with agents position
            for i in range(self.numberofagent):
                row = int(states[i][0])
                col = int(states[i][1])
                outValue[i, row, col] = 1

            if self.convolutionLayer == True:
                return outValue
            else:
                return outValue.flatten()

        elif (self.statePreprocessType == "Vector"):
            return state

    def Get_Greedy_Pair(self, state):
        # Backward compatibility: Preprocess state input if needed.
        if np.size(state) != self.size_of_input_units:
            values = self.ForwardPass(self.Convert_State_To_Input(state))
        else:
            values = self.ForwardPass(state)

        # Get the maximums
        arg = values.argmax()
        valmax = values.max()

        return arg, valmax

    def Get_Value(self, state, action):

        values = self.ForwardPass(self.Convert_State_To_Input(state))
        index = self.Get_Action_Index(action)
        temp = values[index]

        return temp

    def ForwardPass(self, input):

        # Form Input Values
        if self.convolutionLayer == True:
            input = np.reshape(
                input, (1, input.shape[0], input.shape[1], input.shape[2]))
        else:
            input = np.reshape(input, (1, input.shape[0]))

        self.mutex.acquire(1)
        # Prediction of the model
        with self.graph.as_default():
            hypothesis = self.model_target.predict(input)
        self.mutex.release()

        values = np.asarray(hypothesis).reshape(self.output_unit)

        return values

    def Get_Action_Index(self, action):
        sizeOfAction = action.shape[0]
        temp = 0

        for i in np.arange(sizeOfAction):
            temp = temp + action[i] * (self.actionspaceforagent**
                                       (sizeOfAction - i - 1))

        return temp

    def Set_Value(self, state, action, value):

        # Preprocess State
        state = self.Convert_State_To_Input(state)

        # Update label
        values = self.ForwardPass(state)
        index = self.Get_Action_Index(action)

        # Calculate error for Prioritized Experience Replay
        error = abs(values[index] - value)

        values[index] = value

        # Append new sample to Memory of Experiences
        # Don't worry about its size, since it is a queue
        self.memory.add(error, (state, values))

        # To be able to stop learner thread if there is no more experience
        if self.experience_counter < 10:  #self.experiencebuffersize:
            self.experience_counter += 1

        return self.trainingepochtotal

    def Learn(self):

        #if self.fresh_experience_counter == self.batchsize :
        if self.memory.length(
        ) >= self.batchsize and self.experience_counter > 0:

            self.trainingepochtotal += self.trainPass
            #print('Training Epoch:', self.trainingepochtotal)

            # Get Unique Samples from memory as much as batchsize
            minibatch = self.memory.sample(self.batchsize)

            batchSamplesX = []
            batchSamplesY = []

            for i in np.arange(len(minibatch)):
                idx, (X, Y) = minibatch[i]
                batchSamplesX.append(X)
                batchSamplesY.append(Y)

            with self.graph.as_default():
                self.model.fit(np.array(batchSamplesX),
                               np.array(batchSamplesY),
                               epochs=self.trainPass,
                               batch_size=self.batchsize,
                               verbose=0)

            if not self.trainingepochtotal % self.update_target_interval:
                self.Update_target()

            # To be able to stop learner thread if there is no more experience
            self.experience_counter -= 1
        else:
            print("Sleeping Learner Thread")
            time.sleep(1)

    # Learner Thread Run Function
    def Learner(self):

        while self.flag_continue:
            self.Learn()

        print("Thread Learner stopped.")

    def Reset_Batch(self):
        # Reset the batch
        if self.convolutionLayer == True:
            self.batchSamplesX = np.array([], dtype=np.float).reshape(
                0, self.numberofagent, self.gridsize, self.gridsize)
            self.batchSamplesY = np.array([], dtype=np.float).reshape(
                0, self.output_unit)
        else:
            self.batchSamplesX = np.array([], dtype=np.float).reshape(
                0, self.size_of_input_units)
            self.batchSamplesY = np.array([], dtype=np.float).reshape(
                0, self.output_unit)

    def Add_Experience(self, state, action, nextstate, reward, status):

        # WORKING
        arg_Qmax, Qmax = self.Get_Greedy_Pair(nextstate)
        QValue = reward + self.Gamma * Qmax

        self.Set_Value(state, action, QValue)

    def Save_Model(self):
        with self.graph.as_default():

            if not os.path.exists("log/" + self.logfolder):
                os.makedirs("log/" + self.logfolder)

            self.model.save("log/" + self.logfolder + "/model_" +
                            self.modelId + ".h5")
            self.model.save_weights("log/" + self.logfolder +
                                    "/model_weight_" + self.modelId + ".h5")
            print("###############################")
            print("Model saved: " + "log/" + self.logfolder)
            print("###############################")

    def __del__(self):
        self.Save_Model()
        self.flag_continue = False
        print('Representation object died.')
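
A hedged driver for the threaded DQN class above; the environment loop is omitted, the state/action encodings are assumptions, and Memory_SumTree plus the threading imports are assumed to come from the original module.

# Hypothetical driver for DeepQNetwork_PrioritizedReplay_Target_LearnerThread above;
# state/action encodings are assumptions, and Memory_SumTree is assumed to exist.
import os
import numpy as np

os.makedirs("log/demo", exist_ok=True)   # the constructor saves its initial weights under log/<logfolder>/
dqn = DeepQNetwork_PrioritizedReplay_Target_LearnerThread(gridsize=5,
                                                          actionspaceperagent=5,
                                                          numberofagent=2,
                                                          logfolder="demo")
state      = np.array([0, 0, 0, 4, 4, 0])   # (row, col, aux) per agent, flattened
action     = np.array([1, 3])               # one action index per agent
next_state = np.array([0, 1, 0, 4, 3, 0])
dqn.Add_Experience(state, action, next_state, reward=0.0, status=0)
dqn.Save_Model()
dqn.flag_continue = False                   # let the background Learner thread exit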
Code Example #20
class stock_LSTM:
    """ This class is intended to include strictly the methods used for creating and interacting with the model itself"""
    ''' All of the parameters passed to these methods must be normalized and shaped correctly'''
    def __init__(self):
        self.model = Sequential()

    def build_model(self,
                    neurons=100,
                    input_steps=49,
                    dropout_rate=0.2,
                    loss="mse",
                    optimizer="adam"):
        self.model.add(
            LSTM(neurons, input_shape=(input_steps, 16),
                 return_sequences=True))
        self.model.add(Dropout(dropout_rate))
        self.model.add(LSTM(neurons, return_sequences=True))
        self.model.add(Dropout(dropout_rate))
        self.model.add(LSTM(neurons, return_sequences=False))
        self.model.add(Dropout(dropout_rate))
        self.model.add(Dense(1, activation="linear"))
        self.model.compile(loss=loss,
                           optimizer=optimizer,
                           metrics=['accuracy'])

        debug('[Model] Model Compiled')

        return self.model

    def train(self,
              x,
              y,
              epochs=1,
              batch_size=32,
              save_dir=os.path.join(environment, "saved_models"),
              save_name=None,
              save=True):
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size' % (epochs, batch_size))

        if save_name is None:
            save_fname = os.path.join(
                save_dir, '%s-e%s.h5' %
                (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
        else:
            save_fname = save_name
        callbacks = [
            EarlyStopping(monitor='val_accuracy', patience=2),
            ModelCheckpoint(filepath=save_fname,
                            monitor='val_accuracy',
                            save_best_only=True)
        ]
        # A validation split is needed so the 'val_accuracy' metric monitored
        # by the callbacks above actually exists in the training logs.
        self.model.fit(
            x,
            y,
            epochs=epochs,
            batch_size=batch_size,
            validation_split=0.05,
            callbacks=callbacks,
        )

        debug('[Model] Training Completed.')

        if save:
            self.model.save(save_fname)
            debug('Model saved as %s' % save_fname)

    def load_model(self, filepath):
        debug('[Model] Loading model from file %s' % filepath)
        self.model = load(filepath)
        self.session = tf.compat.v1.get_default_session()
        self.model._make_predict_function()
        self.graph = tf.compat.v1.get_default_graph()
        self.graph.finalize()

    def predict_point_by_point(self, data):
        # Predict each timestep given the last sequence of true data, in effect only predicting 1 step ahead each time
        debug('[Model] Predicting Point-by-Point...')
        predicted = self.model.predict_on_batch(data)
        predicted = np.reshape(predicted, (predicted.size, ))
        return predicted
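
A minimal usage sketch for stock_LSTM, assuming the module's original imports (numpy, Keras, os, datetime) are available and its debug() helper is defined; the random arrays and the save_name are illustrative stand-ins, not real market data:

# Usage sketch (illustrative data only)
import numpy as np

lstm = stock_LSTM()
lstm.build_model(neurons=100, input_steps=49)           # expects windows of 49 steps x 16 features

x_dummy = np.random.rand(128, 49, 16)                   # 128 dummy windows
y_dummy = np.random.rand(128, 1)                        # one target per window

lstm.train(x_dummy, y_dummy, epochs=1, batch_size=32,
           save_name='stock_lstm_demo.h5', save=False)  # explicit save_name avoids the save_dir default
preds = lstm.predict_point_by_point(x_dummy[:8])
print(preds.shape)                                      # -> (8,)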
コード例 #21
0
class Jogador:

    peakVal = 0.2
    sigma = 0.001

    numeroJogadasAFrente = 5

    numeroDePossiveisComidasParaNaoConsiderarJogadaForcada = 3

    pontosQuandoPerde = -2
    pontosQuandoGanha = 1
    pontosQuandoEmpata = 0

    numeroDeLayers = 3

    def __init__(self,
                 model=None,
                 valorDama=None,
                 listaSigmas=None,
                 geracao=0,
                 genealogia=[],
                 debug=False):
        self.listaSigmas = []
        listaWeights = []
        self.currentPoints = 0
        self.totalPoints = 0
        self.numeroDeGeracoesVivo = 1
        self.geracao = geracao

        if (valorDama is None):
            self.valorDama = 2.0
        else:
            self.valorDama = valorDama

        if (model is None):
            initializer = initializers.random_uniform(minval=(-1) *
                                                      Jogador.peakVal,
                                                      maxval=Jogador.peakVal)
            self.model = Sequential()
            self.model.add(
                Dense(40,
                      input_dim=32,
                      kernel_initializer=initializer,
                      activation='tanh'))
            #            self.model.add (Dense (40, kernel_initializer=initializer, activation = 'tanh'))
            self.model.add(
                Dense(10, kernel_initializer=initializer, activation='tanh'))
            self.model.add(
                Dense(1, kernel_initializer=initializer, activation='tanh'))
            self.model.compile(loss='binary_crossentropy',
                               optimizer='adam',
                               metrics=['accuracy'])
            self.model._make_predict_function()
        else:
            self.model = model
            self.model._make_predict_function()

        if (listaSigmas is None):
            for layerIndex in range(self.numeroDeLayers):
                layer = self.model.get_layer(index=layerIndex)
                layerWeights = layer.get_weights()
                arrayWeights = np.zeros(
                    (layerWeights[0].shape[0],
                     layerWeights[0].shape[1])) + self.sigma
                arrayBiases = np.zeros(layerWeights[1].shape[0]) + self.sigma
                listaWeights.clear()
                listaWeights.append(arrayWeights)
                listaWeights.append(arrayBiases)
                self.listaSigmas.append(copy.deepcopy(listaWeights))
        else:
            self.listaSigmas = listaSigmas

        self.nomeJogador = "Jogador_" + str(uuid.uuid4()) + ".h5"

        self.genealogia = copy.deepcopy(genealogia)

        listaNomeJogador = []
        listaNomeJogador.append(self.nomeJogador)
        listaGeracaoJogador = []
        listaGeracaoJogador.append(self.geracao)
        listaGeral = []
        listaGeral.append(listaNomeJogador)
        listaGeral.append(listaGeracaoJogador)

        self.genealogia.append(listaGeral)

        self.debug = debug

    def predict(self, tabuleiro):
        if (tabuleiro.ndim == 1):
            tabuleiro = np.array([tabuleiro])
        return self.model.predict(tabuleiro)

    def salvaModelo(self):
        self.model.save(".\\modelos\\" + self.nomeJogador)
        # Persist the dama (king) value and the genealogy alongside the model,
        # using context managers so the files are properly closed.
        with open(".\\pesosDamas\\" + self.nomeJogador, "w+") as file:
            file.write(str(self.valorDama))
        with open(".\\genealogia\\" + self.nomeJogador, "w+") as file2:
            file2.write(str(self.genealogia))

    def carregaModelo(self):
        self.model = load_model(".\modelos\\" + self.nomeJogador)
        self.model._make_predict_function()

    def calculaScoreTabuleiro(self, tabuleiro, numeroDaJogada):
        if (numeroDaJogada == self.numeroJogadasAFrente):
            return 0.0

        jogadaForcada = False

        gerenciadorDeTabuleiros = GerenciadorDeTabuleiros(tabuleiro)
        listaTabuleiros = gerenciadorDeTabuleiros.calculaPossibilidadesDeMultiplasComidas(
        )
        if (not listaTabuleiros or listaTabuleiros is None):
            listaTabuleiros = gerenciadorDeTabuleiros.calculaPossibilidadesDeMovimentoNormal(
            )
        elif (len(listaTabuleiros) <
              self.numeroDePossiveisComidasParaNaoConsiderarJogadaForcada):
            jogadaForcada = True

        maxScore = -9999999999

        numeroDaProximaJogada = numeroDaJogada
        if (not jogadaForcada):
            numeroDaProximaJogada += 1

        for tabuleiro in listaTabuleiros:
            if (not tabuleiro is None):
                score = (self.predict(
                    tabuleiro.converteTabuleiroParaArray(self.valorDama)) +
                         self.calculaScoreTabuleiro(
                             tabuleiro, numeroDaProximaJogada)) / 2.0
                if (score > maxScore and numeroDaJogada == 0):
                    maxScore = score

        return maxScore

    def selecionaMelhorJogada(self, tabuleiro, numeroDaJogada):
        tabuleiroEscolhido = None
        jogadaForcada = False

        gerenciadorDeTabuleiros = GerenciadorDeTabuleiros(tabuleiro)
        listaTabuleiros = gerenciadorDeTabuleiros.calculaPossibilidadesDeMultiplasComidas(
        )
        if (not listaTabuleiros or listaTabuleiros is None):
            listaTabuleiros = gerenciadorDeTabuleiros.calculaPossibilidadesDeMovimentoNormal(
            )
        elif (len(listaTabuleiros) <
              self.numeroDePossiveisComidasParaNaoConsiderarJogadaForcada):
            jogadaForcada = True

        maxScore = -9999999999

        numeroDaProximaJogada = numeroDaJogada
        if (not jogadaForcada):
            numeroDaProximaJogada += 1

        for tabuleiro in listaTabuleiros:
            if (not tabuleiro is None):
                score = (self.predict(
                    tabuleiro.converteTabuleiroParaArray(self.valorDama)) +
                         self.calculaScoreTabuleiro(
                             tabuleiro, numeroDaProximaJogada)) / 2.0
                if (score > maxScore and numeroDaJogada == 0):
                    #                    print ("SCORE MAIOR: " + str(score))
                    #                    tabuleiro.printaTabuleiro ()
                    maxScore = score
                    tabuleiroEscolhido = copy.deepcopy(tabuleiro)

        return tabuleiroEscolhido

    def calculaScoreTabuleiroMinMax(self, tabuleiro, numeroDaJogada,
                                    jogadorJogando, alpha, beta):
        if (numeroDaJogada >= self.numeroJogadasAFrente):
            if (jogadorJogando):
                tabuleiro.inverteVisaoTabuleiro()
                score = self.predict(
                    tabuleiro.converteTabuleiroParaArray(self.valorDama))

                gerenciadorDeTabuleiros = GerenciadorDeTabuleiros(tabuleiro)
                listaTabuleiros = gerenciadorDeTabuleiros.calculaPossibilidadesDeMultiplasComidas(
                )
                if (not listaTabuleiros or listaTabuleiros is None):
                    listaTabuleiros = gerenciadorDeTabuleiros.calculaPossibilidadesDeMovimentoNormal(
                    )

                if (listaTabuleiros is None or len(listaTabuleiros) == 0):
                    score = -1.1

                return score
            else:
                score = self.predict(
                    tabuleiro.converteTabuleiroParaArray(self.valorDama))
                tabuleiro.inverteVisaoTabuleiro()
                gerenciadorDeTabuleiros = GerenciadorDeTabuleiros(tabuleiro)
                listaTabuleiros = gerenciadorDeTabuleiros.calculaPossibilidadesDeMultiplasComidas(
                )
                if (not listaTabuleiros or listaTabuleiros is None):
                    listaTabuleiros = gerenciadorDeTabuleiros.calculaPossibilidadesDeMovimentoNormal(
                    )

                if (listaTabuleiros is None or len(listaTabuleiros) == 0):
                    score = 1.1

                return score

        tabuleiro.inverteVisaoTabuleiro()

        jogadaForcada = False

        gerenciadorDeTabuleiros = GerenciadorDeTabuleiros(tabuleiro)
        listaTabuleiros = gerenciadorDeTabuleiros.calculaPossibilidadesDeMultiplasComidas(
        )
        if (not listaTabuleiros or listaTabuleiros is None):
            listaTabuleiros = gerenciadorDeTabuleiros.calculaPossibilidadesDeMovimentoNormal(
            )
        elif (len(listaTabuleiros) <
              self.numeroDePossiveisComidasParaNaoConsiderarJogadaForcada):
            jogadaForcada = True

        numeroDaProximaJogada = numeroDaJogada
        if (not jogadaForcada):
            numeroDaProximaJogada += 1

        if (jogadorJogando):
            best = -9999999999
        else:
            best = 9999999999

        if (len(listaTabuleiros) == 0):
            if (jogadorJogando):
                return -1
            else:
                return 1

        for tabuleiro in listaTabuleiros:
            if (not tabuleiro is None):
                if (jogadorJogando):

                    best = max(
                        best,
                        self.calculaScoreTabuleiroMinMax(
                            copy.deepcopy(tabuleiro),
                            copy.deepcopy(numeroDaProximaJogada), False,
                            copy.deepcopy(alpha), copy.deepcopy(beta)))
                    alpha = max(alpha, best)
                    if (beta <= alpha):
                        break
                else:
                    best = min(
                        best,
                        self.calculaScoreTabuleiroMinMax(
                            copy.deepcopy(tabuleiro),
                            copy.deepcopy(numeroDaProximaJogada), True,
                            copy.deepcopy(alpha), copy.deepcopy(beta)))
                    beta = min(beta, best)
                    if (beta <= alpha):
                        break

        return best

    def calculaScoreTabuleiroMinMax2(self, tabuleiro, numeroDaJogada,
                                     jogadorJogando):
        if (numeroDaJogada >= self.numeroJogadasAFrente):
            if (jogadorJogando):
                tabuleiro.inverteVisaoTabuleiro()
                score = self.predict(
                    tabuleiro.converteTabuleiroParaArray(self.valorDama))

                gerenciadorDeTabuleiros = GerenciadorDeTabuleiros(tabuleiro)
                listaTabuleiros = gerenciadorDeTabuleiros.calculaPossibilidadesDeMultiplasComidas(
                )
                if (not listaTabuleiros or listaTabuleiros is None):
                    listaTabuleiros = gerenciadorDeTabuleiros.calculaPossibilidadesDeMovimentoNormal(
                    )

                if (listaTabuleiros is None or len(listaTabuleiros) == 0):
                    score = -1.1

                return score
            else:
                score = self.predict(
                    tabuleiro.converteTabuleiroParaArray(self.valorDama))
                tabuleiro.inverteVisaoTabuleiro()
                gerenciadorDeTabuleiros = GerenciadorDeTabuleiros(tabuleiro)
                listaTabuleiros = gerenciadorDeTabuleiros.calculaPossibilidadesDeMultiplasComidas(
                )
                if (not listaTabuleiros or listaTabuleiros is None):
                    listaTabuleiros = gerenciadorDeTabuleiros.calculaPossibilidadesDeMovimentoNormal(
                    )

                if (listaTabuleiros is None or len(listaTabuleiros) == 0):
                    score = 1.1

                return score

        tabuleiro.inverteVisaoTabuleiro()

        jogadaForcada = False

        gerenciadorDeTabuleiros = GerenciadorDeTabuleiros(tabuleiro)
        listaTabuleiros = gerenciadorDeTabuleiros.calculaPossibilidadesDeMultiplasComidas(
        )
        if (not listaTabuleiros or listaTabuleiros is None):
            listaTabuleiros = gerenciadorDeTabuleiros.calculaPossibilidadesDeMovimentoNormal(
            )
        elif (len(listaTabuleiros) <
              self.numeroDePossiveisComidasParaNaoConsiderarJogadaForcada):
            jogadaForcada = True

        numeroDaProximaJogada = numeroDaJogada
        if (not jogadaForcada):
            numeroDaProximaJogada += 1

        if (jogadorJogando):
            best = -1.1
        else:
            best = 1.1

        if (len(listaTabuleiros) == 0):
            if (jogadorJogando):
                return -1
            else:
                return 1

        for tabuleiro in listaTabuleiros:
            if (not tabuleiro is None):
                if (jogadorJogando):
                    best = max(
                        best,
                        self.calculaScoreTabuleiroMinMax2(
                            copy.deepcopy(tabuleiro),
                            copy.deepcopy(numeroDaProximaJogada), False))
                else:
                    best = min(
                        best,
                        self.calculaScoreTabuleiroMinMax2(
                            copy.deepcopy(tabuleiro),
                            copy.deepcopy(numeroDaProximaJogada), True))

        return best

    def calculaScoreTabuleiroMedia(self, tabuleiro, numeroDaJogada,
                                   jogadorJogando):
        if (numeroDaJogada >= self.numeroJogadasAFrente):
            if (jogadorJogando):
                tabuleiro.inverteVisaoTabuleiro()
                score = self.predict(
                    tabuleiro.converteTabuleiroParaArray(self.valorDama))

                gerenciadorDeTabuleiros = GerenciadorDeTabuleiros(tabuleiro)
                listaTabuleiros = gerenciadorDeTabuleiros.calculaPossibilidadesDeMultiplasComidas(
                )
                if (not listaTabuleiros or listaTabuleiros is None):
                    listaTabuleiros = gerenciadorDeTabuleiros.calculaPossibilidadesDeMovimentoNormal(
                    )

                if (listaTabuleiros is None or len(listaTabuleiros) == 0):
                    score = -1.1

                return score
            else:
                score = self.predict(
                    tabuleiro.converteTabuleiroParaArray(self.valorDama))
                tabuleiro.inverteVisaoTabuleiro()
                gerenciadorDeTabuleiros = GerenciadorDeTabuleiros(tabuleiro)
                listaTabuleiros = gerenciadorDeTabuleiros.calculaPossibilidadesDeMultiplasComidas(
                )
                if (not listaTabuleiros or listaTabuleiros is None):
                    listaTabuleiros = gerenciadorDeTabuleiros.calculaPossibilidadesDeMovimentoNormal(
                    )

                if (listaTabuleiros is None or len(listaTabuleiros) == 0):
                    score = 1.1

                return score

        tabuleiro.inverteVisaoTabuleiro()

        jogadaForcada = False

        gerenciadorDeTabuleiros = GerenciadorDeTabuleiros(tabuleiro)
        listaTabuleiros = gerenciadorDeTabuleiros.calculaPossibilidadesDeMultiplasComidas(
        )
        if (not listaTabuleiros or listaTabuleiros is None):
            listaTabuleiros = gerenciadorDeTabuleiros.calculaPossibilidadesDeMovimentoNormal(
            )
        elif (len(listaTabuleiros) <
              self.numeroDePossiveisComidasParaNaoConsiderarJogadaForcada):
            jogadaForcada = True

        numeroDaProximaJogada = numeroDaJogada
        numeroDaProximaJogada += 1
        #        if (not jogadaForcada):
        #            numeroDaProximaJogada += 1

        if (len(listaTabuleiros) == 0):
            if (jogadorJogando):
                return -1
            else:
                return 1
        soma = 0
        for tabuleiro in listaTabuleiros:
            if (not tabuleiro is None):
                if (jogadorJogando):
                    soma += self.calculaScoreTabuleiroMedia(
                        copy.deepcopy(tabuleiro),
                        copy.deepcopy(numeroDaProximaJogada), False)
                else:
                    soma += self.calculaScoreTabuleiroMedia(
                        copy.deepcopy(tabuleiro),
                        copy.deepcopy(numeroDaProximaJogada), True)

        return soma / len(listaTabuleiros)

    def selecionaMelhorJogadaMinMax(self, tabuleiro, numeroDaJogada):
        tabuleiroEscolhido = None
        jogadaForcada = False

        gerenciadorDeTabuleiros = GerenciadorDeTabuleiros(tabuleiro)
        listaTabuleiros = gerenciadorDeTabuleiros.calculaPossibilidadesDeMultiplasComidas(
        )
        if (not listaTabuleiros or listaTabuleiros is None):
            listaTabuleiros = gerenciadorDeTabuleiros.calculaPossibilidadesDeMovimentoNormal(
            )
        elif (len(listaTabuleiros) <
              self.numeroDePossiveisComidasParaNaoConsiderarJogadaForcada):
            jogadaForcada = True

        alpha = -9999999999
        beta = 9999999999

        numeroDaProximaJogada = numeroDaJogada
        if (not jogadaForcada):
            numeroDaProximaJogada += 1

        tabuleiroEscolhido = None
        for tabuleiro in listaTabuleiros:
            if (not tabuleiro is None):

                #                score = self.calculaScoreTabuleiroMinMax (copy.deepcopy(tabuleiro), numeroDaProximaJogada, False, alpha, beta)
                score = self.calculaScoreTabuleiroMedia(
                    copy.deepcopy(tabuleiro), numeroDaProximaJogada, False)
                if (score >= alpha and numeroDaJogada == 0):
                    alpha = score
                    tabuleiroEscolhido = copy.deepcopy(tabuleiro)
                    if (self.debug):
                        print("Tabuleiro Selecionado por score: " + str(score))
                        print("Tabuleiro:")
                        tabuleiroEscolhido.printaTabuleiro()

        return tabuleiroEscolhido

    def ganhaPartida(self):
        self.currentPoints += self.pontosQuandoGanha
        self.totalPoints += self.pontosQuandoGanha

    def perdePartida(self):
        self.currentPoints += self.pontosQuandoPerde
        self.totalPoints += self.pontosQuandoPerde

    def empataPartida(self):
        self.currentPoints += self.pontosQuandoEmpata
        self.totalPoints += self.pontosQuandoEmpata

    def calculaQuantidadeDePesos(self):
        quantidadeDePesos = 0
        for layerIndex in range(self.numeroDeLayers):
            layer = self.model.get_layer(index=layerIndex)
            layerWeights = layer.get_weights()
            quantidadeDePesos += layerWeights[0].shape[0] * layerWeights[
                0].shape[1] + layerWeights[1].shape[0]

        return quantidadeDePesos

    def printaPesos(self):
        for layerIndex in range(self.numeroDeLayers):
            layer = self.model.get_layer(index=layerIndex)
            layerWeights = layer.get_weights()
            print(layerWeights)

    def __del__(self):
        del self.model
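
A minimal usage sketch for Jogador, assuming the module's original imports (numpy, copy, uuid, Keras Sequential/Dense) and the older Keras API the class relies on (initializers.random_uniform, _make_predict_function); the 32-value board vector is random stand-in data rather than a legal checkers position:

# Usage sketch (random stand-in board, not a real game state)
import numpy as np

jogador = Jogador()                                   # builds the default 32-40-10-1 tanh network
tabuleiro_array = np.random.uniform(-1.0, 1.0, 32)    # one value per playable square
score = jogador.predict(tabuleiro_array)              # shape (1, 1) board evaluation in [-1, 1]
print(score[0, 0])

jogador.ganhaPartida()                                # record a win (+1 point)
print(jogador.currentPoints, jogador.totalPoints)
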
class Representation_Keras_MultiAgent_TensorInput(Representation):
    def __init__(self,
                 gridsize=5,
                 actionspaceperagent=5,
                 numberofagent=2,
                 hidden_unit=[12, 12],
                 learning_rate=0.1,
                 batch_size=10,
                 trainpass=1,
                 experiencebuffer=100):

        self.batchsize = batch_size
        self.trainPass = trainpass
        self.hidden_unit = hidden_unit
        self.learningrate = learning_rate
        self.size_of_input_units = gridsize * gridsize * numberofagent
        self.gridsize = gridsize
        self.memory = deque(maxlen=experiencebuffer)
        self.fresh_experience_counter = 0
        self.actionspaceforagent = actionspaceperagent
        self.numberofagent = numberofagent
        self.output_unit = actionspaceperagent**numberofagent
        self.trainingepochtotal = 0

        # create model
        self.model = Sequential()
        self.model.add(
            Dense(self.hidden_unit[0],
                  activation='tanh',
                  input_dim=self.size_of_input_units))
        for i in range(1, len(hidden_unit)):
            self.model.add(Dense(self.hidden_unit[i], activation='tanh'))
        self.model.add(Dense(self.output_unit, activation=LeakyReLU(0.3)))

        # Compile model
        self.model.compile(loss='mse', optimizer='sgd', metrics=['accuracy'])
        self.model._make_predict_function()

        # Save the TensorFlow graph so predictions can run on it from other threads
        self.graph = tf.get_default_graph()

        self.model.summary()

        if os.path.isfile("modeltrained.h5"):
            self.model.load_weights("modeltrained.h5")
            print("###############################")
            print("Existing model loaded.......")
            print("###############################")

        self.model.save_weights("modelinit.h5")

        # Reset the batch
        self.batchSamplesX = np.array([], dtype=float).reshape(
            0, self.size_of_input_units)
        self.batchSamplesY = np.array([], dtype=float).reshape(
            0, self.output_unit)

    def Convert_State_To_Input(self, state):

        # Create zero filled output value
        outValue = np.zeros(shape=(self.numberofagent, self.gridsize,
                                   self.gridsize))

        # Reshape the flat state into a single row vector
        state = np.reshape(state, (1, state.shape[0]))

        # Decompose states
        states = np.split(state[0], self.numberofagent)

        # Fill output tensor with agents position
        for i in range(self.numberofagent):
            row = int(states[i][0])
            col = int(states[i][1])
            outValue[i, row, col] = 1
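
        # Example (illustrative): for two agents on a 5x5 grid and
        # state = [1, 2, 3, 4], agent 0 marks cell (1, 2) in plane 0 and
        # agent 1 marks cell (3, 4) in plane 1; the flattened result has
        # gridsize * gridsize * numberofagent = 50 entries.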

        return outValue.flatten()

    def Get_Greedy_Pair(self, state):

        values = self.ForwardPass(self.Convert_State_To_Input(state))

        # Get the maximums
        arg = values.argmax()
        valmax = values.max()

        return arg, valmax

    def Get_Value(self, state, action):

        values = self.ForwardPass(self.Convert_State_To_Input(state))
        index = self.Get_Action_Index(action)
        temp = values[index]

        return temp

    def ForwardPass(self, input):

        # Form Input Values
        input = np.reshape(input, (1, input.shape[0]))

        # Prediction of the model
        with self.graph.as_default():
            hypothesis = self.model.predict(input)

        values = np.asarray(hypothesis).reshape(self.output_unit)

        return values

    def Set_Value(self, state, action, value):

        #time_Convert_State_To_Input = time.time()
        inputVal = self.Convert_State_To_Input(state)
        #print("Convert_State_To_Input: %s seconds" % (time.time() - time_Convert_State_To_Input)) # 7.677078247070312e-05 seconds

        #time_ForwardPass = time.time()
        values = self.ForwardPass(inputVal)
        #print("ForwardPass: %s seconds" % (time.time() - time_ForwardPass)) # 0.001142740249633789 seconds

        #time_Get_Action_Index = time.time()
        index = self.Get_Action_Index(action)
        values[index] = value
        #print("Get_Action_Index: %s seconds" % (time.time() - time_Get_Action_Index)) #0.12781143188476562 seconds

        #time_Prepare_Fit = time.time()
        # Rearrange the given input and output to make it appropriate for our NN
        sampleX = inputVal
        sampleY = values
        #np.array([value],dtype=np.float)

        # Fit the model
        #state = np.reshape(state, (1, state.shape[0]))
        #values = np.reshape(values, (1, values.shape[0]))

        # Increase counter to ensure enough new samples are gathered
        # self.fresh_experience_counter+=1

        # Append new sample to Memory of Experiences
        # Don't worry about its size, since it is a queue
        self.memory.append((sampleX, sampleY))

        #if self.fresh_experience_counter == self.batchsize :
        if len(self.memory) >= self.batchsize:

            self.trainingepochtotal += self.trainPass
            #print('Training Epoch:', self.trainingepochtotal)

            # Reset the counter
            # self.fresh_experience_counter=0

            # Get Unique Samples from memory as much as batchsize
            minibatch = random.sample(list(self.memory), self.batchsize)

            for i in np.arange(len(minibatch)):
                X, Y = minibatch[i]
                self.batchSamplesX = np.vstack((self.batchSamplesX, X))
                self.batchSamplesY = np.vstack((self.batchSamplesY, Y))

            #time_Fit = time.time()
            with self.graph.as_default():
                self.model.fit(self.batchSamplesX,
                               self.batchSamplesY,
                               epochs=self.trainPass,
                               batch_size=self.batchsize,
                               verbose=0)
            #print("Get_Fit: %s seconds" % (time.time() - time_Fit)) #0.12662458419799805 seconds

            # Reset the batch
            self.batchSamplesX = np.array([], dtype=float).reshape(
                0, self.size_of_input_units)
            self.batchSamplesY = np.array([], dtype=float).reshape(
                0, self.output_unit)

        #print("Get_Prepare_Fit: %s seconds" % (time.time() - time_Prepare_Fit)) #0.12781143188476562 seconds

        # # Increase counter to ensure enough new samples are gathered
        # self.fresh_experience_counter+=1
        #
        # # Append new sample to Memory of Experiences
        # # Don't worry about its size, since it is a queue
        # self.memory.append((sampleX, sampleY))
        #
        # if self.fresh_experience_counter == self.batchsize :
        #
        #     # Reset the counter
        #     self.fresh_experience_counter=0
        #
        #     # Get Unique Samples from memory as much as batchsize
        #     minibatch = random.sample(list(self.memory), self.batchsize)
        #
        #     for i in np.arange(len(minibatch)):
        #         X, Y = minibatch[i]
        #         self.batchSamplesX = np.vstack((self.batchSamplesX, X))
        #         self.batchSamplesY = np.vstack((self.batchSamplesY, Y))
        #
        #     print('Training Batch... ')
        #
        #     # Fit the model
        #     self.model.fit(self.batchSamplesX, self.batchSamplesY, epochs=self.trainPass, batch_size=1, verbose=0)
        #
        #     # Reset the batch
        #     self.batchSamplesX = np.array([], dtype=np.float).reshape(0, self.size_of_input_units)
        #     self.batchSamplesY = np.array([], dtype=np.float).reshape(0, self.output_unit)

    #TODO: Modify it for multiagent case
    def Get_Action_Index(self, action):

        #print("action:", action) #action: [1 4 4]
        #print(type(action)) #< class 'numpy.ndarray'>
        #print(action.shape) #(3,)
        #
        # if action.shape[0]==1 :
        #     temp = action[0]
        #
        # elif action.shape[0]==2 :
        #     temp = action[0] * self.actionspaceforagent + action[1]
        #
        # elif action.shape[0]==3 :
        #     temp = action[0] * (self.actionspaceforagent**2) \
        #            + action[1] * (self.actionspaceforagent**1) \
        #            + action[2] * (self.actionspaceforagent**0)

        sizeOfAction = action.shape[0]
        temp = 0
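
        # Worked example (illustrative): with actionspaceperagent = 5 and
        # action = [1, 3], the loop below computes 1 * 5**1 + 3 * 5**0 = 8,
        # i.e. the joint action's index in the flattened network output.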

        for i in np.arange(sizeOfAction):
            temp = temp + action[i] * (self.actionspaceforagent**
                                       (sizeOfAction - i - 1))

        return temp

    def Save_Model(self):
        with self.graph.as_default():
            self.model.save_weights("modelOutput.h5")
            print("###############################")
            print("Model saved.......")
            print("###############################")

    def Add_Experience(self, state, action, nextstate, reward, status):
        raise NotImplementedError()

    def __del__(self):
        print('Representation object died.')
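
A minimal usage sketch for Representation_Keras_MultiAgent_TensorInput, assuming the module's original imports (numpy, random, collections.deque, Keras, a TensorFlow 1.x-style tf with get_default_graph, and the Representation base class); the grid positions, joint action, and Q-value target below are arbitrary stand-ins:

# Usage sketch (arbitrary example values)
import numpy as np

rep = Representation_Keras_MultiAgent_TensorInput(gridsize=5,
                                                  actionspaceperagent=5,
                                                  numberofagent=2,
                                                  batch_size=10)

# State = concatenated (row, col) of each agent: agent 0 at (1, 2), agent 1 at (3, 4)
state = np.array([1, 2, 3, 4])

best_joint_action_index, qmax = rep.Get_Greedy_Pair(state)
print(best_joint_action_index, qmax)                 # greedy joint action over 5*5 = 25 outputs

# Write back a Q-value target for the joint action [1, 3] (index 1*5 + 3 = 8);
# once batch_size samples are in memory, Set_Value also fits the network.
rep.Set_Value(state, np.array([1, 3]), 0.5)

rep.Save_Model()                                     # writes modelOutput.h5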