def create_smaller():
    """Build and compile a small binary classifier: 60 inputs -> 30 relu -> 1 sigmoid.

    Returns:
        A compiled Keras Sequential model using binary crossentropy + Adam.
    """
    # create model
    model = Sequential()
    model.add(Dense(30, input_dim=60, kernel_initializer='normal', activation='relu'))
    model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
    # Compile model.
    # Fixes: 'complie' -> 'compile'; metrics entries must be strings
    # ('accuracy'), not a bare (undefined) name.
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
class PolicyNetwork:
    """Three-layer LSTM policy network mapping a feature sequence to sigmoid scores.

    The network expects input reshaped to (batch, timesteps, input_dim) and is
    trained with MSE loss via ``train_on_batch``.
    """

    def __init__(self, input_dim=0, output_dim=0, lr=0.01):
        self.input_dim = input_dim
        self.lr = lr
        # LSTM network.
        # Fixes: 'imput_shape' -> 'input_shape',
        # 'return_sequenbces' / 'return_squence' -> 'return_sequences',
        # 'self.add' -> 'self.model.add' (PolicyNetwork has no .add),
        # 'complie' -> 'compile', 'optmizer' -> 'optimizer'.
        self.model = Sequential()
        self.model.add(LSTM(256, input_shape=(1, input_dim),
                            return_sequences=True, stateful=False, dropout=0.5))
        self.model.add(BatchNormalization())
        self.model.add(LSTM(256, return_sequences=True, stateful=False, dropout=0.5))
        self.model.add(BatchNormalization())
        self.model.add(LSTM(256, return_sequences=False, stateful=False, dropout=0.5))
        self.model.add(BatchNormalization())
        self.model.add(Dense(output_dim))
        self.model.add(Activation('sigmoid'))
        self.model.compile(optimizer=sgd(lr=lr), loss='mse')
        # Last predicted probability vector; None until predict() is called.
        self.prob = None

    def reset(self):
        """Clear the cached prediction."""
        self.prob = None

    def predict(self, sample):
        """Run the network on one flat sample and cache/return the scores.

        The flat sample is reshaped to Keras' (batch, timesteps, features)
        layout before prediction.
        """
        self.prob = self.model.predict(
            np.array(sample).reshape((1, -1, self.input_dim)))[0]
        return self.prob

    def train_on_batch(self, x, y):
        """One gradient step on a single batch.

        Fix: 'self.modele.tain_on_batch' -> 'self.model.train_on_batch'.
        """
        return self.model.train_on_batch(x, y)

    def save_model(self, model_path):
        """Save the policy network weights to model_path (no-op if either is None)."""
        if model_path is not None and self.model is not None:
            self.model.save_weights(model_path, overwrite=True)

    def load_model(self, model_path):
        """Load previously saved policy network weights (no-op if path is None)."""
        if model_path is not None:
            self.model.load_weights(model_path)
class model:
    """CNN classifier wrapper: builds the network, then compiles it for training."""

    def __init__(self):
        self.model = None

    def cnn_model(self):
        """Build the convolutional stack and print its summary.

        Fixes vs. original: layer keyword args (strides/padding) were passed to
        .add() instead of the Conv2D constructor (misplaced parens -> TypeError);
        'seld' -> 'self'; Conv2D padding must be 'same'/'valid' (was an invalid
        (2,2) tuple — assumed 'same', TODO confirm); the classifier head was
        missing the Dense() wrapper around num_class.
        """
        self.model = Sequential()
        self.model.add(Conv2D(32, (3, 3), strides=1, padding="same"))
        self.model.add(Activation("relu"))
        self.model.add(Conv2D(32, (5, 5), padding="same"))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(Conv2D(32, (3, 3), padding="same"))
        self.model.add(Activation("relu"))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(Conv2D(64, (5, 5), padding="same"))
        self.model.add(Activation("relu"))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(Flatten())
        self.model.add(Dense(2048))
        self.model.add(Activation("relu"))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(1024))
        self.model.add(Activation("relu"))
        self.model.add(Dropout(0.5))
        # num_class presumably defined at module level — TODO confirm.
        self.model.add(Dense(num_class))
        self.model.add(Activation("softmax"))
        self.model.summary()

    def train_model(self):
        """Compile the model with SGD for categorical classification.

        Fixes: 'complie' -> 'compile', 'optimize' -> 'optimizer'.
        """
        sgd = SGD(lr=0.001, decay=0.000001, momentum=0.9, nesterov=True)
        self.model.compile(loss="categorical_crossentropy", optimizer=sgd,
                           metrics=["accuracy"])
        # NOTE(review): source is truncated here (dangling token 'train_');
        # the remainder of train_model is missing from this chunk.
# Continuation of a CNN definition (the earlier layers are outside this chunk).
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 3, 3, border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())  # ... (*3) : p.356
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
# Fix: 'complie' -> 'compile'.
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# Train the model ... (*4) : p.356
model.fit(X_train, y_train, batch_size=32, nb_epoch=50)
# Evaluate the model ... (*5)
score = model.evaluate(X_train, y_train)
print("loss :", score[0])  # fix: printed label typo 'lose' -> 'loss'
print("accuracy :", score[1])
# For now this errors; retrying with different options ... in progress.
# Continuation of a CNN definition (the earlier layers are outside this chunk).
model.add(Conv2D(128, (2, 2), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
# Fix: the original had an extra closing parenthesis here (syntax error).
model.add(MaxPooling2D(2, 2))
model.add(Conv2D(256, (2, 2), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(nb_classes, activation='softmax'))
opt = Adam(lr=0.0001)  # tune the learning rate
# Fix: 'complie' -> 'compile'.
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])

# 2. Load the dataset and label it.
from keras.preprocessing.image import ImageDataGenerator
train_data = ImageDataGenerator(rescale=1./255)  # scale every pixel into [0, 1]
val_data = ImageDataGenerator(rescale=1./255)
test_data = ImageDataGenerator(rescale=1./255)
train_set = train_data.flow_from_directory(
    'C:/Users/User/Desktop/dataset/train',  # train set directory
    target_size=(100, 100),                 # resize images
    batch_size=1,                           # images processed per batch
    class_mode='categorical')               # auto-label for multi-class
val_set = test_data.flow_from_directory(
    'C:/Users/User/Desktop/dataset/test')   # validation set directory
from keras.models import Sequential
from keras.layers.core import Dense, Activation

# Minimal linear regression model.
model = Sequential()
# NOTE(review): Dense(2, 1, init=...) is the legacy Keras 0.x positional API —
# confirm against the installed Keras version (modern Keras would be
# Dense(1, input_dim=2, kernel_initializer='uniform', ...)).
model.add(Dense(2, 1, init='uniform', activation='linear'))
# Fix: 'complie' -> 'compile'.
model.compile(loss='mse', optimizer='rmsprop')
from keras.models import Sequential
from keras.layers import Dense
import numpy as np  # fix: 'impoer' -> 'import'

# Toy dataset: learn the identity mapping on 1..5.
x = np.array([1, 2, 3, 4, 5])
y = np.array([1, 2, 3, 4, 5])

model = Sequential()
model.add(Dense(5, input_dim=1, activation='relu'))
model.add(Dense(3))
model.add(Dense(1))
# Fixes: 'complie' -> 'compile', 'lostt' -> 'loss'. metrics=['accuracy'] added
# so evaluate() returns [loss, acc] and the two-value unpack below works.
model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
model.fit(x, y, epochs=100, batch_size=1)
loss, acc = model.evaluate(x, y, batch_size=1)
print('acc : ', acc)
### Step 6 - add a fully connected hidden layer
# units = number of neurons in the hidden layer.
# There is no rule of thumb for the size — pick a number between the input
# and output neuron counts; here units = 128.
classifier.add(Dense(units=128, activation='relu'))

### Step 7 - add the output layer
# Only 1 output neuron, since this is a binary problem.
# Sigmoid instead of ReLU: ReLU ranges from 0 to infinity, sigmoid from 0 to 1
# = probability of the respective class.
classifier.add(Dense(units=1, activation='sigmoid'))

### Step 8 - compile the CNN
# Loss = binary crossentropy (binary problem), Adam optimizer.
# Fix: 'complie' -> 'compile'.
classifier.compile(optimizer='adam', loss='binary_crossentropy',
                   metrics=['accuracy'])

### Step 9 - data augmentation via Keras' ImageDataGenerator
# Data augmentation prevents overfitting and artificially enlarges the dataset:
# 1. many batches of images are created;
# 2. random transformations are applied to a random selection of images in
#    each batch, so the model never sees the exact same image twice.
# flow_from_directory = point at the training/test data directories.
# rescale is mandatory: rescale=1./255 because pixels range from 0 to 255.
# More augmentation options: https://keras.io/preprocessing/image/
from keras.preprocessing.image import ImageDataGenerator

# NOTE(review): the source is truncated mid-call after shear_range; the
# remaining augmentation arguments are missing from this chunk. The call is
# closed here so the file parses — restore the missing arguments.
train_datagen = ImageDataGenerator(rescale=1./255, shear_range=0.2)
# Continuation of a CNN definition (the earlier layers are outside this chunk).
model.add(BatchNormalization())
model.add(Activation('relu'))
# Fix: the original had an extra closing parenthesis here (syntax error).
model.add(MaxPooling2D(2, 2))
model.add(Conv2D(256, (2, 2), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(nb_classes, activation='softmax'))
# model.add(Dense(nb_classes, activation='sigmoid'))
opt = Adam(lr=0.0001)  # tune the learning rate
# Fix: 'complie' -> 'compile'.
model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])

# 2. Load the dataset and label it.
from keras.preprocessing.image import ImageDataGenerator
train_data = ImageDataGenerator(rescale=1./255)  # scale every pixel into [0, 1]
val_data = ImageDataGenerator(rescale=1./255)
test_data = ImageDataGenerator(rescale=1./255)
train_set = train_data.flow_from_directory(
    'C:/Users/User/Desktop/dataset/train',  # train set directory
    target_size=(100, 100),                 # resize images
    batch_size=1,                           # images processed per batch
    class_mode='binary')                    # auto-label for binary classes
val_set = test_data.flow_from_directory(
    'C:/Users/User/Desktop/dataset/test')   # validation set directory