Example no. 1
def cifar10_cnn_model_yingnan(inputShape, nb_classes):
    #inputShape 3dim
    
    model = Sequential()

    model.add(Convolution2D(64, 3, 3, border_mode='same', input_shape=inputShape))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Convolution2D(64, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
              
    model.summary()
    
    return model
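A minimal usage sketch for the builder above; the channels-last 32x32x3 input shape, the 10-class count, and the compile settings are illustrative assumptions rather than part of the original.

# Hypothetical call: CIFAR-10-sized inputs, 10 classes
model = cifar10_cnn_model_yingnan((32, 32, 3), 10)
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])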
class QLearn:
    def __init__(self, actions, epsilon, alpha, gamma):
        
        # instead of a dictionary, we'll be using
        #   a neural network
        # self.q = {}
        self.epsilon = epsilon  # exploration constant
        self.alpha = alpha      # learning rate
        self.gamma = gamma      # discount factor
        self.actions = actions
        
        # Build the neural network
        self.network = Sequential()
        self.network.add(Dense(50, init='lecun_uniform', input_shape=(4,)))
        # self.network.add(Activation('sigmoid'))
        #self.network.add(Dropout(0.2))

        self.network.add(Dense(20, init='lecun_uniform'))
        # self.network.add(Activation('sigmoid'))
        # #self.network.add(Dropout(0.2))

        self.network.add(Dense(2, init='lecun_uniform'))
        # self.network.add(Activation('linear')) #linear output so we can have range of real-valued outputs

        # rms = SGD(lr=0.0001, decay=1e-6, momentum=0.5) # explodes to non
        rms = RMSprop()
        # rms = Adagrad()
        # rms = Adam()
        self.network.compile(loss='mse', optimizer=rms)
        # Get a summary of the network
        self.network.summary()
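A hedged sketch of how such a Q-network is typically consulted with epsilon-greedy exploration; this helper is not part of the original class, and the 4-dimensional state plus the mapping from the two outputs to actions are assumptions.

import numpy as np

def choose_action(qlearn, state):
    # Explore with probability epsilon, otherwise act greedily on the predicted Q-values
    if np.random.rand() < qlearn.epsilon:
        return np.random.choice(qlearn.actions)
    q_values = qlearn.network.predict(np.asarray(state).reshape(1, 4))[0]
    return qlearn.actions[int(np.argmax(q_values))]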
Example no. 3
def main():
	train_X = np.load('train_X.npy')
	train_y = np.load('train_y.npy')
	test_X = np.load('test_X.npy')
	test_y = np.load('test_y.npy')

	model = Sequential()
	model.add(Flatten(input_shape=(15,60,2)))
	model.add(Dense(128))
	model.add(Activation('relu'))
	model.add(Dense(128))
	model.add(Activation('relu'))
	model.add(Dense(128))
	model.add(Activation('relu'))
	model.add(Dense(900))
	model.add(Activation('sigmoid'))

	print(model.summary())

	adam = Adam(0.001)
	#adagrad = Adagrad(lr=0.01)
	model.compile(loss='mse', optimizer=adam)

	model.fit(train_X, train_y, batch_size=batch_size, nb_epoch=nb_epoch,
	          verbose=1, validation_data=(test_X, test_y))
	model.save_weights('model.h5', overwrite=True)
Example no. 4
def mnist_cnn_model(inputShape, nb_classes):
    #inputShape 3dim
    model = Sequential()

    # each input is 1*28*28, output is 32*26*26 because stride is 1 and image size is 28
    model.add(Convolution2D(32, 3, 3,
                            border_mode='valid',
                            input_shape=inputShape))
    model.add(Activation('relu'))
    # output is 32*24*24 because stride is 1 and input is 32*26*26
    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    # pooling will reduce the output size to 32*12*12
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # dropout does not affect the output shape
    model.add(Dropout(0.25))

    # convert the 32*12*12 output to a flat vector of length 4608
    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    model.summary()
              
    return model
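The shape comments above follow the usual 'valid' convolution arithmetic; a quick standalone check, for illustration only:

def conv_output_size(input_size, kernel_size, stride=1):
    # 'valid' convolution: no padding
    return (input_size - kernel_size) // stride + 1

assert conv_output_size(28, 3) == 26   # 1*28*28  -> 32*26*26
assert conv_output_size(26, 3) == 24   # 32*26*26 -> 32*24*24
assert 24 // 2 == 12                   # 2x2 max pooling -> 32*12*12
assert 32 * 12 * 12 == 4608            # Flatten -> 4608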
Example no. 5
def test_nested_sequential(in_tmpdir):
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=2, validation_split=0.1)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=0)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, shuffle=False)

    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test, verbose=0)

    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    fname = 'test_nested_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
Example no. 6
    def build_discriminator(self):

        model = Sequential()

        model.add(Conv2D(32, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0,1),(0,1))))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())

        model.summary()

        img = Input(shape=self.img_shape)

        features = model(img)
        valid = Dense(1, activation="sigmoid")(features)
        label = Dense(self.num_classes+1, activation="softmax")(features)

        return Model(img, [valid, label])
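A hedged sketch of the compile step that commonly follows such a two-output (real/fake plus class) discriminator; the losses, optimizer settings, and metrics are assumptions, not taken from the original.

from keras.optimizers import Adam

# Written as if inside the owning GAN class; all settings below are illustrative
discriminator = self.build_discriminator()
discriminator.compile(loss=['binary_crossentropy', 'categorical_crossentropy'],
                      optimizer=Adam(0.0002, 0.5),
                      metrics=['accuracy'])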
    def build_model(self):
        model = Sequential()
        model.add(Dense(24, input_dim=self.state_size, activation='relu'))
        model.add(Dense(24, activation='relu'))
        model.add(Dense(self.action_size, activation='softmax'))
        model.summary()
        return model
Example no. 8
def trainModel():
    inputs, correctOutputs = getNNData()

    print("Collected data")

    trainingInputs = inputs[:len(inputs)//2]
    trainingOutputs = correctOutputs[:len(correctOutputs)//2]

    testInputs = inputs[len(inputs)//2:]
    testOutputs = correctOutputs[len(correctOutputs)//2:]

    model = Sequential()
    model.add(Dense(24, input_shape=(24, )))
    model.add(Activation('tanh'))
    model.add(Dense(24))
    model.add(Activation('tanh'))
    model.add(Dense(5))
    model.add(Activation('softmax'))

    model.summary()

    model.compile(loss='mean_squared_error', optimizer=SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True))

    model.fit(trainingInputs, trainingOutputs, validation_data=(testInputs, testOutputs))
    score = model.evaluate(testInputs, testOutputs, verbose=0)
    print(score)

    json_string = model.to_json()
    open('my_model_architecture.json', 'w').write(json_string)
    model.save_weights('my_model_weights.h5', overwrite=True)
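The architecture and weights are saved separately above; a hedged sketch of restoring them later (the file names follow the snippet, the rest is illustrative):

from keras.models import model_from_json

with open('my_model_architecture.json') as f:
    restored = model_from_json(f.read())
restored.load_weights('my_model_weights.h5')
restored.summary()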
Example no. 9
    def build_critic(self):

        model = Sequential()

        model.add(Conv2D(16, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(32, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=1, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(1))

        model.summary()

        img = Input(shape=self.img_shape)
        validity = model(img)

        return Model(img, validity)
def buildIrrigationDetectorModel(inputShape):
	nb_filter = 64
	filter_length = 4
	
	model = Sequential()
	model.add(AveragePooling1D(pool_size=4,input_shape=inputShape))
	model.add(Conv1D(filters=nb_filter,
	                 kernel_size=filter_length,
	                 padding="valid",
	                 activation="relu"))
	model.add(MaxPooling1D(pool_size=2))
	model.add(BatchNormalization())
	model.add(Conv1D(filters=nb_filter,
	                 kernel_size=filter_length,
	                 padding="valid",
	                 activation="relu"))

	model.add(Flatten())
	model.add(Dense(100, activation='relu'))
	model.add(Dense(1, activation='sigmoid'))

	optimizer = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
	model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'] )
	model.summary()
	return model
Example no. 11
    def build_generator(self):

        model = Sequential()

        model.add(Dense(128 * 7 * 7, activation="relu", input_dim=100))
        model.add(Reshape((7, 7, 128)))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(self.channels, kernel_size=3, padding='same'))
        model.add(Activation("tanh"))

        model.summary()

        noise = Input(shape=(100,))
        label = Input(shape=(1,), dtype='int32')

        label_embedding = Flatten()(Embedding(self.num_classes, 100)(label))

        input = multiply([noise, label_embedding])

        img = model(input)

        return Model([noise, label], img)
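A hedged sampling sketch for the conditional generator above; the batch size and class index are arbitrary, and np plus the surrounding class are assumed to be in scope.

# Written as if inside the owning class; values are illustrative
generator = self.build_generator()
noise = np.random.normal(0, 1, (1, 100))   # matches Input(shape=(100,))
label = np.array([[3]])                    # an arbitrary class index
img = generator.predict([noise, label])    # shape: (1,) + image shape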
Example no. 12
def test_recursive():
    # test layer-like API
    graph = Graph()
    graph.add_input(name='input1', input_shape=(32,))
    graph.add_node(Dense(16), name='dense1', input='input1')
    graph.add_node(Dense(4), name='dense2', input='input1')
    graph.add_node(Dense(4), name='dense3', input='dense1')
    graph.add_output(name='output1', inputs=['dense2', 'dense3'],
                     merge_mode='sum')

    seq = Sequential()
    seq.add(Dense(32, input_shape=(32,)))
    seq.add(graph)
    seq.add(Dense(4))

    seq.compile('rmsprop', 'mse')

    seq.fit(X_train_graph, y_train_graph, batch_size=10, nb_epoch=10)
    loss = seq.evaluate(X_test_graph, y_test_graph)

    # test serialization
    config = seq.get_config()
    new_graph = Sequential.from_config(config)

    seq.summary()
    json_str = seq.to_json()
    new_graph = model_from_json(json_str)

    yaml_str = seq.to_yaml()
    new_graph = model_from_yaml(yaml_str)
Example no. 13
def run(dataset):

    batch_size = 16
    nb_epoch = 20

    train_X, train_y = dataset['train']
    dev_X, dev_y = dataset['dev']
    test_X, test_y = dataset['test']

    
    print('train_X shape:', train_X.shape)

    print('Building model...')
    
    model = Sequential()
    model.add(Dense(1024, input_dim=train_X.shape[1], activation='sigmoid'))
    model.add(Dropout(0.2))
    model.add(Dense(1024, activation='sigmoid'))
    model.add(Dropout(0.2))
    model.add(Dense(2, activation='softmax'))

    model.summary()

    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    
    model.fit(train_X, train_y, 
            batch_size=batch_size, nb_epoch=nb_epoch,
            verbose=1, validation_data=(dev_X, dev_y))
    
    score = model.evaluate(test_X, test_y, verbose=0)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])
Example no. 14
def test_temporal_classification():
    '''
    Classify temporal sequences of float numbers
    of length 3 into 2 classes using a
    single layer of GRU units and softmax applied
    to the last activations of the units.
    '''
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=200,
                                                         num_test=20,
                                                         input_shape=(3, 4),
                                                         classification=True,
                                                         num_classes=2)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    model = Sequential()
    model.add(layers.GRU(8,
                         input_shape=(x_train.shape[1], x_train.shape[2])))
    model.add(layers.Dense(y_train.shape[-1], activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    model.summary()
    history = model.fit(x_train, y_train, epochs=4, batch_size=10,
                        validation_data=(x_test, y_test),
                        verbose=0)
    assert(history.history['acc'][-1] >= 0.8)
    config = model.get_config()
    model = Sequential.from_config(config)
def get_convBNeluMPdrop(num_conv_layers, nums_feat_maps, feat_scale_factor, conv_sizes, pool_sizes, dropout_conv, input_shape):
	#[Convolutional Layers]
	model = Sequential()
	input_shape_specified = False
	for conv_idx in xrange(num_conv_layers):
		# add conv layer
		n_feat_here = int(nums_feat_maps[conv_idx]*feat_scale_factor)
		if not input_shape_specified:
			print(' ---->>First conv layer is being added! with %d' % n_feat_here)
			model.add(Convolution2D(n_feat_here, conv_sizes[conv_idx][0], conv_sizes[conv_idx][1], 
									input_shape=input_shape,
									border_mode='same',  
									init='he_normal'))
			input_shape_specified = True
		else:
			print(' ---->>%d-th conv layer is being added with %d units' % (conv_idx, n_feat_here))
			model.add(Convolution2D(n_feat_here, conv_sizes[conv_idx][0], conv_sizes[conv_idx][1], 
									border_mode='same',
									init='he_normal'))
		# add BN, Activation, pooling, and dropout
		model.add(BatchNormalization(axis=1, mode=2))
		model.add(keras.layers.advanced_activations.ELU(alpha=1.0)) # TODO: select activation
		
		model.add(MaxPooling2D(pool_size=pool_sizes[conv_idx]))
		if not dropout_conv == 0.0:
			model.add(Dropout(dropout_conv))
			print(' ---->>Add dropout of %f for %d-th conv layer' % (dropout_conv, conv_idx))
	model.summary()
	return model
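An illustrative call of the block builder above; every argument value is an assumption, with a channels-first spectrogram-like input shape chosen to match BatchNormalization(axis=1).

conv_block = get_convBNeluMPdrop(num_conv_layers=3,
                                 nums_feat_maps=[32, 64, 64],
                                 feat_scale_factor=1.0,
                                 conv_sizes=[(3, 3), (3, 3), (3, 3)],
                                 pool_sizes=[(2, 2), (2, 2), (2, 2)],
                                 dropout_conv=0.25,
                                 input_shape=(1, 96, 1366))   # hypothetical (channels, freq, time)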
Example no. 16
def main():
    ext = extension_from_parameters()

    out_dim = 1
    loss = 'mse'
    metrics = None
    #metrics = ['accuracy'] if CATEGORICAL else None

    datagen = RegressionDataGenerator()
    train_gen = datagen.flow(batch_size=BATCH_SIZE)
    val_gen = datagen.flow(val=True, batch_size=BATCH_SIZE)
    val_gen2 = datagen.flow(val=True, batch_size=BATCH_SIZE)

    model = Sequential()
    for layer in LAYERS:
        if layer:
            model.add(Dense(layer, input_dim=datagen.input_dim, activation=ACTIVATION))
            if DROP:
                model.add(Dropout(DROP))
    model.add(Dense(out_dim))

    model.summary()
    model.compile(loss=loss, optimizer='rmsprop', metrics=metrics)

    train_samples = int(datagen.n_train/BATCH_SIZE) * BATCH_SIZE
    val_samples = int(datagen.n_val/BATCH_SIZE) * BATCH_SIZE

    history = BestLossHistory(val_gen2, val_samples, ext)
    checkpointer = ModelCheckpoint(filepath='model'+ext+'.h5', save_best_only=True)

    model.fit_generator(train_gen, train_samples,
                        nb_epoch = NB_EPOCH,
                        validation_data = val_gen,
                        nb_val_samples = val_samples,
                        callbacks=[history, checkpointer])
def create_model(train_X, test_X, train_y, test_y):
    model = Sequential()
    model.add(Dense(500, input_shape=(238,),kernel_initializer= {{choice(['glorot_uniform','random_uniform'])}}))
    model.add(BatchNormalization(epsilon=1e-06, mode=0, momentum=0.9, weights=None))
    model.add(Activation({{choice(['relu','sigmoid','tanh'])}}))
    model.add(Dropout({{uniform(0, 0.3)}}))

    model.add(Dense({{choice([128,256])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 0.4)}}))

    model.add(Dense({{choice([128,256])}}))
    model.add(Activation({{choice(['relu','tanh'])}}))
    model.add(Dropout(0.3))

    model.add(Dense(41))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer={{choice(['rmsprop', 'adam'])}})
    model.summary()
    early_stops = EarlyStopping(patience=3, monitor='val_acc')
    ckpt_callback = ModelCheckpoint('keras_model', 
                                 monitor='val_loss', 
                                 verbose=1, 
                                 save_best_only=True, 
                                 mode='auto')

    model.fit(train_X, train_y, batch_size={{choice([128,264])}}, nb_epoch={{choice([10,20])}}, validation_data=(test_X, test_y), callbacks=[early_stops,ckpt_callback])
    score, acc = model.evaluate(test_X, test_y, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
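The double-brace expressions above are hyperas search-space templates; a hedged sketch of how such a function is usually handed to hyperas, where data is an assumed helper that returns train_X, test_X, train_y, test_y.

from hyperopt import Trials, tpe
from hyperas import optim

best_run, best_model = optim.minimize(model=create_model,
                                      data=data,        # hypothetical data() helper
                                      algo=tpe.suggest,
                                      max_evals=10,
                                      trials=Trials())
print(best_run)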
Example no. 18
    def build_generator(self):

        model = Sequential()

        model.add(Dense(8 * 128 * 128, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((128, 128, 8)))

        # model.add(UpSampling2D())
        # model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(Conv2DTranspose(128, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))

        # # model.add(UpSampling2D())
        # # model.add(Conv2D(128, kernel_size=3, padding="same"))
        # model.add(Conv2DTranspose(128, kernel_size=3, padding="same"))
        # model.add(BatchNormalization(momentum=0.8))
        # model.add(LeakyReLU(alpha=0.2))

        # model.add(UpSampling2D())
        # model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(Conv2DTranspose(64, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))

        # model.add(Conv2D(self.channels, kernel_size=3, padding="same"))
        model.add(Conv2DTranspose(self.channels, kernel_size=3, padding="same"))
        model.add(Activation("tanh"))

        model.summary()

        noise = Input(shape=(self.latent_dim,))
        img = model(noise)

        return Model(noise, img)
Example no. 19
File: yelp.py Project: wlf061/nlp
    def baseline_model():

        # CNN parameters
        embedding_dims = 50
        filters = 250
        kernel_size = 3
        hidden_dims = 250

        model = Sequential()
        model.add(Embedding(max_features, embedding_dims))

        model.add(Conv1D(filters,
                         kernel_size,
                         padding='valid',
                         activation='relu',
                         strides=1))
        # pooling
        model.add(GlobalMaxPooling1D())

        model.add(Dense(2, activation='softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])

        # visualization
        plot_model(model, to_file='yelp-cnn-model.png',show_shapes=True)

        model.summary()

        return model
Example no. 20
def create_LeNet(weights_path=None):
    """
        Use keras to create LeNet structure

        Return: the model object of keras
    """
    model = Sequential()

    # 1st Convolution layer
    model.add(Convolution2D(8, 28, 28, border_mode='same', input_shape=(3, 200, 200)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2)))

    # 2nd Convolution layer
    model.add(Convolution2D(20, 10, 10, border_mode='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2)))

    # Dense
    model.add(Flatten())
    model.add(Dense(120, activation='relu'))
    model.add(Dense(2, activation='softmax'))

    print(model.summary())
    return model
Example no. 21
def create_fullCIFAR10():
    """
        Use keras to create CIFAR-10 archetecture 

        Return: the model object of keras
    """
    # 1st Convolution layer
    model = Sequential()
    model.add(Convolution2D(32, 5, 5, border_mode='valid', input_shape=(3, 200, 200)))
    model.add(MaxPooling2D(pool_size=(3, 3)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())

    # 2nd Convolution layer
    model.add(Convolution2D(32, 5, 5, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(3, 3)))
    model.add(BatchNormalization())

    # 3rd Convolution layer
    model.add(Convolution2D(64, 5, 5, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(3, 3)))
    
    # Dense
    model.add(Flatten())
    model.add(Dense(250, init='normal'))
    model.add(Activation('tanh'))

    # Softmax
    model.add(Dense(2, init='normal'))
    model.add(Activation('softmax'))

    print(model.summary())
    return model
Example no. 22
    def build_discriminator(self):

        model = Sequential()

        model.add(Dense(512, input_dim=np.prod(self.img_shape)))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.4))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.4))
        model.add(Dense(1, activation='sigmoid'))
        model.summary()

        img = Input(shape=self.img_shape)
        label = Input(shape=(1,), dtype='int32')

        label_embedding = Flatten()(Embedding(self.num_classes, np.prod(self.img_shape))(label))
        flat_img = Flatten()(img)

        model_input = multiply([flat_img, label_embedding])

        validity = model(model_input)

        return Model([img, label], validity)
Example no. 23
def test_image_classification():
    np.random.seed(1337)
    input_shape = (16, 16, 3)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=500,
                                                         num_test=200,
                                                         input_shape=input_shape,
                                                         classification=True,
                                                         num_classes=4)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    model = Sequential([
        layers.Conv2D(filters=8, kernel_size=3,
                      activation='relu',
                      input_shape=input_shape),
        layers.MaxPooling2D(pool_size=2),
        layers.Conv2D(filters=4, kernel_size=(3, 3),
                      activation='relu', padding='same'),
        layers.GlobalAveragePooling2D(),
        layers.Dense(y_test.shape[-1], activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    model.summary()
    history = model.fit(x_train, y_train, epochs=10, batch_size=16,
                        validation_data=(x_test, y_test),
                        verbose=0)
    assert history.history['val_acc'][-1] > 0.75
    config = model.get_config()
    model = Sequential.from_config(config)
Example no. 24
def test_vector_classification():
    '''
    Classify random float vectors into 2 classes with logistic regression
    using a 2-layer neural network with ReLU hidden units.
    '''
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=500,
                                                         num_test=200,
                                                         input_shape=(20,),
                                                         classification=True,
                                                         num_classes=2)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    # Test with Sequential API
    model = Sequential([
        layers.Dense(16, input_shape=(x_train.shape[-1],), activation='relu'),
        layers.Dense(8),
        layers.Activation('relu'),
        layers.Dense(y_train.shape[-1], activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    model.summary()
    history = model.fit(x_train, y_train, epochs=15, batch_size=16,
                        validation_data=(x_test, y_test),
                        verbose=0)
    assert(history.history['val_acc'][-1] > 0.8)
    config = model.get_config()
    model = Sequential.from_config(config)
Example no. 25
    def build_generator(self):

        noise_shape = (self.noise_dims,)

        model = Sequential()

        model.add(Dense(self.noise_dims, input_shape=noise_shape))  #256
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))

        model.add(Dense(int(self.noise_dims*1.5)))  # 512
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))

        model.add(Dense(int(self.noise_dims*2)))  # 512
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))

        model.add(Dense(150))  # 1000
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))

        model.add(Dense(np.prod(self.img_shape), activation='tanh'))
        model.add(Reshape(self.img_shape))

        model.summary()

        noise = Input(shape=noise_shape)
        img = model(noise)

        return Model(noise, img)
Example no. 26
def moustafa_model1(inputShape, nb_classes):
    model = Sequential()

    model.add(Convolution2D(62, 3, 3, border_mode='same', input_shape=inputShape))
    model.add(Activation('relu'))
    model.add(Convolution2D(62, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(128, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(128, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(64, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    model.summary()

    return model
Example no. 27
    def build_generators(self):

        noise_shape = (100,)
        noise = Input(shape=noise_shape)

        # Shared weights between generators
        model = Sequential()
        model.add(Dense(256, input_shape=noise_shape))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))

        latent = model(noise)

        # Generator 1
        g1 = Dense(1024)(latent)
        g1 = LeakyReLU(alpha=0.2)(g1)
        g1 = BatchNormalization(momentum=0.8)(g1)
        g1 = Dense(np.prod(self.img_shape), activation='tanh')(g1)
        img1 = Reshape(self.img_shape)(g1)

        # Generator 2
        g2 = Dense(1024)(latent)
        g2 = LeakyReLU(alpha=0.2)(g2)
        g2 = BatchNormalization(momentum=0.8)(g2)
        g2 = Dense(np.prod(self.img_shape), activation='tanh')(g2)
        img2 = Reshape(self.img_shape)(g2)

        model.summary()

        return Model(noise, img1), Model(noise, img2)
Example no. 28
    def build_generator(self):

        model = Sequential()

        model.add(Dense(256, input_dim=self.latent_dim))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(1024))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(np.prod(self.img_shape), activation='tanh'))
        model.add(Reshape(self.img_shape))

        model.summary()

        noise = Input(shape=(self.latent_dim,))
        label = Input(shape=(1,), dtype='int32')
        label_embedding = Flatten()(Embedding(self.num_classes, self.latent_dim)(label))

        model_input = multiply([noise, label_embedding])
        img = model(model_input)

        return Model([noise, label], img)
Example no. 29
def mnist_transferCNN_model(inputShape, nb_classes):
    # inputShape 3dim
    # define two groups of layers: feature (convolutions) and classification (dense)
    feature_layers = [
        Convolution2D(32, 3, 3,
                      border_mode='valid',
                      input_shape=inputShape),
        Activation('relu'),
        Convolution2D(32, 3, 3),
        Activation('relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Flatten(),
    ]
    classification_layers = [
        Dense(128),
        Activation('relu'),
        Dropout(0.5),
        Dense(nb_classes),
        Activation('softmax')
    ]

    # create complete model
    model = Sequential()
    for l in feature_layers + classification_layers:
        model.add(l)
        
    model.summary()
    
    return model
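The feature/classification split above is what enables transfer learning; a hedged sketch of freezing the convolutional front end before fine-tuning on a new task (input shape, class count, and compile settings are assumptions):

model = mnist_transferCNN_model((28, 28, 1), 5)   # hypothetical shape and class count
for layer in model.layers[:7]:                    # the 7 feature_layers come first
    layer.trainable = False
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])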
Example no. 30
def create_Tensorflow_smallCIFAR10():
    """
        Use keras to create CIFAR-10 built in tensorflow github example

        Return: the model object of keras
    """
    # 1st Convolution layer
    model = Sequential()
    model.add(Convolution2D(64, 3, 3, border_mode='valid', input_shape=(3, 200, 200), bias=True))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2,2)))
    model.add(BatchNormalization())

    # 2nd Convolution layer
    model.add(Convolution2D(32, 3, 3, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2,2)))
    
    # 1st Dense
    model.add(Flatten())
    model.add(Dense(384, init=init_tensorflowCIFAR))
    model.add(Activation('relu'))

    # 2nd Dense
    model.add(Dense(192, init='normal'))
    model.add(Activation('relu'))

    # Softmax
    model.add(Dense(2, init='normal'))
    model.add(Activation('softmax'))

    print(model.summary())
    return model
Example no. 31
def main():
    (txt_train, label) = read_data(train_path, train=True)
    (txt_test, _) = read_data(test_path, train=False)
    label = label.reshape(-1, 1)

    ######### preprocess
    print('Convert to index sequences.')
    corpus = txt_train + txt_test

    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(corpus)
    word_index = tokenizer.word_index
    train_sequences = tokenizer.texts_to_sequences(txt_train)

    pickle.dump(tokenizer, open('tokenizer.pkl', 'wb'))
    print('Pad sequences:')
    X = sequence.pad_sequences(train_sequences)
    Y = label
    output_dim = 1
    #Y = np.concatenate((1-label,label), axis=1); output_dim=2
    print('Split data into training data and validation data')
    (x_train, y_train), (x_val, y_val) = split_data(X, Y, split_ratio)
    max_article_length = x_train.shape[1]

    #########
    print('maxlen', max_article_length)
    print('Get embedding dict from glove.')
    embedding_dict = get_embedding_dict('./glove.twitter.27B.%dd.txt' %
                                        embedding_dim)
    print('Found %s word vectors.' % len(embedding_dict))
    max_features = len(word_index) + 1  # i.e. the number of words
    print('Create embedding matrix.')
    embedding_matrix = get_embedding_matrix(word_index, embedding_dict,
                                            max_features, embedding_dim)

    print('Build model...')

    csv_logger = CSVLogger('training_report.csv', append=True)
    earlystopping = EarlyStopping(monitor='val_acc',
                                  patience=5,
                                  verbose=1,
                                  mode='max')
    checkpoint = ModelCheckpoint(filepath='best.h5',
                                 verbose=1,
                                 save_best_only=True,
                                 monitor='val_acc',
                                 mode='max')
    model = Sequential()
    model.add(
        Embedding(max_features,
                  embedding_dim,
                  weights=[embedding_matrix],
                  input_length=max_article_length,
                  trainable=False))
    model.add(
        Bidirectional(
            LSTM(128, dropout=0.4, recurrent_dropout=0.3, activation='tanh')))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.4))
    model.add(BatchNormalization())
    model.add(Dense(32, activation='relu'))
    model.add(Dropout(0.4))
    model.add(BatchNormalization())
    model.add(Dense(output_dim, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['acc'])
    model.summary()

    model.fit(x_train,
              y_train,
              epochs=epoch_num,
              batch_size=batch,
              validation_data=(x_val, y_val),
              callbacks=[earlystopping, checkpoint, csv_logger])
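A hedged follow-up showing how the best checkpoint saved above might be restored and applied to the test texts; the names reuse the snippet's variables, and the 0.5 threshold is an assumption.

from keras.models import load_model

best = load_model('best.h5')
test_sequences = tokenizer.texts_to_sequences(txt_test)
x_test = sequence.pad_sequences(test_sequences, maxlen=max_article_length)
pred = (best.predict(x_test) > 0.5).astype(int)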
Example no. 32
def train_lstm_for_visualization():
    checkpoints = glob(MODEL_PATH + "*.h5")
    if len(checkpoints) > 0:
        checkpoints = natsorted(checkpoints)
        assert len(checkpoints) != 0, "No checkpoints for visualization found."
        checkpoint_file = checkpoints[-1]
        print("Loading [{}]".format(checkpoint_file))
        model = load_model(checkpoint_file)
        model.compile(optimizer="adam",
                      loss="categorical_crossentropy",
                      metrics=["accuracy", utils.f1_score])
        print(model.summary())

        # Load the data
        x_train, y_train, x_test, y_test, vocab_size, tokenizer, max_tweet_length = prepare_data(
            SHUFFLE)

        # Get the word to index and the index to word mappings
        word_index = tokenizer.word_index
        index_to_word = {index: word for word, index in word_index.items()}

        # Evaluate the previously trained model on test data
        test_loss, test_acc, test_fscore = model.evaluate(x_test,
                                                          y_test,
                                                          verbose=1,
                                                          batch_size=256)
        print("Loss: %.3f\nF-score: %.3f\n" % (test_loss, test_fscore))
        return model, index_to_word, x_test
    else:
        # Load the data
        x_train, y_train, x_test, y_test, vocab_size, tokenizer, max_tweet_length = prepare_data(
            SHUFFLE)

        # Get the word to index and the index to word mappings
        word_index = tokenizer.word_index
        index_to_word = {index: word for word, index in word_index.items()}

        # Build, evaluate and save the model
        model = Sequential()
        model.add(
            Embedding(input_dim=vocab_size,
                      output_dim=EMBEDDING_DIM,
                      input_length=max_tweet_length,
                      embeddings_initializer="glorot_normal",
                      name="embedding_layer"))
        model.add(
            LSTM(units=HIDDEN_UNITS,
                 name="recurrent_layer",
                 activation="tanh",
                 return_sequences=True))
        model.add(Flatten())
        model.add(Dense(DENSE_UNITS, activation="relu", name="dense_layer"))
        model.add(Dense(NO_OF_CLASSES, activation="softmax"))
        model.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer=keras.optimizers.Adadelta(),
                      metrics=["accuracy", utils.f1_score])
        model.summary()
        checkpoint = ModelCheckpoint(monitor="val_acc",
                                     filepath=MODEL_PATH +
                                     "model_{epoch:02d}_{val_acc:.3f}.h5",
                                     save_best_only=True,
                                     mode="max")
        model.fit(x_train,
                  y_train,
                  batch_size=BATCH_SIZE,
                  epochs=EPOCHS,
                  validation_data=(x_test, y_test),
                  callbacks=[checkpoint])
        score = model.evaluate(x_test, y_test)
        print("Loss: %.3f\nF-score: %.3f\n" % (score[0], score[1]))
        return model, index_to_word, x_test
Example no. 33
max_words = 200
input_length = 10

model = Sequential()
model.add(Embedding(max_words, embedding_dim, input_length=input_length))
model.add(Convolution1D(128, kernel_size=3,
                        activation='relu'))  # 10 - 3 + 1 = 8
model.add(Convolution1D(64, kernel_size=3,
                        activation='relu'))  # 10 - 3 + 1 = 6
model.add(Convolution1D(32, kernel_size=3,
                        activation='relu'))  # 10 - 3 + 1 = 4
model.add(Flatten())  # 128 = 32 * 4
model.add(Dropout(0.2))
model.add(Dense(128, activation='sigmoid'))  # W = 128 x 128
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))
model.summary()

model.compile(loss='mse', optimizer='adam')

input_array = np.random.randint(n_in, size=(mb, input_length))

output_array = model.predict(input_array)
assert output_array.shape == (mb, 1)

print(
    "Saving model with embedding into several Conv1D layers into Flatten and Dense for backend {} and keras major version {}"
    .format(backend, major_version))
model.save("{}embedding_conv1d_extended_{}_{}.h5".format(
    base_path, backend, major_version))
    def build_model(self):
        # Build the VGG network for 10 classes with massive dropout and weight decay, as described in the paper.

        model = Sequential()
        weight_decay = 0.0005

        model.add(
            Conv2D(64, (3, 3),
                   padding='same',
                   input_shape=(32, 32, 3),
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.3))
        model.add(
            Conv2D(64, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(
            Conv2D(128, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))
        model.add(
            Conv2D(128, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(
            Conv2D(256, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))
        model.add(
            Conv2D(256, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))
        model.add(
            Conv2D(256, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(
            Conv2D(512, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))
        model.add(
            Conv2D(512, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))
        model.add(
            Conv2D(512, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(
            Conv2D(512, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))
        model.add(
            Conv2D(512, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))
        model.add(
            Conv2D(512, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.5))
        model.add(Flatten())
        model.add(Dense(200))
        model.add(Activation('softmax'))
        model.summary()
        return model
Example no. 35
env.seed(123)
nb_actions = env.action_space.n

# Next, we build a very simple model regardless of the dueling architecture.
# If you enable the dueling network in DQN, it will build a dueling network based on your model automatically.
# Alternatively, you can build a dueling network yourself and turn off the dueling network option in DQN.
model = Sequential()
model.add(Flatten(input_shape=(1, ) + env.observation_space.shape))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(nb_actions, activation='linear'))
print(model.summary())

# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
# even the metrics!
memory = SequentialMemory(limit=50000, window_length=1)
policy = BoltzmannQPolicy()
# enable the dueling network
# you can specify the dueling_type to one of {'avg','max','naive'}
dqn = DQNAgent(model=model,
               nb_actions=nb_actions,
               memory=memory,
               nb_steps_warmup=10,
               enable_dueling_network=True,
               dueling_type='avg',
               target_model_update=1e-2,
               policy=policy)
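A hedged continuation in the usual keras-rl style, showing how the agent above is typically compiled and run; the optimizer, step count, and episode count are illustrative.

from keras.optimizers import Adam

dqn.compile(Adam(lr=1e-3), metrics=['mae'])
dqn.fit(env, nb_steps=50000, visualize=False, verbose=2)
dqn.test(env, nb_episodes=5, visualize=True)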
Example no. 36
log_file = open(log_name, 'w')
log_file.write('VGG/Tabular Late Fusion \n')

base_model1.summary(print_fn=lambda x: log_file.write(x + '\n\n'))

# Build tabular model
base_model2 = Sequential()
base_model2.add(Dense(12, input_dim=len(dummy_tabular_cols), activation='relu'))
base_model2.add(Dropout(DROPOUT_PROB))
base_model2.add(Dense(8, activation='relu'))
base_model2.add(Dropout(DROPOUT_PROB))
base_model2.add(Dense(NUM_CLASSES, activation='softmax'))
base_model2.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

base_model2.summary(print_fn=lambda x: log_file.write(x + '\n\n'))
x2 = base_model2.output

# Build text model
n_words = text.shape[1]
base_model3 = Sequential()
base_model3.add(Dense(50, input_shape=(n_words,), activation='relu'))
base_model3.add(Dense(4, activation='sigmoid'))
base_model3.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

base_model3.summary(print_fn=lambda x: log_file.write(x + '\n\n'))
x3 = base_model3.output

# LATE FUSION
x = concatenate([x1, x2, x3])
x = Sequential()(x)
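A hedged sketch of one way this late-fusion head could be completed into a trainable model; the layer sizes are assumptions, and x1, the three base models, NUM_CLASSES, and log_file come from the surrounding script.

from keras.models import Model

fused = concatenate([x1, x2, x3])                 # same fusion point as above
fused = Dense(32, activation='relu')(fused)       # hypothetical fusion layer
fusion_output = Dense(NUM_CLASSES, activation='softmax')(fused)

fused_model = Model(inputs=[base_model1.input, base_model2.input, base_model3.input],
                    outputs=fusion_output)
fused_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
fused_model.summary(print_fn=lambda s: log_file.write(s + '\n\n'))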
Example no. 37
def warp(data_x, y_train):
    batch_size = 10
    nb_epoch = 10
    x_train = data_x

    ###construct the model
    model = Sequential()
    model.add(Dense(2, input_shape=(2, ), activation='tanh'))
    model.add(Dense(2, activation='softmax'))

    ###compile and summarize model
    model.summary()
    model.compile(loss='binary_crossentropy',
                  optimizer=RMSprop(lr=1e-2),
                  metrics=['accuracy'])
    ###training
    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              verbose=1,
              shuffle=True)

    ##loading warping datasets
    y_train2 = numpy.copy(y_train)
    for i in xrange(len(y_train2)):
        if y_train2[i][1] == 0:
            y_train2[i] = [0., 1.]

    ### construction of the warp setup with an added bias layer at the beginning
    warp = Sequential()
    warp.add(Dense(2, input_shape=(2, ), init='identity'))
    warp.add(
        Dense(2,
              input_shape=(2, ),
              activation='tanh',
              weights=[
                  model.layers[0].W.get_value(), model.layers[0].b.get_value()
              ]))
    warp.add(
        Dense(2,
              activation='softmax',
              weights=[
                  model.layers[1].W.get_value(), model.layers[1].b.get_value()
              ]))

    for i in xrange(len(model.layers)):
        warp.layers[i + 1].trainable = False

    ### making sure only bias is allowed to change
    warp.layers[0].non_trainable_weights.append(
        warp.layers[0].trainable_weights[0])
    warp.layers[0].trainable_weights = warp.layers[0].trainable_weights[1:]

    warp.summary()
    warp.compile(loss='binary_crossentropy',
                 optimizer=RMSprop(lr=1e-3),
                 metrics=['accuracy'])

    def stop_training(model, x_train, y_train2, bias):
        return numpy.argmax(y_train2) == numpy.argmax(
            model.predict(x_train + bias))

    nb_epoch = 300
    sgd = RMSprop(lr=1e-3)
    biases = []
    """  make a Callback for monitoring for each point"""
    for point in xrange(x_train.shape[0]):
        print "data point #" + str(point)
        print " "
        ### setting the biases to zero between each data point
        warp.layers[0].b.set_value(
            numpy.zeros(warp.layers[0].b.get_value().shape[0],
                        dtype="float32"))
        ### warping....
        epochs = 0
        while epochs < nb_epoch:
            if numpy.argmax(y_train2[point:point + 1][0]) == numpy.argmax(
                    model.predict(x_train[point:point + 1] +
                                  warp.layers[0].b.get_value())):
                break
            print "Epoch " + str(epochs) + " - patient " + str(point)
            print " "
            print(model.predict(x_train[point:point + 1]))
            print((warp.predict(x_train[point:point + 1])))
            warp.fit(x_train, [y_train2],
                     batch_size=batch_size,
                     nb_epoch=1,
                     verbose=0,
                     shuffle=True)
            epochs += 1
        biases.append(warp.layers[0].b.get_value())
    return biases
          input_shape=env.observation_space.shape,
          activation="relu"))
q_network.add(Dropout(rate=0.01))  # not bad
# q_network.add(BatchNormalization())
# noisy_layer_2 = NoisyDense(units=32, activation="relu")
q_network.add(Dense(units=64, activation="relu"))
q_network.add(Dropout(rate=0.01))  # not bad
# q_network.add(BatchNormalization())
q_network.add(Dense(units=32, activation="relu"))
q_network.add(Dropout(rate=0.01))  # not bad
# q_network.add(BatchNormalization())
q_network.add(Dense(units=env.action_space.n, activation="linear"))

q_network.compile(optimizer="adam", loss="mean_squared_error")

print(q_network.summary())


class EpisodeEndCallback(TensorboardAgentCallback):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.episode_begin_times = []
        self.episode_end_times = []

    def on_episode_begin(self, episode_n):
        self.episode_begin_times.append(time.time())

    def on_episode_end(self, episode_n, **kwargs):
        self.episode_end_times.append(time.time())
        if episode_n > 0 and episode_n % 100 == 0:
            t = self.episode_end_times[-1] - self.episode_begin_times[-100]
Example no. 39
    y.append(0)

dir = "../Data/Processed1"
for filename in os.listdir(dir):
    img = np.load(os.path.join(dir, filename))[:, :, 0]
    smooth_img = skimage.filters.gaussian(img, 8)
    sobel_img = skimage.filters.sobel(img)
    im_rez = skimage.transform.resize(sobel_img, (256, 256, 1))
    X.append(im_rez)
    y.append(1)

X = np.array(X)
y = np.array(y)

Xtr, Xts, ytr, yts = train_test_split(X, y, test_size=0.25, random_state=42)

print(type(Xtr))
classifier.summary()
es = EarlyStopping(patience=20, restore_best_weights=True)

history = classifier.fit(Xtr,
                         ytr,
                         callbacks=[es],
                         epochs=40,
                         batch_size=8,
                         validation_data=(Xts, yts))
classifier.evaluate(Xts, yts)
classifier.save("../Models/Keras/Test2/model.h5")
import pickle as pickle
pickle.dump(history, open("../Models/Keras/Test2/history.pkl", "wb"))
X_test = sc.transform(X_test)

my_first_nn = Sequential()  # create model
my_first_nn.add(Dense(8, input_dim=30, activation='relu'))  # hidden layer
my_first_nn.add(Dense(20, input_dim=30, activation='relu'))  # hidden layer
my_first_nn.add(Dense(50, input_dim=30, activation='relu'))  # hidden layer
my_first_nn.add(Dense(1, activation='sigmoid'))  # output layer
my_first_nn.compile(loss='binary_crossentropy',
                    optimizer='adam',
                    metrics=['accuracy'])
my_first_nn_fitted = my_first_nn.fit(X_train,
                                     y_train,
                                     epochs=100,
                                     verbose=0,
                                     initial_epoch=0)
print(my_first_nn.summary())
print(my_first_nn.evaluate(X_test, y_test, verbose=0))

# print(my_first_nn.summary())
# y_pred = my_first_nn.predict(X_test)
# y_pred = (y_pred > 0.5)
#
# from sklearn.metrics import confusion_matrix
# cm = confusion_matrix(y_test, y_pred)
#
# ax= plt.subplot()
# sns.heatmap(cm, annot=True, ax = ax)
#
# # labels, title and ticks
# ax.set_xlabel('Predicted labels')
# ax.set_ylabel('True labels')
Example no. 41
def model_generator(model_dict):
    start_time = time.time()

    # initialize the model
    print("Initialize model...")
    model = Sequential()

    # create convolutional layers
    print("Add Convolutional Layers...")
    convLayers = model_dict["conv_layers"]
    for convLayerNum in convLayers:
        if convLayerNum == 1:
            model.add(
                Conv2D(
                    filters=convLayers[convLayerNum]["filters_number"],
                    kernel_size=convLayers[convLayerNum]["kernel_size"],
                    activation=convLayers[convLayerNum]["activation_function"],
                    input_shape=model_dict["input_shape"]))
        else:
            model.add(
                Conv2D(
                    filters=convLayers[convLayerNum]["filters_number"],
                    kernel_size=convLayers[convLayerNum]["kernel_size"],
                    activation=convLayers[convLayerNum]["activation_function"],
                ))
        # add pooling layer just after each conv layer
        model.add(AveragePooling2D())

    # flatten last convolutional layer output to use in connected layers
    print("Add Flatten Layer...")
    model.add(Flatten())

    # create connected layers
    print("Add Connected Layers...")
    connectedLayers = model_dict["connected_layers"]
    for concLayerNum in connectedLayers:
        model.add(
            Dense(units=connectedLayers[concLayerNum]["units"],
                  activation=connectedLayers[concLayerNum]
                  ["activation_function"]))

    # show model's summary
    model.summary()
    model.summary(print_fn=lambda x: log.write(x + '\n'))

    # set model's hyper methods
    print("Compiling...")
    model.compile(loss=model_dict["loss_method"],
                  optimizer=model_dict["optimizer"],
                  metrics=['accuracy'])

    # train the model
    print("Training...")
    H = model.fit_generator(model_dict["data_augmentation_method"].flow(
        model_dict["train_data"][0],
        model_dict["train_data"][1],
        batch_size=model_dict["batch_size"]),
                            validation_data=model_dict["validation_data"],
                            steps_per_epoch=len(model_dict["train_data"][0]) //
                            model_dict["batch_size"],
                            epochs=model_dict["epochs"])

    # model evaluating
    print("Evaluating...")
    (loss, accuracy) = model.evaluate(model_dict["validation_data"][0],
                                      model_dict["validation_data"][1])
    print("Accuracy: {:.2f}%".format(accuracy * 100))
    log.write("model accuracy: {:.2f}%\n".format(accuracy * 100))
    print("total time: {} seconds\n".format(time.time() - start_time))
    log.write("total time: {} seconds\n\n\n".format(time.time() - start_time))
Example no. 42
print(y_train[0])
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('y_train shape:', y_train.shape)
print('y_test shape:', y_test.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

#Simple fully connected neural network with 2 hidden layers
model = Sequential()

model.add(Dense(units=200, activation='relu', input_shape=input_shape))
model.add(Flatten())
model.add(Dense(units=200, activation='relu'))
model.add(Dense(units=10, activation='softmax'))
model.summary()

#Learning process and compile
sgd = SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)

model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])

#Learning
model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size)

#Evaluation
score = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
def train_model(num_of_games):
    model_name = './models/ncaa_model.h5'
    model_file = Path(model_name)
    if model_file.exists():
        model = load_model(model_name)
    else:
        model = Sequential()
        # model.add(LSTM(32, input_shape=(3, 1860), activation='sigmoid', return_sequences=True))
        # model.add(LSTM(128, activation='sigmoid', return_sequences=True))
        # model.add(LSTM(8, activation='softmax', return_sequences=False))
        # model.add(Dense(2, activation='softmax'))
        # model.compile(loss='categorical_crossentropy', optimizer='adagrad', metrics=['accuracy'])
        #
        model.add(
            LSTM(64,
                 input_shape=(num_of_games, 1860),
                 activation='softmax',
                 return_sequences=True))
        model.add(LSTM(32, activation='tanh', return_sequences=True))
        model.add(LSTM(128, activation='elu', return_sequences=False))
        model.add(Dense(2, activation='softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer='adamax',
                      metrics=['accuracy'])

        print(model.summary())
        model.save(model_name)

    x = []
    y = []
    index = 0
    for season in range(2003, 2020):
        a = pd.read_csv('./training_data/%i/x_train-%i.csv' %
                        (num_of_games, season))
        b = pd.read_csv('./training_data/%i/y_train-%i.csv' %
                        (num_of_games, season))
        if len(a) == 0 or len(b) == 0:
            continue
        if index == 0:
            index = 1
            x = a
            y = b
        else:
            x = pd.concat([x, a])
            y = pd.concat([y, b])
        print('%i: %s' % (season, x.shape))

    x = x.values.reshape(y.shape[0], num_of_games, 1860)
    print('X Shape: %s' % str(x.shape))
    print('Y Shape: %s' % str(y.shape))

    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)

    es = EarlyStopping(monitor='acc', mode='auto', verbose=1, patience=5)
    mc = ModelCheckpoint(model_name,
                         monitor='acc',
                         verbose=1,
                         save_best_only=True,
                         mode='auto',
                         period=5)
    model.fit(x_train,
              y_train,
              epochs=50,
              batch_size=20,
              verbose=1,
              callbacks=[es, mc])

    score = model.evaluate(x_test, y_test)
    print('=========================')
    print('Accuracy: %.3f' % score[1])
    print('=========================')

    model.save(model_name)

    return
# In[21]:



opt = 'adam'
loss = 'categorical_crossentropy'
metrics = ['accuracy']
# Compile the classifier using the configuration we want
cnn.compile(optimizer=opt, loss=loss, metrics=metrics)


# In[22]:


print(cnn.summary())


# In[23]:


history = cnn.fit(x_train, y_train,
                  batch_size=32, epochs=10,
                  validation_data=(x_test, y_test))


# In[24]:


scores = cnn.evaluate(x_test, y_test, verbose=0)
print("Accuracy: %.2f%%" % (scores[1]*100))
Esempio n. 45
0
class ConvNet:
    def __init__(self):
        self.model = Sequential()
        self.trained = None
        self.train_datagen = ImageDataGenerator(rescale=1. / 255,
                                                shear_range=0.3,
                                                zoom_range=0.3,
                                                horizontal_flip=True)
        self.test_datagen = ImageDataGenerator(rescale=1. / 255)
        self.train_generator = self.train_datagen.flow_from_directory(
            'data/train',
            target_size=(150, 150),
            batch_size=32,
            class_mode='categorical')
        self.validation_generator = self.test_datagen.flow_from_directory(
            'data/validation',
            target_size=(150, 150),
            batch_size=32,
            class_mode='categorical')
        self.network()
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=optimizers.RMSprop(lr=1e-4),
                           metrics=['accuracy'])

    def __str__(self):
        # model.summary() prints to stdout and returns None; capture the text instead
        lines = []
        self.model.summary(print_fn=lines.append)
        return '\n'.join(lines)

    def network(self):
        self.model.add(
            Conv2D(72, (3, 3),
                   padding='same',
                   input_shape=(150, 150, 3),
                   activation='relu'))
        self.model.add(Conv2D(64, (3, 3), activation='relu'))
        self.model.add(MaxPool2D(pool_size=(5, 5)))

        self.model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
        self.model.add(Conv2D(48, (3, 3), activation='relu'))
        self.model.add(Conv2D(32, (3, 3), activation='relu'))
        self.model.add(Conv2D(24, (3, 3), activation='relu'))
        self.model.add(MaxPool2D(pool_size=(5, 5)))

        self.model.add(Flatten())

        self.model.add(Dense(128, activation='relu'))
        self.model.add(Dense(96, activation='relu'))
        self.model.add(Dense(64, activation='relu'))
        self.model.add(Dropout(rate=0.5))
        self.model.add(Dense(6, activation='softmax'))

    def train(self):
        self.trained = self.model.fit_generator(
            self.train_generator,
            steps_per_epoch=439,
            epochs=20,
            validation_data=self.validation_generator,
            validation_steps=439)
        self.learning_curve()

    def learning_curve(self):
        plt.plot(self.trained.history['acc'])
        plt.plot(self.trained.history['val_acc'])
        plt.title('Model accuracy')
        plt.ylabel('Accuracy')
        plt.xlabel('Epoch')
        plt.legend(['Train', 'Test'], loc='upper left')
        plt.savefig('figures/model_accuracy.png')
        plt.show()

        plt.plot(self.trained.history['loss'])
        plt.plot(self.trained.history['val_loss'])
        plt.title('Model loss')
        plt.ylabel('Loss')
        plt.xlabel('Epoch')
        plt.legend(['Train', 'Test'], loc='upper left')
        plt.savefig('figures/model_loss.png')
        plt.show()
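# Hypothetical usage of the ConvNet class above; data/train and data/validation
# are assumed to contain one subdirectory per class (6 classes in this model).
net = ConvNet()
print(net)           # prints the captured model summary
net.train()          # trains and writes the learning-curve figures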
Esempio n. 46
0
input_img = Input(shape=(1, 28, 28))
y = tf.placeholder(tf.float32, shape=(None, 10))

encoder = Sequential()
x1 = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
x2 = MaxPooling2D((2, 2), padding='same')(x1)
x3 = Conv2D(8, (3, 3), activation='relu', padding='same')(x2)
x4 = MaxPooling2D((2, 2), padding='same')(x3)
x5 = Conv2D(8, (3, 3), activation='relu', padding='same')(x4)
encoded = MaxPooling2D((2, 2), padding='same')(x5)
ae = Model(input_img, encoded)
encoder.add(ae)
encoder.add(Flatten())
encoder.add(Dense(10))
encoder.summary()

# at this point the representation is (4, 4, 8) i.e. 128-dimensional

x6 = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
x7 = UpSampling2D((2, 2))(x6)
x8 = Conv2D(8, (3, 3), activation='relu', padding='same')(x7)
x9 = UpSampling2D((2, 2))(x8)
x10 = Conv2D(16, (3, 3), activation='relu')(x9)
x11 = UpSampling2D((2, 2))(x10)
decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x11)

autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
autoencoder.summary()
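# A minimal training sketch for the autoencoder above, assuming x_train/x_test
# are images already reshaped to (n, 1, 28, 28) and scaled to [0, 1].
autoencoder.fit(x_train, x_train,
                epochs=50,
                batch_size=128,
                shuffle=True,
                validation_data=(x_test, x_test))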
    #    
    #    TimeDistributed(Dense(out.shape[2]*20)),
    ##    
    #    TimeDistributed(Dense(32,)),
    #    
    TimeDistributed(Dense(1), name='test'),

    Reshape((375,)),


])
# optimizer = optimizers.Adam(clipvalue=0.5)
#optimizer = optimizers.Adam(clipnorm=1.)
#     model = multi_gpu_model(model, gpus=6)
model_1.compile(loss=kl.mean_absolute_error, optimizer='adam', metrics=['accuracy'])
model_1.summary()

hist_1 = model_1.fit(input_bus, output_bus, epochs=30,
                              batch_size=20, verbose=1, shuffle=False)

with open('caseof.pickles','wb') as p:
    pickle.dump(model_1,p)
    pickle.dump(hist_1,p)
# In[2]
with open('caseof.pickles','rb') as p:
    model = pickle.load(p)
    hist = pickle.load(p)
valo = sequence.pad_sequences(valo)

explainer = shap.DeepExplainer(model, tf.convert_to_tensor(input_bus[:239,:,2:,:,:],dtype = 'float32'))
Esempio n. 48
0
def buildmodel():
    model = Sequential()
    # Convolution layer 1
    model.add(
        Conv2D(filters=64,
               kernel_size=(3, 3),
               input_shape=(48, 48, 1),
               activation='relu',
               padding='same'))
    # Convolution layer 2 and pooling layer 2
    #model.add(ZeroPadding2D(padding=(1,1), data_format='channels_last'))
    model.add(
        Conv2D(filters=64,
               kernel_size=(3, 3),
               activation='relu',
               padding='same'))
    model.add(
        Conv2D(filters=64,
               kernel_size=(3, 3),
               activation='relu',
               padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.1))

    model.add(
        Conv2D(filters=128,
               kernel_size=(3, 3),
               activation='relu',
               padding='same'))
    #model.add(ZeroPadding2D(padding=(1,1), data_format='channels_last'))
    model.add(
        Conv2D(filters=128,
               kernel_size=(3, 3),
               activation='relu',
               padding='same'))
    model.add(
        Conv2D(filters=128,
               kernel_size=(3, 3),
               activation='relu',
               padding='same'))

    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.1))

    model.add(
        Conv2D(filters=256,
               kernel_size=(3, 3),
               activation='relu',
               padding='same'))
    #model.add(ZeroPadding2D(padding=(1,1), data_format='channels_last'))
    model.add(
        Conv2D(filters=256,
               kernel_size=(3, 3),
               activation='relu',
               padding='same'))
    model.add(
        Conv2D(filters=256,
               kernel_size=(3, 3),
               activation='relu',
               padding='same'))

    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.1))

    model.add(
        Conv2D(filters=512,
               kernel_size=(3, 3),
               activation='relu',
               padding='same'))
    #model.add(ZeroPadding2D(padding=(1,1), data_format='channels_last'))
    model.add(
        Conv2D(filters=512,
               kernel_size=(3, 3),
               activation='relu',
               padding='same'))
    model.add(
        Conv2D(filters=512,
               kernel_size=(3, 3),
               activation='relu',
               padding='same'))

    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.1))

    # Step 3: build the fully connected network (flatten, hidden, and output layers)
    model.add(Flatten())
    model.add(
        Dense(4096,
              kernel_regularizer=regularizers.l2(0.001),
              activation='relu'))
    model.add(Dropout(0.5))
    model.add(
        Dense(2048,
              kernel_regularizer=regularizers.l2(0.001),
              activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(7, activation='softmax'))

    # opt = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    opt = Adam(lr=1e-4)
    # opt = Adadelta(lr=0.1, rho=0.95, epsilon=1e-08)
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    model.summary()
    return model
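# A hypothetical training call for buildmodel(); the 48x48 grayscale inputs and
# the 7 one-hot classes match the architecture, but the data loading is assumed.
model = buildmodel()
model.fit(x_train, y_train,
          validation_data=(x_val, y_val),
          epochs=30,
          batch_size=128)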
Esempio n. 49
0
class Model(object):
    FILE_PATH = "C:\\Face recognition\\face.model.h5"  #模型进行存储和读取的地方
    IMAGE_SIZE = 128  #模型接受的人脸图片一定得是128*128的

    def __init__(self):
        self.model = None

    # read an instantiated DataSet object as the training data source
    def read_trainData(self, dataset):
        self.dataset = dataset

    # build the CNN model
    def build_model(self):
        self.model = Sequential()
        self.model.add(
            Convolution2D(filters=32,
                          kernel_size=(3, 3),
                          padding='same',
                          data_format='channels_last',
                          input_shape=self.dataset.X_train.shape[1:]))

        self.model.add(Activation('relu'))
        self.model.add(
            Convolution2D(filters=32,
                          kernel_size=(3, 3),
                          padding='same',
                          data_format='channels_last',
                          input_shape=self.dataset.X_train.shape[1:]))
        self.model.add(Activation('relu'))

        self.model.add(
            MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
        self.model.add(Dropout(0.25))

        self.model.add(
            Convolution2D(filters=64, kernel_size=(3, 3), padding='same'))
        self.model.add(Activation('relu'))
        self.model.add(
            Convolution2D(filters=64, kernel_size=(3, 3), padding='same'))
        self.model.add(Activation('relu'))
        self.model.add(
            MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
        self.model.add(Dropout(0.25))
        self.model.add(
            Convolution2D(filters=64, kernel_size=(3, 3), padding='same'))
        self.model.add(Activation('relu'))
        self.model.add(
            Convolution2D(filters=64, kernel_size=(3, 3), padding='same'))
        self.model.add(Activation('relu'))
        self.model.add(
            MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
        self.model.add(Dropout(0.25))

        self.model.add(
            Convolution2D(filters=64, kernel_size=(3, 3), padding='same'))
        self.model.add(Activation('relu'))
        self.model.add(
            Convolution2D(filters=64, kernel_size=(3, 3), padding='same'))
        self.model.add(Activation('relu'))
        self.model.add(
            MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
        self.model.add(Dropout(0.25))

        self.model.add(Flatten())
        self.model.add(Dense(512))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(self.dataset.num_classes))
        self.model.add(Activation('softmax'))
        self.model.summary()

    def train_model(self):
        self.model.compile(
            optimizer='adam',  # many optimizers are available, e.g. RMSprop or Adagrad
            loss='categorical_crossentropy',  # squared_hinge could also be used as the loss
            metrics=['accuracy'])

        # epochs and batch_size are tunable: epochs is the number of passes over the data, batch_size the number of samples per update
        self.model.fit(self.dataset.X_train,
                       self.dataset.Y_train,
                       epochs=5,
                       batch_size=16)

    def evaluate_model(self):
        print('\nTesting---------------')
        loss, accuracy = self.model.evaluate(self.dataset.X_test,
                                             self.dataset.Y_test)

        print('test loss:', loss)
        print('test accuracy:', accuracy)

    def save(self, file_path=FILE_PATH):
        print('Model Saved.')
        self.model.save(file_path)

    def load(self, file_path=FILE_PATH):
        print('Model Loaded.')
        self.model = load_model(file_path)

    # the input img must be a grayscale (channel = 1) face image of size IMAGE_SIZE
    def predict(self, img):
        img = img.reshape((1, self.IMAGE_SIZE, self.IMAGE_SIZE, 1))
        img = img.astype('float32')
        img = img / 255.0

        result = self.model.predict_proba(img)  # estimate the probability of each label for this img
        max_index = np.argmax(result)  # pick the most probable label

        return max_index, result[0][
            max_index]  # the index of the most probable label and its probability
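# Hypothetical end-to-end usage of the Model class above; the DataSet class
# that loads, splits and one-hot encodes the 128*128 face images is assumed
# to be defined elsewhere in the project.
dataset = DataSet('path/to/face/images')    # placeholder path
model = Model()
model.read_trainData(dataset)
model.build_model()
model.train_model()
model.evaluate_model()
model.save()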
Esempio n. 50
0
def ejemplo12():
    from keras.datasets import cifar10
    from keras.layers import Flatten
    from keras.constraints import maxnorm
    from keras.optimizers import SGD
    from keras.layers.convolutional import Convolution2D
    from keras.layers.convolutional import MaxPooling2D
    # fix random seed for reproducibility
    seed = 7
    numpy.random.seed(seed)
    # load data
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    # normalize inputs from 0-255 to 0.0-1.0
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train = X_train / 255.0
    X_test = X_test / 255.0
    # one hot encode outputs
    y_train = np_utils.to_categorical(y_train)
    y_test = np_utils.to_categorical(y_test)
    num_classes = y_test.shape[1]
    # Create the model
    model = Sequential()
    model.add(
        Convolution2D(32, (3, 3),
                      input_shape=(32, 32, 3),
                      activation='relu',
                      padding='same'))
    model.add(Dropout(0.2))
    model.add(Convolution2D(32, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(64, (3, 3), activation='relu', padding='same'))
    model.add(Dropout(0.2))
    model.add(Convolution2D(64, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(128, (3, 3), activation='relu', padding='same'))
    model.add(Dropout(0.2))
    model.add(Convolution2D(128, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dropout(0.2))
    model.add(Dense(1024, activation='relu', kernel_constraint=maxnorm(3)))
    model.add(Dropout(0.2))
    model.add(Dense(512, activation='relu', kernel_constraint=maxnorm(3)))
    model.add(Dropout(0.2))
    model.add(Dense(num_classes, activation='softmax'))
    # Compile model
    epochs = 25
    lrate = 0.01
    decay = lrate / epochs
    sgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
    print(model.summary())
    # Fit the model
    model.fit(X_train,
              y_train,
              validation_data=(X_test, y_test),
              epochs=epochs,
              batch_size=32)
    # Final evaluation of the model
    scores = model.evaluate(X_test, y_test, verbose=0)
    print("Accuracy: %.2f%%" % (scores[1] * 100))
Esempio n. 51
0
# create NN_model
NN_model = Sequential()
NN_model.add(
    Dense(x_train.shape[1], input_dim=x_train.shape[1], activation='relu'))
NN_model.add(Dense(100, activation='relu'))
NN_model.add(Dense(50, activation='relu'))
NN_model.add(Dense(25, activation='relu'))
NN_model.add(Dense(12, activation='relu'))
# output Layer
NN_model.add(Dense(1, activation='linear'))

# Compile the network :
NN_model.compile(loss='mean_absolute_error',
                 optimizer='adam',
                 metrics=['mean_absolute_error'])
NN_model.summary()

checkpoint_name = 'Weights-{epoch:03d}--{val_loss:.5f}.hdf5'
checkpoint = ModelCheckpoint(checkpoint_name,
                             monitor='val_loss',
                             verbose=1,
                             save_best_only=True,
                             mode='auto')
callbacks_list = [checkpoint]

history = NN_model.fit(x_train,
                       y,
                       batch_size=1000,
                       epochs=20,
                       verbose=1,
                       callbacks=callbacks_list,
                       validation_split=0.2)  # the original snippet is truncated here; a validation split is assumed since the checkpoint monitors val_loss
Esempio n. 52
0
                   strides=(2, 2),
                   input_shape=(14, 47, 36),
                   activation='relu'))
        model.add(Conv2D(64, 3, input_shape=(5, 22, 48), activation='relu'))
        model.add(Conv2D(64, 3, input_shape=(3, 20, 64), activation='relu'))
        model.add(Flatten())
        model.add(Dense(1164, activation='relu'))
        model.add(Dropout(0.5))  #Added dropout layer to avoid overfitting
        model.add(Dense(100, activation='relu'))
        model.add(Dense(50, activation='relu'))
        model.add(Dropout(0.5))  #Added dropout layer to avoid overfitting
        model.add(Dense(10, activation='relu'))
        model.add(Dense(1))
        model.build()
        model.compile(optimizer=Adam(lr=2e-04), loss="mse")
        model.summary(print_fn=lambda x: log.write(x + '\n'))

        print("Model built!")
        datagen = ImageDataGenerator(
            brightness_range=(0.7, 0.9))  #Further data augmentation

        datagen.fit(train_x)

        history = model.fit_generator(datagen.flow(train_x,
                                                   train_y,
                                                   batch_size=64),
                                      epochs=64,
                                      validation_data=(valid_x, valid_y))

        log.write(str(history.history))
        plt.plot(history.history['loss'])
Esempio n. 53
0
     model.add(Dense(1))
     model.add(Activation('sigmoid'))
     model.compile(loss='binary_crossentropy',
                   optimizer='adam',
                   metrics=['accuracy'])
     filepath = "./models/lstm-{epoch:02d}-{loss:0.3f}-{acc:0.3f}-{val_loss:0.3f}-{val_acc:0.3f}.hdf5"
     checkpoint = ModelCheckpoint(filepath,
                                  monitor="loss",
                                  verbose=1,
                                  save_best_only=True,
                                  mode='min')
     reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                   factor=0.5,
                                   patience=2,
                                   min_lr=0.000001)
     print(model.summary())
     model.fit(CONTENT,
               labels,
               batch_size=128,
               epochs=5,
               validation_split=0.1,
               shuffle=True,
               callbacks=[checkpoint, reduce_lr])
 else:
     model = load_model(sys.argv[1])
     print(model.summary())
     test_CONTENT, _ = process_CONTENT(TEST_PROCESSED_FILE, test_file=True)
     test_CONTENT = pad_sequences(test_CONTENT,
                                  maxlen=max_length,
                                  padding='post')
     predictions = model.predict(test_CONTENT, batch_size=128, verbose=1)
def main(rnn_model):
    def message_to_array(msg):
        msg = msg.lower().split(' ')
        test_seq = np.array([word_index[word] for word in msg])

        test_seq = np.pad(test_seq, (500-len(test_seq), 0), 'constant', constant_values=(0))
        test_seq = test_seq.reshape(1, 500)
        return test_seq

    data = pd.read_csv("./spam_text_message_data.csv")
    print(data.head())
    print(data.tail())

    messages = []
    labels = []
    for index, row in data.iterrows():
        messages.append(row['Message'])
        if row['Category'] == 'ham':
            labels.append(0)
        else:
            labels.append(1)

    messages = np.asarray(messages)
    labels = np.asarray(labels)

    print("Number of messages: ", len(messages))
    print("Number of labels: ", len(labels))

    max_vocab = 10000
    max_len = 500

    # Ignore all words except the 10000 most common words
    tokenizer = Tokenizer(num_words=max_vocab)
    # Calculate the frequency of words
    tokenizer.fit_on_texts(messages)
    # Convert array of messages to list of sequences of integers
    sequences = tokenizer.texts_to_sequences(messages)

    # Dict keeping track of words to integer index
    word_index = tokenizer.word_index

    # Convert the array of sequences(of integers) to 2D array with padding
    # maxlen specifies the maximum length of sequence (truncated if longer, padded if shorter)
    data = pad_sequences(sequences, maxlen=max_len)

    print("data shape: ", data.shape)

    # We will use 80% of data for training & validation(80% train, 20% validation) and 20% for testing
    train_samples = int(len(messages)*0.8)

    messages_train = data[:train_samples]
    labels_train = labels[:train_samples]

    messages_test = data[train_samples:len(messages)-2]
    labels_test = labels[train_samples:len(messages)-2]

    embedding_mat_columns=32
    # Construct the SimpleRNN model
    model = Sequential()
    ## Add embedding layer to convert integer encoding to word embeddings(the model learns the
    ## embedding matrix during training), embedding matrix has max_vocab as no. of rows and chosen
    ## no. of columns
    model.add(Embedding(input_dim=max_vocab, output_dim=embedding_mat_columns, input_length=max_len))

    if rnn_model == 'SimpleRNN':
        model.add(SimpleRNN(units=embedding_mat_columns))
    elif rnn_model == 'LSTM':
        model.add(LSTM(units=embedding_mat_columns))
    else:
        model.add(GRU(units=embedding_mat_columns))

    model.add(Dense(1, activation='sigmoid'))

    model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
    model.summary()

    #plot_model(model, to_file='model.png', show_shapes=True, show_layer_names=True)

    # Training the model
    model.fit(messages_train, labels_train, epochs=10, batch_size=60, validation_split=0.2)

    # Testing the model
    pred = model.predict_classes(messages_test)
    acc = model.evaluate(messages_test, labels_test)
    print("Test loss is {0:.2f} accuracy is {1:.2f}  ".format(acc[0],acc[1]))

    # Constructing a custom message to check model
    custom_msg = 'Congratulations ur awarded 500 of CD vouchers or 125gift guaranteed Free entry for movies'
    test_seq = message_to_array(custom_msg)
    pred = model.predict_classes(test_seq)
    print(pred)
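# Hypothetical entry point for the spam classifier above; any of 'SimpleRNN',
# 'LSTM' or 'GRU' selects the recurrent layer that is added to the model.
if __name__ == '__main__':
    main('LSTM')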
Esempio n. 55
0
class GAN_Model(object):
    def __init__(self, batch_size, timesteps, word_index, summary=None):
        self.batch_size = batch_size
        self.timesteps = timesteps
        self.word_index = word_index
        self.lstm_vec_dim = 128
        self.summary = summary
        self.D = None  # discriminator
        self.G = None  # generator
        self.AM = None  # adversarial model
        self.DM = None  # discriminator model

    # (W-F+2P)/S+1
    def discriminator(self):
        if self.D:
            return self.D

        dropout_value = 0.1
        cnn_filters = [20, 10]
        cnn_kernels = [2, 3]
        enc_convs = []
        embedding_vec = 20  # embedding layer length

        # In: (batch_size, timesteps,1),
        # Out: (batch_size, 128)

        discr_inputs = Input(shape=(self.timesteps, 38),
                             name="Discriminator_Input")
        # embedding layer. expected output ( batch_size, timesteps, embedding_vec)
        manual_embedding = Dense(embedding_vec, activation='linear')
        discr = TimeDistributed(
            manual_embedding, name='manual_embedding', trainable=False
        )(
            discr_inputs
        )  # this is actually the first layer of the discriminator in the joined GAN
        # discr = Embedding(self.word_index, embedding_vec, input_length=self.timesteps)(discr_inputs)
        for i in range(2):
            conv = Conv1D(cnn_filters[i],
                          cnn_kernels[i],
                          padding='same',
                          activation='relu',
                          strides=1,
                          name='discr_conv%s' % i)(discr)

            conv = Dropout(dropout_value, name='discr_dropout%s' % i)(conv)
            conv = MaxPooling1D()(conv)
            enc_convs.append(conv)

        # concatenating CNNs. expected output (batch_size, 7, 30)
        discr = concatenate(enc_convs)
        # LSTM. expected out (batch_size, 128)
        discr = LSTM(self.lstm_vec_dim)(discr)
        discr = Dense(1, activation='sigmoid')(discr)

        self.D = Model(inputs=discr_inputs,
                       outputs=discr,
                       name='Discriminator')
        if self.summary:
            self.D.summary()
            plot_model(self.D,
                       to_file="images/discriminator.png",
                       show_shapes=True)
        return self.D

    def generator(self):

        if self.G:
            return self.G

        dropout_value = 0.1
        cnn_filters = [20, 10]
        cnn_kernels = [2, 3]
        dec_convs = []

        # In: (batch_size, 128),
        # Out: (batch_size, timesteps, word_index)
        dec_inputs = Input(shape=(128, ), name="Generator_Input")
        # decoded = Dense(self.lstm_vec_dim, activation='sigmoid')(dec_inputs)
        # Repeating input by "timesteps" times. expected output (batch_size, 128, 15)
        decoded = RepeatVector(self.timesteps,
                               name="gen_repeate_vec")(dec_inputs)
        decoded = LSTM(self.lstm_vec_dim,
                       return_sequences=True,
                       name="gen_LSTM")(decoded)

        for i in range(2):
            conv = Conv1D(cnn_filters[i],
                          cnn_kernels[i],
                          padding='same',
                          activation='relu',
                          strides=1,
                          name='gen_conv%s' % i)(decoded)
            conv = Dropout(dropout_value, name="gen_dropout%s" % i)(conv)
            dec_convs.append(conv)

        decoded = concatenate(dec_convs)
        decoded = TimeDistributed(
            Dense(self.word_index, activation='softmax'), name='decoder_end')(
                decoded)  # output_shape = (samples, maxlen, max_features )

        self.G = Model(inputs=dec_inputs, outputs=decoded, name='Generator')
        if self.summary:
            self.G.summary()
            plot_model(self.G,
                       to_file="images/generator.png",
                       show_shapes=True)
        return self.G

    def discriminator_model(self, summary=None):
        if self.DM:
            return self.DM
        optimizer = RMSprop(lr=0.0002, decay=6e-8)
        self.DM = Sequential()
        self.DM.add(self.discriminator())
        self.DM.compile(loss='binary_crossentropy',
                        optimizer=optimizer,
                        metrics=['accuracy'])
        return self.DM

    def adversarial_model(self):
        if self.AM:
            return self.AM
        optimizer = RMSprop(lr=0.0001, decay=3e-8)
        self.AM = Sequential()
        self.AM.add(self.generator())
        self.AM.add(self.discriminator())
        self.discriminator().trainable = False
        self.AM.compile(loss='binary_crossentropy',
                        optimizer=optimizer,
                        metrics=['accuracy'])
        if self.summary:
            self.AM.summary()
            plot_model(self.AM,
                       to_file="images/adversial.png",
                       show_shapes=True)
        return self.AM
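# A minimal single-training-step sketch (not from the original source) showing
# how the generator, discriminator model and adversarial model above are
# typically combined; batch_size=32, timesteps=15, word_index=38 and the real
# one-hot batch x_real are all assumptions.
import numpy as np

gan = GAN_Model(batch_size=32, timesteps=15, word_index=38, summary=False)
generator = gan.generator()
discriminator_model = gan.discriminator_model()
adversarial_model = gan.adversarial_model()

noise = np.random.uniform(-1.0, 1.0, size=(32, 128))
x_fake = generator.predict(noise)                      # (32, 15, 38)
x = np.concatenate((x_real, x_fake))                   # x_real: real one-hot samples
y = np.concatenate((np.ones((32, 1)), np.zeros((32, 1))))
d_loss = discriminator_model.train_on_batch(x, y)

noise = np.random.uniform(-1.0, 1.0, size=(32, 128))
a_loss = adversarial_model.train_on_batch(noise, np.ones((32, 1)))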
    input_dim = 2*N_crop**2
    encoding_dim = 32
    epochs = 2000
    batch = 32

    K.clear_session()
    AE_high = Sequential()
    AE_high.add(Dense(16 * encoding_dim, input_shape=(input_dim, ), activation='relu'))
    AE_high.add(Dense(4 * encoding_dim, activation='relu'))
    AE_high.add(Dense(2 * encoding_dim, activation='relu'))
    AE_high.add(Dense(encoding_dim, activation='relu'))
    AE_high.add(Dense(2 * encoding_dim, activation='relu'))
    AE_high.add(Dense(4 * encoding_dim, activation='relu'))
    AE_high.add(Dense(input_dim, activation='sigmoid'))
    AE_high.summary()
    AE_high.compile(optimizer='adam', loss='mean_squared_error')

    ### Run the TRAINING
    AE_high.fit(train_noisy, train_clean, epochs=epochs, batch_size=batch, shuffle=True, verbose=2,
                validation_data=(test_noisy, test_clean))
    decoded = AE_high.predict(test_noisy)

    # Make sure the training has succeeded by checking the residuals
    residuals = np.mean(norm(np.abs(decoded - test_clean), axis=-1))
    total = np.mean(norm(np.abs(test_clean), axis=-1))
    print(residuals / total * 100)

    # ================================================================================================================ #
    #                                   USE THE ENCODER TO TRAIN AN MLP NETWORK                                        #
    # ================================================================================================================ #
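    # A hypothetical continuation (not part of the original snippet): reuse the
    # trained encoder half of AE_high, up to the 32-unit bottleneck, as a frozen
    # feature extractor and put a small MLP on top; n_targets is an assumption.
    from keras.models import Model
    from keras.layers import Dense, Input

    encoder = Model(inputs=AE_high.input, outputs=AE_high.layers[3].output)
    encoder.trainable = False

    mlp_input = Input(shape=(input_dim, ))
    features = encoder(mlp_input)
    hidden = Dense(64, activation='relu')(features)
    MLP = Model(mlp_input, Dense(n_targets, activation='linear')(hidden))
    MLP.compile(optimizer='adam', loss='mean_squared_error')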
Esempio n. 57
0
def GSCSubtractNN(x_train, x_val, x_test, y_train, y_val, y_test, verbose):
    print('--------- GSC Subtract Neural Network ---------')

    # Convert target labels to categorical data format
    y_train = np_utils.to_categorical(y_train, 2)
    y_val = np_utils.to_categorical(y_val, 2)
    y_test = np_utils.to_categorical(y_test, 2)

    # Neural Network Configuration
    input_nodes = x_train.shape[1]
    drop_out = 0.3

    layer_1_nodes = 512
    layer_2_nodes = 256
    layer_3_nodes = 128
    layer_4_nodes = 32
    output_nodes = 2

    # Sequential Model of 4 hidden layers
    model = Sequential()

    model.add(
        Dense(layer_1_nodes,
              input_dim=input_nodes,
              kernel_regularizer=regularizers.l2(0),
              activity_regularizer=regularizers.l1(0),
              kernel_initializer=initializers.RandomUniform(seed=123)))
    #model.add(Activation('relu'))
    model.add(LeakyReLU(alpha=0.3))
    model.add(Dropout(drop_out))

    model.add(
        Dense(layer_2_nodes,
              input_dim=layer_1_nodes,
              kernel_regularizer=regularizers.l2(0),
              activity_regularizer=regularizers.l1(0),
              kernel_initializer=initializers.RandomUniform(seed=123)))
    #model.add(Activation('relu'))
    model.add(LeakyReLU(alpha=0.3))
    model.add(Dropout(drop_out))

    model.add(
        Dense(layer_3_nodes,
              input_dim=layer_2_nodes,
              kernel_regularizer=regularizers.l2(0),
              activity_regularizer=regularizers.l1(0),
              kernel_initializer=initializers.RandomUniform(seed=123)))
    #model.add(Activation('relu'))
    model.add(LeakyReLU(alpha=0.3))
    model.add(Dropout(drop_out))

    model.add(
        Dense(layer_4_nodes,
              input_dim=layer_3_nodes,
              kernel_regularizer=regularizers.l2(0),
              activity_regularizer=regularizers.l1(0),
              kernel_initializer=initializers.RandomUniform(seed=123)))
    #model.add(Activation('relu'))
    model.add(LeakyReLU(alpha=0.3))
    model.add(Dropout(drop_out))

    model.add(
        Dense(output_nodes,
              input_dim=layer_4_nodes,
              kernel_regularizer=regularizers.l2(0),
              activity_regularizer=regularizers.l1(0),
              kernel_initializer=initializers.RandomUniform(seed=551)))
    model.add(Activation('sigmoid'))

    model.summary()

    model.compile(optimizer=optimizers.SGD(lr=0.05,
                                           momentum=0.1,
                                           decay=0,
                                           nesterov=True),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    num_epochs = 250
    model_batch_size = 128
    tb_batch_size = 32
    early_patience = 50

    tensorboard_cb = TensorBoard(log_dir='logs',
                                 batch_size=tb_batch_size,
                                 write_graph=True)
    earlystopping_cb = EarlyStopping(monitor='val_loss',
                                     verbose=1,
                                     patience=early_patience,
                                     mode='min')

    # Train model using Training Data
    history = model.fit(x_train,
                        y_train,
                        validation_data=(x_val, y_val),
                        epochs=num_epochs,
                        batch_size=model_batch_size,
                        shuffle=True,
                        callbacks=[tensorboard_cb, earlystopping_cb],
                        verbose=verbose)

    # Convert labels from categorical format to original vector format
    y_test = np.argmax(y_test, axis=1)

    # Predicted labels for Test Data
    pred = model.predict(x_test)
    pred = np.argmax(pred, axis=1)

    df = pd.DataFrame(history.history)
    print(df.tail(1))

    # Accuracy and Erms for Test Data
    accuracy, erms = GetErms(pred, y_test)
    print("Test Erms: " + str(erms) + " Test Acuracy: " + str(accuracy))

    # Precision and Recall for Test Data
    precision, recall = GetPrecisionRecall(pred, y_test)
    print("Precision: " + str(precision) + " Recall: " + str(recall))

    # PLot graph of Loss and Accuracy over epochs
    df.plot(grid=True, figsize=(7, 8))
    plt.savefig('GSC_Subtract_NN.jpg')
def network_f(X_test_data, Y_test_data):
    layers_array = ["conv1"]
    with open('max_dict.csv', mode='r') as infile:
        reader = csv.reader(infile, delimiter=',')
        data_read = [row for row in reader]

    conv_scale = []
    for i in range(0, 4):
        conv_scale.append(math.floor(127 / float(data_read[i * 2][1])))
        print(conv_scale[i])

    accr_list = []
    top_5_acc = []

    model = Sequential()

    # Conv1, Scaling1 and ReLU1
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               input_shape=(channels, img_rows, img_cols),
               data_format='channels_first',
               kernel_initializer='he_normal',
               padding='same',
               use_bias=False,
               name='conv1'))
    model.add(
        Lambda(lambda x: floor_func(x, conv_scale[0]), name='scaling1')
    )  ## Dividing by 27 (MAV) and 18.296 (Instead of 128), so need to multiply by factor of 7 in gain stage
    model.add(Activation(relu_layer, name='act_conv1'))

    # Conv2, Scaling2 and ReLU2
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               data_format='channels_first',
               kernel_initializer='he_normal',
               padding='same',
               use_bias=use_bias,
               name='conv2'))
    model.add(
        Lambda(lambda x: floor_func(x, conv_scale[1]), name='scaling2')
    )  ## Dividing by 288 (MAV) and 1 (Instead of 128), so need to multiply by factor of 128 in gain stage
    model.add(Activation(relu_layer, name='act_conv2'))

    # Pool1
    model.add(
        MaxPooling2D(pool_size=(2, 2),
                     name='pool1',
                     data_format='channels_first'))

    # Conv3, Scaling3 and ReLU3
    model.add(
        Conv2D(64,
               kernel_size=(3, 3),
               data_format='channels_first',
               kernel_initializer='he_normal',
               padding='same',
               use_bias=use_bias,
               name='conv3'))
    model.add(
        Lambda(lambda x: floor_func(x, conv_scale[2]), name='scaling3')
    )  ## Dividing by 288 (MAV) and 2 (Instead of 128), so need to multiply by factor of 64 in gain stage
    model.add(Activation(relu_layer, name='act_conv3'))

    # Conv4, Scaling4  and ReLU4
    model.add(
        Conv2D(64,
               kernel_size=(3, 3),
               data_format='channels_first',
               kernel_initializer='he_normal',
               padding='same',
               use_bias=use_bias,
               name='conv4'))
    model.add(
        Lambda(lambda x: floor_func(x, conv_scale[3]), name='scaling4')
    )  ## Dividing by 576 (MAV) and 1 (Instead of 128), so need to multiply by factor of 128 in gain stage
    model.add(Activation(relu_layer, name='act_conv4'))

    # Pool2
    model.add(
        MaxPooling2D(pool_size=(2, 2),
                     name='pool2',
                     data_format='channels_first'))
    model.add(Flatten())

    # model.add(Lambda(lambda x: x*6, name='scaling_fc'))

    # FC1, Batch Normalization and ReLU5
    model.add(
        Dense(512, use_bias=True, name='FC1', kernel_initializer='he_normal'))
    model.add(
        BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn1'))
    model.add(Activation(relu_layer, name='act_fc1'))

    # FC2, Batch Normalization and ReLU6
    model.add(
        Dense(classes,
              use_bias=True,
              name='FC2',
              kernel_initializer='he_normal'))
    model.add(
        BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn2'))
    model.add(Activation(softmax_layer, name='act_fc2'))

    # Optimizers
    opt = RMSprop(lr=0.0001, decay=1e-6)
    model.compile(loss='squared_hinge',
                  optimizer=opt,
                  metrics=['accuracy', 'top_k_categorical_accuracy'])
    # model.compile('adam', 'categorical_crossentropy', ['accuracy', 'top_k_categorical_accuracy'])
    model.build()
    model.summary()

    model.load_weights(weight_hdf5, by_name=True)

    score = model.evaluate(X_test_data, Y_test_data, verbose=0)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])
    print('top-k accuracy:', score[2])
    accr_list.append(score[1])
    top_5_acc.append(score[2])
    ## LAYER OUTPUTS TO DUMP
    if args["print_layers"] > 0:
        for i in layers_array:
            intermediate_layer_model = Model(inputs=model.input,
                                             outputs=model.get_layer(i).output)
            intermediate_output = intermediate_layer_model.predict(
                [X_test_data])

            file_name = "output/" + i + ".pkl"

            print("Dumping layer {} outputs to file {}".format(i, file_name))
            intermediate_output.dump(file_name)
Esempio n. 59
0
class Classifier:
    def __init__(self, resultDir: str, modelName: str, x_train, y_train_oh,
                 x_dev, y_dev_oh, x_test, y_test_oh, drop1, drop2, drop3):
        """ Initialize parameters and sequential model for training
		"""
        self.resultDir = resultDir
        self.modelName = modelName
        self.x_train = x_train
        self.x_dev = x_dev
        self.x_test = x_test
        self.y_train_oh = y_train_oh
        self.y_dev_oh = y_dev_oh
        self.y_test_oh = y_test_oh

        self.drop1 = drop1
        self.drop2 = drop2
        self.drop3 = drop3

        self.model = Sequential()

        self.model.add(Dense(200, activation='relu', input_shape=(1500, )))
        #self.model.add(BatchNormalization())
        self.model.add(Dropout(self.drop1))

        self.model.add(Dense(500, activation='relu'))
        #self.model.add(BatchNormalization())
        self.model.add(Dropout(self.drop2))

        self.model.add(Dense(800, activation='relu'))
        #self.model.add(BatchNormalization())
        self.model.add(Dropout(self.drop2))

        self.model.add(Dense(1000, activation='relu'))
        #self.model.add(BatchNormalization())
        self.model.add(Dropout(self.drop3))

        self.model.add(Dense(256, activation='softmax'))

        self.model.compile(loss='categorical_crossentropy',
                           metrics=['categorical_accuracy'],
                           optimizer='adam')
        print("Model summary\n")
        print(self.model.summary())

    def train(self, batchSize):
        """ Train the model with the training data
		batchSize : batch size during training
		"""

        Epochs = 1000

        logFile = self.resultDir + '/' + self.modelName + '_' + str(
            batchSize) + '.log'
        csv_logger = CSVLogger(logFile, append=True, separator="\t")

        earlyStop = EarlyStopping(monitor='categorical_accuracy',
                                  patience=10,
                                  mode='auto',
                                  verbose=1,
                                  restore_best_weights=True)

        ##filePath = self.resultDir + '/' + self.modelName + '_checkPoint_best_model.hdf5'
        #### This file will include the epoch number when it gets saved.
        ##repeatingFile = self.resultDir + '/' + self.modelName +'_{epoch:02d}_epoch_acc_{accVar:.2f}.hdf5'
        #### By default the every_10epochs will save the model at every 10 epochs
        ##checkPoint = newCallBacks.ModelCheckpoint_every_10epochs(filePath, repeatingFile, self.x_test, self.y_test_oh , monitor='val_categorical_accuracy', verbose=1, save_best_only=True, every_10epochs=True)

        self.history = self.model.fit(self.x_train,
                                      self.y_train_oh,
                                      batch_size=batchSize,
                                      epochs=Epochs,
                                      verbose=1,
                                      shuffle=True,
                                      validation_data=(self.x_dev,
                                                       self.y_dev_oh),
                                      callbacks=[csv_logger, earlyStop])

    def evaluate(self):
        """ Evaluate the model on itself
		"""

        ## We should be evaluating on dev dataset as well, so commenting x_test
        #self.model_score = self.model.evaluate(self.x_test, self.y_test_oh, batch_size=2048)
        self.model_score = self.model.evaluate(self.x_dev,
                                               self.y_dev_oh,
                                               batch_size=2048)
        print("%s score = %f\n" % (self.modelName, self.model_score[1]))

        ## Saving actual vs predicted labels
        ##np.argmax returns the index where it see's 1 in the row
        #y_pred = np.argmax(self.model.predict(self.x_test, batch_size=2048), axis=1)
        y_pred = np.argmax(self.model.predict(self.x_dev, batch_size=2048),
                           axis=1)

        ## vstack will stack them in 2 rows, so we use Trasnpose to get them in column stack
        #output_predict = np.vstack((np.argmax(self.y_test_oh, axis=1), y_pred)).T
        output_predict = np.vstack((np.argmax(self.y_dev_oh,
                                              axis=1), y_pred)).T

        outputFile = self.resultDir + '/' + self.modelName + '_4HLw_200_500_800_1000_' + str(
            self.history.epoch[-1] + 1) + 'epochs_' + 'Dropout_' + str(
                self.drop1).replace('.', 'p') + '_' + str(self.drop2).replace(
                    '.', 'p') + '_' + str(self.drop3).replace(
                        '.', 'p') + '_' + "_outputPredict.csv"

        np.savetxt(outputFile, output_predict, fmt="%5.0f", delimiter=",")

        ##Error Analysis of the prediction
        errorAnalysis(outputFile)

        return self.model_score

    def saveModel(self):
        """ Save the model
		"""
        saveStr = self.resultDir + '/' + self.modelName + '_4HLw_200_500_800_1000_' + str(
            self.history.epoch[-1] + 1) + 'epochs_' + 'Dropout_' + str(
                self.drop1).replace('.', 'p') + '_' + str(self.drop2).replace(
                    '.', 'p') + '_' + str(self.drop3).replace(
                        '.', 'p') + '_' + '{0:.2f}'.format(
                            self.model_score[1] * 100).replace('.',
                                                               'p') + '.h5'
        print("Saving model to\n%s\n" % (saveStr))
        self.model.save(saveStr)
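# Hypothetical driver for the Classifier above; the 1500-feature matrices and
# 256-way one-hot labels (x_train, y_train_oh, ...) are assumed to be prepared
# elsewhere, and resultDir must already exist.
clf = Classifier('results', 'dense_baseline', x_train, y_train_oh,
                 x_dev, y_dev_oh, x_test, y_test_oh,
                 drop1=0.2, drop2=0.3, drop3=0.4)
clf.train(batchSize=2048)
clf.evaluate()
clf.saveModel()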
def resnet():

    restnet = ResNet50(include_top=False,
                       weights='imagenet',
                       input_shape=(IMG_SIZE, IMG_SIZE, 3))

    output = restnet.layers[-1].output
    output = keras.layers.Flatten()(output)
    restnet = Model(restnet.input, outputs=output)
    for layer in restnet.layers:
        layer.trainable = False

    model = Sequential()
    model.add(restnet)
    model.add(Dense(512,
                    activation='relu'))  #,input_dim=(IMG_SIZE, IMG_SIZE, 3)))
    model.add(Dropout(0.3))
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(10, activation='softmax'))
    #model.load_weights(weight_Path,by_name=True)
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizers.RMSprop(lr=2e-5),
                  metrics=['accuracy'])
    #model.summary()
    '''history = model.fit(X_train,y_train,epochs=10,verbose=1)
    predictions=model.predict(X_test)
    result = model.evaluate(X_test, y_test, verbose=1)
    print("Done testing")
    print("Test loss =", result[0])
    print("Test accuracy =", result[1] * 100)'''

    ###############Fine tuning################
    restnet.trainable = True
    set_trainable = False
    for layer in restnet.layers:
        if layer.name in ['res5c_branch2b', 'res5c_branch2c', 'activation_97']:
            set_trainable = True
        if set_trainable:
            layer.trainable = True
        else:
            layer.trainable = False
    layers = [(layer, layer.name, layer.trainable) for layer in restnet.layers]
    model_finetuned = Sequential()
    model_finetuned.add(restnet)
    model_finetuned.add(Dense(512,
                              activation='relu'))  #, input_dim=input_shape))
    #model_finetuned.add(Dropout(0.3))
    model_finetuned.add(Dense(512, activation='relu'))
    #model_finetuned.add(Dropout(0.3))
    model_finetuned.add(Dense(10, activation='sigmoid'))
    model_finetuned.compile(loss='binary_crossentropy',
                            optimizer=optimizers.RMSprop(lr=1e-5),
                            metrics=['accuracy'])
    model_finetuned.summary()
    history = model_finetuned.fit(X_train, y_train, epochs=10, verbose=1)
    predictions = model_finetuned.predict(X_test)
    result = model_finetuned.evaluate(X_test, y_test, verbose=1)
    print("Done testing")

    print("Test loss =", result[0])
    print("Test accuracy =", result[1] * 100)
    model_finetuned.save('resnetModelFineTuned.h5')
    #files.download('resnetModelFineTuned.h5')
    predictionstest = model_finetuned.predict(test_data)
    predictionLabel = np.argmax(predictionstest, axis=1)
    predictionLabel = predictionLabel + 1
    submitFile['Label'] = predictionLabel
    TestID = []
    for img in tqdm(os.listdir(TEST_DIR)):
        TestID.append(img)
    submitFile['Id'] = TestID
    submitFile.to_csv("submitFile.csv", index=False)