Example #1
    def __init__(self, restore=None, session=None, use_softmax=False):
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10
        self.shape = [None, 28, 28, self.num_channels]
        model = Sequential()
        kernel_size = (5, 5)
        drop_rate = 0.3
        model.add(Conv2D(32, kernel_size, activation='relu',
                         padding='same', name='block1_conv1', input_shape=(28, 28, 1)))  # 1
        model.add(MaxPooling2D(pool_size=(2, 2), name='block1_pool1'))  # 2
        model.add(Dropout(drop_rate))

        # block2
        model.add(Conv2D(64, kernel_size, activation='relu', padding='same', name='block2_conv1'))  # 4
        model.add(MaxPooling2D(pool_size=(2, 2), name='block2_pool1'))  # 5
        model.add(Dropout(drop_rate))

        model.add(Flatten(name='flatten'))

        model.add(Dense(120, activation='relu', name='fc1'))  # -5
        model.add(Dropout(drop_rate))
        model.add(Dense(84, activation='relu', name='fc2'))  # -3
        model.add(Dense(10, name='before_softmax'))  # -2
        model.add(Activation('softmax', name='predictions'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
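Every example in this collection stores one extractor per Conv2D/Dense layer using the TF1-style K.function API. Under TensorFlow 2's eager execution the same intermediate-activation pattern is usually written with a small keras.Model per layer; a minimal, self-contained sketch (the toy model and names here are illustrative, not from the example above):

import numpy as np
from tensorflow import keras
from tensorflow.keras.layers import Dense, Flatten

model = keras.Sequential([
    Flatten(input_shape=(28, 28, 1)),
    Dense(16, activation='relu', name='fc1'),
    Dense(10, name='logits'),
])

# One extractor per Dense layer, mapping the model input to that layer's output.
extractors = [keras.Model(inputs=model.input, outputs=layer.output)
              for layer in model.layers if isinstance(layer, Dense)]

x = np.random.rand(2, 28, 28, 1).astype('float32')
for extractor in extractors:
    print(extractor(x).shape)  # (2, 16) then (2, 10)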
Example #2
    def __init__(self, params, restore=None, session=None, use_log=False, image_size=28, image_channel=1):
        
        self.image_size = image_size
        self.num_channels = image_channel
        self.num_labels = 10
        
        model = Sequential()
        model.add(Flatten(input_shape=(image_size, image_size, image_channel)))
        # list of all hidden units weights
        self.U = []
        for param in params:
            # add each dense layer, and save a reference to list U
            self.U.append(Dense(param))
            model.add(self.U[-1])
            # ReLU activation
            model.add(Activation('relu'))
        self.W = Dense(10)
        model.add(self.W)
        # output log probability, used for black-box attack
        if use_log:
            model.add(Activation('softmax'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Example #3
    def __init__(self, restore=None, session=None, use_softmax=False):
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10
        self.shape = [None, 28 * 28]
        model = Sequential()
        model.add(
            Dense(512,
                  activation='relu',
                  input_shape=(28 * 28, ),
                  name='dense_1'))
        model.add(Dropout(0.2, name='d1'))
        model.add(Dense(512, activation='relu', name='dense_2'))
        model.add(Dropout(0.2, name='d2'))
        model.add(Dense(10, activation='softmax', name='dense_3'))
        if restore:
            model.load_weights(restore, by_name=True)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(
                    K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Example #4
    def __init__(self, restore=None, session=None, use_softmax=False):
        self.num_channels = 3
        self.image_size = 32
        self.num_labels = 10

        model = Sequential()

        model.add(Conv2D(64, (3, 3), input_shape=(32, 32, 3)))
        model.add(Activation('relu'))
        model.add(Conv2D(64, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Conv2D(128, (3, 3)))
        model.add(Activation('relu'))
        model.add(Conv2D(128, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Flatten())
        model.add(Dense(256))
        model.add(Activation('relu'))
        model.add(Dense(256))
        model.add(Activation('relu'))
        model.add(Dense(10))
        if use_softmax:
            model.add(Activation('softmax'))
        if restore:
            model.load_weights(restore)

        self.model = model
Example #5
    def __init__(self, restore=None, session=None, use_log=False):
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10

        model = Sequential()
        model.add(Flatten(input_shape=(28, 28, 1)))
        model.add(Dense(1024))
        model.add(Lambda(lambda x: x * 10))
        model.add(Activation('softplus'))
        model.add(Lambda(lambda x: x * 0.1))
        model.add(Dense(10))
        # output log probability, used for black-box attack
        if use_log:
            model.add(Activation('softmax'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(
                    K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Example #6
    def __init__(self, restore=None, session=None, use_softmax=False):
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10

        model = Sequential()

        model.add(Conv2D(32, (3, 3), input_shape=(28, 28, 1)))
        model.add(Activation('relu'))
        model.add(Conv2D(32, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Conv2D(64, (3, 3)))
        model.add(Activation('relu'))
        model.add(Conv2D(64, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Flatten())
        model.add(Dense(200))
        model.add(Activation('relu'))
        model.add(Dense(200))
        model.add(Activation('relu'))
        model.add(Dense(10))
        # output log probability, used for black-box attack
        if use_softmax:
            model.add(Activation('softmax'))
        if restore:
            model.load_weights(restore)

        self.model = model
Example #7
def get_clf():
    """
    Standard neural network training procedure.
    """
    model = Sequential()

    model.add(Conv2D(32, (3, 3), input_shape=(28, 28, 1)))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(200))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(200))
    model.add(Activation('relu'))
    model.add(Dense(10))

    model.load_weights("./models/mnist")

    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct, logits=predicted)

    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    return model
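Note that get_clf compiles a loss on raw logits: the network ends in a linear Dense(10) and fn applies softmax_cross_entropy_with_logits, which is numerically more stable than taking the log of a softmax output. A quick check, assuming TensorFlow 2, that this matches Keras's built-in cross entropy with from_logits=True:

import tensorflow as tf

labels = tf.constant([[0., 1., 0.]])
logits = tf.constant([[2.0, 1.0, 0.1]])

# The fn loss used above, and the equivalent Keras formulation.
print(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits).numpy())
print(tf.keras.losses.categorical_crossentropy(labels, logits, from_logits=True).numpy())
# both print ~[1.417]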
Example #8
def create_model(activation, bn, data, filters, init, kernels,
                 use_padding_same):
    model = Sequential()
    if use_padding_same:
        model.add(
            Conv2D(filters[0],
                   kernels[0],
                   input_shape=data.train_data.shape[1:],
                   padding="same"))
    else:
        model.add(
            Conv2D(filters[0],
                   kernels[0],
                   input_shape=data.train_data.shape[1:]))
    if bn:
        apply_bn(data, model)
    # model.add(Activation(activation))
    model.add(Lambda(activation))
    for f, k in zip(filters[1:], kernels[1:]):
        if use_padding_same:
            model.add(Conv2D(f, k, padding="same"))
        else:
            model.add(Conv2D(f, k))
        if bn:
            apply_bn(data, model)
        # model.add(Activation(activation))
        # ReLU activation
        model.add(Lambda(activation))
    # the output layer
    model.add(Flatten())
    model.add(Dense(data.train_labels.shape[1]))
    # load initial weights when given
    if init != None:
        model.load_weights(init)
    return model
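create_model wraps the activation in a Lambda layer rather than an Activation layer so that any callable (for example tf.atan) can serve as the nonlinearity, not only named Keras activations. A minimal illustration with arbitrary layer sizes:

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Dense, Lambda

model = keras.Sequential([
    Dense(4, input_shape=(3,)),
    Lambda(tf.atan),  # an arbitrary callable used as the activation
    Dense(2),
])
print(model(tf.ones((1, 3))).shape)  # (1, 2)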
Example #9
    def __init__(self, restore=None, session=None, use_softmax=False):
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10
        self.shape = [None, 28, 28, self.num_channels]
        model = Sequential()
        model.add(Conv2D(6, (5, 5), padding='valid', activation='relu', kernel_initializer='he_normal',
                         input_shape=(28,28,1), name='l1'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2), name='l2'))
        model.add(Conv2D(16, (5, 5), padding='valid', activation='relu', kernel_initializer='he_normal', name='l3'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2), name='l4'))
        model.add(Flatten())
        model.add(Dense(120, activation='relu', kernel_initializer='he_normal', name='l5'))
        model.add(Dense(84, activation='relu', kernel_initializer='he_normal', name='l6'))
        model.add(Dense(10, activation='softmax', kernel_initializer='he_normal', name='l7'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Example #10
def train(data,
          file_name,
          params,
          num_epochs=50,
          batch_size=128,
          train_temp=1,
          init=None,
          lr=0.01,
          decay=1e-5,
          momentum=0.9):
    """
    Train an n-layer simple network for MNIST and CIFAR
    """

    # create a Keras sequential model
    model = Sequential()
    # reshape the input (28*28*1) or (32*32*3) to 1-D
    model.add(Flatten(input_shape=data.train_data.shape[1:]))
    # dense layers (the hidden layer)
    for param in params:
        model.add(Dense(param))
        # ReLU activation
        model.add(Activation('relu'))
    # the output layer, with 10 classes
    model.add(Dense(10))

    # load initial weights when given
    if init is not None:
        model.load_weights(init)

    # define the loss function which is the cross entropy between prediction and true label
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted /
                                                       train_temp)

    # initiate the SGD optimizer with given hyper parameters
    sgd = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)

    # compile the Keras model, given the specified loss and optimizer
    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    model.summary()
    print("Traing a {} layer model, saving to {}".format(
        len(params) + 1, file_name))
    # run training with given dataset, and print progress
    history = model.fit(data.train_data,
                        data.train_labels,
                        batch_size=batch_size,
                        validation_data=(data.validation_data,
                                         data.validation_labels),
                        epochs=num_epochs,
                        shuffle=True)

    # save model to a file
    if file_name is not None:
        model.save(file_name)

    return {'model': model, 'history': history}
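The train_temp division in fn is the temperature used by defensive distillation: dividing logits by a temperature T > 1 softens the softmax distribution the network is trained against. A small numeric check with NumPy:

import numpy as np

def softmax(z):
    e = np.exp(z - z.max())
    return e / e.sum()

logits = np.array([2.0, 1.0, 0.1])
print(np.round(softmax(logits / 1), 3))   # [0.659 0.242 0.099]
print(np.round(softmax(logits / 10), 3))  # [0.366 0.331 0.303]  (softer)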
Example #11
def train(data,
          file_name,
          params,
          num_epochs=50,
          batch_size=256,
          train_temp=1,
          init=None,
          lr=0.01,
          decay=1e-5,
          momentum=0.9,
          activation="relu",
          optimizer_name="sgd"):
    """
    Train an n-layer simple network for MNIST and CIFAR
    """
    
    # create a Keras sequential model
    model = Sequential()
    # reshape the input (28*28*1) or (32*32*3) to 1-D
    model.add(Flatten(input_shape=data.train_data.shape[1:]))
    # dense layers (the hidden layer)
    n = 0
    for param in params:
        n += 1
        model.add(Dense(param, kernel_initializer='he_uniform'))
        # ReLU activation
        if activation == "arctan":
            model.add(Lambda(lambda x: tf.atan(x), name=activation+"_"+str(n)))
        else:
            model.add(Activation(activation, name=activation+"_"+str(n)))
    # the output layer, with 10 classes
    model.add(Dense(10, kernel_initializer='he_uniform'))
    
    # load initial weights when given
    if init is not None:
        model.load_weights(init)

    # define the loss function which is the cross entropy between prediction and true label
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted/train_temp)

    if optimizer_name == "sgd":
        # initiate the SGD optimizer with given hyper parameters
        optimizer = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)
    elif optimizer_name == "adam":
        optimizer = Adam(lr=lr, beta_1 = 0.9, beta_2 = 0.999, epsilon = None, decay=decay, amsgrad=False)

    # compile the Keras model, given the specified loss and optimizer
    model.compile(loss=fn,
                  optimizer=optimizer,
                  metrics=['accuracy'])
    
    model.summary()
    print("Traing a {} layer model, saving to {}".format(len(params) + 1, file_name))
    # run training with given dataset, and print progress
    history = model.fit(data.train_data, data.train_labels,
              batch_size=batch_size,
              validation_data=(data.validation_data, data.validation_labels),
              epochs=num_epochs,
              shuffle=True)
    

    # save model to a file
    if file_name is not None:
        model.save(file_name)
        print('model saved to ', file_name)
    
    return {'model':model, 'history':history}
Example #12
def train(data,
          file_name,
          params,
          num_epochs=50,
          batch_size=128,
          train_temp=1,
          init=None):
    """
    Standard neural network training procedure.
    """
    model = Sequential()

    print(data.train_data.shape)

    model.add(Conv2D(params[0], (3, 3), input_shape=data.train_data.shape[1:]))
    model.add(Activation('relu'))
    model.add(Conv2D(params[1], (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(params[2], (3, 3)))
    model.add(Activation('relu'))
    model.add(Conv2D(params[3], (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(params[4]))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(params[5]))
    model.add(Activation('relu'))
    model.add(Dense(10))

    if init is not None:
        model.load_weights(init)

    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted /
                                                       train_temp)

    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    model.fit(data.train_data,
              data.train_labels,
              batch_size=batch_size,
              validation_data=(data.validation_data, data.validation_labels),
              epochs=num_epochs,
              shuffle=True)

    if file_name is not None:
        model.save(file_name)

    return model
Example #13
def train(data,
          file_name,
          params,
          num_epochs=50,
          batch_size=128,
          train_temp=1,
          init=None,
          lr=0.01,
          decay=1e-5,
          momentum=0.9):
    """
    Train a 2-layer simple network for MNIST and CIFAR
    """
    # create a Keras sequential model
    model = Sequential()
    # reshape the input (28*28*1) or (32*32*3) to 1-D
    model.add(Flatten(input_shape=data.train_data.shape[1:]))
    # first dense layer (the hidden layer)
    model.add(Dense(params[0]))
    # \alpha = 10 in softplus, multiply input by 10
    model.add(Lambda(lambda x: x * 10))
    # in Keras the softplus activation cannot set \alpha
    model.add(Activation('softplus'))
    # so manually add \alpha to the network
    model.add(Lambda(lambda x: x * 0.1))
    # the output layer, with 10 classes
    model.add(Dense(10))

    # load initial weights when given
    if init is not None:
        model.load_weights(init)

    # define the loss function which is the cross entropy between prediction and true label
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted /
                                                       train_temp)

    # initiate the SGD optimizer with given hyper parameters
    sgd = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)

    # compile the Keras model, given the specified loss and optimizer
    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    # run training with given dataset, and print progress
    model.fit(data.train_data,
              data.train_labels,
              batch_size=batch_size,
              validation_data=(data.validation_data, data.validation_labels),
              epochs=num_epochs,
              shuffle=True)

    # save model to a file
    if file_name is not None:
        model.save(file_name)

    return model
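The pair of Lambda layers implements a sharpened softplus: scaling the input by alpha = 10 and the output by 1/alpha gives softplus_alpha(x) = (1/alpha) * log(1 + exp(alpha * x)), a smooth ReLU approximation whose sharpness Keras's built-in softplus does not expose. A numeric check:

import numpy as np

def softplus_alpha(x, alpha=10.0):
    return np.log1p(np.exp(alpha * x)) / alpha

x = np.array([-1.0, -0.1, 0.0, 0.1, 1.0])
print(np.round(softplus_alpha(x), 4))  # [0.     0.0313 0.0693 0.1313 1.    ]
print(np.maximum(x, 0))                # ReLU, for comparison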
Example #14
def get_dae_clf():
    model1 = Sequential()

    model1.add(Lambda(lambda x_: x_ + 0.5, input_shape=(28, 28, 1)))

    # Encoder
    model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
    model1.add(AveragePooling2D((2, 2), padding="same"))
    model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))

    # Decoder
    model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
    model1.add(UpSampling2D((2, 2)))
    model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
    model1.add(Conv2D(1, (3, 3), activation='sigmoid', padding='same', activity_regularizer=regs.l2(1e-9)))

    model1.add(Lambda(lambda x_: x_ - 0.5))

    model1.load_weights("./dae/mnist")
    model1.compile(loss='mean_squared_error', metrics=['mean_squared_error'], optimizer='adam')

    model2 = Sequential()

    model2.add(Conv2D(32, (3, 3), input_shape=(28, 28, 1)))
    model2.add(Activation('relu'))
    model2.add(Conv2D(32, (3, 3)))
    model2.add(Activation('relu'))
    model2.add(MaxPooling2D(pool_size=(2, 2)))

    model2.add(Conv2D(64, (3, 3)))
    model2.add(Activation('relu'))
    model2.add(Conv2D(64, (3, 3)))
    model2.add(Activation('relu'))
    model2.add(MaxPooling2D(pool_size=(2, 2)))

    model2.add(Flatten())
    model2.add(Dense(200))
    model2.add(Activation('relu'))
    model2.add(Dropout(0.5))
    model2.add(Dense(200))
    model2.add(Activation('relu'))
    model2.add(Dense(10))

    model2.load_weights("./models/mnist")

    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct, logits=predicted)

    model2.compile(loss=fn, optimizer=SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True), metrics=['accuracy'])

    model = Sequential()
    model.add(model1)
    model.add(model2)
    model.compile(loss=fn, optimizer=SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True), metrics=['accuracy'])

    return model
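get_dae_clf relies on the fact that a Keras model is itself a layer, so the denoising autoencoder and the classifier can be chained inside an outer Sequential. A minimal, self-contained sketch of that composition (the shapes here are illustrative):

from tensorflow import keras
from tensorflow.keras.layers import Dense

denoiser = keras.Sequential([Dense(4, input_shape=(4,))], name='denoiser')
classifier = keras.Sequential([Dense(3, input_shape=(4,))], name='classifier')

# A model can be added to a Sequential just like any other layer.
stacked = keras.Sequential([denoiser, classifier])
stacked.build(input_shape=(None, 4))
stacked.summary()  # two "layers", each itself a model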
Example #15
    def __init__(self,
                 restore=None,
                 session=None,
                 use_softmax=False,
                 use_brelu=False,
                 activation="relu"):
        def bounded_relu(x):
            return K.relu(x, max_value=1)

        if use_brelu:
            activation = bounded_relu

        print("inside CIFARModel: activation = {}".format(activation))

        self.num_channels = 3
        self.image_size = 32
        self.num_labels = 10

        model = Sequential()

        model.add(Conv2D(64, (3, 3), input_shape=(32, 32, 3)))
        model.add(Activation(activation))
        model.add(Conv2D(64, (3, 3)))
        model.add(Activation(activation))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Conv2D(128, (3, 3)))
        model.add(Activation(activation))
        model.add(Conv2D(128, (3, 3)))
        model.add(Activation(activation))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Flatten())
        model.add(Dense(256))
        model.add(Activation(activation))
        model.add(Dense(256))
        model.add(Activation(activation))
        model.add(Dense(10))
        if use_softmax:
            model.add(Activation('softmax'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(
                    K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Example #16
    def __init__(self,
                 restore=None,
                 session=None,
                 use_log=False,
                 use_brelu=False):
        def bounded_relu(x):
            return K.relu(x, max_value=1)

        if use_brelu:
            activation = bounded_relu
        else:
            activation = 'relu'
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10

        model = Sequential()

        model.add(Conv2D(32, (3, 3), input_shape=(28, 28, 1)))
        model.add(Activation(activation))
        model.add(Conv2D(32, (3, 3)))
        model.add(Activation(activation))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Conv2D(64, (3, 3)))
        model.add(Activation(activation))
        model.add(Conv2D(64, (3, 3)))
        model.add(Activation(activation))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Flatten())
        model.add(Dense(200))
        model.add(Activation(activation))
        model.add(Dense(200))
        model.add(Activation(activation))
        model.add(Dense(10))
        # output log probability, used for black-box attack
        if use_log:
            model.add(Activation('softmax'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(
                    K.function([model.layers[0].input], [layer.output]))

        self.model = model
        self.layer_outputs = layer_outputs
Example #17
class Model(object):
    def __init__(self):
        self.model = Sequential()
        self.model.add(
            Conv2D(32, (3, 3), input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Conv2D(32, (3, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Conv2D(64, (3, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Flatten())
        self.model.add(Dense(64))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(1))
        self.model.add(Activation('sigmoid'))

    def load(self, file_path=FILE_PATH):
        print('Model Loaded.')
        self.model.load_weights(file_path)

    def predict(self, image):
        # predict the class of a single sample
        img = image.reshape((1, IMAGE_SIZE, IMAGE_SIZE, 3)).astype('float32')
        img /= 255  # normalize to [0, 1]
        result = self.model.predict(img)
        print(result)
        # class probabilities
        result = self.model.predict_classes(img)
        print(result)
        # 0/1 class label

        return result[0]
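Note that Sequential.predict_classes was removed in newer TensorFlow releases; for a single sigmoid output the equivalent is thresholding predict() at 0.5. A stand-in sketch (the array below substitutes for model.predict(img)):

import numpy as np

probs = np.array([[0.83], [0.12]])       # stand-in for model.predict(img)
classes = (probs > 0.5).astype('int32')  # what predict_classes returned
print(classes.ravel())                   # [1 0]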
Example #18
def get_dae():
    model = Sequential()

    model.add(Lambda(lambda x_: x_ + 0.5, input_shape=(28, 28, 1)))

    # Encoder
    model.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
    model.add(AveragePooling2D((2, 2), padding="same"))
    model.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))

    # Decoder
    model.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
    model.add(Conv2D(1, (3, 3), activation='sigmoid', padding='same', activity_regularizer=regs.l2(1e-9)))

    model.add(Lambda(lambda x_: x_ - 0.5))

    model.load_weights("./dae/mnist")
    model.compile(loss='mean_squared_error', metrics=['mean_squared_error'], optimizer='adam')

    return model
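The surrounding Lambda layers exist because these examples keep image data in the [-0.5, 0.5] range while the sigmoid autoencoder works in [0, 1]: the model shifts the input up, denoises, and shifts back. The arithmetic, spelled out:

import numpy as np

x = np.array([-0.5, 0.0, 0.5])  # input convention of these examples
print(x + 0.5)                  # [0.  0.5 1. ]  -> range of the sigmoid DAE
print((x + 0.5) - 0.5)          # back to the [-0.5, 0.5] convention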
Example #19
class AmazonKerasClassifier:
    def __init__(self):
        self.losses = []
        self.classifier = Sequential()

    def add_conv_layer(self, img_size=(32, 32), img_channels=3):
        self.classifier.add(
            BatchNormalization(input_shape=(*img_size, img_channels)))
        self.classifier.add(Conv2D(32, (3, 3), activation='relu'))
        self.classifier.add(MaxPooling2D(pool_size=(2, 2)))
        self.classifier.add(Dropout(0.25))
        self.classifier.add(Conv2D(64, (3, 3), activation='relu'))
        self.classifier.add(MaxPooling2D(pool_size=(2, 2)))
        self.classifier.add(Dropout(0.25))
        self.classifier.add(Conv2D(16, (2, 2), activation='relu'))
        self.classifier.add(MaxPooling2D(pool_size=(2, 2)))
        self.classifier.add(Dropout(0.25))

    def add_flatten_layer(self):
        self.classifier.add(Flatten())

    def add_ann_layer(self, output_size):
        self.classifier.add(Dense(256, activation='relu'))
        self.classifier.add(Dropout(0.25))
        self.classifier.add(Dense(128, activation='relu'))
        self.classifier.add(Dropout(0.25))
        self.classifier.add(Dense(output_size, activation='sigmoid'))

    def _get_fbeta_score(self, classifier, X_valid, y_valid):
        p_valid = classifier.predict(X_valid)
        #print ('p_valid')
        #print(p_valid.shape)
        #print(p_valid)
        return fbeta_score(y_valid,
                           np.array(p_valid) > 0.2,
                           beta=2,
                           average='samples')

    def train_model(self,
                    x_train,
                    y_train,
                    epoch=5,
                    batch_size=128,
                    validation_split_size=0.2,
                    train_callbacks=()):
        history = LossHistory()

        X_train, X_valid, y_train, y_valid = train_test_split(
            x_train, y_train, test_size=validation_split_size)
        adam = Adam(lr=0.01, decay=1e-6)
        rms = RMSprop(lr=0.0001, decay=1e-6)
        self.classifier.compile(loss='binary_crossentropy',
                                optimizer='adam',
                                metrics=['accuracy'])

        print('X_train.shape[0]')
        print(X_train.shape[0])

        checkpointer = ModelCheckpoint(filepath="weights.best.hdf5",
                                       verbose=1,
                                       save_best_only=True)
        datagen = ImageDataGenerator(
            featurewise_center=False,             # set input mean to 0 over the dataset
            samplewise_center=False,              # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by the dataset std
            samplewise_std_normalization=False,   # divide each input by its std
            zca_whitening=False,                  # apply ZCA whitening
            rotation_range=0,                     # randomly rotate images (degrees, 0 to 180)
            width_shift_range=0.1,                # randomly shift images horizontally (fraction of width)
            height_shift_range=0.1,               # randomly shift images vertically (fraction of height)
            horizontal_flip=True,                 # randomly flip images horizontally
            vertical_flip=False)                  # do not flip vertically

        datagen.fit(X_train)

        self.classifier.fit_generator(
            datagen.flow(X_train, y_train, batch_size=batch_size),
            steps_per_epoch=X_train.shape[0] // batch_size,
            epochs=epoch,
            validation_data=(X_valid, y_valid),
            callbacks=[history, *train_callbacks, checkpointer])

        fbeta_score = self._get_fbeta_score(self.classifier, X_valid, y_valid)
        print(fbeta_score)
        return [history.train_losses, history.val_losses, fbeta_score]

    def load_weight(self):
        self.classifier.load_weights("weights.best.hdf5")

    def predict(self, x_test):
        predictions = self.classifier.predict(x_test)
        #print('predictions')
        #print(predictions.shape)
        #print(predictions)
        return predictions

    def map_predictions(self, predictions, labels_map, thresholds):
        """
        Return the predictions mapped to their labels
        :param predictions: the predictions from the predict() method
        :param labels_map: the map 
        :param thresholds: The threshold of each class to be considered as existing or not existing
        :return: the predictions list mapped to their labels
        """
        predictions_labels = []
        for prediction in predictions:
            labels = [
                labels_map[i] for i, value in enumerate(prediction)
                if value > thresholds[i]
            ]
            predictions_labels.append(labels)

        return predictions_labels

    def close(self):
        backend.clear_session()
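A tiny worked example of map_predictions: per-class thresholds turn the classifier's sigmoid scores into label lists (the label names and scores below are made up for illustration):

labels_map = {0: 'clear', 1: 'cloudy', 2: 'road'}
thresholds = [0.2, 0.5, 0.2]
predictions = [[0.9, 0.4, 0.1], [0.1, 0.7, 0.3]]

for prediction in predictions:
    print([labels_map[i] for i, value in enumerate(prediction)
           if value > thresholds[i]])
# ['clear'] then ['cloudy', 'road']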
Example #20
    def __init__(self, restore=None, session=None, use_softmax=False):
        self.num_channels = 3
        self.image_size = 32
        self.num_labels = 10
        self.shape = [None, 32, 32, self.num_channels]
        model = Sequential()
        weight_decay = 0.0001
        model.add(
            Conv2D(192, (5, 5),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(weight_decay),
                   input_shape=(32, 32, 3),
                   name='l1'))  # input_shape could also be x_train.shape[1:]
        model.add(BatchNormalization(name='l2'))
        model.add(Activation('relu', name='l3'))
        model.add(
            Conv2D(160, (1, 1),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(weight_decay),
                   name='l4'))
        model.add(BatchNormalization(name='l5'))
        model.add(Activation('relu', name='l6'))
        model.add(
            Conv2D(96, (1, 1),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(weight_decay),
                   name='l7'))
        model.add(BatchNormalization(name='l8'))
        model.add(Activation('relu', name='l9'))
        model.add(
            MaxPooling2D(pool_size=(3, 3),
                         strides=(2, 2),
                         padding='same',
                         name='l10'))

        model.add(Dropout(0.3, name='l11'))

        model.add(
            Conv2D(192, (5, 5),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(weight_decay),
                   name='l12'))
        model.add(BatchNormalization(name='l13'))
        model.add(Activation('relu', name='l14'))
        model.add(
            Conv2D(192, (1, 1),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(weight_decay),
                   name='l15'))
        model.add(BatchNormalization(name='l16'))
        model.add(Activation('relu', name='l17'))
        model.add(
            Conv2D(192, (1, 1),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(weight_decay),
                   name='l18'))
        model.add(BatchNormalization(name='l20'))
        model.add(Activation('relu', name='l21'))
        model.add(
            MaxPooling2D(pool_size=(3, 3),
                         strides=(2, 2),
                         padding='same',
                         name='l22'))

        model.add(Dropout(0.3, name='l23'))

        model.add(
            Conv2D(192, (3, 3),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(weight_decay),
                   name='l24'))
        model.add(BatchNormalization(name='l25'))
        model.add(Activation('relu', name='l26'))
        model.add(
            Conv2D(192, (1, 1),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(weight_decay),
                   name='l27'))
        model.add(BatchNormalization(name='l28'))
        model.add(Activation('relu', name='l29'))
        model.add(
            Conv2D(10, (1, 1),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(weight_decay),
                   name='l30'))
        model.add(BatchNormalization(name='l31'))
        model.add(Activation('relu', name='l32'))

        model.add(GlobalAveragePooling2D(name='l33'))
        model.add(Activation('softmax', name='l34'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(
                    K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
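Example #20 is a Network-in-Network-style model: the head is a 10-channel 1x1 convolution followed by global average pooling rather than a Dense layer, so each class gets one feature map whose spatial mean becomes its logit. A minimal sketch of just that head:

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Activation, Conv2D, GlobalAveragePooling2D

head = keras.Sequential([
    Conv2D(10, (1, 1), input_shape=(8, 8, 192)),  # one channel per class
    GlobalAveragePooling2D(),                     # spatial mean -> (batch, 10)
    Activation('softmax'),
])
print(head(tf.zeros((1, 8, 8, 192))).shape)       # (1, 10)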
Example #21
def train_cnn_7layer(data,
                     file_name,
                     params,
                     num_epochs=50,
                     batch_size=256,
                     train_temp=1,
                     init=None,
                     lr=0.01,
                     decay=1e-5,
                     momentum=0.9,
                     activation="relu",
                     optimizer_name="sgd"):
    """
    Train a 7-layer cnn network for MNIST and CIFAR (same as the cnn model in Clever)
    mnist: 32 32 64 64 200 200 
    cifar: 64 64 128 128 256 256
    """

    # create a Keras sequential model
    model = Sequential()

    print("training data shape = {}".format(data.train_data.shape))

    # define model structure
    model.add(Conv2D(params[0], (3, 3), input_shape=data.train_data.shape[1:]))
    model.add(Activation(activation))
    model.add(Conv2D(params[1], (3, 3)))
    model.add(Activation(activation))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(params[2], (3, 3)))
    model.add(Activation(activation))
    model.add(Conv2D(params[3], (3, 3)))
    model.add(Activation(activation))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(params[4]))
    model.add(Activation(activation))
    model.add(Dropout(0.5))
    model.add(Dense(params[5]))
    model.add(Activation(activation))
    model.add(Dense(10))

    # load initial weights when given
    if init is not None:
        model.load_weights(init)

    # define the loss function which is the cross entropy between prediction and true label
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted /
                                                       train_temp)

    if optimizer_name == "sgd":
        # initiate the SGD optimizer with given hyper parameters
        optimizer = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)
    elif optimizer_name == "adam":
        optimizer = Adam(lr=lr,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=None,
                         decay=decay,
                         amsgrad=False)

    # compile the Keras model, given the specified loss and optimizer
    model.compile(loss=fn, optimizer=optimizer, metrics=['accuracy'])

    model.summary()
    print("Traing a {} layer model, saving to {}".format(
        len(params) + 1, file_name))
    # run training with given dataset, and print progress
    history = model.fit(data.train_data,
                        data.train_labels,
                        batch_size=batch_size,
                        validation_data=(data.validation_data,
                                         data.validation_labels),
                        epochs=num_epochs,
                        shuffle=True)

    # save model to a file
    if file_name is not None:
        model.save(file_name)
        print('model saved to ', file_name)

    return {'model': model, 'history': history}
Example #22
class AmazonKerasClassifier:
    def __init__(self):
        self.losses = []
        self.classifier = Sequential()

    def add_conv_layer(self, img_size=(32, 32), img_channels=3):
        self.classifier.add(BatchNormalization(input_shape=(*img_size, img_channels)))

        self.classifier.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
        self.classifier.add(Conv2D(32, (3, 3), activation='relu'))
        self.classifier.add(MaxPooling2D(pool_size=2))
        self.classifier.add(Dropout(0.25))

        self.classifier.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
        self.classifier.add(Conv2D(64, (3, 3), activation='relu'))
        self.classifier.add(MaxPooling2D(pool_size=2))
        self.classifier.add(Dropout(0.25))

        self.classifier.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
        self.classifier.add(Conv2D(128, (3, 3), activation='relu'))
        self.classifier.add(MaxPooling2D(pool_size=2))
        self.classifier.add(Dropout(0.25))

        self.classifier.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
        self.classifier.add(Conv2D(256, (3, 3), activation='relu'))
        self.classifier.add(MaxPooling2D(pool_size=2))
        self.classifier.add(Dropout(0.25))


    def add_flatten_layer(self):
        self.classifier.add(Flatten())


    def add_ann_layer(self, output_size):
        self.classifier.add(Dense(512, activation='relu'))
        self.classifier.add(BatchNormalization())
        self.classifier.add(Dropout(0.5))
        self.classifier.add(Dense(output_size, activation='sigmoid'))

    def _get_fbeta_score(self, classifier, X_valid, y_valid):
        p_valid = classifier.predict(X_valid)
        return fbeta_score(y_valid, np.array(p_valid) > 0.2, beta=2, average='samples')

    def train_model(self, x_train, y_train, learn_rate=0.001, epoch=5, batch_size=128, validation_split_size=0.2, train_callbacks=()):
        history = LossHistory()

        X_train, X_valid, y_train, y_valid = train_test_split(x_train, y_train,
                                                              test_size=validation_split_size)

        opt = Adam(lr=learn_rate)

        self.classifier.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])


        # early stopping will auto-stop training process if model stops learning after 3 epochs
        earlyStopping = EarlyStopping(monitor='val_loss', patience=3, verbose=0, mode='auto')

        self.classifier.fit(X_train, y_train,
                            batch_size=batch_size,
                            epochs=epoch,
                            verbose=1,
                            validation_data=(X_valid, y_valid),
                            callbacks=[history, *train_callbacks, earlyStopping])
        fbeta_score = self._get_fbeta_score(self.classifier, X_valid, y_valid)
        return [history.train_losses, history.val_losses, fbeta_score]

    def save_weights(self, weight_file_path):
        self.classifier.save_weights(weight_file_path)

    def load_weights(self, weight_file_path):
        self.classifier.load_weights(weight_file_path)

    def predict(self, x_test):
        predictions = self.classifier.predict(x_test)
        return predictions

    def map_predictions(self, predictions, labels_map, thresholds):
        """
        Return the predictions mapped to their labels
        :param predictions: the predictions from the predict() method
        :param labels_map: the map
        :param thresholds: The threshold of each class to be considered as existing or not existing
        :return: the predictions list mapped to their labels
        """
        predictions_labels = []
        for prediction in predictions:
            labels = [labels_map[i] for i, value in enumerate(prediction) if value > thresholds[i]]
            predictions_labels.append(labels)

        return predictions_labels

    def close(self):
        backend.clear_session()
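_get_fbeta_score binarizes the sigmoid outputs at 0.2 and scores with F-beta at beta = 2, which weights recall more heavily than precision. A small self-contained check with scikit-learn:

import numpy as np
from sklearn.metrics import fbeta_score

y_true = np.array([[1, 0, 1], [0, 1, 0]])
p_valid = np.array([[0.9, 0.1, 0.3], [0.1, 0.6, 0.4]])
print(fbeta_score(y_true, p_valid > 0.2, beta=2, average='samples'))  # ~0.9167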
Example #23
class AmazonKerasClassifier:
    def __init__(self):
        self.losses = []
        self.classifier = Sequential()
        self.x_vail = []
        self.y_vail = []
        self.train_filepath = ''
        self.train_img_filepath = ''
        self.valid_filepath = ''
        self.valid_img_filepath = ''
        self.test_img_filepath = ''
        self.test_addition_img_filepath = ''
        self.test_img_name_list = ''
        self.y_map = {}

    def setTrainFilePath(self, value):
        self.train_filepath = value

    def getTrainFilePath(self):
        return self.train_filepath

    def setValidFilePath(self, value):
        self.valid_filepath = value

    def getValidFilePath(self):
        return self.valid_filepath

    def setTrainImgFilePath(self, value):
        self.train_img_filepath = value

    def getTrainImgFilePath(self):
        return self.train_img_filepath

    def setValidImgFilePath(self, value):
        self.valid_img_filepath = value

    def getValidImgFilePath(self):
        return self.valid_img_filepath

    def setTestImgFilePath(self, value):
        self.test_img_filepath = value

    def getTestImgFilePath(self):
        return self.test_img_filepath

    def setTestAdditionImgFilePath(self, value):
        self.test_addition_img_filepath = value

    def getTestAdditionImgFilePath(self):
        return self.test_addition_img_filepath

    def getTestImgNameList(self):
        return self.test_img_name_list

    def getYMap(self):
        return self.y_map

    def vgg(self,
            type=16,
            bn=False,
            img_size=(224, 224),
            img_channels=3,
            output_size=1000):
        if type == 16 and bn == False:
            layer_list = vgg.vgg16(num_classes=output_size)
        elif type == 16 and bn == True:
            layer_list = vgg.vgg16_bn(num_classes=output_size)
        elif type == 11 and bn == False:
            layer_list = vgg.vgg11(num_classes=output_size)
        elif type == 11 and bn == True:
            layer_list = vgg.vgg11_bn(num_classes=output_size)
        elif type == 13 and bn == False:
            layer_list = vgg.vgg13(num_classes=output_size)
        elif type == 13 and bn == True:
            layer_list = vgg.vgg13_bn(num_classes=output_size)
        elif type == 19 and bn == False:
            layer_list = vgg.vgg19(num_classes=output_size)
        elif type == 19 and bn == True:
            layer_list = vgg.vgg19_bn(num_classes=output_size)
        else:
            print("Please enter one of the four numbers: 11, 13, 16, 19!")
            return
        self.classifier.add(
            BatchNormalization(input_shape=(*img_size, img_channels)))
        for i, value in enumerate(layer_list):
            self.classifier.add(eval(value))

    def squeezenet(self,
                   type,
                   img_size=(64, 64),
                   img_channels=3,
                   output_size=1000):
        input_shape = Input(shape=(*img_size, img_channels))
        if type == 1:
            x = squeezenet.squeezenet1_0(input_shape, num_classes=output_size)
        elif type == 1.1:
            x = squeezenet.squeezenet1_1(input_shape, num_classes=output_size)
        else:
            print("Please enter 1 or 1.1!")
            return
        model = Model(inputs=input_shape, outputs=x)
        self.classifier = model

    def resnet(self,
               type,
               img_size=(64, 64),
               img_channels=3,
               output_size=1000):
        input_shape = Input(shape=(*img_size, img_channels))
        if type == 18:
            x = resnet.resnet18(input_shape, num_classes=output_size)
        elif type == 34:
            x = resnet.resnet34(input_shape, num_classes=output_size)
        elif type == 50:
            x = resnet.resnet50(input_shape, num_classes=output_size)
        elif type == 101:
            x = resnet.resnet101(input_shape, num_classes=output_size)
        elif type == 152:
            x = resnet.resnet152(input_shape, num_classes=output_size)
        else:
            print("请输入18,34,50,101,152这五个数字中的一个!")
            return
        model = Model(inputs=input_shape, outputs=x)
        self.classifier = model

    def inception(self, img_size=(299, 299), img_channels=3, output_size=1000):
        input_shape = Input(shape=(*img_size, img_channels))
        x = inception.inception_v3(input_shape,
                                   num_classes=output_size,
                                   aux_logits=True,
                                   transform_input=False)
        model = Model(inputs=input_shape, outputs=x)
        self.classifier = model

    def densenet(self,
                 type,
                 img_size=(299, 299),
                 img_channels=3,
                 output_size=1000):
        input_shape = Input(shape=(*img_size, img_channels))
        if type == 161:
            x = densenet.densenet161(input_shape, num_classes=output_size)
        elif type == 121:
            x = densenet.densenet121(input_shape, num_classes=output_size)
        elif type == 169:
            x = densenet.densenet169(input_shape, num_classes=output_size)
        elif type == 201:
            x = densenet.densenet201(input_shape, num_classes=output_size)
        else:
            print("请输入161,121,169,201这四个数字中的一个!")
            return
        model = Model(inputs=input_shape, outputs=x)
        self.classifier = model

    def alexnet(self, img_size=(299, 299), img_channels=3, output_size=1000):
        input_shape = Input(shape=(*img_size, img_channels))
        x = alexnet.alexnet(input_shape, num_classes=output_size)
        model = Model(inputs=input_shape, outputs=x)
        self.classifier = model

    def add_conv_layer(self, img_size=(32, 32), img_channels=3):
        self.classifier.add(
            BatchNormalization(input_shape=(*img_size, img_channels)))

        self.classifier.add(
            Conv2D(32, (3, 3), padding='same', activation='relu'))
        self.classifier.add(Conv2D(32, (3, 3), activation='relu'))
        self.classifier.add(MaxPooling2D(pool_size=2))
        self.classifier.add(Dropout(0.25))

        self.classifier.add(
            Conv2D(64, (3, 3), padding='same', activation='relu'))
        self.classifier.add(Conv2D(64, (3, 3), activation='relu'))
        self.classifier.add(MaxPooling2D(pool_size=2))
        self.classifier.add(Dropout(0.25))

        self.classifier.add(
            Conv2D(128, (3, 3), padding='same', activation='relu'))
        self.classifier.add(Conv2D(128, (3, 3), activation='relu'))
        self.classifier.add(MaxPooling2D(pool_size=2))
        self.classifier.add(Dropout(0.25))

        self.classifier.add(
            Conv2D(256, (3, 3), padding='same', activation='relu'))
        self.classifier.add(Conv2D(256, (3, 3), activation='relu'))
        self.classifier.add(MaxPooling2D(pool_size=2))
        self.classifier.add(Dropout(0.25))

    def add_flatten_layer(self):
        self.classifier.add(Flatten())

    def add_ann_layer(self, output_size):
        self.classifier.add(Dense(512, activation='relu'))
        self.classifier.add(BatchNormalization())
        self.classifier.add(Dropout(0.5))
        self.classifier.add(Dense(output_size, activation='sigmoid'))

    def _get_fbeta_score2(self, classifier, X_valid, y_valid):
        p_valid = classifier.predict(X_valid)
        result_threshold_list_final, score_result = self.grid_search_best_threshold(
            y_valid, np.array(p_valid))
        return result_threshold_list_final, score_result

    def _get_fbeta_score(self, classifier, X_valid, y_valid):
        p_valid = classifier.predict(X_valid)
        return fbeta_score(y_valid,
                           np.array(p_valid) > 0.2,
                           beta=2,
                           average='samples')

    def grid_search_best_threshold(self, y_valid, p_valid):
        threshold_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
        result_threshold_list_temp = [
            0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
            0.2, 0.2, 0.2, 0.2
        ]
        result_threshold_list_final = [
            0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
            0.2, 0.2, 0.2, 0.2
        ]
        for i in range(17):
            score_result = 0
            for j in range(9):
                result_threshold_list_temp[i] = threshold_list[j]
                score_temp = fbeta_score(y_valid,
                                         p_valid > result_threshold_list_temp,
                                         beta=2,
                                         average='samples')
                if score_result < score_temp:
                    score_result = score_temp
                    result_threshold_list_final[i] = threshold_list[j]
            result_threshold_list_temp[i] = result_threshold_list_final[i]
        return result_threshold_list_final, score_result

    def train_model(self,
                    x_train,
                    y_train,
                    learn_rate=0.001,
                    epoch=5,
                    batch_size=128,
                    validation_split_size=0.2,
                    train_callbacks=()):
        history = LossHistory()

        X_train, X_valid, y_train, y_valid = train_test_split(
            x_train, y_train, test_size=validation_split_size)

        self.x_vail = X_valid
        self.y_vail = y_valid
        opt = Adam(lr=learn_rate)

        self.classifier.compile(loss='binary_crossentropy',
                                optimizer=opt,
                                metrics=['accuracy'])

        earlyStopping = EarlyStopping(monitor='val_loss',
                                      patience=3,
                                      verbose=0,
                                      mode='auto')

        self.classifier.fit(
            X_train,
            y_train,
            batch_size=batch_size,
            epochs=epoch,
            verbose=1,
            validation_data=(X_valid, y_valid),
            callbacks=[history, *train_callbacks, earlyStopping])
        fbeta_score = self._get_fbeta_score(self.classifier, X_valid, y_valid)
        return [history.train_losses, history.val_losses, fbeta_score]

    def train_model_generator(self,
                              generator_train,
                              generator_valid,
                              learn_rate=0.001,
                              epoch=5,
                              batchSize=128,
                              steps=32383,
                              validation_steps=8096,
                              train_callbacks=()):
        history = LossHistory()
        #valid 8096  32383
        opt = Adam(lr=learn_rate)

        steps = steps / batchSize + 1 - 9
        validation_steps = validation_steps / batchSize + 1
        if steps % batchSize == 0:
            steps = steps / batchSize - 9
        if validation_steps % batchSize == 0:
            validation_steps = validation_steps / batchSize

        print(steps, validation_steps)
        self.classifier.compile(loss='binary_crossentropy',
                                optimizer=opt,
                                metrics=['accuracy'])

        earlyStopping = EarlyStopping(monitor='val_loss',
                                      patience=3,
                                      verbose=0,
                                      mode='auto')

        self.classifier.fit_generator(
            generator_train,
            steps_per_epoch=steps,
            epochs=epoch,
            verbose=1,
            validation_data=generator_valid,
            validation_steps=validation_steps,
            callbacks=[history, *train_callbacks, earlyStopping])
        fbeta_score = self._get_fbeta_score(self.classifier, self.x_vail,
                                            self.y_vail)
        return [history.train_losses, history.val_losses, fbeta_score]

    def generate_trainOrValid_img_from_file(self,
                                            train_set_folder,
                                            train_csv_file,
                                            img_resize=(32, 32),
                                            batchSize=128,
                                            process_count=cpu_count()):
        labels_df = pd.read_csv(train_csv_file)
        labels = sorted(
            set(
                chain.from_iterable(
                    [tags.split(" ") for tags in labels_df['tags'].values])))
        labels_map = {l: i for i, l in enumerate(labels)}

        files_path = []
        tags_list = []
        for file_name, tags in labels_df.values:
            files_path.append('{}/{}.jpg'.format(train_set_folder, file_name))
            tags_list.append(tags)

        X = []
        Y = []

        iter_num = 1
        self.y_map = {v: k for k, v in labels_map.items()}
        with ThreadPoolExecutor(process_count) as pool:
            for img_array, targets in tqdm(pool.map(
                    self._train_transform_to_matrices,
                [(file_path, tag, labels_map, img_resize)
                 for file_path, tag in zip(files_path, tags_list)]),
                                           total=len(files_path)):
                if iter_num % batchSize == 0:
                    X = []
                    Y = []
                    iter_num = 0
                X.append(img_array)
                Y.append(targets)
                iter_num += 1
                if iter_num == batchSize:
                    print(iter_num)
                    yield (np.array(X), np.array(Y))

    def _train_transform_to_matrices(self, *args):
        file_path, tags, labels_map, img_resize = list(args[0])
        img = Image.open(file_path)
        img.thumbnail(img_resize)

        img_array = np.asarray(img.convert("RGB"), dtype=np.float32) / 255

        targets = np.zeros(len(labels_map))
        for t in tags.split(' '):
            targets[labels_map[t]] = 1
        return img_array, targets

    def generate_test_img_from_file(self,
                                    test_set_folder,
                                    img_resize=(32, 32),
                                    batchSize=128,
                                    process_count=cpu_count()):
        x_test = []
        x_test_filename = []
        files_name = os.listdir(test_set_folder)

        X = []
        Y = []
        iter_num = 1
        with ThreadPoolExecutor(process_count) as pool:
            for img_array, file_name in tqdm(pool.map(
                    self._test_transform_to_matrices,
                [(test_set_folder, file_name, img_resize)
                 for file_name in files_name]),
                                             total=len(files_name)):
                x_test.append(img_array)
                x_test_filename.append(file_name)
                self.test_img_name_list = x_test_filename

                if iter_num % batchSize == 0:
                    X = []
                    Y = []
                    iter_num = 0
                X.append(img_array)
                Y.append(file_name)  # no labels at test time; keep the file name
                iter_num += 1
                if iter_num == batchSize:
                    print(iter_num)
                    yield (np.array(X), np.array(Y))

    def _test_transform_to_matrices(self, *args):
        test_set_folder, file_name, img_resize = list(args[0])
        img = Image.open('{}/{}'.format(test_set_folder, file_name))
        img.thumbnail(img_resize)
        # Convert to RGB and normalize
        img_array = np.array(img.convert("RGB"), dtype=np.float32) / 255
        return img_array, file_name

    def save_weights(self, weight_file_path):
        self.classifier.save_weights(weight_file_path)

    def load_weights(self, weight_file_path):
        self.classifier.load_weights(weight_file_path)

    def setBestThreshold(self):
        result_threshold_list_final, score_result = self._get_fbeta_score2(
            self.classifier, self.x_vail, self.y_vail)
        print('Best score: {}'.format(score_result))
        print('Best thresholds: {}'.format(result_threshold_list_final))
        return result_threshold_list_final

    def predict(self, x_test):
        predictions = self.classifier.predict(x_test)
        return predictions

    def predict_generator(self, generator):
        predictions = self.classifier.predict_generator(generator)
        return predictions

    def map_predictions(self, predictions, labels_map, thresholds):
        predictions_labels = []
        for prediction in predictions:
            labels = [
                labels_map[i] for i, value in enumerate(prediction)
                if value > thresholds[i]
            ]
            predictions_labels.append(labels)

        return predictions_labels

    def close(self):
        backend.clear_session()
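The generators above hand batches to Keras by accumulating items and resetting a counter. The same pattern as a minimal standalone sketch (batch_pairs is a hypothetical helper, not part of the original class) that also emits the trailing partial batch:

import numpy as np

def batch_pairs(pairs, batch_size=128):
    # Accumulate (image, target) pairs and yield them as numpy batches.
    X, Y = [], []
    for img_array, targets in pairs:
        X.append(img_array)
        Y.append(targets)
        if len(X) == batch_size:
            yield np.array(X), np.array(Y)
            X, Y = [], []
    if X:  # the final batch may be smaller than batch_size
        yield np.array(X), np.array(Y)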
Ejemplo n.º 24
0
    def __init__(self, restore=None, session=None, use_softmax=False):
        self.num_channels = 3
        self.image_size = 32
        self.num_labels = 10
        self.shape = [None, 32, 32, self.num_channels]
        model = Sequential()
        model.add(
            Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   name='block1_conv1',
                   input_shape=(32, 32, 3)))  # 1
        model.add(BatchNormalization(name="batch_normalization_1"))  # 2
        model.add(
            Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   name='block1_conv2'))  # 3
        model.add(BatchNormalization(name="batch_normalization_2"))
        model.add(MaxPooling2D((2, 2), strides=(2, 2),
                               name='block1_pool'))  # 4
        # Block 2
        model.add(
            Conv2D(128, (3, 3),
                   activation='relu',
                   padding='same',
                   name='block2_conv1'))  # 5
        model.add(BatchNormalization(name="batch_normalization_3"))
        model.add(
            Conv2D(128, (3, 3),
                   activation='relu',
                   padding='same',
                   name='block2_conv2'))  # 6
        model.add(BatchNormalization(name="batch_normalization_4"))  # 7
        model.add(MaxPooling2D((2, 2), strides=(2, 2),
                               name='block2_pool'))  # 8

        # Block 3
        model.add(
            Conv2D(256, (3, 3),
                   activation='relu',
                   padding='same',
                   name='block3_conv1'))  # 9
        model.add(BatchNormalization(name="batch_normalization_5"))  # 10
        model.add(
            Conv2D(256, (3, 3),
                   activation='relu',
                   padding='same',
                   name='block3_conv2'))  # 11
        model.add(BatchNormalization(name="batch_normalization_6"))
        model.add(
            Conv2D(256, (3, 3),
                   activation='relu',
                   padding='same',
                   name='block3_conv3'))  # 12
        model.add(BatchNormalization(name="batch_normalization_7"))  # 13
        model.add(MaxPooling2D((2, 2), strides=(2, 2),
                               name='block3_pool'))  # 14
        model.add(Flatten())  # 15
        model.add(Dense(256, activation='relu', name='dense_1'))  # 16
        model.add(BatchNormalization(name="batch_normalization_8"))  # 17
        model.add(Dense(256, activation='relu', name='dense_2'))  # 18
        model.add(BatchNormalization(name="batch_normalization_9"))  # 19
        model.add(Dense(10, activation='softmax', name='dense_3'))  # 20
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(
                    K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
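Each entry of self.layer_outputs is a K.function from the network input to one Conv2D or Dense activation, so per-layer analyses can run the net piecewise. A hedged usage sketch, with CIFARModel as a stand-in name for the class this __init__ belongs to:

import numpy as np

m = CIFARModel(restore=None)                    # hypothetical class name
x = np.zeros((1, 32, 32, 3), dtype=np.float32)  # dummy CIFAR-10 batch
for fn in m.layer_outputs:
    (act,) = fn([x])   # evaluate one layer's activation on the batch
    print(act.shape)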
Ejemplo n.º 25
0
class AmazonKerasClassifier:
    def __init__(self):
        self.losses = []
        self.classifier = Sequential()

    def add_conv_layer(self, img_size=(32, 32), img_channels=3):
        self.classifier.add(
            BatchNormalization(input_shape=(*img_size, img_channels)))

        self.classifier.add(
            Conv2D(32, (3, 3), padding='same', activation='relu'))
        self.classifier.add(Conv2D(32, (3, 3), activation='relu'))
        self.classifier.add(MaxPooling2D(pool_size=2))
        self.classifier.add(Dropout(0.25))

        self.classifier.add(
            Conv2D(64, (3, 3), padding='same', activation='relu'))
        self.classifier.add(Conv2D(64, (3, 3), activation='relu'))
        self.classifier.add(MaxPooling2D(pool_size=2))
        self.classifier.add(Dropout(0.25))

        self.classifier.add(
            Conv2D(128, (3, 3), padding='same', activation='relu'))
        self.classifier.add(Conv2D(128, (3, 3), activation='relu'))
        self.classifier.add(MaxPooling2D(pool_size=2))
        self.classifier.add(Dropout(0.25))

        self.classifier.add(
            Conv2D(256, (3, 3), padding='same', activation='relu'))
        self.classifier.add(Conv2D(256, (3, 3), activation='relu'))
        self.classifier.add(MaxPooling2D(pool_size=2))
        self.classifier.add(Dropout(0.25))

    def add_flatten_layer(self):
        self.classifier.add(Flatten())

    def add_ann_layer(self, output_size):
        self.classifier.add(Dense(512, activation='relu'))
        self.classifier.add(BatchNormalization())
        self.classifier.add(Dropout(0.5))
        self.classifier.add(Dense(output_size, activation='sigmoid'))

    def _get_fbeta_score(self, classifier, X_valid, y_valid):
        p_valid = classifier.predict(X_valid)
        return fbeta_score(y_valid,
                           np.array(p_valid) > 0.2,
                           beta=2,
                           average='samples')

    def train_model(self,
                    x_train,
                    y_train,
                    learn_rate=0.001,
                    epoch=5,
                    batch_size=128,
                    validation_split_size=0.2,
                    train_callbacks=()):
        history = LossHistory()

        X_train, X_valid, y_train, y_valid = train_test_split(
            x_train, y_train, test_size=validation_split_size)

        opt = Nadam(lr=learn_rate,
                    beta_1=0.9,
                    beta_2=0.999,
                    epsilon=1e-08,
                    schedule_decay=0.004)

        self.classifier.compile(loss='binary_crossentropy',
                                optimizer=opt,
                                metrics=['accuracy'])

        # Early stopping is meant to halt training once val_loss stops
        # improving for 3 consecutive epochs; note that the per-epoch
        # fit() loop below resets the callback on every call, so it
        # never actually fires here.
        earlyStopping = EarlyStopping(monitor='val_loss',
                                      patience=3,
                                      verbose=0,
                                      mode='auto')
        for i in range(epoch):
            self.classifier.fit(
                X_train,
                y_train,
                batch_size=batch_size,
                epochs=1,
                verbose=2,
                validation_data=(X_valid, y_valid),
                callbacks=[history, *train_callbacks, earlyStopping])
            fbeta_score = self._get_fbeta_score(self.classifier, X_valid,
                                                y_valid)
            print('fbeta score: %s' % fbeta_score)
        return [history.train_losses, history.val_losses, fbeta_score]

    def save_weights(self, weight_file_path):
        self.classifier.save_weights(weight_file_path)

    def load_weights(self, weight_file_path):
        self.classifier.load_weights(weight_file_path)

    def predict(self, x_test):
        predictions = self.classifier.predict(x_test)
        return predictions

    def map_predictions(self, predictions, labels_map, thresholds):
        """
        Return the predictions mapped to their labels
        :param predictions: the predictions from the predict() method
        :param labels_map: the map
        :param thresholds: The threshold of each class to be considered as existing or not existing
        :return: the predictions list mapped to their labels
        """
        predictions_labels = []
        for prediction in predictions:
            labels = [
                labels_map[i] for i, value in enumerate(prediction)
                if value > thresholds[i]
            ]
            predictions_labels.append(labels)

        return predictions_labels

    def close(self):
        backend.clear_session()
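_get_fbeta_score binarizes the sigmoid outputs at a fixed 0.2 threshold and scores with F2, which weights recall four times as heavily as precision. A tiny self-contained check of that exact call on toy data (values invented for illustration):

import numpy as np
from sklearn.metrics import fbeta_score

y_valid = np.array([[1, 0, 1], [0, 1, 0]])              # toy multi-label truth
p_valid = np.array([[0.9, 0.1, 0.3], [0.6, 0.8, 0.1]])  # toy sigmoid outputs
print(fbeta_score(y_valid, p_valid > 0.2, beta=2, average='samples'))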
Ejemplo n.º 26
0
class Model(object):
    def __init__(self):
        self.model = Sequential()
        self.model.add(
            Conv2D(32, (3, 3), input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Conv2D(32, (3, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Conv2D(64, (3, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Flatten())
        self.model.add(Dense(64))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(1))
        self.model.add(Activation('sigmoid'))

    def train(self, dataset, batch_size=batch_size, nb_epoch=epochs):

        self.model.compile(loss='binary_crossentropy',
                           optimizer='adam',
                           metrics=['accuracy'])
        self.model.fit_generator(
            dataset.train,
            steps_per_epoch=nb_train_samples // batch_size,
            epochs=nb_epoch,
            validation_data=dataset.valid,
            validation_steps=nb_validation_samples // batch_size)

    def save(self, file_path=FILE_PATH):
        print('Model Saved.')
        self.model.save_weights(file_path)

    def load(self, file_path=FILE_PATH):
        print('Model Loaded.')
        self.model.load_weights(file_path)

    def predict(self, image):
        # Predict the class of a single sample
        img = image.reshape((1, IMAGE_SIZE, IMAGE_SIZE, 3))
        img = img.astype('float32')
        img /= 255  # normalize to [0, 1]

        result = self.model.predict(img)
        print(result)  # class probability
        result = self.model.predict_classes(img)
        print(result)  # 0/1 label

        return result[0]

    def evaluate(self, dataset):
        # Accuracy on the validation samples
        score = self.model.evaluate_generator(dataset.valid, steps=2)
        print("Accuracy (%s): %.2f%%" %
              (self.model.metrics_names[1], score[1] * 100))
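predict_classes was removed from Keras (it is gone as of TensorFlow 2.6); for the single sigmoid unit above, the 0/1 decision is just a threshold on predict. A hedged drop-in sketch:

import numpy as np

def predict_class(model, img, threshold=0.5):
    # Replacement for the removed Sequential.predict_classes()
    # when the model ends in one sigmoid unit.
    probs = model.predict(img)                  # shape (n, 1): P(class == 1)
    return (probs > threshold).astype('int32')  # 0/1 labels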
Ejemplo n.º 27
0
def train(data,
          file_name,
          filters,
          kernels,
          num_epochs=50,
          batch_size=128,
          train_temp=1,
          init=None,
          activation=tf.nn.relu,
          bn=False):
    """
    Train an n-layer CNN for MNIST or CIFAR
    """

    # create a Keras sequential model
    model = Sequential()
    model.add(
        Conv2D(filters[0], kernels[0], input_shape=data.train_data.shape[1:]))
    if bn:
        model.add(BatchNormalization())
    model.add(Lambda(activation))
    for f, k in zip(filters[1:], kernels[1:]):
        model.add(Conv2D(f, k))
        if bn:
            model.add(BatchNormalization())
        # ReLU activation
        model.add(Lambda(activation))
    # the output layer, with 10 classes
    model.add(Flatten())
    model.add(Dense(10))

    # load initial weights when given
    if init is not None:
        model.load_weights(init)

    # define the loss function which is the cross entropy between prediction and true label
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted /
                                                       train_temp)

    # instantiate the Adam optimizer
    opt = Adam()

    # compile the Keras model, given the specified loss and optimizer
    model.compile(loss=fn, optimizer=opt, metrics=['accuracy'])

    model.summary()
    print("Traing a {} layer model, saving to {}".format(
        len(filters) + 1, file_name))
    # run training with given dataset, and print progress
    history = model.fit(data.train_data,
                        data.train_labels,
                        batch_size=batch_size,
                        validation_data=(data.validation_data,
                                         data.validation_labels),
                        epochs=num_epochs,
                        shuffle=True)

    # save model to a file
    if file_name is not None:
        model.save(file_name)

    return {'model': model, 'history': history}
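Dividing the logits by train_temp inside the loss is the temperature trick used in defensive distillation: training against a higher temperature flattens the softmax the model sees. A quick numeric illustration:

import numpy as np

def softmax(z):
    e = np.exp(z - z.max())
    return e / e.sum()

logits = np.array([2.0, 0.0, -2.0])
print(softmax(logits))         # temperature 1: sharp, ~[0.87, 0.12, 0.02]
print(softmax(logits / 10.0))  # temperature 10: flat, ~[0.40, 0.33, 0.27]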
Ejemplo n.º 28
0
class KerasModel:
    def __init__(self, img_size, img_channels=3, output_size=17):
        self.losses = []
        self.model = Sequential()
        self.model.add(
            BatchNormalization(input_shape=(img_size[0], img_size[1],
                                            img_channels)))

        self.model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
        self.model.add(Conv2D(32, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=2))
        self.model.add(Dropout(0.3))

        self.model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
        self.model.add(Conv2D(64, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=2))
        self.model.add(Dropout(0.3))

        self.model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
        self.model.add(Conv2D(128, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=2))
        self.model.add(Dropout(0.3))

        self.model.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
        self.model.add(Conv2D(256, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=2))
        self.model.add(Dropout(0.3))

        self.model.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
        self.model.add(Conv2D(512, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=2))
        self.model.add(Dropout(0.3))

        self.model.add(Flatten())

        self.model.add(Dense(512, activation='relu'))
        self.model.add(BatchNormalization())
        self.model.add(Dropout(0.5))

        self.model.add(Dense(output_size, activation='sigmoid'))

    def get_fbeta_score(self, validation_data, verbose=True):
        p_valid = self.model.predict(validation_data[0])
        thresholds = optimise_f2_thresholds(validation_data[1],
                                            p_valid,
                                            verbose=verbose)
        return fbeta_score(validation_data[1],
                           np.array(p_valid) > thresholds,
                           beta=2,
                           average='samples'), thresholds

    def fit(self,
            flow,
            epochs,
            lr,
            validation_data,
            train_callbacks=[],
            batches=300):
        history = LossHistory()
        fbeta = Fbeta(validation_data)
        opt = Adam(lr=lr)
        self.model.compile(loss='binary_crossentropy',
                           optimizer=opt,
                           metrics=['accuracy'])

        earlyStopping = EarlyStopping(monitor='val_loss',
                                      patience=3,
                                      verbose=0,
                                      mode='auto')
        self.model.fit_generator(flow,
                                 steps_per_epoch=batches,
                                 epochs=epochs,
                                 callbacks=[history, earlyStopping, fbeta] +
                                 train_callbacks,
                                 validation_data=validation_data)
        fb_score, thresholds = self.get_fbeta_score(validation_data,
                                                    verbose=False)
        return [
            fbeta.fbeta, history.train_losses, history.val_losses, fb_score,
            thresholds
        ]

    def save_weights(self, weight_file_path):
        self.model.save_weights(weight_file_path)

    def load_weights(self, weight_file_path):
        self.model.load_weights(weight_file_path)

    def predict_image(self, image):
        img = Image.fromarray(np.uint8(image * 255))
        images = [img.copy().rotate(i) for i in [-90, 90, 180]]
        images.append(img)
        images = np.asarray([
            np.asarray(image.convert("RGB"), dtype=np.float32) / 255
            for image in images
        ])
        return sum(self.model.predict(images)) / 4

    def predict(self, x_test):
        return [self.predict_image(img) for img in tqdm(x_test)]

    def map_predictions(self, predictions, labels_map, thresholds):
        predictions_labels = []
        for prediction in predictions:
            labels = [
                labels_map[i] for i, value in enumerate(prediction)
                if value > thresholds[i]
            ]
            predictions_labels.append(labels)
        return predictions_labels

    def close(self):
        backend.clear_session()
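predict_image is plain test-time augmentation: the model scores four orientations of the image (original, ±90°, 180°) and the four predictions are averaged. The same idea as a self-contained helper (assumes a compiled Keras model and an HxWx3 float image in [0, 1]):

import numpy as np
from PIL import Image

def tta_predict(model, image):
    # Average predictions over four rotations of the input image.
    img = Image.fromarray(np.uint8(image * 255))
    views = [img.rotate(angle) for angle in (0, -90, 90, 180)]
    batch = np.stack([np.asarray(v, dtype=np.float32) / 255 for v in views])
    return model.predict(batch).mean(axis=0)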
Ejemplo n.º 29
0
Archivo: ann.py Proyecto: KSR4599/ANN
Geography: France
Credit Score: 600
Gender: Male
Age: 40
Tenure: 3
Balance: 60000
Number of Products: 2
Has Credit Card: Yes
Is Active Member: Yes
Estimated Salary: 50000"""
new_prediction = classifier.predict(
    sc.transform(np.array([[0.0, 0, 600, 1, 40, 3, 60000, 2, 1, 1, 50000]])))
new_prediction = (new_prediction > 0.5)

# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix

# The confusion matrix's diagonal counts correct predictions; off-diagonal entries are errors.

cm = confusion_matrix(y_test, y_pred)
print(cm)

#Saving the weights
fname = "weights-ann.hdf5"
classifier.save_weights(fname, overwrite=True)

#loading the weights
fname = "weights-ann.hdf5"
classifier.load_weights(fname)
backend.clear_session()
Ejemplo n.º 30
0
    def __init__(self, restore_dae=None, restore_clf=None, session=None, use_softmax=False, activation="relu"):

        print("inside MNISTModelDAE: activation = {}".format(activation))

        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10

        model1 = Sequential()

        model1.add(Lambda(lambda x_: x_ + 0.5, input_shape=(28, 28, 1)))

        # Encoder
        model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
        model1.add(AveragePooling2D((2, 2), padding="same"))
        model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))

        # Decoder
        model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
        model1.add(UpSampling2D((2, 2)))
        model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
        model1.add(Conv2D(1, (3, 3), activation='sigmoid', padding='same', activity_regularizer=regs.l2(1e-9)))

        model1.add(Lambda(lambda x_: x_ - 0.5))

        if restore_dae:
            model1.load_weights(restore_dae)
        model1.compile(loss='mean_squared_error', metrics=['mean_squared_error'], optimizer='adam')


        model2 = Sequential()

        model2.add(Conv2D(32, (3, 3), input_shape=(28, 28, 1)))
        model2.add(Activation(activation))
        model2.add(Conv2D(32, (3, 3)))
        model2.add(Activation(activation))
        model2.add(MaxPooling2D(pool_size=(2, 2)))

        model2.add(Conv2D(64, (3, 3)))
        model2.add(Activation(activation))
        model2.add(Conv2D(64, (3, 3)))
        model2.add(Activation(activation))
        model2.add(MaxPooling2D(pool_size=(2, 2)))

        model2.add(Flatten())
        model2.add(Dense(200))
        model2.add(Activation(activation))
        model2.add(Dense(200))
        model2.add(Activation(activation))
        model2.add(Dense(10))
        # output log probability, used for black-box attack
        if use_softmax:
            model2.add(Activation('softmax'))
        if restore_clf:
            model2.load_weights(restore_clf)

        layer_outputs = []
        for layer in model1.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(K.function([model1.layers[0].input], [layer.output]))
        for layer in model2.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(K.function([model2.layers[0].input], [layer.output]))

        model = Sequential()
        model.add(model1)
        model.add(model2)
        self.model = model
        self.layer_outputs = layer_outputs
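The two sub-networks are chained so the classifier only ever sees the autoencoder's reconstruction, which is the point of a DAE-based defense. A hedged usage sketch (the weight paths are placeholders; inputs are centered to [-0.5, 0.5] as the Lambda layers expect):

import numpy as np

m = MNISTModelDAE(restore_dae='dae.h5', restore_clf='clf.h5')  # placeholder paths
x = np.zeros((1, 28, 28, 1), dtype=np.float32) - 0.5           # centered dummy digit
logits = m.model.predict(x)  # reconstruction, then classifier logits
print(logits.argmax(axis=1))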