Example 1
    def __init__(self, restore=None, session=None, use_softmax=False):
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10
        self.shape = [None, 28, 28, self.num_channels]
        model = Sequential()
        kernel_size = (5, 5)
        drop_rate = 0.3
        model.add(Conv2D(32, kernel_size, activation='relu', padding='same',
                         name='block1_conv1', input_shape=(28, 28, 1)))  # 1
        model.add(MaxPooling2D(pool_size=(2, 2), name='block1_pool1'))  # 2
        model.add(Dropout(drop_rate))

        # block2
        model.add(Conv2D(64, kernel_size, activation='relu', padding='same', name='block2_conv1'))  # 4
        model.add(MaxPooling2D(pool_size=(2, 2), name='block2_pool1'))  # 5
        model.add(Dropout(drop_rate))

        model.add(Flatten(name='flatten'))

        model.add(Dense(120, activation='relu', name='fc1'))  # -5
        model.add(Dropout(drop_rate))
        model.add(Dense(84, activation='relu', name='fc2'))  # -3
        model.add(Dense(10, name='before_softmax'))  # -2
        model.add(Activation('softmax', name='predictions'))  # -1
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Example 2
def build_regressor():
    regressor = Sequential()
    regressor.add(Dense(units=6, kernel_initializer='uniform', activation='relu', input_dim=11))
    regressor.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))
    regressor.add(Dense(units=1, kernel_initializer='uniform', activation='linear'))
    regressor.compile(optimizer='adam', loss='mean_squared_error')
    return regressor
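build_regressor has the no-argument factory signature expected by the scikit-learn wrapper bundled with older Keras releases. A hedged usage sketch with synthetic data standing in for the real 11-feature matrix:

import numpy as np
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import cross_val_score

X = np.random.rand(200, 11)                        # 11 features, matching input_dim=11
y = np.random.rand(200)
estimator = KerasRegressor(build_fn=build_regressor, batch_size=32, epochs=10, verbose=0)
scores = cross_val_score(estimator, X, y, cv=5, scoring='neg_mean_squared_error')
print(scores.mean())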
Example 3
def network(num_classes):
    model = Sequential()
    model.add(Dense(10, activation='relu', input_shape=(4, )))
    model.add(Dense(20, activation='relu'))
    model.add(Dense(10, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    return model
Example 4
def build_generator_dense():
    
    model = Sequential()

    # Add arbitrary layers
    first = True
    for size in generator_layers.split(":"):
        size = int(size)
        if first:
            model.add(Dense(size, input_shape=noise_shape, activation=generator_activation))
        else:
            model.add(Dense(size, activation=generator_activation))

        model.add(Dropout(dropout_value))
        first = False

    # Add the final layer
    model.add(Dense(  np.prod(url_shape) , activation="tanh"))
    model.add(Dropout(dropout_value))
    model.add(Reshape(url_shape))
    model.summary()

    # Build the model
    noise = Input(shape=noise_shape)
    gen = model(noise)

    return Model(noise, gen)
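build_generator_dense reads several module-level settings. One hypothetical configuration (every value below is assumed) that lets it run stand-alone when placed in the same module as the function:

import numpy as np

generator_layers = "128:256"       # colon-separated hidden-layer sizes
generator_activation = "relu"
dropout_value = 0.3
noise_shape = (100,)               # latent-vector shape
url_shape = (64, 2)                # shape the output is reshaped to

generator = build_generator_dense()
sample = generator.predict(np.random.normal(size=(1,) + noise_shape))
print(sample.shape)                # (1, 64, 2)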
Example 5
    def __init__(self, restore=None, session=None, use_softmax=False):
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10
        self.shape = [None, 28 * 28]
        model = Sequential()
        model.add(
            Dense(512,
                  activation='relu',
                  input_shape=(28 * 28, ),
                  name='dense_1'))
        model.add(Dropout(0.2, name='d1'))
        model.add(Dense(512, activation='relu', name='dense_2'))
        model.add(Dropout(0.2, name='d2'))
        model.add(Dense(10, activation='softmax', name='dense_3'))
        if restore:
            model.load_weights(restore, by_name=True)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(
                    K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Example 6
def latent(data_shape):
    model = Sequential()
    model.add(Dense(mc._OUT_DIM, activation='relu', input_shape=(data_shape,),
                    kernel_regularizer=regularizers.l2(mc._L2_REGULARIZE_RATE)))
    model.add(Dense(mc._OUT_DIM, activation='relu',
                    kernel_regularizer=regularizers.l2(mc._L2_REGULARIZE_RATE)))
    return model
Example 7
    def __init__(self, restore=None, session=None, use_softmax=False):
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10
        self.shape = [None, 28, 28, self.num_channels]
        model = Sequential()
        model.add(Conv2D(6, (5, 5), padding='valid', activation='relu', kernel_initializer='he_normal',
                         input_shape=(28,28,1), name='l1'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2), name='l2'))
        model.add(Conv2D(16, (5, 5), padding='valid', activation='relu', kernel_initializer='he_normal', name='l3'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2), name='l4'))
        model.add(Flatten())
        model.add(Dense(120, activation='relu', kernel_initializer='he_normal', name='l5'))
        model.add(Dense(84, activation='relu', kernel_initializer='he_normal', name='l6'))
        model.add(Dense(10, activation='softmax', kernel_initializer='he_normal', name='l7'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Example 8
    def __init__(self, restore=None, session=None, use_softmax=False):
        self.num_channels = 3
        self.image_size = 32
        self.num_labels = 10

        model = Sequential()

        model.add(Conv2D(64, (3, 3), input_shape=(32, 32, 3)))
        model.add(Activation('relu'))
        model.add(Conv2D(64, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Conv2D(128, (3, 3)))
        model.add(Activation('relu'))
        model.add(Conv2D(128, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Flatten())
        model.add(Dense(256))
        model.add(Activation('relu'))
        model.add(Dense(256))
        model.add(Activation('relu'))
        model.add(Dense(10))
        if use_softmax:
            model.add(Activation('softmax'))
        if restore:
            model.load_weights(restore)

        self.model = model
Example 9
    def __init__(self, restore=None, session=None, use_log=False):
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10

        model = Sequential()
        model.add(Flatten(input_shape=(28, 28, 1)))
        model.add(Dense(1024))
        # scale by 10 before softplus and by 0.1 after to emulate alpha = 10
        model.add(Lambda(lambda x: x * 10))
        model.add(Activation('softplus'))
        model.add(Lambda(lambda x: x * 0.1))
        model.add(Dense(10))
        # output probabilities when requested; the black-box attack takes their log
        if use_log:
            model.add(Activation('softmax'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(
                    K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Example 10
    def build_local_model(self):
        inputs = Input(shape=self.state_size)
        conv = TimeDistributed(
            Conv2D(32, (8, 8), strides=(4, 4), activation="elu"))(inputs)
        conv = TimeDistributed(
            Conv2D(32, (4, 4), strides=(2, 2), activation="elu"))(conv)
        conv = TimeDistributed(
            Conv2D(32, (3, 3), strides=(1, 1), activation='elu'))(conv)
        conv = TimeDistributed(
            Conv2D(8, (1, 1), strides=(1, 1), activation='elu'))(conv)
        conv = TimeDistributed(Flatten())(conv)
        conv = BatchNormalization()(conv)
        gru = GRU(256, activation='tanh')(conv)

        policy = Dense(self.action_size, activation="softmax")(gru)
        value = Dense(1, activation='linear')(gru)

        local_actor = Model(inputs=inputs, outputs=policy)
        local_critic = Model(inputs=inputs, outputs=value)

        local_actor._make_predict_function()
        local_critic._make_predict_function()

        local_actor.set_weights(self.actor.get_weights())
        local_critic.set_weights(self.critic.get_weights())

        return local_actor, local_critic
Example 11
    def __init__(self, params, restore=None, session=None, use_log=False, image_size=28, image_channel=1):
        
        self.image_size = image_size
        self.num_channels = image_channel
        self.num_labels = 10
        
        model = Sequential()
        model.add(Flatten(input_shape=(image_size, image_size, image_channel)))
        # list of all hidden units weights
        self.U = []
        for param in params:
            # add each dense layer, and save a reference to list U
            self.U.append(Dense(param))
            model.add(self.U[-1])
            # ReLU activation
            model.add(Activation('relu'))
        self.W = Dense(10)
        model.add(self.W)
        # output probabilities when requested; the black-box attack takes their log
        if use_log:
            model.add(Activation('softmax'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Example 12
    def conv_3d(self):
        """
        Build a 3D convolutional network, based loosely on C3D.
            https://arxiv.org/pdf/1412.0767.pdf
        """
        # Model.
        model = Sequential()
        model.add(
            Conv3D(32, (3, 3, 3),
                   activation='relu',
                   input_shape=self.input_shape))
        model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2)))
        model.add(Conv3D(64, (3, 3, 3), activation='relu'))
        model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2)))
        model.add(Conv3D(128, (3, 3, 3), activation='relu'))
        model.add(Conv3D(128, (3, 3, 3), activation='relu'))
        model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2)))
        model.add(Conv3D(256, (2, 2, 2), activation='relu'))
        model.add(Conv3D(256, (2, 2, 2), activation='relu'))
        model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2)))

        model.add(Flatten())
        model.add(Dense(1024))
        model.add(Dropout(0.5))
        model.add(Dense(1024))
        model.add(Dropout(0.5))
        model.add(Dense(self.nb_classes, activation='softmax'))

        return model
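conv_3d reads self.input_shape and self.nb_classes; for C3D-style clips the input is (frames, height, width, channels). A sanity-build sketch with a stand-in "self" (all values hypothetical), treating conv_3d as a plain function:

from types import SimpleNamespace

stub = SimpleNamespace(input_shape=(16, 112, 112, 3), nb_classes=5)
model = conv_3d(stub)
model.summary()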
Example 13
 def __init__(self, nn_type="resnet50", restore = None, session=None, use_imagenet_pretrain=False, use_softmax=True):
     self.image_size = 224
     self.num_channels = 3
     self.num_labels = 8
 
     input_layer = Input(shape=(self.image_size, self.image_size, self.num_channels))
     weights = "imagenet" if use_imagenet_pretrain else None
     if nn_type == "resnet50":
         base_model = ResNet50(weights=weights, input_tensor=input_layer)
     elif nn_type == "vgg16":
         base_model = VGG16(weights=weights, input_tensor=input_layer)
         # base_model = VGG16(weights=None, input_tensor=input_layer)
     x = base_model.output
     x = LeakyReLU()(x)
     x = Dense(1024)(x)
     x = Dropout(0.2)(x)
     x = LeakyReLU()(x)
     x = Dropout(0.3)(x)
     x = Dense(8)(x)
     if use_softmax:
         x = Activation("softmax")(x)
     model = Model(inputs=base_model.input, outputs=x)
 
     # for layer in base_model.layers:
     # 	layer.trainable = False
 
 
     if restore:
         print("Load: {}".format(restore))
         model.load_weights(restore)
 
     self.model = model
Example 14
File: ann1.py Project: KSR4599/ANN
def build_classifier():
    classifier = Sequential()
    classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu', input_dim=11))
    classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))
    classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
    classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return classifier
Example 15
def get_clf():
    """
    Standard neural network training procedure.
    """
    model = Sequential()

    model.add(Conv2D(32, (3, 3), input_shape=(28, 28, 1)))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(200))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(200))
    model.add(Activation('relu'))
    model.add(Dense(10))

    model.load_weights("./models/mnist")

    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct, logits=predicted)

    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    return model
Example 16
    def __init__(self, restore=None, session=None, use_softmax=False):
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10

        model = Sequential()

        model.add(Conv2D(32, (3, 3), input_shape=(28, 28, 1)))
        model.add(Activation('relu'))
        model.add(Conv2D(32, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Conv2D(64, (3, 3)))
        model.add(Activation('relu'))
        model.add(Conv2D(64, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Flatten())
        model.add(Dense(200))
        model.add(Activation('relu'))
        model.add(Dense(200))
        model.add(Activation('relu'))
        model.add(Dense(10))
        # output probabilities when requested; the black-box attack takes their log
        if use_softmax:
            model.add(Activation('softmax'))
        if restore:
            model.load_weights(restore)

        self.model = model
Example 17
def train(data,
          file_name,
          params,
          num_epochs=50,
          batch_size=128,
          train_temp=1,
          init=None,
          lr=0.01,
          decay=1e-5,
          momentum=0.9):
    """
    Train a n-layer simple network for MNIST and CIFAR
    """

    # create a Keras sequential model
    model = Sequential()
    # reshape the input (28*28*1) or (32*32*3) to 1-D
    model.add(Flatten(input_shape=data.train_data.shape[1:]))
    # dense layers (the hidden layer)
    for param in params:
        model.add(Dense(param))
        # ReLU activation
        model.add(Activation('relu'))
    # the output layer, with 10 classes
    model.add(Dense(10))

    # load initial weights when given
    if init is not None:
        model.load_weights(init)

    # define the loss function which is the cross entropy between prediction and true label
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted /
                                                       train_temp)

    # initiate the SGD optimizer with given hyper parameters
    sgd = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)

    # compile the Keras model, given the specified loss and optimizer
    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    model.summary()
    print("Traing a {} layer model, saving to {}".format(
        len(params) + 1, file_name))
    # run training with given dataset, and print progress
    history = model.fit(data.train_data,
                        data.train_labels,
                        batch_size=batch_size,
                        validation_data=(data.validation_data,
                                         data.validation_labels),
                        epochs=num_epochs,
                        shuffle=True)

    # save model to a file
    if file_name is not None:
        model.save(file_name)

    return {'model': model, 'history': history}
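train() only needs an object exposing train/validation arrays. A minimal stand-in with MNIST-like shapes (names and shapes assumed); labels are one-hot because fn() feeds them to softmax_cross_entropy_with_logits:

import numpy as np

class FakeData:
    train_data = np.random.rand(256, 28, 28, 1).astype("float32")
    train_labels = np.eye(10)[np.random.randint(0, 10, 256)]
    validation_data = np.random.rand(64, 28, 28, 1).astype("float32")
    validation_labels = np.eye(10)[np.random.randint(0, 10, 64)]

result = train(FakeData(), file_name=None, params=[128, 64], num_epochs=1)
print(result['history'].history['loss'])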
Example 18
    def construct_inception(self, output_size=200, img_size=(32, 32)):
        # add a global spatial average pooling layer
        b_model = self.base_model.output
        b_model = GlobalAveragePooling2D()(b_model)
        # let's add a fully-connected layer
        b_model = Dense(1024, activation='relu')(b_model)
        # and a logistic layer with output_size classes (200 by default)
        self.predictions = Dense(output_size, activation='softmax')(b_model)
Example 19
    def _build_model(self):
        # Neural Net for Deep-Q learning Model
        model = Sequential()
        model.add(Dense(24, input_dim=self.state_size, activation='relu'))
        model.add(Dense(24, activation='relu'))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
        return model
Example 20
def train(data,
          file_name,
          params,
          num_epochs=50,
          batch_size=256,
          train_temp=1,
          init=None,
          lr=0.01,
          decay=1e-5,
          momentum=0.9,
          activation="relu",
          optimizer_name="sgd"):
    """
    Train a n-layer simple network for MNIST and CIFAR
    """
    
    # create a Keras sequential model
    model = Sequential()
    # reshape the input (28*28*1) or (32*32*3) to 1-D
    model.add(Flatten(input_shape=data.train_data.shape[1:]))
    # dense layers (the hidden layer)
    n = 0
    for param in params:
        n += 1
        model.add(Dense(param, kernel_initializer='he_uniform'))
        # ReLU activation
        if activation == "arctan":
            model.add(Lambda(lambda x: tf.atan(x), name=activation+"_"+str(n)))
        else:
            model.add(Activation(activation, name=activation+"_"+str(n)))
    # the output layer, with 10 classes
    model.add(Dense(10, kernel_initializer='he_uniform'))
    
    # load initial weights when given
    if init is not None:
        model.load_weights(init)

    # define the loss function which is the cross entropy between prediction and true label
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted/train_temp)

    if optimizer_name == "sgd":
        # initiate the SGD optimizer with given hyper parameters
        optimizer = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)
    elif optimizer_name == "adam":
        optimizer = Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=None, decay=decay, amsgrad=False)

    # compile the Keras model, given the specified loss and optimizer
    model.compile(loss=fn,
                  optimizer=optimizer,
                  metrics=['accuracy'])
    
    model.summary()
    print("Traing a {} layer model, saving to {}".format(len(params) + 1, file_name))
    # run training with given dataset, and print progress
    history = model.fit(data.train_data, data.train_labels,
              batch_size=batch_size,
              validation_data=(data.validation_data, data.validation_labels),
              epochs=num_epochs,
              shuffle=True)
    

    # save model to a file
    if file_name is not None:
        model.save(file_name)
        print('model saved to ', file_name)
    
    return {'model':model, 'history':history}
Example 21
def train(data,
          file_name,
          params,
          num_epochs=50,
          batch_size=128,
          train_temp=1,
          init=None):
    """
    Standard neural network training procedure.
    """
    model = Sequential()

    print(data.train_data.shape)

    model.add(Conv2D(params[0], (3, 3), input_shape=data.train_data.shape[1:]))
    model.add(Activation('relu'))
    model.add(Conv2D(params[1], (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(params[2], (3, 3)))
    model.add(Activation('relu'))
    model.add(Conv2D(params[3], (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(params[4]))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(params[5]))
    model.add(Activation('relu'))
    model.add(Dense(10))

    if init is not None:
        model.load_weights(init)

    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted /
                                                       train_temp)

    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    model.fit(data.train_data,
              data.train_labels,
              batch_size=batch_size,
              validation_data=(data.validation_data, data.validation_labels),
              epochs=num_epochs,
              shuffle=True)

    if file_name is not None:
        model.save(file_name)

    return model
Example 22
    def classifier(self, x):
        x = Dropout(0.5)(x)             # nn.Dropout(),
        x = Dense(4096)(x)              # nn.Linear(256 * 6 * 6, 4096),
        x = Activation('relu')(x)       # nn.ReLU(inplace=True),
        x = Dropout(0.5)(x)             # nn.Dropout(),
        x = Dense(4096)(x)              # nn.Linear(4096, 4096),
        x = Activation('relu')(x)       # nn.ReLU(inplace=True),
        x = Dense(self.num_classes)(x)  # nn.Linear(4096, num_classes),
        return x
Example 23
def train(data,
          file_name,
          params,
          num_epochs=50,
          batch_size=128,
          train_temp=1,
          init=None,
          lr=0.01,
          decay=1e-5,
          momentum=0.9):
    """
    Train a 2-layer simple network for MNIST and CIFAR
    """
    # create a Keras sequential model
    model = Sequential()
    # reshape the input (28*28*1) or (32*32*3) to 1-D
    model.add(Flatten(input_shape=data.train_data.shape[1:]))
    # first dense layer (the hidden layer)
    model.add(Dense(params[0]))
    # \alpha = 10 in softplus, multiply input by 10
    model.add(Lambda(lambda x: x * 10))
    # in Keras the softplus activation cannot set \alpha
    model.add(Activation('softplus'))
    # so manually add \alpha to the network
    model.add(Lambda(lambda x: x * 0.1))
    # the output layer, with 10 classes
    model.add(Dense(10))

    # load initial weights when given
    if init is not None:
        model.load_weights(init)

    # define the loss function which is the cross entropy between prediction and true label
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted /
                                                       train_temp)

    # initiate the SGD optimizer with given hyper parameters
    sgd = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)

    # compile the Keras model, given the specified loss and optimizer
    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    # run training with given dataset, and print progress
    model.fit(data.train_data,
              data.train_labels,
              batch_size=batch_size,
              validation_data=(data.validation_data, data.validation_labels),
              epochs=num_epochs,
              shuffle=True)

    # save model to a file
    if file_name is not None:
        model.save(file_name)

    return model
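Why the Lambda pair emulates an alpha parameter: the network computes 0.1 * softplus(10 * x) = (1/alpha) * log(1 + exp(alpha * x)) with alpha = 10, a sharpened softplus that stays smooth but hugs ReLU. A quick numeric check:

import numpy as np

def softplus_alpha(x, alpha=10.0):
    return np.log1p(np.exp(alpha * x)) / alpha

x = np.array([-1.0, -0.1, 0.0, 0.1, 1.0])
print(softplus_alpha(x))           # ~[0.0000, 0.0313, 0.0693, 0.1313, 1.0000]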
Example 24
def get_dae_clf():
    model1 = Sequential()

    model1.add(Lambda(lambda x_: x_ + 0.5, input_shape=(28, 28, 1)))

    # Encoder
    model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
    model1.add(AveragePooling2D((2, 2), padding="same"))
    model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))

    # Decoder
    model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
    model1.add(UpSampling2D((2, 2)))
    model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
    model1.add(Conv2D(1, (3, 3), activation='sigmoid', padding='same', activity_regularizer=regs.l2(1e-9)))

    model1.add(Lambda(lambda x_: x_ - 0.5))

    model1.load_weights("./dae/mnist")
    model1.compile(loss='mean_squared_error', metrics=['mean_squared_error'], optimizer='adam')

    model2 = Sequential()

    model2.add(Conv2D(32, (3, 3), input_shape=(28, 28, 1)))
    model2.add(Activation('relu'))
    model2.add(Conv2D(32, (3, 3)))
    model2.add(Activation('relu'))
    model2.add(MaxPooling2D(pool_size=(2, 2)))

    model2.add(Conv2D(64, (3, 3)))
    model2.add(Activation('relu'))
    model2.add(Conv2D(64, (3, 3)))
    model2.add(Activation('relu'))
    model2.add(MaxPooling2D(pool_size=(2, 2)))

    model2.add(Flatten())
    model2.add(Dense(200))
    model2.add(Activation('relu'))
    model2.add(Dropout(0.5))
    model2.add(Dense(200))
    model2.add(Activation('relu'))
    model2.add(Dense(10))

    model2.load_weights("./models/mnist")

    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct, logits=predicted)

    model2.compile(loss=fn, optimizer=SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True), metrics=['accuracy'])

    model = Sequential()
    model.add(model1)
    model.add(model2)
    model.compile(loss=fn, optimizer=SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True), metrics=['accuracy'])

    return model
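The stacked model expects MNIST pixels shifted to [-0.5, 0.5]; the first Lambda undoes the shift before the autoencoder (which works in [0, 1]) and the last one shifts back for the classifier. A usage sketch, assuming both weight files exist at the paths above:

import numpy as np

model = get_dae_clf()
x = np.random.rand(1, 28, 28, 1).astype("float32") - 0.5
logits = model.predict(x)
print(logits.argmax(axis=1))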
Example 25
def build_regressor(optimizer):
    regressor = Sequential()
    regressor.add(Dense(units=6, kernel_initializer='uniform', activation='relu', input_dim=11))
    # Improving the ANN
    # Dropout Regularization to reduce overfitting if needed
    regressor.add(Dropout(0.1))
    regressor.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))
    regressor.add(Dense(units=1, kernel_initializer='uniform', activation='linear'))
    regressor.compile(optimizer=optimizer, loss='mean_squared_error')
    return regressor
Example 26
File: ann1.py Project: KSR4599/ANN
def build_classifier(optimizer):
    classifier = Sequential()
    classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu', input_dim=11))
    # Improving the ANN
    # Dropout Regularization to reduce overfitting if needed
    classifier.add(Dropout(0.1))
    classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))
    classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
    classifier.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
    return classifier
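Because build_classifier takes the optimizer as an argument, it plugs straight into a hyper-parameter search. A hedged sketch with synthetic data in place of the real 11-feature matrix (the wrapper import path varies by Keras version):

import numpy as np
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV

X = np.random.rand(200, 11)
y = np.random.randint(0, 2, 200)
clf = KerasClassifier(build_fn=build_classifier, verbose=0)
grid = GridSearchCV(clf,
                    param_grid={'optimizer': ['adam', 'rmsprop'],
                                'batch_size': [25, 32],
                                'epochs': [10, 50]},
                    scoring='accuracy', cv=3)
print(grid.fit(X, y).best_params_)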
Example 27
def generator(data_shape):
    model = Sequential()
    model.add(Dense(macro._LAYER_DIM, activation='relu', input_shape=(macro._NOISE_DIM+macro._PROP_DIM,)))
    #model.add(Dropout(0.2))
    model.add(Dense(2*macro._LAYER_DIM, activation='relu'))
    #model.add(Dropout(0.2))
    #model.add(Dense(3*macro._LAYER_DIM, activation='relu'))
    #model.add(Dropout(0.2))
    # use sigmoid function to constrain output from 0 to 1.
    model.add(Dense(data_shape, activation='sigmoid'))
    return model
Example 28
def discriminator(data_shape):
    model = Sequential()
    model.add(Dense(2*macro._LAYER_DIM, activation='relu', input_shape=(data_shape,)))
    #model.add(Dropout(0.2))
    #model.add(Dense(3*macro._LAYER_DIM, activation='relu', input_shape=(data_shape,)))
    #model.add(Dropout(0.2))
    #model.add(Dense(2*macro._LAYER_DIM, activation='relu'))
    #model.add(Dropout(0.2))
    model.add(Dense(macro._LAYER_DIM, activation='relu'))
    #model.add(Dropout(0.2))
    return model
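generator() and discriminator() are the two halves of a GAN, but the discriminator returned here has no output head. A sketch of wiring them together for the generator's training step; the sigmoid head and the data_shape value are assumptions, and macro._* are the same module constants used above:

from keras.layers import Dense, Input
from keras.models import Model

d = discriminator(data_shape=32)
d.add(Dense(1, activation='sigmoid'))   # real-vs-fake probability head (assumed)
d.compile(optimizer='adam', loss='binary_crossentropy')

g = generator(data_shape=32)
d.trainable = False                     # freeze the critic while training the generator
z = Input(shape=(macro._NOISE_DIM + macro._PROP_DIM,))
combined = Model(z, d(g(z)))
combined.compile(optimizer='adam', loss='binary_crossentropy')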
Example 29
def CreateSimpleImageModel_512():
    dataIn = Input(shape=(3, ))
    layer = Dense(4 * 4, activation='tanh')(dataIn)
    layer = Dense(512 * 512 * 4, activation='linear')(layer)
    layer = Reshape((1, 512, 512, 4))(layer)
    modelOut = layer
    model = Model(inputs=[dataIn], outputs=[modelOut])
    adam = Adam(lr=0.005, decay=0.0001)
    model.compile(loss='mean_squared_error',
                  optimizer=adam,
                  metrics=['accuracy'])
    return model
Example 30
    def __init__(self,
                 restore=None,
                 session=None,
                 use_softmax=False,
                 use_brelu=False,
                 activation="relu"):
        def bounded_relu(x):
            return K.relu(x, max_value=1)

        if use_brelu:
            activation = bounded_relu

        print("inside CIFARModel: activation = {}".format(activation))

        self.num_channels = 3
        self.image_size = 32
        self.num_labels = 10

        model = Sequential()

        model.add(Conv2D(64, (3, 3), input_shape=(32, 32, 3)))
        model.add(Activation(activation))
        model.add(Conv2D(64, (3, 3)))
        model.add(Activation(activation))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Conv2D(128, (3, 3)))
        model.add(Activation(activation))
        model.add(Conv2D(128, (3, 3)))
        model.add(Activation(activation))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Flatten())
        model.add(Dense(256))
        model.add(Activation(activation))
        model.add(Dense(256))
        model.add(Activation(activation))
        model.add(Dense(10))
        if use_softmax:
            model.add(Activation('softmax'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(
                    K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
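bounded_relu relies on K.relu's max_value argument, which clips the activation at 1, i.e. min(max(x, 0), 1). A numpy equivalent as a quick check (a sketch, not part of the original class):

import numpy as np

def bounded_relu_np(x):
    return np.minimum(np.maximum(x, 0.0), 1.0)

print(bounded_relu_np(np.array([-0.5, 0.3, 2.0])))   # [0.  0.3 1. ]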