Example #1
File: ann1.py Project: KSR4599/ANN
def build_classifier():
    classifier = Sequential()
    classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu', input_dim=11))
    classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))
    classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
    classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return classifier
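
A minimal usage sketch for the factory above (X_train and y_train are placeholder arrays with 11 features per row; the batch size and epoch count are illustrative, not from the source):

classifier = build_classifier()
classifier.fit(X_train, y_train, batch_size=10, epochs=100)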
Example #2
def target_last_layer():
    model = Sequential()
    model.add(
        Dense(mc._TARGET_DIM_NUM,
              name='target_nn_output',
              input_shape=(mc._OUT_DIM, )))
    return model
Example #3
def source_last_layer():
    model = Sequential()
    model.add(
        Dense(mc._SOURCE_DIM_NUM,
              name='source_nn_output',
              input_shape=(mc._OUT_DIM, )))
    return model
Example #4
def build_regressor():
    regressor = Sequential()
    regressor.add(Dense(units=6, kernel_initializer='uniform', activation='relu', input_dim=11))
    regressor.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))
    regressor.add(Dense(units=1, kernel_initializer='uniform', activation='linear'))
    regressor.compile(optimizer='adam', loss='mean_squared_error')
    return regressor
Example #5
    def build(self):
        """
        Builds the tiny yolo v2 network.
        :param input: input image batch to the network
        :return: logits output from network
        """
        self.model = Sequential()
        self.model.add(Convolution2D(16, (3, 3), input_shape=(416, 416, 3), padding='same'))
        self.model.add(LeakyReLU())
        self.model.add(BatchNormalization())
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))

        self.model.add(Convolution2D(32, (3, 3), padding='same'))
        self.model.add(LeakyReLU())
        self.model.add(BatchNormalization())
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))

        self.model.add(Convolution2D(64, (3, 3), padding='same'))
        self.model.add(LeakyReLU())
        self.model.add(BatchNormalization())
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))

        self.model.add(Convolution2D(128, (3, 3), padding='same'))
        self.model.add(LeakyReLU())
        self.model.add(BatchNormalization())
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))

        self.model.add(Convolution2D(256, (3, 3), padding='same'))
        self.model.add(LeakyReLU())
        self.model.add(BatchNormalization())
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))

        self.model.add(Convolution2D(512, (3, 3), padding='same'))
        self.model.add(LeakyReLU())
        self.model.add(BatchNormalization())
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 1), padding='valid'))

        self.model.add(Convolution2D(1024, (3, 3), padding='same'))
        self.model.add(LeakyReLU())
        self.model.add(BatchNormalization())
        self.model.add(Convolution2D(1024, (3, 3), padding='same'))
        self.model.add(LeakyReLU())
        self.model.add(BatchNormalization())

        self.model.add(Convolution2D(125, (1, 1), activation=None))

        if self.config.optimizer == 'adam':
            opt = Adam()
        elif self.config.optimizer == 'sgd':
            opt = SGD()
        else:
            raise ValueError('unsupported optimizer: %s' % self.config.optimizer)

        if self.config.loss == 'categorical_crossentropy':
            loss = 'categorical_crossentropy'
        elif self.config.loss == 'yolov2_loss':
            raise NotImplementedError('yolov2_loss is not implemented')
        else:
            raise ValueError('unsupported loss: %s' % self.config.loss)

        self.model.compile(loss=loss, optimizer=opt, metrics=['accuracy'])
        self.model.summary()
        return self.model
Example #6
    def _build_model(self):
        # Neural Net for Deep-Q learning Model
        model = Sequential()
        model.add(Dense(24, input_dim=self.state_size, activation='relu'))
        model.add(Dense(24, activation='relu'))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
        return model
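
For context, a hedged sketch of how a Deep-Q model like this one is typically queried for a greedy action (state is a placeholder array of shape (1, state_size); numpy is assumed to be imported as np):

q_values = model.predict(state)       # shape (1, action_size)
action = int(np.argmax(q_values[0]))  # index of the highest predicted Q-value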
Example #7
    def __init__(self):
        self.losses = []
        self.classifier = Sequential()
        self.x_vail = []
        self.y_vail = []
        self.train_filepath = ''
        self.train_img_filepath = ''
        self.valid_filepath = ''
        self.valid_img_filepath = ''
        self.test_img_filepath = ''
        self.test_addition_img_filepath = ''
        self.test_img_name_list = ''
        self.y_map = {}
Example #8
def network(num_classes):
    model = Sequential()
    model.add(Dense(10, activation='relu', input_shape=(4, )))
    model.add(Dense(20, activation='relu'))
    model.add(Dense(10, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    return model
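
network() returns an uncompiled model; a plausible follow-up (the loss and optimizer here are assumptions, not from the source project) is:

model = network(num_classes=3)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])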
Example #9
    def load_codec(self, weights_prefix):
        encoder_weight_filename = weights_prefix + "_encoder.h5"
        decoder_weight_filename = weights_prefix + "_decoder.h5"

        if not os.path.isfile(encoder_weight_filename):
            raise Exception("The file for encoder weights does not exist:{}".format(encoder_weight_filename))
        self.encoder.load_weights(encoder_weight_filename)

        if not os.path.isfile(decoder_weight_filename):
            raise Exception("The file for decoder weights does not exist:{}".format(decoder_weight_filename))
        self.decoder.load_weights(decoder_weight_filename)

        print("Encoder summaries")
        self.encoder.summary()

        _, encode_H, encode_W, numChannels = self.encoder.output_shape
        config = self.decoder.get_config()
        config2 = config[1::]
        config2[0]['config']['batch_input_shape'] = (None, encode_H, encode_W, numChannels)
        decoder_temp = Sequential.from_config(config2, custom_objects={"tf": tf})

        # set weights
        cnt = -1
        for l in self.decoder.layers:
            cnt += 1
            if cnt == 0:
                continue
            weights = l.get_weights()
            decoder_temp.layers[cnt - 1].set_weights(weights)

        self.decoder = decoder_temp
        print("Decoder summaries")
        self.decoder.summary()
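
The rebuild above leans on the old Sequential config format, where get_config() returns a plain list of per-layer dicts. A minimal sketch of the same round-trip (m is a placeholder for an already-built model):

cfg = m.get_config()                 # list of layer config dicts (older Keras)
clone = Sequential.from_config(cfg)  # rebuild an identical architecture
clone.set_weights(m.get_weights())   # copy the trained weights across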
Example #10
def generator(data_shape):
    model = Sequential()
    model.add(Dense(macro._LAYER_DIM, activation='relu', input_shape=(macro._NOISE_DIM+macro._PROP_DIM,)))
    #model.add(Dropout(0.2))
    model.add(Dense(2*macro._LAYER_DIM, activation='relu'))
    #model.add(Dropout(0.2))
    #model.add(Dense(3*macro._LAYER_DIM, activation='relu'))
    #model.add(Dropout(0.2))
    # use sigmoid function to constrain output from 0 to 1.
    model.add(Dense(data_shape, activation='sigmoid'))
    return model
Example #11
def layer3_last(input_dim1, input_dim2, input_dim3, filt_num):
    ''' last layer : Conv - Relu - Conv '''

    seq = Sequential()

    for i in range(1):
        seq.add(Conv2D(filt_num, (2, 2), padding='valid',
                       input_shape=(input_dim1, input_dim2, input_dim3),
                       name='S3_c1%d' % (i)))  # pow(25/23,2)*12*(maybe7?) 43 3
        seq.add(Activation('relu', name='S3_relu1%d' % (i)))

    seq.add(Conv2D(1, (2, 2), padding='valid', name='S3_last'))

    return seq
Example #12
    def __init__(self):
        self.model = Sequential()
        self.model.add(
            Conv2D(32, (3, 3), input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Conv2D(32, (3, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Conv2D(64, (3, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Flatten())
        self.model.add(Dense(64))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(1))
        self.model.add(Activation('sigmoid'))
Example #13
def latent(data_shape):
    model = Sequential()
    model.add(Dense(mc._OUT_DIM, activation='relu', input_shape=(data_shape,),
                    kernel_regularizer=regularizers.l2(mc._L2_REGULARIZE_RATE)))
    # Keras ignores input_shape on non-first layers, so it is omitted here.
    model.add(Dense(mc._OUT_DIM, activation='relu',
                    kernel_regularizer=regularizers.l2(mc._L2_REGULARIZE_RATE)))
    return model
Example #14
    def __init__(self, img_size, img_channels=3, output_size=17):
        self.losses = []
        self.model = Sequential()
        self.model.add(
            BatchNormalization(input_shape=(img_size[0], img_size[1],
                                            img_channels)))

        self.model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
        self.model.add(Conv2D(32, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=2))
        self.model.add(Dropout(0.3))

        self.model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
        self.model.add(Conv2D(64, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=2))
        self.model.add(Dropout(0.3))

        self.model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
        self.model.add(Conv2D(128, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=2))
        self.model.add(Dropout(0.3))

        self.model.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
        self.model.add(Conv2D(256, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=2))
        self.model.add(Dropout(0.3))

        self.model.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
        self.model.add(Conv2D(512, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=2))
        self.model.add(Dropout(0.3))

        self.model.add(Flatten())

        self.model.add(Dense(512, activation='relu'))
        self.model.add(BatchNormalization())
        self.model.add(Dropout(0.5))

        self.model.add(Dense(output_size, activation='sigmoid'))
Example #15
def build_model(layers, pct_dropout=0.2):
    """Build computational graph model
    
    Parameters
    ----------
    layers: list | [input, hidden_1, hidden_2, output]
        Dimensions of each layer
    pct_dropout: float | 0.0 to 1.0
        Percentage of dropout for hidden LSTM layers
    
    Returns
    -------
    model: keras.Model
        Compiled keras sequential model
    """
    if not isinstance(layers, list):
        raise TypeError('layers was expected to be of type %s, received %s' %
                        (type([]), type(layers)))
    if len(layers) != 4:
        raise ValueError('4 layer dimensions required, received only %d' %
                         len(layers))

    model = Sequential()

    model.add(
        LSTM(layers[1],
             input_shape=(layers[1], layers[0]),
             return_sequences=True,
             dropout=pct_dropout))

    model.add(LSTM(layers[2], return_sequences=False, dropout=pct_dropout))

    model.add(Dense(layers[3], activation='linear'))

    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
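
A usage sketch following the docstring (the concrete dimensions are illustrative): one input feature, LSTM layers of 50 and 100 units, and a single linear output. The training data must then be shaped (samples, layers[1], layers[0]):

model = build_model([1, 50, 100, 1], pct_dropout=0.2)
# X.shape == (n_samples, 50, 1), y.shape == (n_samples, 1)
model.fit(X, y, batch_size=512, epochs=2, validation_split=0.05)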
Example #16
def layersP3_output(input_shape, filters_count):
    seq = Sequential()
    seq.add(Conv2D(filters_count, (2, 2),
                   padding='same',
                   input_shape=input_shape,
                   activation='relu',
                   name='seq3_conv1_0'))
    seq.add(Conv2D(1, (2, 2), padding='same', name='seq3_last'))
    return seq
Example #17
def get_model(joint,
              verts,
              layers=3,
              activation='tanh',
              units=512,
              input_dim=100):
    """
    Build a training model based on the joint and vertices
    :param joint: RotateTransform values of the joint
    :param verts: Deltas of the vertices
    :param layers: The number of layers to create. A minimum of 2 is required.
    :param activation: The type of activation. Defaults to tanh
    :param units: The units per layer if not the input/output
    :param input_dim: The input dimensions of each layer that is not input/output
    :return: The model, name of the input node, the name of the output_node
    """
    model = Sequential()
    if layers < 2:
        logger.warning('A minimum of 2 layers is required')
        layers = 2

    input_name = 'input_node'
    output_name = 'output_node'
    for layer in range(layers):
        if not layer:
            model.add(
                Dense(units,
                      input_dim=joint.shape[1],
                      activation=activation,
                      name=input_name))
            continue

        if layer == (layers - 1):
            model.add(
                Dense(verts.shape[1], activation='linear', name=output_name))
            continue

        model.add(
            Dense(units,
                  input_dim=input_dim,
                  activation=activation,
                  name="dense_layer_%s" % layer))

    output_node = model.output.name
    input_node = '%s_input:0' % input_name
    return model, input_node, output_node
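
A hedged usage sketch: joint and verts are placeholder numpy arrays whose second dimensions determine the input and output widths, and the compile settings are assumptions:

import numpy as np

joint = np.zeros((100, 9))    # e.g. rotation values per sample
verts = np.zeros((100, 300))  # vertex deltas per sample
model, input_node, output_node = get_model(joint, verts, layers=4)
model.compile(optimizer='adam', loss='mse')
model.fit(joint, verts, epochs=1)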
Example #18
def discriminator(data_shape):
    model = Sequential()
    model.add(Dense(2*macro._LAYER_DIM, activation='relu', input_shape=(data_shape,)))
    #model.add(Dropout(0.2))
    #model.add(Dense(3*macro._LAYER_DIM, activation='relu', input_shape=(data_shape,)))
    #model.add(Dropout(0.2))
    #model.add(Dense(2*macro._LAYER_DIM, activation='relu'))
    #model.add(Dropout(0.2))
    model.add(Dense(macro._LAYER_DIM, activation='relu'))
    #model.add(Dropout(0.2))
    return model
Example #19
    def layer2_merged(self, input_dim1, input_dim2, input_dim3, filt_num,
                      conv_depth):
        ''' Merged layer : Conv - Relu - Conv - BN - Relu '''

        seq = Sequential()

        for i in range(conv_depth):
            seq.add(Conv2D(filt_num, (2, 2),
                           padding='valid',
                           input_shape=(input_dim1, input_dim2, input_dim3),
                           name='S2_c1%d' % (i)))
            seq.add(Activation('relu', name='S2_relu1%d' % (i)))
            seq.add(Conv2D(filt_num, (2, 2), padding='valid', name='S2_c2%d' % (i)))
            seq.add(BatchNormalization(axis=-1, name='S2_BN%d' % (i)))
            seq.add(Activation('relu', name='S2_relu2%d' % (i)))

        return seq
Example #20
    def layer1_multistream(self, input_dim1, input_dim2, input_dim3, filt_num):
        ''' Multi-Stream layer : Conv - Relu - Conv - BN - Relu  '''
        seq = Sequential()

        # seq.add(Reshape((input_dim1,input_dim2,input_dim3),input_shape=(input_dim1, input_dim2, input_dim3,1)))
        for i in range(3):
            seq.add(Conv2D(int(filt_num), (2, 2),
                           input_shape=(input_dim1, input_dim2, input_dim3),
                           padding='valid',
                           name='S1_c1%d' % (i)))
            seq.add(Activation('relu', name='S1_relu1%d' % (i)))
            seq.add(Conv2D(int(filt_num), (2, 2),
                           padding='valid',
                           name='S1_c2%d' % (i)))
            seq.add(BatchNormalization(axis=-1, name='S1_BN%d' % (i)))
            seq.add(Activation('relu', name='S1_relu2%d' % (i)))

        seq.add(Reshape((input_dim1 - 6, input_dim2 - 6, int(filt_num))))

        return seq
Example #21
def layer1_multistream(input_dim1,
                       input_dim2,
                       input_dim3,
                       filt_num,
                       do_vis=False,
                       name=None):
    ''' Multi-Stream layer : Conv - Relu - Conv - BN - Relu  '''
    seq = Sequential()
    if do_vis:
        global feats
        global feat_names

    # seq.add(Reshape((input_dim1,input_dim2,input_dim3),input_shape=(input_dim1, input_dim2, input_dim3,1)))
    for i in range(3):
        seq.add(
            Conv2D(int(filt_num), (2, 2),
                   input_shape=(input_dim1, input_dim2, input_dim3),
                   padding='valid',
                   name='S1_c1%d' % (i)))
        seq.add(Activation('relu', name='S1_relu1%d' % (i)))

        seq.add(
            Conv2D(int(filt_num), (2, 2),
                   padding='valid',
                   name='S1_c2%d' % (i)))
        seq.add(BatchNormalization(axis=-1, name='S1_BN%d' % (i)))
        seq.add(Activation('relu', name='S1_relu2%d' % (i)))
    if do_vis:
        feats.append(seq)
        feat_names.append(name + '_S1_c22')
    seq.add(Reshape((input_dim1 - 6, input_dim2 - 6, int(filt_num))))

    return seq
Example #22
    def __init__(self, restore=None, session=None, use_softmax=False, use_brelu=False, activation="relu"):
        def bounded_relu(x):
            return K.relu(x, max_value=1)

        if use_brelu:
            activation = bounded_relu

        print("inside MNISTModel: activation = {}".format(activation))

        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10

        model = Sequential()

        model.add(Conv2D(32, (3, 3),
                         input_shape=(28, 28, 1)))
        model.add(Activation(activation))
        model.add(Conv2D(32, (3, 3)))
        model.add(Activation(activation))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        
        model.add(Conv2D(64, (3, 3)))
        model.add(Activation(activation))
        model.add(Conv2D(64, (3, 3)))
        model.add(Activation(activation))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        
        model.add(Flatten())
        model.add(Dense(200))
        model.add(Activation(activation))
        model.add(Dense(200))
        model.add(Activation(activation))
        model.add(Dense(10))
        # output log probability, used for black-box attack
        if use_softmax:
            model.add(Activation('softmax'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(K.function([model.layers[0].input], [layer.output]))

        self.model = model
        self.layer_outputs = layer_outputs
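
The layer_outputs list built above provides per-layer activation probes. A sketch of using them (the weights path and the batch x, shaped (n, 28, 28, 1), are placeholders):

m = MNISTModel(restore='mnist_weights.h5')
activations = [f([x])[0] for f in m.layer_outputs]  # one array per Conv2D/Dense layer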
Example #23
    def __init__(self, restore=None, session=None, use_softmax=False):
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10
        self.shape = [None, 28, 28, self.num_channels]
        model = Sequential()
        kernel_size = (5, 5)
        drop_rate = 0.3
        model.add(Conv2D(32, kernel_size, activation='relu',
                                padding='same', name='block1_conv1', input_shape=(28,28,1)))  # 1
        model.add(MaxPooling2D(pool_size=(2, 2), name='block1_pool1'))  # 2
        model.add(Dropout(drop_rate))

        # block2
        model.add(Conv2D(64, kernel_size, activation='relu', padding='same', name='block2_conv1'))  # 4
        model.add(MaxPooling2D(pool_size=(2, 2), name='block2_pool1'))  # 5
        model.add(Dropout(drop_rate))

        model.add(Flatten(name='flatten'))

        model.add(Dense(120, activation='relu', name='fc1'))  # -5
        model.add(Dropout(drop_rate))
        model.add(Dense(84, activation='relu', name='fc2'))  # -3
        model.add(Dense(10, name='before_softmax'))  # -2
        model.add(Activation('softmax', name='predictions'))  #
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Example #24
def convert(file_name, new_name, cifar=False):
    if not cifar:
        eq_weights, new_params = get_weights(file_name)
        data = MNIST()
    else:
        eq_weights, new_params = get_weights(file_name, inp_shape=(32, 32, 3))
        data = CIFAR()
    model = Sequential()
    model.add(Flatten(input_shape=data.train_data.shape[1:]))
    for param in new_params:
        model.add(Dense(param))
        model.add(Lambda(lambda x: tf.nn.relu(x)))
    model.add(Dense(10))

    for i in range(len(eq_weights)):
        try:
            print(eq_weights[i][0].shape)
        except Exception:
            pass
        model.layers[i].set_weights(eq_weights[i])

    sgd = SGD(lr=0.01, decay=1e-5, momentum=0.9, nesterov=True)

    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    model.save(new_name)
    acc = model.evaluate(data.validation_data, data.validation_labels)[1]
    printlog("Converting CNN to MLP")
    nlayer = file_name.split('_')[-3][0]
    filters = file_name.split('_')[-2]
    kernel_size = file_name.split('_')[-1]
    printlog(
        "model name = {0}, numlayer = {1}, filters = {2}, kernel size = {3}".
        format(file_name, nlayer, filters, kernel_size))
    printlog("Model accuracy: {:.3f}".format(acc))
    printlog("-----------------------------------")
    return acc
Example #25
def layer1_multistream(input_dim1, input_dim2, input_dim3, filt_num,
                       channelImage):
    seq = Sequential()
    ''' Multi-Stream layer : Conv - Relu - Conv - BN - Relu  '''

    #seq.add(Reshape((input_dim1,input_dim2,input_dim3),input_shape=(input_dim1, input_dim2, input_dim3,1)))
    for i in range(3):
        #seq.add(Conv2D(int(filt_num),(2,2),input_shape=(input_dim1, input_dim2, input_dim3), padding='valid', name='S1_c1%d' %(i),data_format='channels_last' ))
        seq.add(
            Conv3D(int(filt_num), (2, 2, 2),
                   input_shape=(input_dim1, input_dim2, input_dim3,
                                channelImage),
                   padding='valid',
                   name='S1_c1%d' % (i),
                   data_format='channels_last'))
        seq.add(Activation('relu', name='S1_relu1%d' % (i)))
        seq.add(
            Conv3D(int(filt_num), (2, 2, 2),
                   padding='valid',
                   name='S1_c2%d' % (i),
                   data_format='channels_last'))
        seq.add(BatchNormalization(axis=-1, name='S1_BN%d' % (i)))
        seq.add(Activation('relu', name='S1_relu2%d' % (i)))

    #seq.add(Reshape((input_dim1-6,input_dim2-6,int(filt_num))))

    return seq
Example #26
    def __init__(self, restore=None, session=None, use_log=False):
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10

        model = Sequential()
        model.add(Flatten(input_shape=(28, 28, 1)))
        model.add(Dense(1024))
        model.add(Lambda(lambda x: x * 10))
        model.add(Activation('softplus'))
        model.add(Lambda(lambda x: x * 0.1))
        model.add(Dense(10))
        # output log probability, used for black-box attack
        if use_log:
            model.add(Activation('softmax'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(
                    K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Example #27
    def __init__(self, restore=None, session=None, use_softmax=False):
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10
        self.shape = [None, 28, 28, self.num_channels]
        model = Sequential()
        model.add(
            Conv2D(6, (5, 5),
                   padding='valid',
                   activation='relu',
                   kernel_initializer='he_normal',
                   input_shape=(28, 28, 1),
                   name='l1'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2), name='l2'))
        model.add(
            Conv2D(16, (5, 5),
                   padding='valid',
                   activation='relu',
                   kernel_initializer='he_normal',
                   name='l3'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2), name='l4'))
        model.add(Flatten())
        model.add(
            Dense(120,
                  activation='relu',
                  kernel_initializer='he_normal',
                  name='l5'))
        model.add(
            Dense(84,
                  activation='relu',
                  kernel_initializer='he_normal',
                  name='l6'))
        model.add(
            Dense(10,
                  activation='softmax',
                  kernel_initializer='he_normal',
                  name='l7'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(
                    K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Example #28
	def onBeginTraining(self):
		ue.log("starting mnist keras cnn training")

		model_file_name = "mnistKerasCNN"
		model_directory = ue.get_content_dir() + "/Scripts/"
		model_sess_path =  model_directory + model_file_name + ".tfsess"
		model_json_path = model_directory + model_file_name + ".json"

		my_file = Path(model_json_path)

		#reset the session each time we get training calls
		K.clear_session()

		#let's train
		batch_size = 128
		num_classes = 10
		epochs = 8

		# input image dimensions
		img_rows, img_cols = 28, 28

		# the data, shuffled and split between train and test sets
		(x_train, y_train), (x_test, y_test) = mnist.load_data()

		if K.image_data_format() == 'channels_first':
			x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
			x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
			input_shape = (1, img_rows, img_cols)
		else:
			x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
			x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
			input_shape = (img_rows, img_cols, 1)

		x_train = x_train.astype('float32')
		x_test = x_test.astype('float32')
		x_train /= 255
		x_test /= 255
		ue.log('x_train shape:' + str(x_train.shape))
		ue.log(str(x_train.shape[0]) + 'train samples')
		ue.log(str(x_test.shape[0]) + 'test samples')

		# convert class vectors to binary class matrices
		y_train = keras.utils.to_categorical(y_train, num_classes)
		y_test = keras.utils.to_categorical(y_test, num_classes)

		model = Sequential()
		model.add(Conv2D(64, kernel_size=(3, 3),
						  activation='relu',
						  input_shape=input_shape))
		
		# model.add(Dropout(0.2))
		# model.add(Flatten())
		# model.add(Dense(512, activation='relu'))
		# model.add(Dropout(0.2))
		# model.add(Dense(num_classes, activation='softmax'))

		#model.add(Conv2D(64, (3, 3), activation='relu'))
		model.add(MaxPooling2D(pool_size=(2, 2)))
		model.add(Dropout(0.25))
		model.add(Flatten())
		model.add(Dense(128, activation='relu'))
		model.add(Dropout(0.5))
		model.add(Dense(num_classes, activation='softmax'))

		model.compile(loss=keras.losses.categorical_crossentropy,
					  optimizer=keras.optimizers.Adadelta(),
					  metrics=['accuracy'])

		model.fit(x_train, y_train,
				  batch_size=batch_size,
				  epochs=epochs,
				  verbose=1,
				  validation_data=(x_test, y_test),
				  callbacks=[self.stopcallback])
		score = model.evaluate(x_test, y_test, verbose=0)
		ue.log("mnist keras cnn training complete.")
		ue.log('Test loss:' + str(score[0]))
		ue.log('Test accuracy:' + str(score[1]))

		self.session = K.get_session()
		self.model = model

		stored = {'model':model, 'session': self.session}

		#run a test evaluation
		ue.log(str(x_test.shape))
		result_test = model.predict(np.reshape(x_test[500], (1, 28, 28, 1)))
		ue.log(str(result_test))

		#flush the architecture model data to disk
		#with open(model_json_path, "w") as json_file:
		#	json_file.write(model.to_json())

		#flush the whole model and weights to disk
		#saver = tf.train.Saver()
		#save_path = saver.save(K.get_session(), model_sess_path)
		#model.save(model_path)

		
		return stored
Example #29
    def __init__(self):
        self.losses = []
        self.classifier = Sequential()
Example #30
from tensorflow.contrib.keras.api import keras
from tensorflow.contrib.keras.api.keras.layers import Activation
from tensorflow.contrib.keras.api.keras.models import Sequential
import tushare as ts
import numpy as np
import time
import matplotlib.pyplot as plt


def plot_results(predicted_data, true_data):
    fig = plt.figure(facecolor='white')
    ax = fig.add_subplot(111)
    ax.plot(true_data, label='True Data')
    plt.plot(predicted_data, label='Prediction')
    plt.legend()
    plt.show()

model = Sequential()
#if tf.gfile.Exists('./logs'):
#    tf.gfile.Remove('./logs')
tbCallBack = keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True)
basetick = 30
pretick = 5
sh001 = ts.get_k_data("000001",index=True)
#print(sh001.head())
#train_x = sh001.iloc[:sh001.index.size-pretick-basetick,1:6]
#print(train_x.head())
train_x = []
train_y = []
for index in range(sh001.index.size-pretick-basetick):
    #ty = sh001.iloc[index+basetick+pretick,3]-sh001.iloc[index+basetick,1]
    ty = sh001.iloc[index+basetick+pretick,3]
    #tx = sh001.iloc[index:index+basetick,1:6]
Example #31
import numpy as np
np.random.seed(1337)

from jellyfish_eye_k.data_set import load_data
from tensorflow.contrib.keras.api.keras.layers import Conv2D, Dense, Dropout, Flatten, MaxPooling2D
from tensorflow.contrib.keras.api.keras.models import Sequential, save_model


(x_train, y_train), (x_validation, y_validation), (x_test, y_test) = load_data()

model = Sequential((
    Conv2D(32, 5, activation='relu', input_shape=x_train[0].shape),
    Conv2D(64, 5, activation='relu'),
    MaxPooling2D(),
    Dropout(0.5),
    Flatten(),
    Dense(512, activation='relu'),
    Dense(256, activation='relu'),
    Dropout(0.5),
    Dense(3, activation='softmax')))

model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=20, epochs=5, verbose=1, validation_data=(x_validation, y_validation))

score = model.evaluate(x_test, y_test, verbose=0)

print('Test loss: {0}'.format(score[0]))
print('Test accuracy: {0}'.format(score[1]))

save_model(model, './jellyfish_eye.h5')
del model
Example #32
class AmazonKerasClassifier:
    def __init__(self):
        self.losses = []
        self.classifier = Sequential()

    def add_conv_layer(self, img_size=(32, 32), img_channels=3):
        self.classifier.add(BatchNormalization(input_shape=(img_size[0], img_size[1], img_channels)))

        self.classifier.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
        self.classifier.add(Conv2D(32, (3, 3), activation='relu'))
        self.classifier.add(MaxPooling2D(pool_size=2))
        self.classifier.add(Dropout(0.25))

        self.classifier.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
        self.classifier.add(Conv2D(64, (3, 3), activation='relu'))
        self.classifier.add(MaxPooling2D(pool_size=2))
        self.classifier.add(Dropout(0.25))

        self.classifier.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
        self.classifier.add(Conv2D(128, (3, 3), activation='relu'))
        self.classifier.add(MaxPooling2D(pool_size=2))
        self.classifier.add(Dropout(0.25))

        self.classifier.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
        self.classifier.add(Conv2D(256, (3, 3), activation='relu'))
        self.classifier.add(MaxPooling2D(pool_size=2))
        self.classifier.add(Dropout(0.25))


    def add_flatten_layer(self):
        self.classifier.add(Flatten())


    def add_ann_layer(self, output_size):
        self.classifier.add(Dense(512, activation='relu'))
        self.classifier.add(BatchNormalization())
        self.classifier.add(Dropout(0.5))
        self.classifier.add(Dense(output_size, activation='sigmoid'))

    def _get_fbeta_score(self, classifier, X_valid, y_valid):
        p_valid = classifier.predict(X_valid)
        return fbeta_score(y_valid, np.array(p_valid) > 0.2, beta=2, average='samples')

    def train_model(self, x_train, y_train, learn_rate=0.001, epoch=5, batch_size=128, validation_split_size=0.2, train_callbacks=()):
        history = LossHistory()

        X_train, X_valid, y_train, y_valid = train_test_split(x_train, y_train,
                                                              test_size=validation_split_size)

        opt = Adam(lr=learn_rate)

        self.classifier.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])


        # early stopping: end training if val_loss has not improved for 3 consecutive epochs
        earlyStopping = EarlyStopping(monitor='val_loss', patience=3, verbose=0, mode='auto')

        self.classifier.fit(X_train, y_train,
                            batch_size=batch_size,
                            epochs=epoch,
                            verbose=1,
                            validation_data=(X_valid, y_valid),
                            callbacks=[history, *train_callbacks, earlyStopping])
        fbeta_score = self._get_fbeta_score(self.classifier, X_valid, y_valid)
        return [history.train_losses, history.val_losses, fbeta_score]

    def save_weights(self, weight_file_path):
        self.classifier.save_weights(weight_file_path)

    def load_weights(self, weight_file_path):
        self.classifier.load_weights(weight_file_path)

    def predict(self, x_test):
        predictions = self.classifier.predict(x_test)
        return predictions

    def map_predictions(self, predictions, labels_map, thresholds):
        """
        Return the predictions mapped to their labels
        :param predictions: the predictions from the predict() method
        :param labels_map: the map
        :param thresholds: The threshold of each class to be considered as existing or not existing
        :return: the predictions list mapped to their labels
        """
        predictions_labels = []
        for prediction in predictions:
            labels = [labels_map[i] for i, value in enumerate(prediction) if value > thresholds[i]]
            predictions_labels.append(labels)

        return predictions_labels

    def close(self):
        backend.clear_session()
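
An illustrative call of the map_predictions method above (labels, thresholds, and scores here are made up):

labels_map = {0: 'agriculture', 1: 'clear', 2: 'road'}
thresholds = [0.2, 0.2, 0.2]
predictions = [[0.9, 0.1, 0.4], [0.05, 0.7, 0.1]]
# map_predictions(predictions, labels_map, thresholds)
# -> [['agriculture', 'road'], ['clear']]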
Example #33
X_train = []
y_train = []
# Build 60-timestep windows over the scaled series
for i in range(60, len(training_set_scaled)):
    X_train.append(training_set_scaled[i - 60:i, 0])
    y_train.append(training_set_scaled[i, 0])
X_train, y_train = np.array(X_train), np.array(y_train)

# Reshaping
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))

# Part 2 - Building the RNN

# Importing the Keras libraries and packages
from tensorflow.contrib.keras.api.keras.models import Sequential
from tensorflow.contrib.keras.api.keras.layers import Dense
from tensorflow.contrib.keras.api.keras.layers import LSTM

# Initialising the RNN
regressor = Sequential()

# Adding the input layer and the LSTM layer
regressor.add(LSTM(units=3, return_sequences=True, input_shape=(None, 1)))

# Adding a second LSTM layer
regressor.add(LSTM(units=3, return_sequences=True))

# Adding a third LSTM layer
regressor.add(LSTM(units=3, return_sequences=True))

# Adding a fourth LSTM layer
regressor.add(LSTM(units=3))

# Adding the output layer
regressor.add(Dense(units=1))
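
The snippet ends before compilation; a typical continuation (the optimizer, loss, and fit hyperparameters are assumptions, not from the source) would be:

# Compiling the RNN
regressor.compile(optimizer='adam', loss='mean_squared_error')

# Fitting the RNN to the training set
regressor.fit(X_train, y_train, epochs=100, batch_size=32)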
Example #34
# Load the data, shuffled and split between train and test sets
data = mnist.load_data({'dataset': {}})
x_train = data['x_train']
y_train = data['y_train']
x_test = data['x_test']
y_test = data['y_test']

# Bring data into necessary format
x_train = mnist.preprocess(x_train, subtact_mean=False)
x_test = mnist.preprocess(x_test, subtact_mean=False)
y_train = mnist.to_categorical(y_train, mnist.n_classes)
y_test = mnist.to_categorical(y_test, mnist.n_classes)

# Define model
input_shape = (mnist.img_rows, mnist.img_cols, 1)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(mnist.n_classes, activation='softmax'))

# Fit model
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(),
              metrics=['accuracy'])
model.fit(x_train, y_train,