Code example #1
    def __init__(self, restore=None, session=None, use_softmax=False):
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10
        self.shape = [None, 28 * 28]
        model = Sequential()
        model.add(
            Dense(512,
                  activation='relu',
                  input_shape=(28 * 28, ),
                  name='dense_1'))
        model.add(Dropout(0.2, name='d1'))
        model.add(Dense(512, activation='relu', name='dense_2'))
        model.add(Dropout(0.2, name='d2'))
        model.add(Dense(10, activation='softmax', name='dense_3'))
        if restore:
            model.load_weights(restore, by_name=True)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(
                    K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Code example #2
File: models.py Project: LIANGMA314/ATDL
def target_last_layer():
    model = Sequential()
    model.add(
        Dense(mc._TARGET_DIM_NUM,
              name='target_nn_output',
              input_shape=(mc._OUT_DIM, )))
    return model
Code example #3
File: models.py Project: LIANGMA314/ATDL
def source_last_layer():
    model = Sequential()
    model.add(
        Dense(mc._SOURCE_DIM_NUM,
              name='source_nn_output',
              input_shape=(mc._OUT_DIM, )))
    return model
Code example #4
File: models.py Project: LIANGMA314/ATDL
def latent(data_shape):
    model = Sequential()
    model.add(Dense(mc._OUT_DIM, activation='relu', input_shape=(data_shape,),
                    kernel_regularizer=regularizers.l2(mc._L2_REGULARIZE_RATE)))
    model.add(Dense(mc._OUT_DIM, activation='relu',
                    kernel_regularizer=regularizers.l2(mc._L2_REGULARIZE_RATE)))
    return model
Code example #5
File: model.py Project: tianhanwen/tianhanwen.git.io
def layer1_multistream(input_dim1, input_dim2, input_dim3, filt_num,
                       channelImage):
    ''' Multi-Stream layer : Conv - Relu - Conv - BN - Relu '''
    seq = Sequential()

    #seq.add(Reshape((input_dim1,input_dim2,input_dim3),input_shape=(input_dim1, input_dim2, input_dim3,1)))
    for i in range(3):
        #seq.add(Conv2D(int(filt_num),(2,2),input_shape=(input_dim1, input_dim2, input_dim3), padding='valid', name='S1_c1%d' %(i),data_format='channels_last' ))
        seq.add(
            Conv3D(int(filt_num), (2, 2, 2),
                   input_shape=(input_dim1, input_dim2, input_dim3,
                                channelImage),
                   padding='valid',
                   name='S1_c1%d' % (i),
                   data_format='channels_last'))
        seq.add(Activation('relu', name='S1_relu1%d' % (i)))
        seq.add(
            Conv3D(int(filt_num), (2, 2, 2),
                   padding='valid',
                   name='S1_c2%d' % (i),
                   data_format='channels_last'))
        seq.add(BatchNormalization(axis=-1, name='S1_BN%d' % (i)))
        seq.add(Activation('relu', name='S1_relu2%d' % (i)))

    #seq.add(Reshape((input_dim1-6,input_dim2-6,int(filt_num))))

    return seq
Code example #6
    def __init__(self, params, restore=None, session=None, use_log=False, image_size=28, image_channel=1):
        
        self.image_size = image_size
        self.num_channels = image_channel
        self.num_labels = 10
        
        model = Sequential()
        model.add(Flatten(input_shape=(image_size, image_size, image_channel)))
        # list of all hidden units weights
        self.U = []
        for param in params:
            # add each dense layer, and save a reference to list U
            self.U.append(Dense(param))
            model.add(self.U[-1])
            # ReLU activation
            model.add(Activation('relu'))
        self.W = Dense(10)
        model.add(self.W)
        # output log probability, used for black-box attack
        if use_log:
            model.add(Activation('softmax'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Code example #7
def train(data, file_name, params, num_epochs=50, batch_size=256, train_temp=1, init=None, lr=0.01, decay=1e-5, momentum=0.9, activation="relu", optimizer_name="sgd"):
    """
    Train a n-layer simple network for MNIST and CIFAR
    """
    
    # create a Keras sequential model
    model = Sequential()
    # reshape the input (28*28*1) or (32*32*3) to 1-D
    model.add(Flatten(input_shape=data.train_data.shape[1:]))
    # dense layers (the hidden layer)
    n = 0
    for param in params:
        n += 1
        model.add(Dense(param, kernel_initializer='he_uniform'))
        # ReLU activation
        if activation == "arctan":
            model.add(Lambda(lambda x: tf.atan(x), name=activation+"_"+str(n)))
        else:
            model.add(Activation(activation, name=activation+"_"+str(n)))
    # the output layer, with 10 classes
    model.add(Dense(10, kernel_initializer='he_uniform'))
    
    # load initial weights when given
    if init is not None:
        model.load_weights(init)

    # define the loss function which is the cross entropy between prediction and true label
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted/train_temp)

    if optimizer_name == "sgd":
        # initiate the SGD optimizer with given hyper parameters
        optimizer = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)
    elif optimizer_name == "adam":
        optimizer = Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=None, decay=decay, amsgrad=False)

    # compile the Keras model, given the specified loss and optimizer
    model.compile(loss=fn,
                  optimizer=optimizer,
                  metrics=['accuracy'])
    
    model.summary()
    print("Traing a {} layer model, saving to {}".format(len(params) + 1, file_name))
    # run training with given dataset, and print progress
    history = model.fit(data.train_data, data.train_labels,
              batch_size=batch_size,
              validation_data=(data.validation_data, data.validation_labels),
              epochs=num_epochs,
              shuffle=True)
    

    # save model to a file
    if file_name is not None:
        model.save(file_name)
        print('model saved to ', file_name)
    
    return {'model': model, 'history': history}
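
A hedged usage sketch for train: it assumes a dataset object exposing .train_data, .train_labels, .validation_data, and .validation_labels, such as the MNIST() loader referenced in code example #25; the file path is illustrative.

# Usage sketch (hedged): MNIST() is the dataset loader used in code
# example #25; the save path and layer sizes are illustrative.
data = MNIST()
result = train(data,
               file_name="models/mnist_2x256",
               params=[256, 256],          # two hidden layers of 256 units each
               num_epochs=10,
               activation="relu",
               optimizer_name="sgd")
model, history = result['model'], result['history']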
Code example #8
def layersP3_output(input_shape, filters_count):
    seq = Sequential()
    seq.add(Conv2D(filters_count, (2, 2),
                   padding='same',
                   input_shape=input_shape,
                   activation='relu',
                   name='seq3_conv1_0'))
    seq.add(Conv2D(1, (2, 2), padding='same', name='seq3_last'))
    return seq
Code example #9
File: model.py Project: yoshihidesawada/CompGAN
def discriminator(data_shape):
    model = Sequential()
    model.add(Dense(2*macro._LAYER_DIM, activation='relu', input_shape=(data_shape,)))
    #model.add(Dropout(0.2))
    #model.add(Dense(3*macro._LAYER_DIM, activation='relu', input_shape=(data_shape,)))
    #model.add(Dropout(0.2))
    #model.add(Dense(2*macro._LAYER_DIM, activation='relu'))
    #model.add(Dropout(0.2))
    model.add(Dense(macro._LAYER_DIM, activation='relu'))
    #model.add(Dropout(0.2))
    return model
Code example #10
def layer2_merged(input_dim1, input_dim2, input_dim3, filt_num, conv_depth):
    ''' Merged layer : Conv - Relu - Conv - BN - Relu '''

    seq = Sequential()

    for i in range(conv_depth):
        seq.add(Conv2D(filt_num, (2, 2),
                       padding='valid',
                       input_shape=(input_dim1, input_dim2, input_dim3),
                       name='S2_c1%d' % (i)))
        seq.add(Activation('relu', name='S2_relu1%d' % (i)))
        seq.add(Conv2D(filt_num, (2, 2), padding='valid', name='S2_c2%d' % (i)))
        seq.add(BatchNormalization(axis=-1, name='S2_BN%d' % (i)))
        seq.add(Activation('relu', name='S2_relu2%d' % (i)))

    return seq
Code example #11
def train(data,
          file_name,
          params,
          num_epochs=50,
          batch_size=128,
          train_temp=1,
          init=None,
          lr=0.01,
          decay=1e-5,
          momentum=0.9):
    """
    Train a n-layer simple network for MNIST and CIFAR
    """

    # create a Keras sequential model
    model = Sequential()
    # reshape the input (28*28*1) or (32*32*3) to 1-D
    model.add(Flatten(input_shape=data.train_data.shape[1:]))
    # dense layers (the hidden layer)
    for param in params:
        model.add(Dense(param))
        # ReLU activation
        model.add(Activation('relu'))
    # the output layer, with 10 classes
    model.add(Dense(10))

    # load initial weights when given
    if init is not None:
        model.load_weights(init)

    # define the loss function which is the cross entropy between prediction and true label
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted /
                                                       train_temp)

    # initiate the SGD optimizer with given hyper parameters
    sgd = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)

    # compile the Keras model, given the specified loss and optimizer
    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    model.summary()
    print("Traing a {} layer model, saving to {}".format(
        len(params) + 1, file_name))
    # run training with given dataset, and print progress
    history = model.fit(data.train_data,
                        data.train_labels,
                        batch_size=batch_size,
                        validation_data=(data.validation_data,
                                         data.validation_labels),
                        epochs=num_epochs,
                        shuffle=True)

    # save model to a file
    if file_name is not None:
        model.save(file_name)

    return {'model': model, 'history': history}
Code example #12
def layersP1_multistream(input_shape, filters_count):
    seq = Sequential()
    for i in range(3):
        seq.add(Conv2D(int(filters_count), (2, 2),
                       input_shape=input_shape,
                       padding='same',
                       name='seq1_conv1_%d' % (i)))
        seq.add(Activation('relu', name='seq1_relu1_%d' % i))
        seq.add(Conv2D(int(filters_count), (2, 2),
                       padding='same',
                       name='seq1_conv2_%d' % (i)))
        seq.add(BatchNormalization(axis=-1, name='seq1_BN_%d' % i))
        seq.add(Activation('relu', name='seq1_relu2_%d' % i))

    return seq
Code example #13
def layersP2_merged(input_shape, filters_count, conv_depth):
    seq = Sequential()
    for i in range(conv_depth):
        seq.add(Conv2D(filters_count, (2, 2),
                       padding='same',
                       input_shape=input_shape,
                       name='seq2_conv1_%d' % (i)))
        seq.add(Activation('relu', name='seq2_relu1_%d' % i))
        seq.add(Conv2D(filters_count, (2, 2),
                       padding='same',
                       input_shape=input_shape,
                       name='seq2_conv2_%d' % (i)))
        seq.add(BatchNormalization(axis=-1, name='seq2_BN_%d' % i))
        seq.add(Activation('relu', name='seq2_relu2_%d' % i))
    return seq
Code example #14
def build_classifier(optimizer):
    classifier = Sequential()
    classifier.add(
        Dense(units=6,
              kernel_initializer='uniform',
              activation='relu',
              input_dim=11))
    classifier.add(Dropout(0.1))
    classifier.add(
        Dense(units=6, kernel_initializer='uniform', activation='relu'))
    classifier.add(Dropout(0.1))
    classifier.add(
        Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
    classifier.compile(optimizer=optimizer,
                       loss='binary_crossentropy',
                       metrics=['accuracy'])
    return classifier
Code example #15
File: ann1.py Project: KSR4599/ANN
def build_classifier(optimizer):
    classifier = Sequential()
    classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu', input_dim=11))
    # Improving the ANN
    # Dropout Regularization to reduce overfitting if needed
    classifier.add(Dropout(0.1))
    classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))
    classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
    classifier.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
    return classifier
Code example #16
def build_regressor(optimizer):
    regressor = Sequential()
    regressor.add(Dense(units=6, kernel_initializer='uniform', activation='relu', input_dim=11))
    # Improving the ANN
    # Dropout Regularization to reduce overfitting if needed
    regressor.add(Dropout(0.1))
    regressor.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))
    regressor.add(Dense(units=1, kernel_initializer='uniform', activation='linear'))
    regressor.compile(optimizer=optimizer, loss='mean_squared_error')
    return regressor
Code example #17
def build_regressor():
    regressor = Sequential()
    regressor.add(Dense(units=6, kernel_initializer='uniform', activation='relu', input_dim=11))
    regressor.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))
    regressor.add(Dense(units=1, kernel_initializer='uniform', activation='linear'))
    regressor.compile(optimizer='adam', loss='mean_squared_error')
    return regressor
Code example #18
def network(num_classes):
    model = Sequential()
    model.add(Dense(10, activation='relu', input_shape=(4, )))
    model.add(Dense(20, activation='relu'))
    model.add(Dense(10, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    return model
Code example #19
    def _build_model(self):
        # Neural Net for Deep-Q learning Model
        model = Sequential()
        model.add(Dense(24, input_dim=self.state_size, activation='relu'))
        model.add(Dense(24, activation='relu'))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
        return model
Code example #20
File: setup_cifar.py Project: happyxzw/Crown
    def __init__(self, restore=None, session=None, use_log=False):
        self.num_channels = 3
        self.image_size = 32
        self.num_labels = 10

        model = Sequential()
        model.add(Flatten(input_shape=(32, 32, 3)))
        model.add(Dense(1024))
        model.add(Activation('softplus'))
        model.add(Dense(10))
        # output log probability, used for black-box attack
        if use_log:
            model.add(Activation('softmax'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Code example #21
def build_classifier(optimizer):
    # We explicitly use the optimizer parameter here; the other
    # hyperparameters are passed in the fit method.
    Classifier = Sequential()
    Classifier.add(
        Dense(units=6,
              kernel_initializer='uniform',
              activation='relu',
              input_dim=11))
    Classifier.add(Dropout(0.1))
    Classifier.add(
        Dense(units=6, kernel_initializer='uniform', activation='relu'))
    Classifier.add(
        Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
    Classifier.compile(optimizer=optimizer,
                       loss='binary_crossentropy',
                       metrics=['accuracy'])
    return Classifier
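
Builders written this way, with optimizer as a build argument while batch size and epochs go to fit, are typically wrapped for hyperparameter search. A hedged sketch, assuming the classic keras.wrappers.scikit_learn wrapper and scikit-learn (neither import appears in the excerpt, so both are assumptions):

# Hedged sketch: wrapping the builder above for grid search.
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV

classifier = KerasClassifier(build_fn=build_classifier)
param_grid = {
    'optimizer': ['adam', 'rmsprop'],   # forwarded to build_classifier
    'batch_size': [25, 32],             # consumed by fit()
    'epochs': [100, 500],
}
grid = GridSearchCV(estimator=classifier, param_grid=param_grid,
                    scoring='accuracy', cv=10)
# grid_result = grid.fit(X_train, y_train)  # X_train: (n_samples, 11) features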
Code example #22
File: model.py Project: yoshihidesawada/CompGAN
def generator(data_shape):
    model = Sequential()
    model.add(Dense(macro._LAYER_DIM, activation='relu', input_shape=(macro._NOISE_DIM+macro._PROP_DIM,)))
    #model.add(Dropout(0.2))
    model.add(Dense(2*macro._LAYER_DIM, activation='relu'))
    #model.add(Dropout(0.2))
    #model.add(Dense(3*macro._LAYER_DIM, activation='relu'))
    #model.add(Dropout(0.2))
    # use sigmoid function to constrain output from 0 to 1.
    model.add(Dense(data_shape, activation='sigmoid'))
    return model
Code example #23
File: train_gan.py Project: marty90/URL-generator
def build_discriminator_dense():

    model = Sequential()
    model.add(Flatten(input_shape=url_shape))

    # Add arbitrary layers
    for size in discriminator_layers.split(":"):
        size = int(size)
        model.add(Dense(size, activation=discriminator_activation))
        model.add(Dropout(dropout_value))

    # Add the final layer, with a single output
    model.add(Dense(1, activation='sigmoid'))
    model.summary()

    # Build the model
    gen = Input(shape=url_shape)
    validity = model(gen)
    return Model(gen, validity)
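
build_discriminator_dense reads its architecture from module-level configuration: discriminator_layers is split on ':' into a list of Dense layer sizes. A hedged sketch of that configuration; the names come from the function body, the values are purely illustrative.

# Hypothetical values for the module-level config the function reads.
url_shape = (64, 80)                  # e.g. URLs one-hot encoded as 64 chars x 80 symbols
discriminator_layers = "512:256:128"  # ':'-separated Dense layer sizes
discriminator_activation = 'relu'
dropout_value = 0.25

discriminator = build_discriminator_dense()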
Code example #24
def layer3_last(input_dim1, input_dim2, input_dim3, filt_num):
    ''' last layer : Conv - Relu - Conv '''

    seq = Sequential()

    for i in range(1):
        # pow(25/23,2)*12*(maybe7?) 43 3
        seq.add(Conv2D(filt_num, (2, 2),
                       padding='valid',
                       input_shape=(input_dim1, input_dim2, input_dim3),
                       name='S3_c1%d' % (i)))
        seq.add(Activation('relu', name='S3_relu1%d' % (i)))

    seq.add(Conv2D(1, (2, 2), padding='valid', name='S3_last'))

    return seq
Code example #25
def convert(file_name, new_name, cifar=False):
    if not cifar:
        eq_weights, new_params = get_weights(file_name)
        data = MNIST()
    else:
        eq_weights, new_params = get_weights(file_name, inp_shape=(32, 32, 3))
        data = CIFAR()
    model = Sequential()
    model.add(Flatten(input_shape=data.train_data.shape[1:]))
    for param in new_params:
        model.add(Dense(param))
        model.add(Lambda(lambda x: tf.nn.relu(x)))
    model.add(Dense(10))

    for i in range(len(eq_weights)):
        try:
            print(eq_weights[i][0].shape)
        except Exception:
            pass
        model.layers[i].set_weights(eq_weights[i])

    sgd = SGD(lr=0.01, decay=1e-5, momentum=0.9, nesterov=True)

    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    model.save(new_name)
    acc = model.evaluate(data.validation_data, data.validation_labels)[1]
    printlog("Converting CNN to MLP")
    nlayer = file_name.split('_')[-3][0]
    filters = file_name.split('_')[-2]
    kernel_size = file_name.split('_')[-1]
    printlog(
        "model name = {0}, numlayer = {1}, filters = {2}, kernel size = {3}".
        format(file_name, nlayer, filters, kernel_size))
    printlog("Model accuracy: {:.3f}".format(acc))
    printlog("-----------------------------------")
    return acc
Code example #26
File: lstm.py Project: Kiris-tingna/TsPy
class LSTM(object):
    """Long Short Term Memory Regressor Class"""
    def __init__(self, layers, pct_dropout=0.2):
        """Build computational graph model
    
        Parameters
        ----------
        layers: list | [input, hidden_1, hidden_2, output]
            Dimensions of each layer
        pct_dropout: float | 0.0 to 1.0
            Percentage of dropout for hidden LSTM layers
        
        Attributes
        ----------
        model: keras.Model
            Compiled Keras sequential model, stored on self.model
        """
        if not isinstance(layers, list):
            raise TypeError(
                'layers was expected to be of type %s, received %s' %
                (type([]), type(layers)))
        if len(layers) != 4:
            raise ValueError('4 layer dimensions required, received only %d' %
                             len(layers))

        self.model = Sequential()

        self.model.add(
            _LSTM(layers[1],
                  input_shape=(layers[1], layers[0]),
                  return_sequences=True,
                  dropout=pct_dropout))

        self.model.add(
            _LSTM(layers[2], return_sequences=False, dropout=pct_dropout))

        self.model.add(Dense(layers[3], activation='linear'))

        self.model.compile(loss="mse", optimizer="rmsprop")

    def fit(self, X, y, **kwargs):
        """Train the model"""
        self.model.fit(X, y, **kwargs)

    def predict(self, series):
        """Prediction using provided series"""
        return self.model.predict(series)
Code example #27
File: train.py Project: stephanosterburg/MLDeform
def get_model(joint,
              verts,
              layers=3,
              activation='tanh',
              units=512,
              input_dim=100):
    """
    Build a training model based on the joint and vertices
    :param joint: RotateTransform values of the joint
    :param verts: Deltas of the vertices
    :param layers: The number of layers to create. A minimum of 2 is required.
    :param activation: The type of activation. Defaults to tanh
    :param units: The units per layer if not the input/output
    :param input_dim: The input dimensions of each layer that is not input/output
    :return: The model, name of the input node, the name of the output_node
    """
    model = Sequential()
    if layers < 2:
        logger.warning('A minimum of 2 layers is required')
        layers = 2

    input_name = 'input_node'
    output_name = 'output_node'
    for layer in range(layers):
        if not layer:
            model.add(
                Dense(units,
                      input_dim=joint.shape[1],
                      activation=activation,
                      name=input_name))
            continue

        if layer == (layers - 1):
            model.add(
                Dense(verts.shape[1], activation='linear', name=output_name))
            continue

        model.add(
            Dense(units,
                  input_dim=input_dim,
                  activation=activation,
                  name="dense_layer_%s" % layer))

    output_node = model.output.name
    input_node = '%s_input:0' % input_name
    return model, input_node, output_node
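
A hedged usage sketch for get_model, assuming joint and verts are 2-D arrays shaped (samples, features) as the docstring implies; all shapes below are illustrative.

# Illustrative shapes: 1000 poses, 3 RotateTransform values for the joint,
# and 500 vertex deltas flattened to 1500 floats per pose.
import numpy as np

joint = np.random.rand(1000, 3)
verts = np.random.rand(1000, 1500)

model, input_node, output_node = get_model(joint, verts, layers=3,
                                           activation='tanh', units=512)
model.compile(optimizer='adam', loss='mse')
# model.fit(joint, verts, epochs=200)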
Code example #28
File: lstm_.py Project: Kiris-tingna/TsPy
def build_model(layers, pct_dropout=0.2):
    """Build computational graph model
    
    Parameters
    ----------
    layers: list | [input, hidden_1, hidden_2, output]
        Dimensions of each layer
    pct_dropout: float | 0.0 to 1.0
        Percentage of dropout for hidden LSTM layers
    
    Returns
    -------
    model: keras.Model
        Compiled keras sequential model
    """
    if not isinstance(layers, list):
        raise TypeError('layers was expected to be of type %s, received %s' %
                        (type([]), type(layers)))
    if len(layers) != 4:
        raise ValueError('4 layer dimensions required, received only %d' %
                         len(layers))

    model = Sequential()

    model.add(
        LSTM(layers[1],
             input_shape=(layers[1], layers[0]),
             return_sequences=True,
             dropout=pct_dropout))

    model.add(LSTM(layers[2], return_sequences=False, dropout=pct_dropout))

    model.add(Dense(layers[3], activation='linear'))

    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
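
A hedged usage sketch for build_model, following the documented [input, hidden_1, hidden_2, output] convention; the values are illustrative.

# layers = [input, hidden_1, hidden_2, output] per the docstring.
# Here: 1 feature per timestep, 50 timesteps (= first LSTM's units),
# 100 hidden units, 1 output value.
model = build_model([1, 50, 100, 1], pct_dropout=0.2)

# The first LSTM was declared with input_shape=(layers[1], layers[0]),
# so training windows X_train must have shape (n_sequences, 50, 1):
# model.fit(X_train, y_train, batch_size=512, epochs=10)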
Code example #29
# Reshaping
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))

# Part 2 - Building the RNN

# Importing the Keras libraries and packages
from tensorflow.contrib.keras.api.keras.models import Sequential
from tensorflow.contrib.keras.api.keras.layers import Dense
from tensorflow.contrib.keras.api.keras.layers import LSTM

# Initialising the RNN
regressor = Sequential()

# Adding the input layer and the LSTM layer
regressor.add(LSTM(units=3, return_sequences=True, input_shape=(None, 1)))

# Adding a second LSTM layer
regressor.add(LSTM(units=3, return_sequences=True))

# Adding a third LSTM layer
regressor.add(LSTM(units=3, return_sequences=True))

# Adding a fourth LSTM layer
regressor.add(LSTM(units=3))

# Adding the output layer
regressor.add(Dense(units=1))

# Compiling the RNN
regressor.compile(optimizer='rmsprop', loss='mean_squared_error')
Code example #30
# Reshaping
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))

# Part 2 - Building the RNN

# Importing the Keras libraries and packages
from tensorflow.contrib.keras.api.keras.models import Sequential
from tensorflow.contrib.keras.api.keras.layers import Dense
from tensorflow.contrib.keras.api.keras.layers import LSTM

# Initialising the RNN
regressor = Sequential()

# Adding the input layer and the LSTM layer
regressor.add(LSTM(units=3, input_shape=(None, 1)))

# Adding the output layer
regressor.add(Dense(units=1))

# Compiling the RNN
regressor.compile(optimizer='rmsprop', loss='mean_squared_error')

# Fitting the RNN to the Training set
regressor.fit(X_train, y_train, epochs=100, batch_size=32)

# Part 3 - Making the predictions and visualising the results

# Getting the real stock price for February 1st 2012 - January 31st 2017
path = os.path.join(script_dir, '../dataset/Google_Stock_Price_Test.csv')
dataset_test = pd.read_csv(path)
Code example #31
data = mnist.load_data({'dataset': {}})
x_train = data['x_train']
y_train = data['y_train']
x_test = data['x_test']
y_test = data['y_test']

# Bring data into necessary format
x_train = mnist.preprocess(x_train, subtact_mean=False)
x_test = mnist.preprocess(x_test, subtact_mean=False)
y_train = mnist.to_categorical(y_train, mnist.n_classes)
y_test = mnist.to_categorical(y_test, mnist.n_classes)

# Define model
input_shape = (mnist.img_rows, mnist.img_cols, 1)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(mnist.n_classes, activation='softmax'))

# Fit model
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(),
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs)
Code example #32
class AmazonKerasClassifier:
    def __init__(self):
        self.losses = []
        self.classifier = Sequential()

    def add_conv_layer(self, img_size=(32, 32), img_channels=3):
        self.classifier.add(BatchNormalization(input_shape=(img_size[0], img_size[1], img_channels)))

        self.classifier.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
        self.classifier.add(Conv2D(32, (3, 3), activation='relu'))
        self.classifier.add(MaxPooling2D(pool_size=2))
        self.classifier.add(Dropout(0.25))

        self.classifier.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
        self.classifier.add(Conv2D(64, (3, 3), activation='relu'))
        self.classifier.add(MaxPooling2D(pool_size=2))
        self.classifier.add(Dropout(0.25))

        self.classifier.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
        self.classifier.add(Conv2D(128, (3, 3), activation='relu'))
        self.classifier.add(MaxPooling2D(pool_size=2))
        self.classifier.add(Dropout(0.25))

        self.classifier.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
        self.classifier.add(Conv2D(256, (3, 3), activation='relu'))
        self.classifier.add(MaxPooling2D(pool_size=2))
        self.classifier.add(Dropout(0.25))


    def add_flatten_layer(self):
        self.classifier.add(Flatten())


    def add_ann_layer(self, output_size):
        self.classifier.add(Dense(512, activation='relu'))
        self.classifier.add(BatchNormalization())
        self.classifier.add(Dropout(0.5))
        self.classifier.add(Dense(output_size, activation='sigmoid'))

    def _get_fbeta_score(self, classifier, X_valid, y_valid):
        p_valid = classifier.predict(X_valid)
        return fbeta_score(y_valid, np.array(p_valid) > 0.2, beta=2, average='samples')

    def train_model(self, x_train, y_train, learn_rate=0.001, epoch=5, batch_size=128, validation_split_size=0.2, train_callbacks=()):
        history = LossHistory()

        X_train, X_valid, y_train, y_valid = train_test_split(x_train, y_train,
                                                              test_size=validation_split_size)

        opt = Adam(lr=learn_rate)

        self.classifier.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])


        # early stopping will auto-stop training process if model stops learning after 3 epochs
        earlyStopping = EarlyStopping(monitor='val_loss', patience=3, verbose=0, mode='auto')

        self.classifier.fit(X_train, y_train,
                            batch_size=batch_size,
                            epochs=epoch,
                            verbose=1,
                            validation_data=(X_valid, y_valid),
                            callbacks=[history, *train_callbacks, earlyStopping])
        fbeta_score = self._get_fbeta_score(self.classifier, X_valid, y_valid)
        return [history.train_losses, history.val_losses, fbeta_score]

    def save_weights(self, weight_file_path):
        self.classifier.save_weights(weight_file_path)

    def load_weights(self, weight_file_path):
        self.classifier.load_weights(weight_file_path)

    def predict(self, x_test):
        predictions = self.classifier.predict(x_test)
        return predictions

    def map_predictions(self, predictions, labels_map, thresholds):
        """
        Return the predictions mapped to their labels
        :param predictions: the predictions from the predict() method
        :param labels_map: the map
        :param thresholds: The threshold of each class to be considered as existing or not existing
        :return: the predictions list mapped to their labels
        """
        predictions_labels = []
        for prediction in predictions:
            labels = [labels_map[i] for i, value in enumerate(prediction) if value > thresholds[i]]
            predictions_labels.append(labels)

        return predictions_labels

    def close(self):
        backend.clear_session()
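
A hedged sketch of how map_predictions consumes its arguments, assuming the classifier was built with four output classes; the label names and thresholds are illustrative.

# Hedged sketch: 4 output classes, labels, and thresholds are illustrative.
classifier = AmazonKerasClassifier()
classifier.add_conv_layer(img_size=(32, 32), img_channels=3)
classifier.add_flatten_layer()
classifier.add_ann_layer(output_size=4)     # one sigmoid output per label
# classifier.train_model(x_train, y_train) would normally be called first.
predictions = classifier.predict(x_test)    # shape: (n_samples, 4) scores
labels_map = {0: 'clear', 1: 'cloudy', 2: 'haze', 3: 'water'}
thresholds = [0.2, 0.2, 0.2, 0.2]           # per-class decision thresholds
predicted_labels = classifier.map_predictions(predictions, labels_map, thresholds)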
Code example #33
File: kerasTest.py Project: xxwei/TraderCode
test_x = train_x[train_x.shape[0]-100:]
test_y = train_y[train_y.shape[0]-100:]
train_x = train_x[:train_x.shape[0]-100]
train_y = train_y[:train_y.shape[0]-100]
print(train_x.shape)
print(train_y.shape)
print(test_x.shape)
print(test_y.shape)
# Specify via input_shape: no sample (batch) size is needed, see the example below
# Specify via batch_input_shape: the sample (batch) size must be given
# 2D layers set their input size with input_dim; 3D layers use input_dim plus input_length
# A Keras LSTM layer consumes a 3-D array (N, W, F): N training sequences, W the sequence length, F the number of features per sequence
TIME_STEPS = 30
INPUT_SIZE = 1
model = Sequential()
#model.add(LSTM(1, batch_input_shape=(None, TIME_STEPS, INPUT_SIZE)))
model.add(LSTM(1, input_shape=(TIME_STEPS, INPUT_SIZE)))
model.add(Dropout(0.2))
model.add(Dense(1))
model.add(Activation("linear"))
start = time.time()
model.compile(loss="mse", optimizer="rmsprop")
print("Compilation Time : ", time.time() - start)
tbCallBack.set_model(model)
model.fit(train_x,train_y,batch_size=32,epochs=5)
score = model.evaluate(train_x, train_y, batch_size=32)
#model.save_weights('w1.hdf5')
predicted = model.predict(test_x,batch_size=32,verbose=2)
predicted = np.reshape(predicted, (predicted.size,))

print(predicted)
print(score)
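
To make the input-shape conventions described in the comments of code example #33 concrete, here is a minimal standalone sketch contrasting input_shape with batch_input_shape; it assumes the plain keras package rather than any project-specific imports.

# Two ways to declare the (timesteps, features) input of an LSTM.
from keras.models import Sequential
from keras.layers import LSTM

TIME_STEPS, INPUT_SIZE = 30, 1

# input_shape: only (timesteps, features); the batch size stays flexible.
m1 = Sequential()
m1.add(LSTM(1, input_shape=(TIME_STEPS, INPUT_SIZE)))

# batch_input_shape: the leading batch dimension is pinned (here to 32),
# which, for example, stateful LSTMs require.
m2 = Sequential()
m2.add(LSTM(1, batch_input_shape=(32, TIME_STEPS, INPUT_SIZE)))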