# Initialize the number of folds, the cross-validation score list, a fold
# counter, and lists for per-fold confusion matrices and training histories
n_est = 5
crs_val_fold = []
fold_idx = 0
conf_arr = []
history_rec = []

# Split the data into n_est stratified, shuffled folds
for train_ind, test_ind in StratifiedKFold(n_est, shuffle=True).split(X, y):
    X_train, X_test = X[train_ind], X[test_ind]
    y_train, y_test = y[train_ind], y[test_ind]

    # Setup and train the model
    model = Sequential([
        InputLayer((990, )),
        Dense(64),
        Dropout(0.5),
        ReLU(),
        Dense(32),
        Dropout(0.5),
        ReLU(),
        Dense(1, activation='sigmoid')
    ])

    # model.summary()

    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['acc'])
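    # (continuation not shown in the excerpt) a minimal sketch of how the
    # bookkeeping lists initialized above could be filled per fold; assumes
    # sklearn.metrics.confusion_matrix is imported
    history = model.fit(X_train, y_train, epochs=20, batch_size=32,
                        validation_data=(X_test, y_test), verbose=0)
    history_rec.append(history.history)
    crs_val_fold.append(model.evaluate(X_test, y_test, verbose=0)[1])
    y_pred = (model.predict(X_test) > 0.5).astype('int32')
    conf_arr.append(confusion_matrix(y_test, y_pred))
    fold_idx += 1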
Example #2
act = 'sigmoid'
opt = 'RMSprop'
lf = 'binary_crossentropy'
ep = 1000
bs = 32
val_split = 0.2
es = EarlyStopping(monitor='val_loss',
                   min_delta=0,
                   patience=10,
                   verbose=0,
                   mode='auto',
                   baseline=None,
                   restore_best_weights=True)
# Construction layers:
regressor = Sequential()
regressor.add(InputLayer(input_shape=(lookback, 1)))
regressor.add(CuDNNLSTM(units=input_units, return_sequences=False))
regressor.add(Dropout(dp))
regressor.add(Dense(units=output_units, activation=act))
regressor.compile(optimizer=opt, loss=lf)
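
# (not in the excerpt) a plausible training call tying together the
# hyperparameters defined above; X_train/y_train are assumed to exist
history = regressor.fit(X_train, y_train,
                        epochs=ep, batch_size=bs,
                        validation_split=val_split,
                        callbacks=[es])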

#------------ Loop over 19 study periods -------------
for i in range(0, len(return_window)):

    # Determine which stocks are eligible for the study period
    vec0 = list(np.where(binary_matrix[(749 + i * test), :] == 1)[0])
    vec = []
    for u in vec0:
        if (not np.isnan(return_window[i, 0:750, u]).all()
                and not np.isnan(return_window[i, 750:1000, u]).all()):
            vec.append(u)
def test_recursion():
    ####################################################
    # test recursion

    a = Input(shape=(32, ), name='input_a')
    b = Input(shape=(32, ), name='input_b')

    dense = Dense(16, name='dense_1')
    a_2 = dense(a)
    b_2 = dense(b)
    merged = layers.concatenate([a_2, b_2], name='merge')
    c = Dense(64, name='dense_2')(merged)
    d = Dense(5, name='dense_3')(c)

    model = Model(inputs=[a, b], outputs=[c, d], name='model')

    e = Input(shape=(32, ), name='input_e')
    f = Input(shape=(32, ), name='input_f')
    g, h = model([e, f])

    # g2, h2 = model([e, f])

    assert g._keras_shape == c._keras_shape
    assert h._keras_shape == d._keras_shape

    # test separate manipulation of different layer outputs
    i = Dense(7, name='dense_4')(h)

    final_model = Model(inputs=[e, f], outputs=[i, g], name='final')
    assert len(final_model.inputs) == 2
    assert len(final_model.outputs) == 2
    assert len(final_model.layers) == 4

    # we don't check names of first 2 layers (inputs) because
    # ordering of same-level layers is not fixed
    print('final_model layers:', [layer.name for layer in final_model.layers])
    assert [layer.name
            for layer in final_model.layers][2:] == ['model', 'dense_4']

    print(model.compute_mask([e, f], [None, None]))
    assert model.compute_mask([e, f], [None, None]) == [None, None]

    print(final_model.compute_output_shape([(10, 32), (10, 32)]))
    assert final_model.compute_output_shape([(10, 32), (10, 32)]) == [(10, 7),
                                                                      (10, 64)]

    # run recursive model
    fn = K.function(final_model.inputs, final_model.outputs)
    input_a_np = np.random.random((10, 32))
    input_b_np = np.random.random((10, 32))
    fn_outputs = fn([input_a_np, input_b_np])
    assert [x.shape for x in fn_outputs] == [(10, 7), (10, 64)]

    # test serialization
    model_config = final_model.get_config()
    print(json.dumps(model_config, indent=4))
    recreated_model = Model.from_config(model_config)

    fn = K.function(recreated_model.inputs, recreated_model.outputs)
    input_a_np = np.random.random((10, 32))
    input_b_np = np.random.random((10, 32))
    fn_outputs = fn([input_a_np, input_b_np])
    assert [x.shape for x in fn_outputs] == [(10, 7), (10, 64)]

    ####################################################
    # test multi-input multi-output

    j = Input(shape=(32, ), name='input_j')
    k = Input(shape=(32, ), name='input_k')
    m, n = model([j, k])

    o = Input(shape=(32, ), name='input_o')
    p = Input(shape=(32, ), name='input_p')
    q, r = model([o, p])

    assert n._keras_shape == (None, 5)
    assert q._keras_shape == (None, 64)
    s = layers.concatenate([n, q], name='merge_nq')
    assert s._keras_shape == (None, 64 + 5)

    # test with single output as 1-elem list
    multi_io_model = Model([j, k, o, p], [s])

    fn = K.function(multi_io_model.inputs, multi_io_model.outputs)
    fn_outputs = fn([
        np.random.random((10, 32)),
        np.random.random((10, 32)),
        np.random.random((10, 32)),
        np.random.random((10, 32))
    ])
    assert [x.shape for x in fn_outputs] == [(10, 69)]

    # test with single output as tensor
    multi_io_model = Model([j, k, o, p], s)

    fn = K.function(multi_io_model.inputs, multi_io_model.outputs)
    fn_outputs = fn([
        np.random.random((10, 32)),
        np.random.random((10, 32)),
        np.random.random((10, 32)),
        np.random.random((10, 32))
    ])
    # note that the output of the K.function will still be a 1-elem list
    assert [x.shape for x in fn_outputs] == [(10, 69)]

    # test serialization
    model_config = multi_io_model.get_config()
    recreated_model = Model.from_config(model_config)

    fn = K.function(recreated_model.inputs, recreated_model.outputs)
    fn_outputs = fn([
        np.random.random((10, 32)),
        np.random.random((10, 32)),
        np.random.random((10, 32)),
        np.random.random((10, 32))
    ])
    # note that the output of the K.function will still be a 1-elem list
    assert [x.shape for x in fn_outputs] == [(10, 69)]

    config = model.get_config()
    Model.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)

    ####################################################
    # test invalid graphs

    # input is not an Input tensor
    j = Input(shape=(32, ), name='input_j')
    j = Dense(32)(j)
    k = Input(shape=(32, ), name='input_k')
    m, n = model([j, k])

    with pytest.raises(TypeError):
        Model([j, k], [m, n])

    # disconnected graph
    j = Input(shape=(32, ), name='input_j')
    k = Input(shape=(32, ), name='input_k')
    m, n = model([j, k])
    with pytest.raises(RuntimeError):
        Model([j], [m, n])

    # redundant outputs
    j = Input(shape=(32, ), name='input_j')
    k = Input(shape=(32, ), name='input_k')
    m, n = model([j, k])
    # this should work with a warning
    Model([j, k], [m, n, n])

    # redundant inputs
    j = Input(shape=(32, ), name='input_j')
    k = Input(shape=(32, ), name='input_k')
    m, n = model([j, k])
    with pytest.raises(ValueError):
        Model([j, k, j], [m, n])

    # I have no idea what I'm doing: garbage as inputs/outputs
    j = Input(shape=(32, ), name='input_j')
    k = Input(shape=(32, ), name='input_k')
    m, n = model([j, k])
    with pytest.raises(TypeError):
        Model([j, k], [m, n, 0])

    ####################################################
    # test calling layers/models on TF tensors

    if K._BACKEND == 'tensorflow':
        import tensorflow as tf
        j = Input(shape=(32, ), name='input_j')
        k = Input(shape=(32, ), name='input_k')
        m, n = model([j, k])
        tf_model = Model([j, k], [m, n])

        j_tf = tf.placeholder(dtype=K.floatx())
        k_tf = tf.placeholder(dtype=K.floatx())
        m_tf, n_tf = tf_model([j_tf, k_tf])
        assert m_tf.get_shape().as_list() == [None, 64]
        assert n_tf.get_shape().as_list() == [None, 5]

        # test merge
        layers.concatenate([j_tf, k_tf], axis=1)
        layers.add([j_tf, k_tf])

        # test tensor input
        x = tf.placeholder(shape=(None, 2), dtype=K.floatx())
        InputLayer(input_tensor=x)

        x = Input(tensor=x)
        Dense(2)(x)
Example #4
input_shape = (4096, )
input_reshape = (64, 64, 1)

conv_num_filters = 5
conv_filter_size = 5

pool_size = (2, 2)

hidden_num_units = 100
output_num_units = 4

epochs = 5
batch_size = 128

model = Sequential([
    InputLayer(input_shape=input_reshape),
    Convolution2D(64, 5, 5, activation='relu'),
    MaxPooling2D(pool_size=pool_size),
    Convolution2D(64, 5, 5, activation='relu'),
    MaxPooling2D(pool_size=pool_size),
    Convolution2D(64, 4, 4, activation='relu'),
    Flatten(),
    Dense(output_dim=hidden_num_units, activation='relu'),
    Dense(output_dim=output_num_units,
          input_dim=hidden_num_units,
          activation='softmax'),
])

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
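
# (not in the excerpt) a minimal fit call wiring up the epochs/batch_size
# defined above; x_train/y_train are assumed to be images reshaped to
# input_reshape with one-hot labels
model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size,
          validation_split=0.2)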
 
# Training, Val and Test Data Sets
x_train, y_train, x_val, y_val, x_test, y_test = lib.kfold(nemo, yearini, yearfin, i, 'all')

X_train = sliding_window_tranf(x_train, n_steps, 'all')
Y_train = sliding_window_tranf(y_train, n_steps, 'all')

X_val = sliding_window_tranf(x_val, n_steps, 'all')
Y_val = sliding_window_tranf(y_val, n_steps, 'all')

X_test = sliding_window_tranf(x_test, n_steps, 'all')
Y_test = sliding_window_tranf(y_test, n_steps, 'all')

# Model
model = Sequential()
model.add(InputLayer(input_shape=(n_steps, n_var_in)))
model.add(Bidirectional(GRU(128, return_sequences=True)))
model.add(TimeDistributed(Dense(n_var_out, activation='linear')))
#model.summary()

# Training
batch_size = 32
nb_epoch = 500

adam = Adam(lr=0.0001)
model.compile(optimizer=adam, loss='mse')

earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=1, mode='auto')
training = model.fit(X_train, Y_train,
                     validation_data=(X_val, Y_val),
                     batch_size=batch_size, epochs=nb_epoch,
                     callbacks=[earlyStopping])  # (closing argument assumed; the excerpt is truncated here)
import numpy as np
import pandas as pd
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input
from keras.layers import Dense, InputLayer
from keras.models import Sequential
from skimage.transform import resize

count = 0

base_model = VGG16(
    weights='imagenet', include_top=False,
    input_shape=(224, 224, 3))  # include_top=False to remove the top layer

model = Sequential()  # i. Building the model
model.add(InputLayer((7 * 7 * 512, )))  # input layer
model.add(Dense(units=1024, activation='relu'))  # hidden layer
model.add(Dense(3, activation='softmax'))  # output layer

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])  # ii. Compiling the model

model.load_weights("modelweights.h5")

count = 0  # Testing and Calculating the screen timing
videoFile = "Tom and Jerry 3.mp4"
# cap = cv2.VideoCapture(videoFile)
# frameRate = cap.get(5)  # frame rate
# x = 1
# while (cap.isOpened()):
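# (a sketch, not in the source, of how the commented-out loop above could
# continue; frame/variable names are assumed)
#     ret, frame = cap.read()
#     if not ret:
#         break
#     frame = resize(frame, (224, 224, 3), preserve_range=True)
#     feats = base_model.predict(preprocess_input(np.expand_dims(frame, 0)))
#     count += model.predict(feats.reshape(1, 7 * 7 * 512)).argmax()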
Example #7
    def build_encoder(self):
        encoder = Sequential()
        encoder.add(InputLayer(input_shape=INPUT_SHAPE, name="in"))
        encoder.add(self.model.get_layer("encoder"))
        self.encoder = encoder
def visible(x):
    model.add(InputLayer(input_shape=(x, x, 3)))
Example #9
        #os.remove('test_images/Train2/'+filename)
        i = i + 1
X = np.array(X, dtype=float)

# Splitting train and test sets
split = int(0.95*len(X))
Xtrain = X[:split]
Xtrain = 1.0/255*Xtrain
Xtest = rgb2lab(1.0/255*X[split:])[:,:,:,0]
Xtest = Xtest.reshape(Xtest.shape+(1,))
Ytest = rgb2lab(1.0/255*X[split:])[:,:,:,1:]
Ytest = Ytest / 128

# Model Structure
model = Sequential()
model.add(InputLayer(input_shape=(256, 256, 1)))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same', strides=2))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same', strides=2))
model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(256, (3, 3), activation='relu', padding='same', strides=2))
model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(UpSampling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(UpSampling2D((2, 2)))
model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(Dropout(0.5))
model.add(Conv2D(2, (3, 3), activation='tanh', padding='same'))
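
# (not in the excerpt) a minimal compile/train sketch, assuming the training
# split gets the same Lab-space preparation as the test split above
model.compile(optimizer='rmsprop', loss='mse')
Xtrain_L = rgb2lab(Xtrain)[:, :, :, 0].reshape(-1, 256, 256, 1)
Ytrain_ab = rgb2lab(Xtrain)[:, :, :, 1:] / 128
model.fit(Xtrain_L, Ytrain_ab, batch_size=16, epochs=1)
print(model.evaluate(Xtest, Ytest, batch_size=16))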
Example #10
    def distancenet(self,
                    vocab_size,
                    output_size,
                    maxsize=1,
                    hop_depth=-1,
                    dropout=False,
                    d_perc=1,
                    type="CCE",
                    shape=0,
                    q_shape=1):
        print(bcolors.UNDERLINE + 'Building nn model...' + bcolors.ENDC)

        print(q_shape, shape, "====================")
        sentrnn = Sequential()
        emb = Embedding(vocab_size,
                        self.embed_hidden_size,
                        mask_zero=False,
                        W_constraint=mx(),
                        W_regularizer=reg(),
                        init=init_function,
                        input_shape=shape)
        sentrnn.add(emb)
        #emb_bn = bn()
        #sentrnn.add(emb_bn)
        sentrnn.add(Dropout(0.2))

        if (convolution):
            #print maxsize, q_shape
            #exit()
            conv = Convolution1D(self.sent_hidden_size,
                                 maxsize,
                                 subsample_length=1,
                                 activation=act,
                                 border_mode="same")
            #conv_bn = bn()
            sentrnn.add(conv)
            #sentrnn.add(conv_bn)
            #sentrnn.add(Dropout(0.1))
            sentrnn.add(MaxPooling1D(pool_length=maxsize))
            #sentrnn.add(Dropout(d_perc))
        else:
            sentrnn.add(MaxPooling1D(pool_length=maxsize))
            #sentrnn.add(SimpleRNN( self.query_hidden_size, return_sequences=True,activation = "leakyrelu", init = init_function,dropout_W = 0.3, dropout_U=0.3, consume_less = "mem"))
            # sentrnn.add(ResettingRNN(maxsize, self.query_hidden_size, return_sequences=True,activation = "tanh", init = init_function,dropout_W = 0.3, dropout_U=0.3, consume_less = "mem"))
            # sentrnn.add(bn())
            # sentrnn.add(AttentionPooling(pool_length=maxsize))

            #sentrnn.add(Convolution1D(self.sent_hidden_size,maxsize, subsample_length = 1, activation=act, border_mode="same"))
            #sentrnn.add(bn())
            #
            #sentrnn.add(Dropout(0.1))
            #td = TimeDistributedDense(self.sent_hidden_size, activation = act, init = init_function)
            #sentrnn.add(td)
            #sentrnn.add(bn())
            #sentrnn.add(Dropout(d_perc))

        qrnn = Sequential()
        #emb = Embedding(vocab_size, self.embed_hidden_size, mask_zero=False,W_constraint=mx(), W_regularizer=reg(), init = init_function, input_shape=shape, input_length=q_shape)

        qrnn.add(InputLayer(input_shape=(q_shape, )))
        qrnn.add(emb)
        #qrnn.add(emb_bn)
        qrnn.add(Dropout(0.2))

        if (convolution):
            #conv = Convolution1D(self.sent_hidden_size,q_shape, subsample_length = 1, activation=act, border_mode="same")
            qrnn.add(conv)
            #qrnn.add(conv_bn)
            #qrnn.add(Dropout(0.1))
            qrnn.add(MaxPooling1D(pool_length=q_shape))
            qrnn.add(Flatten(1))
        else:
            qrnn.add(
                SimpleRNN(self.query_hidden_size,
                          return_sequences=False,
                          activation=act,
                          init=init_function,
                          dropout_W=0.1,
                          dropout_U=0.1,
                          consume_less="mem"))
            #qrnn.add(Convolution1D(self.sent_hidden_size,q_shape, subsample_length = 1, activation=act, border_mode="same"))
            #qrnn.add(bn())
            #qrnn.add(MaxPooling1D(pool_length=q_shape))

            #qrnn.add(Flatten())

        init_qa = [sentrnn, qrnn]
        past = []

        #at = GRU(self.sent_hidden_size, dropout_W = 0.3, dropout_U=0.3, activation=act)

        td = TimeDistributedDense(self.sent_hidden_size,
                                  activation=act,
                                  init=init_function)
        at = AttentionRecurrent(self.sent_hidden_size, activation="softmax")

        for i in range(hop_depth):
            hop = Sequential()
            hop.add(
                AttentionMerge((i == hop_depth - 1),
                               "abs",
                               init_qa + past,
                               mode="sum"))
            hop.add(td)
            #hop.add(bn())
            #hop.add(Dropout(0.1))
            hop.add(at)

            #hop.add(bn())
            #hop.add(Dropout(0.1))
            past.append(hop)

        # model = Sequential()
        # model.add(AttentionMerge(False, "concat", init_qa + past, mode = "concat"))
        # model.add(LSTM(self.query_hidden_size, return_sequences=False, init = init_function,dropout_W = 0.2, dropout_U=0.2))

        model = hop

        #self._adddepth(model, dropout, d_perc)
        model.add(
            Dense(vocab_size,
                  W_constraint=mx(),
                  W_regularizer=reg(),
                  init=init_function))
        #model.add(bn())
        model.add(Activation("softmax"))
        if (type == "CCE"):
            model.compile(optimizer=self._getopt(),
                          loss='categorical_crossentropy',
                          metrics=["accuracy"],
                          class_mode='categorical')
        else:
            model.compile(optimizer=self._getopt(), loss='mse')

        print(model.summary())
        plot(model,
             show_layer_names=False,
             show_shapes=True,
             to_file='model.png')

        return model
Example #11
y_train = np.array(y_train)

x_test = x_test.reshape(-1, img_size, img_size, 3)
y_test = np.array(y_test)

print(
    "######################    Training and test shapes  ##########################"
)
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
print("")

model = Sequential()
model.add(InputLayer(input_shape=(img_size, img_size, 3)))

model.add(ZeroPadding2D((3, 3)))
model.add(Conv2D(112, activation='relu', kernel_size=(3, 3)))
model.add(MaxPooling2D((2, 2), strides=(3, 3)))
model.add(Conv2D(72, activation='relu', kernel_size=(2, 2)))
model.add(MaxPooling2D((2, 2), strides=(3, 3)))
model.add(Conv2D(64, activation='relu', kernel_size=(2, 2)))
model.add(BatchNormalization())
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(Conv2D(32, activation='relu', kernel_size=(2, 2)))
model.add(BatchNormalization())
model.add(MaxPooling2D((2, 2), strides=(3, 3)))
model.add(Conv2D(16, activation='relu', kernel_size=(2, 2)))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(Flatten())
    old_session = KTF.get_session()
    sess = tf.Session('')
    KTF.set_session(sess)

    trained = False
    checkpoint = tf.train.get_checkpoint_state(f_model)
    if (not args.force_train
        ) and checkpoint and checkpoint.model_checkpoint_path:
        yaml_string = open(os.path.join(f_model, model_filename)).read()
        model = model_from_yaml(yaml_string)
        model.load_weights(os.path.join(f_model, weights_filename))
        trained = True
    else:
        model = Sequential()
        model.add(InputLayer(input_shape=input_shape, name='input'))
        model.add(
            Convolution2D(nb_filters,
                          kernel_size[0],
                          kernel_size[1],
                          border_mode='same',
                          activation='relu'))
        model.add(MaxPooling2D(pool_size=pool_size))
        model.add(
            Convolution2D(nb_filters,
                          kernel_size[0],
                          kernel_size[1],
                          border_mode='same',
                          activation='relu'))
        model.add(MaxPooling2D(pool_size=pool_size))
        model.add(Flatten())
    def build(self):
        class KerasNNFeaturesProvider(ViewsFeaturesProvider):
            def __init__(self, config):
                super(KerasNNFeaturesProvider, self).__init__(config)

            def features(self, observation):
                base_features = super().features(observation)
                return base_features.reshape(1, self.config.num_products)

        class KerasNNModel(Model):
            def __init__(self, config, model):
                super(KerasNNModel, self).__init__(config)
                self.model = model

            def act(self, observation, features):
                # X is a vector of organic counts
                predictions = self.model.predict(features)
                # print(f"Prediction: {predictions}")
                # Take the one you think is most likely to give a click
                action = np.argmax(predictions)
                # print(f"Action taken: {action}")
                ps_all = np.zeros(self.config.num_products)
                ps_all[action] = 1.0

                return {
                    **super().act(observation, features),
                    **{
                        'a': action,
                        'ps': 1.0,
                        'ps-a': ps_all,
                    },
                }

        # Get data
        features, actions, deltas, pss = self.train_data()

        # Extract data properly
        X = features  # NxP vector where rows are users, columns are counts of organic views
        N = X.shape[0]  # Number of bandit feedback samples
        P = X.shape[1]  # Number of items
        A = actions  # Vector of length N - indicating the action that was taken
        y = deltas  # Vector of length N - indicating whether a click occurred
        # Explicitly mask - drop non-clicks
        mask = y == 1
        X = X[mask]
        A = A[mask]
        y = y[mask]
        pss = pss[mask]

        n_clicks = np.sum(deltas)

        # Explicitly build one-hot matrix for actions
        A_one_hot = np.zeros((n_clicks, P))
        A_one_hot[np.arange(n_clicks), A] = 1

        # print(A_one_hot.shape)
        # print(X.shape)
        # exit(1)

        network = Sequential()
        network.add(InputLayer(input_shape=X[0].shape))
        # for i in range(5):
        network.add(Dense(X.shape[1]**2, activation=tf.nn.relu))
        network.add(Dense(A_one_hot.shape[1], activation=tf.nn.softmax))

        network.compile(optimizer="adam",
                        loss="categorical_crossentropy",
                        metrics=["accuracy"])

        # # X = X[None,:,:]
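        # note: Keras' class_weight normally expects a dict keyed by class
        # index; 1 / pss reads like inverse-propensity weighting, which would
        # usually be passed as sample_weight instead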
        network.fit(X, A_one_hot, epochs=10, class_weight=1 / pss, verbose=1)
        # Train a model
        # model = LogisticRegression(solver='lbfgs', multi_class='multinomial').fit(X, A, sample_weight=1 / pss)

        return (KerasNNFeaturesProvider(self.config),
                KerasNNModel(self.config, network))
Example #14
import gym
import numpy as np
from keras.models import Sequential
from keras.layers import InputLayer, Dense
from tqdm import trange

model = Sequential()
model.add(InputLayer(batch_input_shape=(1, 5)))
model.add(Dense(10, activation='sigmoid'))
model.add(Dense(2, activation='linear'))
model.compile(loss='mse', optimizer='adam', metrics=['mae'])


def eps_q_learn_nn_train(env, num_episodes=500):
    # now execute the q learning
    y = 0.95
    eps = 0.5
    decay_factor = 0.999
    r_avg_list = []
    for _ in trange(num_episodes):
        s = env.reset()
        eps *= decay_factor
        done = False
        r_sum = 0
        while not done:
            if np.random.random() < eps:
                a = np.random.randint(0, 2)
            else:
                a = np.argmax(model.predict(np.identity(5)[s:s + 1]))
            new_s, r, done, _ = env.step(a)
            target = r + y * np.max(
                model.predict(np.identity(5)[new_s:new_s + 1]))  # (closing expression assumed; truncated in source)
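            # (continuation assumed; the excerpt is truncated) standard
            # one-step Q-update on the one-hot encoded state
            target_vec = model.predict(np.identity(5)[s:s + 1])[0]
            target_vec[a] = target
            model.fit(np.identity(5)[s:s + 1],
                      target_vec.reshape(-1, 2),
                      epochs=1, verbose=0)
            s = new_s
            r_sum += r
        r_avg_list.append(r_sum)
    return r_avg_list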
Example #15
# generator (the opening lines are truncated in the source; the
# Sequential([...]) opener below is assumed from the closing bracket)
model_g = Sequential([
    Dense(units=hidden_1_num_units,
          input_dim=g_input_shape,
          activation='relu',
          kernel_regularizer=L1L2(1e-5, 1e-5)),
    Dense(units=hidden_2_num_units,
          activation='relu',
          kernel_regularizer=L1L2(1e-5, 1e-5)),
    Dense(units=g_output_num_units,
          activation='sigmoid',
          kernel_regularizer=L1L2(1e-5, 1e-5)),
    Reshape(d_input_shape),
])

# discriminator
model_d = Sequential([
    InputLayer(input_shape=d_input_shape),
    Flatten(),
    Dense(units=hidden_1_num_units,
          activation='relu',
          kernel_regularizer=L1L2(1e-5, 1e-5)),
    Dense(units=hidden_2_num_units,
          activation='relu',
          kernel_regularizer=L1L2(1e-5, 1e-5)),
    Dense(units=d_output_num_units,
          activation='sigmoid',
          kernel_regularizer=L1L2(1e-5, 1e-5)),
])

# Compiling the GAN

gan = simple_gan(model_g, model_d, normal_latent_sampling((100, )))
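
# (not in the excerpt) compiling the combined model, assuming the
# keras_adversarial API used in that library's own GAN example
from keras_adversarial import AdversarialModel, AdversarialOptimizerSimultaneous

adv = AdversarialModel(base_model=gan,
                       player_params=[model_g.trainable_weights,
                                      model_d.trainable_weights],
                       player_names=["generator", "discriminator"])
adv.adversarial_compile(adversarial_optimizer=AdversarialOptimizerSimultaneous(),
                        player_optimizers=['adam', 'adam'],
                        loss='binary_crossentropy')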
Example #16
x_tests = x_tests.astype('float32')
x_tests /= 255
y_tests = np_utils.to_categorical(y_tests, correct)

print(x_trains.shape)
print(x_tests.shape)

from keras.models import Sequential
from keras.layers import InputLayer, Dense
from keras.layers.recurrent import LSTM
from keras import optimizers, regularizers

model = Sequential()

model.add(InputLayer(input_shape=(28, 28)))
weight_decay = 1e-4
model.add(LSTM(units=128, dropout=0.25, return_sequences=True))
model.add(LSTM(units=128, dropout=0.25, return_sequences=True))
model.add(
    LSTM(units=128,
         dropout=0.25,
         return_sequences=False,
         kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Dense(units=10, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.Adam(),
              metrics=['accuracy'])

model.summary()
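
# (not in the excerpt) a plausible training call; y_trains is assumed to
# mirror the y_tests preparation shown above
model.fit(x_trains, y_trains, batch_size=128, epochs=10,
          validation_data=(x_tests, y_tests))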
Example #17
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun  3 15:52:43 2019

@author: Orlando Ciricosta 
"""

from keras.layers import Dense, Conv2D, LeakyReLU, MaxPool2D
from keras.layers import InputLayer, Reshape, Flatten
from keras.models import Sequential

yolo = Sequential()

yolo.add(InputLayer(input_shape=(448, 448, 3)))

yolo.add(Conv2D(64, 7, strides=2, padding='same', name='conv-0_block-0'))
yolo.add(LeakyReLU(alpha=0.1, name='leaky-0_block-0'))
yolo.add(MaxPool2D(strides=2, name='pool_block-0'))

yolo.add(Conv2D(192, 3, padding='same', name='conv-0_block-1'))
yolo.add(LeakyReLU(alpha=0.1, name='leaky-0_block-1'))
yolo.add(MaxPool2D(strides=2, name='pool_block-1'))

yolo.add(Conv2D(128, 1, padding='same', name='conv-0_block-2'))
yolo.add(LeakyReLU(alpha=0.1, name='leaky-0_block-2'))
yolo.add(Conv2D(256, 3, padding='same', name='conv-1_block-2'))
yolo.add(LeakyReLU(alpha=0.1, name='leaky-1_block-2'))
yolo.add(Conv2D(256, 1, padding='same', name='conv-2_block-2'))
yolo.add(LeakyReLU(alpha=0.1, name='leaky-2_block-2'))
yolo.add(Conv2D(512, 3, padding='same', name='conv-3_block-2'))
Example #18
def processConv(labelNum,
                modelName,
                balancedWeight='None',
                embedding='None',
                clean=False,
                char=False):
    if embedding == 'tweet2vec':
        embData = np.load('embModel/embeddings.npy')
        tweetVector = np.reshape(embData,
                                 (embData.shape[0], 1, embData.shape[1]))
        print(len(embData))
        labels = []
        labelFile = open('embModel/long1out.label', 'r')
        for line in labelFile:
            labels.append(line.strip())
        labelFile.close()
        activityLabels = np.array(labels)
        encoder = LabelEncoder()
        encoder.fit(labels)
        labelList = encoder.classes_.tolist()
        print(labelList)
        encodedLabels = encoder.transform(labels)
        labels = np_utils.to_categorical(encodedLabels)

        timeList = []
        timeFile = open('embModel/long1out.time', 'r')
        for line in timeFile:
            dateTemp = line.strip().split()
            timeList.append([
                dayMapper[dateTemp[0]],
                hourMapper(dateTemp[3].split(':')[0])
            ])
        timeFile.close()
        timeVector = np.array(timeList)
    else:
        placeList = []
        placeListFile = open('lists/google_place_long.category', 'r')
        for line in placeListFile:
            if not line.startswith('#'):
                placeList.append(line.strip())
        placeListFile.close()
        activityList = []
        activityListFile = open(
            'lists/google_place_activity_' + modelName + '.list', 'r')
        for line in activityListFile:
            if not line.startswith('#'):
                activityList.append(line.strip())
        activityListFile.close()

        contents = []
        labels = []
        timeList = []
        labelTweetCount = {}
        placeTweetCount = {}
        for index, place in enumerate(placeList):
            activity = activityList[index]
            if activity not in labelTweetCount:
                labelTweetCount[activity] = 0.0
            if clean:
                tweetFile = open(
                    'data/google_place_tweets3.3/POI_clean/' + place + '.json',
                    'r')
            else:
                tweetFile = open(
                    'data/google_place_tweets3.3/POI/' + place + '.json', 'r')
            tweetCount = 0
            for line in tweetFile:
                data = json.loads(line.strip())
                contents.append(data['text'].encode('utf-8'))
                dateTemp = data['created_at'].split()
                timeList.append([
                    dayMapper[dateTemp[0]],
                    hourMapper(dateTemp[3].split(':')[0])
                ])
                labels.append(activity)
                tweetCount += 1
            tweetFile.close()
            labelTweetCount[activity] += tweetCount
            placeTweetCount[place] = tweetCount
        activityLabels = np.array(labels)
        timeVector = np.array(timeList)
        encoder = LabelEncoder()
        encoder.fit(labels)
        labelList = encoder.classes_.tolist()
        print(labelList)
        encodedLabels = encoder.transform(labels)
        labels = np_utils.to_categorical(encodedLabels)

        if embedding == 'word2vec':
            w2v = word2vecReader.Word2Vec()
            embModel = w2v.loadModel()
            tweetVector, invalidList = contents2vecs(embModel, contents)
            timeVector = np.delete(timeVector, invalidList, 0)
            labels = np.delete(labels, invalidList, 0)
            tweetVector = np.reshape(
                tweetVector, (tweetVector.shape[0], 1, tweetVector.shape[1]))
        elif embedding == 'gensim':
            tk = Tokenizer(num_words=vocabSize, char_level=char)
            tk.fit_on_texts(contents)
            tweetSequences = tk.texts_to_sequences(contents)
            tweetVector = sequence.pad_sequences(tweetSequences,
                                                 maxlen=tweetLength)
            word_index = tk.word_index

            w2v = word2vecReader.Word2Vec()
            embModel = w2v.loadModel()
            embMatrix = np.zeros((len(word_index) + 1, EMBEDDING_word2vec))
            for word, i in word_index.items():
                if word in embModel:
                    embVector = embModel[word]
                    embMatrix[i] = embVector
        else:
            tk = Tokenizer(num_words=vocabSize, char_level=char)
            tk.fit_on_texts(contents)
            tweetSequences = tk.texts_to_sequences(contents)
            print(tweetSequences[0])
            tweetVector = sequence.pad_sequences(tweetSequences,
                                                 maxlen=tweetLength)

            print(tweetVector.shape)
            print(tweetVector[0])
    # training
    print('training...')
    resultFile = open('result/result', 'a')
    accuracy = 0.0
    skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
    for fold, (train_index,
               test_index) in enumerate(skf.split(timeVector, activityLabels)):
        model_text = Sequential()
        if embedding == 'word2vec':
            # model_text.add(Masking(mask_value=0., input_shape=(None, EMBEDDING_DIM)))
            # model_text.add(InputLayer(input_shape=(None, None, EMBEDDING_DIM), name='embedding_input'))
            model_text.add(
                Conv1D(filters=32,
                       kernel_size=5,
                       padding='same',
                       activation='relu',
                       input_shape=(None, EMBEDDING_word2vec)))
        elif embedding == 'tweet2vec':
            model_text.add(
                Conv1D(filters=32,
                       kernel_size=5,
                       padding='same',
                       activation='relu',
                       input_shape=(None, EMBEDDING_tweet2vec)))
        elif embedding == 'gensim':
            model_text.add(
                Embedding(len(word_index) + 1,
                          EMBEDDING_word2vec,
                          input_length=tweetLength,
                          weights=[embMatrix],
                          trainable=False))
            model_text.add(
                Conv1D(filters=64,
                       kernel_size=5,
                       padding='same',
                       activation='relu'))
        else:
            #model_text.add(Embedding(vocabSize, embeddingVectorLength, input_length=tweetLength))
            #model_text.add(Dropout(0.2))
            model_text.add(InputLayer(input_shape=(None, vocabSize)))
            model_text.add(
                Conv1D(filters=64,
                       kernel_size=10,
                       padding='same',
                       activation='relu'))
        # model_text.add(Dropout(0.2))
        #model_text.add(GlobalMaxPooling1D())
        #model_text.add(Conv1D(filters=64, kernel_size=5, padding='same', activation='relu'))
        model_text.add(MaxPooling1D(pool_size=3))
        #model_text.add(Conv1D(filters=64, kernel_size=5, padding='same', activation='relu'))
        #model_text.add(MaxPooling1D(pool_size=35))
        model_text.add(Flatten())
        #model_text.add(Dense(100, activation='relu'))

        model_time = Sequential()
        model_time.add(
            Dense(2, input_shape=(2, ), activation='relu', name='time_input'))
        # merge text and time branches
        model = Sequential()
        model.add(Merge([model_text, model_time], mode='concat'))
        # model.add(Dropout(0.5))
        model.add(Dense(labelNum, activation='softmax', name='output_layer'))
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
        print(model.summary())

        tweet_train = tweetVector[train_index]
        time_train = timeVector[train_index]
        label_train = labels[train_index]
        tweet_test = tweetVector[test_index]
        time_test = timeVector[test_index]
        label_test = labels[test_index]
        activityLabels_train = activityLabels[train_index]
        if balancedWeight == 'sample':
            sampleWeight = compute_sample_weight('balanced', label_train)
            model.fit([tweet_train, time_train],
                      label_train,
                      epochs=3,
                      batch_size=10,
                      sample_weight=sampleWeight)
        elif balancedWeight == 'class':
            classWeight = compute_class_weight('balanced',
                                               np.unique(activityLabels_train),
                                               activityLabels_train)
            model.fit([tweet_train, time_train],
                      label_train,
                      epochs=3,
                      batch_size=10,
                      class_weight=classWeight)
        else:
            model.fit([tweet_train, time_train],
                      label_train,
                      epochs=3,
                      batch_size=10,
                      verbose=0)

        scores = model.evaluate([tweet_test, time_test], label_test, verbose=0)
        print("Accuracy: %.2f%%" % (scores[1] * 100))
        resultFile.write('Fold ' + str(fold) + ': ' + str(scores[1] * 100) +
                         '\n')
        accuracy += scores[1] * 100
    resultFile.write('Overall: ' + str(accuracy / 5) + '\n\n')
    print(accuracy / 5)
    resultFile.close()
# Encode conllu-formatted data into embeddings
[X_train,
 y_train] = functions.word2vec_data_encoding(data_train, word2vec_model,
                                             sequence_len, embedding_dim,
                                             extra_embeddings, tag_dict)
[X_val, y_val] = functions.word2vec_data_encoding(data_val, word2vec_model,
                                                  sequence_len, embedding_dim,
                                                  extra_embeddings, tag_dict)

# Model definition:
print("Setting up model...")
model = Sequential()

num_tags = len(tag_dict)
model.add(InputLayer(input_shape=(sequence_len, embedding_dim)))
model.add(
    Bidirectional(
        LSTM(config.getint('model', 'hidden_units'), return_sequences=True)))
model.add(Dense(num_tags,
                activation='softmax'))  # Dense can handle 3D input too now
print("Done.")

print("Compiling...")
model.compile(loss=config.get("training", "loss"),
              optimizer=config.get("training", "optimizer"),
              metrics=json.loads(config.get("evaluation", "metrics")))
print("Done.")
model.summary()

# Model training:
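# (continuation assumed; the excerpt stops here) the config section/key
# names below are hypothetical
model.fit(X_train, y_train,
          validation_data=(X_val, y_val),
          epochs=config.getint('training', 'epochs'),
          batch_size=config.getint('training', 'batch_size'))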
Example #20
    plt.grid(False)
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[train_labels[i]])

#reshape the input data
train_images = train_images.reshape(60000, 28, 28, -1)
test_images = test_images.reshape(10000, 28, 28, -1)

x_train, x_val, y_train, y_val = train_test_split(train_images,
                                                  train_labels,
                                                  test_size=0.15,
                                                  random_state=0)

# Build the model
model = Sequential()
model.add(InputLayer(input_shape=(28, 28, 1)))
model.add(BatchNormalization())

model.add(
    Conv2D(64, (5, 5),
           activation='relu',
           padding='same',
           bias_initializer='RandomNormal',
           kernel_initializer='random_uniform'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (5, 5), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
Example #21
    def build_decoder(self):
        decoder = Sequential()
        decoder.add(InputLayer(input_shape=(BOTTLENECK_SIZE, ), name="in"))
        decoder.add(self.model.get_layer("decoder"))
        self.decoder = decoder
Example #22
def tiny_yolo_model():
    model = Sequential()
    model.add(InputLayer(input_shape=(width, height, 3)))
    model.add(
        Conv2D(16,
               use_bias=False,
               data_format="channels_last",
               padding='same',
               kernel_size=(3, 3),
               strides=(1, 1)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling2D(pool_size=(2, 2), padding='same', strides=(2, 2)))

    model.add(
        Conv2D(32,
               use_bias=False,
               data_format="channels_last",
               padding='same',
               kernel_size=(3, 3),
               strides=(1, 1)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling2D(pool_size=(2, 2), padding='same', strides=(2, 2)))

    model.add(
        Conv2D(64,
               use_bias=False,
               data_format="channels_last",
               padding='same',
               kernel_size=(3, 3),
               strides=(1, 1)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling2D(pool_size=(2, 2), padding='same', strides=(2, 2)))

    model.add(
        Conv2D(128,
               use_bias=False,
               data_format="channels_last",
               padding='same',
               kernel_size=(3, 3),
               strides=(1, 1)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling2D(pool_size=(2, 2), padding='same', strides=(2, 2)))

    model.add(
        Conv2D(256,
               use_bias=False,
               data_format="channels_last",
               padding='same',
               kernel_size=(3, 3),
               strides=(1, 1)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling2D(pool_size=(2, 2), padding='same', strides=(2, 2)))

    model.add(
        Conv2D(512,
               use_bias=False,
               data_format="channels_last",
               padding='same',
               kernel_size=(3, 3),
               strides=(1, 1)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling2D(pool_size=(2, 2), padding='same', strides=(1, 1)))

    model.add(
        Conv2D(1024,
               use_bias=False,
               data_format="channels_last",
               padding='same',
               kernel_size=(3, 3),
               strides=(1, 1)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.1))

    model.add(
        Conv2D(1024,
               use_bias=False,
               data_format="channels_last",
               padding='same',
               kernel_size=(3, 3),
               strides=(1, 1)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.1))

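    # 125 output channels = 5 anchor boxes x (4 box coords + 1 objectness
    # + 20 VOC classes), the standard tiny-YOLOv2 detection head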
    model.add(
        Conv2D(125,
               use_bias=True,
               data_format="channels_last",
               padding='same',
               kernel_size=(1, 1),
               strides=(1, 1)))

    return model
opt = 'RMSprop'
lf = 'binary_crossentropy'
ep = 1000
bs = 32
val_split = 0.2
es = EarlyStopping(monitor='val_loss',
                   min_delta=0,
                   patience=10,
                   verbose=0,
                   mode='auto',
                   baseline=None,
                   restore_best_weights=True)

# Construction layers:
classifier = Sequential()
classifier.add(InputLayer(input_shape=(lookback, 1)))
classifier.add(CuDNNLSTM(units=input_units, return_sequences=False))
classifier.add(Dropout(dp))
classifier.add(Dense(units=output_units, activation=act))
classifier.compile(optimizer=opt, loss=lf)

#------------ Loop over 19 study periods -------------
for i in range(0, len(return_window)):

    # Determine which stocks are eligible for the study period
    vec0 = list(np.where(binary_matrix[(749 + i * test), :] == 1)[0])
    vec = []
    for u in vec0:
        if (not np.isnan(return_window[i, 0:750, u]).all()
                and not np.isnan(return_window[i, 750:1000, u]).all()):
            vec.append(u)
Example #24
    def def_model(self, model_id=0):
        input_shape = self.input_shape
        if model_id == 0:  # vgg 16
            input_tensor = Input(shape=input_shape)
            x = Conv2D(64, (3, 3),
                       activation='relu',
                       padding='same',
                       name='block1_conv1')(input_tensor)
            x = Conv2D(64, (3, 3),
                       activation='relu',
                       padding='same',
                       name='block1_conv2')(x)
            x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

            # Block 2
            x = Conv2D(128, (3, 3),
                       activation='relu',
                       padding='same',
                       name='block2_conv1')(x)
            x = Conv2D(128, (3, 3),
                       activation='relu',
                       padding='same',
                       name='block2_conv2')(x)
            x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

            # Block 3
            x = Conv2D(256, (3, 3),
                       activation='relu',
                       padding='same',
                       name='block3_conv1')(x)
            x = Conv2D(256, (3, 3),
                       activation='relu',
                       padding='same',
                       name='block3_conv2')(x)
            x = Conv2D(256, (3, 3),
                       activation='relu',
                       padding='same',
                       name='block3_conv3')(x)
            x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

            # Block 4
            x = Conv2D(512, (3, 3),
                       activation='relu',
                       padding='same',
                       name='block4_conv1')(x)
            x = Conv2D(512, (3, 3),
                       activation='relu',
                       padding='same',
                       name='block4_conv2')(x)
            x = Conv2D(512, (3, 3),
                       activation='relu',
                       padding='same',
                       name='block4_conv3')(x)
            x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

            # Classification block
            x = Flatten(name='flatten')(x)
            x = Dense(4096, activation='relu', name='fc1')(x)
            x = Dense(4096, activation='relu', name='fc2')(x)
            x = Dropout(0.5)(x)
            x = Dense(10, activation='softmax', name='predictions')(x)
            return_model = Model(inputs=[input_tensor], outputs=[x])
            return_model.compile(loss=keras.losses.categorical_crossentropy,
                                 optimizer=keras.optimizers.Adam(),
                                 metrics=['accuracy'])

        elif model_id == 1:
            """
            Convolutional Neural Network: https://github.com/umbertogriffo/Fashion-mnist-cnn-keras/blob/
            master/src/convolutional/fashion_mnist_cnn.py
            """
            return_model = Sequential()
            return_model.add(
                Conv2D(32, (5, 5),
                       input_shape=self.input_shape,
                       padding='same',
                       activation='relu'))
            return_model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))

            return_model.add(
                Conv2D(64, (5, 5), padding='same', activation='relu'))
            return_model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))

            return_model.add(
                Conv2D(128, (1, 1), padding='same', activation='relu'))
            return_model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))

            return_model.add(Flatten())

            return_model.add(
                Dense(1024, activation='relu', kernel_constraint=maxnorm(3)))
            return_model.add(Dropout(0.5))
            return_model.add(
                Dense(512, activation='relu', kernel_constraint=maxnorm(3)))
            return_model.add(Dropout(0.5))

            return_model.add(Dense(self.num_classes, activation='softmax'))
            # Compile model
            lrate = 0.1
            decay = lrate / self.epoch
            sgd = keras.optimizers.SGD(lr=lrate,
                                       momentum=0.9,
                                       decay=decay,
                                       nesterov=True)
            return_model.compile(loss='categorical_crossentropy',
                                 optimizer=sgd,
                                 metrics=['accuracy'])
        elif model_id == 2:
            # https://github.com/cmasch/zalando-fashion-mnist/blob/master/Simple_Convolutional_Neural_Network_Fashion-MNIST.ipynb
            cnn = Sequential()

            cnn.add(InputLayer(input_shape=self.input_shape))

            cnn.add(BatchNormalization())
            cnn.add(
                Convolution2D(64, (4, 4), padding='same', activation='relu'))
            cnn.add(MaxPooling2D(pool_size=(2, 2)))
            cnn.add(Dropout(0.1))

            cnn.add(Convolution2D(64, (4, 4), activation='relu'))
            cnn.add(MaxPooling2D(pool_size=(2, 2)))
            cnn.add(Dropout(0.3))

            cnn.add(Flatten())

            cnn.add(Dense(256, activation='relu'))
            cnn.add(Dropout(0.5))

            cnn.add(Dense(64, activation='relu'))
            cnn.add(BatchNormalization())

            cnn.add(Dense(self.num_classes, activation='softmax'))
            cnn.compile(loss='categorical_crossentropy',
                        optimizer=keras.optimizers.Adam(),
                        metrics=['accuracy'])

            return cnn
        elif model_id == 3:
            pass  # https://github.com/markjay4k/Fashion-MNIST-with-Keras/blob/master/pt%204%20-%20Deeper%20CNNs.ipynb
        else:
            raise Exception("unsupported model")
        return return_model
Example #25
np.save(ROOT_DIR + '\\train_L_' + db_name, database_L)

# -------------------------------  Build and Train CNN  -------------------------------

# Load the databases
train_L_db = np.load(ROOT_DIR + '\\train_L_' + db_name + '.npy')
train_ab_db = np.load(ROOT_DIR + '\\train_ab_' + db_name + '.npy')
test_L_db = np.load(ROOT_DIR + '\\test_L_' + db_name + '.npy')
test_ab_db = np.load(ROOT_DIR + '\\test_ab_' + db_name + '.npy')

model_name = 'regular'

if model_name == 'regular':
    # Create the CNN model
    model = Sequential()
    model.add(InputLayer(input_shape=(None, None, 1)))
    model.add(
        Convolution2D(64, (3, 3), strides=2, padding='same',
                      activation='relu'))
    model.add(
        Convolution2D(128, (3, 3),
                      strides=1,
                      padding='same',
                      activation='relu'))
    model.add(
        Convolution2D(128, (3, 3),
                      strides=2,
                      padding='same',
                      activation='relu'))
    model.add(
        Convolution2D(256, (3, 3),
                      strides=2, padding='same',
                      activation='relu'))  # (closing arguments assumed; the excerpt is truncated here)

    def myscale(x):
        # (body reconstructed as the inverse of myinverse below; the
        # original lines are lost in the excerpt)
        res = np.zeros(Nparameters)
        for i in range(Nparameters):
            res[i] = (x[i] - (ub[i] + lb[i]) * 0.5) * 2 / (ub[i] - lb[i])
        return res

    def myinverse(x):
        res = np.zeros(Nparameters)
        for i in range(Nparameters):
            res[i] = x[i] * (ub[i] - lb[i]) * 0.5 + (ub[i] + lb[i]) * 0.5
        return res

    X_train_trafo = np.array([myscale(x) for x in X_train])
    X_val_trafo = np.array([myscale(x) for x in X_val])
    X_test_trafo = np.array([myscale(x) for x in X_test])

    #Neural Network
    keras.backend.set_floatx('float64')
    NN1 = Sequential()
    NN1.add(InputLayer(input_shape=(Nparameters, )))
    NN1.add(Dense(30, activation='elu'))
    NN1.add(Dense(30, activation='elu'))
    #NN1.add(Dropout(0.05))
    NN1.add(Dense(30, activation='elu'))
    #NN1.add(Dense(30, activation = 'elu'))
    NN1.add(Dense(Nstrikes * Nmaturities, activation='linear'))
    NN1.summary()

    def root_mean_squared_error(y_true, y_pred):
        return K.sqrt(K.mean(K.square(y_pred - y_true)))

    def root_relative_mean_squared_error(y_true, y_pred):
        return K.sqrt(K.mean(K.square(y_pred - y_true) / y_true))

    NN1.compile(loss=root_mean_squared_error, optimizer="adam")
       Note : The weight files must be for the Theano model (theano kernels, th dim ordering)

3) Run the script.

4) Use the weight files in the created folders : ["tf-kernels-tf-dim/", "tf-kernels-th-dim/", "th-kernels-tf-dim/"]
'''

from keras import backend as K
from keras.models import Sequential
from keras.layers import InputLayer, Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D

K.set_image_dim_ordering('th')
# th_dim_model = None # Create your Theano model here with TH dim ordering
th_dim_model = Sequential()  # Theano model with TH dim ordering
th_dim_model.add(InputLayer(input_shape=(20, 50, 50)))
th_dim_model.add(Convolution2D(40, 5, 5, bias=True))
th_dim_model.add(MaxPooling2D(pool_size=(2, 2)))
th_dim_model.add(Activation('relu'))
th_dim_model.add(Convolution2D(60, 3, 3, bias=True))
th_dim_model.add(MaxPooling2D(pool_size=(2, 2)))
th_dim_model.add(Activation('relu'))
th_dim_model.add(Convolution2D(90, 3, 3, bias=True))
th_dim_model.add(MaxPooling2D(pool_size=(2, 2)))
th_dim_model.add(Activation('relu'))
th_dim_model.add(Flatten())
th_dim_model.add(Dense(500, bias=True))  # 1440 -> 500
th_dim_model.add(Activation('tanh'))
th_dim_model.add(Dense(8, bias=True))  # 500 -> 8

K.set_image_dim_ordering('tf')
Example #28
def make_nested_seq_model(input_shape, layer, level=1):
    model = layer
    for i in range(1, level + 1):
        layers = [InputLayer(input_shape), model] if (i == 1) else [model]
        model = Sequential(layers)
    return model
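
# e.g. wrap a Dense layer three Sequentials deep (shapes illustrative;
# assumes Dense is imported)
nested = make_nested_seq_model((32, ), Dense(16), level=3)
nested.summary()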
train_features_vgg = get_bottleneck_features(vgg_model, train_imgs_scaled)
validation_features_vgg = get_bottleneck_features(vgg_model,
                                                  validation_imgs_scaled)

print('Train Bottleneck Features:', train_features_vgg.shape,
      '\tValidation Bottleneck Features:', validation_features_vgg.shape)

# create a dense classifier head
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, InputLayer
from keras.models import Sequential
from keras import optimizers

input_shape = vgg_model.output_shape[1]

model = Sequential()
model.add(InputLayer(input_shape=(input_shape, )))
model.add(Dense(512, activation='relu', input_dim=input_shape))
model.add(Dropout(0.3))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-4),
              metrics=['accuracy'])

model.summary()

history = model.fit(x=train_features_vgg,
                    y=train_labels_enc,
                    validation_data=(validation_features_vgg,
                                     validation_labels_enc),
                    batch_size=32, epochs=30,
                    verbose=1)  # (closing arguments assumed; the excerpt is truncated here)
Example #30
num_flux = spectra.shape[1]                        #number of flux points for each spectrum
num_labels = 4                                     #the TEMP, LOGG, VROT, META labels
num_train = int(np.floor(spectra.shape[0]*0.6))    #60% of entire dataset
num_cv = int(np.floor(spectra.shape[0]*0.2))       #20% of entire dataset

activation  = 'relu'                           #rectified Linear Unit (ReLu) activation function
initializer = 'he_normal'                      #He parameter initialization
input_shape = (None, num_flux, 1)              #vector input shape for nn
num_filters = [4, 16]                          #number of filters for convolutional layers
num_hidden  = [256, 128]                       #number of nodes for dense layers
filter_length = 8                              #filter dimension for convolutional layers
pool_length   = 4                              #pool dimension for maxpooling layers

model = Sequential([                           #construction of Neural Network in sequential mode

          InputLayer(batch_input_shape=input_shape),
          Conv1D(filters=num_filters[0], kernel_size=filter_length, padding="same",
                 activation=activation, kernel_initializer=initializer),
          Conv1D(filters=num_filters[1], kernel_size=filter_length, padding="same",
                 activation=activation, kernel_initializer=initializer),
          MaxPooling1D(pool_size=pool_length),
          Flatten(),   #flatten the feature map to a vector so that it could be inputed to the fully-connected layers
          Dense(units=num_hidden[0], activation=activation, kernel_initializer=initializer),
          Dense(units=num_hidden[1], activation=activation, kernel_initializer=initializer),
          Dense(units=num_labels, activation='linear', input_dim=num_hidden[1]), #linear activation in this layer to perform regression

          ])

lr = 0.0007                                     #initial learning rate for ADAM
beta_1 = 0.9                                    #decay parameter for estimator (default value)
beta_2 = 0.999                                  #decay parameter fot estimator (default value)
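
# (not in the excerpt) a plausible compile call wiring up the ADAM
# hyperparameters above; assumes keras.optimizers.Adam is imported
model.compile(optimizer=Adam(lr=lr, beta_1=beta_1, beta_2=beta_2),
              loss='mse')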