def get_DenseNet_classifier():
    inputs = Input((CLASSIFY_INPUT_WIDTH, CLASSIFY_INPUT_HEIGHT, CLASSIFY_INPUT_DEPTH, CLASSIFY_INPUT_CHANNEL))
    x = Conv3D(DENSE_NET_INITIAL_CONV_DIM, (3, 3, 3), padding='same')(inputs)
    print('input')
    print(x.get_shape())

    for i in range(DENSE_NET_BLOCKS):
        x = dense_block(x)
        if i != DENSE_NET_BLOCKS - 1:
            x = transition_block(x)

    print('top')
    x = GlobalAveragePooling3D()(x)
    print(x.get_shape())

    if DENSE_NET_ENABLE_DROPOUT:
        x = Dropout(DENSE_NET_DROPOUT)(x)

    x = Dense(2, activation='softmax')(x)
    print(x.get_shape())

    model = Model(inputs=inputs, outputs=x)
    model.compile(optimizer=Adam(lr=TRAIN_CLASSIFY_LEARNING_RATE), loss='binary_crossentropy', metrics=['accuracy'])

    return model
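
# NOTE: dense_block() and transition_block() are not defined in this excerpt. A minimal
# sketch of what 3D DenseNet versions could look like; the constants DENSE_NET_GROWTH_RATE,
# DENSE_NET_BLOCK_LAYERS and DENSE_NET_COMPRESSION are hypothetical, not from the original.
from keras.layers import BatchNormalization, Activation, Conv3D, AveragePooling3D, concatenate

def dense_block(x):
    # Each layer produces DENSE_NET_GROWTH_RATE new feature maps, concatenated with its inputs.
    for _ in range(DENSE_NET_BLOCK_LAYERS):
        y = BatchNormalization()(x)
        y = Activation('relu')(y)
        y = Conv3D(DENSE_NET_GROWTH_RATE, (3, 3, 3), padding='same')(y)
        x = concatenate([x, y])
    return x

def transition_block(x):
    # Compress channels with a 1x1x1 convolution, then halve the spatial resolution.
    channels = int(x.get_shape()[-1])
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv3D(int(channels * DENSE_NET_COMPRESSION), (1, 1, 1), padding='same')(x)
    x = AveragePooling3D((2, 2, 2), strides=(2, 2, 2))(x)
    return x
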
def get_Inception_classifier():
    inputs = Input((CLASSIFY_INPUT_WIDTH, CLASSIFY_INPUT_HEIGHT, CLASSIFY_INPUT_DEPTH, CLASSIFY_INPUT_CHANNEL))
    print('inputs')
    print(inputs.get_shape())

    # Make inception base
    x = inception_base(inputs)

    for i in range(INCEPTION_BLOCKS):
        x = inception_block(x, filters=INCEPTION_KEEP_FILTERS)

        if (i + 1) % INCEPTION_REDUCTION_STEPS == 0 and i != INCEPTION_BLOCKS - 1:
            x = reduction_block(x, filters=INCEPTION_KEEP_FILTERS // 2)

    print('top')
    x = GlobalMaxPooling3D()(x)
    print(x.get_shape())
    x = Dropout(INCEPTION_DROPOUT)(x)
    x = Dense(2, activation='softmax')(x)
    print(x.get_shape())

    model = Model(inputs=inputs, outputs=x)
    model.compile(optimizer=Adam(lr=TRAIN_CLASSIFY_LEARNING_RATE), loss='binary_crossentropy', metrics=['accuracy'])

    return model
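
# NOTE: inception_base(), inception_block() and reduction_block() are not shown in this
# excerpt. A minimal, hypothetical 3D sketch of such helpers (branch widths are
# illustrative only, not the original configuration):
from keras.layers import Conv3D, MaxPooling3D, concatenate

def inception_base(inputs):
    # Stem: a single strided convolution to reduce spatial size before the inception stacks.
    return Conv3D(32, (3, 3, 3), strides=(2, 2, 2), padding='same', activation='relu')(inputs)

def inception_block(x, filters):
    # Parallel 1x1x1 / 3x3x3 / 5x5x5 / pooling branches, concatenated along the channel axis.
    b1 = Conv3D(filters // 4, (1, 1, 1), padding='same', activation='relu')(x)
    b2 = Conv3D(filters // 4, (3, 3, 3), padding='same', activation='relu')(x)
    b3 = Conv3D(filters // 4, (5, 5, 5), padding='same', activation='relu')(x)
    b4 = MaxPooling3D((3, 3, 3), strides=(1, 1, 1), padding='same')(x)
    b4 = Conv3D(filters // 4, (1, 1, 1), padding='same', activation='relu')(b4)
    return concatenate([b1, b2, b3, b4])

def reduction_block(x, filters):
    # A strided convolution branch and a pooling branch that both halve the resolution.
    b1 = Conv3D(filters, (3, 3, 3), strides=(2, 2, 2), padding='same', activation='relu')(x)
    b2 = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)
    return concatenate([b1, b2])
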
            i += 1
            if i >= len(data_images):
                i = 0

        yield np.array(images), np.array(steer_angles)
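
# NOTE: only the tail of the generator appears above. A minimal sketch of a complete
# behavioral-cloning batch loader consistent with that tail and with the
# image_loader(x_train, y_train, BATCH_SIZE, True) call further down; the cv2 image
# reading and the horizontal-flip augmentation are assumptions, not the original code.
import cv2
import numpy as np

def image_loader(data_images, data_angles, batch_size, augment):
    i = 0
    while True:
        images, steer_angles = [], []
        while len(images) < batch_size:
            image = cv2.imread(data_images[i])
            angle = data_angles[i]
            if augment and np.random.rand() < 0.5:
                image, angle = cv2.flip(image, 1), -angle  # mirror frame and steering angle
            images.append(image)
            steer_angles.append(angle)
            i += 1
            if i >= len(data_images):
                i = 0
        yield np.array(images), np.array(steer_angles)
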


model = Sequential()
model.add(Cropping2D(cropping=((70, 25), (0, 0)), input_shape=INPUT_SHAPE))
model.add(Lambda(lambda var: var / 127.5 - 1.0))
model.add(Conv2D(24, (5, 5), activation='relu', strides=(2, 2)))
model.add(Conv2D(36, (5, 5), activation='relu', strides=(2, 2)))
model.add(Conv2D(48, (5, 5), activation='relu', strides=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Dropout(KEEP_PROB))
model.add(Flatten())
model.add(Dense(100, activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(1))
#model.summary()

model.compile(loss='mean_squared_error', optimizer=Adam(lr=LEARNING_RATE))

model.fit_generator(image_loader(x_train, y_train, BATCH_SIZE, True),
                    len(x_train),
                    EPOCHS,
                    max_q_size=1,
                    validation_data=image_loader(x_valid, y_valid, BATCH_SIZE,
                                                 False),
Example #4
def train(csv_file):
    dataframe = pandas.read_csv(csv_file,
                                engine='python',
                                quotechar='"',
                                header=None)
    dataset = dataframe.sample(frac=1).values

    # Preprocess dataset
    X = dataset[:, 0]
    Y = dataset[:, 1]

    for index, item in enumerate(X):
        X[index] = item

    tokenizer = Tokenizer(filters='\t\n', char_level=True)
    tokenizer.fit_on_texts(X)

    # Extract and save word dictionary
    word_dict_file = 'build/word-dictionary.json'

    if not os.path.exists(os.path.dirname(word_dict_file)):
        os.makedirs(os.path.dirname(word_dict_file))

    with open(word_dict_file, 'w') as outfile:
        json.dump(tokenizer.word_index, outfile, ensure_ascii=False)

    num_words = len(tokenizer.word_index) + 1
    X = tokenizer.texts_to_sequences(X)

    max_log_length = 2083
    train_size = int(len(dataset) * .75)

    X_processed = sequence.pad_sequences(X, maxlen=max_log_length)
    X_train, X_test = X_processed[0:train_size], X_processed[
        train_size:len(X_processed)]
    Y_train, Y_test = Y[0:train_size], Y[train_size:len(Y)]

    #tb_callback = TensorBoard(log_dir='./logs', embeddings_freq=1)

    model = Sequential()
    model.add(Embedding(num_words, 32, input_length=max_log_length))
    model.add(Dropout(0.5))
    model.add(LSTM(16, recurrent_dropout=0.5))
    model.add(Dropout(0.5))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    print(model.summary())
    #from keras.utils import multi_gpu_model
    #model = multi_gpu_model(model, gpus=)
    model.fit(X_train,
              Y_train,
              validation_split=0.25,
              epochs=3,
              batch_size=500)

    # Evaluate model
    score, acc = model.evaluate(X_test, Y_test, verbose=1, batch_size=500)

    print("Model Accuracy: {:0.2f}%".format(acc * 100))

    # Save model
    model.save_weights('urls-lstm-weights.h5')
    model.save('urls-lstm-model.h5')
    with open('urls-lstm-model.json', 'w') as outfile:
        outfile.write(model.to_json())
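
# A possible way to score a new URL with the artifacts saved above. Only the file names
# and max_log_length come from train(); rebuilding the tokenizer from the saved word
# dictionary is an assumption.
import json
from keras.models import load_model
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer

def predict_url(url, model_path='urls-lstm-model.h5',
                word_dict_file='build/word-dictionary.json', max_log_length=2083):
    model = load_model(model_path)
    tokenizer = Tokenizer(filters='\t\n', char_level=True)
    with open(word_dict_file) as infile:
        tokenizer.word_index = json.load(infile)
    seq = tokenizer.texts_to_sequences([url])
    padded = sequence.pad_sequences(seq, maxlen=max_log_length)
    return float(model.predict(padded)[0][0])  # estimated probability of the positive class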
Example #5
def unet_archPaper(h, w):
  print("Model of size: %d %d" % (h, w))

  inputs = Input((1, h , w)) # 160 x 160
  ordering = 'th'  # 'th': (ch, h, w),  'tf': (h, w, ch)

  conv_1 = Convolution2D(64, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(inputs)
  conv_2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(conv_1)
  print 'view conv2', conv_2.get_shape()
  pool1 = MaxPooling2D(pool_size=(2, 2),dim_ordering=ordering)(conv_2)
  pool1 = Dropout(0.15)(pool1)
  print 'view pool1', pool1.get_shape()

  conv_3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(pool1)
  conv_4 = Convolution2D(128, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(conv_3)
  print '\nview conv4', conv_4.get_shape(), '< up-3'
  pool2 = MaxPooling2D(pool_size=(2, 2),dim_ordering=ordering)(conv_4)
  pool2 = Dropout(0.25)(pool2)
  print 'view pool2', pool2.get_shape()

  conv_5 = Convolution2D(256, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(pool2)
  conv_6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(conv_5)
  print '\nview conv6', conv_6.get_shape(), '< up-2'
  pool3 = MaxPooling2D(pool_size=(2, 2),dim_ordering=ordering)(conv_6)
  pool3 = Dropout(0.4)(pool3)
  print 'view pool3', pool3.get_shape()

  conv_7 = Convolution2D(512, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(pool3)
  conv_8 = Convolution2D(512, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(conv_7)
  print '\nview conv8', conv_8.get_shape(), '< up-1'
  pool4 = MaxPooling2D(pool_size=(2, 2),dim_ordering=ordering)(conv_8)
  pool4 = Dropout(0.5)(pool4)
  print 'view pool4', pool4.get_shape()

  conv_9 = Convolution2D(1024, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(pool4)
  print '\nview conv9', conv_9.get_shape()
  conv_10 = Convolution2D(1024, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(conv_9)
  print 'view conv10', conv_10.get_shape()
  pool5 = MaxPooling2D(pool_size=(2, 2),dim_ordering=ordering)(conv_10) # 5x5
  pool5 = Dropout(0.5)(pool5)
  print 'view pool5', pool5.get_shape()

  ####################################################################################################
  up_1 = merge([UpSampling2D(size=(2, 2))(conv_8), pool5], mode='concat', concat_axis=1)
  print '\nview up1', up_1.get_shape()
  conv_12 = Convolution2D(512, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(up_1)
  conv_13 = Convolution2D(512, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(conv_12)

  pool6 = MaxPooling2D(pool_size=(2, 2), dim_ordering=ordering)(conv_13)  # 5x5
  pool6 = Dropout(0.5)(pool6)
  print 'view pool6', pool6.get_shape()

  ##################
  up_2 = merge([UpSampling2D(size=(2, 1))(conv_6), pool6], mode='concat', concat_axis=1)
  print '\nview up2', up_2.get_shape()
  conv_15 = Convolution2D(256, 3, 3, activation='relu', border_mode='same', init='he_normal')(up_2)
  conv_16 = Convolution2D(256, 3, 3, activation='relu', border_mode='same', init='he_normal')(conv_15)
  print 'view conv16', conv_16.get_shape()
  pool7 = Dropout(0.15)(conv_16)
  print 'view pool7', pool7.get_shape()

  ##################
  up_3 = merge([UpSampling2D(size=(2, 1))(conv_4), pool7], mode='concat', concat_axis=1)
  print '\nview up3', up_3.get_shape()
  conv_18 = Convolution2D(128, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(up_3)
  conv_19 = Convolution2D(128, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(conv_18)
  print 'view conv18', conv_18.get_shape()
  pool8 = Dropout(0.4)(conv_19)
  print 'view pool8', pool8.get_shape()

  ##################
  up_4 = merge([UpSampling2D(size=(2, 1))(conv_2), pool8], mode='concat', concat_axis=1)
  print 'view up4', up_4.get_shape()
  conv_21 = Convolution2D(64, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(up_4)
  print 'view conv9-1', conv_21.get_shape()
  conv_22 = Convolution2D(64, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(conv_21)
  print 'view conv9', conv_22.get_shape()
  pool9 = Dropout(0.25)(conv_22)
  ##################################################################


  conv_23 = Convolution2D(1, 1, 1, activation='sigmoid', init = 'he_normal')(pool9)
  conv_24 = Convolution2D(1, 1, 1, activation='sigmoid', init = 'he_normal')(conv_23)
  print 'view conv10', conv_24.get_shape()

  model = Model(input=inputs, output=conv_24)
  #model = Model(input=inputs, output=conv12)
  model.summary()
  #plot(model, "model.png")
  return model
    model.add(MaxPool1D(pool_size=pool1_pool_size))

    model.add(
        Conv1D(filters=conv2_filters,
               kernel_size=conv2_kernel_size,
               strides=conv2_strides,
               padding='same',
               activation='relu',
               kernel_initializer='he_normal'))

    model.add(MaxPool1D(pool_size=pool2_pool_size))

    model.add(Flatten())

    model.add(Dropout(rate=0.5))

    model.add(Dense(units=500, activation='relu'))

    model.add(Dropout(rate=0.5))

    model.add(Dense(units=10, activation='softmax'))

    adam = Adam()

    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])

    history = model.fit(x_train,
                        y_train,
Example #7
def NeuralNetwork(encode_type="LabelEncode"):
    df = Base_Process(encode_type)
    # Add a geo-location clustering feature
    df = pd.merge(df, _F_Clsuter_Geo(), on=pri_id, how='left')

    # Add the user's top-N most active province / city / district
    temp = _F_GeoCode(n=1)
    df = pd.merge(df, temp, on=pri_id, how='left')

    # Add distinct-count (nunique) statistics
    temp = _F_nunique(3)
    df = pd.merge(df, temp, on=pri_id, how='left')

    # Add nunique-ratio statistics
    temp = _F_nunique_ratio(3)
    df = pd.merge(df, temp, on=pri_id, how='left')

    _Train = pd.merge(_train, df, on=pri_id, how='left').fillna(0)
    _Test = pd.merge(_test, df, on=pri_id, how='left').fillna(0)
    features = [col for col in _Train.columns if col != pri_id and col != 'y']

    _Label = _Train['y']

    # Data input and model structure
    from keras.models import Sequential
    model = Sequential()
    from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten, Dropout
    from keras import backend as K
    import tensorflow as tf
    import itertools

    shape = _Train.shape
    # Convolutional layer
    # model.add(Conv2D(64, (3,3), activation='relu', input_shape = (shape[0],shape[1],1)))
    # # Pooling layer
    # model.add(MaxPooling2D(pool_size=(2,2)))
    # # Fully connected layer (sets the output dimension)
    # model.add(Dense(256, activation='relu'))
    # # Dropout layer
    # model.add(Dropout(0.5))
    # # Final fully connected layer, outputs a probability
    # model.add(Dense(1, activation='sigmoid'))

    # MLP
    # print(shape)
    model.add(Dense(64, input_dim=402, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1, activation='sigmoid'))

    # Compile (later this should be switched to the custom evaluation metric)

    # Scoring metric for this competition
    def tpr_weight_funtion(y_true, y_pred):

        # batch_size, n_elems = y_pred.shape[0],y_pred.shape[1]
        # idxs = list(itertools.permutations(range(n_elems)))
        # permutations = tf.gather(y_pred, idxs, axis=-1)  # Shape=(batch_size, n_permutations, n_elems)

        d = pd.DataFrame()
        # sess = tf.Session()
        # sess.run(tf.global_variables_initializer())
        # d['prob'] = permutations.eval(session=sess)

        d['prob'] = list(K.eval(y_pred))
        d['y'] = list(y_true)
        d = d.sort_values(['prob'], ascending=[0])
        y = d.y
        PosAll = pd.Series(y).value_counts()[1]
        NegAll = pd.Series(y).value_counts()[0]
        pCumsum = d['y'].cumsum()
        nCumsum = np.arange(len(y)) - pCumsum + 1
        pCumsumPer = pCumsum / PosAll
        nCumsumPer = nCumsum / NegAll
        TR1 = pCumsumPer[abs(nCumsumPer - 0.001).idxmin()]
        TR2 = pCumsumPer[abs(nCumsumPer - 0.005).idxmin()]
        TR3 = pCumsumPer[abs(nCumsumPer - 0.01).idxmin()]
        return 0.4 * TR1 + 0.3 * TR2 + 0.3 * TR3

    def AUC(y_true, y_pred):
        not_y_pred = np.logical_not(y_pred)
        y_int1 = y_true * y_pred
        y_int0 = np.logical_not(y_true) * not_y_pred
        TP = np.sum(y_pred * y_int1)
        FP = np.sum(y_pred) - TP
        TN = np.sum(not_y_pred * y_int0)
        FN = np.sum(not_y_pred) - TN
        TPR = np.float(TP) / (TP + FN)
        FPR = np.float(FP) / (FP + TN)
        return ((1 + TPR - FPR) / 2)

    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    # model.compile(optimizer='rmsprop',loss='binary_crossentropy',metrics=[AUC])

    # Training (batch_size = number of samples drawn per iteration)

    res = pd.DataFrame()
    res[pri_id] = _Test[pri_id]
    _K_Train = pd.DataFrame()
    _KTrain = pd.DataFrame()
    _KTrain[pri_id] = _Train[pri_id]
    # The inputs need to be normalized
    _Train, _Test = _M.Normalize(_Train[features], _Test[features])

    from sklearn.model_selection import StratifiedKFold
    # Split _Train into 5 folds and average the predictions over the 5 folds
    skf = StratifiedKFold(n_splits=5)
    pred = np.zeros((_Test.shape[0], 1))

    for train, test in skf.split(_Train, _Label):

        model.fit(_Train.iloc[train],
                  _Label.iloc[train],
                  epochs=50,
                  batch_size=128)
        # Concatenate the held-out fold's predictions with the test predictions
        temp = model.predict(_Test)
        pred += np.asarray(temp)
        _K_T = pd.DataFrame()
        _K_T[pri_id] = _KTrain.iloc[test][pri_id]
        _K_T['mlp'] = model.predict(_Train.iloc[test])
        _K_Train = pd.concat((_K_Train, _K_T))

    pred /= 5
    # Output of the fully connected network
    res['mlp'] = pred
    res = pd.concat((_K_Train, res))
    res.to_csv(data_path + "data/_F_mlp_features.csv", index=False)
# Step-1b Max Pooling
classifier.add(MaxPooling2D(pool_size=(2, 2)))

#%% Hidden Layer 1
# Step-2a Convolution
classifier.add(Conv2D(48, (3, 3), activation='relu'))
# Step-2b Max Pooling
classifier.add(MaxPooling2D(pool_size=(2, 2)))

#%% Output Layer
# Step-4 Flattening
classifier.add(Flatten())

#%% Step-5 Full Connection
classifier.add(Dense(28, input_shape=(64, 16), activation='relu'))
classifier.add(Dropout(0.4))

classifier.add(Dense(14, input_shape=(64, 8), activation='relu'))
classifier.add(Dropout(0.2))

classifier.add(Dense(1, activation='sigmoid'))

#%% Compiling CNN
classifier.compile(optimizer='adam',
                   loss='binary_crossentropy',
                   metrics=['accuracy'])

#%% Constructing the image data generator for Train set and test set
from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1. / 255,
    def sep_cnn_model(input_shape,
                      max_length,
                      num_classes,
                      num_features,
                      embedding_matrix,
                      input_tensor=None,
                      emb_size=300,
                      blocks=1,
                      filters=64,
                      kernel_size=4,
                      dropout_rate=0.25):
        op_units, op_activation = ModelGenerator._get_last_layer_units_and_activation(
            num_classes)

        inputs = Input(name='inputs', shape=[max_length], tensor=input_tensor)
        if embedding_matrix is None:
            layer = Embedding(input_dim=num_features,
                              output_dim=emb_size,
                              input_length=input_shape)(inputs)
        else:
            num_features = MAX_VOCAB_SIZE
            layer = Embedding(
                input_dim=num_features,
                output_dim=emb_size,
                input_length=input_shape,
                embeddings_initializer=keras.initializers.Constant(
                    embedding_matrix))(inputs)

        for _ in range(blocks - 1):
            layer = Dropout(rate=dropout_rate)(layer)
            layer = SeparableConv1D(filters=filters,
                                    kernel_size=kernel_size,
                                    activation='relu',
                                    bias_initializer='random_uniform',
                                    depthwise_initializer='random_uniform',
                                    padding='same')(layer)
            layer = SeparableConv1D(filters=filters,
                                    kernel_size=kernel_size,
                                    activation='relu',
                                    bias_initializer='random_uniform',
                                    depthwise_initializer='random_uniform',
                                    padding='same')(layer)
            layer = MaxPooling1D(pool_size=3)(layer)

        layer = SeparableConv1D(filters=filters * 2,
                                kernel_size=kernel_size,
                                activation='relu',
                                bias_initializer='random_uniform',
                                depthwise_initializer='random_uniform',
                                padding='same')(layer)
        layer = SeparableConv1D(filters=filters * 2,
                                kernel_size=kernel_size,
                                activation='relu',
                                bias_initializer='random_uniform',
                                depthwise_initializer='random_uniform',
                                padding='same')(layer)

        layer = GlobalAveragePooling1D()(layer)
        # model.add(MaxPooling1D())
        layer = Dropout(rate=0.5)(layer)
        layer = Dense(op_units, activation=op_activation)(layer)
        model = keras.models.Model(inputs=inputs, outputs=layer)
        return model
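
    # NOTE: ModelGenerator._get_last_layer_units_and_activation() is referenced above but
    # not included here. A sketch of the usual convention (an assumption, not the original):
    @staticmethod
    def _get_last_layer_units_and_activation(num_classes):
        # Binary problems get one sigmoid unit; multi-class problems get a softmax over all classes.
        if num_classes == 2:
            return 1, 'sigmoid'
        return num_classes, 'softmax'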
Example #10
def myTradingSystem(DATE, OPEN, HIGH, LOW, CLOSE, VOL, exposure, equity, settings):
    ''' This system uses trend following techniques to allocate capital into the desired equities'''

    try:
        print(settings['counter'])
    except:
        settings['counter'] = 0
        settings['LSTM_regressor'] = dict()
        settings['sc'] = dict()

    nMarkets = CLOSE.shape[1]
    pos = numpy.zeros(nMarkets)
    print(DATE[0])
    count_in_cash = 0
    for market in range(1,nMarkets):
        print("Processing market, index: {}".format(market))

        close = CLOSE[:, market]
        DATE_ = DATE
        data = pd.DataFrame()
        print(len(close) == len(DATE_))
        data['CLOSE'] = close
        data['observation_date'] = DATE_
        data['observation_date'] = pd.to_datetime(data['observation_date'].astype(str))
        data = data.merge(features, on=date, how="left")
        print(data.head())

        # retrain the LSTM model every 100 days
        if settings['counter']%100==0:
            training_set = numpy.reshape(CLOSE[:,market], (CLOSE[:,market].shape[0], 1))
            print("training_set", training_set.shape)
            training_set = (training_set-numpy.insert(training_set, 0, 0, axis=0)[:-1,])/numpy.insert(training_set, 0, 0, axis=0)[:-1,]
            training_set = training_set[1:,]
            print(training_set.shape)

            sc = MinMaxScaler(feature_range = (0, 1))
            training_set_scaled = sc.fit_transform(training_set)
            settings['sc'][str(market)] = sc 

            
            X_train = []
            y_train = []
            
            for i in range(30, training_set.shape[0]):
                X_train.append(training_set_scaled[i-30:i,0])
                y_train.append(training_set_scaled[i,0])
            
            X_train, y_train = numpy.array(X_train), numpy.array(y_train)
            print("len of X_train", len(X_train))
            X_train = numpy.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
            print(X_train.shape)
            

            # LSTM
            print("Re-training LSTM!")
            regressor = Sequential()

            regressor.add(LSTM(units = 25, return_sequences = True, input_shape = (X_train.shape[1], 1)))
            regressor.add(Dropout(0.1))

            regressor.add(LSTM(units = 20, return_sequences = True))
            regressor.add(Dropout(0.1))

            regressor.add(LSTM(units = 5))
            regressor.add(Dropout(0.1))

            regressor.add(Dense(units = 1))

            regressor.compile(optimizer = 'Adam', loss = 'mean_squared_error')

            regressor.fit(X_train, y_train, epochs = 1, batch_size = 32)
            
            settings['LSTM_regressor'][str(market)] = regressor
            
            print("Completed re-training!")
        else:
            print("Deploying existing LSTM!")
            sc = settings['sc'][str(market)]
            regressor = settings['LSTM_regressor'][str(market)]
        

        X_pred = []
        pred_set = numpy.reshape(CLOSE[:,market], (CLOSE[:,market].shape[0], 1))


        pred_set = (pred_set - numpy.insert(pred_set, 0, 0, axis=0)[:-1,])/numpy.insert(pred_set, 0, 0, axis=0)[:-1,]
        pred_set = pred_set[1:,]
        pred_set_scaled = sc.fit_transform(pred_set)
        X_pred.append(pred_set_scaled[-30:,0])
        X_pred = numpy.array(X_pred)
        X_pred = numpy.reshape(X_pred, (X_pred.shape[0], X_pred.shape[1], 1))
        predicted_stock_price = regressor.predict(X_pred)
        print(predicted_stock_price)
        predicted_stock_price = sc.inverse_transform(predicted_stock_price)

        err_term = 0.005 #%[0.005:0.001:0.008]#
        if predicted_stock_price[0][0] > 0:
            pos[market] = 1
            # print('LONG')


        elif predicted_stock_price[0][0] + err_term < 0:
            pos[market] = -1
            # print('SHORT') 

        else:
            pos[0] = pos[0] + 1 

        print('*' * 100)
    
    settings['counter'] = settings['counter'] + 1

    return pos, settings
####Set type here, x for flow_x, y for flow_y and <blank> for frame
if(len(sys.argv)==6):
	type=sys.argv[5] #or 'y'
else:
	type=''


#Architecture

# Define layer 1
input_img1 = Input(shape=(1, inp_sz[0], inp_sz[1]))
x1 = Convolution2D(no_layer[0], 3,3, activation='tanh', border_mode='same')(input_img1)
x1 = Convolution2D(no_layer[1], 5,5, activation='tanh', border_mode='same')(x1)
x1 = BatchNormalization(mode=0, axis=1) (x1)
y1 = MaxPooling2D((2,2), border_mode='valid')(x1)
x1 = Dropout(drop)(y1)
x1 = Convolution2D(no_layer[1], 5,5, activation='tanh', border_mode='same')(x1)
x1 = Convolution2D(no_layer[0], 3,3, activation='tanh', border_mode='same')(x1)
x1 = BatchNormalization(mode=0, axis=1) (x1)
x1 = UpSampling2D((2, 2))(x1)
decoded1 = Convolution2D(1,3,3, border_mode='same', activation='sigmoid')(x1)

#Make layer1
autoencoder1 = Model(input_img1, decoded1)
autoencoder1.compile(optimizer=opti, loss='binary_crossentropy')
encoder1 = Model(input=input_img1, output=y1)
json_string = autoencoder1.to_json()
open(path2sav+'autoencoder1_temp.json', 'w').write(json_string)


# Define layer2
    def build_discriminator(self):

        k = 4
        s = 2

        model = Sequential()

        # First Layer
        model.add(
            Conv2D(
                filters=self.ndf,
                kernel_size=k,
                strides=s,
                padding='same',
                use_bias=False,
                input_shape=self.img_shape,
            )
        )
        model.add(LeakyReLU(alpha=0.2))

        # Layer 2
        model.add(
            Conv2D(
                filters=self.ndf*2,
                kernel_size=k,
                strides=s,
                padding='same',
                use_bias=False,
            )
        )
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))

        # Layer 3
        model.add(
            Conv2D(
                filters=self.ndf*4,
                kernel_size=k,
                strides=s,
                padding='same',
                use_bias=False,
            )
        )
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))

        # Layer 4
        model.add(
            Conv2D(
                filters=self.ndf*8,
                kernel_size=k,
                strides=s,
                padding='same',
                use_bias=False,
            )
        )
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))

        # Layer 5
        model.add(
            Conv2D(
                filters=self.ndf*16,
                kernel_size=k,
                strides=s,
                padding='same',
                use_bias=False,
            )
        )
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))

        # Final Layer
        model.add(Flatten())
        model.add(Dropout(.3))
        model.add(Dense(1, activation='sigmoid'))

        model.summary()

        img = Input(shape=self.img_shape)
        validity = model(img)

        return Model(img, validity)
Example #13
        TickStartWith, TickNumLength, CabChar
    ],
                          axis=1)


x, y = shape_data(x_tr, norm=True), y_tr.values.reshape(y_tr.shape[0], 1)
x_test, y_test = shape_data(x_t,
                            norm=True), y_t.values.reshape(y_t.shape[0], 1)
print('x.shape: ', x.shape)
print('y.shape: ', y.shape)
print('x_test.shape: ', x_test.shape)
print('y_test.shape: ', y_test.shape)

model = Sequential()
model.add(Dense(units=64, activation='relu', input_dim=x.shape[1]))
model.add(Dropout(0.5))
model.add(Dense(units=128, activation='relu'))
model.add(Dropout(0.6))
model.add(Dense(units=128, activation='relu'))
model.add(Dropout(0.7))
model.add(Dense(units=32, activation='relu'))
model.add(Dropout(0.8))
model.add(Dense(units=1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
# model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

model.fit(x, y, epochs=700, batch_size=1024)
Example #14
def model_define_():
    is_cata = False
    reps = []
    ip = Input(shape=(10, 10, 2))
    ipc = Input(shape=(1,))
    h = Conv2D(32, 3, activation='elu')(ip)
    h = MaxPool2D()(h)
    reps.append(Flatten(name='rep0')(h))

    h = Conv2D(128, 3, activation='elu')(h)
    h = MaxPool2D()(h)
    h = Dropout(0.5)(h)
    # h = Conv2D(256, 3, activation='elu')(h)
    # h = Dropout(0.5)(h)
    # h = Conv2D(512, 3, activation='elu')(h)
    reps.append(Flatten(name='rep1')(h))

    h = Conv2D(8, 3, activation='elu', padding='same')(ip)
    h = MaxPool2D()(h)
    h = Conv2D(32, 3, activation='elu', padding='same')(h)
    h = MaxPool2D()(h)
    h = Conv2D(128, 3, activation='elu', padding='same')(h)
    h = MaxPool2D()(h)
    h = Conv2D(512, 1, activation='elu', padding='same')(h)
    h = Dropout(0.5)(h)
    h = Flatten(name='rep2')(h)
    reps.append(h)

    h = Conv2D(8, 3, activation='elu')(ip)
    h = Conv2D(16, 3, activation='elu')(h)
    h = Conv2D(32, 3, activation='elu')(h)
    h = Conv2D(64, 3, activation='elu')(h)
    h = Conv2D(64, 1, activation='elu')(h)
    h = Dropout(0.5)(h)
    reps.append(Flatten(name='rep3')(h))

    h = Conv2D(8, 5, activation='elu', padding='same')(ip)
    h = MaxPool2D()(h)
    h = Conv2D(32, 5, activation='elu', padding='same')(h)
    h = MaxPool2D()(h)
    h = Conv2D(64, 1, activation='elu')(h)
    h = Dropout(0.5)(h)
    reps.append(Flatten(name='rep4')(h))

    h = Conv2D(32, 5, activation='elu')(ip)
    h = Conv2D(64, 5, activation='elu')(h)
    h = Conv2D(64, 1, activation='elu')(h)
    h = Dropout(0.5)(h)
    reps.append(Flatten(name='rep5')(h))

    h = Flatten()(ip)
    reps.append(h)
    for i in range(2):
        h = Dense(128, activation='elu')(h)
    h = Dropout(0.5)(h)
    reps.append(Dense(128, activation='elu', name='rep6')(h))

    h = Conv2D(8, 5, activation='elu', padding='same')(ip)
    h = MaxPool2D(pool_size=(1, 2))(h)
    h = Conv2D(16, 5, activation='elu', padding='same')(h)
    h = MaxPool2D(pool_size=(1, 2))(h)
    h = Conv2D(32, 1, activation='elu')(h)
    h = Dropout(0.5)(h)
    reps.append(Flatten(name='rep7')(h))

    reps.append(ipc)
    h = concatenate(reps)
    h = Dense(1024, activation='elu')(h)
    h = Dropout(0.5)(h)
    h = Dense(1024, activation='elu')(h)
    h = Dropout(0.5)(h)
    out = Dense(1)(h)
    out = add([out, ipc])
    m = Model([ip, ipc], out)
    opt = Adam(lr=1e-3)
    m.compile(loss='mse', optimizer=opt)
    m.summary()
    return m
    convolutional.Conv2D(filters=32,
                         kernel_size=(2, 2),
                         padding='same',
                         strides=(1, 1),
                         activation='relu'))

# create a convolutional layer for 2 dimensions
model.add(
    convolutional.Conv2D(filters=32,
                         kernel_size=(2, 2),
                         padding='same',
                         strides=(1, 1),
                         activation='relu'))

model.add(BatchNormalization())
model.add(Dropout(.1))

# create a max pooling layer for 2 dimensions
model.add(pooling.MaxPooling2D(
    pool_size=(2, 2),
    padding='same',
))
# create a convolutional layer for 2 dimensions
model.add(
    convolutional.Conv2D(filters=64,
                         kernel_size=(2, 2),
                         padding='same',
                         strides=(1, 1),
                         activation='relu'))
# create a convolutional layer for 2 dimensions
model.add(
Example #16
def main():
    # used to get the session/graph data from keras
    K.set_learning_phase(0)
    # get the data in a Pandas dataframe
    raw_data = pd.read_csv(FLAGS.csv_file)

    # convert to one hot vectors
    emotion_array = process_emotion(raw_data[['emotion']])
    # convert to a 48x48 float matrix
    pixel_array = process_pixels(raw_data[['pixels']])

    # split for test/train
    y_train, y_test = split_for_test(emotion_array)
    x_train_matrix, x_test_matrix = split_for_test(pixel_array)

    n_train = int(len(x_train_matrix))
    n_test = int(len(x_test_matrix))

    x_train_input = duplicate_input_layer(x_train_matrix, n_train)
    x_test_input = duplicate_input_layer(x_test_matrix, n_test)

    # VGG16. include_top=False so the output is the 512-d pooled feature vector; use the pretrained ImageNet weights
    vgg16 = VGG16(include_top=False, input_shape=(48, 48, 3), pooling='avg', weights='imagenet')

    # get vgg16 outputs
    x_train_feature_map = get_vgg16_output(vgg16, x_train_matrix, n_train)
    x_test_feature_map = get_vgg16_output(vgg16, x_test_matrix, n_test)

    # build and train model
    top_layer_model = Sequential()
    top_layer_model.add(Dense(256, input_shape=(512,), activation='relu'))
    top_layer_model.add(Dense(256, input_shape=(256,), activation='relu'))
    top_layer_model.add(Dropout(0.5))
    top_layer_model.add(Dense(128, input_shape=(256,)))
    top_layer_model.add(Dense(NUM_CLASSES, activation='softmax'))

    adamax = Adamax()

    top_layer_model.compile(loss='categorical_crossentropy',
                            optimizer=adamax, metrics=['accuracy'])

    # train
    top_layer_model.fit(x_train_feature_map, y_train,
                        validation_data=(x_train_feature_map, y_train),
                        nb_epoch=FLAGS.n_epochs, batch_size=FLAGS.batch_size)
    # Evaluate
    score = top_layer_model.evaluate(x_test_feature_map,
                                     y_test, batch_size=FLAGS.batch_size)

    print("After top_layer_model training (test set): {}".format(score))

    # Merge two models and create the final_model_final_final
    inputs = Input(shape=(48, 48, 3))
    vg_output = vgg16(inputs)
    print("vg_output: {}".format(vg_output.shape))
    # TODO: the 'pooling' argument of the VGG16 model is important for this to work; otherwise you will have to squash
    # the output from (?, 1, 1, 512) to (?, 512)
    model_predictions = top_layer_model(vg_output)
    final_model = Model(input=inputs, output=model_predictions)
    final_model.compile(loss='categorical_crossentropy',
                        optimizer=adamax, metrics=['accuracy'])
    final_model_score = final_model.evaluate(x_train_input,
                                             y_train, batch_size=FLAGS.batch_size)
    print("Sanity check - final_model (train score): {}".format(final_model_score))

    final_model_score = final_model.evaluate(x_test_input,
                                             y_test, batch_size=FLAGS.batch_size)
    print("Sanity check - final_model (test score): {}".format(final_model_score))
    # config = final_model.get_config()
    # weights = final_model.get_weights()

    # probably don't need to create a new model
    # model_to_save = Model.from_config(config)
    # model_to_save.set_weights(weights)
    model_to_save = final_model

    print("Model input name: {}".format(model_to_save.input))
    print("Model output name: {}".format(model_to_save.output))

    # Save Model
    builder = saved_model_builder.SavedModelBuilder(FLAGS.export_path)
    signature = predict_signature_def(inputs={'images': model_to_save.input},
                                      outputs={'scores': model_to_save.output})
    with K.get_session() as sess:
        builder.add_meta_graph_and_variables(sess=sess,
                                             tags=[tag_constants.SERVING],
                                             signature_def_map={'predict': signature})
        builder.save()
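
# NOTE: duplicate_input_layer() and get_vgg16_output() are not included in this excerpt.
# Based on how they are used (flat 48x48 grayscale pixel rows fed to a 48x48x3 VGG16 with
# average pooling), plausible sketches are shown below; both are assumptions, not the
# original helpers.
import numpy as np

def duplicate_input_layer(array_input, size):
    # reshape flat 48*48 pixel rows to (size, 48, 48, 1) and repeat the gray channel 3 times
    images = array_input.reshape(size, 48, 48, 1)
    return np.repeat(images, 3, axis=3)

def get_vgg16_output(vgg16, array_input, n_samples):
    # run the frozen VGG16 base and return its 512-d pooled feature vectors
    vgg_input = duplicate_input_layer(array_input, n_samples)
    return vgg16.predict(vgg_input)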
print(embedding_weight_matrix.shape)  # Each word will be represented by a 50-d weight vector

#build model        
#vocab_size: 45
#word_embed_size: 50
#seq_maxlen: 16
#weights: Loaded Weight matrix
#left_input: 16
#Now build a Embedding layer
left_input = Input(shape=(Xtrain_left.shape[1],),dtype='int32')
left = Embedding(input_dim=vocab_size, output_dim=word_embed_size, 
                   input_length=seq_maxlen, weights=[embedding_weight_matrix], 
                   trainable = False) (left_input)
print(left_input)
left = LSTM(100, return_sequences=False)(left)
left = Dropout(0.3)(left)

#Now do the same for right side as well
right_input = Input(shape=(Xtrain_right.shape[1],),dtype='int32')

right = Embedding(input_dim=vocab_size, output_dim=word_embed_size, 
                   input_length=seq_maxlen, weights=[embedding_weight_matrix], 
                   trainable = False) (right_input)
right = LSTM(100, return_sequences=False)(right)
right = Dropout(0.3)(right)

x = concatenate([left, right])
x = Dense(10, activation='relu')(x)
output = Dense(1)(x) #Tells the similarity

model = Model(inputs=[left_input, right_input], outputs=output)
Example #18
def build_model(input_layer, start_neurons,block="resnext", DropoutRatio = 0.5,filter_size=32,nClasses=2,weight_decay=1e-4):
    # 101 -> 50
    conv1 = Conv1D(start_neurons * 1, filter_size, activation=None, padding="same",
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(input_layer)
    
    conv1 = bottleneck(conv1,start_neurons * 1, block)
    
    conv1 = Activation(ACTIVATION)(conv1)
    pool1 = MaxPooling1D((2))(conv1)
    pool1 = Dropout(DropoutRatio/2)(pool1)

    # 50 -> 25
    conv2 = Conv1D(start_neurons * 2, filter_size, activation=None, padding="same",
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(pool1)
    
    conv2 = bottleneck(conv2,start_neurons * 2, block)
    
    conv2 = Activation(ACTIVATION)(conv2)
    pool2 = MaxPooling1D((2))(conv2)
    pool2 = Dropout(DropoutRatio)(pool2)

    # 25 -> 12
    conv3 = Conv1D(start_neurons * 4, filter_size, activation=None, padding="same",
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(pool2)

    conv3 = bottleneck(conv3,start_neurons * 4, block)
    
    conv3 = Activation(ACTIVATION)(conv3)
    pool3 = MaxPooling1D((2))(conv3)
    pool3 = Dropout(DropoutRatio)(pool3)

    # 12 -> 6
    conv4 = Conv1D(start_neurons * 8, filter_size, activation=None, padding="same",
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(pool3)
#     conv4 = residual_block(conv4,start_neurons * 8)
#     conv4 = residual_block(conv4,start_neurons * 8)
    conv4 = bottleneck(conv4,start_neurons * 8, block)
    
    conv4 = Activation(ACTIVATION)(conv4)
    pool4 = MaxPooling1D((2))(conv4)
    pool4 = Dropout(DropoutRatio)(pool4)

    # Middle
    convm = Conv1D(start_neurons * 16, filter_size, activation=None, padding="same",
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(pool4)
#     convm = residual_block(convm,start_neurons * 16)
#     convm = residual_block(convm,start_neurons * 16)
    convm = bottleneck(convm,start_neurons * 16, block)
    
    convm = Activation(ACTIVATION)(convm)
    
    # 6 -> 12
    #deconv4 = Conv2DTranspose(start_neurons * 8, (3, 3), strides=(2, 2), padding="same")(convm)
    deconv4 = Conv1D(start_neurons * 8, filter_size,activation='relu', padding='same',
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay)
                     )(UpSampling1D(size=2)(convm))#kernel_initializer='he_normal'
    
    uconv4 = concatenate([deconv4, conv4])
    uconv4 = Dropout(DropoutRatio)(uconv4)
    
    uconv4 = Conv1D(start_neurons * 8, filter_size, activation=None, padding="same",
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(uconv4)
#     uconv4 = residual_block(uconv4,start_neurons * 8)
#     uconv4 = residual_block(uconv4,start_neurons * 8)
    uconv4 = bottleneck(uconv4,start_neurons * 8, block)
    
    uconv4 = Activation(ACTIVATION)(uconv4)
    
    # 12 -> 25
    #deconv3 = Conv2DTranspose(start_neurons * 4, (3, 3), strides=(2, 2), padding="same")(uconv4)
    #deconv3 = Conv2DTranspose(start_neurons * 4, (3, 3), strides=(2, 2), padding="valid")(uconv4)
    deconv3 = Conv1D(start_neurons * 4, filter_size, activation='relu', padding='same',
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay)
                     )(UpSampling1D(size=2)(uconv4))#kernel_initializer='he_normal'
    uconv3 = concatenate([deconv3, conv3])    
    uconv3 = Dropout(DropoutRatio)(uconv3)
    
    uconv3 = Conv1D(start_neurons * 4, filter_size, activation=None, padding="same",
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(uconv3)
#     uconv3 = residual_block(uconv3,start_neurons * 4)
#     uconv3 = residual_block(uconv3,start_neurons * 4)
    uconv3 = bottleneck(uconv3,start_neurons * 4, block)

    uconv3 = Activation(ACTIVATION)(uconv3)

    # 25 -> 50
    #deconv2 = Conv2DTranspose(start_neurons * 2, (3, 3), strides=(2, 2), padding="same")(uconv3)
    deconv2 = Conv1D(start_neurons * 2, filter_size, activation='relu', padding='same',
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay)
                     )(UpSampling1D(size=2)(uconv3))#kernel_initializer='he_normal'
    uconv2 = concatenate([deconv2, conv2])
        
    uconv2 = Dropout(DropoutRatio)(uconv2)
    uconv2 = Conv1D(start_neurons * 2, filter_size, activation=None, padding="same",
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(uconv2)
#     uconv2 = residual_block(uconv2,start_neurons * 2)
#     uconv2 = residual_block(uconv2,start_neurons * 2)
    uconv2 = bottleneck(uconv2,start_neurons * 2, block)

    uconv2 = Activation(ACTIVATION)(uconv2)
    
    # 50 -> 101
    #deconv1 = Conv2DTranspose(start_neurons * 1, (3, 3), strides=(2, 2), padding="same")(uconv2)
    #deconv1 = Conv2DTranspose(start_neurons * 1, (3, 3), strides=(2, 2), padding="valid")(uconv2)
    deconv1 = Conv1D(start_neurons * 1, filter_size, activation='relu', padding='same',
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay)
                     )(UpSampling1D(size=2)(uconv2))#kernel_initializer='he_normal'
    uconv1 = concatenate([deconv1, conv1])
    
    uconv1 = Dropout(DropoutRatio)(uconv1)
    uconv1 = Conv1D(start_neurons * 1, filter_size, activation=None, padding="same",
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(uconv1)
#     uconv1 = residual_block(uconv1,start_neurons * 1)
#     uconv1 = residual_block(uconv1,start_neurons * 1)
    uconv1 = bottleneck(uconv1,start_neurons * 1, block)
    
    uconv1 = Activation(ACTIVATION)(uconv1)
    
    uconv1 = Dropout(DropoutRatio/2)(uconv1)
    
    #******************* Deep Super Vision ******************#
    hypercolumn = concatenate(
        [
            uconv1,
            Conv1D(start_neurons * 2, filter_size, activation='relu', padding='same',use_bias=False,
                kernel_regularizer=l2(weight_decay))(UpSampling1D(size=2)(uconv2)),
            Conv1D(start_neurons * 4, filter_size, activation='relu', padding='same',use_bias=False,
                kernel_regularizer=l2(weight_decay))(UpSampling1D(size=4)(uconv3)),
            Conv1D(start_neurons * 8, filter_size, activation='relu', padding='same',use_bias=False,
                kernel_regularizer=l2(weight_decay))(UpSampling1D(size=8)(uconv4))#kernel_initializer='he_normal',
#             Lambda(lambda image: ktf.image.resize_images(image, (img_size_target, img_size_target)))(uconv2),
#             Lambda(lambda image: ktf.image.resize_images(image, (img_size_target, img_size_target)))(uconv3),
#             Lambda(lambda image: ktf.image.resize_images(image, (img_size_target, img_size_target)))(uconv4)
        ]
    )
    hypercolumn = Dropout(0.5)(hypercolumn)
    hypercolumn = Conv1D(start_neurons * 1, filter_size, padding="same", activation='relu',use_bias=False,
               kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(hypercolumn)
    output_layer_noActi = Conv1D(1, 1, padding="same", activation=None,use_bias=False,
               kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(hypercolumn)
    output_layer =  Activation('sigmoid', name='seg_output')(output_layer_noActi)  
        
#     #output_layer = Conv1D(1, 1, padding="same", activation="sigmoid")(uconv1)
#     output_layer = Conv1D(nClasses, 1, activation='relu', padding='same')(uconv1)#kernel_initializer='he_normal'
#     output_layer = core.Reshape((nClasses, input_length))(output_layer)
#     output_layer = core.Permute((2, 1))(output_layer)
#     output_layer = core.Activation('softmax')(output_layer)
#     #model = Model(inputs=inputs, outputs=conv9)
    
    return output_layer
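
# NOTE: bottleneck() is not included in this excerpt. A minimal 1D residual-bottleneck
# sketch consistent with how it is called above; the ResNeXt grouped-convolution variant
# selected by `block` is omitted, so this is an assumption, not the original helper.
from keras.layers import Conv1D, BatchNormalization, Activation, add

def bottleneck(x, filters, block="resnext"):
    shortcut = x
    y = Conv1D(filters // 4, 1, padding='same', kernel_initializer='he_normal')(x)
    y = BatchNormalization()(y)
    y = Activation(ACTIVATION)(y)
    y = Conv1D(filters // 4, 3, padding='same', kernel_initializer='he_normal')(y)
    y = BatchNormalization()(y)
    y = Activation(ACTIVATION)(y)
    y = Conv1D(filters, 1, padding='same', kernel_initializer='he_normal')(y)
    y = BatchNormalization()(y)
    return add([shortcut, y])  # residual connection; the caller applies the final activation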
Example #19
def CNN_conf(cfg, hist_save, epochs=1, test=False, gpu_no=0):
    verbose = 1  #CHRIS TODO set this to 0
    batch_size = 100
    num_classes = 10
    epochs = 2000  #CHRIS increased from 1 to 5 to make results less random and noisy
    data_augmentation = False
    num_predictions = 20
    logfile = 'mnist-cnn.log'
    savemodel = False

    # The data, shuffled and split between train and test sets:
    (x_train, y_train), (x_test,
                         y_test) = cifar10.load_data()  #mnist.load_data()

    #CHRIS reshape only needed for mnist
    #x_train = x_train.reshape(x_train.shape[0],x_train.shape[1],x_train.shape[2],1)
    #x_test = x_test.reshape(x_test.shape[0],x_test.shape[1],x_test.shape[2],1)

    cfg_df = pd.DataFrame(cfg, index=[0])

    # Convert class vectors to binary class matrices.
    y_train = keras.utils.to_categorical(y_train.flatten(), num_classes)
    y_test = keras.utils.to_categorical(y_test.flatten(), num_classes)

    #print('skip steps:')
    #print([cfg['skint_0'],cfg['skint_1'],cfg['skint_2']],[cfg['skst_0'],cfg['skst_1'],cfg['skst_2']])
    #(skip_ints,skip_ints_count) passed to Skip_manager constructor TODO get from cfg vector
    skip_manager = Skip_manager(
        [cfg['skint_0'], cfg['skint_1'], cfg['skint_2']],
        [cfg['skst_0'], cfg['skst_1'], cfg['skst_2']])

    input1 = keras.layers.Input(shape=(x_train.shape[1], x_train.shape[2],
                                       x_train.shape[3]))

    layer = Dropout(cfg['dropout_0'], input_shape=x_train.shape[1:])(input1)
    layer = skip_manager.connect_skip(layer)
    #CHRIS removed following:
    #layer = Conv2D(cfg['filters_0'], (cfg['k_0'], cfg['k_0']), padding='same',kernel_regularizer=l2(cfg['l2']), bias_regularizer=l2(cfg['l2']))(layer)
    #layer = Activation(cfg['activation'])(layer)#kernel_initializer='random_uniform',
    #layer = skip_manager.connect_skip(layer)

    #stack 0
    for i in range(cfg['stack_0']):
        layer = Conv2D(cfg['filters_0'], (cfg['k_0'], cfg['k_0']),
                       padding='same',
                       kernel_regularizer=l2(cfg['l2']),
                       bias_regularizer=l2(cfg['l2']))(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = skip_manager.connect_skip(layer)
    if (cfg['stack_0'] > 0):
        #maxpooling as cnn
        if (cfg['no_pooling']):
            layer = Conv2D(cfg['filters_1'], (cfg['k_1'], cfg['k_1']),
                           strides=(cfg['s_0'], cfg['s_0']),
                           padding='same',
                           kernel_regularizer=l2(cfg['l2']),
                           bias_regularizer=l2(cfg['l2']))(layer)
        else:
            layer = MaxPooling2D(pool_size=(cfg['k_1'], cfg['k_1']),
                                 strides=(cfg['s_0'], cfg['s_0']),
                                 padding='same')(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = Dropout(cfg['dropout_1'])(layer)
        layer = skip_manager.connect_skip(layer)

    #stack 1
    for i in range(cfg['stack_1']):
        layer = Conv2D(cfg['filters_2'], (cfg['k_2'], cfg['k_2']),
                       padding='same',
                       kernel_regularizer=l2(cfg['l2']),
                       bias_regularizer=l2(cfg['l2']))(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = skip_manager.connect_skip(layer)
    if (cfg['stack_1'] > 0):
        if (cfg['no_pooling']):
            layer = Conv2D(cfg['filters_3'], (cfg['k_3'], cfg['k_3']),
                           strides=(cfg['s_1'], cfg['s_1']),
                           padding='same',
                           kernel_regularizer=l2(cfg['l2']),
                           bias_regularizer=l2(cfg['l2']))(layer)
        else:
            layer = MaxPooling2D(pool_size=(cfg['k_3'], cfg['k_3']),
                                 strides=(cfg['s_1'], cfg['s_1']),
                                 padding='same')(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = Dropout(cfg['dropout_2'])(layer)
        layer = skip_manager.connect_skip(layer)

    #stack 2
    for i in range(cfg['stack_2']):
        layer = Conv2D(cfg['filters_4'], (cfg['k_4'], cfg['k_4']),
                       padding='same',
                       kernel_regularizer=l2(cfg['l2']),
                       bias_regularizer=l2(cfg['l2']))(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = skip_manager.connect_skip(layer)
    if (cfg['stack_2'] > 0):
        if (cfg['no_pooling']):
            layer = Conv2D(cfg['filters_5'], (cfg['k_5'], cfg['k_5']),
                           strides=(cfg['s_2'], cfg['s_2']),
                           padding='same',
                           kernel_regularizer=l2(cfg['l2']),
                           bias_regularizer=l2(cfg['l2']))(layer)
        else:
            layer = MaxPooling2D(pool_size=(cfg['k_5'], cfg['k_5']),
                                 strides=(cfg['s_2'], cfg['s_2']),
                                 padding='same')(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = Dropout(cfg['dropout_3'])(layer)
        layer = skip_manager.connect_skip(layer)

    #stack 3
    for i in range(cfg['stack_3']):
        layer = Conv2D(cfg['filters_6'], (cfg['k_6'], cfg['k_6']),
                       padding='same',
                       kernel_regularizer=l2(cfg['l2']),
                       bias_regularizer=l2(cfg['l2']))(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = skip_manager.connect_skip(layer)
    if (cfg['stack_3'] > 0):
        if (cfg['no_pooling']):
            layer = Conv2D(cfg['filters_7'], (cfg['k_7'], cfg['k_7']),
                           strides=(cfg['s_3'], cfg['s_3']),
                           padding='same',
                           kernel_regularizer=l2(cfg['l2']),
                           bias_regularizer=l2(cfg['l2']))(layer)
        else:
            layer = MaxPooling2D(pool_size=(cfg['k_7'], cfg['k_7']),
                                 strides=(cfg['s_3'], cfg['s_3']),
                                 padding='same')(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = Dropout(cfg['dropout_4'])(layer)
        layer = skip_manager.connect_skip(layer)

    #stack 4
    for i in range(cfg['stack_4']):
        layer = Conv2D(cfg['filters_8'], (cfg['k_8'], cfg['k_8']),
                       padding='same',
                       kernel_regularizer=l2(cfg['l2']),
                       bias_regularizer=l2(cfg['l2']))(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = skip_manager.connect_skip(layer)
    if (cfg['stack_4'] > 0):
        if (cfg['no_pooling']):
            layer = Conv2D(cfg['filters_9'], (cfg['k_9'], cfg['k_9']),
                           strides=(cfg['s_4'], cfg['s_4']),
                           padding='same',
                           kernel_regularizer=l2(cfg['l2']),
                           bias_regularizer=l2(cfg['l2']))(layer)
        else:
            layer = MaxPooling2D(pool_size=(cfg['k_9'], cfg['k_9']),
                                 strides=(cfg['s_4'], cfg['s_4']),
                                 padding='same')(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = Dropout(cfg['dropout_5'])(layer)
        layer = skip_manager.connect_skip(layer)

    #stack 5
    for i in range(cfg['stack_5']):
        layer = Conv2D(cfg['filters_10'], (cfg['k_10'], cfg['k_10']),
                       padding='same',
                       kernel_regularizer=l2(cfg['l2']),
                       bias_regularizer=l2(cfg['l2']))(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = skip_manager.connect_skip(layer)
    if (cfg['stack_5'] > 0):
        if (cfg['no_pooling']):
            layer = Conv2D(cfg['filters_11'], (cfg['k_11'], cfg['k_11']),
                           strides=(cfg['s_5'], cfg['s_5']),
                           padding='same',
                           kernel_regularizer=l2(cfg['l2']),
                           bias_regularizer=l2(cfg['l2']))(layer)
        else:
            layer = MaxPooling2D(pool_size=(cfg['k_11'], cfg['k_11']),
                                 strides=(cfg['s_5'], cfg['s_5']),
                                 padding='same')(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = Dropout(cfg['dropout_6'])(layer)
        layer = skip_manager.connect_skip(layer)

    #stack 6
    for i in range(cfg['stack_6']):
        layer = Conv2D(cfg['filters_12'], (cfg['k_12'], cfg['k_12']),
                       padding='same',
                       kernel_regularizer=l2(cfg['l2']),
                       bias_regularizer=l2(cfg['l2']))(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = skip_manager.connect_skip(layer)
    if (cfg['stack_6'] > 0):
        if (cfg['no_pooling']):
            layer = Conv2D(cfg['filters_13'], (cfg['k_13'], cfg['k_13']),
                           strides=(cfg['s_6'], cfg['s_6']),
                           padding='same',
                           kernel_regularizer=l2(cfg['l2']),
                           bias_regularizer=l2(cfg['l2']))(layer)
        else:
            layer = MaxPooling2D(pool_size=(cfg['k_13'], cfg['k_13']),
                                 strides=(cfg['s_6'], cfg['s_6']),
                                 padding='same')(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = Dropout(cfg['dropout_7'])(layer)
        layer = skip_manager.connect_skip(layer)

    #global averaging
    if (cfg['global_pooling']):
        layer = GlobalAveragePooling2D()(layer)
    else:
        layer = Flatten()(layer)

    #head
    if cfg['dense_size_0'] > 0:
        layer = Dense(cfg['dense_size_0'],
                      kernel_regularizer=l2(cfg['l2']),
                      bias_regularizer=l2(cfg['l2']))(layer)
        layer = Activation(cfg['activ_dense'])(layer)
    if cfg['dense_size_1'] > 0:
        layer = Dense(cfg['dense_size_1'],
                      kernel_regularizer=l2(cfg['l2']),
                      bias_regularizer=l2(cfg['l2']))(layer)
        layer = Activation(cfg['activ_dense'])(layer)
    layer = Dense(num_classes,
                  kernel_regularizer=l2(cfg['l2']),
                  bias_regularizer=l2(cfg['l2']))(layer)
    layer = Activation(cfg['activ_dense'])(layer)

    cfg['decay'] = cfg['lr'] / float(epochs)

    def step_decay(epoch):
        initial_lrate = cfg['lr']
        drop = 0.1
        epochs_drop = 20.0
        lrate = initial_lrate * math.pow(drop,
                                         math.floor((1 + epoch) / epochs_drop))
        return lrate

    callbacks = []
    if (cfg['step'] == True):
        callbacks = [LearningRateScheduler(step_decay)]
        cfg['decay'] = 0.

    # initiate RMSprop optimizer
    #opt = keras.optimizers.rmsprop(lr= cfg['lr'], decay=cfg['decay'])
    opt = keras.optimizers.SGD(lr=cfg['lr'],
                               momentum=0.9,
                               decay=cfg['decay'],
                               nesterov=False)

    model = keras.models.Model(inputs=input1, outputs=layer)

    # Let's train the model using RMSprop
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])

    if test:
        return model  #TODO remove this, just for testing

    #print("amount of parameters:")
    #print(model.count_params())
    #CHRIS test if gpu has enough memory
    nvmlInit()
    handle = nvmlDeviceGetHandleByIndex(int(gpu_no))
    meminfo = nvmlDeviceGetMemoryInfo(handle)
    #max_size = meminfo.total #6689341440
    if meminfo.free / 1024.**2 < 1.0:
        print('gpu is already in use')
    nvmlShutdown()
    #if model.count_params()*4*2 >= max_size:#CHRIS *4*2: 4 byte per parameter times 2 for backpropagation
    #print('network too large for memory')
    #return 1000000000.0*(model.count_params()*4*2/max_size), 5.0*(model.count_params()*4*2/max_size)

    #max_size = 32828802 * 2 #CHRIS twice as large as RESnet-34-like implementation
    #max_size = 129200130 #CHRIS twice as wide as RESnet-34-like implementation with batchsize=10, one network of this size was able to be ran on tritanium gpu
    max_size = 130374394  #CHRIS twice as wide as RESnet-34-like implementation with batchsize=100, one network of this size was able to be ran on tritanium gpu
    #if model.count_params() > max_size:
    #print('network too large for implementation')
    #return 1000000000.0*(model.count_params()/max_size), 5.0*(model.count_params()/max_size)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255.
    x_test /= 255.

    hist_func = TimedAccHistory()

    if not data_augmentation:
        print('Not using data augmentation.')
        start = time.time()
        hist = model.fit(x_train,
                         y_train,
                         batch_size=batch_size,
                         epochs=epochs,
                         validation_data=(x_test, y_test),
                         callbacks=[hist_func],
                         verbose=verbose,
                         shuffle=True)
        stop = time.time()
    else:
        print('Using real-time data augmentation.')
        # This will do preprocessing and realtime data augmentation:
        datagen = ImageDataGenerator(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
            width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
            height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
            horizontal_flip=True,  # randomly flip images horizontally
            vertical_flip=False)  # do not flip images vertically
        datagen.fit(x_train)

        # Fit the model on the batches generated by datagen.flow().
        start = time.time()
        hist = model.fit_generator(datagen.flow(x_train,
                                                y_train,
                                                batch_size=batch_size),
                                   verbose=verbose,
                                   callbacks=callbacks,
                                   epochs=epochs,
                                   steps_per_epoch=len(x_train) / batch_size,
                                   validation_data=(x_test, y_test))
        stop = time.time()

    timer = stop - start
    #print('run-time:')
    #print(timer)
    hist_save.append([hist.history['val_acc'], hist_func.timed])

    if savemodel:
        model.save('best_model_mnist.h5')
    maxval = max(hist.history['val_acc'])
    #loss = -1 * math.log( 1.0 - max(hist.history['val_acc']) ) #np.amin(hist.history['val_loss'])
    loss = -1 * math.log(max(hist.history['val_acc']))  #CHRIS minimizing this will maximize accuracy
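    # -log is monotonically decreasing on (0, 1], so minimizing -log(val_acc) maximizes
    # val_acc (e.g. val_acc 0.90 -> loss ~0.105, val_acc 0.99 -> loss ~0.010).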
    #print('max val_acc:')
    #print(max(hist.history['val_acc']))
    #print('loss:')
    #print(loss)
    #perf5 = max(hist.history['val_top_5_categorical_accuracy'])

    if logfile is not None:
        log_file = logfile  #os.path.join(data_des, logfile)
        cfg_df['perf'] = maxval

        # save the configurations to log file
        if os.path.isfile(log_file):
            cfg_df.to_csv(log_file, mode='a', header=False, index=False)
        else:
            cfg_df.to_csv(log_file, mode='w', header=True, index=False)
    return timer, loss
Beispiel #20
0
# add step elements into train and test
target_data = np.append(target_data, np.repeat(target_data[-1, ], step))
training_data = np.append(training_data, np.repeat(training_data[-1, ], step))
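
# convertToMatrix is not defined in this excerpt; a minimal sketch (an assumption) of
# the usual sliding-window helper that matches its use below (X holds windows of length
# `step`, Y the value that follows each window):
def convertToMatrix(data, step):
    X, Y = [], []
    for i in range(len(data) - step):
        X.append(data[i:i + step])
        Y.append(data[i + step])
    return np.array(X), np.array(Y)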

X_train, y_train = convertToMatrix(training_data, step)
X_test, y_test = convertToMatrix(target_data, step)

X_train = np.reshape(X_train, (X_train.shape[0], 1, X_train.shape[1]))
X_test = np.reshape(X_test, (X_test.shape[0], 1, X_test.shape[1]))

model = Sequential()
model.add(SimpleRNN(units=128, input_shape=(1, step), activation="relu"))
model.add(Dense(64, activation="relu"))
model.add(Dense(64, activation="relu"))
model.add(Dense(32, activation="relu"))
model.add(Dropout(0.05))
model.add(Dense(32, activation="relu"))
model.add(Dense(16, activation="relu"))
model.add(Dense(8, activation="relu"))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
model.summary()

# Train the model
optimizer_history = model.fit(X_train,
                              y_train,
                              epochs=300,
                              batch_size=1500)
alpha = RepeatVector(300)(alpha)
alpha = Reshape([73, 300])(alpha)
print alpha.shape
att_output = multiply([x_embed, alpha])


c_models = merge([distanceModel1, distanceModel2, POSModel2, att_output], mode='concat', concat_axis=-1)

c_models = Convolution1D(nb_filter=nb_filter,
                        filter_length=filter_length,
                        border_mode='same',
                        activation='tanh',
                        subsample_length=1)(c_models)

c_models = Bidirectional(LSTM(150))(c_models)
c_models = Dropout(0.25)(c_models)
c_models = Dense(n_out, activation='softmax')(c_models)

main_model = Model(inputs=[Input1, Input2, Input6, Input3, Input4, Input5], outputs=c_models)
main_model.compile(loss='categorical_crossentropy',optimizer='rmsprop')
main_model.summary()
SVG(model_to_dot(main_model, show_shapes=True, show_layer_names=False).create(prog='dot', format='svg'))


################################################## Model Complete ####################################################


################################################## Training begins ###################################################

print "Start training"
max_prec, max_rec, max_acc, max_f1 = 0,0,0,0
def get_sentence_attention_combined_output(word_model, word_length,
                                           sent_length, n_classes):
    #x = Permute((2,1))(si_vects)
    nclasses = n_classes
    input = Input(shape=(sent_length, word_length), dtype='int32')
    print(' input to sentence attn network', word_model)
    attentions_pred = []
    #print(output.summary())
    si_vects = TimeDistributed(word_model)(input)
    print('Shape after si_vects', si_vects.shape)
    u_it = TimeDistributed(TimeDistributed(Dense(100,
                                                 activation='tanh')))(si_vects)
    print('Shape after word vector', u_it.shape)
    #h_it = TimeDistributed(Reshape((100,word_length)))(si_vects)
    #print('Shape after reshape word vector',h_it.shape)

    attn_final_word = [
        TimeDistributed(ATTNWORD(1))(u_it) for i in range(nclasses)
    ]
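    # ATTNWORD is a custom attention layer defined elsewhere in this project; from its
    # use here (ATTNWORD(1) applied per timestep), it is assumed to produce a single
    # attention weight for each word position.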
    #a_it = Reshape(( word_length, 1))(a_it)
    #h_it = Reshape((word_length, 512))(h_it)
    print('ATTN Shape', attn_final_word[0].shape)
    attn_final_word = [
        Multiply()([si_vects, attn_final_word[i]]) for i in range(nclasses)
    ]  #Multiply()([h_it,a_it])
    print('Multi word Shape', attn_final_word[0].shape)
    attn_final_word = [
        Reshape((sent_length, 100, word_length))(attn_final_word[i])
        for i in range(nclasses)
    ]
    print('Shape of the att1 is {}'.format(attn_final_word[0].shape))
    attn_final_word = [
        Lambda(lambda x: K.sum(x, axis=3))(attn_final_word[i])
        for i in range(nclasses)
    ]
    print('Shape of the lambda word is {}'.format(attn_final_word[0].shape))
    attn_sents_for_all_classes = []
    for i in range(nclasses):
        x = Bidirectional(GRU(50, return_sequences=True))(attn_final_word[i])
        #x = Bidirectional(LSTM(256,return_sequences=True))(x)
        print('Shape after BD LSTM', x.shape)
        #x1 = Permute((2,1))(x)
        #print('Shape after permute',x1.shape)
        u_it = TimeDistributed(Dense(100, activation='tanh'))(x)
        print('Shape after word vector', u_it.shape)
        #h_it = Reshape((100,sent_length))(x)
        attn_final_sent = ATTNWORD(1)(u_it)
        print('Shape of the sent att is {}'.format(attn_final_sent.shape))
        #attentions_pred.append(attn_final)
        attn_final_sent = Multiply()([x, attn_final_sent])
        print('Shape of the multi sent att is {}'.format(
            attn_final_sent.shape))
        attn_final_sent = Reshape((100, sent_length))(attn_final_sent)
        attn_final_sent = Lambda(lambda x: K.sum(x, axis=2))(attn_final_sent)
        print('Shape of the lambda sent att is {}'.format(
            attn_final_sent.shape))
        attn_sents_for_all_classes.append(attn_final_sent)
    x = Concatenate()(attn_sents_for_all_classes)
    x = Dense(256, activation='relu')(x)
    x = Dropout(0.2)(x)
    x = Dense(128, activation='relu')(x)
    x = Dropout(0.2)(x)
    #x = Dense(128, activation='relu')(x)
    #x = Dropout(0.2)(x)
    #x = Dense(64, activation='relu')(x)
    #x = Dropout(0.2)(x)
    x = Dense(64, activation='relu')(x)
    preds = Dense(nclasses, activation='sigmoid')(x)

    model = Model(input, preds)

    return model
    def getModel(self):
        inputs = Input(shape=self.size_image + (1,))

        # ********** DOWNSAMPLING PATH **********
        last_layer = inputs
        list_last_convlayer_downpath = []

        for ilayer in range(self.num_layers_depth-1):

            # convolutional layers
            for iconv in range(self.num_convlayers_downpath[ilayer]):

                last_layer = Convolution3D(filters=self.num_featuremaps_layers[ilayer],
                                           kernel_size=self.size_convfilter_downpath_layers[ilayer],
                                           padding=self.type_padding,
                                           activation=self.activation_hidden)(last_layer)
            #endfor

            if self.isDropout:
                last_layer = Dropout(rate=self.dropout_rate)(last_layer)
            if self.isBatchNormalize:
                last_layer = BatchNormalization()(last_layer)

            # store last convolutional layer needed for upsampling path
            list_last_convlayer_downpath.append(last_layer)

            # pooling layer
            last_layer = MaxPooling3D(pool_size=self.size_pooling_layers[ilayer],
                                      padding=self.type_padding)(last_layer)
        #endfor
        # ********** DOWNSAMPLING PATH **********

        # deepest convolutional layers
        ilayer = self.num_layers_depth - 1
        for j in range(self.num_convlayers_downpath[ilayer]):

            last_layer = Convolution3D(filters=self.num_featuremaps_layers[ilayer],
                                       kernel_size=self.size_convfilter_downpath_layers[ilayer],
                                       padding=self.type_padding,
                                       activation=self.activation_hidden)(last_layer)
        #endfor

        if self.isDropout:
            last_layer = Dropout(rate=self.dropout_rate)(last_layer)
        if self.isBatchNormalize:
            last_layer = BatchNormalization()(last_layer)

        # ********** UPSAMPLING PATH **********
        #
        for ilayer in range(self.num_layers_depth-2, -1, -1):

            # upsampling layer
            last_layer = UpSampling3D(size=self.size_pooling_layers[ilayer])(last_layer)

            # merge layers
            if self.type_padding=='valid':
                # need to crop the downpath layer to the size of uppath layer
                shape_cropping = self.get_limits_cropImage_merge_downLayer_validConv(ilayer)
                last_layer_downpath = Cropping3D(cropping=shape_cropping)(list_last_convlayer_downpath[ilayer])

            elif self.type_padding=='same':
                last_layer_downpath = list_last_convlayer_downpath[ilayer]

            # Keras 2: the old merge() API was removed; use concatenate() from keras.layers
            last_layer = concatenate([last_layer, last_layer_downpath], axis=-1)

            # convolutional layers
            for j in range(self.num_convlayers_downpath[ilayer]):

                last_layer = Convolution3D(filters=self.num_featuremaps_layers[ilayer],
                                           kernel_size=self.size_convfilter_uppath_layers[ilayer],
                                           padding=self.type_padding,
                                           activation=self.activation_hidden)(last_layer)
            #endfor

            if self.isDropout:
                last_layer = Dropout(rate=self.dropout_rate)(last_layer)
            if self.isBatchNormalize:
                last_layer = BatchNormalization()(last_layer)
        #endfor
        #  ********** UPSAMPLING PATH **********

        outputs = Convolution3D(filters=1,
                                kernel_size=(1, 1, 1),
                                padding=self.type_padding,
                                activation=self.activation_output)(last_layer)

        # return complete model
        return Model(inputs=inputs, outputs=outputs)
Beispiel #24
0
    def Initialize(self):
        '''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must be initialized.'''

        self.session = K.get_session()
        self.graph = tf.get_default_graph()

        self.SetStartDate(2018, 8, 1)  #Set Start Date
        self.SetEndDate(2018, 11, 21)  #Set End Date
        self.SetCash(100000)  #Set Strategy Cash

        ## start the Keras/ Tensorflow session
        self.session = K.get_session()
        self.graph = tf.get_default_graph()

        ## set the currency pair that we are trading, and the correlated currency pair
        self.currency = "AUDUSD"
        self.AddForex(self.currency, Resolution.Daily)

        self.correl_currency = "USDCHF"
        self.AddForex(self.correl_currency, Resolution.Daily)

        ## define a long list, short list and portfolio
        self.long_list, self.short_list = [], []

        # Initialise indicators
        self.rsi = RelativeStrengthIndex(9)
        self.bb = BollingerBands(14, 2, 2)
        self.macd = MovingAverageConvergenceDivergence(12, 26, 9)
        self.stochastic = Stochastic(14, 3, 3)
        self.ema = ExponentialMovingAverage(9)

        ## Arrays to store the past indicators
        prev_rsi, prev_bb, prev_macd, lower_bb, upper_bb, sd_bb, prev_stochastic, prev_ema = [],[],[],[],[],[],[],[]

        ## Make history calls for both currency pairs
        self.currency_data = self.History(
            [self.currency], 150,
            Resolution.Daily)  # Drop the first 20 for indicators to warm up
        self.correl_data = self.History([self.correl_currency], 150,
                                        Resolution.Daily)

        ## save the most recent open and close
        ytd_open = self.currency_data["open"][-1]
        ytd_close = self.currency_data["close"][-1]

        ## remove yesterday's data. We will query this onData
        self.currency_data = self.currency_data[:-1]
        self.correl_data = self.correl_data[:-1]

        ## iterate over past data to update the indicators
        for tup in self.currency_data.loc[self.currency].itertuples():
            # making Ibasedatabar for stochastic
            bar = QuoteBar(
                tup.Index, self.currency,
                Bar(tup.bidclose, tup.bidhigh, tup.bidlow, tup.bidopen), 0,
                Bar(tup.askclose, tup.askhigh, tup.asklow, tup.askopen), 0,
                timedelta(days=1))

            self.stochastic.Update(bar)
            prev_stochastic.append(float(self.stochastic.ToString()))

            self.rsi.Update(tup.Index, tup.close)
            prev_rsi.append(float(self.rsi.ToString()))

            self.bb.Update(tup.Index, tup.close)
            prev_bb.append(float(self.bb.ToString()))
            lower_bb.append(float(self.bb.LowerBand.ToString()))
            upper_bb.append(float(self.bb.UpperBand.ToString()))
            sd_bb.append(float(self.bb.StandardDeviation.ToString()))

            self.macd.Update(tup.Index, tup.close)
            prev_macd.append(float(self.macd.ToString()))

            self.ema.Update(tup.Index, tup.close)
            prev_ema.append(float(self.ema.ToString()))

        ## Forming the Indicators df
        ## This is common to the Price Prediction
        rsi_df = pd.DataFrame(prev_rsi, columns=["rsi"])
        macd_df = pd.DataFrame(prev_macd, columns=["macd"])
        upper_bb_df = pd.DataFrame(upper_bb, columns=["upper_bb"])
        lower_bb_df = pd.DataFrame(lower_bb, columns=["lower_bb"])
        sd_bb_df = pd.DataFrame(sd_bb, columns=["sd_bb"])
        stochastic_df = pd.DataFrame(prev_stochastic, columns=["stochastic"])
        ema_df = pd.DataFrame(prev_ema, columns=["ema"])

        self.indicators_df = pd.concat([
            rsi_df, macd_df, upper_bb_df, lower_bb_df, sd_bb_df, stochastic_df,
            ema_df
        ],
                                       axis=1)
        self.indicators_df = self.indicators_df.iloc[20:]
        self.indicators_df.reset_index(inplace=True, drop=True)

        ## Currency Data Price
        self._currency_data = deepcopy(self.currency_data)
        self._currency_data = self._currency_data.reset_index(level=[0, 1],
                                                              drop=True)

        self._currency_data.drop(columns=[
            "askopen", "askhigh", "asklow", "askclose", "bidopen", "bidhigh",
            "bidlow", "bidclose"
        ],
                                 inplace=True)
        self._currency_data = self._currency_data.iloc[20:]
        self._currency_data.reset_index(inplace=True, drop=True)

        ## saving the previous 6 days OHLC for the price prediction model
        _close_prev_prices = self._previous_prices(
            "close", self._currency_data["close"], 6)
        _open_prev_prices = self._previous_prices("open",
                                                  self._currency_data["open"],
                                                  6)
        _high_prev_prices = self._previous_prices("high",
                                                  self._currency_data["high"],
                                                  6)
        _low_prev_prices = self._previous_prices("low",
                                                 self._currency_data["low"], 6)

        _all_prev_prices = pd.concat([
            _close_prev_prices, _open_prev_prices, _high_prev_prices,
            _low_prev_prices
        ],
                                     axis=1)

        _final_table = self._currency_data.join(_all_prev_prices, how="outer")
        _final_table = _final_table.join(self.indicators_df, how="outer")

        # Drop NaN from feature table
        self._features = _final_table.dropna()

        self._features.reset_index(inplace=True, drop=True)

        # Make labels for LSTM model
        self._labels = self._features["close"]
        self._labels = pd.DataFrame(self._labels)
        self._labels.index -= 1
        self._labels = self._labels[1:]
        _new_row = pd.DataFrame({"close": [ytd_close]})
        self._labels = self._labels.append(_new_row)
        self._labels.reset_index(inplace=True, drop=True)

        # Currency Data Direction
        self.currency_data_direction = self.currency_data.reset_index(
            level=[0, 1], drop=True)

        self.currency_data_direction.drop(columns=[
            "askopen", "askhigh", "asklow", "askclose", "bidopen", "bidhigh",
            "bidlow", "bidclose", "open", "high", "low"
        ],
                                          inplace=True)
        self.currency_data_direction = self.currency_data_direction.iloc[20:]
        self.currency_data_direction.reset_index(inplace=True, drop=True)

        # Correlation Currency Data
        self.correl_data = self.correl_data.reset_index(level=[0, 1],
                                                        drop=True)
        self.correl_data.drop(columns=[
            "askopen", "askhigh", "asklow", "askclose", "bidopen", "bidhigh",
            "bidlow", "bidclose", "open", "high", "low"
        ],
                              inplace=True)
        self.correl_data = self.correl_data.iloc[20:]
        self.correl_data.reset_index(inplace=True, drop=True)
        self.correl_data.rename(index=str,
                                columns={"close": "correl_close"},
                                inplace=True)

        # Close Price Direction Change
        self.close_dir_change = self.direction_change(
            "close", self.currency_data_direction["close"], 11)

        # Correlation Currency Direction Change
        self.correl_dir_change = self.direction_change(
            "correl_close", self.correl_data["correl_close"], 11)

        # Join the tables
        joined_table_direction = self.currency_data_direction.join(
            self.close_dir_change, how="outer")
        joined_table_direction = joined_table_direction.join(
            self.correl_dir_change, how="outer")
        joined_table_direction = joined_table_direction.join(
            self.indicators_df, how="outer")

        # Features Direction
        self.features_direction = joined_table_direction.dropna()
        self.features_direction.reset_index(inplace=True, drop=True)

        ## lowerBB and upperBB should change to the difference
        self.features_direction["lower_bb_diff"] = self.features_direction[
            "close"] - self.features_direction["lower_bb"]
        self.features_direction["upper_bb_diff"] = self.features_direction[
            "upper_bb"] - self.features_direction["close"]
        self.features_direction["ema_diff"] = self.features_direction[
            "ema"] - self.features_direction["close"]

        self.features_direction.drop(columns=["upper_bb", "lower_bb", "ema"],
                                     inplace=True)

        # Make raw df for labels

        self.labels = self.features_direction["close"]
        self.labels = pd.DataFrame(self.labels)
        self.labels.index -= 1

        self.labels = self.labels[1:]

        new_row = pd.DataFrame({"close": [ytd_close]})
        self.labels = self.labels.append(new_row)

        self.labels.reset_index(inplace=True, drop=True)

        ## Form the binary labels: 1 for up and 0 for down
        self.labels_direction_new = pd.DataFrame(columns=["direction"])
        for row in self.labels.iterrows():

            new_close, old_close = row[1], self.features_direction["close"][
                row[0]]
            change = (new_close - old_close)[0]
            percent_change = 100 * change / old_close

            if percent_change >= 0:
                this_df = pd.DataFrame({"direction": [1]})

            elif percent_change < 0:
                this_df = pd.DataFrame({"direction": [0]})

            self.labels_direction_new = self.labels_direction_new.append(
                this_df)

        self.labels_direction_new.reset_index(inplace=True, drop=True)

        ## Test out different features
        self.features_direction.drop(
            columns=["rsi", "stochastic", "close", "sd_bb"], inplace=True)

        self.scaler_X = MinMaxScaler()
        self.scaler_X.fit(self.features_direction)
        scaled_features_direction = self.scaler_X.transform(
            self.features_direction)

        # Hyperparameter fine-tuning
        max_depth = [10, 15, 20, 30]
        n_estimators = [100, 200, 300, 500]
        criterion = ["gini", "entropy"]

        tscv = TimeSeriesSplit(n_splits=4)

        params_df = pd.DataFrame(
            columns=["depth", "n_est", "criterion", "acc_score"])

        for depth in max_depth:
            for n_est in n_estimators:
                for crn in criterion:
                    acc_scores = []
                    for train_index, test_index in tscv.split(
                            scaled_features_direction):
                        X_train, X_test = scaled_features_direction[
                            train_index], scaled_features_direction[test_index]
                        #Y_train, Y_test = labels_direction.loc[train_index], labels_direction.loc[test_index]

                        Y_train, Y_test = self.labels_direction_new[
                            "direction"][
                                train_index], self.labels_direction_new[
                                    "direction"][test_index]

                        Y_train, Y_test = Y_train.astype('int'), Y_test.astype(
                            'int')

                        RF = RandomForestClassifier(criterion=crn,
                                                    n_estimators=n_est,
                                                    max_depth=depth,
                                                    random_state=12345)
                        RF_model = RF.fit(X_train, Y_train)

                        y_pred = RF_model.predict(X_test)

                        acc_score = accuracy_score(Y_test, y_pred)
                        acc_scores.append(acc_score)

                    average_acc = np.mean(acc_scores)
                    # self.Debug("ACC")
                    # self.Debug(average_acc)
                    ## make this df for cells, epoch and mse and append to params_df
                    this_df = pd.DataFrame({
                        "depth": [depth],
                        "n_est": [n_est],
                        "criterion": [crn],
                        "acc_score": [average_acc]
                    })
                    params_df = params_df.append(this_df)

        opt_values = params_df[params_df['acc_score'] ==
                               params_df['acc_score'].max()]
        opt_depth, opt_n_est, opt_crn = opt_values["depth"][0], opt_values[
            "n_est"][0], opt_values["criterion"][0]

        self.RF = RandomForestClassifier(criterion="gini",
                                         n_estimators=300,
                                         max_depth=10,
                                         random_state=123)
        self.RF_model = self.RF.fit(
            scaled_features_direction,
            self.labels_direction_new["direction"].astype('int'))

        ## Define scaler for this class
        self._scaler_X = MinMaxScaler()
        self._scaler_X.fit(self._features)
        self._scaled_features = self._scaler_X.transform(self._features)

        self._scaler_Y = MinMaxScaler()
        self._scaler_Y.fit(self._labels)
        self._scaled_labels = self._scaler_Y.transform(self._labels)

        ## fine-tune the model to determine hyperparameters
        ## only done once (upon initialize)

        _tscv = TimeSeriesSplit(n_splits=2)
        _cells = [100, 200]
        _epochs = [50, 100]

        ## create dataframe to store optimal hyperparams
        _params_df = pd.DataFrame(columns=["cells", "epoch", "mse"])

        # ## loop thru all combinations of cells and epochs
        for i in _cells:
            for j in _epochs:

                print("CELL", i, "EPOCH", j)

                # list to store the mean square errors
                cvscores = []

                for train_index, test_index in _tscv.split(
                        self._scaled_features):
                    #print(train_index, test_index)
                    X_train, X_test = self._scaled_features[
                        train_index], self._scaled_features[test_index]
                    Y_train, Y_test = self._scaled_labels[
                        train_index], self._scaled_labels[test_index]

                    X_train = np.reshape(
                        X_train, (X_train.shape[0], 1, X_train.shape[1]))
                    X_test = np.reshape(X_test,
                                        (X_test.shape[0], 1, X_test.shape[1]))

                    model = Sequential()
                    model.add(
                        LSTM(i,
                             input_shape=(1, X_train.shape[2]),
                             return_sequences=True))
                    model.add(Dropout(0.10))
                    model.add(LSTM(i, return_sequences=True))
                    model.add(LSTM(i))
                    model.add(Dropout(0.10))
                    model.add(Dense(1))
                    model.compile(loss='mean_squared_error',
                                  optimizer='rmsprop',
                                  metrics=['mean_squared_error'])
                    model.fit(X_train, Y_train, epochs=j, verbose=0)

                    scores = model.evaluate(X_test, Y_test)
                    cvscores.append(scores[1])

                ## get average value of mean sq error
                MSE = np.mean(cvscores)

                ## make this df for cells, epoch and mse and append to params_df
                this_df = pd.DataFrame({
                    "cells": [i],
                    "epoch": [j],
                    "mse": [MSE]
                })
                # self.Debug(this_df)
                # params_df = params_df.append(this_df)

                _params_df = _params_df.append(this_df)
                self.Debug(_params_df)

        # # Check the optimised values (O_values) obtained from cross validation
        # # This code gives the row which has minimum mse and store the values to O_values
        # _O_values = _params_df[_params_df['mse'] == _params_df['mse'].min()]

        # # Extract the optimised values of cells and epochs from the above row (having min mse)
        self._opt_cells = 200
        self._opt_epochs = 100
        # self._opt_cells = _O_values["cells"][0]
        # self._opt_epochs = _O_values["epoch"][0]

        _X_train = np.reshape(self._scaled_features,
                              (self._scaled_features.shape[0], 1,
                               self._scaled_features.shape[1]))
        _y_train = self._scaled_labels

        self._session = K.get_session()
        self._graph = tf.get_default_graph()

        # Intialise the model with optimised parameters
        self._model = Sequential()
        self._model.add(
            LSTM(self._opt_cells,
                 input_shape=(1, _X_train.shape[2]),
                 return_sequences=True))
        self._model.add(Dropout(0.20))
        self._model.add(LSTM(self._opt_cells, return_sequences=True))
        self._model.add(Dropout(0.20))
        self._model.add(LSTM(self._opt_cells, return_sequences=True))
        self._model.add(LSTM(self._opt_cells))
        self._model.add(Dropout(0.20))
        self._model.add(Dense(1))

        # self.model.add(Activation("softmax"))
        self._model.compile(loss='mean_squared_error',
                            optimizer='adam',
                            metrics=['mean_squared_error'])
Beispiel #25
0
X_train = np.array(X[:TraingTestSize])
X_test = np.array(X[TraingTestSize:])

Y_train = np.array(Y[:TraingTestSize])
Y_test = np.array(Y[TraingTestSize:])

# In this step, I create a neural network with one input layer, one hidden layer and one output layer. I use MSE as the loss because this is a regression problem.

# In[14]:

model = Sequential()
model.add(BatchNormalization(input_shape=(6, )))
model.add(Dense(4))
model.add(Activation('tanh'))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Dense(2))
model.add(Activation('linear'))
model.compile(loss='mean_squared_error', optimizer='Adam')

# In[15]:

history = model.fit(X_train,
                    Y_train,
                    epochs=50,
                    validation_data=(X_test, Y_test),
                    verbose=1)
print(f'MSE on training set {history.history["loss"][-1]}')
print(f'MSE on testing set {history.history["val_loss"][-1]}')
Beispiel #26
0
    X, Y, num, Xdim, Ydim = ReadTrainingData(trainCSV)

    model = Sequential()
    model.add(Conv2D(filters = 128, kernel_size = (5, 5), input_shape = (48, 48, 1), padding='same'))
    model.add(Conv2D(filters = 128, kernel_size = (5, 5), padding='same'))
    #  model.add(GaussianNoise(0.1))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size = (2, 2)))
    model.add(LeakyReLU(alpha = 0.3))

    model.add(Conv2D(filters = 256, kernel_size = (5, 5), padding='same'))
    model.add(Conv2D(filters = 256, kernel_size = (5, 5), padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size = (2, 2)))
    model.add(LeakyReLU(alpha = 0.3))
    model.add(Dropout(0.2))

    model.add(Conv2D(filters = 512, kernel_size = (3, 3), padding='same'))
    model.add(Conv2D(filters = 512, kernel_size = (3, 3), padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size = (2, 2)))
    model.add(LeakyReLU(alpha = 0.3))
    model.add(Dropout(0.3))

    model.add(Conv2D(filters = 768, kernel_size = (3, 3), padding='same'))
    model.add(Conv2D(filters = 768, kernel_size = (3, 3), padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size = (2, 2)))
    model.add(LeakyReLU(alpha = 0.3))
    model.add(Dropout(0.4))
Beispiel #27
0
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras import backend as K
from keras import optimizers
from keras import losses
from keras import metrics

#-------------------  rec ----------------------
#_________ CNN ____________
X_input = Input(shape=(96,96,1))
X = Conv2D(32, kernel_size=3, strides=2, input_shape=(96,96,1), padding="same")(X_input)
X = LeakyReLU(alpha=0.2)(X)
X = Dropout(0.25)(X)
X = Conv2D(64, kernel_size=3, strides=2, padding="same")(X)
X = ZeroPadding2D(padding=((0,1),(0,1)))(X)
X = BatchNormalization(momentum=0.8)(X)
X = LeakyReLU(alpha=0.2)(X)
X = Dropout(0.25)(X)
X = Conv2D(128, kernel_size=3, strides=2, padding="same")(X)
X = BatchNormalization(momentum=0.8)(X)
X = LeakyReLU(alpha=0.2)(X)
X = Dropout(0.25)(X)
X = Conv2D(256, kernel_size=3, strides=1, padding="same")(X)
X = BatchNormalization(momentum=0.8)(X)
X = LeakyReLU(alpha=0.2)(X)
X = Dropout(0.25)(X)
X = Flatten()(X)
X = Dense(2048, activation='relu')(X)
train_data_dir = 'train'
validation_data_dir = 'validation'
nb_train_samples = 97
nb_validation_samples = 23
epochs = 50
batch_size = 2

# build the VGG16 network
model = applications.VGG16(weights=weights_path, include_top=False, input_shape=(150,150,3))
print('Model loaded.')

# build a classifier model to put on top of the convolutional model
top_model = Sequential()
top_model.add(Flatten(input_shape=model.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(1, activation='sigmoid'))

# note that it is necessary to start with a fully-trained
# classifier, including the top classifier,
# in order to successfully do fine-tuning

# add the model on top of the convolutional base
#model.add(top_model)
x=model.output
x=Flatten(input_shape=model.output_shape[1:])(x)
x=Dropout(0.5)(x)
x=Dense(1, activation='sigmoid')(x)
model = Model(model.input, x)
# set the first 25 layers (up to the last conv block)
# to non-trainable (weights will not be updated)
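
# A minimal sketch of the loop the comment above describes (the original snippet is
# truncated at this point):
for layer in model.layers[:25]:
    layer.trainable = False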
	return averaged

if __name__ == "__main__":

	model = Sequential()

	if K.image_data_format() == 'channels_first':
		input_shape = (3, img_rows, img_cols)
	else:
		input_shape = (img_rows, img_cols, 3)

	# ----- Based off of simple deep net for CIFAR small images dataset ------
	model.add(Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=input_shape))
	model.add(Conv2D(32, (3, 3), activation='relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))
	model.add(Dropout(0.25))

	model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
	model.add(Conv2D(64, (3, 3), activation='relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))
	model.add(Dropout(0.25))

	model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
	model.add(Conv2D(128, (3, 3), activation='relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))
	model.add(Dropout(0.25))

	model.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
	model.add(Conv2D(256, (3, 3), activation='relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))
	model.add(Dropout(0.25))
Beispiel #30
0
    def get_unet(self):
        inputs = Input((config.norm_size, config.norm_size, config.channels))
        # Network architecture definition

        conv1 = Conv2D(64,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(inputs)
        print("conv1 shape:", conv1.shape)
        conv1 = Conv2D(64,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv1)
        print("conv1 shape:", conv1.shape)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
        print("pool1 shape:", pool1.shape)

        conv2 = Conv2D(128,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(pool1)
        print("conv2 shape:", conv2.shape)
        conv2 = Conv2D(128,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv2)
        print("conv2 shape:", conv2.shape)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
        print("pool2 shape:", pool2.shape)

        conv3 = Conv2D(256,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(pool2)
        print("conv3 shape:", conv3.shape)
        conv3 = Conv2D(256,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv3)
        print("conv3 shape:", conv3.shape)
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
        print("pool3 shape:", pool3.shape)

        conv4 = Conv2D(512,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(pool3)
        conv4 = Conv2D(512,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv4)
        drop4 = Dropout(0.5)(conv4)
        pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

        conv5 = Conv2D(1024,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(pool4)
        conv5 = Conv2D(1024,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv5)
        drop5 = Dropout(0.5)(conv5)

        up6 = Conv2D(512,
                     2,
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal')(
                         UpSampling2D(size=(2, 2))(drop5))
        merge6 = concatenate([drop4, up6], axis=3)
        conv6 = Conv2D(512,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(merge6)
        conv6 = Conv2D(512,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv6)

        up7 = Conv2D(256,
                     2,
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal')(
                         UpSampling2D(size=(2, 2))(conv6))
        merge7 = concatenate([conv3, up7], axis=3)
        conv7 = Conv2D(256,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(merge7)
        conv7 = Conv2D(256,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv7)

        up8 = Conv2D(128,
                     2,
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal')(
                         UpSampling2D(size=(2, 2))(conv7))
        merge8 = concatenate([conv2, up8], axis=3)
        conv8 = Conv2D(128,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(merge8)
        conv8 = Conv2D(128,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv8)

        up9 = Conv2D(64,
                     2,
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal')(
                         UpSampling2D(size=(2, 2))(conv8))
        merge9 = concatenate([conv1, up9], axis=3)
        conv9 = Conv2D(64,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(merge9)
        conv9 = Conv2D(64,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv9)
        conv9 = Conv2D(2,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv9)
        conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)

        model = Model(inputs=inputs, outputs=conv10)

        return model
Beispiel #31
0
def segnet_arch_2c(h,w,dropouts):
  
  print("Model of size: %d %d" % (h, w))
  ch = 1
  ordering = 'th' # 'th': (ch, h, w),  'tf': (h, w, ch)
  inputs = Input(shape=(ch, h, w))
  concat_axis = 1
  
  #              0       1      2      3    4     5      6     7      8
  #dropouts =  [0.37,   0.51,  0.34,  0.48,  1,  0.48,  0.28,  0.78,  0.8]
  #dropouts =  [[0.15,0.25,0.4,0.5,1,0.4,0.25,0.15,0.15]]

  conv1 = Convolution2D(8, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(inputs)
  conv2 = Convolution2D(8, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(conv1)
  pool1 = MaxPooling2D(pool_size=(2, 2), dim_ordering=ordering)(conv2)
  pool1 = Dropout(dropouts[0])(pool1)
  print 'pool1', pool1.get_shape()

  conv3 = Convolution2D(16, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(pool1)
  conv4 = Convolution2D(16, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(conv3)
  pool2 = MaxPooling2D(pool_size=(2, 2), dim_ordering=ordering)(conv4)
  pool2 = Dropout(dropouts[1])(pool2)
  print 'pool2', pool2.get_shape()

  conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(pool2)
  conv6 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(conv5)
  pool3 = MaxPooling2D(pool_size=(2, 2), dim_ordering=ordering)(conv6)
  pool3 = Dropout(dropouts[2])(pool3)
  print 'pool3', pool3.get_shape()

  conv7 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(pool3)
  conv8 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(conv7)
  print 'conv8', conv8.get_shape()
  pool4 = MaxPooling2D(pool_size=(2, 2), dim_ordering=ordering)(conv8)
  pool4 = Dropout(dropouts[3])(pool4)
  print 'pool4', pool4.get_shape()

  conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(pool4)
  conv10 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(
    conv9)
  #pool5 = MaxPooling2D(pool_size=(2, 2), dim_ordering=ordering)(conv10)  # 5x5
  #pool5 = Dropout(dropouts[4])(pool5)
  print 'conv10', conv10.get_shape()

  up1 = UpSampling2D(size=(2, 2), dim_ordering=ordering)(conv10)
  print 'up1 upsampling2D:', up1.get_shape()
  up1 = merge([up1, conv8], mode='concat', concat_axis=concat_axis)
  # up1 = merge([(UpSampling2D(size=(2, 2),dim_ordering=ordering)(conv5)), pool4], mode='concat', concat_axis=1)
  up1 = Dropout(dropouts[5])(up1)
  print 'up1', up1.get_shape()
  conv11 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(up1)
  conv12 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(
    conv11)
  print 'conv12', conv12.get_shape()

  up2 = UpSampling2D(size=(2, 2), dim_ordering=ordering)(conv12)
  print 'up2 upsampling2D:', up2.get_shape()
  up2 = merge([up2, conv6], mode='concat', concat_axis=concat_axis)
  # up2 = merge([UpSampling2D(size=(2, 2))(conv8), conv3], mode='concat', concat_axis=1)
  up2 = Dropout(dropouts[6])(up2)
  print 'up2', up2.get_shape()
  conv13 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(up2)
  conv14 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(
    conv13)
  print 'conv13', conv13.get_shape()  # 7,80,32
  print 'conv4', conv4.get_shape()  # 1,160,16

  up3 = UpSampling2D(size=(2, 2), dim_ordering=ordering)(conv14)  # 14, 160, 32
  print 'up3 upsampling2D:', up3.get_shape()
  up3 = merge([up3, conv4], mode='concat', concat_axis=concat_axis)
  # up3 = merge([UpSampling2D(size=(2, 2))(conv9), conv2], mode='concat', concat_axis=1)
  up3 = Dropout(dropouts[7])(up3)
  print 'up3', up3.get_shape()
  conv15 = Convolution2D(16, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(up3)
  conv16 = Convolution2D(16, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(
    conv15)
  print 'conv16', conv16.get_shape()

  up4 = UpSampling2D(size=(2, 2), dim_ordering=ordering)(conv16)
  print 'up4 upsampling2D:', up4.get_shape()
  up4 = merge([up4, conv2], mode='concat', concat_axis=concat_axis)
  # up4 = merge([UpSampling2D(size=(2, 2))(conv10), conv1], mode='concat', concat_axis=1)
  up4 = Dropout(dropouts[8])(up4)
  conv17 = Convolution2D(8, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(up4)
  conv18 = Convolution2D(8, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(conv17)
  print 'conv18 shape:', conv18.get_shape()
  predictions = Convolution2D(ch, 1, 1, activation='sigmoid', init='he_normal',dim_ordering=ordering)(conv18) #old

  '''
  dense1 = Flatten()(conv19)
  print 'dense1 shape',dense1.get_shape()
  dense1 = Dropout(1)(dense1)
  
  predictions = Dense(input_dim=ch*1*1,output_dim =h*w,init = 'he_normal',activation = 'softmax')(dense1)
  print 'precision get shape',predictions.get_shape()
  '''
  model = Model(input=inputs, output=predictions)
  model.summary()
  #plot(model, "model.png")
  return model,predictions
Beispiel #32
0
# build up one-hot encoded input x and output y where x is a character
# in the text y is the next character in the text

x = np.zeros((len(sentences), config.maxlen, len(chars)), dtype=np.bool)
y = np.zeros((len(sentences), len(chars)), dtype=np.bool)
for i, sentence in enumerate(sentences):
    for t, char in enumerate(sentence):
        x[i, t, char_indices[char]] = 1
    y[i, char_indices[next_chars[i]]] = 1

#Creating Model
model = Sequential()
model.add(Bidirectional(LSTM(config.rememberChars, input_shape=(config.maxlen, len(chars)), return_sequences=True)))
model.add(Bidirectional(LSTM(config.rememberChars, input_shape=(config.maxlen, len(chars)), return_sequences=True)))
model.add(Bidirectional(LSTM(config.rememberChars, input_shape=(config.maxlen, len(chars)))))
model.add(Dropout(0.4))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(len(chars), activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer="rmsprop")


def sample(preds, temperature=1.0):
    # helper function to sample an index from a probability array
    preds = np.asarray(preds).astype('float64')
    preds = np.log(preds) / temperature
    exp_preds = np.exp(preds)
    preds = exp_preds / np.sum(exp_preds)
    probas = np.random.multinomial(1, preds, 1)
    return np.argmax(probas)
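
# A minimal usage sketch (assumptions: `text` is the corpus the sentences were cut from,
# and indices_char = {i: c for c, i in char_indices.items()} is the inverse character
# map), showing how sample() typically drives character-by-character generation:
#
#   seed = text[:config.maxlen]
#   generated = seed
#   for _ in range(400):
#       x_pred = np.zeros((1, config.maxlen, len(chars)))
#       for t, char in enumerate(seed):
#           x_pred[0, t, char_indices[char]] = 1
#       next_index = sample(model.predict(x_pred, verbose=0)[0], temperature=0.5)
#       next_char = indices_char[next_index]
#       generated += next_char
#       seed = seed[1:] + next_char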
Beispiel #33
0
def segnet_arch_2c_rgb(h, w):
  print("Model of size: %d %d" % (h, w))
  ch = 3 # 1
  inputs = Input(shape=(ch, h , w))
  ordering = 'th'  # 'th': (ch, h, w),  'tf': (h, w, ch)
  #             0       1      2      3    4     5      6     7      8
  dropouts = [0.37,  0.51,   0.34,  0.48,  1,   0.48, 0.28, 0.78,  0.8]

  conv1 = Convolution2D(8, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(inputs)
  print 'conv1', conv1.get_shape()
  conv1 = Convolution2D(8, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv1)
  print 'conv1.', conv1.get_shape()
  pool1 = MaxPooling2D(pool_size=(2, 2),dim_ordering=ordering)(conv1)
  pool1 = Dropout(dropouts[0])(pool1)
  print 'pool1', pool1.get_shape()

  conv2 = Convolution2D(16, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(pool1)
  print 'conv2', conv2.get_shape()
  conv2 = Convolution2D(16, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv2)
  print 'conv2.', conv2.get_shape()
  pool2 = MaxPooling2D(pool_size=(2, 2),dim_ordering=ordering)(conv2)
  pool2 = Dropout(dropouts[1])(pool2)
  print 'pool2', pool2.get_shape()

  conv3 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(pool2)
  print 'conv3', conv3.get_shape()
  conv3 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv3)
  print 'conv3.', conv3.get_shape()
  pool3 = MaxPooling2D(pool_size=(2, 2),dim_ordering=ordering)(conv3)
  pool3 = Dropout(dropouts[2])(pool3)  #changed from 0.4 to 0.25
  print 'pool3', pool3.get_shape()

  conv4 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(pool3)
  print 'conv4', conv4.get_shape()
  conv4 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv4)
  print 'conv4.', conv4.get_shape()
  pool4 = MaxPooling2D(pool_size=(2, 2),dim_ordering=ordering)(conv4)
  pool4 = Dropout(dropouts[3])(pool4)  #changed from 0.5 to 0.25
  print 'pool4', pool4.get_shape()

  conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(pool4)
  print 'conv5', conv5.get_shape()
  conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv5)
  print 'conv5.', conv5.get_shape()


  up1 = UpSampling2D(size=(2, 2),dim_ordering=ordering)(conv5)
  print 'up1 upsampling2D:', up1.get_shape()
  up1 = merge([up1, conv4], mode='concat', concat_axis=1)
  #up1 = merge([(UpSampling2D(size=(2, 2),dim_ordering=ordering)(conv5)), pool4], mode='concat', concat_axis=1)
  up1 = Dropout(dropouts[4])(up1)
  print 'up1 merge conv4', up1.get_shape()
  conv8 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(up1)
  print 'conv8', conv8.get_shape()
  conv8 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv8)
  print 'conv8.', conv8.get_shape()

  up2 = UpSampling2D(size=(2, 2),dim_ordering=ordering)(conv8)
  print 'up2 upsampling2D:', up2.get_shape()
  up2 = merge([up2, conv3], mode='concat', concat_axis=1)
  #up2 = merge([UpSampling2D(size=(2, 2))(conv8), conv3], mode='concat', concat_axis=1)
  up2 = Dropout(dropouts[5])(up2)
  print 'up2 merge conv3',up2.get_shape()
  conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(up2)
  print 'conv9',conv9.get_shape()  # 7,80,32
  conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv9)
  print 'conv9.',conv9.get_shape()  # 7,80,32

  up3 = UpSampling2D(size=(2, 2),dim_ordering=ordering)(conv9)   # 14, 160, 32
  print 'up3 upsampling2D:', up3.get_shape()
  up3 = merge([up3, conv2], mode='concat', concat_axis=1)
  #up3 = merge([UpSampling2D(size=(2, 2))(conv9), conv2], mode='concat', concat_axis=1)
  up3 = Dropout(dropouts[6])(up3)
  print 'up3 merge conv2',up3.get_shape()
  conv10 = Convolution2D(16, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(up3)
  print 'conv10',conv10.get_shape()
  conv10 = Convolution2D(16, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv10)
  print 'conv10.',conv10.get_shape()

  up4 = UpSampling2D(size=(2, 2),dim_ordering=ordering)(conv10)
  print 'up4 upsampling2D:', up4.get_shape()
  up4 = merge([up4, conv1], mode='concat', concat_axis=1)
  #up4 = merge([UpSampling2D(size=(2, 2))(conv10), conv1], mode='concat', concat_axis=1)
  up4 = Dropout(dropouts[7])(up4)
  print 'up4 merge conv1',up4.get_shape()
  conv11 = Convolution2D(8, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(up4)
  print 'conv11',conv11.get_shape()
  conv11 = Convolution2D(8, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv11)
  print 'conv11.',conv11.get_shape()

  #conv12 = Convolution2D(ch, 1, 1, activation='sigmoid', init='he_normal',dim_ordering=ordering)(conv11)
  conv12 = Convolution2D(1, 1, 1, activation='sigmoid', init='he_normal',dim_ordering=ordering)(conv11)
  print 'out',conv12.get_shape()

  predictions = K.argmax(conv12, axis=1)
  model = Model(input=inputs, output=[conv12])
  
  model.summary()
  #return model
  return model, predictions
Beispiel #34
0
def train_detector(X_train, X_test, Y_train, Y_test, nb_filters = 32, batch_size=128, nb_epoch=5, nb_classes=2, do_augment=False, save_file='models/detector_model.hdf5'):
    """ vgg-like deep convolutional network """
    
    np.random.seed(1337)  # for reproducibility
      
    # input image dimensions
    img_rows, img_cols = X_train.shape[1], X_train.shape[2]
    
    # size of pooling area for max pooling
    pool_size = (2, 2)
    # convolution kernel size
    kernel_size = (3, 3) 
    input_shape = (img_rows, img_cols, 1)


    model = Sequential()
    model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                            border_mode='valid',
                            input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=pool_size))
    # (16, 8, 32)
     
    model.add(Convolution2D(nb_filters*2, kernel_size[0], kernel_size[1]))
    model.add(Activation('relu'))
    model.add(Convolution2D(nb_filters*2, kernel_size[0], kernel_size[1]))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=pool_size))
    # (8, 4, 64) = (2048)
        
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
        
    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    
    if do_augment:
        datagen = ImageDataGenerator(
            rotation_range=20,
            width_shift_range=0.2,
            height_shift_range=0.2,
            shear_range=0.2,
            zoom_range=0.2)
        datagen.fit(X_train)
        model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
                            samples_per_epoch=len(X_train), nb_epoch=nb_epoch,
                            validation_data=(X_test, Y_test))
    else:
        model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=1, validation_data=(X_test, Y_test))
    score = model.evaluate(X_test, Y_test, verbose=0)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])
    model.save(save_file)  
X_test1 = sequence.pad_sequences(T1, maxlen=maxlen)

T2 = [[valid_chars[y] for y in x] for x in T2]
X_test2 = sequence.pad_sequences(T2, maxlen=maxlen)


print(X_train.shape)
print(y_train.shape)


embedding_vecor_length = 128

model = Sequential()
model.add(Embedding(max_features, embedding_vecor_length, input_length=maxlen))
model.add(LSTM(128))
model.add(Dropout(0.1))
model.add(Dense(21))
model.add(Activation('softmax'))
print(model.summary())

model.compile(loss='categorical_crossentropy', optimizer='adam',metrics=['accuracy'])
checkpointer = callbacks.ModelCheckpoint(filepath="logs/lstm/checkpoint-{epoch:02d}.hdf5", verbose=1, save_best_only=True, monitor='val_acc',mode='max')
csv_logger = CSVLogger('logs/lstm/training_set_lstmanalysis.csv',separator=',', append=False)
model.load_weights("logs/lstm/checkpoint-14.hdf5")
#model.fit(X_train, y_train, batch_size=32, nb_epoch=1000,validation_split=0.33, shuffle=True,callbacks=[checkpointer,csv_logger])
#score, acc = model.evaluate(X_test, y_test, batch_size=32)
#print('Test score:', score)
#print('Test accuracy:', acc)


y_pred1 = model.predict_classes(X_test1)
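# A minimal preprocessing sketch for scoring a single new string with the model
# above. It assumes valid_chars is the char-to-index dict and maxlen the pad
# length defined earlier (outside this excerpt); the input string is made up.
from keras.preprocessing import sequence

def encode_for_model(text, valid_chars, maxlen):
    # Map each character to its integer index, exactly as done for T1/T2 above;
    # characters missing from valid_chars would need separate handling.
    seq = [[valid_chars[c] for c in text]]
    # Pad/truncate to the fixed length the Embedding layer expects.
    return sequence.pad_sequences(seq, maxlen=maxlen)

x_new = encode_for_model("some-new-input", valid_chars, maxlen)
y_new = model.predict_classes(x_new)   # index into the 21 softmax classes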
Beispiel #36
def unet_arch_2c(h, w):
  print("Model of size: %d %d" % (h, w))
  ch = 1  # number of input channels
  inputs = Input(shape=(ch, h, w))  # 160 x 160
  ordering = 'th'  # 'th': (ch, h, w),  'tf': (h, w, ch)

  conv1 = Convolution2D(8, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(inputs)
  conv1 = Convolution2D(8, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv1)
  pool1 = MaxPooling2D(pool_size=(2, 2),dim_ordering=ordering)(conv1)
  pool1 = Dropout(0.15)(pool1)
  print('pool1', pool1.get_shape())

  conv2 = Convolution2D(16, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(pool1)
  conv2 = Convolution2D(16, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv2)
  pool2 = MaxPooling2D(pool_size=(2, 2),dim_ordering=ordering)(conv2)
  pool2 = Dropout(0.25)(pool2)
  print('pool2', pool2.get_shape())

  conv3 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(pool2)
  conv3 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv3)
  pool3 = MaxPooling2D(pool_size=(2, 2),dim_ordering=ordering)(conv3)
  pool3 = Dropout(0.4)(pool3)
  print('pool3', pool3.get_shape())

  conv4 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(pool3)
  conv4 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv4)
  print('conv4', conv4.get_shape())
  pool4 = MaxPooling2D(pool_size=(2, 2),dim_ordering=ordering)(conv4)
  pool4 = Dropout(0.5)(pool4)
  print('pool4', pool4.get_shape())

  conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(pool4)
  conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv5)
  # pool5 = MaxPooling2D(pool_size=(2, 2),dim_ordering=ordering)(conv5) # 5x5
  # pool5 = Dropout(0.5)(pool5)
  print('conv5', conv5.get_shape())


  up1 = UpSampling2D(size=(2, 2),dim_ordering=ordering)(conv5)
  #print('up1', up1.get_shape())
  up1 = merge([up1, conv4], mode='concat', concat_axis=1)
  #up1 = merge([(UpSampling2D(size=(2, 2),dim_ordering=ordering)(conv5)), pool4], mode='concat', concat_axis=1)
  up1 = Dropout(0.4)(up1)
  print('up1', up1.get_shape())
  conv8 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(up1)
  conv8 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv8)
  print('conv8', conv8.get_shape())

  up2 = UpSampling2D(size=(2, 2),dim_ordering=ordering)(conv8)
  up2 = merge([up2, conv3], mode='concat', concat_axis=1)
  #up2 = merge([UpSampling2D(size=(2, 2))(conv8), conv3], mode='concat', concat_axis=1)
  up2 = Dropout(0.25)(up2)
  print('up2', up2.get_shape())
  conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(up2)
  conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv9)
  print('conv9', conv9.get_shape())  # 7,80,32
  print('conv2', conv2.get_shape())  # 1,160,16

  up3 = UpSampling2D(size=(2, 2),dim_ordering=ordering)(conv9)   # 14, 160, 32
  up3 = merge([up3, conv2], mode='concat', concat_axis=1)
  #up3 = merge([UpSampling2D(size=(2, 2))(conv9), conv2], mode='concat', concat_axis=1)
  up3 = Dropout(0.15)(up3)
  print('up3', up3.get_shape())
  conv10 = Convolution2D(16, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(up3)
  conv10 = Convolution2D(16, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv10)
  print('conv10', conv10.get_shape())

  up4 = UpSampling2D(size=(2, 2),dim_ordering=ordering)(conv10)
  up4 = merge([up4, conv1], mode='concat', concat_axis=1)
  #up4 = merge([UpSampling2D(size=(2, 2))(conv10), conv1], mode='concat', concat_axis=1)
  up4 = Dropout(0.15)(up4)
  conv11 = Convolution2D(8, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(up4)
  conv11 = Convolution2D(8, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv11)

  predictions = Convolution2D(ch, 1, 1, activation='sigmoid', init='he_normal',dim_ordering=ordering)(conv11)

  model = Model(input=inputs, output=predictions)
  model.summary()
  #plot(model, "model.png")
  return model
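# A minimal sketch of using unet_arch_2c (illustrative only): the optimizer,
# loss, batch size and the random channels-first dummy data are assumptions,
# not taken from the original example. It relies on the old Keras API that
# provides merge() and dim_ordering, as used inside the function itself.
import numpy as np
from keras.optimizers import Adam

seg_model = unet_arch_2c(160, 160)
seg_model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])

X = np.random.rand(8, 1, 160, 160).astype('float32')          # (N, ch, h, w) images
Y = (np.random.rand(8, 1, 160, 160) > 0.5).astype('float32')  # binary masks, same layout
seg_model.fit(X, Y, batch_size=2, nb_epoch=1)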