Example 1
def prepare_model():
    """Train a regression network on the full training set and return it.

    Side effect: rebinds the module-level ``scaler`` so callers can apply
    the same normalization to future inputs.
    """
    global scaler
    data = import_data(ticker,
                       timeframe,
                       start_train_date,
                       end_train_date,
                       calculate_input,
                       lookback,
                       calculate_output,
                       lookforward,
                       split=(100, 0, 0))  # 100% train, no test/validation

    # Fit a fresh scaler on the training inputs, then normalize them.
    scaler = StandardScaler()
    scaler.fit(data['train_input'])
    scaled_inputs = scaler.transform(data['train_input'])

    # Two tanh hidden layers feeding a single linear output (regression).
    net = Sequential()
    net.add(Dense(units=num_features,
                  activation='tanh',
                  input_dim=num_features,
                  kernel_initializer=he_uniform(1)))
    net.add(Dense(num_features, activation='tanh'))
    net.add(Dense(1, activation='linear'))
    net.compile(loss='mean_squared_error', optimizer='sgd')
    net.fit(scaled_inputs, data['train_output'], epochs=num_epochs)
    return net
Example 2
def prepare_model():
    """Build and train a softmax classification network on the train set."""
    data = import_data(ticker,
                       timeframe,
                       start_train_date,
                       end_train_date,
                       calculate_input,
                       lookback,
                       calculate_output,
                       lookforward,
                       split=(100, 0, 0))  # 100% train, no test/validation

    # Hidden layers are twice as wide as the input feature vector.
    net = Sequential()
    net.add(Dense(units=num_features * 2,
                  activation='tanh',
                  input_dim=num_features,
                  kernel_initializer=he_uniform(1)))
    net.add(Dense(num_features * 2, activation='tanh'))
    net.add(Dense(num_classes, activation='softmax'))
    net.compile(loss='categorical_crossentropy',
                optimizer='sgd',
                metrics=['accuracy'])

    # One-hot encode the integer class labels before fitting.
    encoded_labels = keras.utils.to_categorical(data['train_output'],
                                                num_classes=num_classes)
    net.fit(data['train_input'], encoded_labels, epochs=num_epochs)
    return net
Example 3
def prepare_model():
    """Train three independent models on the same data; return them as a tuple."""
    data = import_data(ticker,
                       timeframe,
                       start_train_date,
                       end_train_date,
                       calculate_input,
                       lookback,
                       calculate_output,
                       lookforward,
                       split=(100, 0, 0))  # 100% train, no test/validation
    # Each call builds and trains a fresh model on identical data
    # (an ensemble; any variation comes from the training process itself).
    return tuple(create_and_train_model(data) for _ in range(3))
Example 4
def prepare_model():
    """Build and train a softmax classification network on the training set.

    Imports 100% of the configured date range as training data, one-hot
    encodes the labels, and fits a two-hidden-layer tanh network with a
    softmax output via SGD.

    Returns:
        The trained Keras ``Sequential`` model.
    """
    # NOTE: the original declared ``global`` for several names but never
    # assigned any of them, and carried commented-out scaler code; both
    # were dead weight and have been removed. Module-level config names
    # (_ticker, _num_features, ...) are read-only here.
    data = import_data(
        _ticker,
        _timeframe,
        _start_train_date,
        _end_train_date,
        calculate_input,
        _lookback,
        calculate_output,
        _lookforward,
        split=(
            100, 0, 0
        )  # This time we need only a train set (100% for train set, 0% for test and validation ones)
    )

    # Creating a model...
    model = Sequential()  # Creating a model class instance
    # The number of nodes in the first hidden layer equals the number of features multiplied by 2
    model.add(
        Dense(units=_num_features * 2,
              activation='tanh',
              input_dim=_num_features,
              kernel_initializer=he_uniform(1)))
    # Adding another hidden layer
    model.add(Dense(_num_features * 2, activation='tanh'))
    # Adding an output layer
    model.add(Dense(_num_classes, activation='softmax'))
    # Compiling the model
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    one_hot_train_outputs = keras.utils.to_categorical(
        data['train_output'],
        num_classes=_num_classes)  # Performing one-hot encoding
    model.fit(data['train_input'], one_hot_train_outputs,
              epochs=_num_epochs)  # Training the model
    return model
Example 5
def prepare_model():
    """Fit a degree-4 polynomial SVC classifier on the full training set."""
    data = import_data(
        ticker,
        timeframe,
        start_train_date,
        end_train_date,
        calculate_input,
        lookback,
        calculate_output,
        lookforward,
        split=(100, 0, 0)  # 100% train, no test/validation
    )

    # Polynomial SVC with a tightened convergence tolerance.
    classifier = SVC(tol=1e-4, degree=4)
    # The fit API wants a 1-D label vector, so drop the trailing dimension.
    n_samples = np.shape(data['train_output'])[0]
    labels = np.reshape(data['train_output'], (n_samples,))
    classifier.fit(data['train_input'], labels)
    return classifier
Example 6
def prepare_model():
    """Build and train a regression network on the raw (unscaled) train set."""
    data = import_data(ticker,
                       timeframe,
                       start_train_date,
                       end_train_date,
                       calculate_input,
                       lookback,
                       calculate_output,
                       lookforward,
                       split=(100, 0, 0))  # 100% train, no test/validation

    # Two tanh hidden layers sized to the feature count; linear output
    # for regression, trained with plain SGD on mean squared error.
    net = Sequential()
    net.add(Dense(units=num_features,
                  activation='tanh',
                  input_dim=num_features,
                  kernel_initializer=he_uniform(1)))
    net.add(Dense(num_features, activation='tanh'))
    net.add(Dense(1, activation='linear'))
    net.compile(loss='mean_squared_error', optimizer='sgd')
    net.fit(data['train_input'], data['train_output'], epochs=num_epochs)
    return net
Example 7
def prepare_model():
    """Build and train a softmax classifier using the module's constants."""
    data = import_data(
        TICKER,
        TIMEFRAME,
        START_TRAIN_DATE,
        END_TRAIN_DATE,
        calculate_input,
        LOOKBACK,
        calculate_output,
        LOOKFORWARD,
        split=(
            100, 0, 0
        )  # This time we need only a train set (100% for train set, 0% for test and validation ones)
    )

    # Two tanh hidden layers, each twice the input width, ending in a
    # softmax over the class set; trained with SGD on cross-entropy.
    net = Sequential()
    net.add(Dense(units=NUM_FEATURES * 2,
                  activation='tanh',
                  input_dim=NUM_FEATURES,
                  kernel_initializer=he_uniform(1)))
    net.add(Dense(NUM_FEATURES * 2, activation='tanh'))
    net.add(Dense(NUM_CLASSES, activation='softmax'))
    net.compile(loss='categorical_crossentropy',
                optimizer='sgd',
                metrics=['accuracy'])

    # One-hot encode the integer class labels, then fit.
    encoded_labels = keras.utils.to_categorical(data['train_output'],
                                                num_classes=NUM_CLASSES)
    net.fit(data['train_input'], encoded_labels, epochs=NUM_EPOCHS)
    return net
Example 8
def prepare_model():
    """Train a class-weighted classifier and evaluate it on train and test data.

    Queries the input/output calculators for their dimensions (publishing
    them as module globals), calibrates output thresholds for balanced
    classes, trains a model with inverse-frequency class weights, then
    reports accuracy on both the train range and a held-out test range.

    Side effects: rebinds the module globals ``lookback``, ``num_features``,
    ``lookforward`` and ``num_classes``; writes progress and metrics to
    ``stderr`` and class counts to ``stdout``.

    Returns:
        The trained model produced by ``CREATE_MODEL_FN``.
    """
    global lookback, num_features, lookforward, num_classes

    # The calculator callbacks report their own window sizes and dimensions.
    lookback, num_features = CALC_INPUT_FN('query_lookback')
    lookforward, num_classes = CALC_OUTPUT_FN('query_lookforward')

    # Calibrate output thresholds so the classes are (roughly) equally sized.
    l1models.calculate_thresholds_for_equal_class_sets(TICKER, TIMEFRAME,
                                                       START_TRAIN_DATE,
                                                       END_TRAIN_DATE,
                                                       CALC_OUTPUT_FN)

    data = import_data(
        TICKER,
        TIMEFRAME,
        START_TRAIN_DATE,
        END_TRAIN_DATE,
        CALC_INPUT_FN,
        lookback,
        CALC_OUTPUT_FN,
        lookforward,
        split=(
            100, 0, 0
        )  # This time we need only a train set (100% for train set, 0% for test and validation ones)
    )

    model = CREATE_MODEL_FN(num_features, num_classes, optimizer=OPTIMIZER)
    one_hot_train_outputs = keras.utils.to_categorical(
        data['train_output'],
        num_classes=num_classes)  # Performing one-hot encoding

    # Count samples per class; each label row carries the class id at [0].
    num_samples = len(data['train_output'])
    num_samples_by_classes = [0] * num_classes
    for sample in data['train_output']:
        num_samples_by_classes[int(sample[0])] += 1
    print('NUM. SAMPLES BY CLASSES:')
    print(str(num_samples_by_classes))
    # Inverse-frequency class weights: rarer classes get a larger weight.
    cw = {}
    for c in range(num_classes):
        cw[c] = (num_samples - num_samples_by_classes[c]) / num_samples
    sys.stderr.write('Fitting...\n')
    model.fit(data['train_input'],
              one_hot_train_outputs,
              class_weight=cw,
              epochs=NUM_EPOCHS,
              verbose=VERBOSE)  # Training the model

    # In-sample accuracy, reported to stderr.
    sys.stderr.write('Evaluating train...\n')
    scores = model.evaluate(data['train_input'],
                            one_hot_train_outputs,
                            verbose=VERBOSE)
    metrics = "%s: %.2f%%\n" % (model.metrics_names[1], scores[1] * 100)
    sys.stderr.write(metrics)

    # Out-of-sample accuracy on the period after the training range.
    sys.stderr.write('Evaluating test...\n')
    data = import_data(
        TICKER,
        TIMEFRAME,
        END_TRAIN_DATE,
        END_TEST_DATE,
        CALC_INPUT_FN,
        lookback,
        CALC_OUTPUT_FN,
        lookforward,
        split=(
            0, 0, 100
        )  # This time we need only a test set (0% for train and validation sets, 100% for the test one)
    )
    one_hot_test_outputs = keras.utils.to_categorical(
        data['test_output'],
        num_classes=num_classes)  # Performing one-hot encoding
    scores = model.evaluate(data['test_input'],
                            one_hot_test_outputs,
                            verbose=VERBOSE)
    metrics = "%s: %.2f%%\n" % (model.metrics_names[1], scores[1] * 100)
    sys.stderr.write(metrics)

    return model