Example #1
def get_filters():
    """
    Asks user to specify a city, month, and day to analyze.

    Returns:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - name of the day of week to filter by, or "all" to apply no day filter
    """
    print("Hello! Let's explore some US bikeshare data!")
    # Get user input for city (chicago, new york city, washington); get_inputs re-prompts until the entry is valid.
    city = get_inputs(
        'Would you like to see data for Chicago, New York, or Washington? \n',
        cities)

    # Get whether the user wants to filter the data by month, day, both, or not at all.
    while True:
        date_filter = input(
            'Would you like to filter the data by month, day, both or not at all? Type "none" for no time filter \n'
        ).lower()
        if date_filter == 'month':
            # Get user input for month (all, january, february, ..., june).
            month = get_inputs(
                'Which month? January, February, March, April, May, or June? \n',
                months)
            day = 'all'
            break
        elif date_filter == 'day':
            month = 'all'
            # Get user input for day of week (all, monday, tuesday, ..., sunday).
            day = get_inputs(
                'Which day? Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday? \n',
                days)
            break
        elif date_filter == 'both':
            # Get user input for both month and day of week and filter the data by them.
            month = get_inputs(
                'Which month? January, February, March, April, May, or June? \n',
                months)
            day = get_inputs(
                'Which day? Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday? \n',
                days)
            break
        elif date_filter == 'none':
            # Apply no filter to the data.
            month = 'all'
            day = 'all'
            break
        else:
            print('Invalid entry')

    print('-' * 40)
    return city, month, day
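
The helper get_inputs and the option lists cities, months, and days are defined elsewhere in the script. A minimal sketch of what they might look like, inferred from the call sites above (all names and behavior here are assumptions, not the original code):

# Hypothetical supporting definitions for the snippet above.
cities = ['chicago', 'new york', 'washington']
months = ['january', 'february', 'march', 'april', 'may', 'june']
days = ['monday', 'tuesday', 'wednesday', 'thursday',
        'friday', 'saturday', 'sunday']

def get_inputs(prompt, valid_options):
    """Prompt until the user enters one of valid_options; return it lowercased."""
    while True:
        choice = input(prompt).lower()
        if choice in valid_options:
            return choice
        print('Invalid entry')
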
Example #2
    def __init__(self, max_epochs, batch_size):
        # Load the MNIST train/validation/test splits.
        self.X_train_set, self.Y_train_set_n, self.X_valid_set, self.Y_valid_set_n, self.X_test_set,\
            self.Y_test_set_n, self.Y_train_set_v, self.Y_valid_set_v, self.Y_test_set_v =\
            inputs.get_inputs('mnist.pkl.gz')

        self.n_training_examples = self.X_train_set.shape[0]

        self.max_epochs = max_epochs
        self.batch_size = batch_size
        self.w = {}  # layer weights and biases

        # conv layer 1: 5x5 kernels, 1 input channel -> 32 feature maps
        self.w['conv_1'] = {
            'weights': tf.Variable(tf.random_normal((5, 5, 1, 32))),
            'biases': tf.Variable(tf.random_normal((1, 32)))
        }

        # conv layer 2: 5x5 kernels, 32 -> 64 feature maps
        self.w['conv_2'] = {
            'weights': tf.Variable(tf.random_normal((5, 5, 32, 64))),
            'biases': tf.Variable(tf.random_normal((1, 64)))
        }

        # fully connected layer 1: flattened 7x7x64 activations -> 1024 units
        self.w['fc_1'] = {
            'weights': tf.Variable(tf.random_normal((7 * 7 * 64, 1024))),
            'biases': tf.Variable(tf.random_normal((1, 1024)))
        }

        # fully connected layer 2: 1024 units -> 10 class logits
        self.w['fc_2'] = {
            'weights': tf.Variable(tf.random_normal((1024, 10))),
            'biases': tf.Variable(tf.random_normal((1, 10)))
        }
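
These shapes imply the usual two-conv/two-pool MNIST topology (28x28 -> 14x14 -> 7x7). A minimal sketch of a forward pass these variables could support, using the same TF1-style ops; the method name and structure are assumptions, not part of the original class:

    def conv_net(self, x):
        # Reshape flat MNIST vectors into NHWC images.
        x = tf.reshape(x, (-1, 28, 28, 1))
        # conv + pool: 28x28x1 -> 14x14x32
        c1 = tf.nn.relu(tf.nn.conv2d(x, self.w['conv_1']['weights'],
                                     strides=[1, 1, 1, 1], padding='SAME')
                        + self.w['conv_1']['biases'])
        c1 = tf.nn.max_pool(c1, ksize=[1, 2, 2, 1],
                            strides=[1, 2, 2, 1], padding='SAME')
        # conv + pool: 14x14x32 -> 7x7x64
        c2 = tf.nn.relu(tf.nn.conv2d(c1, self.w['conv_2']['weights'],
                                     strides=[1, 1, 1, 1], padding='SAME')
                        + self.w['conv_2']['biases'])
        c2 = tf.nn.max_pool(c2, ksize=[1, 2, 2, 1],
                            strides=[1, 2, 2, 1], padding='SAME')
        # fully connected layers: 7*7*64 -> 1024 -> 10 logits
        flat = tf.reshape(c2, (-1, 7 * 7 * 64))
        fc1 = tf.nn.relu(tf.matmul(flat, self.w['fc_1']['weights'])
                         + self.w['fc_1']['biases'])
        return tf.matmul(fc1, self.w['fc_2']['weights']) + self.w['fc_2']['biases']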
Example #3
def main():
    while True:
        inp_dict = get_inputs()
        try:
            clear_dict = clear_inputs(inp_dict)
        except ValueError:
            print('________________________________')
            print('Value incorrect, please try again')
            print('________________________________')
            continue
        else:
            final_list = filter_data(cards, clear_dict)
            final_str = show_item(final_list)
            print('\n'.join(final_str))
        break
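
The helpers here (get_inputs, clear_inputs, filter_data, show_item, and the cards collection) are defined elsewhere. Since the loop retries on ValueError, clear_inputs is evidently a validator; a plausible sketch, with all names and fields assumed:

def clear_inputs(inp_dict):
    """Hypothetical validator: normalize raw inputs, raising ValueError on bad values."""
    clear_dict = {}
    for key, value in inp_dict.items():
        value = value.strip().lower()
        if not value:
            raise ValueError('missing value for ' + key)
        clear_dict[key] = value
    return clear_dict
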
Example #4
    def build_model(self):
        # get a batch of inputs
        self.train_inputs, self.val_inputs = get_inputs(self)
        # get placeholders
        self.placeholders = get_placeholders(self)

        # define a global step/iteration number
        self.global_step = tf.Variable(0, name="global_step", trainable=False)

        ## run inference and compute the loss
        # handle the tf complaint about no loss
        dummy_var = tf.Variable(0.0, name="dummy")
        self.loss = tf.identity(dummy_var-dummy_var)
        loss_dict, self.pred_cat, self.cat = self.inference(is_train=hyp.do_train, reuse=False)
        for loss in loss_dict.values():
            self.loss = self.loss + loss
        self.loss += tf.reduce_sum(slim.losses.get_regularization_losses())
        tf.summary.scalar('loss', self.loss)

        ## define a big summary op we can run
        self.summary = tf.summary.merge_all()
        
        ## define a saver/loader for checkpoints
        self.saver = tf.train.Saver(max_to_keep=100)
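
The dummy-variable lines above exist only to seed self.loss with a differentiable zero before the per-term losses are added. An equivalent and arguably clearer formulation, sketched under the same assumptions about loss_dict and slim:

        # Sum all loss terms in one op instead of seeding with a dummy variable.
        loss_terms = list(loss_dict.values())
        loss_terms.append(tf.reduce_sum(slim.losses.get_regularization_losses()))
        self.loss = tf.add_n(loss_terms, name='total_loss')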
Example #5
                                         childrenEncoderOutputs)

decoderInputs = [attrCodeTensor, childrenCodeTensor]

# Apply decoder on encoder outputs.
childrenCodeActivated = childrenCodec.decode(decoderInputs)
attrsCodeActivated = attributeCodec.decode(decoderInputs)

# Define the model that will turn
# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`
trainer_model = Model(
    [*attributesEncoderInputs, *childrenEncoderInputs, *decoderInputs],
    [childrenCodeActivated, attrsCodeActivated])

# Get inputs.
inputData = get_inputs(modelArgs)

plot_model(trainer_model, to_file="plot.png")

# Run training
trainer_model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
trainer_model.fit([inputData.encoder_input_data, inputData.decoder_input_data],
                  inputData.decoder_target_data,
                  batch_size=modelArgs.batch_size,
                  epochs=modelArgs.epochs,
                  validation_split=0.2)

# Save model
saveModel(trainer_model, 't_s2s.json')
with open("modelArgs.json", "w") as fp:
    json.dump(modelArgs, fp)
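
saveModel and modelArgs come from the surrounding project. A minimal sketch of what saveModel might do, assuming it serializes the architecture with the standard Keras APIs (the weights filename is an assumption):

def saveModel(model, path):
    """Hypothetical helper: write the architecture as JSON, weights alongside."""
    with open(path, 'w') as fp:
        fp.write(model.to_json())
    model.save_weights(path.replace('.json', '.h5'))
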
Example #6
import os
import inputs

import pandas as pd

user_inputs = inputs.get_inputs()
strat_list = user_inputs['strategy'].split(",")  # comma-separated list of strategies
if "all" in strat_list[0].lower():
    directory_path = os.path.dirname(os.path.abspath(__file__))
    new_path = os.path.join(directory_path, "stratlist.txt")

    strat_list.clear()
    with open(new_path) as f:  # read strategy names from stratlist.txt
        for line in f:
            strat_list.append(line.strip() + ".csv")

# Load each strategy CSV into a DataFrame.
csv_files = [pd.read_csv(csv_path) for csv_path in strat_list]

main_df = pd.concat(csv_files, axis=1)

print(main_df)

main_df.to_csv('sim.csv', index=False)
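
Here inputs.get_inputs() evidently returns a dict with at least a 'strategy' key holding a comma-separated string; a minimal sketch of such a helper (entirely assumed):

def get_inputs():
    """Hypothetical prompt collecting which strategy CSVs to combine."""
    return {'strategy': input('Strategies to load (comma-separated, or "all"): ')}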
Example #7
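This excerpt picks up midway through decode_sequence. A sketch of the presumably standard Keras character-level seq2seq opening is given here so the excerpt below reads as a complete function; encoder_model, decoder_model, and target_token_index are assumed names not shown in the original:

import numpy as np

def decode_sequence(input_seq):
    # Encode the input sequence into the initial decoder states.
    states_value = encoder_model.predict(input_seq)

    # Seed the decoder with the start token (tab, per the standard example).
    target_seq = np.zeros((1, 1, modelArgs.num_decoder_tokens))
    target_seq[0, 0, modelArgs.target_token_index['\t']] = 1.

    stop_condition = False
    decoded_sentence = ''
    while not stop_condition:
        output_tokens, h, c = decoder_model.predict([target_seq] + states_value)
        # Sample the next token greedily; the original excerpt continues from here.
        sampled_token_index = np.argmax(output_tokens[0, -1, :])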
        sampled_char = modelArgs.reverse_target_char_index[sampled_token_index]
        decoded_sentence += sampled_char

        # Exit condition: either hit max length
        # or find stop character.
        if (sampled_char == '\n'
                or len(decoded_sentence) > modelArgs.max_decoder_seq_length):
            stop_condition = True

        # Update the target sequence (of length 1).
        target_seq = np.zeros((1, 1, modelArgs.num_decoder_tokens))
        target_seq[0, 0, sampled_token_index] = 1.

        # Update states
        states_value = [h, c]

    return decoded_sentence


# Get inputs.
inputs = get_inputs(modelArgs)

for seq_index in range(100):
    # Take one sequence (part of the training set)
    # for trying out decoding.
    input_seq = inputs.encoder_input_data[seq_index:seq_index + 1]
    decoded_sentence = decode_sequence(input_seq)
    print('-')
    print('Input sentence:', inputs.input_texts[seq_index].encode("utf-8"))
    print('Decoded sentence:', decoded_sentence.encode("utf-8"))
Example #8
from pprint import pprint  # used below; get_inputs, clear_inputs, filter_data, and cards are defined elsewhere

def main():
    inp_dict = get_inputs()
    clear_dict = clear_inputs(inp_dict)
    final_list = filter_data(cards, clear_dict)
    pprint(final_list)
Example #9
from keras.layers import Input, LSTM, Dense
from keras.utils import plot_model
from attrdict import AttrDict  # assumed provider of AttrDict; not shown in the excerpt
from utils import saveModel
from inputs import get_inputs

# Build training args needed during training and also inference.
trainArgs = AttrDict()
trainArgs.batch_size = 64  # Batch size for training.
trainArgs.epochs = 100  # Number of epochs to train for.
trainArgs.latent_dim = 256  # Latent dimensionality of the encoding space.
trainArgs.num_samples = 10000  # Number of samples to train on.
# Path to the data txt file on disk.
trainArgs.data_path = 'fra-eng/fra.txt'

# Get inputs (this presumably also fills in trainArgs.num_encoder_tokens and
# num_decoder_tokens, which are used below but never set in this excerpt).
inputs = get_inputs(trainArgs)

# Define an input sequence and process it.
encoder_inputs = Input(shape=(None, trainArgs.num_encoder_tokens))
encoder = LSTM(trainArgs.latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
# We discard `encoder_outputs` and only keep the states.
encoder_states = [state_h, state_c]

# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = Input(shape=(None, trainArgs.num_decoder_tokens))
# We set up our decoder to return full output sequences,
# and to return internal states as well. We don't use the
# return states in the training model, but we will use them in inference.
decoder_lstm = LSTM(trainArgs.latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs,