Example #1
    def GetPreviousMonthsLog(self):
        # Locate last month's log file and return its parsed results, if any.
        lastMonth = self.AddMonths(datetime.now(), -1)
        logFilePath = get_logfile_path(self.logFolder, lastMonth.year,
                                       lastMonth.month)

        if path.exists(logFilePath):
            # Read and parse the previous month's JSON log via the open handle
            # (the original opened `f` but never used it).
            with open(logFilePath, 'r') as f:
                jsonData = json.loads(f.read())
            return jsonData['results']
        else:
            return None
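The method above leans on two helpers that are not shown. A minimal sketch of what they might look like, assuming one folder per year and one JSON file per month; the layout, the standalone add_months name (the original uses a self.AddMonths method), and the clamped month arithmetic are assumptions, not the original implementation:

from datetime import datetime
from os import path

def get_logfile_path(log_folder, year, month):
    # Assumed layout: <log_folder>/<year>/<month>.json
    return path.join(log_folder, str(year), '{:02d}.json'.format(month))

def add_months(date, months):
    # Assumed month arithmetic; clamps the day to 28 so the result is
    # always a valid date (e.g. March 31 minus one month).
    total = date.month - 1 + months
    return date.replace(year=date.year + total // 12,
                        month=total % 12 + 1,
                        day=min(date.day, 28))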
Example #2
def run(opt_method, path, seed, eps, max_iter, *args):
    # Fit a logistic regression model with the supplied optimizer and
    # collect convergence statistics for the solution w_k.
    x, y = read_file(path)
    M = LogisticRegression(x, y, seed)
    w_k = opt_method(M, eps, max_iter, *args)
    f_val = M.calc_func(w_k)
    grad = M.calc_grad(w_k)
    grad_norm = LA.norm(grad)
    res = {
        'f_opt': f_val,                  # objective value at the solution
        'grad_norm': grad_norm,          # gradient norm at the solution
        'rk': M.r_k[-1],                 # last residual recorded by the model
        'oracle_calls': M.oracle_calls,  # number of oracle evaluations
        'solution': w_k.tolist(),
        'time': M.total_time,            # total optimization time
    }
    return res
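A hypothetical call site; gradient_descent and the dataset path below are illustrative placeholders for whatever optimizer (with the expected (model, eps, max_iter, *args) signature) and data file the caller supplies:

# Hypothetical usage; the optimizer and path are illustrative placeholders.
res = run(gradient_descent, 'data/train.txt', seed=42, eps=1e-6, max_iter=1000)
print(res['f_opt'], res['grad_norm'], res['oracle_calls'])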
Example #3
    def LogResultsToFile(self, results):
        # Append `results` to the current month's JSON log, creating the
        # year folder and the log file on first use.
        now = datetime.now()
        currentLogFolder = path.join(self.logFolder, str(now.year))
        logFilePath = get_logfile_path(self.logFolder, now.year, now.month)

        if not path.exists(currentLogFolder):
            makedirs(currentLogFolder)

        if path.exists(logFilePath):
            # Load the existing month's log via the open handle (the original
            # opened `f` but never used it) and append the new record.
            with open(logFilePath, 'r') as f:
                jsonData = json.loads(f.read())
            jsonData['results'].append(results)
        else:
            jsonData = {'results': [results]}

        with open(logFilePath, 'w') as f:
            f.write(json.dumps(jsonData))
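A hypothetical usage, assuming the class wraps a log folder; the class name and the record contents below are illustrative, not from the source:

# Hypothetical usage; ResultLogger is an illustrative name for the class.
logger = ResultLogger('logs')
logger.LogResultsToFile({'run_id': 1, 'accuracy': 0.93})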
Example #4
def parse():
	# Split the input file into queries ("?" line), facts ("=" line) and
	# rules (everything else, with whitespace and comments stripped).
	content = read_file(argv[1])
	queries, facts, rules = [], [], []
	for line in content:
		if not line.strip():
			continue
		if line[0] == "?":
			queries = get_qf(line, "?")
		elif line[0] == "=":
			facts = get_qf(line, "=")
		else:
			line = re.sub(r'\s', '', line)
			if "#" in line:
				line = line.split("#")[0]
			if line:
				rules.append(line)
	ctx = {
		"queries": queries,
		"facts": facts,
		"rules": rules
	}
	return ctx
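get_qf is not shown. A plausible sketch, assuming queries and facts are written as single-character symbols after the marker; the exact line format is an assumption:

import re

def get_qf(line, marker):
    # Hypothetical helper: strip the leading marker and any whitespace, then
    # return the remaining symbols as a list of single characters.
    return list(re.sub(r'\s', '', line.lstrip(marker)))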
Example #5
def predict_func(filename):
    data_filename = os.path.join("data", filename + '.csv')
    output_filename = os.path.join("out", filename + '.txt')

    length, list_open, list_high, list_low, list_close = read_file(
        data_filename)

    length, input_data, label_data = normalize_data(length, list_open,
                                                    list_high, list_low,
                                                    list_close)

    data_input, data_close = [], []
    for j in range(length - seq_length):
        _x = input_data[j:j + seq_length]
        _y = label_data[j + seq_length]
        data_input.append(_x)
        data_close.append(_y)

    train_size = int(length * 0.7) - 1

    # start at index 1 so prev_data is available for the direction-aware cost
    train_input, test_input = np.array(data_input[1:train_size]), np.array(
        data_input[train_size:])
    train_close, test_close = np.array(data_close[1:train_size]), np.array(
        data_close[train_size:])
    train_close_prev = np.array(data_close[0:train_size - 1])

    X = tf.placeholder(tf.float32, [None, seq_length, data_dim])
    Y = tf.placeholder(tf.float32, [None, 1])
    Y_prev = tf.placeholder(tf.float32, [None, 1])

    # build an LSTM network
    cell = tf.contrib.rnn.BasicLSTMCell(num_units=hidden_dim,
                                        forget_bias=1.0,
                                        state_is_tuple=True)
    #cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob = 0.5)

    outputs, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
    Y_pred = tf.contrib.layers.fully_connected(outputs[:, -1],
                                               output_dim,
                                               activation_fn=None)
    # Direction-aware cost: the error (Y - Y_pred) is doubled whenever the
    # predicted move (Y_pred - Y_prev) points the opposite way from the
    # actual move (Y - Y_prev).
    cost = (Y - Y_pred) * (tf.cast(
        (Y_pred - Y_prev) * (Y - Y_prev) < 0, tf.float32) * 2 + tf.cast(
            (Y_pred - Y_prev) * (Y - Y_prev) >= 0, tf.float32))
    loss = tf.reduce_mean(tf.square(cost))  # mean of squared costs
    # optimizer
    optimizer = tf.train.AdamOptimizer(learning_rate)
    training = optimizer.minimize(loss)

    # RMSE
    targets = tf.placeholder(tf.float32, [None, 1])
    predictions = tf.placeholder(tf.float32, [None, 1])
    rmse = tf.sqrt(tf.reduce_mean(tf.square(targets - predictions)))

    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)

        # Training step
        for k in range(iterations):
            _, step_loss = sess.run([training, loss],
                                    feed_dict={
                                        X: train_input,
                                        Y: train_close,
                                        Y_prev: train_close_prev
                                    })

        # Test step: predict on the held-out set and measure its RMSE
        # (rmse_val avoids shadowing the `rmse` tensor, whose computed
        # value was silently discarded in the original).
        test_predict = sess.run(Y_pred, feed_dict={X: test_input})
        rmse_val = sess.run(rmse,
                            feed_dict={
                                targets: test_close,
                                predictions: test_predict
                            })

    print(test_predict[-1])
    print('test RMSE:', rmse_val)

    test_close = unormalize_data(test_close, list_close)
    test_predict = unormalize_data(test_predict, list_close)

    with open(output_filename, 'w') as f:
        f.write(to_output_form(test_close, test_predict))
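normalize_data and unormalize_data are not shown. A minimal sketch of the inverse step, assuming a min-max normalization against the raw close prices; the real scaling scheme may differ:

import numpy as np

def unormalize_data(values, reference):
    # Hypothetical inverse of min-max scaling over the raw close prices.
    lo, hi = np.min(reference), np.max(reference)
    return np.asarray(values) * (hi - lo) + lo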
Example #6
    def __init__(self, config_file):
        # Read the configuration file and extract the operation to run.
        content = read_file(config_file)
        self.op = self._get_op(content)
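A hypothetical usage; the enclosing class is not named in the snippet, so ConfigReader below is an illustrative stand-in, as is the config file path:

# Hypothetical usage; ConfigReader is an illustrative name for the class.
reader = ConfigReader('settings.conf')
print(reader.op)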