# Example no. 1 (score: 0)
def main():
    """Get the command line arguments and start indexing documents into Elasticsearch."""
    parsed_args = common.parse_arguments()
    base_dir = parsed_args.path
    index_name = parsed_args.index_name

    # Create/configure the index before any documents are ingested into it.
    configure_index(index_name)

    walk_and_index_all_files(base_dir, index_name)
def configure_global_app():
    """Set fields in app.config based on the command line parameters.

    These values are used in various parts of the search code.
    """
    parsed_args = common.parse_arguments()
    app.config['input_files_root'] = parsed_args.path
    app.config['index_name'] = parsed_args.index_name
    # Bug fix: print() does not support logging-style lazy '%s' arguments --
    # the original printed the raw format string followed by the two values.
    # Interpolate explicitly instead.
    print('Files root: %s. Index name: %s' %
          (app.config['input_files_root'], app.config['index_name']))
    def run(self):
        """Build the MQTT, InfluxDB and REST endpoints, schedule the two
        periodic tasks and hand control to the Twisted reactor."""
        settings = common.parse_arguments()
        logging.basicConfig(filename=settings['log_file'], level=logging.INFO)

        mqtt_endpoint = Server(settings['mqtt_broker'], '1883',
                               settings['service_id'], settings['password'])
        influxdb_endpoint = Server(settings['influxdb_host'],
                                   settings['influxdb_port'],
                                   settings['influxdb_user'],
                                   settings['influxdb_password'])
        rest_endpoint = Server(settings['rest_url'], '',
                               settings['service_id'], settings['password'])

        # The client is kept in a module-level global so that other
        # module-level code can reach it.
        global service
        service = InfluxdbMqttClient(settings['service_id'], mqtt_endpoint,
                                     rest_endpoint, influxdb_endpoint)

        # Drain incoming messages once per second.
        message_loop = task.LoopingCall(service.process_messages)
        message_loop.start(1.0).addErrback(self.handle_error)

        # Publish a status report every 10 minutes.
        status_loop = task.LoopingCall(service.publish_status)
        status_loop.start(600.0).addErrback(self.handle_error)

        reactor.run()  # Blocks here; keeps the process running forever.
#
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import trustedanalytics as ta

from common import parse_arguments, check_uaa_file

# Resolve the CLI parameters and make sure the UAA credentials file is usable.
parameters = parse_arguments()

ta.create_credentials_file(parameters.uaa_file_name)

check_uaa_file(parameters.uaa_file_name)

# NOTE(review): the query is assembled by string concatenation from CLI
# input; acceptable for an operator-run tool, but not safe for untrusted input.
query = "select * from " + parameters.database_name + "." + parameters.table_name
print("\nQuery: {}".format(query))

# Wrap the query in a HiveQuery and materialize it as a frame.
hq = ta.HiveQuery(query)
frame = ta.Frame(hq)
# Example no. 5 (score: 0)
        while True:
            message = self.transport.recv(MAX_PACKAGE_LENGTH)
            if message:
                # при сообщении
                self.server.send(message, self.sockname)
            else:
                # при отключении
                self.transport.close()
                server.connections.remove(self)
                return

    def send(self, message):
        """Write *message* to this client's socket in full."""
        sock = self.transport
        sock.sendall(message)


def close(serv):
    """Console watchdog: when the operator types 'q', close every client
    connection and terminate the whole process immediately."""
    while True:
        if input('') != 'q':
            continue
        for connection in serv.connections:
            connection.transport.close()
        # Hard exit: skips cleanup handlers, kills all threads at once.
        os._exit(0)


if __name__ == '__main__':
    # Start the chat server, then a console thread that can shut it down.
    args = parse_arguments()
    server = Server(args.a, args.p)
    server.start()
    watchdog = threading.Thread(target=close, args=(server, ))
    watchdog.start()

# lsof -i :7777
# Example no. 6 (score: 0)

def perceptron():
    """Train and evaluate the bag-of-words perceptron model."""
    tf.logging.set_verbosity(FLAGS.verbosity)

    # Timed preprocessing step.
    print("Preprocessing data...")
    tic()
    (train_raw, x_train, y_train, x_test, y_test,
     _, _, classes) = preprocess_data(FLAGS)
    toc()

    # The output layer needs one unit per class.
    FLAGS.output_dim = len(classes)

    # Timed train-and-evaluate run.
    tic()
    run_experiment(x_train, y_train, x_test, y_test,
                   bag_of_words_perceptron_model, 'train_and_evaluate', FLAGS)
    toc()


# Run script ##############################################
if __name__ == "__main__":
    # Build the CLI parser with the training defaults, parse, then train.
    parser = create_parser_training(MODEL_DIRECTORY, NUM_EPOCHS, BATCH_SIZE,
                                    LEARNING_RATE)
    FLAGS = parse_arguments(parser)

    perceptron()
    except requests.exceptions.RequestException as re:
        logging.error("RequestException :" + url)
        logging.exception(re)

    except Exception as e:
        logging.exception(e)


# Global variables

NUM_THREAD_WORKERS = 3
PUBLISH_STATS_INTERVAL = 600  # seconds (10 minutes)
INFLUX_DATABASE = 'openchirp'

running = True

# Read the configuration parameters once, at import time.
conf = common.parse_arguments()

# MQTT topics this service publishes to / subscribes on.
status_topic = 'openchirp/service/' + str(conf['service_id']) + '/status'
events_topic = 'openchirp/service/' + str(conf['service_id']) + '/thing/events'
transducers_topic = 'openchirp/device/+/+'

# REST endpoint listing the things bound to this service.
things_url = str(conf['rest_url'] + '/service/' + conf['service_id'] +
                 '/things')

auth_cred = HTTPBasicAuth(conf['service_id'], conf['password'])

# Background thread that periodically publishes the service status.
publish_stats_daemon = threading.Thread(name='StatsTimer',
                                        target=publish_status,
                                        daemon=True)

# In-memory device definition storage.
devices = dict()
pointsWritten = 0  # keeps track of the points written in the last 10 minutes
# Example no. 8 (score: 0)
if __name__ == '__main__':
    COMMANDS = ('metadata', 'create_bucket', 'delete_bucket',
                'create_key', 'upload_key', 'download_key', 'delete_key')

    # A sub-command is mandatory and must be one we recognize.
    if len(sys.argv) < 2:
        usage()

    command = sys.argv[1]
    if command not in COMMANDS:
        usage()

    # Parse the remaining options; the region defaults to us-east-1.
    argv = sys.argv[2:]
    options = 'r:a:s:'
    long_options = ['region_name=', 'access_key=', 'secret_key=']
    configuration, opts, args = common.parse_arguments(
        argv, options, long_options, {'region_name': 'us-east-1'})
    if configuration is None:
        usage()

    # The requested region must be one boto knows about; the for/else
    # calls usage() only when no region matched (no break).
    for region in boto.s3.regions():
        if region.name == configuration['region_name']:
            break
    else:
        usage()

    # Dispatch to the handler for the chosen sub-command.
    if command == 'metadata':
        metadata(configuration)
    elif command == 'create_bucket':
        create_bucket(configuration, args[0])
    elif command == 'delete_bucket':
        delete_bucket(configuration, args[0])
# Example no. 9 (score: 0)
    x = [v for v in range(0, iterations)]

    plt.plot(x, best_values, 'g', label='Best value')
    # plt.plot(x, avg_values, 'b', label='Average value')
    plt.title(title)

    if optimal_value is not None:
        optimal = [optimal_value] * iterations
        plt.plot(x, optimal, 'r', label='Optimal value')

    plt.ylabel('Value')
    plt.xlabel('Iteration')
    plt.legend()
    plt.savefig('result.png', bbox_inches='tight')
    plt.show()


if __name__ == '__main__':
    # Unpack the CLI description of the problem instance.
    (input_file, solution_file, points_count, iterations,
     upper_bound, lower_bound) = parse_arguments(sys.argv[1:])

    best_values, optimal_value = solve(input_file, solution_file, points_count,
                                       iterations, upper_bound, lower_bound,
                                       True)

    # Plot title: "<basename>: gpu, points, iterations, bounds".
    name = input_file.split("/")[-1]
    title = "gpu: {}, points: {}, iterations: {}, upper: {}, lower: {}"
    title = title.format(name, points_count, iterations, upper_bound,
                         lower_bound)

    plot_results(best_values, optimal_value, title)