Example #1
def setUpForDjango(test):
    connect(crate_host)
    import os
    os.environ["DJANGO_SETTINGS_MODULE"] = "crate.client.django.tests.settings"

    from django.test.runner import setup_databases
    setup_databases(3, False)
Example #2
def init_tables():
    conn = connect(crate_server)
    c = conn.cursor()
    try:
        c.execute('create table logs (id string primary key, ts timestamp)')
    except ProgrammingError:
        pass
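
Example #2 leaves out its imports. A minimal self-contained sketch, assuming connect and ProgrammingError come from crate.client and crate.client.exceptions, with a placeholder host:

# Sketch of Example #2 with the imports spelled out; 'localhost:4200' is a placeholder host.
from crate.client import connect
from crate.client.exceptions import ProgrammingError

def init_tables(crate_server='localhost:4200'):
    conn = connect(crate_server)
    c = conn.cursor()
    try:
        c.execute('create table logs (id string primary key, ts timestamp)')
    except ProgrammingError:
        # the table most likely exists already
        pass
    finally:
        conn.close()
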
Example #3
def fill_table(hosts, fqtable, num_records, bulk_size=1000):
    """ fills a table with random data

    Will insert <num_records> into <fqtable> on <hosts>.
    Each insert request will contain <bulk_size> items.

    Depending on the column names and data types of the given table, an
    appropriate provider is selected and used to generate random data.

    E.g. a column called `name` will be filled with names.

    """
    conn = connect(hosts)
    c = conn.cursor()

    schema, table = parse_table(fqtable)
    columns = retrieve_columns(c, schema, table)
    yield "Found schema: "
    yield columns
    generate_row = create_row_generator(columns)

    stmt = to_insert(fqtable, columns)[0]
    yield "Using insert statement: "
    yield stmt

    bulk_size = min(num_records, bulk_size)
    num_inserts = int(num_records / bulk_size)
    yield "Will make {} requests with a bulk size of {} per request".format(num_inserts, bulk_size)
    loop.run_until_complete(_run_fill_table(conn, stmt, generate_row, num_inserts, bulk_size))
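
fill_table is a generator that yields progress messages as it goes. A caller might drive it like this; a minimal sketch with placeholder hosts and table name:

# Hypothetical driver for Example #3; prints the progress messages the generator yields.
for message in fill_table(['localhost:4200'], 'doc.mytable', num_records=10000, bulk_size=500):
    print(message)
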
Example #4
def create_index(index_name, crate_home, output_dir):
    crate_layer = CrateLayer(
        'data',
        crate_home=crate_home,
        port=CRATE_HTTP_PORT,
        transport_port=CRATE_TRANSPORT_PORT,
        settings={
            'es.api.enabled': True,
            # The disk.watermark settings can be removed once crate-python > 0.21.1 has been released
            "cluster.routing.allocation.disk.watermark.low" : "100k",
            "cluster.routing.allocation.disk.watermark.high" : "10k",
            "cluster.routing.allocation.disk.watermark.flood_stage" : "1k",
        }
    )
    crate_layer.start()
    crate_http = 'localhost:{}'.format(CRATE_HTTP_PORT)
    try:
        with connect(crate_http) as conn:
            cur = conn.cursor()
            cmds = INDICES[index_name].split(';')
            for cmd in cmds[:-1]:
                LOGGER.info(cmd)
                cur.execute(cmd)
            cur.execute("select version['number'] from sys.nodes")
            version = cur.fetchone()[0]
            r = http.request('POST', crate_http + '/_flush')
            r.read()
            compress_index(index_name, version, crate_layer.wdPath(), output_dir)
    finally:
        crate_layer.stop()
Example #5
def setup(*args):
    node.start()
    with connect(node.http_url) as conn:
        c = conn.cursor()
        c.execute('create table x.demo (id int, name string, country string) \
                  with (number_of_replicas = 0)')
        c.execute('create blob table blobtable with (number_of_replicas = 0)')
Example #6
File: tests.py Project: cgvarela/crate
def tearDown(test):
    # drop leftover tables after each test
    with connect(CRATE_DSN) as conn:
        c = conn.cursor()
        c.execute("""
            SELECT table_schema, table_name
            FROM information_schema.tables
            WHERE table_schema NOT IN ('blob', 'sys', 'information_schema', 'pg_catalog')
        """)
        for schema, table in c.fetchall():
            try:
                c.execute("""
                    DROP TABLE IF EXISTS "{}"."{}"
                """.format(schema, table))
            except Exception as e:
                print('Failed to drop table {}.{}: {}'.format(schema, table, e))

    # at the moment it is not possible to reset custom analyzers, char_filters,
    # and tokenizers via SQL
    # that's why we do a PUT request to the ES api
    reset_custom_analysis = {
        'persistent': {
            'crate.analysis.custom.analyzer.*': None,
            'crate.analysis.custom.char_filter.*': None,
            'crate.analysis.custom.tokenizer.*': None,
        }
    }
    request = Request('http://' + CRATE_DSN + '/_cluster/settings',
                      data=json.dumps(reset_custom_analysis).encode('utf-8'),
                      headers={'Content-Type': 'application/json'},
                      method='PUT')
    with urlopen(request) as response:
        assert response.status == 200
Example #7
File: models.py Project: chaudum/crate-top
 def __init__(self, hosts=[]):
     self.hosts = hosts
     conn = connect(self.hosts)
     self.server_version = conn.lowest_server_version
     self.cursor = conn.cursor()
     self.http = urllib3.PoolManager(3)
     self.last_update = datetime.now()
Example #8
def create_index(index_name, crate_home, output_dir):
    crate_layer = CrateLayer(
        'data',
        crate_home=crate_home,
        port=CRATE_HTTP_PORT,
        transport_port=CRATE_TRANSPORT_PORT,
        settings={
            'es.api.enabled': True,
        }
    )
    crate_layer.start()
    crate_http = 'localhost:{}'.format(CRATE_HTTP_PORT)
    try:
        with connect(crate_http) as conn:
            cur = conn.cursor()
            cmds = INDICES[index_name].split(';')
            for cmd in cmds[:-1]:
                LOGGER.info(cmd)
                cur.execute(cmd)
            cur.execute("select version['number'] from sys.nodes")
            version = cur.fetchone()[0]
            r = http.request('POST', crate_http + '/_flush')
            r.read()
            compress_index(index_name, version, crate_layer.wdPath(), output_dir)
    finally:
        crate_layer.stop()
Example #9
    def setUpClass(cls):
        # auto-discovery with unicast on the same host only works if all nodes are configured with the same port range
        transport_port_range = bind_range(range_size=cls.NUM_SERVERS)
        for i in range(cls.NUM_SERVERS):
            layer = CrateNode(
                crate_dir=crate_path(),
                version=(4, 0, 0),
                settings={
                    'cluster.name': cls.__class__.__name__,
                    'node.name': cls.node_name(i),
                    'psql.port': 0,
                    'transport.tcp.port': transport_port_range,
                },
                env={
                    'JAVA_HOME': os.environ.get('JAVA_HOME', ''),
                    'CRATE_HEAP_SIZE': '256M'
                }
            )
            layer.start()
            cls.HTTP_PORTS.append(layer.addresses.http.port)
            cls.CRATES.append(layer)

        dsn = cls.random_dns()
        num_nodes = 0

        # wait until all nodes joined the cluster
        with connect(dsn) as conn:
            c = conn.cursor()
            while num_nodes < len(cls.CRATES):
                c.execute("select * from sys.nodes")
                num_nodes = len(c.fetchall())
                time.sleep(5)
Example #10
 def do_connect(self, server, error_trace=False):
     """
     connect to one or more servers
     with "connect servername:port[ servername:port [...]]"
     """
     self.conn = client.connect(servers=server, error_trace=self.error_trace)
     self.cursor = self.conn.cursor()
     results = []
     failed = 0
     for server in self.conn.client.active_servers:
         try:
             server_infos = self.conn.client.server_infos(server)
         except ConnectionError as e:
             failed += 1
             results.append([server, None, False, e.message])
         else:
             results.append(
                 server_infos + (True, "OK", )
             )
     self.pprint(
         results,
         ["server_url", "node_name", "connected", "message"])
     if failed == len(results):
         self.print_error("connect")
     else:
         self.print_success("connect")
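
Underneath, do_connect forwards to crate.client.connect, which also accepts several servers at once. A minimal sketch with placeholder hosts:

# Sketch: connecting the client to more than one server; the hosts are placeholders.
from crate import client

conn = client.connect(servers=['10.0.0.1:4200', '10.0.0.2:4200'], error_trace=False)
print(conn.client.active_servers)  # the servers the client will round-robin over
conn.close()
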
Example #11
File: tests.py Project: TonyWuLihu/crate
 def test_multiple_probe_selects(self):
     conn = connect('localhost:{}'.format(CRATE_HTTP_PORT))
     cur = conn.cursor()
     cur.execute("SELECT os, load, os FROM sys.nodes")
     res = cur.fetchall()
     for row in res:
         self.assertEqual(row[0], row[2])
Example #12
def main():
    try:
        aws_secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
        aws_access_key = os.environ['AWS_ACCESS_KEY_ID']
    except KeyError:
        print("Please set your AWS_SECRET_ACCESS_KEY and AWS_ACCESS_KEY_ID environment variables.")
        return 1

    args = parse_args()

    connection = client.connect(args.host, error_trace=True)
    cur = connection.cursor()

    create_table(cur, os.path.join(os.path.dirname(__file__), "..", "schema.sql"))
    alter_table(cur, 0)

    for month in get_month_partitions(args.start, args.end):
        print('Importing Github data for {0} ...'.format(month))
        s3_url = 's3://{0}:{1}@crate.sampledata/github_archive/{2}-*'.format(quote_plus(aws_access_key),
            quote_plus(aws_secret_key), month)
        print('>>> {0}'.format(s3_url))
        cmd = '''COPY github PARTITION (month_partition=?) FROM ? WITH (compression='gzip')'''
        try:
            cur.execute(cmd, (month, s3_url,))
        except Exception as e:
            print("Error while importing {}: {}".format(s3_url, e))
            print(e.error_trace)

    alter_table(cur, 1)
    return 0
Example #13
File: test_jmx.py Project: crate/crate
 def test_number_of_open_connections(self):
     jmx_client = JmxClient(JMX_PORT)
     with connect(enterprise_crate.http_url) as _:
         stdout, stderr = jmx_client.query_jmx(
             'io.crate.monitoring:type=Connections', 'HttpOpen')
         self.assertGreater(int(stdout), 0)
         self.assertEqual(stderr, '')
Example #14
def setUpWithCrateLayer(test):
    test.globs['HttpClient'] = http.Client
    test.globs['crate_host'] = crate_host
    test.globs['pprint'] = pprint
    test.globs['print'] = cprint

    conn = connect(crate_host)
    cursor = conn.cursor()

    def refresh(table):
        cursor.execute("refresh table %s" % table)
    test.globs["refresh"] = refresh

    with open(docs_path('testing/testdata/mappings/locations.sql')) as s:
        stmt = s.read()
        cursor.execute(stmt)
        stmt = ("select count(*) from information_schema.tables "
                "where table_name = 'locations'")
        cursor.execute(stmt)
        assert cursor.fetchall()[0][0] == 1

    data_path = docs_path('testing/testdata/data/test_a.json')
    # load testing data into crate
    cursor.execute("copy locations from ?", (data_path,))
    # refresh location table so imported data is visible immediately
    refresh("locations")

    # create blob table
    cursor.execute("create blob table myfiles clustered into 1 shards " +
                   "with (number_of_replicas=0)")
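
The blob table created at the end of Example #14 can then be written to through the connection's blob container. A rough sketch, assuming crate-python's get_blob_container()/put() interface and that put() returns the sha1 digest used as the blob id:

# Sketch: uploading content into the 'myfiles' blob table created above.
from io import BytesIO
from crate.client import connect

conn = connect(crate_host)  # crate_host as in the example above
container = conn.get_blob_container('myfiles')
digest = container.put(BytesIO(b'some file content'))  # digest identifies the blob
assert container.exists(digest)
conn.close()
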
Example #15
 def connect(self, port=4200):
     crate_ip = '127.0.0.1'
     if self.cli.info()['OperatingSystem'].startswith(u'Boot2Docker'):
         import subprocess;
         crate_ip = subprocess.check_output(r'docker-machine ip',
             stderr=None, shell=True).decode("utf-8").strip('\n')
     return connect(['{0}:{1}'.format(crate_ip, str(port))])
Example #16
def run_crate_benchmark(opts):
  conn = client.connect(servers=opts.crate_hosts)
  print >> stderr, "Connecting to Crate..."
  cursor = conn.cursor()

  print >> stderr, "Connection succeeded..."
  times = []
  # Clean up old table if still exists
  try:
    cursor.execute(CLEAN_QUERY.rstrip(';'))
  except:
    pass

  for _ in range(opts.num_trials):
    cursor.execute(CRATE_MAP[opts.query_num][0].rstrip(';'))
    cursor.execute(CRATE_MAP[opts.query_num][1].rstrip(';'))
    duration = float(cursor.duration) / 1000.0
    times.append(duration)
    # for debugging purposes
    cursor.execute('REFRESH TABLE %s' % TMP_TABLE)
    cursor.execute('SELECT count(*) from %s' % TMP_TABLE)
    result = cursor.fetchone()
    print >> stderr, '{0} records in {1}s'.format(result[0], duration)
    cursor.execute(CLEAN_QUERY.rstrip(';'))
  return times
Example #17
 def test_execute_with_args(self):
     client = MagicMock(spec=Client)
     conn = connect(client=client)
     c = conn.cursor()
     statement = 'select * from locations where position = ?'
     c.execute(statement, 1)
     client.sql.assert_called_once_with(statement, 1)
Example #18
 def connect(self, port=4200):
     crate_ip = '127.0.0.1'
     if self.cli.info()['Name'] == u'boot2docker':
         import subprocess;
         crate_ip = subprocess.check_output(r'boot2docker ip',
             stderr=None, shell=True).strip('\n')
     return connect(['{0}:{1}'.format(crate_ip, str(port))])
Example #19
File: command.py Project: foreachsky/crash
 def __init__(self,
              output_writer=None,
              connection=None,
              error_trace=False,
              is_tty=True,
              autocomplete=True):
     self.error_trace = error_trace
     self.connection = connection or connect(error_trace=error_trace)
     self.cursor = self.connection.cursor()
     self.output_writer = output_writer or OutputWriter(PrintWrapper(), is_tty)
     self.lines = []
     self.exit_code = 0
     self.expanded_mode = False
     self.sys_info_cmd = SysInfoCommand(self)
     self.commands = {
         'q': self._quit,
         'c': self._connect,
         'connect': self._connect,
         'dt': self._show_tables,
         'check': self._check,
         'sysinfo': self.sys_info_cmd.execute,
     }
     self.commands.update(built_in_commands)
     self.logger = ColorPrinter(is_tty)
     self._autocomplete = autocomplete
Example #20
 def get(self):
     conn = connect(crate_server)
     c = conn.cursor()
     c.execute('insert into logs (id, ts) values (?, ?)',
               (str(uuid4()), int(time() * 1000)))
     c.execute('select count(*) from logs')
     number = c.fetchone()[0]
     self.write('Hello Number {0}'.format(number))
Example #21
def crate_connection1():
    host = crate_config_no_schema['connection']['params']['host']
    conn = client.connect(host, error_trace=True)
    yield conn
    schemas = ("historian", "testing")
    for x in schemas:
        clean_schema_from_database(conn, x)
    conn.close()
Example #22
 def test_execute_with_bulk_args(self):
     client = MagicMock(spec=Client)
     conn = connect(client=client)
     c = conn.cursor()
     statement = 'select * from locations where position = ?'
     c.execute(statement, bulk_parameters=[[1]])
     client.sql.assert_called_once_with(statement, None, [[1]])
     conn.close()
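
Against a live server, bulk_parameters sends the statement once together with many parameter rows. A minimal sketch with a placeholder host and table:

# Sketch: bulk insert via bulk_parameters; host and table name are placeholders.
from crate.client import connect

with connect('localhost:4200') as conn:
    c = conn.cursor()
    c.execute('create table if not exists demo_bulk (id int, name string)')
    c.execute('insert into demo_bulk (id, name) values (?, ?)',
              bulk_parameters=[[1, 'a'], [2, 'b'], [3, 'c']])
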
Example #23
    def test_community_edition_has_no_max_nodes_limit(self):
        if not CRATE_CE:
            return

        with connect(self.random_dns()) as conn:
            c = conn.cursor()
            c.execute("create table t1 (id int)")
            self.assertEqual(1, c.rowcount)
Example #24
def teardown(*args):
    try:
        with connect(node.http_url) as conn:
            c = conn.cursor()
            c.execute('drop table x.demo')
            c.execute('drop blob table blobtable')
    finally:
        node.stop()
Example #25
    def test_community_edition_has_no_license(self):
        if not CRATE_CE:
            return

        with connect(self.random_dns()) as conn:
            c = conn.cursor()
            c.execute("select license, license['issued_to'] from sys.cluster")
            self.assertEqual([[None, None]], c.fetchall())
Example #26
File: tests.py Project: cgvarela/crate
def _execute_sql(stmt):
    """
    Invoke a single SQL statement and automatically close the HTTP connection
    when done.
    """
    with connect(CRATE_DSN) as conn:
        c = conn.cursor()
        c.execute(stmt)
Example #27
File: tests.py Project: lumannnn/vyi
def tearDownVyiTransactions(test):
    conn = connect(crate_host)
    cursor = conn.cursor()
    cursor.execute("drop table users")
    cursor.execute("drop table user_transactions")
    cursor.execute("drop table projects")
    cursor.execute("drop table stats")
    cursor.execute("drop table transactions")
Example #28
    def test_lowest_server_version(self):
        infos = [(None, None, '0.42.3'),
                 (None, None, '0.41.8'),
                 (None, None, 'not a version')]

        client = Client(servers="localhost:4200 localhost:4201 localhost:4202")
        client.server_infos = lambda server: infos.pop()
        connection = connect(client=client)
        self.assertEqual((0, 41, 8), connection.lowest_server_version.version)
Example #29
def _get_runtimes(hosts):
    conn = connect(hosts)
    c = conn.cursor()
    c.execute("select min(runtime_stats['avg']), statement from benchmarks group by statement")
    rows = c.fetchall()
    for min_avg, statement in rows:
        c.execute("select runtime_stats['avg'] from benchmarks "
                  "where statement = ? order by ended desc limit 1", (statement,))
        yield min_avg, c.fetchall()[0][0], statement
Example #30
File: tests.py Project: WilliamRen/crate
def wait_for_schema_update(schema, table, column):
    conn = connect('localhost:' + str(CRATE_HTTP_PORT))
    c = conn.cursor()
    count = 0
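    # poll information_schema until the new column becomes visible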
    while count == 0:
        c.execute(('select count(*) from information_schema.columns '
                    'where schema_name = ? and table_name = ? '
                    'and column_name = ?'),
                    (schema, table, column))
        count = c.fetchone()[0]
Example #31
 def create_connection(self):
     return client.connect('{}:{}'.format(self.args.host, self.args.port),
                           username=self.args.user,
                           password=self.args.password,
                           schema=self.args.schema,
                           timeout=self.args.timeout)
Example #32
def main(argv):
    logging.basicConfig()
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    # Setting up the connection to CrateDB (with command line args)
    crate_host = None
    crate_user = None
    crate_password = None
    start_date = None
    end_date = None
    delta = 24
    tenant_name = 'EKZ'
    cursor = None
    connection = None
    dry_run = False
    try:
        opts, args = getopt.getopt(argv, "h:u:p:t:s:e:d:r:", [
            "host=", "user="******"password="******"tenant-name=", "start-date=",
            "end-date=", "delta-time", "dry-run"
        ])
    except getopt.GetoptError:
        logger.error("wrong parameters")
        print('occupancy.py -h <cratedb_host> -u <cratedb_user>')
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--host"):
            crate_host = arg
        elif opt in ("-u", "--user"):
            crate_user = arg
        elif opt in ("-p", "--password"):
            crate_password = arg
        elif opt in ("-t", "--tenant-name"):
            tenant_name = arg
        elif opt in ("-s", "--start-date"):
            start_date = parser.parse(arg)
        elif opt in ("-e", "--end-date"):
            end_date = parser.parse(arg)
        elif opt in ("-d", "--delta-time"):
            delta = int(arg)
        elif opt in ("-r", "--dry-run"):
            dry_run = bool(arg)
    if not crate_host:
        logger.error("missing parameters")
        print('occupancy.py -h <cratedb_host> -u <cratedb_user>')
        sys.exit(-2)
    try:
        logger.info("connecting...")
        schema = "mt" + tenant_name.lower()
        connection = client.connect(crate_host,
                                    username=crate_user,
                                    password=crate_password)
        cursor = connection.cursor()
        computeOccupancy(cursor, schema, start_date, end_date, delta, dry_run)
    except Exception as e:
        logger.error(str(e), exc_info=True)
        sys.exit(-2)
    finally:
        if cursor:
            cursor.close()
        if connection:
            connection.close()
        sys.exit()
Example #33
File: mines.py Project: amrbz/heroes
def battle(hero_id,
           mine_id,
           hero_health=None,
           hero_attack=None,
           mine_health=None,
           mine_attack=None):
    hero_army_sql = ''
    mine_army_sql = ''

    connection = client.connect(g.db)
    cursor = connection.cursor()

    cursor.execute("""
		SELECT id, attack, health
		FROM units
		ORDER BY attack
	""")
    units = cursor.fetchall()

    cursor.execute("""
		SELECT 
			h.army
		FROM heroes h
		WHERE id='{hero_id}'
	""".format(hero_id=hero_id))
    hero_data = cursor.fetchone()
    hero_army = hero_data[0]

    cursor.execute("""
		SELECT 
			m.army, 
			m.hero_id
		FROM mines m
		WHERE id='{mine_id}'
	""".format(mine_id=mine_id))
    mine_data = cursor.fetchone()
    mine_army = mine_data[0]
    mine_hero = mine_data[1]

    if not hero_health:
        hero_health = 0
        hero_attack = 0
        for hero_unit in hero_army:
            for unit in units:
                if unit[0] == hero_unit['unit_id']:
                    hero_attack += hero_unit['qty'] * int(unit[1])
                    hero_health += hero_unit['qty'] * int(unit[2])
                    break

    if not mine_health:
        mine_health = 0
        mine_attack = 0
        for mine_unit in mine_army:
            for unit in units:
                if unit[0] == mine_unit['unit_id']:
                    mine_attack += mine_unit['qty'] * int(unit[1])
                    mine_health += mine_unit['qty'] * int(unit[2])
                    break

    mine_health -= hero_attack
    hero_health -= mine_attack

    if hero_health <= 0 or mine_health <= 0:
        army = hero_army if hero_health > 0 else mine_army
        health_left = hero_health if hero_health > 0 else mine_health

        for unit in units:
            unit_health = unit[2]
            for army_unit in army:
                if army_unit['unit_id'] == unit[0]:
                    row_health = int(army_unit['qty']) * int(unit_health)
                    if health_left < row_health:
                        killed = int((row_health - health_left) * 1.0 /
                                     row_health * int(army_unit['qty']))
                        army[0] = {
                            'unit_id': army_unit['unit_id'],
                            'qty': army_unit['qty'] - killed
                        }
                        print 'LAT ROW', killed
                    else:
                        print '--> Deleted', units[0][0]
                        del army[0]
                    break

        army_sql = ''
        for unit in army:
            el = """
					{{
						unit_id='{unit_id}',
						qty={qty}
					}}
				""".format(unit_id=unit['unit_id'], qty=unit['qty'])
            army_sql = el if army_sql == '' else ','.join((army_sql, el))

        hero_army_sql = army_sql if hero_health > 0 else ''
        mine_army_sql = army_sql if mine_health > 0 else ''

        return hero_army_sql, mine_army_sql
    else:
        return battle(hero_id, mine_id, hero_health, hero_attack, mine_health,
                      mine_attack)
Example #34
def connectCrate():
    time.sleep(10)

    # 1- Create the connection
    try:
        connection = client.connect('crate:4200',
                                    timeout=5,
                                    error_trace=True,
                                    backoff_factor=0.2)
        print("CONNECT OK")
    except Exception as err:
        print("CONNECT ERROR: %s" % err)

    # 2- Get the cursor
    cursor = connection.cursor()

    # 3- Create a table named peliculas
    try:
        print(
            "CREATE TABLE: create table peliculas(id int, titulo text, ano int)"
        )
        cursor.execute("CREATE TABLE peliculas(id int, titulo text, ano int)")
        print("CREATE TABLE OK")
    except Exception as err:
        print("No se ha podido crear la tabla")

    # 4- Insert 10 movies into the peliculas table
    try:
        print("INSERTS:")
        cursor.execute("insert into peliculas values (1, 'Pinocho',1940)")
        print("\tinsert into peliculas values (1, 'Pinocho',1940)")

        cursor.execute("insert into peliculas values (2, 'Dumbo',1941)")
        print("\tinsert into peliculas values (2, 'Dumbo',1941)")

        cursor.execute("insert into peliculas values (3, 'Peter Pan',1953)")
        print("\tinsert into peliculas values (3, 'Peter Pan',1953)")

        cursor.execute(
            "insert into peliculas values (4, 'La dama y el vagabundo',1955)")
        print(
            "\tinsert into peliculas values (4, 'La dama y el vagabundo',1955)"
        )

        cursor.execute("insert into peliculas values (5,'101 dalmatas',1961)")
        print("\tinsert into peliculas values (5,'101 dalmatas',1961)")

        cursor.execute("insert into peliculas values (6, 'Mary Poppins',1964)")
        print("\tinsert into peliculas values (6, 'Mary Poppins',1964)")

        cursor.execute(
            "insert into peliculas values (7, 'El libro de la selva',1967)")
        print(
            "\tinsert into peliculas values (7, 'El libro de la selva',1967)")

        cursor.execute(
            "insert into peliculas values (8, 'Los tres mosqueteros',1993)")
        print(
            "\tinsert into peliculas values (8, 'Los tres mosqueteros',1993)")

        cursor.execute("insert into peliculas values (9, 'El rey leon',1994)")
        print("\tinsert into peliculas values (9, 'El rey leon',1994)")

        cursor.execute("insert into peliculas values (10, 'Toy Story',1995)")
        print("\tinsert into peliculas values (10, 'Toy Story',1995)")

        print("INSERT OK")
    except Exception as err:
        print("No se ha podido insertar datos en la tabla peliculas")

    # 5- Run a select to verify that the 10 movies were inserted correctly
    try:
        time.sleep(10)
        print("SELECT: select * from peliculas order by id")
        cursor.execute("select * from peliculas order by id")
        print("SELECT OK")
        print("Los elementos añadidos son: ", cursor.fetchall())
    except Exception as err:
        print("No se ha podido realizar la select")

    # Now create a second table to which a random number is added every 30 seconds.

    # 5. CREATE THE numeros TABLE

    try:
        print(
            "CREATE TABLE: create table numeros(numeros int, fecha text, hora text)"
        )
        cursor.execute(
            "CREATE TABLE numeros(numero int, fecha text, hora text)")
        print("TABLA CREADA OK")
    except Exception as err:
        print("No se ha podido crear la tabla")

    # 6. CHECK WHETHER THE FILE EXISTS, AND IF IT EXISTS ADD THE ROWS THAT ARE IN THE FILE

    if not path.exists('rows.csv'):
        print("Creando fichero rows.csv")
        with open('rows.csv', 'w', newline='') as csvfile:
            fieldnames = ['numero', 'fecha', 'hora']
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()

    # 7. EVERY 30 seconds, ADD A NEW ROW TO THE DATABASE AND WRITE THE DATA TO THE FILE.
    while True:
        numero = random.randint(1, 100)  # pick a random number from 1 to 100
        now = datetime.now()
        date = now.date()  # get today's date
        hora = now.time().__str__()  # get the exact time

        cursor.execute(
            "insert into numeros values (?, ?, ?)",
            (numero, str(date), hora))  # insert the row into the table
        texto = "".join((str(numero), ",", str(date), ",", hora))
        print(
            "insert into numeros values (",
            texto,
            ")",
        )

        # close and reopen the file each time so the data keeps getting updated
        file = open('rows.csv', 'a', newline='')  # open the file
        file.write(texto + "\n")  # write the line
        print("Linea añadida al fichero: ", texto)
        file.close()  # close the file

        time.sleep(30)  # wait 30 seconds
Example #35
from crate import client
import os
from zmq.utils import jsonapi

root = os.path.dirname(os.path.abspath(__file__))
with open('{}/crate_config'.format(root), 'r') as fp:
    data = jsonapi.loads(fp.read())

host = data['connection']['params']['host']
conn = client.connect(host, error_trace=True)

cursor = conn.cursor()

schema = 'test_import'
tables = [
    'analysis', 'analysis_string', 'datalogger', 'datalogger_string', 'device',
    'device_string', 'topic', 'meta', 'record'
]

for t in tables:
    try:
        if schema:
            full_table_name = "{schema}.{table}".format(schema=schema, table=t)
        else:
            full_table_name = t

        cursor.execute("DROP TABLE {}".format(full_table_name))
    except Exception as ex:
        print(ex.message)

cursor.close()
Example #36
 def get_connection(self):
     if self._connection is None:
         self._connection = crate_client.connect(self._host,
                                                 error_trace=True)
     return self._connection
Example #37
#!/usr/bin/env python
from crate import client

connection = client.connect("localhost:4200", username="******")
print(connection)
cursor = connection.cursor()
print(cursor)
cursor.execute("SELECT * FROM smartvalve;")
result = cursor.fetchone()
print(result)
Example #38
    def get_emulation(self, emulation_id):
        print '\n--> Fetching data for emulation id', emulation_id

        start_fetch_time = time.time()
        connection = client.connect(g.db)
        cursor = connection.cursor()

        cursor.execute("""
      SELECT 
        b.id,
        b.height, 
        b.adopted,
        b.provider, 
        b.node_id
      FROM blocks b, nodes n
      WHERE b.node_id=n.id
      AND n.emulation_id='{emulation_id}'
      ORDER BY adopted
    """.format(emulation_id=emulation_id))
        blocks = cursor.fetchall()

        # r = lambda: random.randint(0,255)
        blocks_data = []
        for block in blocks:
            cursor.execute("""
        SELECT 
          c.id,
          c.city,
          c.country,
          c.lat,
          c.lng,
          c.pop
        FROM nodes n
        LEFT JOIN cities c ON n.location_id = c.id
        WHERE n.id='{node_id}'
      """.format(node_id=block[4]))
            city = cursor.fetchone()

            blocks_data.append({
                'block': {
                    'id': block[0],
                    'height': block[1],
                    'adopted': block[2],
                    'provider': block[3],
                    'nodeId': block[4]
                },
                'city': {
                    'id': city[0],
                    'name': city[1],
                    'country': city[2],
                    'lat': city[3],
                    'lng': city[4],
                    'pop': city[5]
                }
            })

        cursor.execute("""
      SELECT 
        id, 
        nodes_qty,
        block_size,
        block_freq,
        bandwidth,
        ping,
        validation_time,
        description,
        iterations,
        with_forks,
        created
      FROM emulations
      WHERE id='{emulation_id}'
    """.format(emulation_id=emulation_id))
        emulation = cursor.fetchone()

        emulation_data = {
            'id': emulation[0],
            'nodesQty': emulation[1],
            'blockSize': emulation[2],
            'blockFreq': emulation[3],
            'bandwidth': emulation[4],
            'ping': emulation[5],
            'validationTime': emulation[6],
            'description': emulation[7],
            'iterations': emulation[8],
            'withForks': emulation[9],
            'created': emulation[10]
        }

        cursor.execute("""
      SELECT provider 
      FROM blocks b, nodes n
      WHERE b.node_id=n.id
      AND n.emulation_id='{emulation_id}'
      GROUP BY provider
    """.format(emulation_id=emulation_id))
        providers = cursor.fetchall()

        r = lambda: random.randint(0, 255)
        providers_data = {}
        for provider in providers:
            providers_data[str(provider[0])] = {}
            for height in xrange(emulation[8]):
                color = '%02X%02X%02X' % (r(), r(), r())
                providers_data[str(provider[0])][height] = color

        data = {
            'emulation': emulation_data,
            'blocks': blocks_data,
            'providers': providers_data
        }

        cursor.close()
        connection.close()

        print '--> Done in ', time.time() - start_fetch_time, 'sec'

        response = make_response(jsonify(data))
        response.status_code = 200
        return response
Example #39
    def generate_trees(self, emulation_id):
        print '--> Generating nodes trees for emulation {emulation_id}'.format(
            emulation_id=emulation_id)

        connection = client.connect(g.db)
        cursor = connection.cursor()

        cursor.execute("""
      SELECT 
        n.id,
        c.id,  
        c.lat,
        c.lng
      FROM nodes n, cities c
      WHERE n.location_id = c.id
      AND n.emulation_id='{emulation_id}'
    """.format(emulation_id=emulation_id))
        nodes = cursor.fetchall()

        print '--> Nodes qty:', len(nodes)

        data = []
        for index, node in enumerate(nodes):
            tree = []
            for branch in nodes:
                if node[0] == branch[0]: continue

                tree.append({
                    'node_id':
                    branch[0],
                    'distance':
                    distance.distance((node[2], node[3]),
                                      (branch[2], branch[3])).km
                })

            sorted_tree = sorted(tree, key=lambda x: x['distance'])
            data.append({'id': node[0], 'tree': sorted_tree})

            sql_tree = ''
            for el in sorted_tree:
                sql = """{{node_id='{node_id}',distance='{distance}'}}""".format(
                    node_id=el['node_id'], distance=el['distance'])

                if sql_tree == '':
                    sql_tree = sql
                else:
                    sql_tree = ','.join((sql_tree, sql))

            cursor.execute("""
        UPDATE nodes
        SET tree=[{tree}]
        WHERE id='{id}'
      """.format(id=node[0], tree=sql_tree))

            print '--> Updated tree for node {node_id}. Nodes left: {nodes_left}'.format(
                node_id=node[0], nodes_left=(len(nodes) - index - 1))

        cursor.close()
        connection.close()

        print '--> Generating nodes trees complete'
Example #40
File: mines.py Project: amrbz/heroes
def get_mines():

    connection = client.connect(g.db)
    cursor = connection.cursor()

    try:
        cursor.execute("""
			SELECT 
				m.id,
				m.name,
				m.img,
				m.location,
				m.production,
				m.army,
				m.supply,
				m.collected,
				h.id,
				h.name,
				c.id,
				c.name,
				c.img
			FROM mines m
			LEFT JOIN heroes h ON m.hero_id = h.id
			LEFT JOIN clans c ON h.clan_id = c.id
			ORDER BY m.name
		""")
        mines = cursor.fetchall()

        i = 0
        for mine in mines:
            units_descr = []
            for unit in mine[5]:
                cursor.execute("""
					SELECT 
						id, 
						name,
						img,
						health,
						attack,
						price
					FROM units
					WHERE id='{unit_id}'
				""".format(unit_id=unit['unit_id']))
                data = cursor.fetchone()
                units_descr.append({
                    'unit': {
                        'id': data[0],
                        'name': data[1],
                        'img': data[2],
                        'health': data[3],
                        'attack': data[4],
                        'price': data[5]
                    },
                    'qty': unit['qty']
                })
            mines[i][5] = units_descr
            i += 1

    except Exception, error:
        print 'ERROR: ', error
        return bad_request(error)
Example #41
    def init(self):
        connection = client.connect(g.db)
        cursor = connection.cursor()

        print '\n****** INIT EMULATION ******\n'

        emulation_start_time = time.time()
        emulation_id = str(uuid.uuid4())
        emulation_created = str(int(time.time()))

        print '--> Emulation ID:', emulation_id
        cursor.execute("""
      INSERT INTO emulations(
        id,
        nodes_qty,
        ping,
        validation_time,
        block_size,
        block_freq,
        bandwidth,
        iterations,
        with_forks,
        description,
        created
      ) VALUES(
        '{id}',
        '{nodes_qty}',
        '{ping}',
        '{validation_time}',
        '{block_size}',
        '{block_freq}',
        '{bandwidth}',
        '{iterations}',
        '{with_forks}',
        '{description}',
        '{created}')
    """.format(id=emulation_id,
               nodes_qty=request.json['nodesQty'],
               ping=request.json['ping'],
               validation_time=request.json['validationTime'],
               block_size=request.json['blockSize'],
               block_freq=request.json['blockFreq'],
               bandwidth=request.json['bandwidth'],
               iterations=int(request.json['iterations']),
               with_forks=request.json['withForks'],
               description=request.json['description'],
               created=emulation_created))

        cursor.execute("""REFRESH TABLE emulations""")

        nodes = Nodes()
        nodes_ids = nodes.generate_nodes(emulation_id)
        nodes.generate_trees(emulation_id)

        print '\n--> Starting mining'

        total_time = 0
        init_time = time.time()
        blocks_ids = []
        i = 0
        for height in xrange(int(request.json['iterations'])):
            for node_id in nodes_ids:
                i += 1
                blocks = Blocks(emulation_id)
                start_time = time.time()
                nonce = blocks.mine_block()
                end_time = time.time()

                mining_time = end_time - start_time
                total_time += mining_time

                # cursor.execute("""
                #   SELECT adopted FROM blocks
                #   WHERE node_id='{node_id}'
                #   ORDER BY height DESC
                #   LIMIT 1
                # """.format(
                #   node_id=node_id
                # ))
                # res = cursor.fetchone()
                # prev_block_adopted = None if not res else res[0]

                block_id = str(uuid.uuid4())
                cursor.execute("""
          INSERT INTO blocks(
            id,
            node_id,
            height,
            nonce,
            init_time,
            mining_time,
            provider
          ) VALUES(
            '{id}',
            '{node_id}',
            '{height}',
            '{nonce}',
            {init_time},
            '{mining_time}',
            '{provider}')
        """.format(id=block_id,
                   node_id=node_id,
                   height=height,
                   nonce=nonce,
                   init_time=float(init_time),
                   mining_time=mining_time,
                   provider=node_id))

                if height > 0:
                    blocks_ids.append(block_id)
                cursor.execute("""REFRESH TABLE blocks""")

                # if last_block:
                #   # print 'LAST BLOCK ADOPTED', last_block['adopted']
                #   pass
                # else:
                #   pass

                # node_blocks.append({
                #   'id': block_id,
                #   'adopted': (last_block['adopted']/1000 + mining_time*1000) if last_block else (init_time + mining_time),
                #   'provider': node_id
                # })

                # sql_blocks = ''
                # for node_block in node_blocks:
                #   # print node_block
                #   # print node_block['adopted'], mining_time
                #   sql = """{{id='{block_id}',adopted={adopted}, provider='{provider}'}}""".format(
                #     block_id=node_block['id'],
                #     adopted=node_block['adopted'],
                #     provider=node_block['provider']
                #   )

                #   if sql_blocks == '':
                #     sql_blocks = sql
                #   else:
                #     sql_blocks = ','.join((sql_blocks, sql))

                # cursor.execute("""
                #   UPDATE nodes
                #   SET blocks=[{sql_blocks}]
                #   WHERE id='{node_id}'
                # """.format(
                #   sql_blocks=sql_blocks,
                #   node_id=node_id
                # ))

                print '--> Nonce: ' + str(nonce) + ' ' + (
                    12 - len(str(nonce))) * '=' + ' Time: ' + str(
                        round(mining_time, 2)) + ' Hashrate: ' + str(
                            int(nonce / mining_time)) + ' Iteration: ' + str(i)

        avg_time = round(
            total_time / (len(nodes_ids) * int(request.json['iterations'])), 2)
        # scaling_factor = round(int(request.json['blockFreq'])*int(request.json['iterations'])/avg_time,2)
        scaling_factor = round(int(request.json['blockFreq']) / avg_time, 2)

        print '--> Mining complete. Avg time: ', avg_time, 'Scaling factor:', scaling_factor

        cursor.execute("""
      UPDATE emulations 
      SET scaling_factor='{scaling_factor}'
      WHERE id='{emulation_id}'
    """.format(scaling_factor=scaling_factor, emulation_id=emulation_id))

        # (prev_block_adopted/1000 + mining_time*1000) if prev_block_adopted else (init_time + mining_time),

        for node_id in nodes_ids:
            cursor.execute("""REFRESH TABLE blocks""")
            cursor.execute("""
        UPDATE blocks 
        SET mining_time=mining_time*{scaling_factor}*1000
        WHERE node_id='{node_id}'
      """.format(scaling_factor=scaling_factor, node_id=node_id))

            for height in xrange(int(request.json['iterations'])):
                cursor.execute("""REFRESH TABLE blocks""")
                cursor.execute("""
          SELECT id, init_time, mining_time, adopted
          FROM blocks
          WHERE node_id='{node_id}'
          AND height={height}
        """.format(node_id=node_id,
                   height=height - 1 if height > 0 else height))
                prev_block_data = cursor.fetchone()

                adopted = prev_block_data[3] + prev_block_data[
                    2] if height > 0 else prev_block_data[1] + prev_block_data[
                        2]

                cursor.execute("""
          UPDATE blocks
          SET adopted={adopted}
          WHERE node_id='{node_id}'
          AND height={height}
        """.format(node_id=node_id, height=height, adopted=adopted / 1000))

                cursor.execute("""
          UPDATE blocks
          SET init_time={adopted}
          WHERE node_id='{node_id}'
          AND height={height}
        """.format(node_id=node_id, height=height + 1, adopted=adopted / 1000))
                cursor.execute("""REFRESH TABLE blocks""")

        cursor.close()
        connection.close()

        print '\n--> Sending blocks\n'
        for height in xrange(int(request.json['iterations'])):
            blocks.send_blocks(height)

        data = {
            'id': emulation_id,
            'nodesQty': request.json['nodesQty'],
            'ping': request.json['ping'],
            'validationTime': request.json['validationTime'],
            'blockSize': request.json['blockSize'],
            'blockFreq': request.json['blockFreq'],
            'description': request.json['description'],
            'bandwidth': request.json['bandwidth'],
            'iterations': int(request.json['iterations']),
            'withForks': request.json['withForks'],
            'scalingFactor': scaling_factor,
            'created': emulation_created
        }

        print '\n--> Emulation complete in', int(time.time() -
                                                 emulation_start_time), 'sec\n'

        response = make_response(jsonify(data))
        response.status_code = 201
        return response
Example #42
File: mines.py Project: amrbz/heroes
def update_mine(mine_id):

    connection = client.connect(g.db)
    cursor = connection.cursor()

    data = request.get_json()
    army = data['army']

    army_sql = ''
    for unit in army:
        el = """
                {{
					unit_id='{unit_id}',
					qty={qty}
				}}
            """.format(unit_id=unit['unitId'], qty=unit['qty'])
        army_sql = el if army_sql == '' else ','.join((army_sql, el))

    try:

        cursor.execute("""
			UPDATE mines
			SET army=[{army}]
			WHERE id='{mine_id}'
		""".format(mine_id=mine_id, army=army_sql))
        cursor.execute("""REFRESH TABLE mines""")
        cursor.execute("""
			SELECT 
				m.id,
				m.name,
				m.img,
				m.location,
				m.production,
				m.army,
				m.supply,
				m.collected,
				h.id,
				h.name,
				c.id,
				c.name,
				c.img
			FROM mines m
			LEFT JOIN heroes h ON m.hero_id = h.id
			LEFT JOIN clans c ON h.clan_id = c.id
			WHERE m.id='{mine_id}'
		""".format(mine_id=mine_id))
        mine = cursor.fetchone()

        units_descr = []
        for unit in mine[5]:
            cursor.execute("""
				SELECT 
					id, 
					name,
					img,
					health,
					attack,
					price
				FROM units
				WHERE id='{unit_id}'
			""".format(unit_id=unit['unit_id']))
            data = cursor.fetchone()
            units_descr.append({
                'unit': {
                    'id': data[0],
                    'name': data[1],
                    'img': data[2],
                    'health': data[3],
                    'attack': data[4],
                    'price': data[5]
                },
                'qty': unit['qty']
            })
        mine[5] = units_descr

    except Exception, error:
        print 'ERROR: ', error
        return bad_request(error)
Example #43
 def _connect(self, database, **kwargs):
     return connect(**kwargs)
Example #44
    def _test_rolling_upgrade(self, path, nodes):
        """
        Test a rolling upgrade across given versions.
        An initial test cluster is started and then subsequently each node in
        the cluster is upgraded to the new version.
        After each upgraded node a SQL statement is executed that involves all
        nodes in the cluster, in order to check if communication between nodes
        is possible.
        """

        shards, replicas = (nodes, 1)
        expected_active_shards = shards + shards * replicas
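        # e.g. with 3 nodes and 1 replica: 3 primaries + 3 replicas = 6 active shards expected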

        cluster = self._new_cluster(path.from_version, nodes)
        cluster.start()
        with connect(cluster.node().http_url, error_trace=True) as conn:
            c = conn.cursor()
            c.execute(f'''
                CREATE TABLE doc.t1 (
                    type BYTE,
                    value FLOAT
                ) CLUSTERED INTO {shards} SHARDS
                WITH (number_of_replicas={replicas})
            ''')
            insert_data(conn, 'doc', 't1', 1000)
            c.execute(f'''
                CREATE TABLE doc.parted (
                    id INT,
                    value INT
                ) CLUSTERED INTO {shards} SHARDS
                PARTITIONED BY (id)
                WITH (number_of_replicas=0, "write.wait_for_active_shards"=1)
            ''')
            c.execute("INSERT INTO doc.parted (id, value) VALUES (1, 1)")
            # Add the shards of the new partition primaries
            expected_active_shards += shards

        for idx, node in enumerate(cluster):
            new_node = self.upgrade_node(node, path.to_version)
            cluster[idx] = new_node
            with connect(new_node.http_url, error_trace=True) as conn:
                c = conn.cursor()
                wait_for_active_shards(c, expected_active_shards)
                c.execute('''
                    SELECT type, AVG(value)
                    FROM doc.t1
                    GROUP BY type
                ''')
                c.fetchall()
                # Ensure aggregation with different intermediate input works; this was a regression for 4.1 <-> 4.2
                c.execute('''
                    SELECT type, count(distinct value)
                    FROM doc.t1
                    GROUP BY type
                ''')
                c.fetchall()

                # Ensure scalar symbols are working across versions
                c.execute('''
                    SELECT type, value + 1
                    FROM doc.t1
                    WHERE value > 1
                    LIMIT 1
                ''')
                c.fetchone()

                # Ensure that inserts, which will create a new partition, are working while upgrading
                c.execute("INSERT INTO doc.parted (id, value) VALUES (?, ?)", [idx + 10, idx + 10])
                # Add the shards of the new partition primaries
                expected_active_shards += shards

        # Finally validate that all shards (primaries and replicas) of all partitions are started
        # and writes into the partitioned table while upgrading were successful
        with connect(cluster.node().http_url, error_trace=True) as conn:
            c = conn.cursor()
            wait_for_active_shards(c, expected_active_shards)
            c.execute('''
                REFRESH TABLE doc.parted
            ''')
            c.execute('''
                SELECT count(*)
                FROM doc.parted
            ''')
            res = c.fetchone()
            self.assertEqual(res[0], nodes + 1)
Example #45
 def prepare(self):
     db = client.connect('127.0.0.1:4200')
     self.cursor = db.cursor()
Example #46
 def setup(self):
     url = "{}:{}".format(self.host, self.port)
     self.conn = client.connect([url], error_trace=True)
     self.cursor = self.conn.cursor()
Example #47
    def configure(self, configuration):
        """
        The expectation is that the configuration will have at least the
        following items:

        .. code:: python
        
            {
                "connection": {
                    "params": {
                        "host": "http://localhost:4200"
                    }
                }
            }
        
        :param configuration: 
        """
        connection = configuration.get("connection", {})
        tables_def, table_names = self.parse_table_def(
            configuration.get("tables_def"))

        self._data_table = table_names['data_table']
        self._topic_table = table_names['topics_table']

        params = connection.get("params", {})
        if not isinstance(params, dict):
            _log.error("Invalid params...must be a dictionary.")
            raise ValueError("params must be a dictionary.")

        schema = configuration.get("schema", "historian")
        host = params.get("host", None)
        error_trace = params.get("error_trace", False)

        if host is None:
            _log.error("Invalid configuration for params...must have host.")
            raise ValueError("invalid params['host'] value")
        elif host != self._host:
            _log.info("Changing host to {}".format(host))

        self._host = host

        client = CrateHistorian.get_client(host)
        if client is None:
            _log.error("Couldn't reach host: {}".format(host))
            raise ValueError("Connection to host not made!")

        self._schema = schema

        if error_trace != self._error_trace:
            _log.info("Changing error trace to: {}".format(error_trace))
            self._error_trace = error_trace

        # Close and reconnect the client or connect to different hosts.
        if self._client is not None:
            try:
                self._client.close()
            except:
                _log.warning("Closing of non-null client failed.")
            finally:
                self._client = None

        self._client = crate_client.connect(servers=self._host,
                                            error_trace=self._error_trace)

        # Attempt to create the schema
        create_schema(self._client, self._schema, table_names)

        # Cache topic and metadata
        self.load_topic_meta()
Example #48
File: mines.py Project: amrbz/heroes
def collect_mine():
    connection = client.connect(g.db)
    cursor = connection.cursor()

    data = request.get_json()
    hero_id = data['heroId']
    mine_id = data['mineId']

    try:
        cursor.execute("""
			SELECT 
				created,
				collected,
				production
			FROM mines
			WHERE id='{mine_id}'
		""".format(mine_id=mine_id))
        mine = cursor.fetchone()
        now = int(time.time())

        created = int(mine[0])
        collected = int(mine[1]) if mine[1] else now
        production = mine[2]

        diff = collected - created
        iterations = int(diff * 1.0 / production['freq'])
        balance = int(iterations * production['qty'])

        cursor.execute("""
			SELECT 
				balance
			FROM heroes
			WHERE id='{hero_id}'
		""".format(hero_id=hero_id))
        hero = cursor.fetchone()

        print 'BAL', hero[0]

        cursor.execute("""
			UPDATE heroes
			SET balance='{balance}'
			WHERE id='{hero_id}'
		""".format(hero_id=hero_id, balance=balance + int(hero[0])))
        cursor.execute("""REFRESH TABLE heroes""")

        cursor.execute("""
			UPDATE mines
			SET collected='{collected}'
			WHERE id='{mine_id}'
		""".format(mine_id=mine_id, collected=int(time.time())))
        cursor.execute("""REFRESH TABLE mines""")

        cursor.execute("""
			SELECT 
				h.id,
				h.name,
				h.img,
				h.balance,
				h.army,
				c.id,
				c.name,
				c.img
			FROM heroes h
			LEFT JOIN clans c ON c.id = h.clan_id
			WHERE h.id='{hero_id}'
		""".format(hero_id=hero_id))
        hero = cursor.fetchone()

        units_descr = []
        for unit in hero[4]:
            cursor.execute("""
				SELECT 
					id, 
					name,
					img,
					health,
					attack,
					price
				FROM units
				WHERE id='{unit_id}'
			""".format(unit_id=unit['unit_id']))
            data = cursor.fetchone()
            units_descr.append({
                'unit': {
                    'id': data[0],
                    'name': data[1],
                    'img': data[2],
                    'health': data[3],
                    'attack': data[4],
                    'price': data[5]
                },
                'qty': unit['qty']
            })
        hero[4] = units_descr

        cursor.execute("""
			SELECT 
				m.id,
				m.name,
				m.img,
				m.location,
				m.production,
				m.army,
				m.supply,
				m.collected
			FROM mines m
			WHERE m.id='{mine_id}'
		""".format(mine_id=mine_id))
        mine = cursor.fetchone()

        units_descr = []
        for unit in mine[5]:
            cursor.execute("""
				SELECT 
					id, 
					name,
					img,
					health,
					attack,
					price
				FROM units
				WHERE id='{unit_id}'
			""".format(unit_id=unit['unit_id']))
            data = cursor.fetchone()
            units_descr.append({
                'unit': {
                    'id': data[0],
                    'name': data[1],
                    'img': data[2],
                    'health': data[3],
                    'attack': data[4],
                    'price': data[5]
                },
                'qty': unit['qty']
            })
        mine[5] = units_descr

    except Exception, error:
        print 'ERROR: ', error
        return bad_request(error)
Example #49
def setup(*args):
    with connect(node.http_url) as conn:
        c = conn.cursor()
        c.execute('create table x.demo (id int, name string, country string) \
                  with (number_of_replicas = 0)')
        c.execute('create blob table blobtable with (number_of_replicas = 0)')
Example #50
File: main.py Project: SilPan/CratePY
from crate import client
from crate.client.cursor import Cursor

connection = client.connect("http://localhost:4200", username="******")

cursor = connection.cursor()  # type: Cursor

try:
    cursor.execute(
        "CREATE TABLE IF NOT EXISTS strings (str STRING, num INTEGER)")
except:
    print("could not create table")

cursor.execute("INSERT INTO strings (str, num) VALUES (?, ?)",
               ("Hello world!", 42))

cursor.execute("INSERT INTO strings (str, num) VALUES (?, ?)",
               ("hi there", 99))

cursor.execute("REFRESH TABLE strings")

cursor.execute("SELECT str FROM strings")

try:
    print(cursor.fetchall())
except:
    print("no results there :(")

cursor.execute("SELECT str FROM strings WHERE num > 50")

try:
Example #51
    def configure(self, configuration):
        """
        The expectation is that the configuration will have at least the
        following items:

        .. code:: python
        
            {
                "connection": {
                    "params": {
                        "host": "http://localhost:4200"
                    }
                }
            }
        
        :param configuration: 
        """
        connection = configuration.get("connection", {})
        params = connection.get("params", {})
        if not isinstance(params, dict):
            _log.error("Invalid params...must be a dictionary.")
            raise ValueError("params must be a dictionary.")

        schema = connection.get("schema", "historian")
        host = params.get("host", None)
        error_trace = params.get("error_trace", False)

        if host is None:
            _log.error("Invalid configuration for params...must have host.")
            raise ValueError("invalid params['host'] value")
        elif host != self._host:
            _log.info("Changing host to {}".format(host))

        self._host = host

        client = self.get_client(host)
        if client is None:
            _log.error("Couldn't reach host: {}".format(host))
            raise ValueError("Connection to host not made!")

        # Store class variables to be used later.
        if schema != self._schema:
            _log.info("Changing schema to: {}".format(schema))
            self._schema = schema

        if error_trace != self._error_trace:
            _log.info("Changing error trace to: {}".format(error_trace))
            self._error_trace = error_trace

        # Close and reconnect the client or connect to different hosts.
        if self._client is not None:
            try:
                self._client.close()
            except:
                _log.warning("Closing of non-null client failed.")
            finally:
                self._client = None

        self._client = crate_client.connect(servers=self._host,
                                            error_trace=self._error_trace)

        # Attempt to create the schema
        create_schema(self._client, self._schema)

        topics = self.get_topic_list()
        peers = self.vip.peerlist().get(timeout=5)
        if VOLTTRON_CENTRAL_PLATFORM in peers:
            topic_replace_map_vcp = self.vip.rpc.call(
                VOLTTRON_CENTRAL_PLATFORM,
                method="get_replace_map").get(timeout=5)
            # Always use VCP instead of local
            self._topic_replace_map = topic_replace_map_vcp

        for t in topics:
            self._topic_set.add(self.get_renamed_topic(t))

        self.vip.pubsub.subscribe(peer='pubsub',
                                  prefix="platform/config_updated",
                                  callback=self._peer_config_update)
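For reference, a hypothetical configuration payload matching the docstring and the keys read above; `schema` and `error_trace` are optional and fall back to the defaults used in configure():

# Hypothetical example payload, not taken from the original project.
example_configuration = {
    "connection": {
        "schema": "historian",
        "params": {
            "host": "http://localhost:4200",
            "error_trace": True,
        },
    },
}
# agent.configure(example_configuration)  # assumes a constructed historian agent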
Example #52
0
    def run(self):
        self.check()

        client = docker.from_env()
        # if no tag is given, all images under this registry will be pulled
        repos = self.images_to_pull[0]["repo"]

        for repo in repos:
            tags = self.images_to_pull[1][repo]
            for tag in tags:
                private_repo = private_registry + repo + suffix + ":" + tag

                if localVolume != "":
                    if os.path.exists(localVolume) == False:
                        os.makedirs(localVolume)

                print "start running: ", private_repo

                # create a random name
                runName = '%d' % (random.randint(1, 100000000))

                # get present time
                startTime = time.time()

                # get present net data
                cnetdata = get_net_data()

                # run images
                container = client.containers.create(
                    image=private_repo,
                    environment=runEnvironment,
                    ports=runPorts,
                    volumes=runVolumes,
                    working_dir=runWorking_dir,
                    command=runCommand,
                    name=runName,
                    detach=True,
                    cpu_period=100000,
                    cpu_quota=150000,
                    mem_limit="2g",
                )

                container.start()

                while True:
                    if time.time() - startTime > 600:
                        break

                    try:
                        connection = crate_client.connect(
                            "http://localhost:4200", username="******")
                        cursor = connection.cursor()
                        cursor.execute('''CREATE TABLE GAMES
                                   (ID INT PRIMARY KEY     NOT NULL,
                                   NAME STRING);''')
                        print "successfully create table games!"
                        cursor.execute(
                            """INSERT INTO GAMES (ID, NAME)
                                        VALUES (?, ?)""",
                            (1, "Three kingdoms"))
                        print "successfully insert!"
                        cursor.execute(
                            "UPDATE GAMES set NAME = 'Dota2' where ID=1;")
                        print "successfully update!"
                        cursor.execute("SELECT ID, NAME from GAMES;")
                        rows = cursor.fetchall()
                        print rows
                        cursor.execute("DELETE from GAMES where ID=1;")
                        print "successfully delete!"
                        connection.close()
                        break
                    except:
                        time.sleep(0.1)  # wait 100ms
                        pass

                # print run time
                finishTime = time.time() - startTime

                print "finished in ", finishTime, "s"

                container_path = os.path.join("/var/lib/gear/private",
                                              private_repo)
                local_data = subprocess.check_output(
                    ['du', '-ms', container_path]).split()[0].decode('utf-8')

                print "local data: ", local_data

                pull_data = get_net_data() - cnetdata

                print "pull data: ", pull_data

                print "\n"

                try:
                    container.kill()
                except:
                    print "kill fail!"
                    pass

                container.remove(force=True)
                # cmd = '%s kill %s' % ("docker", runName)
                # rc = os.system(cmd)
                # assert(rc == 0)

                # record the image and its Running time
                result.append([tag, finishTime, int(local_data), pull_data])

                if auto != True:
                    raw_input("Next?")
                else:
                    time.sleep(5)

                if localVolume != "":
                    shutil.rmtree(localVolume)
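The inner while loop above simply retries until the CrateDB instance inside the container accepts connections. A standalone, hedged Python 3 sketch of that wait-for-crate pattern (the function name and defaults are assumptions):

import time

from crate import client


def wait_for_crate(url="http://localhost:4200", timeout=600, interval=0.1):
    """Poll CrateDB until a statement succeeds or the timeout expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with client.connect(url) as conn:
                conn.cursor().execute("SELECT name FROM sys.cluster")
                return True
        except Exception:
            time.sleep(interval)
    return False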
Example #53
0
    def test_create_hdfs_repository(self):
        with connect(f'{crate.http_url}') as conn:
            c = conn.cursor()
            stmt = '''create repository "test-repo" type hdfs with (uri = ?, path = '/data')'''
            # okay if it doesn't raise an exception
            c.execute(stmt, ('hdfs://127.0.0.1:{nnport}'.format(nnport=NN_PORT),))
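A cleanup counterpart is not shown; a hedged sketch of a hypothetical companion test that removes the repository so the suite can be re-run against the same cluster:

    def test_drop_hdfs_repository(self):
        # Hypothetical companion test, not part of the original suite.
        with connect(f'{crate.http_url}') as conn:
            c = conn.cursor()
            c.execute('drop repository "test-repo"')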
Example #54
0
def get_connection():
    connection = connect(CRATE_HOST, error_trace=True)
    return connection
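With error_trace=True the server is asked to include full stack traces in error responses, which helps debugging. A hedged usage sketch of the helper:

conn = get_connection()
try:
    cursor = conn.cursor()
    cursor.execute("SELECT name FROM sys.cluster")
    print(cursor.fetchone())
finally:
    conn.close()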
Example #55
0
from crate import client

tables = [
    'etmotorphysicaltest', 'etdeviceSM', 'etdeviceSE', 'etcontroltest',
    'etparticipant', 'etuser', 'etanswer', 'etquestionnaire'
]
mysql_prefix = 'mtmatest'

connection = client.connect()
cursor = connection.cursor()

# FIWARE adaptation starts here
# Global variables
refTests = {}
refDevices = {}
refTestsSensors = {}
refSensors = {}
refStartedTest = {}
refEndedTest = {}
list_of_answers = {}

# etmotorphysicaltest
dbConfiguration = 0
dbEntityId_test = 3
dbRefDevice = 6
dbRefUser = 7
dbDateTestEnded = 1
dbDateTestStarted = 2
dbTestType = 8

# etdevice
Example #56
0
def refresh(table):
    with connect(crate_host) as conn:
        cursor = conn.cursor()
        cursor.execute("refresh table %s" % table)
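CrateDB only makes new writes visible to queries after a table refresh (either the periodic automatic one or an explicit REFRESH TABLE), so tests typically call a helper like this right after inserting fixture rows. A hedged usage sketch; the `events` table is hypothetical:

with connect(crate_host) as conn:
    c = conn.cursor()
    c.execute("insert into events (id) values (?)", ("evt-1",))
refresh("events")  # the freshly inserted row is now visible to SELECTs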
Example #57
0
    def send_blocks(self, height, miners=None):

        connection = client.connect(g.db)
        cursor = connection.cursor()

        if miners == None:
            cursor.execute("""REFRESH TABLE nodes""")
            cursor.execute("""
        SELECT n.id, n.tree
        FROM nodes n
        LEFT JOIN blocks b ON n.id=b.node_id
        WHERE emulation_id='{emulation_id}'
        AND height={height}
        ORDER BY b.mining_time
      """.format(emulation_id=self.emulation_id, height=height))
            miners = cursor.fetchall()
        miner = miners.pop(0)

        cursor.execute("""REFRESH TABLE blocks""")
        cursor.execute("""
      SELECT id, adopted
      FROM blocks
      WHERE node_id='{miner_id}'
      AND height='{height}'
    """.format(miner_id=miner[0], height=height))
        miner_block = cursor.fetchone()

        print '--> Init Time:', miner_block[1], 'Height', height, '\n'
        for node in miner[1]:
            node_id = node['node_id']
            distance = 10 if node['distance'] == 0 else node['distance']
            node_blocks = []

            cursor.execute("""
        SELECT count(provider)
        FROM blocks
        WHERE height='{height}'
        AND adopted<={adopted}
        GROUP BY provider
      """.format(height=height, adopted=miner_block[1]))
            providers = cursor.fetchone()[0]

            # Oversubscription = max(1, fork_count * B_size / Network Bandwidth)

            network_factor = max(1,
                                 providers * self.block_size / self.bandwidth)
            # diff = (network_factor*self.block_size/self.bandwidth + distance*self.ping + self.validation_time) * 1000

            # network_factor = min(1, self.bandwidth/(self.block_size*providers))
            # network_factor = min(1, self.bandwidth/(self.block_size*providers))
            # print '--> Providers:', providers, 'Network factor:', network_factor

            # diff = ((self.block_size/self.bandwidth+distance/self.ping)/network_factor + self.validation_time) * 1000
            # print '--> DIFF:', diff, 'Network factor', network_factor
            # diff = (distance * self.ping/network_factor + self.validation_time) * 1000
            # diff = (distance * self.ping/(float(random.randint(40,100))/100) + self.validation_time) * 1000
            # print '--> Adopted:  ', int(adopted), 'Diff:', diff

            # diff = (network_factor*self.block_size/self.bandwidth + distance*self.ping + self.validation_time) * 1000
            propagation_delay = network_factor * self.block_size / self.bandwidth
            transmission_delay = distance * self.ping
            overall_delay = propagation_delay + transmission_delay + self.validation_time
            diff = overall_delay * 1000

            # print("propagation, transmission and validation: {:.1f} ({}%), {:.1f} ({}%), {:.1f} ({}%)".format(
            #     propagation_delay, int(100 * propagation_delay / overall_delay),
            #     transmission_delay, int(100 * transmission_delay / overall_delay),
            #     self.validation_time, int(100 * self.validation_time / overall_delay),
            # ))

            adopted = (miner_block[1] + diff)

            # cursor.execute("""REFRESH TABLE nodes""")
            # for height in xrange(int(request.json['iterations'])):
            cursor.execute("""
        SELECT adopted
        FROM blocks
        WHERE node_id='{node_id}'
        AND height={height}
        ORDER BY node_id
      """.format(node_id=node_id, height=height))
            saved_adopted_time = cursor.fetchone()[0]

            if adopted < saved_adopted_time:
                print '--> Updating block adoption time from', saved_adopted_time, 'to', int(
                    adopted)
                cursor.execute("""REFRESH TABLE blocks""")
                cursor.execute("""
          UPDATE blocks
          SET 
            adopted={adopted},
            provider='{provider}'
          WHERE node_id='{node_id}'
          AND height={height} 
        """.format(adopted=adopted / 1000,
                   provider=miner[0],
                   node_id=node_id,
                   height=height))
                cursor.execute("""
          UPDATE blocks
          SET init_time={adopted}
          WHERE node_id='{node_id}'
          AND height={height} 
        """.format(adopted=adopted / 1000, node_id=node_id, height=height + 1))

        print '-' * 12

        # for block in blocks:
        #   cursor.execute("""
        #     SELECT id, height
        #     FROM blocks
        #     WHERE id='{block_id}'
        #   """.format(
        #     block_id=block['id']
        #   ))
        #   block_data = cursor.fetchone()

        #   if height == block_data[1]:
        #     if miner_block['adopted'] + diff < block['adopted']:
        #       print 'UPDATE NEEDED:', (miner_block['adopted'] + diff), block['adopted']
        #       # UPDATE BLOCK ADOPTION TIME WHERE SAME HEIGHT
        #       # DELETE NEXT BLOCKS

        # 1.Find block by key height
        # 2.Check if adopted time > than sended clock creation time +  diff
        # 3.

        # print '--> TIME', self.init_time, miner_block[1], self.init_time + miner_block[1]

        # if not blocks:
        #   node_blocks.append({
        #     'id': miner_block[0],
        #     'adopted': float(self.init_time + miner_block[1]),
        #     'provider': miner[0]
        #   })
        # else:
        #   last_block = blocks[-1]
        #   tm = self.init_time + miner_block[1]
        #   # if last_block['adopted'] < tm:

        # sql_blocks = ''
        # for el in node_blocks:
        #   sql = """{{id='{block_id}',adopted={adopted}, provider='{provider}'}}""".format(
        #     block_id=el['id'],
        #     adopted=el['adopted'],
        #     provider=el['provider']
        #   )

        #   if sql_blocks == '':
        #     sql_blocks = sql
        #   else:
        #     sql_blocks = ','.join((sql_blocks, sql))

        # if not blocks and sql_blocks:
        #   print '--> Updating'
        #   cursor.execute("""
        #     UPDATE nodes
        #     SET blocks=[{blocks}]
        #     WHERE id='{node_id}'
        #   """.format(
        #     node_id=node_id,
        #     blocks=sql_blocks
        #   ))

        cursor.close()
        connection.close()

        if len(miners) == 0:
            return True

        self.send_blocks(height, miners)
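The adoption-time update above is driven by a simple delay model: an oversubscription factor scales the propagation delay, distance times ping gives the transmission delay, a fixed validation time is added, and the total is converted to milliseconds. A standalone, hedged sketch of that model (parameter names are assumptions; block_size and bandwidth share units, ping and validation_time are in seconds):

def block_delay_ms(providers, block_size, bandwidth, distance, ping, validation_time):
    # Oversubscription: at least 1, grows when several providers push blocks
    # of the same height at once.
    network_factor = max(1, providers * block_size / bandwidth)
    propagation_delay = network_factor * block_size / bandwidth
    transmission_delay = distance * ping
    overall_delay = propagation_delay + transmission_delay + validation_time
    return overall_delay * 1000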
Example #58
0
File: mines.py Project: amrbz/heroes
def attack_mine():

    connection = client.connect(g.db)
    cursor = connection.cursor()

    data = request.get_json()
    hero_id = data['heroId']
    mine_id = data['mineId']

    if not hero_id:
        return bad_request('Hero ID is not provided')

    if not mine_id:
        return bad_request('Mine ID is not provided')

    try:
        hero_army_sql, mine_army_sql = battle(hero_id, mine_id)

        cursor.execute("""
			UPDATE heroes
			SET army=[{army}]
			WHERE id='{hero_id}'
		""".format(army=hero_army_sql, hero_id=hero_id))
        cursor.execute("""REFRESH TABLE heroes""")

        if mine_army_sql == '':
            cursor.execute("""
				UPDATE mines
				SET 
					army=[{army}],
					hero_id='{hero_id}'
				WHERE id='{mine_id}'
			""".format(army=mine_army_sql, mine_id=mine_id, hero_id=hero_id))
        else:
            cursor.execute("""
				UPDATE mines
				SET 
					army=[{army}]
				WHERE id='{mine_id}'
			""".format(army=mine_army_sql, mine_id=mine_id))
        cursor.execute("""REFRESH TABLE mines""")

        cursor.execute("""
			SELECT 
				h.id,
				h.name,
				h.img,
				h.balance,
				h.army,
				c.id,
				c.name,
				c.img
			FROM heroes h
			LEFT JOIN clans c ON c.id = h.clan_id
			WHERE h.id='{hero_id}'
		""".format(hero_id=hero_id))
        hero = cursor.fetchone()

        units_descr = []
        for unit in hero[4]:
            cursor.execute("""
				SELECT 
					id, 
					name,
					img,
					health,
					attack,
					price
				FROM units
				WHERE id='{unit_id}'
			""".format(unit_id=unit['unit_id']))
            data = cursor.fetchone()
            units_descr.append({
                'unit': {
                    'id': data[0],
                    'name': data[1],
                    'img': data[2],
                    'health': data[3],
                    'attack': data[4],
                    'price': data[5]
                },
                'qty': unit['qty']
            })
        hero[4] = units_descr

        cursor.execute("""
			SELECT 
				m.id,
				m.name,
				m.img,
				m.location,
				m.production,
				m.army,
				m.supply,
				m.collected
			FROM mines m
			WHERE m.id='{mine_id}'
		""".format(mine_id=mine_id))
        mine = cursor.fetchone()

        units_descr = []
        for unit in mine[5]:
            cursor.execute("""
				SELECT 
					id, 
					name,
					img,
					health,
					attack,
					price
				FROM units
				WHERE id='{unit_id}'
			""".format(unit_id=unit['unit_id']))
            data = cursor.fetchone()
            units_descr.append({
                'unit': {
                    'id': data[0],
                    'name': data[1],
                    'img': data[2],
                    'health': data[3],
                    'attack': data[4],
                    'price': data[5]
                },
                'qty': unit['qty']
            })
        mine[5] = units_descr

    except Exception, error:
        print 'ERROR: ', error
        return bad_request(error)
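The UPDATE statements above splice pre-built SQL fragments for the army column into the query string. Assuming `army` is an array-of-object column and `hero_army` is a plain list of dicts (both assumptions, not taken from the original project), a hedged alternative is to pass the list as a parameter and let the client serialize it:

# Hedged sketch, assuming hero_army is a list of {'unit_id': ..., 'qty': ...}
# dicts and heroes.army is an array(object) column.
cursor.execute("""
    UPDATE heroes
    SET army = ?
    WHERE id = ?
""", (hero_army, hero_id))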
Example #59
0
    def get_new_connection(self, conn_params):
        """Opens a connection to the database."""
        return Database.connect(**conn_params)
Example #60
0
except ImportError:
    print("Crate not found, please install the crate package.")

## import crate client interface
from crate import client
# query function definitions
from queryFunctions import createTablesQuery
from queryFunctions import insertDataQuery
# parse function definitions
from auxiliaryParseFunctions import columnToBool
from auxiliaryParseFunctions import columnToFloat
from auxiliaryParseFunctions import parseStops

## connect to our instance of the database
try:
    connection = client.connect("http://localhost:4200/")
except ConnectionError:
    print("Unable to connect to crate node")

cursor = connection.cursor()

## After looking at the data, we are going to make the following assumptions:
##      all data between quotation marks is going to be of type text
##      all numeric data is going to be of type bigint unless
##      it contains a comma, in which case it will be of type double precision
##      I'm facing issues with matching boolean types, so I will load them as text

print("creating tables \n")
createTablesQuery(cursor)

print("... \n")