def test_no_explosion(self):
    print "Sibling no explosion"
    client1 = riak.RiakClient(pb_port=12101, protocol='pbc')
    bucket1 = client1.bucket('dot', bucket_type='dots')
    client2 = riak.RiakClient(pb_port=12101, protocol='pbc')
    bucket2 = client2.bucket('dot', bucket_type='dots')
    key1 = bucket1.new("a", "Bob")
    self.gkey1 = key1.store()
    for i in self.gkey1.siblings:
        print "Y " + i.data
    key2 = bucket2.new("a", "Sue")
    self.gkey2 = key2.store()
    for i in self.gkey2.siblings:
        print "X " + i.data
    self.gkey1.siblings = []
    self.gkey1.data = "Rita"
    self.gkey1.store()
    self.gkey2.siblings = []
    self.gkey2.data = "Michelle"
    self.gkey2.store()
    sleep(5)
    obj = bucket1.get("a")
    for i in obj.siblings:
        print i.data
    obj.delete()
def test_siblings():
    # Set up the bucket, clear any existing object...
    client = riak.RiakClient(HOST, PORT)
    bucket = client.bucket('multiBucket')
    bucket.set_allow_multiples(True)
    obj = bucket.get('foo')
    obj.delete()
    # Store the same object multiple times...
    for i in range(5):
        client = riak.RiakClient(HOST, PORT)
        bucket = client.bucket('multiBucket')
        obj = bucket.new('foo', randint())
        obj.store()
    # Make sure the object has 5 siblings...
    assert obj.has_siblings()
    assert obj.get_sibling_count() == 5
    # Test get_sibling()/get_siblings()...
    siblings = obj.get_siblings()
    obj3 = obj.get_sibling(3)
    assert siblings[3].get_data() == obj3.get_data()
    # Resolve the conflict, and then do a get...
    obj3 = obj.get_sibling(3)
    obj3.store()
    obj.reload()
    assert obj.get_data() == obj3.get_data()
    # Clean up for next test...
    obj.delete()
def main():
    import riak
    client = riak.RiakClient(port=8091)
    old_bucket = client.bucket('counters')
    old_bucket.set_allow_multiples(True)
    obj = old_bucket.get('foo')
    obj.delete()
    obj.reload()
    assert obj.get_data() is None
    assert obj.exists() == False

    bucket = Bucket(client, 'counters', count_resolve, retries=5)
    assert bucket.get('foo') is None
    assert bucket.modify('foo', count_modify(1)) == {'step': 1, 'last': 0}
    assert bucket.get('foo') == {'step': 1, 'last': 0}

    # create a bunch of siblings
    obj1 = riak.RiakClient(port=8091).bucket('counters').get('foo')
    obj2 = riak.RiakClient(port=8091).bucket('counters').get('foo')
    obj3 = riak.RiakClient(port=8091).bucket('counters').get('foo')
    obj1.set_data({'last': 1, 'step': 1}).store()   # +1
    obj2.set_data({'last': 1, 'step': 99}).store()  # +99
    obj3.set_data({'last': 1, 'step': 7}).store()   # +7

    obj.reload()
    assert obj.has_siblings()
    assert obj.get_sibling_count() == 3
    assert bucket.get('foo') == {'step': 107, 'last': 1}

    obj5 = riak.RiakClient(port=8091).bucket('counters').get('foo')
    assert obj5.has_siblings() == False
    assert obj5.get_data() == {'step': 107, 'last': 1}
def setUp(self):
    self.key = self.bucket = "test-timak"
    self.c1 = riak.RiakClient()
    self.c2 = riak.RiakClient()
    self.b1 = self.c1.bucket(self.bucket)
    self.b2 = self.c2.bucket(self.bucket)
    self.c1.bucket(self.bucket).set_allow_multiples(True)
def deleteKey(delBucket, delKey, riakIP):
    riak.RiakClient(protocol='pbc', host=riakIP,
                    pb_port=8087).bucket(delBucket).delete(delKey)
    if riak.RiakClient(protocol='pbc', host=riakIP,
                       pb_port=8087).bucket(delBucket).get(delKey).data is None:
        print 'Successful delete: %s' % delKey
    else:
        print 'Failed delete: %s' % delKey
    return
def _cw_client_(self):
    # This might be hacky, but it does stuff to the connection
    # during the metaclass setup, which sucks.
    if self._cw_client is None or self._cw_backend != BACKEND:
        host = settings.RIAK['default']['HOST']
        if BACKEND == 'PBC':
            self._cw_client = riak.RiakClient(
                host=host, port=8087,
                transport_class=riak.RiakPbcTransport)
        else:
            self._cw_client = riak.RiakClient(host=host)
        self._cw_backend = BACKEND
    return self._cw_client
def __init__(self, image_name, config, metadata_raw=None):
    '''
    Initialize the instance with the specified metadata contents.
    The metadata is JSON-format data generated by the
    UKAIMetadataCreate() function or the flush() method of this class.

    image_name: the name of a virtual disk image stored in an
        object storage.
    metadata_raw: raw metadata which is specified when a new
        UKAIMetadata object is inserted.

    Return values: this function does not return any values.
    '''
    self._config = config
    if metadata_raw is not None:
        self._metadata = metadata_raw
    else:
        # Fetch the metadata for this image from the metadata server.
        self._metadata = None
        client = riak.RiakClient(host=self._config.get('metadata_server'))
        bucket = client.bucket(UKAI_METADATA_BUCKET)
        self._metadata = bucket.get(image_name).data
    self._lock = []
    for idx in range(0, len(self.blocks)):
        self._lock.append(threading.Lock())
def __init__(self, riak_url):
    url = urlparse(riak_url)
    self.client = riak.RiakClient(protocol=url.scheme,
                                  host=url.hostname,
                                  pb_port=url.port,
                                  http_port=url.port)
    self.executor = ThreadPoolExecutor(MAX_WORKERS)
def main():
    if len(sys.argv) == 1:
        inf = sys.stdin
        port = 8087
    else:
        inf = sys.stdin
        port = int(sys.argv[1])
    i = 0
    c = riak.RiakClient(port=port, transport_class=riak.RiakPbcTransport)
    b = c.bucket('tweets')
    for line in inf:
        try:
            j = json.loads(line)
            ts = datetime.datetime.strptime(j['created_at'],
                                            '%a %b %d %H:%M:%S +0000 %Y')
            j['created_at'] = ts.strftime('%Y%m%dT%H%M%S')
            (b.new(str(i), data=j)).store()
        except ValueError as strerror:
            print "Couldn't decode: %s" % line
            print "Error: ", strerror
        i += 1
        if i % 10 == 0:
            sys.stdout.write('.')
            sys.stdout.flush()
    print 'OK'
def main():
    client = riak.RiakClient()
    MyObjectBucket = client.bucket('MyObjects')
    choice = -1
    while int(choice) != 0:
        choice = menu()
        print()
        if int(choice) == 1:
            print('************* Create and fill *************\n')
            create_and_fill(MyObjectBucket)
            print()
        elif int(choice) == 2:
            print('***************** Change *****************\n')
            change(MyObjectBucket)
            print()
        elif int(choice) == 3:
            print('***************** Delete *****************\n')
            delete(MyObjectBucket)
            print()
        elif int(choice) == 4:
            print('****************** Print ******************\n')
            print_data(MyObjectBucket)
            print()
        elif int(choice) == 0:
            print('*************** Exiting... ***************\n')
            break
        else:
            print('Enter a number from 0 to 4')
            print()
def __init__(self, riakHost='127.0.0.1', riakPort=8098):
    """Connect to Riak over HTTP and open the rainfall bucket."""
    self.riakClient = riak.RiakClient(host=riakHost, http_port=riakPort)
    self.bucket = self.riakClient.bucket(RAINFALL_BUCKET)
    self.indexFactory = RainfallReadingIndexFactory()
    self.logger = logging.getLogger(__name__)
def post(self):
    post = json.loads(self.request.body)
    MyClient = riak.RiakClient(protocol=RIAK_PROTOCOL,
                               http_port=RIAK_HTTP_PORT,
                               host=RIAK_HOST)
    MyAdminBucket = MyClient.bucket(ADMIN_BUCKET_NAME)

    # Look up the requested connection by slug.
    connection = None
    for c in MyAdminBucket.get('connection').data:
        if c['slug'] == post.get('connection', None):
            connection = c['connection']

    sql = """SELECT * FROM ({}) AS CUBE LIMIT 10;""".format(
        post.get('sql', None))

    e = create_engine(connection)
    connection = e.connect()
    try:
        resoverall = connection.execute(text(sql))
    except Exception:
        self.write({'sql': '', 'msg': 'Error!'})
        self.finish()
        return

    df = DataFrame(resoverall.fetchall())
    if df.empty:
        self.finish()
        return
    df.columns = resoverall.keys()
    df.head()

    self.write({'sql': df.to_json(orient='records'), 'msg': 'Success!'})
    self.finish()
def install_slave(alsi, master_ip, our_ip):
    # Wait until the master node has completed its install and is alive
    # before proceeding.
    alsi.milestone("Waiting for master to come alive.")
    _block_for_master_up(alsi, master_ip)
    alsi.milestone(
        "Master is alive. We are a slave node. Joining master. slave:%s master:%s"
        % (our_ip, master_ip))
    alsi.runcmd(
        'sudo riak-admin cluster join riak@{master}'.format(master=master_ip))

    alsi.milestone("Updating install state in riak for ourselves (%s)" % our_ip)
    client = riak.RiakClient(protocol='pbc', nodes=[{'host': master_ip}])
    # noinspection PyUnresolvedReferences
    client.resolver = riak.resolver.last_written_resolver
    b = client.bucket(INSTALLSTATE_BUCKET)
    completion_value = b.new(key=our_ip, data=time.asctime())
    completion_value.store()

    _block_for_master_completion(alsi, master_ip)
    _install_tunedconfigs(alsi)
    _increase_jetty_concurrency(alsi)
    _restart_riak(alsi)
    alsi.milestone("Done.")
def quickDeleteAllKeys(delBucket, riakIP):
    for keys in riak.RiakClient(protocol='pbc', host=riakIP,
                                pb_port=8087).bucket(delBucket).stream_keys():
        for delKey in keys:
            quickDeleteKey(delBucket, delKey, riakIP)
    print 'Done'
    return
def __init__(self, *args, **kwargs):
    import riak
    self.riak = riak
    RC = settings.RIAK_TILE_CONNECTION
    self.client = riak.RiakClient(**RC)
    self.bucket = self.client.bucket(settings.RIAK_TILE_BUCKET)
    self.index = self.client.bucket(settings.RIAK_TILE_BUCKET + '_index')
def readdb(key):
    r = riak.RiakClient(pb_port=8087, protocol='pbc')
    b = r.bucket(bucket)
    # print 'readdb data' + str(b.get(key).data)
    # return b.get(key).get_data()
    obj = b.get(key)  # fetch once and reuse the result
    print obj.data
    return obj.data
def __init__(self, options, columns):
    super(RiakFDW, self).__init__(options, columns)
    log_to_postgres('options: %s' % options, DEBUG)
    log_to_postgres('columns: %s' % columns, DEBUG)
    if 'nodes' in options:
        nodes_option = options['nodes']
    else:
        nodes_option = 'http://127.0.0.1:8098,pbc://127.0.0.1:8087'
        log_to_postgres(
            'Using Default host: 127.0.0.1 http_port:8098 pb_port:8087')
    self.nodes = []
    for parsed in map(lambda x: urlparse(x), nodes_option.split(',')):
        host = parsed.netloc.split(':')[0]
        if parsed.scheme == 'pbc':
            self.nodes.append({'host': host, 'pb_port': parsed.port})
        else:
            self.nodes.append({'host': host, 'http_port': parsed.port})
    self.client = r.RiakClient(nodes=self.nodes)
    self.client.set_decoder("image/jpeg",
                            lambda data: Image.open(StringIO(data)))
    if 'bucket' in options:
        self.bucket = options['bucket']
    else:
        log_to_postgres('bucket parameter is required.', ERROR)
    self.columns = columns
    self.row_id_column = columns.keys()[0]
def test_multiple_bucket_save(self):
    c = riak.RiakClient()
    m1 = TestMultipleBuckets(s="m1")
    m1.save()
    b1 = c.bucket("test_multibucket1")
    self.assertTrue(b1.get(m1.key).exists())
    m2 = TestMultipleBuckets(s="m2")
    m2.save(bucket="test_multibucket2")
    b2 = c.bucket("test_multibucket2")
    self.assertTrue(b2.get(m2.key).exists())
    m1key = m1.key
    del m1
    self.assertTrue(m1key not in TestMultipleBuckets.instances)
    m2key = m2.key
    del m2
    self.assertTrue(m2key not in TestMultipleBuckets.instances)
    m1 = TestMultipleBuckets.load(m1key, bucket="test_multibucket1")
    self.assertEquals("m1", m1.s)
    m2 = TestMultipleBuckets.load(m2key, bucket="test_multibucket2")
    self.assertEquals("m2", m2.s)
    m1.delete()
    m2.delete()
def test_objLinks(self):
    obj = SimpleModel("o1")
    obj2 = SimpleModel("o2")
    obj3 = SimpleModel("o3")
    self.assertEquals(set(), obj.links())
    obj.addLink(obj2)
    self.assertEquals({(obj2, None)}, obj.links())
    obj.addLink(obj3, "tag")
    self.assertEquals({(obj2, None), (obj3, "tag")}, obj.links())
    obj.addLink(obj3, "tag2")
    self.assertEquals({(obj2, None), (obj3, "tag"), (obj3, "tag2")},
                      obj.links())
    obj.removeLink(obj2)
    self.assertEquals({(obj3, "tag"), (obj3, "tag2")}, obj.links())
    obj.removeLink(obj3, "tag2")
    self.assertEquals({(obj3, "tag")}, obj.links())
    obj.removeLink(obj3, "tag")
    self.assertEquals(set(), obj.links())
    obj.setLinks({(obj2, "lol"), (obj3, None)})
    self.assertEquals({(obj2, "lol"), (obj3, None)}, obj.links())
    c = riak.RiakClient()
    b = c.bucket("test")
    links = sorted(obj.links(b), key=lambda x: x.get_key())
    self.assertEquals("o2", links[0].get_key())
    self.assertEquals("o3", links[1].get_key())
def main():
    description = 'Generate size statistics for all collections in all DBs in MongoDB'
    global args
    parser = ArgumentParser(description=description)
    parser.add_argument(
        '-H', '--host', default='wwwdev-a-1.qts.melodis.com',
        help="mongodb host, e.g. 'api.foo.com' "
             "(default: 'wwwdev-a-1.qts.melodis.com')")
    parser.add_argument('-P', '--port', type=int, default=8098,
                        help="riak port if not the default 8098")
    parser.add_argument('-d', '--database', default='',
                        help="database (default is all)")
    args = parser.parse_args()

    client = riak.RiakClient(host='wwwdev-a-1.qts.melodis.com', port=8098)
    bucket = client.bucket('restaurants')
    mykeys = bucket.get_keys()
    print "Number of keys: %d" % (len(mykeys))

    # Map phase is JavaScript executed inside Riak.
    query = client.add('places')
    query.map(
        "function(v) { var data = JSON.parse(v.values[0].data); "
        "if (data._id != null) { return [[v.key, data]]; } return []; }")
    for result in query.run():
        print "%s - %s" % (result[0], result[1])
def setup(self, collection):
    """
    `setup()` handles the necessary setup for Riak: it creates a Riak
    client instance and selects the bucket to use for reads and writes.

    :param collection: name of the Riak bucket to read from and write to
    :return:
    """
    port = RIAK_PORT
    riak_servers = [
        RIAK_1,
        RIAK_2,
        RIAK_3,
    ]
    riak_nodes = []
    for server in riak_servers:
        riak_nodes.append({'host': str(server), 'http_port': port})
    self.client = riak.RiakClient(nodes=riak_nodes)
    self.bucket = self.client.bucket(collection)
def __init__(self):
    if Riak_Connector.__rc is not None:
        raise Exception("This class is a singleton!")
    else:
        self._client = riak.RiakClient(host=config['Riak']['Ip'])
        Riak_Connector.__rc = self
def get():
    """Look up the MSISDN given on the command line in the Riak 'hlr'
    bucket and print its IMSI, authorisation state and last-seen BTS."""
    riak_client = riak.RiakClient(host='10.23.0.3', pb_port=8087,
                                  protocol='pbc')
    bucket = riak_client.bucket('hlr')
    try:
        msisdn = sys.argv[1]
        imsi = bucket.get_index('msisdn_bin', msisdn).results
        if not len(imsi):
            print '\033[93mExtension %s not found\033[0m' % (msisdn)
        else:
            print '----------------------------------------------------'
            print 'Extension: \033[95m%s\033[0m-%s-\033[92m%s\033[0m ' \
                  'has IMSI \033[96m%s\033[0m' % (msisdn[:5], msisdn[5:6],
                                                  msisdn[6:], imsi[0])
            data = bucket.get(imsi[0]).data
            if data['authorized']:
                print "Extension: Authorised"
            else:
                print "Extension: \033[91mNOT\033[0m Authorised"
            try:
                host = socket.gethostbyaddr(data['home_bts'])
                home = host[0]
                host = socket.gethostbyaddr(data['current_bts'])
                current = host[0]
            except Exception as ex:
                home = data['home_bts']
                current = data['current_bts']
            print " Home BTS: %s" % (home)
            print "Last Seen: %s, %s" % (
                current,
                datetime.datetime.fromtimestamp(data['updated']).ctime())
            print '----------------------------------------------------'
    except Exception as ex:
        print ex
def test_list(self):
    mock_db = riak.RiakClient()
    instance = ProjectStore(mock_db)
    (project, id) = instance.create("Sample", ["sample1", "sample2"])
    self.assertEqual(instance.active_projects[id], project)
    instance.stop(id)
    self.assertEqual(instance.active_projects, {})
def __init__(self, project_name):
    """Create the database connection"""
    self.pr = NDProject(project_name)
    # connect to Riak
    self.rcli = riak.RiakClient(host=self.pr.getKVServer(), pb_port=8087,
                                protocol='pbc')
def AddNBARecords(bucket_name, csvfile):
    """Push the NBA social media responses to Riak.

    Args:
        bucket_name: Riak bucket name to push the data to
        csvfile: Name of the CSV file to read the data from
    """
    client = riak.RiakClient(port=8091)
    bucket = client.bucket(bucket_name)
    start_time = time.time()
    with open(csvfile, 'rb') as f:
        reader = csv.reader(f)
        properties = reader.next()
        record_count = 0
        time_spent = 1
        try:
            for row in reader:
                record = {}
                for prop, val in itertools.izip(properties, row):
                    record[prop] = val
                key = uuid.uuid4().hex
                record_count += 1
                print 'Storing record #%d with key %s' % (record_count, key)
                bucket.new(key, record).store()
                time_spent = time.time() - start_time
                # Report throughput every 1,000 records.
                if record_count % 1000 == 0:
                    print 'Time taken: %d secs, Avg writes/sec: %f' % (
                        time_spent, record_count / time_spent)
        except KeyboardInterrupt:
            print 'Interrupted by user. Exiting.'
        except Exception as ex:
            print 'Something went wrong: %s' % str(ex)
        print 'Total time taken: %d secs, Avg writes/sec: %f' % (
            time_spent, record_count / time_spent)
def test_map_reduce_from_object():
    # Create the object...
    client = riak.RiakClient(HOST, PORT)
    bucket = client.bucket("bucket")
    bucket.new("foo", 2).store()
    obj = bucket.get("foo")
    result = obj.map("Riak.mapValuesJson").run()
    assert result == [2]
def __init__(self, nodes=None, memcached_servers=None):
    # Avoid mutable default arguments; fall back to local defaults.
    if nodes is None:
        nodes = [{'host': '127.0.0.1', 'pb_port': 8087}]
    if memcached_servers is None:
        memcached_servers = ['localhost:11211']
    self.rc = riak.RiakClient(protocol='pbc', nodes=nodes)
    self.mc = pylibmc.Client(servers=memcached_servers)
def test_create(self):
    mock_db = riak.RiakClient()
    mock_bucket = mock_db.bucket("message")
    mock_id = "proj-1"
    instance = ProjectStore(mock_db)
    (project, id) = instance.create("Sample", ["sample1", "sample2"],
                                    id_key=mock_id)
    self.assertEqual(project.name, "Sample")
    self.assertEqual(project.id, mock_id)
def __init__(self, nodes, bucket='nodes',
             resolver=riak.resolver.last_written_resolver, **kwargs):
    self.conn = riak.RiakClient(nodes=nodes, resolver=resolver, **kwargs)
    self.bucket = self.conn.bucket(bucket)
    super(RiakNodeStorage, self).__init__(**kwargs)