import json

from pyaccumulo import Accumulo, Mutation


def exportJsonDB(json_data, frameNum):
    """ Exports the JSON data to the Accumulo database """
    conn = Accumulo(host="localhost", port=50096, user="******", password="******")
    json_data_parsed = json.loads(json_data)  # put the JSON data back into a dictionary
    # Use the video name as the table name, normalised for Accumulo
    table = json_data_parsed['videoMetadata']['videoName']
    table = table.replace('.', '_')
    table = table.encode('ascii', 'ignore')
    if not conn.table_exists(table):
        conn.create_table(table)
    m = Mutation("row_%d" % frameNum)  # table row number is the frame number
    # Save the frame image separately from the metadata
    m.put(cf="cf2", cq="cq2", val=json_data_parsed['imageBase64'])
    if 'LabeledImage' in json_data_parsed:
        # Save the labeled image separately from the metadata
        m.put(cf="cf3", cq="cq3", val=json_data_parsed['LabeledImage'])
        # Delete the base64 representation of the labeled frame
        json_data_parsed.pop('LabeledImage', None)
    # Delete the base64 representation of the frame
    json_data_parsed.pop('imageBase64', None)
    json_data = json.dumps(json_data_parsed)
    m.put(cf="cf1", cq="cq1", val=json_data)  # first column now holds only the metadata
    conn.write(table, m)
    conn.close()
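# A minimal usage sketch for exportJsonDB, assuming the caller already holds a
# base64-encoded frame. The payload shape (videoMetadata.videoName plus
# imageBase64) mirrors what the function above reads; the file name, video
# name, and frame number here are purely illustrative.
import base64
import json

frame_bytes = open('frame_0001.jpg', 'rb').read()       # hypothetical captured frame
payload = json.dumps({
    'videoMetadata': {'videoName': 'traffic_cam.mp4'},   # stored in table "traffic_cam_mp4"
    'imageBase64': base64.b64encode(frame_bytes),
})
exportJsonDB(payload, frameNum=1)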
def _connect(self, host, port, user, password):
    try:
        self.__connection = Accumulo(host, port, user, password)
        self.__log.debug('Connected to StaticFile Store')
    except Exception as e:
        self.__log.exception('Error while connecting to StaticFile Store: %s' % str(e))
        raise Exception('Error while connecting to StaticFile Store: %s' % str(e))
def printTableDB(table):
    """ Displays the data in the database """
    conn = Accumulo(host="localhost", port=50096, user="******", password="******")
    for entry in conn.scan(table):
        print(entry.row, entry.cf, entry.cq, entry.cv, entry.ts, entry.val)
    conn.close()
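# A read-back sketch to complement printTableDB: scan only the metadata column
# of a single frame row. It assumes the same local connection settings as the
# functions above and uses pyaccumulo's Range/cols scan arguments; the table
# and row names are illustrative.
from pyaccumulo import Accumulo, Range

conn = Accumulo(host="localhost", port=50096, user="******", password="******")
single_row = Range(srow="row_42", erow="row_42")            # one frame's row
for entry in conn.scan("traffic_cam_mp4", scanrange=single_row, cols=[["cf1", "cq1"]]):
    print(entry.val)                                        # JSON metadata written by exportJsonDB
conn.close()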
@classmethod
def new(cls, elems, lbound, rbound, coin=BaseCoin(),
        conn_info=ConnInfo('localhost', 42424, 'root', 'secret'),
        table='__ADS_metadata___', elemclass=IntElem):
    """ Create a new skiplist that stores all of its data inside an
        Accumulo instance.

        Arguments:
        cls - the class implementing this class method
        elems - the elements to create the skiplist over
        lbound, rbound - the left and right boundary elements of the list
        coin - the source of randomness to use (see pace.ads.skiplist.coin)
        conn_info - how to connect to the Accumulo instance being used
        table - the name of the table to store the ADS in
        elemclass - the class to use to store the elements in the skiplist
    """
    sl = cls(None, lbound, rbound, coin)

    if conn_info is not None:
        # For connecting to a live Accumulo instance
        conn = Accumulo(host=conn_info.host, port=conn_info.port,
                        user=conn_info.user, password=conn_info.password)
    else:
        # For testing/debug
        conn = FakeConnection()

    sl.conn = conn
    sl.table = table
    sl.elemclass = elemclass

    if not conn.table_exists(table):
        conn.create_table(table)

    # Boundary sentinel nodes: the left node becomes the root of the skiplist
    right = cls.nodeclass.newnode(sl, None, None, rbound, True)
    left = cls.nodeclass.newnode(sl, None, right, lbound, True)
    sl.root = left

    for elem in elems:
        sl.insert(elem)

    return sl
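# A hedged usage sketch for new(). The concrete subclass name (AccumuloSkipList)
# is a placeholder, and whether lbound/rbound should be raw ints or IntElem
# instances depends on the elemclass in use; plain ints are assumed here purely
# for illustration.
conn_info = ConnInfo('localhost', 42424, 'root', 'secret')
sl = AccumuloSkipList.new(elems=[17, 3, 42, 8],
                          lbound=0,                 # left boundary sentinel
                          rbound=1000,              # right boundary sentinel
                          conn_info=conn_info,
                          table='demo_ads_table')
sl.insert(25)                                       # later inserts use the same API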
def __init__(self, host="localhost", port=42424, user="******", password="******",
             num_trials=100, filename='default_file.txt', seed=None,
             signer_ids=test_ids, pki=test_pki):
    self.conn = Accumulo(host=host, port=port, user=user, password=password)
    self.num_trials = num_trials
    self.filename = filename
    self.seed = seed
    self.signer_ids = signer_ids
    self.pki = pki
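# Hypothetical construction of the test harness whose __init__ appears above;
# the class name SigningBenchmark is a placeholder, and the keyword values
# simply override the defaults shown in the signature.
bench = SigningBenchmark(host='localhost', port=42424,
                         num_trials=500,
                         filename='signed_entries.txt',
                         seed=12345)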
""" Reads lines from a data file and inserts a number of records """ from pyaccumulo import Accumulo, Mutation, Range import settings import sys sys.path sys.path.append('/bdsetup') table = "well_logs" table1 = "drill_logs" conn = Accumulo(host=settings.HOST, port=settings.PORT, user=settings.USER, password=settings.PASSWORD) if conn.table_exists(table): conn.delete_table(table) conn.create_table(table) wr = conn.create_batch_writer(table) print "Ingesting some data ..." f = open("/bdsetup/acculog.txt", "rb") for i in range(250): line = f.readline().rstrip() label = '%04d' % i mut = Mutation('r_%s' % label) mut.put(cq='cq1', val=line) #mut.put(cf='cf_%s'%label, cq='cq1', val=line)
def main():
    parser = OptionParser()
    parser.add_option("-v", '--verbose', dest="verbose", action="store_true",
                      default=False, help="Verbose output")

    accumulo_group = OptionGroup(parser,
                                 'Options that control the accumulo connection')
    accumulo_group.add_option('--host', dest='host', default='localhost',
                              help='Host for Accumulo. Default: localhost')
    accumulo_group.add_option('--user', dest='user', default='root',
                              help='User for Accumulo. Default: root')
    accumulo_group.add_option('--password', dest='password', default='secret',
                              help='Password for Accumulo user. Default: ...')
    accumulo_group.add_option('--port', dest='port', type='int', default=42424,
                              help="Port for Accumulo. Default: 42424")
    parser.add_option_group(accumulo_group)

    output_group = OptionGroup(parser, 'Options that control output')
    output_group.add_option('--log-file', dest='log_file', default='output.log',
                            help='Output file for performance numbers')
    output_group.add_option('--table-prefix', dest='table_prefix', default='perf',
                            help='Prefix used for data tables')
    output_group.add_option('--profile', dest='profile', action='store_true',
                            default=False, help="Profiles encryption code")
    output_group.add_option('--cache_key', dest='cache_key', action='store_true',
                            default=False,
                            help='Keys are now cached during encryption and decryption')
    output_group.add_option('--use_accumulo_keystore', dest='accumulo_keystore',
                            action='store_true', default=False,
                            help="Keys are stored in Accumulo if option is included, "
                                 "otherwise they are stored locally")
    parser.add_option_group(output_group)

    test_group = OptionGroup(parser, "Options that control what tests are being run")
    test_group.add_option('--all', dest='all', action='store_true', default=False,
                          help='Runs all the different tests')
    test_group.add_option('--non-ceabac', dest='non_ceabac', action='store_true',
                          default=False,
                          help='Runs the non-CEABAC tests with a simple schema')
    test_group.add_option('--ceabac', dest='ceabac', action='store_true',
                          default=False,
                          help='Runs the CEABAC tests with a simple schema')
    test_group.add_option('--vis-ceabac', dest='vis_ceabac', action='store_true',
                          default=False,
                          help='Runs CEABAC in CBC mode with varying visibility fields')
    test_group.add_option('--diff_schemas_ceabac', dest='diff_ceabac',
                          action='store_true', default=False,
                          help='Runs several different schemas for VIS_CBC')
    test_group.add_option('--diff_schemas_non_ceabac', dest='diff_non_ceabac',
                          action='store_true', default=False,
                          help='Runs several different schemas for AES_CBC')
    test_group.add_option('--mixed_schemas', dest='mixed_schemas',
                          action='store_true', default=False,
                          help='Runs a set of schemas where the schemes are both '
                               'CEABAC and not')
    parser.add_option_group(test_group)

    entries_group = OptionGroup(parser,
                                "Options that control how many entries are run")
    entries_group.add_option('--num_entries', dest='num_entries', type='int',
                             default=1000, help='Total number of cells being run')
    entries_group.add_option('--num_rows', dest='num_rows', type='int',
                             default=100, help='Total number of rows being run')
    parser.add_option_group(entries_group)

    (cl_flags, _) = parser.parse_args()

    # set up logging
    if cl_flags.verbose:
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO

    logging.basicConfig(filename=cl_flags.log_file, level=log_level,
                        format='%(levelname)s-%(asctime)s: %(message)s')
    logger = logging.getLogger("performance_testing")

    # check inputs
    if cl_flags.all and (cl_flags.non_ceabac or cl_flags.ceabac
                         or cl_flags.vis_ceabac):
        logger.error('--all is already specified, do not need to define other tests to run')

    # create accumulo connection
    conn = Accumulo(host=cl_flags.host, port=cl_flags.port,
                    user=cl_flags.user, password=cl_flags.password)

    # create benchmarker
    if cl_flags.cache_key:
        logger.info('Using the caching version of the pki')
        pki = DummyCachingEncryptionPKI(
            conn=conn if cl_flags.accumulo_keystore else None)
    else:
        pki = DummyEncryptionPKI(
            conn=conn if cl_flags.accumulo_keystore else None)

    benchmarker = Benchmarker(logger=logger, pki=pki, conn=conn)

    if cl_flags.all:
        run_non_ceabac(benchmarker, cl_flags.table_prefix, logger,
                       cl_flags.profile, cl_flags)
        run_ceabac(benchmarker, cl_flags.table_prefix, logger,
                   cl_flags.profile, cl_flags)
        run_vis_ceabac(benchmarker, cl_flags.table_prefix, logger,
                       cl_flags.profile, cl_flags)
        run_diff_ceabac(benchmarker, cl_flags.table_prefix, logger,
                        cl_flags.profile, cl_flags)
        run_diff_non_ceabac(benchmarker, cl_flags.table_prefix, logger,
                            cl_flags.profile, cl_flags)
        run_mixed_schemas(benchmarker, cl_flags.table_prefix, logger,
                          cl_flags.profile, cl_flags)

    if cl_flags.non_ceabac:
        run_non_ceabac(benchmarker, cl_flags.table_prefix, logger,
                       cl_flags.profile, cl_flags)
    if cl_flags.ceabac:
        run_ceabac(benchmarker, cl_flags.table_prefix, logger,
                   cl_flags.profile, cl_flags)
    if cl_flags.vis_ceabac:
        run_vis_ceabac(benchmarker, cl_flags.table_prefix, logger,
                       cl_flags.profile, cl_flags)
    if cl_flags.diff_ceabac:
        run_diff_ceabac(benchmarker, cl_flags.table_prefix, logger,
                        cl_flags.profile, cl_flags)
    if cl_flags.diff_non_ceabac:
        run_diff_non_ceabac(benchmarker, cl_flags.table_prefix, logger,
                            cl_flags.profile, cl_flags)
    if cl_flags.mixed_schemas:
        run_mixed_schemas(benchmarker, cl_flags.table_prefix, logger,
                          cl_flags.profile, cl_flags)
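# A likely entry point for the benchmark script above. The module name in the
# example invocation is an assumption (taken from the logger name); the flags
# shown are among those defined in main(), e.g.:
#
#   python performance_testing.py --host localhost --port 42424 \
#       --user root --password secret --ceabac --num_entries 5000
#
if __name__ == '__main__':
    main()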