def run_migration(c, script):
    """Run a given migration script.

    This imports a `migrate` function from the script file. The documents in
    elasticsearch are passed one by one to this migration function. The intent
    is to let the user preserve the existing document data while modifying it
    as needed. The primary use case is test data, because actual data should
    be rebuilt from source.

    """
    migration_func = _import_migration_func(script)
    client = Client()
    # Query all documents
    resp = Individual.search(using=client).execute()
    for doc in resp.hits:
        migration_func(doc)
        doc.save(using=client)
    # Refresh to ensure all shards are up-to-date and ready for requests
    Individual._index.refresh(using=client)

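# For illustration, a minimal migration script sketch (the `status` field and
# its default are hypothetical, not part of this project): the script only
# needs to expose a `migrate(doc)` callable, which run_migration applies to
# every Individual document before re-saving it.
#
#     def migrate(doc):
#         # Backfill a default for a field added after the test data was dumped.
#         if getattr(doc, 'status', None) is None:
#             doc.status = 'unknown'
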
def gumby_client():
    """Create a Client using hosts from the ELASTICSEARCH_HOSTS environment
    variable, falling back to the default when unset."""
    hosts = os.getenv('ELASTICSEARCH_HOSTS')
    if not hosts:
        # fall back to the default host configuration
        hosts = None
    else:
        hosts = [h.strip() for h in hosts.split(',')]
    return Client(hosts)

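# A sketch of the expected environment variable format (the host names shown
# are placeholders): a comma-separated list, with surrounding whitespace
# stripped from each entry.
#
#     export ELASTICSEARCH_HOSTS="es-node-1:9200, es-node-2:9200"
#     # -> Client(['es-node-1:9200', 'es-node-2:9200'])
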
def init(c):
    """Initialize the elasticsearch instance"""
    client = Client()
    if Individual._index.exists(using=client):
        # XXX Just to ensure a clean slate each run =)
        Individual._index.delete(using=client)
    Individual.init(using=client)

def load_from_json(c, file):
    """Load Individual documents into elasticsearch from a JSON file"""
    client = Client()
    with Path(file).open('r') as fb:
        data = json.load(fb)
    # Load as objects
    indvs = [Individual(**props) for props in data]
    # Persist the items
    for indv in indvs:
        indv.save(using=client)
    # Refresh to ensure all shards are up-to-date and ready for requests
    Individual._index.refresh(using=client)

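# The JSON file is expected to hold a list of property mappings, one per
# Individual; a minimal sketch (field names here are illustrative, not the
# project's actual mapping):
#
#     [
#         {"name": "Ada", "age": 36},
#         {"name": "Grace", "age": 85}
#     ]
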
def dump_index(c):
    """Dump index as JSON to stdout"""
    client = Client()
    resp = Individual.search(using=client).execute()
    print(JSONSerializer().dumps([hit.to_dict() for hit in resp.hits]))

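# Assuming these functions are exposed as invoke tasks, dump_index pairs with
# load_from_json for round-tripping test data, e.g. (task and file names are
# hypothetical):
#
#     invoke dump-index > individuals.json
#     invoke load-from-json --file individuals.json
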
def load_random_data(c):
    """Load random data into elasticsearch"""
    client = Client()
    load_individuals_index_with_random_data(client)

def drop(c):
    """Drop the elasticsearch instance"""
    client = Client()
    if Individual._index.exists(using=client):
        Individual._index.delete(using=client)