# Imports assumed by these helpers; `cf` and `CantOpenError` are taken to come
# from camoco.Config and apsw, matching the rest of the camoco package.
import os
import glob
import shutil

import pandas as pd
import camoco as co

from camoco.Config import cf
from apsw import CantOpenError


def available_datasets(type="%", name="%"):  # pragma no cover
    try:
        cur = co.Camoco("Camoco", type="Camoco").db.cursor()
        datasets = cur.execute(
            """
            SELECT type, name, description, added
            FROM datasets
            WHERE type LIKE ?
            AND name LIKE ?
            ORDER BY type;""",
            (type, name),
        ).fetchall()
        if datasets:
            datasets = pd.DataFrame(
                datasets,
                columns=["Type", "Name", "Description", "Date Added"],
            ).set_index(["Type"])
        else:
            datasets = pd.DataFrame(
                columns=["Type", "Name", "Description", "Date Added"]
            )
        # Check to see if we are looking for a specific dataset
        if "%" not in type and "%" not in name:
            return name in datasets["Name"].values
        else:
            return datasets
    except CantOpenError as e:
        raise e
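# Return-type note (illustrative sketch only): with the default "%" wildcards
# the call returns a DataFrame of every registered dataset, while a fully
# specified type/name pair returns a bool. The "ZmRoot" name below is a
# hypothetical placeholder:
#
#   all_sets = available_datasets()                 # pandas DataFrame
#   exists = available_datasets("Expr", "ZmRoot")   # True or False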
def redescribe_dataset(type, name, new_desc):  # pragma no cover
    c = co.Camoco("Camoco")
    c.db.cursor().execute(
        """
        UPDATE datasets
        SET description = ?
        WHERE name = ? AND type = ?""",
        (new_desc, name, type),
    )
def del_dataset(type, name, force=False):  # pragma no cover
    try:
        c = co.Camoco("Camoco")
    except CantOpenError:
        return True
    if not force:
        # Ask for confirmation before deleting anything
        c.log("Are you sure you want to delete:\n {}.{}", type, name)
        if input("[Y/n]").upper() != "Y":
            c.log("Nothing Deleted")
            return
    c.log("Deleting {}", name)
    try:
        # Remove the registry entry (parameterized to avoid quoting issues)
        c.db.cursor().execute(
            """
            DELETE FROM datasets
            WHERE name LIKE ?
            AND type LIKE ?;""",
            (name, type),
        )
    except CantOpenError:
        pass
    try:
        # Remove any on-disk database files belonging to the dataset
        dfiles = glob.glob(
            os.path.join(
                cf.options.basedir, "databases", "{}.{}.*".format(type, name)
            )
        )
        for f in dfiles:
            c.log("Removing {}", f)
            try:
                os.remove(f)
            except IsADirectoryError:
                shutil.rmtree(f)
    except FileNotFoundError:
        pass
    if type == "Expr":
        # Also have to remove the COB-specific RefGen and MCL Ontology
        del_dataset("RefGen", "Filtered" + name, force=force)
        del_dataset("Ontology", name + "MCL", force=force)
    return True
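# Note on cascading deletes (hedged example; "ZmRoot" is a hypothetical name):
# removing an Expr dataset also removes the companion "Filtered<name>" RefGen
# and "<name>MCL" Ontology entries that are built alongside it.
#
#   del_dataset("Expr", "ZmRoot", force=True)
#   # ...also deletes RefGen "FilteredZmRoot" and Ontology "ZmRootMCL"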
def available_datasets(type="%", name="%"): # pragma no cover try: cur = co.Camoco("Camoco", type="Camoco").db.cursor() datasets = cur.execute( """ SELECT type, name, description, added FROM datasets WHERE type LIKE ? AND name LIKE ? ORDER BY type;""", (type, name), ).fetchall() if datasets: datasets = pd.DataFrame( datasets, columns=["Type", "Name", "Description", "Date Added"]).set_index(["Type"]) else: datasets = pd.DataFrame( columns=["Type", "Name", "Description", "Date Added"]) # Check to see if we are looking for a specific dataset if "%" not in type and "%" not in name: return True if name in datasets["Name"].values else False else: return datasets except CantOpenError as e: raise e
def mv_dataset(type, name, new_name):  # pragma no cover
    # Rename the registry entry, then move the on-disk database to match
    c = co.Camoco("Camoco")
    c.db.cursor().execute(
        """
        UPDATE datasets
        SET name = ?
        WHERE name = ? AND type = ?""",
        (new_name, name, type),
    )
    os.rename(
        c._resource("databases", ".".join([type, name]) + ".db"),
        c._resource("databases", ".".join([type, new_name]) + ".db"),
    )
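# A minimal end-to-end sketch, assuming a working camoco installation with at
# least one built Expr dataset; the "ZmRoot" name is a hypothetical placeholder,
# not a dataset this module guarantees to exist.
if __name__ == "__main__":  # pragma no cover
    # List everything currently registered with Camoco
    print(available_datasets())
    # Rename an expression network if it exists
    if available_datasets("Expr", "ZmRoot"):
        mv_dataset("Expr", "ZmRoot", "ZmRootV2")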